diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index 9cc2228c8..ad74c03c5 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -1,7 +1,7 @@ ## chat2 is an example of usage of Waku v2. For suggested usage options, please ## see dingpu tutorial in docs folder. -when not(compileOption("threads")): +when not (compileOption("threads")): {.fatal: "Please, compile this program with the --threads:on option!".} when (NimMajor, NimMinor) < (1, 4): @@ -10,18 +10,29 @@ else: {.push raises: [].} import std/[strformat, strutils, times, options, random] -import confutils, chronicles, chronos, stew/shims/net as stewNet, - eth/keys, bearssl, stew/[byteutils, results], - metrics, - metrics/chronos_httpserver -import libp2p/[switch, # manage transports, a single entry point for dialing and listening - crypto/crypto, # cryptographic functions - stream/connection, # create and close stream read / write connections - multiaddress, # encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP - peerinfo, # manage the information of a peer, such as peer ID and public / private key - peerid, # Implement how peers interact - protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs - nameresolving/dnsresolver]# define DNS resolution +import + confutils, + chronicles, + chronos, + stew/shims/net as stewNet, + eth/keys, + bearssl, + stew/[byteutils, results], + metrics, + metrics/chronos_httpserver +import + libp2p/[ + switch, # manage transports, a single entry point for dialing and listening + crypto/crypto, # cryptographic functions + stream/connection, # create and close stream read / write connections + multiaddress, + # encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP + peerinfo, + # manage the information of a peer, such as peer ID and public / private key + peerid, # Implement how peers interact + protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs + nameresolving/dnsresolver, + ] # define DNS resolution import ../../waku/waku_core, ../../waku/waku_lightpush/common, @@ -37,13 +48,11 @@ import ../../waku/common/utils/nat, ./config_chat2 -import - libp2p/protocols/pubsub/rpc/messages, - libp2p/protocols/pubsub/pubsub -import - ../../waku/waku_rln_relay +import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub +import ../../waku/waku_rln_relay -const Help = """ +const Help = + """ Commands: /[?|help|connect|nick|exit] help: Prints this help connect: dials a remote peer @@ -55,14 +64,14 @@ const Help = """ # Could poll connection pool or something here, I suppose # TODO Ensure connected turns true on incoming connections, or get rid of it type Chat = ref object - node: WakuNode # waku node for publishing, subscribing, etc - transp: StreamTransport # transport streams between read & write file descriptor - subscribed: bool # indicates if a node is subscribed or not to a topic - connected: bool # if the node is connected to another peer - started: bool # if the node has started - nick: string # nickname for this chat session - prompt: bool # chat prompt is showing - contentTopic: string # default content topic for chat messages + node: WakuNode # waku node for publishing, subscribing, etc + transp: StreamTransport # transport streams between read & write file descriptor + subscribed: bool # indicates if a node is subscribed or not to a topic + connected: bool # if the node is connected to another peer + 
started: bool # if the node has started + nick: string # nickname for this chat session + prompt: bool # chat prompt is showing + contentTopic: string # default content topic for chat messages type PrivateKey* = crypto.PrivateKey @@ -85,11 +94,11 @@ proc init*(T: type Chat2Message, buffer: seq[byte]): ProtoResult[T] = let pb = initProtoBuffer(buffer) var timestamp: uint64 - discard ? pb.getField(1, timestamp) + discard ?pb.getField(1, timestamp) msg.timestamp = int64(timestamp) - discard ? pb.getField(2, msg.nick) - discard ? pb.getField(3, msg.payload) + discard ?pb.getField(2, msg.nick) + discard ?pb.getField(3, msg.payload) ok(msg) @@ -124,19 +133,25 @@ proc showChatPrompt(c: Chat) = except IOError: discard -proc getChatLine(c: Chat, msg:WakuMessage): Result[string, string]= +proc getChatLine(c: Chat, msg: WakuMessage): Result[string, string] = # No payload encoding/encryption from Waku let pb = Chat2Message.init(msg.payload) - chatLine = if pb.isOk: pb[].toString() - else: string.fromBytes(msg.payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(msg.payload) return ok(chatline) proc printReceivedMessage(c: Chat, msg: WakuMessage) = let pb = Chat2Message.init(msg.payload) - chatLine = if pb.isOk: pb[].toString() - else: string.fromBytes(msg.payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(msg.payload) try: echo &"{chatLine}" except ValueError: @@ -145,8 +160,8 @@ proc printReceivedMessage(c: Chat, msg: WakuMessage) = c.prompt = false showChatPrompt(c) - trace "Printing message", topic=DefaultPubsubTopic, chatLine, - contentTopic = msg.contentTopic + trace "Printing message", + topic = DefaultPubsubTopic, chatLine, contentTopic = msg.contentTopic proc readNick(transp: StreamTransport): Future[string] {.async.} = # Chat prompt @@ -154,9 +169,10 @@ proc readNick(transp: StreamTransport): Future[string] {.async.} = stdout.flushFile() return await transp.readLine() - -proc startMetricsServer(serverIp: IpAddress, serverPort: Port): Result[MetricsHttpServerRef, string] = - info "Starting metrics HTTP server", serverIp= $serverIp, serverPort= $serverPort +proc startMetricsServer( + serverIp: IpAddress, serverPort: Port +): Result[MetricsHttpServerRef, string] = + info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort) if metricsServerRes.isErr(): @@ -168,23 +184,25 @@ proc startMetricsServer(serverIp: IpAddress, serverPort: Port): Result[MetricsHt except CatchableError: return err("metrics HTTP server start failed: " & getCurrentExceptionMsg()) - info "Metrics HTTP server started", serverIp= $serverIp, serverPort= $serverPort + info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort ok(metricsServerRes.value) - proc publish(c: Chat, line: string) = # First create a Chat2Message protobuf with this line of text let time = getTime().toUnix() - let chat2pb = Chat2Message(timestamp: time, - nick: c.nick, - payload: line.toBytes()).encode() + let chat2pb = + Chat2Message(timestamp: time, nick: c.nick, payload: line.toBytes()).encode() ## @TODO: error handling on failure proc handler(response: PushResponse) {.gcsafe, closure.} = - trace "lightpush response received", response=response + trace "lightpush response received", response = response - var message = WakuMessage(payload: chat2pb.buffer, - contentTopic: c.contentTopic, version: 0, timestamp: getNanosecondTime(time)) + var message = WakuMessage( + payload: 
chat2pb.buffer, + contentTopic: c.contentTopic, + version: 0, + timestamp: getNanosecondTime(time), + ) if not isNil(c.node.wakuRlnRelay): # for future version when we support more than one rln protected content topic, # we should check the message content topic as well @@ -201,7 +219,8 @@ proc publish(c: Chat, line: string) = # TODO move it to log after dogfooding let msgEpoch = fromEpoch(proof.epoch) if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == msgEpoch: - echo "--rln epoch: ", msgEpoch, " ⚠️ message rate violation! you are spamming the network!" + echo "--rln epoch: ", + msgEpoch, " ⚠️ message rate violation! you are spamming the network!" else: echo "--rln epoch: ", msgEpoch # update the last epoch @@ -216,25 +235,25 @@ proc publish(c: Chat, line: string) = (waitFor c.node.publish(some(DefaultPubsubTopic), message)).isOkOr: error "failed to publish message", error = error except CatchableError: - error "caught error publishing message: ", error = getCurrentExceptionMsg() + error "caught error publishing message: ", error = getCurrentExceptionMsg() # TODO This should read or be subscribe handler subscribe proc readAndPrint(c: Chat) {.async.} = while true: -# while p.connected: -# # TODO: echo &"{p.id} -> " -# -# echo cast[string](await p.conn.readLp(1024)) + # while p.connected: + # # TODO: echo &"{p.id} -> " + # + # echo cast[string](await p.conn.readLp(1024)) #echo "readAndPrint subscribe NYI" await sleepAsync(100.millis) # TODO Implement proc writeAndPrint(c: Chat) {.async.} = while true: -# Connect state not updated on incoming WakuRelay connections -# if not c.connected: -# echo "type an address or wait for a connection:" -# echo "type /[help|?] for help" + # Connect state not updated on incoming WakuRelay connections + # if not c.connected: + # echo "type an address or wait for a connection:" + # echo "type /[help|?] for help" # Chat prompt showChatPrompt(c) @@ -244,11 +263,11 @@ proc writeAndPrint(c: Chat) {.async.} = echo Help continue -# if line.startsWith("/disconnect"): -# echo "Ending current session" -# if p.connected and p.conn.closed.not: -# await p.conn.close() -# p.connected = false + # if line.startsWith("/disconnect"): + # echo "Ending current session" + # if p.connected and p.conn.closed.not: + # await p.conn.close() + # p.connected = false elif line.startsWith("/connect"): # TODO Should be able to connect to multiple peers for Waku chat if c.connected: @@ -259,19 +278,21 @@ proc writeAndPrint(c: Chat) {.async.} = let address = await c.transp.readLine() if address.len > 0: await c.connectToNodes(@[address]) - elif line.startsWith("/nick"): # Set a new nickname c.nick = await readNick(c.transp) echo "You are now known as " & c.nick - elif line.startsWith("/exit"): if not c.node.wakuFilterLegacy.isNil(): echo "unsubscribing from content filters..." let peerOpt = c.node.peerManager.selectPeer(WakuLegacyFilterCodec) if peerOpt.isSome(): - await c.node.legacyFilterUnsubscribe(pubsubTopic=some(DefaultPubsubTopic), contentTopics=c.contentTopic, peer=peerOpt.get()) + await c.node.legacyFilterUnsubscribe( + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = c.contentTopic, + peer = peerOpt.get(), + ) echo "quitting..." 
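Aside: the `publish` path above wraps each typed line in the three-field `Chat2Message` protobuf that `Chat2Message.init` decodes on receipt. A minimal round-trip sketch using nim-libp2p's minprotobuf, with the field numbers taken from this diff (the literal values are illustrative):

```nim
import libp2p/protobuf/minprotobuf, stew/byteutils

# Encode, mirroring Chat2Message.encode(): field 1 = timestamp, 2 = nick, 3 = payload.
var pb = initProtoBuffer()
pb.write(1, uint64(1700000000))
pb.write(2, "alice")
pb.write(3, "hi waku".toBytes())

# Decode, mirroring Chat2Message.init(buffer) earlier in this file.
let decoded = initProtoBuffer(pb.buffer)
var timestamp: uint64
var nick: string
var payload: seq[byte]
discard decoded.getField(1, timestamp)
discard decoded.getField(2, nick)
discard decoded.getField(3, payload)
assert nick == "alice" and string.fromBytes(payload) == "hi waku"
```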
@@ -307,21 +328,28 @@ proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} = let line = stdin.readLine() discard waitFor transp.write(line & "\r\n") -{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError +{.pop.} + # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = let transp = fromPipe(rfd) conf = Chat2Conf.load() - nodekey = if conf.nodekey.isSome(): conf.nodekey.get() - else: PrivateKey.random(Secp256k1, rng[]).tryGet() + nodekey = + if conf.nodekey.isSome(): + conf.nodekey.get() + else: + PrivateKey.random(Secp256k1, rng[]).tryGet() # set log level if conf.logLevel != LogLevel.NONE: setLogLevel(conf.logLevel) - let natRes = setupNat(conf.nat, clientId, - Port(uint16(conf.tcpPort) + conf.portsShift), - Port(uint16(conf.udpPort) + conf.portsShift)) + let natRes = setupNat( + conf.nat, + clientId, + Port(uint16(conf.tcpPort) + conf.portsShift), + Port(uint16(conf.udpPort) + conf.portsShift), + ) if natRes.isErr(): raise newException(ValueError, "setupNat error " & natRes.error) @@ -333,20 +361,28 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = let recordRes = enrBuilder.build() let record = if recordRes.isErr(): - error "failed to create enr record", error=recordRes.error + error "failed to create enr record", error = recordRes.error quit(QuitFailure) - else: recordRes.get() + else: + recordRes.get() let node = block: - var builder = WakuNodeBuilder.init() - builder.withNodeKey(nodeKey) - builder.withRecord(record) - builder.withNetworkConfigurationDetails(conf.listenAddress, Port(uint16(conf.tcpPort) + conf.portsShift), - extIp, extTcpPort, - wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift), - wsEnabled = conf.websocketSupport, - wssEnabled = conf.websocketSecureSupport).tryGet() - builder.build().tryGet() + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + + builder + .withNetworkConfigurationDetails( + conf.listenAddress, + Port(uint16(conf.tcpPort) + conf.portsShift), + extIp, + extTcpPort, + wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift), + wsEnabled = conf.websocketSupport, + wssEnabled = conf.websocketSecureSupport, + ) + .tryGet() + builder.build().tryGet() await node.start() @@ -361,14 +397,16 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = let nick = await readNick(transp) echo "Welcome, " & nick & "!" - var chat = Chat(node: node, - transp: transp, - subscribed: true, - connected: false, - started: true, - nick: nick, - prompt: false, - contentTopic: conf.contentTopic) + var chat = Chat( + node: node, + transp: transp, + subscribed: true, + connected: false, + started: true, + nick: nick, + prompt: false, + contentTopic: conf.contentTopic, + ) if conf.staticnodes.len > 0: echo "Connecting to static peers..." @@ -381,14 +419,17 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = echo "Connecting to " & $conf.fleet & " fleet using DNS discovery..." 
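The node above is assembled with the same `WakuNodeBuilder` chain that recurs throughout this diff. A pared-down sketch of that flow (the bind address and port are illustrative, and the module path exposing the builder is an assumption):

```nim
import chronos, stew/results, stew/shims/net as stewNet, libp2p/crypto/crypto
import ../../waku/node/waku_node # assumed path for WakuNodeBuilder/WakuNode

proc buildMinimalNode(): auto =
  let rng = crypto.newRng()
  let key = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet()
  var builder = WakuNodeBuilder.init()
  builder.withNodeKey(key)
  # Two-argument form, as used by the network monitor later in this diff.
  builder.withNetworkConfigurationDetails(parseIpAddress("0.0.0.0"), Port(60000)).tryGet()
  builder.build().tryGet()

let node = buildMinimalNode()
waitFor node.start()
```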
if conf.fleet == Fleet.test: - dnsDiscoveryUrl = some("enrtree://AO47IDOLBKH72HIZZOXQP6NMRESAN7CHYWIBNXDXWRJRZWLODKII6@test.wakuv2.nodes.status.im") + dnsDiscoveryUrl = some( + "enrtree://AO47IDOLBKH72HIZZOXQP6NMRESAN7CHYWIBNXDXWRJRZWLODKII6@test.wakuv2.nodes.status.im" + ) else: # Connect to prod by default - dnsDiscoveryUrl = some("enrtree://ANEDLO25QVUGJOUTQFRYKWX6P4Z4GKVESBMHML7DZ6YK4LGS5FC5O@prod.wakuv2.nodes.status.im") - + dnsDiscoveryUrl = some( + "enrtree://ANEDLO25QVUGJOUTQFRYKWX6P4Z4GKVESBMHML7DZ6YK4LGS5FC5O@prod.wakuv2.nodes.status.im" + ) elif conf.dnsDiscovery and conf.dnsDiscoveryUrl != "": # No pre-selected fleet. Discover nodes via DNS using user config - debug "Discovering nodes using Waku DNS discovery", url=conf.dnsDiscoveryUrl + debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl) var discoveredNodes: seq[RemotePeerInfo] @@ -401,12 +442,11 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = let dnsResolver = DnsResolver.new(nameServers) proc resolver(domain: string): Future[string] {.async, gcsafe.} = - trace "resolving", domain=domain + trace "resolving", domain = domain let resolved = await dnsResolver.resolveTxt(domain) return resolved[0] # Use only first answer - var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), - resolver) + var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver) if wakuDnsDiscovery.isOk: let discoveredPeers = wakuDnsDiscovery.get().findPeers() if discoveredPeers.isOk: @@ -432,10 +472,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = storenode = some(peerInfo.value) else: error "Incorrect conf.storenode", error = peerInfo.error - elif discoveredNodes.len > 0: echo "Store enabled, but no store nodes configured. Choosing one at random from discovered peers" - storenode = some(discoveredNodes[rand(0..len(discoveredNodes) - 1)]) + storenode = some(discoveredNodes[rand(0 .. len(discoveredNodes) - 1)]) if storenode.isSome(): # We have a viable storenode. Let's query it for historical messages. @@ -448,8 +487,11 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = for msg in response.messages: let pb = Chat2Message.init(msg.payload) - chatLine = if pb.isOk: pb[].toString() - else: string.fromBytes(msg.payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(msg.payload) echo &"{chatLine}" info "Hit store handler" @@ -466,7 +508,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec) else: error "LightPush not mounted. 
Couldn't parse conf.lightpushnode", - error = peerInfo.error + error = peerInfo.error if conf.filternode != "": let peerInfo = parsePeerInfo(conf.filternode) @@ -476,19 +518,22 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = await node.mountFilterClient() node.peerManager.addServicePeer(peerInfo.value, WakuLegacyFilterCodec) - proc filterHandler(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async, gcsafe, closure.} = - trace "Hit filter handler", contentTopic=msg.contentTopic + proc filterHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = + trace "Hit filter handler", contentTopic = msg.contentTopic chat.printReceivedMessage(msg) - await node.legacyFilterSubscribe(pubsubTopic=some(DefaultPubsubTopic), - contentTopics=chat.contentTopic, - filterHandler, - peerInfo.value) + await node.legacyFilterSubscribe( + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = chat.contentTopic, + filterHandler, + peerInfo.value, + ) # TODO: Here to support FilterV2 relevant subscription, but still # Legacy Filter is concurrent to V2 until legacy filter will be removed else: - error "Filter not mounted. Couldn't parse conf.filternode", - error = peerInfo.error + error "Filter not mounted. Couldn't parse conf.filternode", error = peerInfo.error # Subscribe to a topic, if relay is mounted if conf.relay: @@ -524,7 +569,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = rlnRelayCredPath: conf.rlnRelayCredPath, rlnRelayCredPassword: conf.rlnRelayCredPassword, rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit, - rlnEpochSizeSec: conf.rlnEpochSizeSec + rlnEpochSizeSec: conf.rlnEpochSizeSec, ) else: let rlnConf = WakuRlnConfig( @@ -534,16 +579,16 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress), rlnRelayCredPath: conf.rlnRelayCredPath, rlnRelayCredPassword: conf.rlnRelayCredPassword, - rlnEpochSizeSec: conf.rlnEpochSizeSec + rlnEpochSizeSec: conf.rlnEpochSizeSec, ) - waitFor node.mountRlnRelay(rlnConf, - spamHandler=some(spamHandler)) + waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler)) let membershipIndex = node.wakuRlnRelay.groupManager.membershipIndex.get() let identityCredential = node.wakuRlnRelay.groupManager.idCredentials.get() echo "your membership index is: ", membershipIndex - echo "your rln identity commitment key is: ", identityCredential.idCommitment.inHex() + echo "your rln identity commitment key is: ", + identityCredential.idCommitment.inHex() else: info "WakuRLNRelay is disabled" echo "WakuRLNRelay is disabled, please enable it by passing in the --rln-relay flag" @@ -552,11 +597,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = if conf.metricsServer: let metricsServer = startMetricsServer( - conf.metricsServerAddress, - Port(conf.metricsServerPort + conf.portsShift) + conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift) ) - await chat.readWriteLoop() if conf.keepAlive: @@ -578,7 +621,6 @@ proc main(rng: ref HmacDrbgContext) {.async.} = except ConfigurationError as e: raise e - when isMainModule: # isMainModule = true when the module is compiled as the main file let rng = crypto.newRng() try: diff --git a/apps/chat2/config_chat2.nim b/apps/chat2/config_chat2.nim index 4d0e6299f..ae32d9d3e 100644 --- a/apps/chat2/config_chat2.nim +++ b/apps/chat2/config_chat2.nim @@ -1,254 +1,268 @@ import - chronicles, chronos, - confutils, confutils/defs,
confutils/std/net, + chronicles, + chronos, + confutils, + confutils/defs, + confutils/std/net, eth/keys, libp2p/crypto/crypto, libp2p/crypto/secp, nimcrypto/utils, std/strutils, regex -import - ../../../waku/waku_core +import ../../../waku/waku_core type - Fleet* = enum + Fleet* = enum none prod test + EthRpcUrl = distinct string - Chat2Conf* = object - ## General node config - + Chat2Conf* = object ## General node config logLevel* {. - desc: "Sets the log level." - defaultValue: LogLevel.INFO - name: "log-level" }: LogLevel + desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level" + .}: LogLevel - nodekey* {. - desc: "P2P node private key as 64 char hex string.", - name: "nodekey" }: Option[crypto.PrivateKey] + nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}: + Option[crypto.PrivateKey] listenAddress* {. - defaultValue: defaultListenAddress(config) - desc: "Listening address for the LibP2P traffic." - name: "listen-address"}: IpAddress + defaultValue: defaultListenAddress(config), + desc: "Listening address for the LibP2P traffic.", + name: "listen-address" + .}: IpAddress - tcpPort* {. - desc: "TCP listening port." - defaultValue: 60000 - name: "tcp-port" }: Port + tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}: + Port - udpPort* {. - desc: "UDP listening port." - defaultValue: 60000 - name: "udp-port" }: Port + udpPort* {.desc: "UDP listening port.", defaultValue: 60000, name: "udp-port".}: + Port portsShift* {. - desc: "Add a shift to all port numbers." - defaultValue: 0 - name: "ports-shift" }: uint16 + desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift" + .}: uint16 nat* {. - desc: "Specify method to use for determining public address. " & - "Must be one of: any, none, upnp, pmp, extip:." - defaultValue: "any" }: string + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:.", + defaultValue: "any" + .}: string ## Persistence config - dbPath* {. - desc: "The database path for peristent storage", - defaultValue: "" - name: "db-path" }: string + desc: "The database path for persistent storage", defaultValue: "", name: "db-path" + .}: string persistPeers* {. desc: "Enable peer persistence: true|false", - defaultValue: false - name: "persist-peers" }: bool + defaultValue: false, + name: "persist-peers" + .}: bool persistMessages* {. desc: "Enable message persistence: true|false", - defaultValue: false - name: "persist-messages" }: bool + defaultValue: false, + name: "persist-messages" + .}: bool ## Relay config - relay* {. - desc: "Enable relay protocol: true|false", - defaultValue: true - name: "relay" }: bool + desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay" + .}: bool staticnodes* {. - desc: "Peer multiaddr to directly connect with. Argument may be repeated." - name: "staticnode" }: seq[string] + desc: "Peer multiaddr to directly connect with. Argument may be repeated.", + name: "staticnode" + .}: seq[string] keepAlive* {. desc: "Enable keep-alive for idle connections: true|false", - defaultValue: false - name: "keep-alive" }: bool + defaultValue: false, + name: "keep-alive" + .}: bool topics* {. - desc: "Default topics to subscribe to (space separated list)."
- defaultValue: "/waku/2/default-waku/proto" - name: "topics" .}: string + desc: "Default topics to subscribe to (space separated list).", + defaultValue: "/waku/2/default-waku/proto", + name: "topics" + .}: string ## Store config - store* {. - desc: "Enable store protocol: true|false", - defaultValue: true - name: "store" }: bool + desc: "Enable store protocol: true|false", defaultValue: true, name: "store" + .}: bool storenode* {. - desc: "Peer multiaddr to query for storage.", - defaultValue: "" - name: "storenode" }: string + desc: "Peer multiaddr to query for storage.", defaultValue: "", name: "storenode" + .}: string ## Filter config - filter* {. - desc: "Enable filter protocol: true|false", - defaultValue: false - name: "filter" }: bool + desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter" + .}: bool filternode* {. desc: "Peer multiaddr to request content filtering of messages.", - defaultValue: "" - name: "filternode" }: string + defaultValue: "", + name: "filternode" + .}: string ## Lightpush config - lightpush* {. desc: "Enable lightpush protocol: true|false", - defaultValue: false - name: "lightpush" }: bool + defaultValue: false, + name: "lightpush" + .}: bool lightpushnode* {. desc: "Peer multiaddr to request lightpush of published messages.", - defaultValue: "" - name: "lightpushnode" }: string + defaultValue: "", + name: "lightpushnode" + .}: string ## Metrics config - metricsServer* {. - desc: "Enable the metrics server: true|false" - defaultValue: false - name: "metrics-server" }: bool + desc: "Enable the metrics server: true|false", + defaultValue: false, + name: "metrics-server" + .}: bool metricsServerAddress* {. - desc: "Listening address of the metrics server." - defaultValue: parseIpAddress("127.0.0.1") - name: "metrics-server-address" }: IpAddress + desc: "Listening address of the metrics server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress metricsServerPort* {. - desc: "Listening HTTP port of the metrics server." - defaultValue: 8008 - name: "metrics-server-port" }: uint16 + desc: "Listening HTTP port of the metrics server.", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 metricsLogging* {. - desc: "Enable metrics logging: true|false" - defaultValue: true - name: "metrics-logging" }: bool + desc: "Enable metrics logging: true|false", + defaultValue: true, + name: "metrics-logging" + .}: bool ## DNS discovery config - dnsDiscovery* {. - desc: "Enable discovering nodes via DNS" - defaultValue: false - name: "dns-discovery" }: bool + desc: "Enable discovering nodes via DNS", + defaultValue: false, + name: "dns-discovery" + .}: bool dnsDiscoveryUrl* {. desc: "URL for DNS node list in format 'enrtree://@'", - defaultValue: "" - name: "dns-discovery-url" }: string + defaultValue: "", + name: "dns-discovery-url" + .}: string dnsDiscoveryNameServers* {. - desc: "DNS name server IPs to query. Argument may be repeated." - defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] - name: "dns-discovery-name-server" }: seq[IpAddress] + desc: "DNS name server IPs to query. Argument may be repeated.", + defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], + name: "dns-discovery-name-server" + .}: seq[IpAddress] ## Chat2 configuration - fleet* {. - desc: "Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet." - defaultValue: Fleet.prod - name: "fleet" }: Fleet + desc: + "Select the fleet to connect to. 
This sets the DNS discovery URL to the selected fleet.", + defaultValue: Fleet.prod, + name: "fleet" + .}: Fleet contentTopic* {. - desc: "Content topic for chat messages." - defaultValue: "/toy-chat/2/huilong/proto" - name: "content-topic" }: string + desc: "Content topic for chat messages.", + defaultValue: "/toy-chat/2/huilong/proto", + name: "content-topic" + .}: string ## Websocket Configuration websocketSupport* {. desc: "Enable websocket: true|false", - defaultValue: false - name: "websocket-support"}: bool + defaultValue: false, + name: "websocket-support" + .}: bool websocketPort* {. - desc: "WebSocket listening port." - defaultValue: 8000 - name: "websocket-port" }: Port + desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port" + .}: Port websocketSecureSupport* {. - desc: "WebSocket Secure Support." - defaultValue: false - name: "websocket-secure-support" }: bool + desc: "WebSocket Secure Support.", + defaultValue: false, + name: "websocket-secure-support" + .}: bool ## rln-relay configuration - rlnRelay* {. desc: "Enable spam protection through rln-relay: true|false", - defaultValue: false - name: "rln-relay" }: bool + defaultValue: false, + name: "rln-relay" + .}: bool rlnRelayCredPath* {. desc: "The path for persisting rln-relay credential", - defaultValue: "" - name: "rln-relay-cred-path" }: string + defaultValue: "", + name: "rln-relay-cred-path" + .}: string rlnRelayCredIndex* {. - desc: "the index of the onchain commitment to use", - name: "rln-relay-cred-index" }: Option[uint] + desc: "the index of the onchain commitment to use", name: "rln-relay-cred-index" + .}: Option[uint] rlnRelayDynamic* {. desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false", - defaultValue: false - name: "rln-relay-dynamic" }: bool + defaultValue: false, + name: "rln-relay-dynamic" + .}: bool rlnRelayIdKey* {. desc: "Rln relay identity secret key as a Hex string", - defaultValue: "" - name: "rln-relay-id-key" }: string + defaultValue: "", + name: "rln-relay-id-key" + .}: string rlnRelayIdCommitmentKey* {. desc: "Rln relay identity commitment key as a Hex string", - defaultValue: "" - name: "rln-relay-id-commitment-key" }: string + defaultValue: "", + name: "rln-relay-id-commitment-key" + .}: string rlnRelayEthClientAddress* {. desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", - defaultValue: "http://localhost:8540/" - name: "rln-relay-eth-client-address" }: EthRpcUrl + defaultValue: "http://localhost:8540/", + name: "rln-relay-eth-client-address" + .}: EthRpcUrl rlnRelayEthContractAddress* {. desc: "Address of membership contract on an Ethereum testnet", - defaultValue: "" - name: "rln-relay-eth-contract-address" }: string + defaultValue: "", + name: "rln-relay-eth-contract-address" + .}: string rlnRelayCredPassword* {. desc: "Password for encrypting RLN credentials", - defaultValue: "" - name: "rln-relay-cred-password" }: string + defaultValue: "", + name: "rln-relay-cred-password" + .}: string rlnRelayUserMessageLimit* {. - desc: "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", + desc: + "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", defaultValue: 1, - name: "rln-relay-user-message-limit" .}: uint64 + name: "rln-relay-user-message-limit" + .}: uint64 rlnEpochSizeSec* {. - desc: "Epoch size in seconds used to rate limit RLN memberships.
Default is 1 second.", - defaultValue: 1 - name: "rln-relay-epoch-sec" .}: uint64 + desc: + "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.", + defaultValue: 1, + name: "rln-relay-epoch-sec" + .}: uint64 # NOTE: Keys are different in nim-libp2p proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T = @@ -300,10 +314,14 @@ proc parseCmdArg*(T: type EthRpcUrl, s: string): T = ## https://url:port/path?query ## disallowed patterns: ## any valid/invalid ws or wss url - var httpPattern = re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" - var wsPattern = re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var httpPattern = + re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var wsPattern = + re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" if regex.match(s, wsPattern): - raise newException(ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL") + raise newException( + ValueError, "Websocket RPC URL is not supported. Please use an HTTP URL" + ) if not regex.match(s, httpPattern): raise newException(ValueError, "Invalid HTTP RPC URL") return EthRpcUrl(s) diff --git a/apps/chat2bridge/chat2bridge.nim b/apps/chat2bridge/chat2bridge.nim index dba70be83..97e0f328f 100644 --- a/apps/chat2bridge/chat2bridge.nim +++ b/apps/chat2bridge/chat2bridge.nim @@ -5,8 +5,13 @@ else: import std/[tables, times, strutils, hashes, sequtils], - chronos, confutils, chronicles, chronicles/topics_registry, chronos/streams/tlsstream, - metrics, metrics/chronos_httpserver, + chronos, + confutils, + chronicles, + chronicles/topics_registry, + chronos/streams/tlsstream, + metrics, + metrics/chronos_httpserver, stew/byteutils, eth/net/nat, json_rpc/rpcserver, @@ -27,7 +32,8 @@ import # Common cli config ./config_chat2bridge -declarePublicCounter chat2_mb_transfers, "Number of messages transferred between chat2 and Matterbridge", ["type"] +declarePublicCounter chat2_mb_transfers, + "Number of messages transferred between chat2 and Matterbridge", ["type"] declarePublicCounter chat2_mb_dropped, "Number of messages dropped", ["reason"] logScope: @@ -37,8 +43,7 @@ logScope: # Default values # ################## -const - DeduplQSize = 20 # Maximum number of seen messages to keep in deduplication queue +const DeduplQSize = 20 # Maximum number of seen messages to keep in deduplication queue ######### # Types # ######### @@ -53,7 +58,7 @@ type seen: seq[Hash] #FIFO queue contentTopic: string - MbMessageHandler = proc (jsonNode: JsonNode) {.async.} + MbMessageHandler = proc(jsonNode: JsonNode) {.async.} ################### # Helper functions # ################### proc containsOrAdd(sequence: var seq[Hash], hash: Hash): bool = if sequence.len >= DeduplQSize: trace "Deduplication queue full. Removing oldest item."
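Before moving on through chat2bridge: the `parseCmdArg` overload for `EthRpcUrl` just above accepts plain HTTP(S) endpoints and rejects any websocket URL. A small usage sketch (confutils normally invokes this during CLI parsing; calling it directly like this assumes same-module access, since `EthRpcUrl` is not exported):

```nim
doAssert string(parseCmdArg(EthRpcUrl, "http://localhost:8540/")) == "http://localhost:8540/"

try:
  discard parseCmdArg(EthRpcUrl, "wss://mainnet.example.org") # any ws/wss URL
except ValueError as e:
  echo "rejected as expected: ", e.msg
```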
- sequence.delete 0, 0 # Remove first item in queue + sequence.delete 0, 0 # Remove first item in queue sequence.add(hash) return false -proc toWakuMessage(cmb: Chat2MatterBridge, jsonNode: JsonNode): WakuMessage {.raises: [Defect, KeyError]} = +proc toWakuMessage( + cmb: Chat2MatterBridge, jsonNode: JsonNode +): WakuMessage {.raises: [Defect, KeyError].} = # Translates a Matterbridge API JSON response to a Waku v2 message let msgFields = jsonNode.getFields() # @TODO error handling here - verify expected fields - let chat2pb = Chat2Message(timestamp: getTime().toUnix(), # @TODO use provided timestamp - nick: msgFields["username"].getStr(), - payload: msgFields["text"].getStr().toBytes()).encode() + let chat2pb = Chat2Message( + timestamp: getTime().toUnix(), # @TODO use provided timestamp + nick: msgFields["username"].getStr(), + payload: msgFields["text"].getStr().toBytes(), + ).encode() - WakuMessage(payload: chat2pb.buffer, - contentTopic: cmb.contentTopic, - version: 0) + WakuMessage(payload: chat2pb.buffer, contentTopic: cmb.contentTopic, version: 0) proc toChat2(cmb: Chat2MatterBridge, jsonNode: JsonNode) {.async.} = let msg = cmb.toWakuMessage(jsonNode) @@ -100,7 +107,9 @@ proc toChat2(cmb: Chat2MatterBridge, jsonNode: JsonNode) {.async.} = (await cmb.nodev2.publish(some(DefaultPubsubTopic), msg)).isOkOr: error "failed to publish message", error = error -proc toMatterbridge(cmb: Chat2MatterBridge, msg: WakuMessage) {.gcsafe, raises: [Exception].} = +proc toMatterbridge( + cmb: Chat2MatterBridge, msg: WakuMessage +) {.gcsafe, raises: [Exception].} = if cmb.seen.containsOrAdd(msg.payload.hash()): # This is a duplicate message. Return. chat2_mb_dropped.inc(labelValues = ["duplicate"]) @@ -119,8 +128,9 @@ proc toMatterbridge(cmb: Chat2MatterBridge, msg: WakuMessage) {.gcsafe, raises: assert chat2Msg.isOk - let postRes = cmb.mbClient.postMessage(text = string.fromBytes(chat2Msg[].payload), - username = chat2Msg[].nick) + let postRes = cmb.mbClient.postMessage( + text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick + ) if postRes.isErr() or (postRes[] == false): chat2_mb_dropped.inc(labelValues = ["duplicate"]) @@ -142,41 +152,50 @@ proc pollMatterbridge(cmb: Chat2MatterBridge, handler: MbMessageHandler) {.async ############## # Public API # ############## -proc new*(T: type Chat2MatterBridge, - # Matterbridge initialisation - mbHostUri: string, - mbGateway: string, - # NodeV2 initialisation - nodev2Key: crypto.PrivateKey, - nodev2BindIp: IpAddress, nodev2BindPort: Port, - nodev2ExtIp = none[IpAddress](), nodev2ExtPort = none[Port](), - contentTopic: string): T - {.raises: [Defect, ValueError, KeyError, TLSStreamProtocolError, IOError, LPError].} = - +proc new*( + T: type Chat2MatterBridge, + # Matterbridge initialisation + mbHostUri: string, + mbGateway: string, + # NodeV2 initialisation + nodev2Key: crypto.PrivateKey, + nodev2BindIp: IpAddress, + nodev2BindPort: Port, + nodev2ExtIp = none[IpAddress](), + nodev2ExtPort = none[Port](), + contentTopic: string, +): T {. 
+ raises: [Defect, ValueError, KeyError, TLSStreamProtocolError, IOError, LPError] +.} = # Setup Matterbridge - let - mbClient = MatterbridgeClient.new(mbHostUri, mbGateway) + let mbClient = MatterbridgeClient.new(mbHostUri, mbGateway) # Let's verify the Matterbridge configuration before continuing let clientHealth = mbClient.isHealthy() if clientHealth.isOk() and clientHealth[]: - info "Reached Matterbridge host", host=mbClient.host + info "Reached Matterbridge host", host = mbClient.host else: raise newException(ValueError, "Matterbridge client not reachable/healthy") # Setup Waku v2 node let nodev2 = block: - var builder = WakuNodeBuilder.init() - builder.withNodeKey(nodev2Key) - builder.withNetworkConfigurationDetails(nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort).tryGet() - builder.build().tryGet() + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodev2Key) + builder + .withNetworkConfigurationDetails( + nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort + ) + .tryGet() + builder.build().tryGet() - return Chat2MatterBridge(mbClient: mbClient, - nodev2: nodev2, - running: false, - pollPeriod: chronos.seconds(1), - contentTopic: contentTopic) + return Chat2MatterBridge( + mbClient: mbClient, + nodev2: nodev2, + running: false, + pollPeriod: chronos.seconds(1), + contentTopic: contentTopic, + ) proc start*(cmb: Chat2MatterBridge) {.async.} = info "Starting Chat2MatterBridge" @@ -187,7 +206,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} = # Start Matterbridge polling (@TODO: use streaming interface) proc mbHandler(jsonNode: JsonNode) {.async.} = - trace "Bridging message from Matterbridge to chat2", jsonNode=jsonNode + trace "Bridging message from Matterbridge to chat2", jsonNode = jsonNode waitFor cmb.toChat2(jsonNode) asyncSpawn cmb.pollMatterbridge(mbHandler) @@ -203,8 +222,10 @@ proc start*(cmb: Chat2MatterBridge) {.async.} = # Bridging # Handle messages on Waku v2 and bridge to Matterbridge - proc relayHandler(pubsubTopic: PubsubTopic, msg: WakuMessage): Future[void] {.async.} = - trace "Bridging message from Chat2 to Matterbridge", msg=msg + proc relayHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async.} = + trace "Bridging message from Chat2 to Matterbridge", msg = msg try: cmb.toMatterbridge(msg) except: @@ -219,11 +240,10 @@ proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} = await cmb.nodev2.stop() -{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError +{.pop.} + # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError when isMainModule: - import - ../../../waku/common/utils/nat, - ../../waku/waku_api/message_cache + import ../../../waku/common/utils/nat, ../../waku/waku_api/message_cache let rng = newRng() @@ -232,9 +252,12 @@ when isMainModule: if conf.logLevel != LogLevel.NONE: setLogLevel(conf.logLevel) - let natRes = setupNat(conf.nat, clientId, - Port(uint16(conf.libp2pTcpPort) + conf.portsShift), - Port(uint16(conf.udpPort) + conf.portsShift)) + let natRes = setupNat( + conf.nat, + clientId, + Port(uint16(conf.libp2pTcpPort) + conf.portsShift), + Port(uint16(conf.udpPort) + conf.portsShift), + ) if natRes.isErr(): error "Error in setupNat", error = natRes.error @@ -243,19 +266,22 @@ when isMainModule: (nodev2ExtIp, nodev2ExtPort, _) = natRes.get() ## The following heuristic assumes that, in absence of manual ## config, the external port is the same as the bind port. 
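A compact restatement of that comment's heuristic, in the same Option terms the following lines use (a standalone sketch, not the bridge's exact code):

```nim
import std/[options, net]

proc guessExtPort(extIp: Option[IpAddress], extPort: Option[Port], bindPort: Port): Option[Port] =
  ## NAT detection found an external IP but no mapped port:
  ## fall back to assuming the external port equals the bind port.
  if extIp.isSome() and extPort.isNone():
    some(bindPort)
  else:
    extPort
```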
- extPort = if nodev2ExtIp.isSome() and nodev2ExtPort.isNone(): - some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift)) - else: - nodev2ExtPort + extPort = + if nodev2ExtIp.isSome() and nodev2ExtPort.isNone(): + some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift)) + else: + nodev2ExtPort - let - bridge = Chat2Matterbridge.new( - mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)), - mbGateway = conf.mbGateway, - nodev2Key = conf.nodekey, - nodev2BindIp = conf.listenAddress, nodev2BindPort = Port(uint16(conf.libp2pTcpPort) + conf.portsShift), - nodev2ExtIp = nodev2ExtIp, nodev2ExtPort = extPort, - contentTopic = conf.contentTopic) + let bridge = Chat2Matterbridge.new( + mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)), + mbGateway = conf.mbGateway, + nodev2Key = conf.nodekey, + nodev2BindIp = conf.listenAddress, + nodev2BindPort = Port(uint16(conf.libp2pTcpPort) + conf.portsShift), + nodev2ExtIp = nodev2ExtIp, + nodev2ExtPort = extPort, + contentTopic = conf.contentTopic, + ) waitFor bridge.start() @@ -284,7 +310,9 @@ when isMainModule: let filterPeer = parsePeerInfo(conf.filternode) if filterPeer.isOk(): bridge.nodev2.peerManager.addServicePeer(filterPeer.value, WakuLegacyFilterCodec) - bridge.nodev2.peerManager.addServicePeer(filterPeer.value, WakuFilterSubscribeCodec) + bridge.nodev2.peerManager.addServicePeer( + filterPeer.value, WakuFilterSubscribeCodec + ) else: error "Error parsing conf.filternode", error = filterPeer.error diff --git a/apps/chat2bridge/config_chat2bridge.nim b/apps/chat2bridge/config_chat2bridge.nim index c6ccdd5b4..81137b2b1 100644 --- a/apps/chat2bridge/config_chat2bridge.nim +++ b/apps/chat2bridge/config_chat2bridge.nim @@ -1,118 +1,125 @@ import - confutils, confutils/defs, confutils/std/net, chronicles, chronos, + confutils, + confutils/defs, + confutils/std/net, + chronicles, + chronos, libp2p/crypto/[crypto, secp], eth/keys -type - Chat2MatterbridgeConf* = object - logLevel* {. - desc: "Sets the log level" - defaultValue: LogLevel.INFO - name: "log-level" .}: LogLevel +type Chat2MatterbridgeConf* = object + logLevel* {. + desc: "Sets the log level", defaultValue: LogLevel.INFO, name: "log-level" + .}: LogLevel - listenAddress* {. - defaultValue: defaultListenAddress(config) - desc: "Listening address for the LibP2P traffic" - name: "listen-address"}: IpAddress + listenAddress* {. + defaultValue: defaultListenAddress(config), + desc: "Listening address for the LibP2P traffic", + name: "listen-address" + .}: IpAddress - libp2pTcpPort* {. - desc: "Libp2p TCP listening port (for Waku v2)" - defaultValue: 9000 - name: "libp2p-tcp-port" .}: uint16 + libp2pTcpPort* {. + desc: "Libp2p TCP listening port (for Waku v2)", + defaultValue: 9000, + name: "libp2p-tcp-port" + .}: uint16 - udpPort* {. - desc: "UDP listening port" - defaultValue: 9000 - name: "udp-port" .}: uint16 + udpPort* {.desc: "UDP listening port", defaultValue: 9000, name: "udp-port".}: uint16 - portsShift* {. - desc: "Add a shift to all default port numbers" - defaultValue: 0 - name: "ports-shift" .}: uint16 + portsShift* {. + desc: "Add a shift to all default port numbers", + defaultValue: 0, + name: "ports-shift" + .}: uint16 - nat* {. - desc: "Specify method to use for determining public address. " & - "Must be one of: any, none, upnp, pmp, extip:" - defaultValue: "any" .}: string + nat* {. + desc: + "Specify method to use for determining public address. 
" & + "Must be one of: any, none, upnp, pmp, extip:", + defaultValue: "any" + .}: string - metricsServer* {. - desc: "Enable the metrics server" - defaultValue: false - name: "metrics-server" .}: bool + metricsServer* {. + desc: "Enable the metrics server", defaultValue: false, name: "metrics-server" + .}: bool - metricsServerAddress* {. - desc: "Listening address of the metrics server" - defaultValue: parseIpAddress("127.0.0.1") - name: "metrics-server-address" }: IpAddress + metricsServerAddress* {. + desc: "Listening address of the metrics server", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress - metricsServerPort* {. - desc: "Listening HTTP port of the metrics server" - defaultValue: 8008 - name: "metrics-server-port" .}: uint16 + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 - ### Waku v2 options - - staticnodes* {. - desc: "Multiaddr of peer to directly connect with. Argument may be repeated" - name: "staticnode" }: seq[string] + ### Waku v2 options + staticnodes* {. + desc: "Multiaddr of peer to directly connect with. Argument may be repeated", + name: "staticnode" + .}: seq[string] - nodekey* {. - desc: "P2P node private key as hex" - defaultValue: crypto.PrivateKey.random(Secp256k1, newRng()[]).tryGet() - name: "nodekey" }: crypto.PrivateKey + nodekey* {. + desc: "P2P node private key as hex", + defaultValue: crypto.PrivateKey.random(Secp256k1, newRng()[]).tryGet(), + name: "nodekey" + .}: crypto.PrivateKey - topics* {. - desc: "Default topics to subscribe to (space separated list)" - defaultValue: "/waku/2/default-waku/proto" - name: "topics" .}: string + topics* {. + desc: "Default topics to subscribe to (space separated list)", + defaultValue: "/waku/2/default-waku/proto", + name: "topics" + .}: string - store* {. - desc: "Flag whether to start store protocol", - defaultValue: true - name: "store" }: bool + store* {. + desc: "Flag whether to start store protocol", defaultValue: true, name: "store" + .}: bool - filter* {. - desc: "Flag whether to start filter protocol", - defaultValue: false - name: "filter" }: bool + filter* {. + desc: "Flag whether to start filter protocol", defaultValue: false, name: "filter" + .}: bool - relay* {. - desc: "Flag whether to start relay protocol", - defaultValue: true - name: "relay" }: bool + relay* {. + desc: "Flag whether to start relay protocol", defaultValue: true, name: "relay" + .}: bool - storenode* {. - desc: "Multiaddr of peer to connect with for waku store protocol" - defaultValue: "" - name: "storenode" }: string + storenode* {. + desc: "Multiaddr of peer to connect with for waku store protocol", + defaultValue: "", + name: "storenode" + .}: string - filternode* {. - desc: "Multiaddr of peer to connect with for waku filter protocol" - defaultValue: "" - name: "filternode" }: string - - # Matterbridge options - mbHostAddress* {. - desc: "Listening address of the Matterbridge host", - defaultValue: parseIpAddress("127.0.0.1") - name: "mb-host-address" }: IpAddress + filternode* {. + desc: "Multiaddr of peer to connect with for waku filter protocol", + defaultValue: "", + name: "filternode" + .}: string - mbHostPort* {. - desc: "Listening port of the Matterbridge host", - defaultValue: 4242 - name: "mb-host-port" }: uint16 - - mbGateway* {. - desc: "Matterbridge gateway" - defaultValue: "gateway1" - name: "mb-gateway" }: string + # Matterbridge options + mbHostAddress* {. 
+ desc: "Listening address of the Matterbridge host", + defaultValue: parseIpAddress("127.0.0.1"), + name: "mb-host-address" + .}: IpAddress - ## Chat2 options + mbHostPort* {. + desc: "Listening port of the Matterbridge host", + defaultValue: 4242, + name: "mb-host-port" + .}: uint16 - contentTopic* {. - desc: "Content topic to bridge chat messages to." - defaultValue: "/toy-chat/2/huilong/proto" - name: "content-topic" }: string + mbGateway* {. + desc: "Matterbridge gateway", defaultValue: "gateway1", name: "mb-gateway" + .}: string + + ## Chat2 options + contentTopic* {. + desc: "Content topic to bridge chat messages to.", + defaultValue: "/toy-chat/2/huilong/proto", + name: "content-topic" + .}: string proc parseCmdArg*(T: type keys.KeyPair, p: string): T = try: diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim index 0171001eb..2e5d33bfc 100644 --- a/apps/networkmonitor/networkmonitor.nim +++ b/apps/networkmonitor/networkmonitor.nim @@ -4,7 +4,7 @@ else: {.push raises: [].} import - std/[tables,strutils,times,sequtils], + std/[tables, strutils, times, sequtils], stew/results, stew/shims/net, chronicles, @@ -44,19 +44,22 @@ const AvgPingWindow = 10.0 const git_version* {.strdefine.} = "n/a" -proc setDiscoveredPeersCapabilities( - routingTableNodes: seq[Node]) = +proc setDiscoveredPeersCapabilities(routingTableNodes: seq[Node]) = for capability in @[Relay, Store, Filter, Lightpush]: - let nOfNodesWithCapability = routingTableNodes.countIt(it.record.supportsCapability(capability)) - info "capabilities as per ENR waku flag", capability=capability, amount=nOfNodesWithCapability - networkmonitor_peer_type_as_per_enr.set(int64(nOfNodesWithCapability), labelValues = [$capability]) + let nOfNodesWithCapability = + routingTableNodes.countIt(it.record.supportsCapability(capability)) + info "capabilities as per ENR waku flag", + capability = capability, amount = nOfNodesWithCapability + networkmonitor_peer_type_as_per_enr.set( + int64(nOfNodesWithCapability), labelValues = [$capability] + ) proc analyzePeer( - customPeerInfo: CustomPeerInfoRef, - peerInfo: RemotePeerInfo, - node: WakuNode, - timeout: chronos.Duration - ): Future[Result[string, string]] {.async.} = + customPeerInfo: CustomPeerInfoRef, + peerInfo: RemotePeerInfo, + node: WakuNode, + timeout: chronos.Duration, +): Future[Result[string, string]] {.async.} = var pingDelay: chronos.Duration proc ping(): Future[Result[void, string]] {.async, gcsafe.} = @@ -64,12 +67,11 @@ proc analyzePeer( let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) pingDelay = await node.libp2pPing.ping(conn) return ok() - except CatchableError: var msg = getCurrentExceptionMsg() if msg == "Future operation cancelled!": msg = "timedout" - warn "failed to ping the peer", peer=peerInfo, err=msg + warn "failed to ping the peer", peer = peerInfo, err = msg customPeerInfo.connError = msg return err("could not ping peer: " & msg) @@ -81,36 +83,44 @@ proc analyzePeer( return err(customPeerInfo.connError) customPeerInfo.connError = "" - info "successfully pinged peer", peer=peerInfo, duration=pingDelay.millis + info "successfully pinged peer", peer = peerInfo, duration = pingDelay.millis networkmonitor_peer_ping.observe(pingDelay.millis) if customPeerInfo.avgPingDuration == 0.millis: customPeerInfo.avgPingDuration = pingDelay # TODO: check why the calculation ends up losing precision - customPeerInfo.avgPingDuration = int64((float64(customPeerInfo.avgPingDuration.millis) * (AvgPingWindow - 1.0) + 
float64(pingDelay.millis)) / AvgPingWindow).millis + customPeerInfo.avgPingDuration = int64( + ( + float64(customPeerInfo.avgPingDuration.millis) * (AvgPingWindow - 1.0) + + float64(pingDelay.millis) + ) / AvgPingWindow + ).millis customPeerInfo.lastPingDuration = pingDelay return ok(customPeerInfo.peerId) proc shouldReconnect(customPeerInfo: CustomPeerInfoRef): bool = - let reconnetIntervalCheck = getTime().toUnix() >= customPeerInfo.lastTimeConnected + ReconnectTime + let reconnetIntervalCheck = + getTime().toUnix() >= customPeerInfo.lastTimeConnected + ReconnectTime var retriesCheck = customPeerInfo.retries < MaxConnectionRetries - if not retriesCheck and getTime().toUnix() >= customPeerInfo.lastTimeConnected + ResetRetriesAfter: + if not retriesCheck and + getTime().toUnix() >= customPeerInfo.lastTimeConnected + ResetRetriesAfter: customPeerInfo.retries = 0 retriesCheck = true - info "resetting retries counter", peerId=customPeerInfo.peerId + info "resetting retries counter", peerId = customPeerInfo.peerId return reconnetIntervalCheck and retriesCheck # TODO: Split in discover, connect -proc setConnectedPeersMetrics(discoveredNodes: seq[Node], - node: WakuNode, - timeout: chronos.Duration, - restClient: RestClientRef, - allPeers: CustomPeersTableRef) {.async.} = - +proc setConnectedPeersMetrics( + discoveredNodes: seq[Node], + node: WakuNode, + timeout: chronos.Duration, + restClient: RestClientRef, + allPeers: CustomPeersTableRef, +) {.async.} = let currentTime = getTime().toUnix() var newPeers = 0 @@ -122,18 +132,18 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node], for discNode in discoveredNodes: let typedRecord = discNode.record.toTypedRecord() if not typedRecord.isOk(): - warn "could not convert record to typed record", record=discNode.record + warn "could not convert record to typed record", record = discNode.record continue let secp256k1 = typedRecord.get().secp256k1 if not secp256k1.isSome(): - warn "could not get secp256k1 key", typedRecord=typedRecord.get() + warn "could not get secp256k1 key", typedRecord = typedRecord.get() continue let peerRes = toRemotePeerInfo(discNode.record) - let peerInfo = peerRes.valueOr(): - warn "error converting record to remote peer info", record=discNode.record + let peerInfo = peerRes.valueOr: + warn "error converting record to remote peer info", record = discNode.record continue # create new entry if new peerId found @@ -143,7 +153,7 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node], allPeers[peerId] = CustomPeerInfoRef(peerId: peerId) newPeers += 1 else: - info "already seen", peerId=peerId + info "already seen", peerId = peerId let customPeerInfo = allPeers[peerId] @@ -153,7 +163,7 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node], customPeerInfo.discovered += 1 if not typedRecord.get().ip.isSome(): - warn "ip field is not set", record=typedRecord.get() + warn "ip field is not set", record = typedRecord.get() continue let ip = $typedRecord.get().ip.get().join(".") @@ -162,7 +172,8 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node], # try to ping the peer if shouldReconnect(customPeerInfo): if customPeerInfo.retries > 0: - warn "trying to dial failed peer again", peerId=peerId, retry=customPeerInfo.retries + warn "trying to dial failed peer again", + peerId = peerId, retry = customPeerInfo.retries analyzeFuts.add(analyzePeer(customPeerInfo, peerInfo, node, timeout)) # Wait for all connection attempts to finish @@ -170,16 +181,16 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node], for peerIdFut 
in analyzedPeers: let peerIdRes = await peerIdFut - let peerIdStr = peerIdRes.valueOr(): + let peerIdStr = peerIdRes.valueOr: continue successfulConnections += 1 - let peerId = PeerId.init(peerIdStr).valueOr(): - warn "failed to parse peerId", peerId=peerIdStr + let peerId = PeerId.init(peerIdStr).valueOr: + warn "failed to parse peerId", peerId = peerIdStr continue var customPeerInfo = allPeers[peerIdStr] - debug "connected to peer", peer=customPeerInfo[] + debug "connected to peer", peer = customPeerInfo[] # after connection, get supported protocols let lp2pPeerStore = node.switch.peerStore @@ -191,9 +202,9 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node], let nodeUserAgent = lp2pPeerStore[AgentBook][peerId] customPeerInfo.userAgent = nodeUserAgent - info "number of newly discovered peers", amount=newPeers + info "number of newly discovered peers", amount = newPeers # inform the total connections that we did in this round - info "number of successful connections", amount=successfulConnections + info "number of successful connections", amount = successfulConnections proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} = var allProtocols: Table[string, int] @@ -207,8 +218,9 @@ proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} = for protocol in peerInfo.supportedProtocols: allProtocols[protocol] = allProtocols.mgetOrPut(protocol, 0) + 1 - # store available user-agents in the network - allAgentStrings[peerInfo.userAgent] = allAgentStrings.mgetOrPut(peerInfo.userAgent, 0) + 1 + # store available user-agents in the network + allAgentStrings[peerInfo.userAgent] = + allAgentStrings.mgetOrPut(peerInfo.userAgent, 0) + 1 if peerInfo.country != "": countries[peerInfo.country] = countries.mgetOrPut(peerInfo.country, 0) + 1 @@ -219,25 +231,32 @@ proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} = networkmonitor_peer_count.set(int64(connectedPeers), labelValues = ["true"]) networkmonitor_peer_count.set(int64(failedPeers), labelValues = ["false"]) - # update count on each protocol + # update count on each protocol for protocol in allProtocols.keys(): let countOfProtocols = allProtocols.mgetOrPut(protocol, 0) - networkmonitor_peer_type_as_per_protocol.set(int64(countOfProtocols), labelValues = [protocol]) - info "supported protocols in the network", protocol=protocol, count=countOfProtocols + networkmonitor_peer_type_as_per_protocol.set( + int64(countOfProtocols), labelValues = [protocol] + ) + info "supported protocols in the network", + protocol = protocol, count = countOfProtocols # update count on each user-agent for userAgent in allAgentStrings.keys(): let countOfUserAgent = allAgentStrings.mgetOrPut(userAgent, 0) - networkmonitor_peer_user_agents.set(int64(countOfUserAgent), labelValues = [userAgent]) - info "user agents participating in the network", userAgent=userAgent, count=countOfUserAgent + networkmonitor_peer_user_agents.set( + int64(countOfUserAgent), labelValues = [userAgent] + ) + info "user agents participating in the network", + userAgent = userAgent, count = countOfUserAgent for country in countries.keys(): let peerCount = countries.mgetOrPut(country, 0) networkmonitor_peer_country_count.set(int64(peerCount), labelValues = [country]) - info "number of peers per country", country=country, count=peerCount + info "number of peers per country", country = country, count = peerCount -proc populateInfoFromIp(allPeersRef: CustomPeersTableRef, - restClient: RestClientRef) {.async.} = +proc populateInfoFromIp( + allPeersRef: CustomPeersTableRef, 
restClient: RestClientRef +) {.async.} = for peer in allPeersRef.keys(): if allPeersRef[peer].country != "" and allPeersRef[peer].city != "": continue @@ -252,7 +271,7 @@ proc populateInfoFromIp(allPeersRef: CustomPeersTableRef, let response = await restClient.ipToLocation(allPeersRef[peer].ip) location = response.data except CatchableError: - warn "could not get location", ip=allPeersRef[peer].ip + warn "could not get location", ip = allPeersRef[peer].ip continue allPeersRef[peer].country = location.country allPeersRef[peer].city = location.city @@ -260,12 +279,13 @@ proc populateInfoFromIp(allPeersRef: CustomPeersTableRef, # TODO: Split in discovery, connections, and ip2location # crawls the network discovering peers and trying to connect to them # metrics are processed and exposed -proc crawlNetwork(node: WakuNode, - wakuDiscv5: WakuDiscoveryV5, - restClient: RestClientRef, - conf: NetworkMonitorConf, - allPeersRef: CustomPeersTableRef) {.async.} = - +proc crawlNetwork( + node: WakuNode, + wakuDiscv5: WakuDiscoveryV5, + restClient: RestClientRef, + conf: NetworkMonitorConf, + allPeersRef: CustomPeersTableRef, +) {.async.} = let crawlInterval = conf.refreshInterval * 1000 while true: let startTime = Moment.now() @@ -281,7 +301,9 @@ proc crawlNetwork(node: WakuNode, # tries to connect to all newly discovered nodes # and populates metrics related to peers we could connect # note random discovered nodes can be already known - await setConnectedPeersMetrics(discoveredNodes, node, conf.timeout, restClient, allPeersRef) + await setConnectedPeersMetrics( + discoveredNodes, node, conf.timeout, restClient, allPeersRef + ) updateMetrics(allPeersRef) @@ -291,7 +313,7 @@ proc crawlNetwork(node: WakuNode, let totalNodes = flatNodes.len let seenNodes = flatNodes.countIt(it.seen) - info "discovered nodes: ", total=totalNodes, seen=seenNodes + info "discovered nodes: ", total = totalNodes, seen = seenNodes # Notes: # we dont run ipMajorityLoop @@ -299,14 +321,16 @@ proc crawlNetwork(node: WakuNode, let endTime = Moment.now() let elapsed = (endTime - startTime).nanos - info "crawl duration", time=elapsed.millis + info "crawl duration", time = elapsed.millis await sleepAsync(crawlInterval.millis - elapsed.millis) -proc retrieveDynamicBootstrapNodes(dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress]): Result[seq[RemotePeerInfo], string] = +proc retrieveDynamicBootstrapNodes( + dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress] +): Result[seq[RemotePeerInfo], string] = if dnsDiscovery and dnsDiscoveryUrl != "": # DNS discovery - debug "Discovering nodes using Waku DNS discovery", url=dnsDiscoveryUrl + debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl var nameServers: seq[TransportAddress] for ip in dnsDiscoveryNameServers: @@ -315,24 +339,29 @@ proc retrieveDynamicBootstrapNodes(dnsDiscovery: bool, dnsDiscoveryUrl: string, let dnsResolver = DnsResolver.new(nameServers) proc resolver(domain: string): Future[string] {.async, gcsafe.} = - trace "resolving", domain=domain + trace "resolving", domain = domain let resolved = await dnsResolver.resolveTxt(domain) return resolved[0] # Use only first answer var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver) if wakuDnsDiscovery.isOk(): - return wakuDnsDiscovery.get().findPeers() - .mapErr(proc (e: cstring): string = $e) + return wakuDnsDiscovery.get().findPeers().mapErr( + proc(e: cstring): string = + $e + ) else: warn "Failed to init Waku DNS discovery" 
debug "No method for retrieving dynamic bootstrap nodes specified." ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default -proc getBootstrapFromDiscDns(conf: NetworkMonitorConf): Result[seq[enr.Record], string] = +proc getBootstrapFromDiscDns( + conf: NetworkMonitorConf +): Result[seq[enr.Record], string] = try: let dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] - let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes(true, conf.dnsDiscoveryUrl, dnsNameServers) + let dynamicBootstrapNodesRes = + retrieveDynamicBootstrapNodes(true, conf.dnsDiscoveryUrl, dnsNameServers) if not dynamicBootstrapNodesRes.isOk(): error("failed discovering peers from DNS") let dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() @@ -345,22 +374,28 @@ proc getBootstrapFromDiscDns(conf: NetworkMonitorConf): Result[seq[enr.Record], let enr = n.enr.get() tenrRes = enr.toTypedRecord() - if tenrRes.isOk() and (tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome()): + if tenrRes.isOk() and ( + tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome() + ): discv5BootstrapEnrs.add(enr) return ok(discv5BootstrapEnrs) except CatchableError: error("failed discovering peers from DNS") -proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV5), string] = - let bindIp = try: - parseIpAddress("0.0.0.0") - except CatchableError: - return err("could not start node: " & getCurrentExceptionMsg()) +proc initAndStartApp( + conf: NetworkMonitorConf +): Result[(WakuNode, WakuDiscoveryV5), string] = + let bindIp = + try: + parseIpAddress("0.0.0.0") + except CatchableError: + return err("could not start node: " & getCurrentExceptionMsg()) - let extIp = try: - parseIpAddress("127.0.0.1") - except CatchableError: - return err("could not start node: " & getCurrentExceptionMsg()) + let extIp = + try: + parseIpAddress("127.0.0.1") + except CatchableError: + return err("could not start node: " & getCurrentExceptionMsg()) let # some hardcoded parameters @@ -368,34 +403,33 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV key = crypto.PrivateKey.random(Secp256k1, rng[])[] nodeTcpPort = Port(60000) nodeUdpPort = Port(9000) - flags = CapabilitiesBitfield.init(lightpush = false, filter = false, store = false, relay = true) + flags = CapabilitiesBitfield.init( + lightpush = false, filter = false, store = false, relay = true + ) var builder = EnrBuilder.init(key) builder.withIpAddressAndPorts( - ipAddr = some(extIp), - tcpPort = some(nodeTcpPort), - udpPort = some(nodeUdpPort), + ipAddr = some(extIp), tcpPort = some(nodeTcpPort), udpPort = some(nodeUdpPort) ) builder.withWakuCapabilities(flags) let addShardedTopics = builder.withShardedTopics(conf.pubsubTopics) if addShardedTopics.isErr(): - error "failed to add sharded topics to ENR", error=addShardedTopics.error + error "failed to add sharded topics to ENR", error = addShardedTopics.error return err($addShardedTopics.error) let recordRes = builder.build() let record = if recordRes.isErr(): return err("cannot build record: " & $recordRes.error) - else: recordRes.get() + else: + recordRes.get() var nodeBuilder = WakuNodeBuilder.init() nodeBuilder.withNodeKey(key) nodeBuilder.withRecord(record) - nodeBuilder.withPeerManagerConfig( - maxRelayPeers = none(int), - shardAware = true) + nodeBuilder.withPeerManagerConfig(maxRelayPeers = none(int), shardAware = true) let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort) if res.isErr(): return err("node building error" & $res.error) @@ 
-404,7 +438,8 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV let node = if nodeRes.isErr(): return err("node building error" & $res.error) - else: nodeRes.get() + else: + nodeRes.get() var discv5BootstrapEnrsRes = getBootstrapFromDiscDns(conf) if discv5BootstrapEnrsRes.isErr(): @@ -422,7 +457,7 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV port: nodeUdpPort, privateKey: keys.PrivateKey(key.skkey), bootstrapRecords: discv5BootstrapEnrs, - autoupdateRecord: false + autoupdateRecord: false, ) let wakuDiscv5 = WakuDiscoveryV5.new(node.rng, discv5Conf, some(record)) @@ -434,15 +469,17 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV ok((node, wakuDiscv5)) -proc startRestApiServer(conf: NetworkMonitorConf, - allPeersInfo: CustomPeersTableRef, - numMessagesPerContentTopic: ContentTopicMessageTableRef - ): Result[void, string] = +proc startRestApiServer( + conf: NetworkMonitorConf, + allPeersInfo: CustomPeersTableRef, + numMessagesPerContentTopic: ContentTopicMessageTableRef, +): Result[void, string] = try: - let serverAddress = initTAddress(conf.metricsRestAddress & ":" & $conf.metricsRestPort) + let serverAddress = + initTAddress(conf.metricsRestAddress & ":" & $conf.metricsRestPort) proc validate(pattern: string, value: string): int = - if pattern.startsWith("{") and pattern.endsWith("}"): 0 - else: 1 + if pattern.startsWith("{") and pattern.endsWith("}"): 0 else: 1 + var router = RestRouter.init(validate) router.installHandler(allPeersInfo, numMessagesPerContentTopic) var sres = RestServerRef.new(router, serverAddress) @@ -454,13 +491,16 @@ proc startRestApiServer(conf: NetworkMonitorConf, # handles rx of messages over a topic (see subscribe) # counts the number of messages per content topic -proc subscribeAndHandleMessages(node: WakuNode, - pubsubTopic: PubsubTopic, - msgPerContentTopic: ContentTopicMessageTableRef) = - +proc subscribeAndHandleMessages( + node: WakuNode, + pubsubTopic: PubsubTopic, + msgPerContentTopic: ContentTopicMessageTableRef, +) = # handle function - proc handler(pubsubTopic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = - trace "rx message", pubsubTopic=pubsubTopic, contentTopic=msg.contentTopic + proc handler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + trace "rx message", pubsubTopic = pubsubTopic, contentTopic = msg.contentTopic # If we reach a table limit size, remove c topics with the least messages. 
let tableSize = 100 @@ -482,11 +522,11 @@ when isMainModule: {.pop.} let confRes = NetworkMonitorConf.loadConfig() if confRes.isErr(): - error "could not load cli variables", err=confRes.error + error "could not load cli variables", err = confRes.error quit(1) var conf = confRes.get() - info "cli flags", conf=conf + info "cli flags", conf = conf if conf.clusterId == 1: let twnClusterConf = ClusterConf.TheWakuNetworkConf() @@ -509,22 +549,23 @@ when isMainModule: # start metrics server if conf.metricsServer: - let res = startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort)) + let res = + startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort)) if res.isErr(): - error "could not start metrics server", err=res.error + error "could not start metrics server", err = res.error quit(1) # start rest server for custom metrics let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic) if res.isErr(): - error "could not start rest api server", err=res.error + error "could not start rest api server", err = res.error quit(1) # create a rest client - let clientRest = RestClientRef.new(url="http://ip-api.com", - connectTimeout=ctime.seconds(2)) + let clientRest = + RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2)) if clientRest.isErr(): - error "could not start rest api client", err=res.error + error "could not start rest api client", err = res.error quit(1) let restClient = clientRest.get() @@ -540,7 +581,6 @@ when isMainModule: waitFor node.mountLibp2pPing() if conf.rlnRelayEthContractAddress != "": - let rlnConf = WakuRlnConfig( rlnRelayDynamic: conf.rlnRelayDynamic, rlnRelayCredIndex: some(uint(0)), @@ -549,17 +589,17 @@ when isMainModule: rlnRelayCredPath: "", rlnRelayCredPassword: "", rlnRelayTreePath: conf.rlnRelayTreePath, - rlnEpochSizeSec: conf.rlnEpochSizeSec + rlnEpochSizeSec: conf.rlnEpochSizeSec, ) try: waitFor node.mountRlnRelay(rlnConf) except CatchableError: - error "failed to setup RLN", err=getCurrentExceptionMsg() + error "failed to setup RLN", err = getCurrentExceptionMsg() quit 1 node.mountMetadata(conf.clusterId).isOkOr: - error "failed to mount waku metadata protocol: ", err=error + error "failed to mount waku metadata protocol: ", err = error quit 1 for pubsubTopic in conf.pubsubTopics: diff --git a/apps/networkmonitor/networkmonitor_config.nim b/apps/networkmonitor/networkmonitor_config.nim index 2f0841d34..d56a4941b 100644 --- a/apps/networkmonitor/networkmonitor_config.nim +++ b/apps/networkmonitor/networkmonitor_config.nim @@ -10,106 +10,127 @@ import type EthRpcUrl = distinct string -type - NetworkMonitorConf* = object - logLevel* {. - desc: "Sets the log level", - defaultValue: LogLevel.INFO, - name: "log-level", - abbr: "l" .}: LogLevel +type NetworkMonitorConf* = object + logLevel* {. + desc: "Sets the log level", + defaultValue: LogLevel.INFO, + name: "log-level", + abbr: "l" + .}: LogLevel - timeout* {. - desc: "Timeout to consider that the connection failed", - defaultValue: chronos.seconds(10), - name: "timeout", - abbr: "t" }: chronos.Duration + timeout* {. + desc: "Timeout to consider that the connection failed", + defaultValue: chronos.seconds(10), + name: "timeout", + abbr: "t" + .}: chronos.Duration - bootstrapNodes* {. - desc: "Bootstrap ENR node. Argument may be repeated.", - defaultValue: @[""], - name: "bootstrap-node", - abbr: "b" }: seq[string] + bootstrapNodes* {. + desc: "Bootstrap ENR node. 
Argument may be repeated.", + defaultValue: @[""], + name: "bootstrap-node", + abbr: "b" + .}: seq[string] - dnsDiscoveryUrl* {. - desc: "URL for DNS node list in format 'enrtree://@'", - defaultValue: "" - name: "dns-discovery-url" }: string + dnsDiscoveryUrl* {. + desc: "URL for DNS node list in format 'enrtree://@'", + defaultValue: "", + name: "dns-discovery-url" + .}: string - pubsubTopics* {. - desc: "Default pubsub topic to subscribe to. Argument may be repeated." - name: "pubsub-topic" .}: seq[string] + pubsubTopics* {. + desc: "Default pubsub topic to subscribe to. Argument may be repeated.", + name: "pubsub-topic" + .}: seq[string] - refreshInterval* {. - desc: "How often new peers are discovered and connected to (in seconds)", - defaultValue: 5, - name: "refresh-interval", - abbr: "r" }: int + refreshInterval* {. + desc: "How often new peers are discovered and connected to (in seconds)", + defaultValue: 5, + name: "refresh-interval", + abbr: "r" + .}: int - clusterId* {. - desc: "Cluster id that the node is running in. Node in a different cluster id is disconnected." - defaultValue: 1 - name: "cluster-id" }: uint32 + clusterId* {. + desc: + "Cluster id that the node is running in. Node in a different cluster id is disconnected.", + defaultValue: 1, + name: "cluster-id" + .}: uint32 - rlnRelay* {. - desc: "Enable spam protection through rln-relay: true|false", - defaultValue: true - name: "rln-relay" }: bool + rlnRelay* {. + desc: "Enable spam protection through rln-relay: true|false", + defaultValue: true, + name: "rln-relay" + .}: bool - rlnRelayDynamic* {. - desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false", - defaultValue: true - name: "rln-relay-dynamic" }: bool + rlnRelayDynamic* {. + desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false", + defaultValue: true, + name: "rln-relay-dynamic" + .}: bool - rlnRelayTreePath* {. - desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", - defaultValue: "" - name: "rln-relay-tree-path" }: string + rlnRelayTreePath* {. + desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", + defaultValue: "", + name: "rln-relay-tree-path" + .}: string - rlnRelayEthClientAddress* {. - desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", - defaultValue: "http://localhost:8540/", - name: "rln-relay-eth-client-address" }: EthRpcUrl + rlnRelayEthClientAddress* {. + desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", + defaultValue: "http://localhost:8540/", + name: "rln-relay-eth-client-address" + .}: EthRpcUrl - rlnRelayEthContractAddress* {. - desc: "Address of membership contract on an Ethereum testnet", - defaultValue: "", - name: "rln-relay-eth-contract-address" }: string + rlnRelayEthContractAddress* {. + desc: "Address of membership contract on an Ethereum testnet", + defaultValue: "", + name: "rln-relay-eth-contract-address" + .}: string - rlnEpochSizeSec* {. - desc: "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.", - defaultValue: 1 - name: "rln-relay-epoch-sec" .}: uint64 + rlnEpochSizeSec* {. + desc: + "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.", + defaultValue: 1, + name: "rln-relay-epoch-sec" + .}: uint64 - rlnRelayUserMessageLimit* {. - desc: "Set a user message limit for the rln membership registration. Must be a positive integer. 
Default is 1.", - defaultValue: 1, - name: "rln-relay-user-message-limit" .}: uint64 + rlnRelayUserMessageLimit* {. + desc: + "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", + defaultValue: 1, + name: "rln-relay-user-message-limit" + .}: uint64 - ## Prometheus metrics config - metricsServer* {. - desc: "Enable the metrics server: true|false" - defaultValue: true - name: "metrics-server" }: bool + ## Prometheus metrics config + metricsServer* {. + desc: "Enable the metrics server: true|false", + defaultValue: true, + name: "metrics-server" + .}: bool - metricsServerAddress* {. - desc: "Listening address of the metrics server." - defaultValue: parseIpAddress("127.0.0.1") - name: "metrics-server-address" }: IpAddress + metricsServerAddress* {. + desc: "Listening address of the metrics server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress - metricsServerPort* {. - desc: "Listening HTTP port of the metrics server." - defaultValue: 8008 - name: "metrics-server-port" }: uint16 + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server.", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 - ## Custom metrics rest server - metricsRestAddress* {. - desc: "Listening address of the metrics rest server.", - defaultValue: "127.0.0.1", - name: "metrics-rest-address" }: string - metricsRestPort* {. - desc: "Listening HTTP port of the metrics rest server.", - defaultValue: 8009, - name: "metrics-rest-port" }: uint16 + ## Custom metrics rest server + metricsRestAddress* {. + desc: "Listening address of the metrics rest server.", + defaultValue: "127.0.0.1", + name: "metrics-rest-address" + .}: string + metricsRestPort* {. + desc: "Listening HTTP port of the metrics rest server.", + defaultValue: 8009, + name: "metrics-rest-port" + .}: uint16 proc parseCmdArg*(T: type IpAddress, p: string): T = try: @@ -143,18 +164,22 @@ proc parseCmdArg*(T: type EthRpcUrl, s: string): T = ## https://url:port/path?query ## disallowed patterns: ## any valid/invalid ws or wss url - var httpPattern = re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" - var wsPattern = re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var httpPattern = + re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var wsPattern = + re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" if regex.match(s, wsPattern): echo "here" - raise newException(ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL") + raise newException( + ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL" + ) if not regex.match(s, httpPattern): raise newException(ValueError, "Invalid HTTP RPC URL") return EthRpcUrl(s) proc loadConfig*(T: type NetworkMonitorConf): Result[T, string] = try: - let conf = NetworkMonitorConf.load(version=git_version) + let conf = NetworkMonitorConf.load(version = git_version) ok(conf) except CatchableError: err(getCurrentExceptionMsg()) diff --git a/apps/networkmonitor/networkmonitor_metrics.nim b/apps/networkmonitor/networkmonitor_metrics.nim index 8ebc765b0..70b10cdc4 100644 --- a/apps/networkmonitor/networkmonitor_metrics.nim +++ b/apps/networkmonitor/networkmonitor_metrics.nim @@ -4,7 +4,7 @@ else: {.push raises: [].} import - 
std/[json,tables,sequtils], + std/[json, tables, sequtils], chronicles, chronicles/topics_registry, chronos, @@ -26,32 +26,29 @@ logScope: #discovery_message_requests_outgoing_total{response="no_response"} declarePublicGauge networkmonitor_peer_type_as_per_enr, - "Number of peers supporting each capability according to the ENR", - labels = ["capability"] + "Number of peers supporting each capability according to the ENR", + labels = ["capability"] declarePublicGauge networkmonitor_peer_type_as_per_protocol, - "Number of peers supporting each protocol, after a successful connection) ", - labels = ["protocols"] + "Number of peers supporting each protocol, after a successful connection) ", + labels = ["protocols"] declarePublicGauge networkmonitor_peer_user_agents, - "Number of peers with each user agent", - labels = ["user_agent"] + "Number of peers with each user agent", labels = ["user_agent"] declarePublicHistogram networkmonitor_peer_ping, - "Histogram tracking ping durations for discovered peers", - buckets = [100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 2000.0, Inf] + "Histogram tracking ping durations for discovered peers", + buckets = + [100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 2000.0, Inf] declarePublicGauge networkmonitor_peer_count, - "Number of discovered peers", - labels = ["connected"] + "Number of discovered peers", labels = ["connected"] declarePublicGauge networkmonitor_peer_country_count, - "Number of peers per country", - labels = ["country"] + "Number of peers per country", labels = ["country"] type - CustomPeerInfo* = object - # populated after discovery + CustomPeerInfo* = object # populated after discovery lastTimeDiscovered*: int64 discovered*: int64 peerId*: string @@ -80,23 +77,32 @@ type # stores the content topic and the count of rx messages ContentTopicMessageTableRef* = TableRef[string, int] -proc installHandler*(router: var RestRouter, - allPeers: CustomPeersTableRef, - numMessagesPerContentTopic: ContentTopicMessageTableRef) = - router.api(MethodGet, "/allpeersinfo") do () -> RestApiResponse: +proc installHandler*( + router: var RestRouter, + allPeers: CustomPeersTableRef, + numMessagesPerContentTopic: ContentTopicMessageTableRef, +) = + router.api(MethodGet, "/allpeersinfo") do() -> RestApiResponse: let values = toSeq(allPeers.values()) - return RestApiResponse.response(values.toJson(), contentType="application/json") - router.api(MethodGet, "/contenttopics") do () -> RestApiResponse: + return RestApiResponse.response(values.toJson(), contentType = "application/json") + router.api(MethodGet, "/contenttopics") do() -> RestApiResponse: # TODO: toJson() includes the hash - return RestApiResponse.response($(%numMessagesPerContentTopic), contentType="application/json") + return RestApiResponse.response( + $(%numMessagesPerContentTopic), contentType = "application/json" + ) proc startMetricsServer*(serverIp: IpAddress, serverPort: Port): Result[void, string] = - info "Starting metrics HTTP server", serverIp, serverPort + info "Starting metrics HTTP server", serverIp, serverPort - try: - startMetricsHttpServer($serverIp, serverPort) - except Exception as e: - error("Failed to start metrics HTTP server", serverIp=serverIp, serverPort=serverPort, msg=e.msg) + try: + startMetricsHttpServer($serverIp, serverPort) + except Exception as e: + error( + "Failed to start metrics HTTP server", + serverIp = serverIp, + serverPort = serverPort, + msg = e.msg, + ) - info "Metrics HTTP server started", serverIp, serverPort - 
ok() + info "Metrics HTTP server started", serverIp, serverPort + ok() diff --git a/apps/networkmonitor/networkmonitor_utils.nim b/apps/networkmonitor/networkmonitor_utils.nim index 27d9fbc21..34cd3093a 100644 --- a/apps/networkmonitor/networkmonitor_utils.nim +++ b/apps/networkmonitor/networkmonitor_utils.nim @@ -10,15 +10,14 @@ import chronicles, chronicles/topics_registry, chronos, - presto/[client,common] + presto/[client, common] -type - NodeLocation* = object - country*: string - city*: string - lat*: string - long*: string - isp*: string +type NodeLocation* = object + country*: string + city*: string + lat*: string + long*: string + isp*: string proc flatten*[T](a: seq[seq[T]]): seq[T] = var aFlat = newSeq[T](0) @@ -26,8 +25,9 @@ proc flatten*[T](a: seq[seq[T]]): seq[T] = aFlat &= subseq return aFlat -proc decodeBytes*(t: typedesc[NodeLocation], value: openArray[byte], - contentType: Opt[ContentTypeData]): RestResult[NodeLocation] = +proc decodeBytes*( + t: typedesc[NodeLocation], value: openArray[byte], contentType: Opt[ContentTypeData] +): RestResult[NodeLocation] = var res: string if len(value) > 0: res = newString(len(value)) @@ -35,19 +35,23 @@ proc decodeBytes*(t: typedesc[NodeLocation], value: openArray[byte], try: let jsonContent = parseJson(res) if $jsonContent["status"].getStr() != "success": - error "query failed", result=jsonContent + error "query failed", result = jsonContent return err("query failed") - return ok(NodeLocation( - country: jsonContent["country"].getStr(), - city: jsonContent["city"].getStr(), - lat: $jsonContent["lat"].getFloat(), - long: $jsonContent["lon"].getFloat(), - isp: jsonContent["isp"].getStr() - )) + return ok( + NodeLocation( + country: jsonContent["country"].getStr(), + city: jsonContent["city"].getStr(), + lat: $jsonContent["lat"].getFloat(), + long: $jsonContent["lon"].getFloat(), + isp: jsonContent["isp"].getStr(), + ) + ) except Exception: return err("failed to get the location: " & getCurrentExceptionMsg()) proc encodeString*(value: string): RestResult[string] = ok(value) -proc ipToLocation*(ip: string): RestResponse[NodeLocation] {.rest, endpoint: "json/{ip}", meth: MethodGet.} +proc ipToLocation*( + ip: string +): RestResponse[NodeLocation] {.rest, endpoint: "json/{ip}", meth: MethodGet.} diff --git a/apps/wakucanary/certsgenerator.nim b/apps/wakucanary/certsgenerator.nim index 5cf2b5f2e..b8a9e9da3 100644 --- a/apps/wakucanary/certsgenerator.nim +++ b/apps/wakucanary/certsgenerator.nim @@ -1,8 +1,4 @@ -import - osproc, - os, - httpclient, - strutils +import osproc, os, httpclient, strutils proc getPublicIP(): string = let client = newHttpClient() @@ -14,29 +10,28 @@ proc getPublicIP(): string = return "127.0.0.1" # Function to generate a self-signed certificate -proc generateSelfSignedCertificate*(certPath: string, keyPath: string) : int = - +proc generateSelfSignedCertificate*(certPath: string, keyPath: string): int = # Ensure the OpenSSL is installed if findExe("openssl") == "": echo "OpenSSL is not installed or not in the PATH." 
return 1 let publicIP = getPublicIP() - + if publicIP != "127.0.0.1": - echo "Your public IP address is: ", publicIP - + echo "Your public IP address is: ", publicIP + # Command to generate private key and cert - let - cmd = "openssl req -x509 -newkey rsa:4096 -keyout " & keyPath & " -out " & certPath & - " -sha256 -days 3650 -nodes -subj '/C=XX/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=" & - publicIP & "'" + let + cmd = + "openssl req -x509 -newkey rsa:4096 -keyout " & keyPath & " -out " & certPath & + " -sha256 -days 3650 -nodes -subj '/C=XX/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=" & + publicIP & "'" res = execCmd(cmd) if res == 0: echo "Successfully generated self-signed certificate and key." else: echo "Failed to generate certificate and key." - + return res - \ No newline at end of file diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim index 55d6e86d1..d5821c728 100644 --- a/apps/wakucanary/wakucanary.nim +++ b/apps/wakucanary/wakucanary.nim @@ -30,52 +30,58 @@ const WebSocketPortOffset = 1000 const CertsDirectory = "./certs" # cli flags -type - WakuCanaryConf* = object - address* {. - desc: "Multiaddress of the peer node to attempt to dial", - defaultValue: "", - name: "address", - abbr: "a".}: string +type WakuCanaryConf* = object + address* {. + desc: "Multiaddress of the peer node to attempt to dial", + defaultValue: "", + name: "address", + abbr: "a" + .}: string - timeout* {. - desc: "Timeout to consider that the connection failed", - defaultValue: chronos.seconds(10), - name: "timeout", - abbr: "t".}: chronos.Duration + timeout* {. + desc: "Timeout to consider that the connection failed", + defaultValue: chronos.seconds(10), + name: "timeout", + abbr: "t" + .}: chronos.Duration - protocols* {. - desc: "Protocol required to be supported: store,relay,lightpush,filter (can be used multiple times)", - name: "protocol", - abbr: "p".}: seq[string] + protocols* {. + desc: + "Protocol required to be supported: store,relay,lightpush,filter (can be used multiple times)", + name: "protocol", + abbr: "p" + .}: seq[string] - logLevel* {. - desc: "Sets the log level", - defaultValue: LogLevel.INFO, - name: "log-level", - abbr: "l".}: LogLevel + logLevel* {. + desc: "Sets the log level", + defaultValue: LogLevel.INFO, + name: "log-level", + abbr: "l" + .}: LogLevel - nodePort* {. - desc: "Listening port for waku node", - defaultValue: 60000, - name: "node-port", - abbr: "np".}: uint16 + nodePort* {. + desc: "Listening port for waku node", + defaultValue: 60000, + name: "node-port", + abbr: "np" + .}: uint16 - ## websocket secure config - websocketSecureKeyPath* {. - desc: "Secure websocket key path: '/path/to/key.txt' ", - defaultValue: "" - name: "websocket-secure-key-path".}: string + ## websocket secure config + websocketSecureKeyPath* {. + desc: "Secure websocket key path: '/path/to/key.txt' ", + defaultValue: "", + name: "websocket-secure-key-path" + .}: string - websocketSecureCertPath* {. - desc: "Secure websocket Certificate path: '/path/to/cert.txt' ", - defaultValue: "" - name: "websocket-secure-cert-path".}: string + websocketSecureCertPath* {. + desc: "Secure websocket Certificate path: '/path/to/cert.txt' ", + defaultValue: "", + name: "websocket-secure-cert-path" + .}: string - ping* {. - desc: "Ping the peer node to measure latency", - defaultValue: true, - name: "ping" .}: bool + ping* {. 
+ desc: "Ping the peer node to measure latency", defaultValue: true, name: "ping" + .}: bool proc parseCmdArg*(T: type chronos.Duration, p: string): T = try: @@ -88,17 +94,15 @@ proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] = # checks if rawProtocols (skipping version) are supported in nodeProtocols proc areProtocolsSupported( - rawProtocols: seq[string], - nodeProtocols: seq[string]): bool = - + rawProtocols: seq[string], nodeProtocols: seq[string] +): bool = var numOfSupportedProt: int = 0 for nodeProtocol in nodeProtocols: for rawProtocol in rawProtocols: let protocolTag = ProtocolsTable[rawProtocol] if nodeProtocol.startsWith(protocolTag): - info "Supported protocol ok", expected = protocolTag, - supported = nodeProtocol + info "Supported protocol ok", expected = protocolTag, supported = nodeProtocol numOfSupportedProt += 1 break @@ -107,26 +111,29 @@ proc areProtocolsSupported( return false -proc pingNode(node: WakuNode, peerInfo: RemotePeerInfo): Future[void] {.async, gcsafe.} = +proc pingNode( + node: WakuNode, peerInfo: RemotePeerInfo +): Future[void] {.async, gcsafe.} = try: let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) let pingDelay = await node.libp2pPing.ping(conn) - info "Peer response time (ms)", peerId = peerInfo.peerId, ping=pingDelay.millis - + info "Peer response time (ms)", peerId = peerInfo.peerId, ping = pingDelay.millis except CatchableError: var msg = getCurrentExceptionMsg() if msg == "Future operation cancelled!": msg = "timedout" - error "Failed to ping the peer", peer=peerInfo, err=msg + error "Failed to ping the peer", peer = peerInfo, err = msg proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = let conf: WakuCanaryConf = WakuCanaryConf.load() # create dns resolver let - nameServers = @[ - initTAddress(parseIpAddress("1.1.1.1"), Port(53)), - initTAddress(parseIpAddress("1.0.0.1"), Port(53))] + nameServers = + @[ + initTAddress(parseIpAddress("1.1.1.1"), Port(53)), + initTAddress(parseIpAddress("1.0.0.1"), Port(53)), + ] resolver: DnsResolver = DnsResolver.new(nameServers) if conf.logLevel != LogLevel.NONE: @@ -158,14 +165,16 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = nodeTcpPort = Port(conf.nodePort) isWs = peer.addrs[0].contains(multiCodec("ws")).get() isWss = peer.addrs[0].contains(multiCodec("wss")).get() - keyPath = if conf.websocketSecureKeyPath.len > 0: - conf.websocketSecureKeyPath - else: - CertsDirectory & "/key.pem" - certPath = if conf.websocketSecureCertPath.len > 0: - conf.websocketSecureCertPath - else: - CertsDirectory & "/cert.pem" + keyPath = + if conf.websocketSecureKeyPath.len > 0: + conf.websocketSecureKeyPath + else: + CertsDirectory & "/key.pem" + certPath = + if conf.websocketSecureCertPath.len > 0: + conf.websocketSecureCertPath + else: + CertsDirectory & "/cert.pem" var builder = WakuNodeBuilder.init() builder.withNodeKey(nodeKey) @@ -183,12 +192,13 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = let recordRes = enrBuilder.build() let record = if recordRes.isErr(): - error "failed to create enr record", error=recordRes.error + error "failed to create enr record", error = recordRes.error quit(QuitFailure) - else: recordRes.get() + else: + recordRes.get() - if isWss and (conf.websocketSecureKeyPath.len == 0 or - conf.websocketSecureCertPath.len == 0): + if isWss and + (conf.websocketSecureKeyPath.len == 0 or conf.websocketSecureCertPath.len == 0): info "WebSocket Secure requires key and certificate. 
Generating them" if not dirExists(CertsDirectory): createDir(CertsDirectory) @@ -199,9 +209,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = builder.withRecord(record) builder.withNetworkConfiguration(netConfig.tryGet()) builder.withSwitchConfiguration( - secureKey = some(keyPath), - secureCert = some(certPath), - nameResolver = resolver, + secureKey = some(keyPath), secureCert = some(certPath), nameResolver = resolver ) let node = builder.build().tryGet() @@ -215,7 +223,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = await node.start() - var pingFut:Future[bool] + var pingFut: Future[bool] if conf.ping: pingFut = pingNode(node, peer).withTimeout(conf.timeout) @@ -233,8 +241,8 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = if conStatus in [Connected, CanConnect]: let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId] if not areProtocolsSupported(conf.protocols, nodeProtocols): - error "Not all protocols are supported", expected = conf.protocols, - supported = nodeProtocols + error "Not all protocols are supported", + expected = conf.protocols, supported = nodeProtocols return 1 elif conStatus == CannotConnect: error "Could not connect", peerId = peer.peerId diff --git a/apps/wakunode2/app.nim b/apps/wakunode2/app.nim index 38f94115d..eba83297b 100644 --- a/apps/wakunode2/app.nim +++ b/apps/wakunode2/app.nim @@ -57,7 +57,6 @@ import logScope: topics = "wakunode app" - # Git version in git describe format (defined at compile time) const git_version* {.strdefine.} = "n/a" @@ -78,7 +77,6 @@ type AppResult*[T] = Result[T, string] - func node*(app: App): WakuNode = app.node @@ -87,14 +85,12 @@ func version*(app: App): string = ## Retrieve dynamic bootstrap nodes (DNS discovery) -proc retrieveDynamicBootstrapNodes*(dnsDiscovery: bool, - dnsDiscoveryUrl: string, - dnsDiscoveryNameServers: seq[IpAddress]): - Result[seq[RemotePeerInfo], string] = - +proc retrieveDynamicBootstrapNodes*( + dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress] +): Result[seq[RemotePeerInfo], string] = if dnsDiscovery and dnsDiscoveryUrl != "": # DNS discovery - debug "Discovering nodes using Waku DNS discovery", url=dnsDiscoveryUrl + debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl var nameServers: seq[TransportAddress] for ip in dnsDiscoveryNameServers: @@ -103,14 +99,16 @@ proc retrieveDynamicBootstrapNodes*(dnsDiscovery: bool, let dnsResolver = DnsResolver.new(nameServers) proc resolver(domain: string): Future[string] {.async, gcsafe.} = - trace "resolving", domain=domain + trace "resolving", domain = domain let resolved = await dnsResolver.resolveTxt(domain) return resolved[0] # Use only first answer var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver) if wakuDnsDiscovery.isOk(): - return wakuDnsDiscovery.get().findPeers() - .mapErr(proc (e: cstring): string = $e) + return wakuDnsDiscovery.get().findPeers().mapErr( + proc(e: cstring): string = + $e + ) else: warn "Failed to init Waku DNS discovery" @@ -120,47 +118,48 @@ proc retrieveDynamicBootstrapNodes*(dnsDiscovery: bool, ## Initialisation proc init*(T: type App, conf: WakuNodeConf): Result[App, string] = - var confCopy = conf let rng = crypto.newRng() if not confCopy.nodekey.isSome(): let keyRes = crypto.PrivateKey.random(Secp256k1, rng[]) - if keyRes.isErr(): + if keyRes.isErr(): error "Failed to generate key", error = $keyRes.error return err("Failed to generate key: " & $keyRes.error) confCopy.nodekey = some(keyRes.get()) debug 
"Retrieve dynamic bootstrap nodes" - let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes(confCopy.dnsDiscovery, - confCopy.dnsDiscoveryUrl, - confCopy.dnsDiscoveryNameServers) + let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes( + confCopy.dnsDiscovery, confCopy.dnsDiscoveryUrl, confCopy.dnsDiscoveryNameServers + ) if dynamicBootstrapNodesRes.isErr(): - error "Retrieving dynamic bootstrap nodes failed", error = dynamicBootstrapNodesRes.error - return err("Retrieving dynamic bootstrap nodes failed: " & dynamicBootstrapNodesRes.error) + error "Retrieving dynamic bootstrap nodes failed", + error = dynamicBootstrapNodesRes.error + return err( + "Retrieving dynamic bootstrap nodes failed: " & dynamicBootstrapNodesRes.error + ) let nodeRes = setupNode(confCopy, some(rng)) - if nodeRes.isErr(): - error "Failed setting up node", error=nodeRes.error + if nodeRes.isErr(): + error "Failed setting up node", error = nodeRes.error return err("Failed setting up node: " & nodeRes.error) var app = App( - version: git_version, - conf: confCopy, - rng: rng, - key: confCopy.nodekey.get(), - node: nodeRes.get(), - dynamicBootstrapNodes: dynamicBootstrapNodesRes.get() - ) + version: git_version, + conf: confCopy, + rng: rng, + key: confCopy.nodekey.get(), + node: nodeRes.get(), + dynamicBootstrapNodes: dynamicBootstrapNodesRes.get(), + ) ok(app) ## Setup DiscoveryV5 proc setupDiscoveryV5*(app: App): WakuDiscoveryV5 = - let dynamicBootstrapEnrs = app.dynamicBootstrapNodes - .filterIt(it.hasUdpPort()) - .mapIt(it.enr.get()) + let dynamicBootstrapEnrs = + app.dynamicBootstrapNodes.filterIt(it.hasUdpPort()).mapIt(it.enr.get()) var discv5BootstrapEnrs: seq[enr.Record] @@ -170,9 +169,9 @@ proc setupDiscoveryV5*(app: App): WakuDiscoveryV5 = discv5BootstrapEnrs.add(dynamicBootstrapEnrs) - let discv5Config = DiscoveryConfig.init(app.conf.discv5TableIpLimit, - app.conf.discv5BucketIpLimit, - app.conf.discv5BitsPerHop) + let discv5Config = DiscoveryConfig.init( + app.conf.discv5TableIpLimit, app.conf.discv5BucketIpLimit, app.conf.discv5BitsPerHop + ) let discv5UdpPort = Port(uint16(app.conf.discv5UdpPort) + app.conf.portsShift) @@ -193,9 +192,9 @@ proc setupDiscoveryV5*(app: App): WakuDiscoveryV5 = app.node.topicSubscriptionQueue, ) -proc getPorts(listenAddrs: seq[MultiAddress]): - AppResult[tuple[tcpPort, websocketPort: Option[Port]]] = - +proc getPorts( + listenAddrs: seq[MultiAddress] +): AppResult[tuple[tcpPort, websocketPort: Option[Port]]] = var tcpPort, websocketPort = none(Port) for a in listenAddrs: @@ -212,7 +211,6 @@ proc getPorts(listenAddrs: seq[MultiAddress]): return ok((tcpPort: tcpPort, websocketPort: websocketPort)) proc getRunningNetConfig(app: App): AppResult[NetConfig] = - var conf = app.conf let (tcpPort, websocketPort) = getPorts(app.node.switch.peerInfo.listenAddrs).valueOr: return err("Could not retrieve ports " & error) @@ -230,7 +228,6 @@ proc getRunningNetConfig(app: App): AppResult[NetConfig] = return ok(netConf) proc updateEnr(app: var App, netConf: NetConfig): AppResult[void] = - let record = enrConfiguration(app.conf, netConf, app.key).valueOr: return err("ENR setup failed: " & error) @@ -242,9 +239,7 @@ proc updateEnr(app: var App, netConf: NetConfig): AppResult[void] = return ok() proc updateApp(app: var App): AppResult[void] = - if app.conf.tcpPort == Port(0) or app.conf.websocketPort == Port(0): - let netConf = getRunningNetConfig(app).valueOr: return err("error calling updateNetConfig: " & $error) @@ -258,8 +253,8 @@ proc updateApp(app: var App): AppResult[void] = 
return ok() proc startApp*(app: var App): AppResult[void] = - - let nodeRes = catch: (waitFor startNode(app.node, app.conf, app.dynamicBootstrapNodes)) + let nodeRes = catch: + (waitFor startNode(app.node, app.conf, app.dynamicBootstrapNodes)) if nodeRes.isErr(): return err("exception starting node: " & nodeRes.error.msg) @@ -273,10 +268,11 @@ proc startApp*(app: var App): AppResult[void] = ## Discv5 if app.conf.discv5Discovery: app.wakuDiscV5 = some(app.setupDiscoveryV5()) - + if app.wakuDiscv5.isSome(): let wakuDiscv5 = app.wakuDiscv5.get() - let catchRes = catch: (waitFor wakuDiscv5.start()) + let catchRes = catch: + (waitFor wakuDiscv5.start()) let startRes = catchRes.valueOr: return err("failed to start waku discovery v5: " & catchRes.error.msg) @@ -285,38 +281,37 @@ proc startApp*(app: var App): AppResult[void] = return ok() - - ## Monitoring and external interfaces -proc startRestServer(app: App, - address: IpAddress, - port: Port, - conf: WakuNodeConf): - AppResult[WakuRestServerRef] = - +proc startRestServer( + app: App, address: IpAddress, port: Port, conf: WakuNodeConf +): AppResult[WakuRestServerRef] = # Used to register api endpoints that are not currently installed as keys, # values are holding error messages to be returned to the client var notInstalledTab: Table[string, string] = initTable[string, string]() - let requestErrorHandler : RestRequestErrorHandler = proc (error: RestRequestError, - request: HttpRequestRef): - Future[HttpResponseRef] - {.async: (raises: [CancelledError]).} = + let requestErrorHandler: RestRequestErrorHandler = proc( + error: RestRequestError, request: HttpRequestRef + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = try: case error of RestRequestError.Invalid: return await request.respond(Http400, "Invalid request", HttpTable.init()) of RestRequestError.NotFound: let paths = request.rawPath.split("/") - let rootPath = if len(paths) > 1: - paths[1] - else: - "" + let rootPath = + if len(paths) > 1: + paths[1] + else: + "" notInstalledTab.withValue(rootPath, errMsg): return await request.respond(Http404, errMsg[], HttpTable.init()) do: - return await request.respond(Http400, "Bad request initiated. Invalid path or method used.", HttpTable.init()) + return await request.respond( + Http400, + "Bad request initiated. Invalid path or method used.", + HttpTable.init(), + ) of RestRequestError.InvalidContentBody: return await request.respond(Http400, "Invalid content body", HttpTable.init()) of RestRequestError.InvalidContentType: @@ -329,14 +324,19 @@ proc startRestServer(app: App, return defaultResponse() - let allowedOrigin = if len(conf.restAllowOrigin) > 0 : - some(conf.restAllowOrigin.join(",")) - else: - none(string) + let allowedOrigin = + if len(conf.restAllowOrigin) > 0: + some(conf.restAllowOrigin.join(",")) + else: + none(string) - let server = ? newRestHttpServer(address, port, - allowedOrigin = allowedOrigin, - requestErrorHandler = requestErrorHandler) + let server = + ?newRestHttpServer( + address, + port, + allowedOrigin = allowedOrigin, + requestErrorHandler = requestErrorHandler, + ) ## Admin REST API if conf.restAdmin: @@ -364,59 +364,65 @@ proc startRestServer(app: App, installRelayApiHandlers(server.router, app.node, cache) else: - notInstalledTab["relay"] = "/relay endpoints are not available. Please check your configuration: --relay" + notInstalledTab["relay"] = + "/relay endpoints are not available. 
Please check your configuration: --relay" ## Filter REST API - if conf.filternode != "" and - app.node.wakuFilterClient != nil and - app.node.wakuFilterClientLegacy != nil: - + if conf.filternode != "" and app.node.wakuFilterClient != nil and + app.node.wakuFilterClientLegacy != nil: let legacyFilterCache = MessageCache.init() - rest_legacy_filter_api.installLegacyFilterRestApiHandlers(server.router, app.node, legacyFilterCache) + rest_legacy_filter_api.installLegacyFilterRestApiHandlers( + server.router, app.node, legacyFilterCache + ) let filterCache = MessageCache.init() let filterDiscoHandler = if app.wakuDiscv5.isSome(): some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Filter)) - else: none(DiscoveryHandler) + else: + none(DiscoveryHandler) rest_filter_api.installFilterRestApiHandlers( - server.router, - app.node, - filterCache, - filterDiscoHandler, + server.router, app.node, filterCache, filterDiscoHandler ) else: - notInstalledTab["filter"] = "/filter endpoints are not available. Please check your configuration: --filternode" + notInstalledTab["filter"] = + "/filter endpoints are not available. Please check your configuration: --filternode" ## Store REST API let storeDiscoHandler = if app.wakuDiscv5.isSome(): some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Store)) - else: none(DiscoveryHandler) + else: + none(DiscoveryHandler) installStoreApiHandlers(server.router, app.node, storeDiscoHandler) ## Light push API - if conf.lightpushnode != "" and - app.node.wakuLightpushClient != nil: + if conf.lightpushnode != "" and app.node.wakuLightpushClient != nil: let lightDiscoHandler = if app.wakuDiscv5.isSome(): some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Lightpush)) - else: none(DiscoveryHandler) + else: + none(DiscoveryHandler) - rest_lightpush_api.installLightPushRequestHandler(server.router, app.node, lightDiscoHandler) + rest_lightpush_api.installLightPushRequestHandler( + server.router, app.node, lightDiscoHandler + ) else: - notInstalledTab["lightpush"] = "/lightpush endpoints are not available. Please check your configuration: --lightpushnode" + notInstalledTab["lightpush"] = + "/lightpush endpoints are not available. 
Please check your configuration: --lightpushnode" server.start() info "Starting REST HTTP server", url = "http://" & $address & ":" & $port & "/" ok(server) -proc startMetricsServer(serverIp: IpAddress, serverPort: Port): AppResult[MetricsHttpServerRef] = - info "Starting metrics HTTP server", serverIp= $serverIp, serverPort= $serverPort +proc startMetricsServer( + serverIp: IpAddress, serverPort: Port +): AppResult[MetricsHttpServerRef] = + info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort) if metricsServerRes.isErr(): @@ -428,7 +434,7 @@ proc startMetricsServer(serverIp: IpAddress, serverPort: Port): AppResult[Metric except CatchableError: return err("metrics HTTP server start failed: " & getCurrentExceptionMsg()) - info "Metrics HTTP server started", serverIp= $serverIp, serverPort= $serverPort + info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort ok(server) proc startMetricsLogging(): AppResult[void] = @@ -437,28 +443,34 @@ proc startMetricsLogging(): AppResult[void] = proc setupMonitoringAndExternalInterfaces*(app: var App): AppResult[void] = if app.conf.rest: - let startRestServerRes = startRestServer(app, app.conf.restAddress, Port(app.conf.restPort + app.conf.portsShift), app.conf) + let startRestServerRes = startRestServer( + app, app.conf.restAddress, Port(app.conf.restPort + app.conf.portsShift), app.conf + ) if startRestServerRes.isErr(): - error "Starting REST server failed. Continuing in current state.", error=startRestServerRes.error + error "Starting REST server failed. Continuing in current state.", + error = startRestServerRes.error else: app.restServer = some(startRestServerRes.value) - if app.conf.metricsServer: - let startMetricsServerRes = startMetricsServer(app.conf.metricsServerAddress, Port(app.conf.metricsServerPort + app.conf.portsShift)) + let startMetricsServerRes = startMetricsServer( + app.conf.metricsServerAddress, + Port(app.conf.metricsServerPort + app.conf.portsShift), + ) if startMetricsServerRes.isErr(): - error "Starting metrics server failed. Continuing in current state.", error=startMetricsServerRes.error + error "Starting metrics server failed. Continuing in current state.", + error = startMetricsServerRes.error else: app.metricsServer = some(startMetricsServerRes.value) if app.conf.metricsLogging: let startMetricsLoggingRes = startMetricsLogging() if startMetricsLoggingRes.isErr(): - error "Starting metrics console logging failed. Continuing in current state.", error=startMetricsLoggingRes.error + error "Starting metrics console logging failed. 
Continuing in current state.", + error = startMetricsLoggingRes.error ok() - # App shutdown proc stop*(app: App): Future[void] {.async: (raises: [Exception]).} = diff --git a/apps/wakunode2/networks_config.nim b/apps/wakunode2/networks_config.nim index 3a7d78aa4..9c793db59 100644 --- a/apps/wakunode2/networks_config.nim +++ b/apps/wakunode2/networks_config.nim @@ -33,13 +33,13 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf = pubsubTopics: @[ "/waku/2/rs/1/0", "/waku/2/rs/1/1", "/waku/2/rs/1/2", "/waku/2/rs/1/3", - "/waku/2/rs/1/4", "/waku/2/rs/1/5", "/waku/2/rs/1/6", "/waku/2/rs/1/7" + "/waku/2/rs/1/4", "/waku/2/rs/1/5", "/waku/2/rs/1/6", "/waku/2/rs/1/7", ], discv5Discovery: true, discv5BootstrapNodes: @[ "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Ugl_r25UHQJ3f1rIRrpzxJXSMaJe4yk1XFSAYJpZIJ2NIJpcISygI2rim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJATXRSRSUyTw_QLB6H_U3oziVQgNRgrXpK7wp2AMyNxYN0Y3CCdl-DdWRwgiMohXdha3UyDw", "enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9uAYJpZIJ2NIJpcIQiEAFDim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQMIJwesBVgUiBCi8yiXGx7RWylBQkYm1U9dvEy-neLG2YN0Y3CCdl-DdWRwgiMohXdha3UyDw", - "enr:-QEkuEDzQyIAhs-CgBHIrJqtBv3EY1uP1Psrc-y8yJKsmxW7dh3DNcq2ergMUWSFVcJNlfcgBeVsFPkgd_QopRIiCV2pAYJpZIJ2NIJpcIQI2ttrim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJIN4qwz3v4r2Q8Bv8zZD0eqBcKw6bdLvdkV7-JLjqIj4N0Y3CCdl-DdWRwgiMohXdha3UyDw" + "enr:-QEkuEDzQyIAhs-CgBHIrJqtBv3EY1uP1Psrc-y8yJKsmxW7dh3DNcq2ergMUWSFVcJNlfcgBeVsFPkgd_QopRIiCV2pAYJpZIJ2NIJpcIQI2ttrim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJIN4qwz3v4r2Q8Bv8zZD0eqBcKw6bdLvdkV7-JLjqIj4N0Y3CCdl-DdWRwgiMohXdha3UyDw", ], ) diff --git a/apps/wakunode2/wakunode2.nim b/apps/wakunode2/wakunode2.nim index dc8bc6d68..0fda95035 100644 --- a/apps/wakunode2/wakunode2.nim +++ b/apps/wakunode2/wakunode2.nim @@ -32,12 +32,10 @@ proc logConfig(conf: WakuNodeConf) = lightpush = conf.lightpush, peerExchange = conf.peerExchange - info "Configuration. Network", - cluster = conf.clusterId, - maxPeers = conf.maxRelayPeers + info "Configuration. Network", cluster = conf.clusterId, maxPeers = conf.maxRelayPeers for shard in conf.pubsubTopics: - info "Configuration. Shards", shard=shard + info "Configuration. Shards", shard = shard for i in conf.discv5BootstrapNodes: info "Configuration. Bootstrap nodes", node = i @@ -123,7 +121,7 @@ when isMainModule: wakunode2.setupMonitoringAndExternalInterfaces().isOkOr: error "Starting monitoring and external interfaces failed", error = error - quit(QuitFailure) + quit(QuitFailure) debug "Setting up shutdown hooks" ## Setup shutdown hooks for this process. 
diff --git a/examples/filter_subscriber.nim b/examples/filter_subscriber.nim index 8b0a05c97..2f4556f7b 100644 --- a/examples/filter_subscriber.nim +++ b/examples/filter_subscriber.nim @@ -1,11 +1,7 @@ ## Example showing how a resource restricted client may ## subscribe to messages without relay -import - chronicles, - chronos, - stew/byteutils, - stew/results +import chronicles, chronos, stew/byteutils, stew/results import ../../../waku/common/logging, ../../../waku/node/peer_manager, @@ -13,34 +9,42 @@ import ../../../waku/waku_filter_v2/client const - FilterPeer = "/ip4/104.154.239.128/tcp/30303/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS" # node-01.gc-us-central1-a.wakuv2.test.statusim.net on wakuv2.test + FilterPeer = + "/ip4/104.154.239.128/tcp/30303/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS" + # node-01.gc-us-central1-a.wakuv2.test.statusim.net on wakuv2.test FilterPubsubTopic = PubsubTopic("/waku/2/default-waku/proto") FilterContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto") -proc unsubscribe(wfc: WakuFilterClient, - filterPeer: RemotePeerInfo, - filterPubsubTopic: PubsubTopic, - filterContentTopic: ContentTopic) {.async.} = +proc unsubscribe( + wfc: WakuFilterClient, + filterPeer: RemotePeerInfo, + filterPubsubTopic: PubsubTopic, + filterContentTopic: ContentTopic, +) {.async.} = notice "unsubscribing from filter" - let unsubscribeRes = await wfc.unsubscribe(filterPeer, filterPubsubTopic, @[filterContentTopic]) + let unsubscribeRes = + await wfc.unsubscribe(filterPeer, filterPubsubTopic, @[filterContentTopic]) if unsubscribeRes.isErr: - notice "unsubscribe request failed", err=unsubscribeRes.error + notice "unsubscribe request failed", err = unsubscribeRes.error else: notice "unsubscribe request successful" -proc messagePushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) - {.async, gcsafe.} = +proc messagePushHandler( + pubsubTopic: PubsubTopic, message: WakuMessage +) {.async, gcsafe.} = let payloadStr = string.fromBytes(message.payload) - notice "message received", payload=payloadStr, - pubsubTopic=pubsubTopic, - contentTopic=message.contentTopic, - timestamp=message.timestamp + notice "message received", + payload = payloadStr, + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + timestamp = message.timestamp - -proc maintainSubscription(wfc: WakuFilterClient, - filterPeer: RemotePeerInfo, - filterPubsubTopic: PubsubTopic, - filterContentTopic: ContentTopic) {.async.} = +proc maintainSubscription( + wfc: WakuFilterClient, + filterPeer: RemotePeerInfo, + filterPubsubTopic: PubsubTopic, + filterContentTopic: ContentTopic, +) {.async.} = while true: notice "maintaining subscription" # First use filter-ping to check if we have an active subscription @@ -49,10 +53,11 @@ proc maintainSubscription(wfc: WakuFilterClient, # No subscription found. Let's subscribe. notice "no subscription found. Sending subscribe request" - let subscribeRes = await wfc.subscribe(filterPeer, filterPubsubTopic, @[filterContentTopic]) + let subscribeRes = + await wfc.subscribe(filterPeer, filterPubsubTopic, @[filterContentTopic]) if subscribeRes.isErr(): - notice "subscribe request failed. Quitting.", err=subscribeRes.error + notice "subscribe request failed. Quitting.", err = subscribeRes.error break else: notice "subscribe request successful." 
@@ -78,7 +83,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) = wfc.registerPushHandler(messagePushHandler) # Start maintaining subscription - asyncSpawn maintainSubscription(wfc, filterPeer, FilterPubsubTopic, FilterContentTopic) + asyncSpawn maintainSubscription( + wfc, filterPeer, FilterPubsubTopic, FilterContentTopic + ) when isMainModule: let rng = newRng() diff --git a/examples/lightpush_publisher.nim b/examples/lightpush_publisher.nim index 28982594d..05eb74a41 100644 --- a/examples/lightpush_publisher.nim +++ b/examples/lightpush_publisher.nim @@ -1,11 +1,7 @@ ## Example showing how a resource restricted client may ## use lightpush to publish messages without relay -import - chronicles, - chronos, - stew/byteutils, - stew/results +import chronicles, chronos, stew/byteutils, stew/results import ../../../waku/common/logging, ../../../waku/node/peer_manager, @@ -13,27 +9,33 @@ import ../../../waku/waku_lightpush/client const - LightpushPeer = "/ip4/134.209.139.210/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ" # node-01.do-ams3.wakuv2.test.statusim.net on wakuv2.test + LightpushPeer = + "/ip4/134.209.139.210/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ" + # node-01.do-ams3.wakuv2.test.statusim.net on wakuv2.test LightpushPubsubTopic = PubsubTopic("/waku/2/default-waku/proto") LightpushContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto") -proc publishMessages(wlc: WakuLightpushClient, - lightpushPeer: RemotePeerInfo, - lightpushPubsubTopic: PubsubTopic, - lightpushContentTopic: ContentTopic) {.async.} = +proc publishMessages( + wlc: WakuLightpushClient, + lightpushPeer: RemotePeerInfo, + lightpushPubsubTopic: PubsubTopic, + lightpushContentTopic: ContentTopic, +) {.async.} = while true: let text = "hi there i'm a lightpush publisher" - let message = WakuMessage(payload: toBytes(text), # content of the message - contentTopic: lightpushContentTopic, # content topic to publish to - ephemeral: true, # tell store nodes to not store it - timestamp: getNowInNanosecondTime()) # current timestamp + let message = WakuMessage( + payload: toBytes(text), # content of the message + contentTopic: lightpushContentTopic, # content topic to publish to + ephemeral: true, # tell store nodes to not store it + timestamp: getNowInNanosecondTime(), + ) # current timestamp let wlpRes = await wlc.publish(lightpushPubsubTopic, message, lightpushPeer) if wlpRes.isOk(): - notice "published message using lightpush", message=message + notice "published message using lightpush", message = message else: - notice "failed to publish message using lightpush", err=wlpRes.error() + notice "failed to publish message using lightpush", err = wlpRes.error() await sleepAsync(5000) # Publish every 5 seconds @@ -49,7 +51,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) = wlc = WakuLightpushClient.new(pm, rng) # Start maintaining subscription - asyncSpawn publishMessages(wlc, lightpushPeer, LightpushPubsubTopic, LightpushContentTopic) + asyncSpawn publishMessages( + wlc, lightpushPeer, LightpushPubsubTopic, LightpushContentTopic + ) when isMainModule: let rng = newRng() diff --git a/examples/publisher.nim b/examples/publisher.nim index 800ea8932..a8910f5c2 100644 --- a/examples/publisher.nim +++ b/examples/publisher.nim @@ -1,5 +1,5 @@ import - std/[tables,times,sequtils], + std/[tables, times, sequtils], stew/byteutils, stew/shims/net, chronicles, @@ -23,103 +23,113 @@ proc now*(): Timestamp = # An accesible bootstrap node. 
See wakuv2.prod fleets.status.im - -const bootstrapNode = "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9D" & - "OGnZlK0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgn" & - "Y0gmlwhAjS3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY" & - "24taG9uZ2tvbmctYy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQG" & - "H0DeA4lzZWNwMjU2azGhAo0C-VvfgHiXrxZi3umDiooXMGY9FvY" & - "j5_d1Q4EeS7eyg3RjcIJ2X4N1ZHCCIyiFd2FrdTIP" +const bootstrapNode = + "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9D" & + "OGnZlK0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgn" & + "Y0gmlwhAjS3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY" & + "24taG9uZ2tvbmctYy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQG" & + "H0DeA4lzZWNwMjU2azGhAo0C-VvfgHiXrxZi3umDiooXMGY9FvY" & + "j5_d1Q4EeS7eyg3RjcIJ2X4N1ZHCCIyiFd2FrdTIP" # careful if running pub and sub in the same machine const wakuPort = 60000 const discv5Port = 9000 proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = - # use notice to filter all waku messaging - setupLogLevel(logging.LogLevel.NOTICE) - notice "starting publisher", wakuPort=wakuPort, discv5Port=discv5Port - let - nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get() - ip = parseIpAddress("0.0.0.0") - flags = CapabilitiesBitfield.init(lightpush = false, filter = false, store = false, relay = true) - - var enrBuilder = EnrBuilder.init(nodeKey) - - let recordRes = enrBuilder.build() - let record = - if recordRes.isErr(): - error "failed to create enr record", error=recordRes.error - quit(QuitFailure) - else: recordRes.get() - - var builder = WakuNodeBuilder.init() - builder.withNodeKey(nodeKey) - builder.withRecord(record) - builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() - let node = builder.build().tryGet() - - var bootstrapNodeEnr: enr.Record - discard bootstrapNodeEnr.fromURI(bootstrapNode) - - let discv5Conf = WakuDiscoveryV5Config( - discv5Config: none(DiscoveryConfig), - address: ip, - port: Port(discv5Port), - privateKey: keys.PrivateKey(nodeKey.skkey), - bootstrapRecords: @[bootstrapNodeEnr], - autoupdateRecord: true, + # use notice to filter all waku messaging + setupLogLevel(logging.LogLevel.NOTICE) + notice "starting publisher", wakuPort = wakuPort, discv5Port = discv5Port + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get() + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init( + lightpush = false, filter = false, store = false, relay = true ) - # assumes behind a firewall, so not care about being discoverable - let wakuDiscv5 = WakuDiscoveryV5.new( - node.rng, - discv5Conf, - some(node.enr), - some(node.peerManager), - node.topicSubscriptionQueue, - ) + var enrBuilder = EnrBuilder.init(nodeKey) - await node.start() - await node.mountRelay() - node.peerManager.start() + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() - (await wakuDiscv5.start()).isOkOr: - error "failed to start discv5", error = error - quit(1) + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() - # wait for a minimum of peers to be connected, otherwise messages wont be gossiped - while true: - let numConnectedPeers = node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected) - if numConnectedPeers >= 6: - notice "publisher is ready", connectedPeers=numConnectedPeers, required=6 - break - notice "waiting to 
be ready", connectedPeers=numConnectedPeers, required=6 - await sleepAsync(5000) + var bootstrapNodeEnr: enr.Record + discard bootstrapNodeEnr.fromURI(bootstrapNode) - # Make sure it matches the publisher. Use default value - # see spec: https://rfc.vac.dev/spec/23/ - let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto") + let discv5Conf = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: ip, + port: Port(discv5Port), + privateKey: keys.PrivateKey(nodeKey.skkey), + bootstrapRecords: @[bootstrapNodeEnr], + autoupdateRecord: true, + ) - # any content topic can be chosen - let contentTopic = ContentTopic("/examples/1/pubsub-example/proto") + # assumes behind a firewall, so not care about being discoverable + let wakuDiscv5 = WakuDiscoveryV5.new( + node.rng, + discv5Conf, + some(node.enr), + some(node.peerManager), + node.topicSubscriptionQueue, + ) - notice "publisher service started" - while true: - let text = "hi there i'm a publisher" - let message = WakuMessage(payload: toBytes(text), # content of the message - contentTopic: contentTopic, # content topic to publish to - ephemeral: true, # tell store nodes to not store it - timestamp: now()) # current timestamp - - let res = await node.publish(some(pubSubTopic), message) - - if res.isOk: - notice "published message", text = text, timestamp = message.timestamp, psTopic = pubSubTopic, contentTopic = contentTopic - else: - error "failed to publish message", error = res.error - - await sleepAsync(5000) + await node.start() + await node.mountRelay() + node.peerManager.start() + + (await wakuDiscv5.start()).isOkOr: + error "failed to start discv5", error = error + quit(1) + + # wait for a minimum of peers to be connected, otherwise messages wont be gossiped + while true: + let numConnectedPeers = + node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected) + if numConnectedPeers >= 6: + notice "publisher is ready", connectedPeers = numConnectedPeers, required = 6 + break + notice "waiting to be ready", connectedPeers = numConnectedPeers, required = 6 + await sleepAsync(5000) + + # Make sure it matches the publisher. Use default value + # see spec: https://rfc.vac.dev/spec/23/ + let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto") + + # any content topic can be chosen + let contentTopic = ContentTopic("/examples/1/pubsub-example/proto") + + notice "publisher service started" + while true: + let text = "hi there i'm a publisher" + let message = WakuMessage( + payload: toBytes(text), # content of the message + contentTopic: contentTopic, # content topic to publish to + ephemeral: true, # tell store nodes to not store it + timestamp: now(), + ) # current timestamp + + let res = await node.publish(some(pubSubTopic), message) + + if res.isOk: + notice "published message", + text = text, + timestamp = message.timestamp, + psTopic = pubSubTopic, + contentTopic = contentTopic + else: + error "failed to publish message", error = res.error + + await sleepAsync(5000) when isMainModule: let rng = crypto.newRng() diff --git a/examples/subscriber.nim b/examples/subscriber.nim index a415cc419..78bbbdbbe 100644 --- a/examples/subscriber.nim +++ b/examples/subscriber.nim @@ -19,94 +19,100 @@ import ../../../waku/factory/builder # An accesible bootstrap node. 
See wakuv2.prod fleets.status.im -const bootstrapNode = "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9DOGnZl" & - "K0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgnY0gmlwhAjS" & - "3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY24taG9uZ2tvbmct" & - "Yy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQGH0DeA4lzZWNwMjU2azGh" & - "Ao0C-VvfgHiXrxZi3umDiooXMGY9FvYj5_d1Q4EeS7eyg3RjcIJ2X4N1" & - "ZHCCIyiFd2FrdTIP" +const bootstrapNode = + "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9DOGnZl" & + "K0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgnY0gmlwhAjS" & + "3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY24taG9uZ2tvbmct" & + "Yy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQGH0DeA4lzZWNwMjU2azGh" & + "Ao0C-VvfgHiXrxZi3umDiooXMGY9FvYj5_d1Q4EeS7eyg3RjcIJ2X4N1" & "ZHCCIyiFd2FrdTIP" # careful if running pub and sub in the same machine const wakuPort = 50000 const discv5Port = 8000 proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = - # use notice to filter all waku messaging - setupLogLevel(logging.LogLevel.NOTICE) - notice "starting subscriber", wakuPort=wakuPort, discv5Port=discv5Port - let - nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[] - ip = parseIpAddress("0.0.0.0") - flags = CapabilitiesBitfield.init(lightpush = false, filter = false, store = false, relay = true) - - var enrBuilder = EnrBuilder.init(nodeKey) - - let recordRes = enrBuilder.build() - let record = - if recordRes.isErr(): - error "failed to create enr record", error=recordRes.error - quit(QuitFailure) - else: recordRes.get() - - var builder = WakuNodeBuilder.init() - builder.withNodeKey(nodeKey) - builder.withRecord(record) - builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() - let node = builder.build().tryGet() - - var bootstrapNodeEnr: enr.Record - discard bootstrapNodeEnr.fromURI(bootstrapNode) - - let discv5Conf = WakuDiscoveryV5Config( - discv5Config: none(DiscoveryConfig), - address: ip, - port: Port(discv5Port), - privateKey: keys.PrivateKey(nodeKey.skkey), - bootstrapRecords: @[bootstrapNodeEnr], - autoupdateRecord: true, + # use notice to filter all waku messaging + setupLogLevel(logging.LogLevel.NOTICE) + notice "starting subscriber", wakuPort = wakuPort, discv5Port = discv5Port + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[] + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init( + lightpush = false, filter = false, store = false, relay = true ) - # assumes behind a firewall, so not care about being discoverable - let wakuDiscv5 = WakuDiscoveryV5.new( - node.rng, - discv5Conf, - some(node.enr), - some(node.peerManager), - node.topicSubscriptionQueue, - ) + var enrBuilder = EnrBuilder.init(nodeKey) - await node.start() - await node.mountRelay() - node.peerManager.start() + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() - (await wakuDiscv5.start()).isOkOr: - error "failed to start discv5", error = error - quit(1) + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() - # wait for a minimum of peers to be connected, otherwise messages wont be gossiped - while true: - let numConnectedPeers = node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected) - if numConnectedPeers >= 6: - notice "subscriber is ready", connectedPeers=numConnectedPeers, required=6 - break - notice "waiting to be 
ready", connectedPeers=numConnectedPeers, required=6 - await sleepAsync(5000) + var bootstrapNodeEnr: enr.Record + discard bootstrapNodeEnr.fromURI(bootstrapNode) - # Make sure it matches the publisher. Use default value - # see spec: https://rfc.vac.dev/spec/23/ - let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto") + let discv5Conf = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: ip, + port: Port(discv5Port), + privateKey: keys.PrivateKey(nodeKey.skkey), + bootstrapRecords: @[bootstrapNodeEnr], + autoupdateRecord: true, + ) - # any content topic can be chosen. make sure it matches the publisher - let contentTopic = ContentTopic("/examples/1/pubsub-example/proto") + # assumes behind a firewall, so not care about being discoverable + let wakuDiscv5 = WakuDiscoveryV5.new( + node.rng, + discv5Conf, + some(node.enr), + some(node.peerManager), + node.topicSubscriptionQueue, + ) - proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = - let payloadStr = string.fromBytes(msg.payload) - if msg.contentTopic == contentTopic: - notice "message received", payload=payloadStr, - pubsubTopic=pubsubTopic, - contentTopic=msg.contentTopic, - timestamp=msg.timestamp - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(handler)) + await node.start() + await node.mountRelay() + node.peerManager.start() + + (await wakuDiscv5.start()).isOkOr: + error "failed to start discv5", error = error + quit(1) + + # wait for a minimum of peers to be connected, otherwise messages wont be gossiped + while true: + let numConnectedPeers = + node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected) + if numConnectedPeers >= 6: + notice "subscriber is ready", connectedPeers = numConnectedPeers, required = 6 + break + notice "waiting to be ready", connectedPeers = numConnectedPeers, required = 6 + await sleepAsync(5000) + + # Make sure it matches the publisher. Use default value + # see spec: https://rfc.vac.dev/spec/23/ + let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto") + + # any content topic can be chosen. 
make sure it matches the publisher + let contentTopic = ContentTopic("/examples/1/pubsub-example/proto") + + proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + let payloadStr = string.fromBytes(msg.payload) + if msg.contentTopic == contentTopic: + notice "message received", + payload = payloadStr, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(handler)) when isMainModule: let rng = crypto.newRng() diff --git a/examples/wakustealthcommitments/erc_5564_interface.nim b/examples/wakustealthcommitments/erc_5564_interface.nim index 94ed4accb..a2bd53b6e 100644 --- a/examples/wakustealthcommitments/erc_5564_interface.nim +++ b/examples/wakustealthcommitments/erc_5564_interface.nim @@ -30,32 +30,47 @@ type CKeyPair* = object private_key: CFr public_key: CG1Projective -proc drop_ffi_derive_public_key*(ptrx: ptr CReturn[CG1Projective]) {.importc: "drop_ffi_derive_public_key".} +proc drop_ffi_derive_public_key*( + ptrx: ptr CReturn[CG1Projective] +) {.importc: "drop_ffi_derive_public_key".} -proc drop_ffi_generate_random_fr*(ptrx: ptr CReturn[CFr]) {.importc: "drop_ffi_generate_random_fr".} +proc drop_ffi_generate_random_fr*( + ptrx: ptr CReturn[CFr] +) {.importc: "drop_ffi_generate_random_fr".} -proc drop_ffi_generate_stealth_commitment*(ptrx: ptr CReturn[CStealthCommitment]) {.importc: "drop_ffi_generate_stealth_commitment".} +proc drop_ffi_generate_stealth_commitment*( + ptrx: ptr CReturn[CStealthCommitment] +) {.importc: "drop_ffi_generate_stealth_commitment".} -proc drop_ffi_generate_stealth_private_key*(ptrx: ptr CReturn[CFr]) {.importc: "drop_ffi_generate_stealth_private_key".} +proc drop_ffi_generate_stealth_private_key*( + ptrx: ptr CReturn[CFr] +) {.importc: "drop_ffi_generate_stealth_private_key".} -proc drop_ffi_random_keypair*(ptrx: ptr CReturn[CKeyPair]) {.importc: "drop_ffi_random_keypair".} +proc drop_ffi_random_keypair*( + ptrx: ptr CReturn[CKeyPair] +) {.importc: "drop_ffi_random_keypair".} -proc ffi_derive_public_key*(private_key: ptr CFr): (ptr CReturn[CG1Projective]) {.importc: "ffi_derive_public_key".} +proc ffi_derive_public_key*( + private_key: ptr CFr +): (ptr CReturn[CG1Projective]) {.importc: "ffi_derive_public_key".} proc ffi_generate_random_fr*(): (ptr CReturn[CFr]) {.importc: "ffi_generate_random_fr".} -proc ffi_generate_stealth_commitment*(viewing_public_key: ptr CG1Projective, - spending_public_key: ptr CG1Projective, - ephemeral_private_key: ptr CFr): (ptr CReturn[CStealthCommitment]) {.importc: "ffi_generate_stealth_commitment".} +proc ffi_generate_stealth_commitment*( + viewing_public_key: ptr CG1Projective, + spending_public_key: ptr CG1Projective, + ephemeral_private_key: ptr CFr, +): (ptr CReturn[CStealthCommitment]) {.importc: "ffi_generate_stealth_commitment".} -proc ffi_generate_stealth_private_key*(ephemeral_public_key: ptr CG1Projective, - spending_key: ptr CFr, - viewing_key: ptr CFr, - view_tag: ptr uint64): (ptr CReturn[CFr]) {.importc: "ffi_generate_stealth_private_key".} +proc ffi_generate_stealth_private_key*( + ephemeral_public_key: ptr CG1Projective, + spending_key: ptr CFr, + viewing_key: ptr CFr, + view_tag: ptr uint64, +): (ptr CReturn[CFr]) {.importc: "ffi_generate_stealth_private_key".} proc ffi_random_keypair*(): (ptr CReturn[CKeyPair]) {.importc: "ffi_random_keypair".} - ## Nim wrappers and types for the ERC-5564-BN254 module type FFIResult[T] = Result[T, string] @@ -64,9 +79,11 @@ type G1Projective = 
array[32, uint8] type KeyPair* = object private_key*: Fr public_key*: G1Projective + type StealthCommitment* = object stealth_commitment*: G1Projective view_tag*: uint64 + type PrivateKey* = Fr type PublicKey* = G1Projective @@ -88,13 +105,18 @@ proc generateKeypair*(): FFIResult[KeyPair] = drop_ffi_random_keypair(res_ptr) return err("Error generating random keypair: " & $res_value.err_code) - let ret = KeyPair(private_key: res_value.value.private_key.x0, public_key: res_value.value.public_key.x0) + let ret = KeyPair( + private_key: res_value.value.private_key.x0, + public_key: res_value.value.public_key.x0, + ) drop_ffi_random_keypair(res_ptr) return ok(ret) -proc generateStealthCommitment*(viewing_public_key: G1Projective, - spending_public_key: G1Projective, - ephemeral_private_key: Fr): FFIResult[StealthCommitment] = +proc generateStealthCommitment*( + viewing_public_key: G1Projective, + spending_public_key: G1Projective, + ephemeral_private_key: Fr, +): FFIResult[StealthCommitment] = let viewing_public_key = CG1Projective(x0: viewing_public_key) let viewing_public_key_ptr = unsafeAddr(viewing_public_key) let spending_public_key = CG1Projective(x0: spending_public_key) @@ -102,20 +124,29 @@ proc generateStealthCommitment*(viewing_public_key: G1Projective, let ephemeral_private_key = CFr(x0: ephemeral_private_key) let ephemeral_private_key_ptr = unsafeAddr(ephemeral_private_key) - let res_ptr = (ffi_generate_stealth_commitment(viewing_public_key_ptr, spending_public_key_ptr, ephemeral_private_key_ptr)) + let res_ptr = ( + ffi_generate_stealth_commitment( + viewing_public_key_ptr, spending_public_key_ptr, ephemeral_private_key_ptr + ) + ) let res_value = res_ptr[] if res_value.err_code != 0: drop_ffi_generate_stealth_commitment(res_ptr) return err("Error generating stealth commitment: " & $res_value.err_code) - let ret = StealthCommitment(stealth_commitment: res_value.value.stealth_commitment.x0, view_tag: res_value.value.view_tag) + let ret = StealthCommitment( + stealth_commitment: res_value.value.stealth_commitment.x0, + view_tag: res_value.value.view_tag, + ) drop_ffi_generate_stealth_commitment(res_ptr) return ok(ret) -proc generateStealthPrivateKey*(ephemeral_public_key: G1Projective, - spending_key: Fr, - viewing_key: Fr, - view_tag: uint64): FFIResult[Fr] = +proc generateStealthPrivateKey*( + ephemeral_public_key: G1Projective, + spending_key: Fr, + viewing_key: Fr, + view_tag: uint64, +): FFIResult[Fr] = let ephemeral_public_key = CG1Projective(x0: ephemeral_public_key) let ephemeral_public_key_ptr = unsafeAddr(ephemeral_public_key) let spending_key = CFr(x0: spending_key) @@ -124,7 +155,11 @@ proc generateStealthPrivateKey*(ephemeral_public_key: G1Projective, let viewing_key_ptr = unsafeAddr(viewing_key) let view_tag_ptr = unsafeAddr(view_tag) - let res_ptr = (ffi_generate_stealth_private_key(ephemeral_public_key_ptr, spending_key_ptr, viewing_key_ptr, view_tag_ptr)) + let res_ptr = ( + ffi_generate_stealth_private_key( + ephemeral_public_key_ptr, spending_key_ptr, viewing_key_ptr, view_tag_ptr + ) + ) let res_value = res_ptr[] if res_value.err_code != 0: drop_ffi_generate_stealth_private_key(res_ptr) diff --git a/examples/wakustealthcommitments/node_spec.nim b/examples/wakustealthcommitments/node_spec.nim index d2b1e81b0..f13f1cbba 100644 --- a/examples/wakustealthcommitments/node_spec.nim +++ b/examples/wakustealthcommitments/node_spec.nim @@ -15,19 +15,8 @@ import libp2p/crypto/crypto export - networks_config, - app, - logging, - options, - strutils, - os, - sequtils, - 
stewNet, - chronicles, - chronos, - metrics, - libbacktrace, - crypto + networks_config, app, logging, options, strutils, os, sequtils, stewNet, chronicles, + chronos, metrics, libbacktrace, crypto proc setup*(): App = const versionString = "version / git commit hash: " & app.git_version diff --git a/examples/wakustealthcommitments/stealth_commitment_protocol.nim b/examples/wakustealthcommitments/stealth_commitment_protocol.nim index c9cf0acce..2f4f066a0 100644 --- a/examples/wakustealthcommitments/stealth_commitment_protocol.nim +++ b/examples/wakustealthcommitments/stealth_commitment_protocol.nim @@ -12,9 +12,7 @@ import ./node_spec, ./wire_spec -export - wire_spec, - logging +export wire_spec, logging type StealthCommitmentProtocol* = object wakuApp: App @@ -22,28 +20,36 @@ type StealthCommitmentProtocol* = object spendingKeyPair: StealthCommitmentFFI.KeyPair viewingKeyPair: StealthCommitmentFFI.KeyPair -proc deserialize(T: type StealthCommitmentFFI.PublicKey, v: SerializedKey): Result[T, string] = +proc deserialize( + T: type StealthCommitmentFFI.PublicKey, v: SerializedKey +): Result[T, string] = # deserialize seq[byte] into array[32, uint8] if v.len != 32: return err("invalid key length") var buf: array[32, uint8] - for i in 0.. 0: some(spendingPubKey) else: none(SerializedKey) + discard ?pb.getField(2, spendingPubKey) + msg.spendingPubKey = + if spendingPubKey.len > 0: + some(spendingPubKey) + else: + none(SerializedKey) var viewingPubKey = newSeq[byte]() - discard ? pb.getField(3, viewingPubKey) - msg.viewingPubKey = if viewingPubKey.len > 0: some(viewingPubKey) else: none(SerializedKey) - + discard ?pb.getField(3, viewingPubKey) + msg.viewingPubKey = + if viewingPubKey.len > 0: + some(viewingPubKey) + else: + none(SerializedKey) + if msg.spendingPubKey.isSome() and msg.viewingPubKey.isSome(): msg.stealthCommitment = none(SerializedKey) msg.viewTag = none(uint64) @@ -58,20 +51,32 @@ proc decode*(T: type WakuStealthCommitmentMsg, buffer: seq[byte]): ProtoResult[T if msg.request == true and msg.spendingPubKey.isNone() and msg.viewingPubKey.isNone(): return err(ProtoError.RequiredFieldMissing) - var stealthCommitment = newSeq[byte]() - discard ? pb.getField(4, stealthCommitment) - msg.stealthCommitment = if stealthCommitment.len > 0: some(stealthCommitment) else: none(SerializedKey) + discard ?pb.getField(4, stealthCommitment) + msg.stealthCommitment = + if stealthCommitment.len > 0: + some(stealthCommitment) + else: + none(SerializedKey) var ephemeralPubKey = newSeq[byte]() - discard ? pb.getField(5, ephemeralPubKey) - msg.ephemeralPubKey = if ephemeralPubKey.len > 0: some(ephemeralPubKey) else: none(SerializedKey) + discard ?pb.getField(5, ephemeralPubKey) + msg.ephemeralPubKey = + if ephemeralPubKey.len > 0: + some(ephemeralPubKey) + else: + none(SerializedKey) var viewTag: uint64 - discard ? 
pb.getField(6, viewTag) - msg.viewTag = if viewTag != 0: some(viewTag) else: none(uint64) + discard ?pb.getField(6, viewTag) + msg.viewTag = + if viewTag != 0: + some(viewTag) + else: + none(uint64) - if msg.stealthCommitment.isNone() and msg.viewTag.isNone() and msg.ephemeralPubKey.isNone(): + if msg.stealthCommitment.isNone() and msg.viewTag.isNone() and + msg.ephemeralPubKey.isNone(): return err(ProtoError.RequiredFieldMissing) if msg.stealthCommitment.isSome() and msg.viewTag.isNone(): @@ -90,7 +95,7 @@ proc encode*(msg: WakuStealthCommitmentMsg): ProtoBuffer = var serialised = initProtoBuffer() serialised.write(1, uint64(msg.request)) - + if msg.spendingPubKey.isSome(): serialised.write(2, msg.spendingPubKey.get()) if msg.viewingPubKey.isSome(): @@ -108,8 +113,21 @@ func toByteSeq*(str: string): seq[byte] {.inline.} = ## Converts a string to the corresponding byte sequence. @(str.toOpenArrayByte(0, str.high)) -proc constructRequest*(spendingPubKey: SerializedKey, viewingPubKey: SerializedKey): WakuStealthCommitmentMsg = - WakuStealthCommitmentMsg(request: true, spendingPubKey: some(spendingPubKey), viewingPubKey: some(viewingPubKey)) +proc constructRequest*( + spendingPubKey: SerializedKey, viewingPubKey: SerializedKey +): WakuStealthCommitmentMsg = + WakuStealthCommitmentMsg( + request: true, + spendingPubKey: some(spendingPubKey), + viewingPubKey: some(viewingPubKey), + ) -proc constructResponse*(stealthCommitment: SerializedKey, ephemeralPubKey: SerializedKey, viewTag: uint64): WakuStealthCommitmentMsg = - WakuStealthCommitmentMsg(request: false, stealthCommitment: some(stealthCommitment), ephemeralPubKey: some(ephemeralPubKey), viewTag: some(viewTag)) \ No newline at end of file +proc constructResponse*( + stealthCommitment: SerializedKey, ephemeralPubKey: SerializedKey, viewTag: uint64 +): WakuStealthCommitmentMsg = + WakuStealthCommitmentMsg( + request: false, + stealthCommitment: some(stealthCommitment), + ephemeralPubKey: some(ephemeralPubKey), + viewTag: some(viewTag), + ) diff --git a/library/alloc.nim b/library/alloc.nim index a08ea5776..251dffde0 100644 --- a/library/alloc.nim +++ b/library/alloc.nim @@ -13,7 +13,7 @@ proc alloc*(str: string): cstring = ## There should be the corresponding manual deallocation with deallocShared ! var ret = cast[cstring](allocShared(str.len + 1)) let s = cast[seq[char]](str) - for i in 0.. 
0: - let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex - return ok(msgHash) + let numPeers = await node.wakuRelay.publish(pubsubTopic, msg) + if numPeers == 0: + return err("Message not sent because no peers found.") + elif numPeers > 0: + let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex + return ok(msgHash) return ok("") diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim index 2ae76903e..d92c2a5dd 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim @@ -1,10 +1,5 @@ - -import - std/[options,sequtils,strutils] -import - chronos, - stew/results, - stew/shims/net +import std/[options, sequtils, strutils] +import chronos, stew/results, stew/shims/net import ../../../../../waku/node/waku_node, ../../../../../waku/waku_archive/driver/builder, @@ -14,36 +9,34 @@ import ../../../../alloc, ../../../../callback -type - StoreReqType* = enum - REMOTE_QUERY ## to perform a query to another Store node - LOCAL_QUERY ## to retrieve the data from 'self' node +type StoreReqType* = enum + REMOTE_QUERY ## to perform a query to another Store node + LOCAL_QUERY ## to retrieve the data from 'self' node -type - StoreQueryRequest* = object - queryJson: cstring - peerAddr: cstring - timeoutMs: cint - storeCallback: WakuCallBack +type StoreQueryRequest* = object + queryJson: cstring + peerAddr: cstring + timeoutMs: cint + storeCallback: WakuCallBack -type - StoreRequest* = object - operation: StoreReqType - storeReq: pointer +type StoreRequest* = object + operation: StoreReqType + storeReq: pointer -proc createShared*(T: type StoreRequest, - operation: StoreReqType, - request: pointer): ptr type T = +proc createShared*( + T: type StoreRequest, operation: StoreReqType, request: pointer +): ptr type T = var ret = createShared(T) ret[].request = request return ret -proc createShared*(T: type StoreQueryRequest, - queryJson: cstring, - peerAddr: cstring, - timeoutMs: cint, - storeCallback: WakuCallBack = nil): ptr type T = - +proc createShared*( + T: type StoreQueryRequest, + queryJson: cstring, + peerAddr: cstring, + timeoutMs: cint, + storeCallback: WakuCallBack = nil, +): ptr type T = var ret = createShared(T) ret[].timeoutMs = timeoutMs ret[].queryJson = queryJson.alloc() @@ -56,20 +49,23 @@ proc destroyShared(self: ptr StoreQueryRequest) = deallocShared(self[].peerAddr) deallocShared(self) -proc process(self: ptr StoreQueryRequest, - node: ptr WakuNode): Future[Result[string, string]] {.async.} = - defer: destroyShared(self) +proc process( + self: ptr StoreQueryRequest, node: ptr WakuNode +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) -proc process*(self: ptr StoreRequest, - node: ptr WakuNode): Future[Result[string, string]] {.async.} = +proc process*( + self: ptr StoreRequest, node: ptr WakuNode +): Future[Result[string, string]] {.async.} = + defer: + deallocShared(self) - defer: deallocShared(self) - - case self.operation: - of REMOTE_QUERY: - return await cast[ptr StoreQueryRequest](self[].storeReq).process(node) - of LOCAL_QUERY: - discard - # cast[ptr StoreQueryRequest](request[].reqContent).process(node) + case self.operation + of REMOTE_QUERY: + return await cast[ptr StoreQueryRequest](self[].storeReq).process(node) + of LOCAL_QUERY: + discard + # cast[ptr 
StoreQueryRequest](request[].reqContent).process(node) return ok("") diff --git a/library/waku_thread/inter_thread_communication/waku_thread_request.nim b/library/waku_thread/inter_thread_communication/waku_thread_request.nim index 4113e5d20..146d46fc9 100644 --- a/library/waku_thread/inter_thread_communication/waku_thread_request.nim +++ b/library/waku_thread/inter_thread_communication/waku_thread_request.nim @@ -1,13 +1,9 @@ - ## This file contains the base message request type that will be handled. ## The requests are created by the main thread and processed by ## the Waku Thread. -import - std/json, - stew/results -import - chronos +import std/json, stew/results +import chronos import ../../../waku/node/waku_node, ./requests/node_lifecycle_request, @@ -16,50 +12,48 @@ import ./requests/protocols/store_request, ./requests/debug_node_request -type - RequestType* {.pure.} = enum - LIFECYCLE, - PEER_MANAGER, - RELAY, - STORE, - DEBUG, +type RequestType* {.pure.} = enum + LIFECYCLE + PEER_MANAGER + RELAY + STORE + DEBUG -type - InterThreadRequest* = object - reqType: RequestType - reqContent: pointer +type InterThreadRequest* = object + reqType: RequestType + reqContent: pointer -proc createShared*(T: type InterThreadRequest, - reqType: RequestType, - reqContent: pointer): ptr type T = +proc createShared*( + T: type InterThreadRequest, reqType: RequestType, reqContent: pointer +): ptr type T = var ret = createShared(T) ret[].reqType = reqType ret[].reqContent = reqContent return ret -proc process*(T: type InterThreadRequest, - request: ptr InterThreadRequest, - node: ptr WakuNode): - Future[Result[string, string]] {.async.} = +proc process*( + T: type InterThreadRequest, request: ptr InterThreadRequest, node: ptr WakuNode +): Future[Result[string, string]] {.async.} = ## Processes the request and deallocates its memory - defer: deallocShared(request) + defer: + deallocShared(request) echo "Request received: " & $request[].reqType let retFut = case request[].reqType - of LIFECYCLE: - cast[ptr NodeLifecycleRequest](request[].reqContent).process(node) - of PEER_MANAGER: - cast[ptr PeerManagementRequest](request[].reqContent).process(node[]) - of RELAY: - cast[ptr RelayRequest](request[].reqContent).process(node) - of STORE: - cast[ptr StoreRequest](request[].reqContent).process(node) - of DEBUG: - cast[ptr DebugNodeRequest](request[].reqContent).process(node[]) + of LIFECYCLE: + cast[ptr NodeLifecycleRequest](request[].reqContent).process(node) + of PEER_MANAGER: + cast[ptr PeerManagementRequest](request[].reqContent).process(node[]) + of RELAY: + cast[ptr RelayRequest](request[].reqContent).process(node) + of STORE: + cast[ptr StoreRequest](request[].reqContent).process(node) + of DEBUG: + cast[ptr DebugNodeRequest](request[].reqContent).process(node[]) return await retFut proc `$`*(self: InterThreadRequest): string = - return $self.reqType \ No newline at end of file + return $self.reqType diff --git a/library/waku_thread/inter_thread_communication/waku_thread_response.nim b/library/waku_thread/inter_thread_communication/waku_thread_response.nim index 894c4d511..69a4a38d9 100644 --- a/library/waku_thread/inter_thread_communication/waku_thread_response.nim +++ b/library/waku_thread/inter_thread_communication/waku_thread_response.nim @@ -1,26 +1,21 @@ - ## This file contains the base message response type that will be handled. ## The response will be created from the Waku Thread and processed in ## the main thread. 
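## Illustrative sketch (not part of the diff): how the request/response types
## above travel between threads. `myRequestPtr` is a hypothetical, already
## shared-allocated payload; the `createShared`/`process` signatures are the
## ones shown in this diff.
# Main thread: wrap the payload and hand it to the Waku thread.
let req = InterThreadRequest.createShared(RequestType.LIFECYCLE, myRequestPtr)
# The pointer crosses the SPSC channel; the Waku thread then runs:
#   let res = waitFor InterThreadRequest.process(req, addr node)
#   let resp = InterThreadResponse.createShared(res)
# Back on the main thread, the response is unwrapped (and deallocated):
#   let outcome = InterThreadResponse.process(resp) # Result[string, string]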
-import
-  std/json,
-  stew/results
-import
-  ../../alloc
+import std/json, stew/results
+import ../../alloc

-type
-  ResponseType {.pure.} = enum
-    OK,
-    ERR,
+type ResponseType {.pure.} = enum
+  OK
+  ERR

-type
-  InterThreadResponse* = object
-    respType: ResponseType
-    content: cstring
+type InterThreadResponse* = object
+  respType: ResponseType
+  content: cstring

-proc createShared*(T: type InterThreadResponse,
-                   res: Result[string, string]): ptr type T =
+proc createShared*(
+    T: type InterThreadResponse, res: Result[string, string]
+): ptr type T =
   ## Converts a `Result[string, string]` into a `ptr InterThreadResponse`
   ## so that it can be transferred to another thread in a safe way.
@@ -35,9 +30,9 @@ proc createShared*(T: type InterThreadResponse,
     ret[].content = res.error.alloc()
     return ret

-proc process*(T: type InterThreadResponse,
-              resp: ptr InterThreadResponse):
-              Result[string, string] =
+proc process*(
+    T: type InterThreadResponse, resp: ptr InterThreadResponse
+): Result[string, string] =
   ## Converts the received `ptr InterThreadResponse` into a
   ## `Result[string, string]`. Notice that the response is expected to be
   ## allocated from the Waku Thread and deallocated by the main thread.
@@ -47,7 +42,7 @@ proc process*(T: type InterThreadResponse,
     deallocShared(resp)

   case resp[].respType
-    of OK:
-      return ok($resp[].content)
-    of ERR:
-      return err($resp[].content)
+  of OK:
+    return ok($resp[].content)
+  of ERR:
+    return err($resp[].content)
diff --git a/library/waku_thread/waku_thread.nim b/library/waku_thread/waku_thread.nim
index 079e22f29..eb5b49bc0 100644
--- a/library/waku_thread/waku_thread.nim
+++ b/library/waku_thread/waku_thread.nim
@@ -1,10 +1,8 @@
-
 {.pragma: exported, exportc, cdecl, raises: [].}
 {.pragma: callback, cdecl, raises: [], gcsafe.}
 {.passc: "-fPIC".}

-import
-  std/[json,sequtils,times,strformat,options,atomics,strutils,os]
+import std/[json, sequtils, times, strformat, options, atomics, strutils, os]
 import
   chronicles,
   chronos,
@@ -14,20 +12,19 @@ import
   stew/shims/net

 import
   ../../../waku/node/waku_node,
-  ../events/[json_message_event,json_base_event],
+  ../events/[json_message_event, json_base_event],
   ./inter_thread_communication/waku_thread_request,
   ./inter_thread_communication/waku_thread_response

-type
-  Context* = object
-    thread: Thread[(ptr Context)]
-    reqChannel: ChannelSPSCSingle[ptr InterThreadRequest]
-    reqSignal: ThreadSignalPtr
-    respChannel: ChannelSPSCSingle[ptr InterThreadResponse]
-    respSignal: ThreadSignalPtr
-    userData*: pointer
-    eventCallback*: pointer
-    eventUserdata*: pointer
+type Context* = object
+  thread: Thread[(ptr Context)]
+  reqChannel: ChannelSPSCSingle[ptr InterThreadRequest]
+  reqSignal: ThreadSignalPtr
+  respChannel: ChannelSPSCSingle[ptr InterThreadResponse]
+  respSignal: ThreadSignalPtr
+  userData*: pointer
+  eventCallback*: pointer
+  eventUserdata*: pointer

 # To control when the thread is running
 var running: Atomic[bool]
@@ -40,7 +37,8 @@ var initialized: Atomic[bool]
 proc waku_init() =
   if not initialized.exchange(true):
     NimMain() # Every Nim library needs to call `NimMain` exactly once
-    when declared(setupForeignThreadGc): setupForeignThreadGc()
+    when declared(setupForeignThreadGc):
+      setupForeignThreadGc()
     when declared(nimGC_setStackBottom):
       var locals {.volatile, noinit.}: pointer
       locals = addr(locals)
@@ -59,8 +57,7 @@ proc run(ctx: ptr Context) {.thread.} =
     waitFor ctx.reqSignal.wait()
     let recvOk = ctx.reqChannel.tryRecv(request)
     if recvOk == true:
-      let resultResponse =
-        waitFor
InterThreadRequest.process(request, addr node) + let resultResponse = waitFor InterThreadRequest.process(request, addr node) ## Converting a `Result` into a thread-safe transferable response type let threadSafeResp = InterThreadResponse.createShared(resultResponse) @@ -106,10 +103,9 @@ proc stopWakuThread*(ctx: ptr Context): Result[void, string] = freeShared(ctx) return ok() -proc sendRequestToWakuThread*(ctx: ptr Context, - reqType: RequestType, - reqContent: pointer): Result[string, string] = - +proc sendRequestToWakuThread*( + ctx: ptr Context, reqType: RequestType, reqContent: pointer +): Result[string, string] = let req = InterThreadRequest.createShared(reqType, reqContent) ## Sending the request diff --git a/migrations/message_store_postgres/content_script_version_1.nim b/migrations/message_store_postgres/content_script_version_1.nim index 37c6bf2ec..18133bdca 100644 --- a/migrations/message_store_postgres/content_script_version_1.nim +++ b/migrations/message_store_postgres/content_script_version_1.nim @@ -1,4 +1,5 @@ -const ContentScriptVersion_1* = """ +const ContentScriptVersion_1* = + """ CREATE TABLE IF NOT EXISTS messages ( pubsubTopic VARCHAR NOT NULL, contentTopic VARCHAR NOT NULL, diff --git a/migrations/message_store_postgres/content_script_version_2.nim b/migrations/message_store_postgres/content_script_version_2.nim index 4065a26c6..8c3656e64 100644 --- a/migrations/message_store_postgres/content_script_version_2.nim +++ b/migrations/message_store_postgres/content_script_version_2.nim @@ -1,4 +1,5 @@ -const ContentScriptVersion_2* = """ +const ContentScriptVersion_2* = + """ ALTER TABLE IF EXISTS messages_backup RENAME TO messages; ALTER TABLE messages RENAME TO messages_backup; ALTER TABLE messages_backup DROP CONSTRAINT messageIndex; diff --git a/migrations/message_store_postgres/pg_migration_manager.nim b/migrations/message_store_postgres/pg_migration_manager.nim index a43a289b5..661e23cae 100644 --- a/migrations/message_store_postgres/pg_migration_manager.nim +++ b/migrations/message_store_postgres/pg_migration_manager.nim @@ -1,37 +1,22 @@ +import content_script_version_1, content_script_version_2 -import - content_script_version_1, - content_script_version_2 +type MigrationScript* = object + version*: int + scriptContent*: string -type - MigrationScript* = object - version*: int - scriptContent*: string +proc init*(T: type MigrationScript, targetVersion: int, scriptContent: string): T = + return MigrationScript(targetVersion: targetVersion, scriptContent: scriptContent) -proc init*(T: type MigrationScript, - targetVersion: int, - scriptContent: string): T = +const PgMigrationScripts* = + @[ + MigrationScript(version: 1, scriptContent: ContentScriptVersion_1), + MigrationScript(version: 2, scriptContent: ContentScriptVersion_2), + ] - return MigrationScript( - targetVersion: targetVersion, - scriptContent: scriptContent) - -const PgMigrationScripts* = @[ - MigrationScript( - version: 1, - scriptContent: ContentScriptVersion_1), - MigrationScript( - version: 2, - scriptContent: ContentScriptVersion_2) -] - -proc getMigrationScripts*(currentVersion: int64, - targetVersion: int64): seq[string] = +proc getMigrationScripts*(currentVersion: int64, targetVersion: int64): seq[string] = var ret = newSeq[string]() var v = currentVersion while v < targetVersion: ret.add(PgMigrationScripts[v].scriptContent) v.inc() return ret - - diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index 1cf376495..86f2dc762 100644 --- a/tests/all_tests_waku.nim +++ 
b/tests/all_tests_waku.nim @@ -21,11 +21,10 @@ import const os* {.strdefine.} = "" when os == "Linux" and -# GitHub only supports container actions on Linux -# and we need to start a postgress database in a docker container -defined(postgres): - import - ./waku_archive/test_driver_postgres_query, ./waku_archive/test_driver_postgres + # GitHub only supports container actions on Linux + # and we need to start a postgress database in a docker container + defined(postgres): + import ./waku_archive/test_driver_postgres_query, ./waku_archive/test_driver_postgres # Waku store test suite import diff --git a/tests/common/test_base64_codec.nim b/tests/common/test_base64_codec.nim index 836f9b6bb..1f34e87f1 100644 --- a/tests/common/test_base64_codec.nim +++ b/tests/common/test_base64_codec.nim @@ -1,37 +1,31 @@ - {.used.} -import - std/strutils, - stew/[results, byteutils], - testutils/unittests -import - ../../waku/common/base64 - +import std/strutils, stew/[results, byteutils], testutils/unittests +import ../../waku/common/base64 suite "Waku Common - stew base64 wrapper": - const TestData = @[ - # Test vectors from RFC 4648 - # See: https://datatracker.ietf.org/doc/html/rfc4648#section-10 - ("", Base64String("")), - ("f", Base64String("Zg==")), - ("fo", Base64String("Zm8=")), - ("foo", Base64String("Zm9v")), - ("foob", Base64String("Zm9vYg==")), - ("fooba", Base64String("Zm9vYmE=")), - ("foobar", Base64String("Zm9vYmFy")), + const TestData = + @[ + # Test vectors from RFC 4648 + # See: https://datatracker.ietf.org/doc/html/rfc4648#section-10 + ("", Base64String("")), + ("f", Base64String("Zg==")), + ("fo", Base64String("Zm8=")), + ("foo", Base64String("Zm9v")), + ("foob", Base64String("Zm9vYg==")), + ("fooba", Base64String("Zm9vYmE=")), + ("foobar", Base64String("Zm9vYmFy")), - # Custom test vectors - ("\x01", Base64String("AQ==")), - ("\x13", Base64String("Ew==")), - ("\x01\x02\x03\x04", Base64String("AQIDBA==")) - ] + # Custom test vectors + ("\x01", Base64String("AQ==")), + ("\x13", Base64String("Ew==")), + ("\x01\x02\x03\x04", Base64String("AQIDBA==")), + ] for (plaintext, encoded) in TestData: - test "encode into base64 (" & escape(plaintext) & " -> \"" & string(encoded) & "\")": ## Given - let data = plaintext + let data = plaintext ## When let encodedData = base64.encode(data) @@ -40,7 +34,6 @@ suite "Waku Common - stew base64 wrapper": check: encodedData == encoded - test "decode from base64 (\"" & string(encoded) & "\" -> " & escape(plaintext) & ")": ## Given let data = encoded @@ -55,4 +48,3 @@ suite "Waku Common - stew base64 wrapper": let decoded = decodedRes.tryGet() check: decoded == toBytes(plaintext) - diff --git a/tests/common/test_confutils_envvar.nim b/tests/common/test_confutils_envvar.nim index 7220565cc..907819b37 100644 --- a/tests/common/test_confutils_envvar.nim +++ b/tests/common/test_confutils_envvar.nim @@ -15,31 +15,28 @@ import type ConfResult[T] = Result[T, string] type TestConf = object - configFile* {. - desc: "Configuration file path" - name: "config-file" }: Option[InputFile] - - testFile* {. - desc: "Configuration test file path" - name: "test-file" }: Option[InputFile] + configFile* {.desc: "Configuration file path", name: "config-file".}: + Option[InputFile] - listenAddress* {. - defaultValue: parseIpAddress("127.0.0.1"), - desc: "Listening address", - name: "listen-address"}: IpAddress + testFile* {.desc: "Configuration test file path", name: "test-file".}: + Option[InputFile] - tcpPort* {. 
- desc: "TCP listening port", - defaultValue: 60000, - name: "tcp-port" }: Port + listenAddress* {. + defaultValue: parseIpAddress("127.0.0.1"), + desc: "Listening address", + name: "listen-address" + .}: IpAddress + + tcpPort* {.desc: "TCP listening port", defaultValue: 60000, name: "tcp-port".}: Port {.push warning[ProveInit]: off.} proc load*(T: type TestConf, prefix: string): ConfResult[T] = try: let conf = TestConf.load( - secondarySources = proc (conf: TestConf, sources: auto) - {.gcsafe, raises: [ConfigurationError].} = + secondarySources = proc( + conf: TestConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = sources.addConfigFile(Envvar, InputFile(prefix)) ) ok(conf) @@ -52,7 +49,7 @@ suite "nim-confutils - envvar": test "load configuration from environment variables": ## Given let prefix = "test-prefix" - + let listenAddress = "1.1.1.1" tcpPort = "8080" @@ -62,7 +59,7 @@ suite "nim-confutils - envvar": os.putEnv("TEST_PREFIX_CONFIG_FILE", configFile) os.putEnv("TEST_PREFIX_LISTEN_ADDRESS", listenAddress) os.putEnv("TEST_PREFIX_TCP_PORT", tcpPort) - + let confLoadRes = TestConf.load(prefix) ## Then @@ -72,7 +69,7 @@ suite "nim-confutils - envvar": check: conf.listenAddress == parseIpAddress(listenAddress) conf.tcpPort == Port(8080) - + conf.configFile.isSome() conf.configFile.get().string == configFile diff --git a/tests/common/test_enr_builder.nim b/tests/common/test_enr_builder.nim index 5963e0125..67eed3c7c 100644 --- a/tests/common/test_enr_builder.nim +++ b/tests/common/test_enr_builder.nim @@ -1,17 +1,9 @@ {.used.} -import - std/options, - stew/results, - stew/shims/net, - testutils/unittests -import - ../../waku/common/enr, - ../testlib/wakucore - +import std/options, stew/results, stew/shims/net, testutils/unittests +import ../../waku/common/enr, ../testlib/wakucore suite "nim-eth ENR - builder and typed record": - test "Non-supported private key (ECDSA)": ## Given let privateKey = generateEcdsaKey() @@ -45,28 +37,28 @@ suite "nim-eth ENR - builder and typed record": publicKey.isSome() @(publicKey.get()) == expectedPubKey - suite "nim-eth ENR - Ext: IP address and TCP/UDP ports": - test "EIP-778 test vector": ## Given # Test vector from EIP-778 # See: https://eips.ethereum.org/EIPS/eip-778#test-vectors - let expectedEnr = "-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04j" & - "RzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJ" & - "c2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0x" & - "OIN1ZHCCdl8" + let expectedEnr = + "-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04j" & + "RzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJ" & + "c2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0x" & "OIN1ZHCCdl8" let seqNum = 1u64 - privateKey = ethSecp256k1Key("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + privateKey = ethSecp256k1Key( + "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" + ) enrIpAddr = parseIpAddress("127.0.0.1") enrUdpPort = Port(30303) ## When var builder = EnrBuilder.init(privateKey, seqNum) - builder.withIpAddressAndPorts(ipAddr=some(enrIpAddr), udpPort=some(enrUdpPort)) + builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), udpPort = some(enrUdpPort)) let enrRes = builder.build() @@ -89,10 +81,7 @@ suite "nim-eth ENR - Ext: IP address and TCP/UDP ports": ## When var builder = EnrBuilder.init(privateKey, seqNum) - builder.withIpAddressAndPorts( - ipAddr=some(enrIpAddr), - tcpPort=some(enrTcpPort), - ) + builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), tcpPort = 
some(enrTcpPort)) let enrRes = builder.build() @@ -119,10 +108,7 @@ suite "nim-eth ENR - Ext: IP address and TCP/UDP ports": ## When var builder = EnrBuilder.init(privateKey, seqNum) - builder.withIpAddressAndPorts( - ipAddr=some(enrIpAddr), - udpPort=some(enrUdpPort), - ) + builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), udpPort = some(enrUdpPort)) let enrRes = builder.build() diff --git a/tests/common/test_envvar_serialization.nim b/tests/common/test_envvar_serialization.nim index 9700a14f8..2c0e97419 100644 --- a/tests/common/test_envvar_serialization.nim +++ b/tests/common/test_envvar_serialization.nim @@ -1,20 +1,17 @@ {.used.} -import - testutils/unittests -import - ../../waku/common/envvar_serialization/utils - +import testutils/unittests +import ../../waku/common/envvar_serialization/utils suite "nim-envvar-serialization - utils": test "construct env var key": ## Given let prefix = "some-prefix" let name = @["db-url"] - + ## When let key = constructKey(prefix, name) ## Then check: - key == "SOME_PREFIX_DB_URL" \ No newline at end of file + key == "SOME_PREFIX_DB_URL" diff --git a/tests/common/test_parse_size.nim b/tests/common/test_parse_size.nim index 7d3f880fb..ef274f04e 100644 --- a/tests/common/test_parse_size.nim +++ b/tests/common/test_parse_size.nim @@ -1,10 +1,7 @@ {.used.} -import - testutils/unittests, - stew/results -import - ../../waku/common/utils/parse_size_units +import testutils/unittests, stew/results +import ../../waku/common/utils/parse_size_units suite "Size serialization test": test "parse normal sizes": @@ -104,4 +101,4 @@ suite "Size serialization test": assert sizeInBytesRes.isErr(), "The size should be considered incorrect" sizeInBytesRes = parseMsgSize("15..0 KiB") - assert sizeInBytesRes.isErr(), "The size should be considered incorrect" \ No newline at end of file + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" diff --git a/tests/common/test_protobuf_validation.nim b/tests/common/test_protobuf_validation.nim index 150787e99..200076817 100644 --- a/tests/common/test_protobuf_validation.nim +++ b/tests/common/test_protobuf_validation.nim @@ -1,18 +1,14 @@ - {.used.} -import - testutils/unittests -import - ../../waku/common/protobuf - +import testutils/unittests +import ../../waku/common/protobuf ## Fixtures const MaxTestRpcFieldLen = 5 type TestRpc = object - testField*: string + testField*: string proc init(T: type TestRpc, field: string): T = T(testField: field) @@ -40,11 +36,9 @@ proc decode(T: type TestRpc, buf: seq[byte]): ProtobufResult[T] = ok(TestRpc.init(field)) - ## Tests suite "Waku Common - libp2p minprotobuf wrapper": - test "serialize and deserialize - valid length field": ## Given let field = "12345" @@ -82,10 +76,9 @@ suite "Waku Common - libp2p minprotobuf wrapper": error.kind == ProtobufErrorKind.MissingRequiredField error.field == "test_field" - test "serialize and deserialize - invalid length field": ## Given - let field = "123456" # field.len = MaxTestRpcFieldLen + 1 + let field = "123456" # field.len = MaxTestRpcFieldLen + 1 let rpc = TestRpc.init(field) diff --git a/tests/common/test_sqlite_migrations.nim b/tests/common/test_sqlite_migrations.nim index 9e673d14e..3a95e5009 100644 --- a/tests/common/test_sqlite_migrations.nim +++ b/tests/common/test_sqlite_migrations.nim @@ -1,19 +1,12 @@ {.used.} -import - std/[strutils, os], - stew/results, - testutils/unittests -import - ../../waku/common/databases/db_sqlite {.all.}, - ../waku_archive/archive_utils - - -template sourceDir: string = 
currentSourcePath.rsplit(DirSep, 1)[0] +import std/[strutils, os], stew/results, testutils/unittests +import ../../waku/common/databases/db_sqlite {.all.}, ../waku_archive/archive_utils +template sourceDir(): string = + currentSourcePath.rsplit(DirSep, 1)[0] suite "SQLite - migrations": - test "set and get user version": ## Given let database = newSqliteDatabase() @@ -36,16 +29,17 @@ suite "SQLite - migrations": test "filter and order migration script file paths": ## Given - let paths = @[ - sourceDir / "00001_valid.up.sql", - sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL", - sourceDir / "00007_unorderedValid.up.sql", - sourceDir / "00003_validRepeated.up.sql", - sourceDir / "00003_validRepeated.up.sql", - sourceDir / "00666_noMigrationScript.bmp", - sourceDir / "00X00_invalidVersion.down.sql", - sourceDir / "00008_notWithinVersionRange.up.sql", - ] + let paths = + @[ + sourceDir / "00001_valid.up.sql", + sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL", + sourceDir / "00007_unorderedValid.up.sql", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / "00666_noMigrationScript.bmp", + sourceDir / "00X00_invalidVersion.down.sql", + sourceDir / "00008_notWithinVersionRange.up.sql", + ] let lowerVersion = 0 @@ -53,29 +47,33 @@ suite "SQLite - migrations": ## When var migrationSciptPaths: seq[string] - migrationSciptPaths = filterMigrationScripts(paths, lowerVersion, highVersion, direction="up") + migrationSciptPaths = + filterMigrationScripts(paths, lowerVersion, highVersion, direction = "up") migrationSciptPaths = sortMigrationScripts(migrationSciptPaths) ## Then check: - migrationSciptPaths == @[ - sourceDir / "00001_valid.up.sql", - sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL", - sourceDir / "00003_validRepeated.up.sql", - sourceDir / "00003_validRepeated.up.sql", - sourceDir / "00007_unorderedValid.up.sql" - ] + migrationSciptPaths == + @[ + sourceDir / "00001_valid.up.sql", + sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / "00007_unorderedValid.up.sql", + ] test "break migration scripts into queries": ## Given - let statement1 = """CREATE TABLE contacts1 ( + let statement1 = + """CREATE TABLE contacts1 ( contact_id INTEGER PRIMARY KEY, first_name TEXT NOT NULL, last_name TEXT NOT NULL, email TEXT NOT NULL UNIQUE, phone TEXT NOT NULL UNIQUE );""" - let statement2 = """CREATE TABLE contacts2 ( + let statement2 = + """CREATE TABLE contacts2 ( contact_id INTEGER PRIMARY KEY, first_name TEXT NOT NULL, last_name TEXT NOT NULL, @@ -89,18 +87,20 @@ suite "SQLite - migrations": ## Then check: - statements == @[statement1, statement2] + statements == @[statement1, statement2] test "break statements script into queries - empty statements": ## Given - let statement1 = """CREATE TABLE contacts1 ( + let statement1 = + """CREATE TABLE contacts1 ( contact_id INTEGER PRIMARY KEY, first_name TEXT NOT NULL, last_name TEXT NOT NULL, email TEXT NOT NULL UNIQUE, phone TEXT NOT NULL UNIQUE );""" - let statement2 = """CREATE TABLE contacts2 ( + let statement2 = + """CREATE TABLE contacts2 ( contact_id INTEGER PRIMARY KEY, first_name TEXT NOT NULL, last_name TEXT NOT NULL, diff --git a/tests/factory/test_node_factory.nim b/tests/factory/test_node_factory.nim index d00608561..c06ba792e 100644 --- a/tests/factory/test_node_factory.nim +++ b/tests/factory/test_node_factory.nim @@ -1,16 +1,11 @@ {.used.} -import - 
testutils/unittests, - chronos +import testutils/unittests, chronos -import - ../testlib/wakunode, - ../../waku/factory/node_factory, - ../../waku/waku_node +import ../testlib/wakunode, ../../waku/factory/node_factory, ../../waku/waku_node suite "Node Factory": - test "Set up a node based on default configurations": + test "Set up a node based on default configurations": let conf = defaultTestWakuNodeConf() let node = setupNode(conf).valueOr: @@ -24,7 +19,7 @@ suite "Node Factory": not node.wakuStoreClient.isNil() not node.rendezvous.isNil() - test "Set up a node with Store enabled": + test "Set up a node with Store enabled": var conf = defaultTestWakuNodeConf() conf.store = true @@ -36,32 +31,33 @@ suite "Node Factory": not node.wakuStore.isNil() not node.wakuArchive.isNil() -test "Set up a node with Filter enabled": - var conf = defaultTestWakuNodeConf() - conf.filter = true +test "Set up a node with Filter enabled": + var conf = defaultTestWakuNodeConf() + conf.filter = true - let node = setupNode(conf).valueOr: - raiseAssert error + let node = setupNode(conf).valueOr: + raiseAssert error - check: - not node.isNil() - not node.wakuFilter.isNil() + check: + not node.isNil() + not node.wakuFilter.isNil() -test "Start a node based on default configurations": - let conf = defaultTestWakuNodeConf() +test "Start a node based on default configurations": + let conf = defaultTestWakuNodeConf() - let node = setupNode(conf).valueOr: - raiseAssert error + let node = setupNode(conf).valueOr: + raiseAssert error - assert not node.isNil(), "Node can't be nil" + assert not node.isNil(), "Node can't be nil" - let startRes = catch: (waitFor startNode(node, conf)) + let startRes = catch: + (waitFor startNode(node, conf)) - assert not startRes.isErr(), "Exception starting node" - assert startRes.get().isOk(), "Error starting node " & startRes.get().error + assert not startRes.isErr(), "Exception starting node" + assert startRes.get().isOk(), "Error starting node " & startRes.get().error - check: - node.started == true + check: + node.started == true - ## Cleanup - waitFor node.stop() + ## Cleanup + waitFor node.stop() diff --git a/tests/node/peer_manager/peer_store/test_peer_storage.nim b/tests/node/peer_manager/peer_store/test_peer_storage.nim index 9c77cb912..6cb6a0bf5 100644 --- a/tests/node/peer_manager/peer_store/test_peer_storage.nim +++ b/tests/node/peer_manager/peer_store/test_peer_storage.nim @@ -17,9 +17,7 @@ suite "PeerStorage": suite "getAll": test "unimplemented": - let - emptyClosure = - proc(remotePeerInfo: RemotePeerInfo) = - discard + let emptyClosure = proc(remotePeerInfo: RemotePeerInfo) = + discard check: peerStorage.getAll(emptyClosure) == PeerStorageResult[void].err("Unimplemented") diff --git a/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim b/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim index 466ed0a0a..807aa2655 100644 --- a/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim +++ b/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim @@ -13,23 +13,22 @@ import ../../../../waku/node/peer_manager/peer_store/waku_peer_storage proc `==`(a, b: RemotePeerInfo): bool = - let - comparisons = - @[ - a.peerId == b.peerId, - a.addrs == b.addrs, - a.enr == b.enr, - a.protocols == b.protocols, - a.agent == b.agent, - a.protoVersion == b.protoVersion, - a.publicKey == b.publicKey, - a.connectedness == b.connectedness, - a.disconnectTime == b.disconnectTime, - a.origin == b.origin, - a.direction == b.direction, - a.lastFailedConn == 
b.lastFailedConn, - a.numberFailedConn == b.numberFailedConn - ] + let comparisons = + @[ + a.peerId == b.peerId, + a.addrs == b.addrs, + a.enr == b.enr, + a.protocols == b.protocols, + a.agent == b.agent, + a.protoVersion == b.protoVersion, + a.publicKey == b.publicKey, + a.connectedness == b.connectedness, + a.disconnectTime == b.disconnectTime, + a.origin == b.origin, + a.direction == b.direction, + a.lastFailedConn == b.lastFailedConn, + a.numberFailedConn == b.numberFailedConn, + ] allIt(comparisons, it == true) @@ -65,19 +64,18 @@ suite "Protobuf Serialisation": suite "encode": test "simple": # Given the expected bytes representation of a valid RemotePeerInfo - let - expectedBuffer: seq[byte] = - @[ - 10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, - 145, 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, - 170, 74, 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, - 34, 95, 8, 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, - 8, 42, 134, 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, - 224, 232, 245, 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, - 42, 75, 201, 1, 216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, - 231, 29, 104, 81, 81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, - 192, 52, 233, 247, 124, 64, 158, 98, 40, 0, 48, 0 - ] + let expectedBuffer: seq[byte] = + @[ + 10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145, + 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74, + 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8, + 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134, + 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245, + 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1, + 216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81, + 81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247, + 124, 64, 158, 98, 40, 0, 48, 0, + ] # When converting a valid RemotePeerInfo to a ProtoBuffer let encodedRemotePeerInfo = encode(remotePeerInfo).get() @@ -92,19 +90,18 @@ suite "Protobuf Serialisation": suite "decode": test "simple": # Given the bytes representation of a valid RemotePeerInfo - let - buffer: seq[byte] = - @[ - 10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, - 145, 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, - 170, 74, 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, - 34, 95, 8, 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, - 8, 42, 134, 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, - 224, 232, 245, 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, - 42, 75, 201, 1, 216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, - 231, 29, 104, 81, 81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, - 192, 52, 233, 247, 124, 64, 158, 98, 40, 0, 48, 0 - ] + let buffer: seq[byte] = + @[ + 10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145, + 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74, + 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8, + 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134, + 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245, + 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 
+          216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81,
+          81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247,
+          124, 64, 158, 98, 40, 0, 48, 0,
+        ]

       # When converting a valid buffer to RemotePeerInfo
       let decodedRemotePeerInfo = RemotePeerInfo.decode(buffer).get()
diff --git a/tests/node/test_wakunode_filter.nim b/tests/node/test_wakunode_filter.nim
index 771f2985b..f29b9f252 100644
--- a/tests/node/test_wakunode_filter.nim
+++ b/tests/node/test_wakunode_filter.nim
@@ -16,7 +16,7 @@ import
     node/waku_node,
     waku_filter_v2,
     waku_filter_v2/client,
-    waku_filter_v2/subscriptions
+    waku_filter_v2/subscriptions,
   ],
   ../testlib/[common, wakucore, wakunode, testasync, futures, testutils]

@@ -34,11 +34,10 @@ suite "Waku Filter - End to End":
   asyncSetup:
     pushHandlerFuture = newFuture[(string, WakuMessage)]()

-    messagePushHandler =
-      proc(pubsubTopic: PubsubTopic, message: WakuMessage): Future[void] {.
-        async, closure, gcsafe
-      .} =
-        pushHandlerFuture.complete((pubsubTopic, message))
+    messagePushHandler = proc(
+        pubsubTopic: PubsubTopic, message: WakuMessage
+    ): Future[void] {.async, closure, gcsafe.} =
+      pushHandlerFuture.complete((pubsubTopic, message))

     pubsubTopic = DefaultPubsubTopic
     contentTopic = DefaultContentTopic

@@ -72,11 +71,9 @@ suite "Waku Filter - End to End":
   asyncTest "Client Node receives Push from Server Node, via Filter":
     # When a client node subscribes to a filter node
-    let
-      subscribeResponse =
-        await client.filterSubscribe(
-          some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
-        )
+    let subscribeResponse = await client.filterSubscribe(
+      some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
+    )

     # Then the subscription is successful
     check:
@@ -96,11 +93,9 @@ suite "Waku Filter - End to End":
       pushedMsg1 == msg1

     # When unsubscribing from the subscription
-    let
-      unsubscribeResponse =
-        await client.filterUnsubscribe(
-          some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
-        )
+    let unsubscribeResponse = await client.filterUnsubscribe(
+      some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
+    )

     # Then the unsubscription is successful
     check:
@@ -121,11 +116,9 @@ suite "Waku Filter - End to End":
       await server.mountRelay()

     # And valid filter subscription
-    let
-      subscribeResponse =
-        await client.filterSubscribe(
-          some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
-        )
+    let subscribeResponse = await client.filterSubscribe(
+      some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
+    )
     require:
       subscribeResponse.isOk()
       server.wakuFilter.subscriptions.subscribedPeerCount() == 1
@@ -149,22 +142,18 @@ suite "Waku Filter - End to End":
       let serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()

       # When a client node subscribes to the server node
-      let
-        subscribeResponse =
-          await client.filterSubscribe(
-            some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
-          )
+      let subscribeResponse = await client.filterSubscribe(
+        some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
+      )

       # Then the subscription is successful
       check (not subscribeResponse.isOk())

   asyncTest "Filter Client Node can receive messages after subscribing and restarting, via Filter":
     # Given a valid filter subscription
-    let
-      subscribeResponse =
-        await client.filterSubscribe(
-          some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
-        )
+    let subscribeResponse = await client.filterSubscribe(
+      some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
+    )
     require:
       subscribeResponse.isOk()
      server.wakuFilter.subscriptions.subscribedPeerCount() == 1
@@ -188,11 +177,9 @@ suite "Waku Filter - End to End":
       await server.mountRelay()

     # Given a valid filter subscription
-    let
-      subscribeResponse =
-        await client.filterSubscribe(
-          some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
-        )
+    let subscribeResponse = await client.filterSubscribe(
+      some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
+    )
     require:
       subscribeResponse.isOk()
       server.wakuFilter.subscriptions.subscribedPeerCount() == 1
@@ -209,11 +196,9 @@ suite "Waku Filter - End to End":
     check (not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT))

     # Given the client refreshes the subscription
-    let
-      subscribeResponse2 =
-        await clientClone.filterSubscribe(
-          some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
-        )
+    let subscribeResponse2 = await clientClone.filterSubscribe(
+      some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
+    )
     check:
       subscribeResponse2.isOk()
       server.wakuFilter.subscriptions.subscribedPeerCount() == 1
diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim
index 0a8141c15..0e2a93f9f 100644
--- a/tests/node/test_wakunode_lightpush.nim
+++ b/tests/node/test_wakunode_lightpush.nim
@@ -21,7 +21,7 @@ import
     waku_lightpush/common,
     waku_lightpush/client,
     waku_lightpush/protocol_metrics,
-    waku_lightpush/rpc
+    waku_lightpush/rpc,
   ],
   ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils]

@@ -40,12 +40,11 @@ suite "Waku Lightpush - End To End":
   asyncSetup:
     handlerFuture = newPushHandlerFuture()
-    handler =
-      proc(
+    handler = proc(
         peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
-      ): Future[WakuLightPushResult[void]] {.async.} =
-        handlerFuture.complete((pubsubTopic, message))
-        return ok()
+    ): Future[WakuLightPushResult[void]] {.async.} =
+      handlerFuture.complete((pubsubTopic, message))
+      return ok()

     let
       serverKey = generateSecp256k1Key()
@@ -72,19 +71,14 @@ suite "Waku Lightpush - End To End":
  suite "Assessment of Message Relaying Mechanisms":
    asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
      # Given a light lightpush client
-      let
-        lightpushClient =
-          newTestWakuNode(
-            generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)
-          )
+      let lightpushClient =
+        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      lightpushClient.mountLightpushClient()

      # When the client publishes a message
-      let
-        publishResponse =
-          await lightpushClient.lightpushPublish(
-            some(pubsubTopic), message, serverRemotePeerInfo
-          )
+      let publishResponse = await lightpushClient.lightpushPublish(
+        some(pubsubTopic), message, serverRemotePeerInfo
+      )

      if not publishResponse.isOk():
        echo "Publish failed: ", publishResponse.error()
diff --git a/tests/node/test_wakunode_peer_exchange.nim b/tests/node/test_wakunode_peer_exchange.nim
index 1cb95e097..f4dc516c4 100644
--- a/tests/node/test_wakunode_peer_exchange.nim
+++ b/tests/node/test_wakunode_peer_exchange.nim
@@ -19,7 +19,7 @@ import
     waku_peer_exchange,
     node/peer_manager,
     waku_relay/protocol,
-    waku_core
+    waku_core,
   ],
   ../waku_peer_exchange/utils,
   ../testlib/[wakucore, wakunode, testasync]

@@ -187,82 +187,72 @@ suite "Waku Peer Exchange with discv5":
     ## Given (copied from test_waku_discv5.nim)
     let
       # todo: px flag
-      flags =
-        CapabilitiesBitfield.init(
-          lightpush = false, filter = false, store = false, relay = true
-        )
+      flags = CapabilitiesBitfield.init(
+        lightpush = false, filter = false, store = false, relay = true
+      )
       bindIp = parseIpAddress("0.0.0.0")
       extIp = parseIpAddress("127.0.0.1")

      nodeKey1 = generateSecp256k1Key()
       nodeTcpPort1 = Port(64010)
       nodeUdpPort1 = Port(9000)
-      node1 =
-        newTestWakuNode(
-          nodeKey1,
-          bindIp,
-          nodeTcpPort1,
-          some(extIp),
-          wakuFlags = some(flags),
-          discv5UdpPort = some(nodeUdpPort1),
-        )
+      node1 = newTestWakuNode(
+        nodeKey1,
+        bindIp,
+        nodeTcpPort1,
+        some(extIp),
+        wakuFlags = some(flags),
+        discv5UdpPort = some(nodeUdpPort1),
+      )

       nodeKey2 = generateSecp256k1Key()
       nodeTcpPort2 = Port(64012)
       nodeUdpPort2 = Port(9002)
-      node2 =
-        newTestWakuNode(
-          nodeKey2,
-          bindIp,
-          nodeTcpPort2,
-          some(extIp),
-          wakuFlags = some(flags),
-          discv5UdpPort = some(nodeUdpPort2),
-        )
+      node2 = newTestWakuNode(
+        nodeKey2,
+        bindIp,
+        nodeTcpPort2,
+        some(extIp),
+        wakuFlags = some(flags),
+        discv5UdpPort = some(nodeUdpPort2),
+      )

       nodeKey3 = generateSecp256k1Key()
       nodeTcpPort3 = Port(64014)
       nodeUdpPort3 = Port(9004)
-      node3 =
-        newTestWakuNode(
-          nodeKey3,
-          bindIp,
-          nodeTcpPort3,
-          some(extIp),
-          wakuFlags = some(flags),
-          discv5UdpPort = some(nodeUdpPort3),
-        )
+      node3 = newTestWakuNode(
+        nodeKey3,
+        bindIp,
+        nodeTcpPort3,
+        some(extIp),
+        wakuFlags = some(flags),
+        discv5UdpPort = some(nodeUdpPort3),
+      )

     # discv5
-    let
-      conf1 =
-        WakuDiscoveryV5Config(
-          discv5Config: none(DiscoveryConfig),
-          address: bindIp,
-          port: nodeUdpPort1,
-          privateKey: keys.PrivateKey(nodeKey1.skkey),
-          bootstrapRecords: @[],
-          autoupdateRecord: true,
-        )
+    let conf1 = WakuDiscoveryV5Config(
+      discv5Config: none(DiscoveryConfig),
+      address: bindIp,
+      port: nodeUdpPort1,
+      privateKey: keys.PrivateKey(nodeKey1.skkey),
+      bootstrapRecords: @[],
+      autoupdateRecord: true,
+    )

-    let
-      disc1 =
-        WakuDiscoveryV5.new(node1.rng, conf1, some(node1.enr), some(node1.peerManager))
+    let disc1 =
+      WakuDiscoveryV5.new(node1.rng, conf1, some(node1.enr), some(node1.peerManager))

-    let
-      conf2 =
-        WakuDiscoveryV5Config(
-          discv5Config: none(DiscoveryConfig),
-          address: bindIp,
-          port: nodeUdpPort2,
-          privateKey: keys.PrivateKey(nodeKey2.skkey),
-          bootstrapRecords: @[disc1.protocol.getRecord()],
-          autoupdateRecord: true,
-        )
+    let conf2 = WakuDiscoveryV5Config(
+      discv5Config: none(DiscoveryConfig),
+      address: bindIp,
+      port: nodeUdpPort2,
+      privateKey: keys.PrivateKey(nodeKey2.skkey),
+      bootstrapRecords: @[disc1.protocol.getRecord()],
+      autoupdateRecord: true,
+    )

-    let
-      disc2 =
-        WakuDiscoveryV5.new(node2.rng, conf2, some(node2.enr), some(node2.peerManager))
+    let disc2 =
+      WakuDiscoveryV5.new(node2.rng, conf2, some(node2.enr), some(node2.peerManager))

     await allFutures(node1.start(), node2.start(), node3.start())
     let resultDisc1StartRes = await disc1.start()
@@ -286,9 +276,8 @@ suite "Waku Peer Exchange with discv5":
     await node1.mountPeerExchange()
     await node3.mountPeerExchange()

-    let
-      dialResponse =
-        await node3.dialForPeerExchange(node1.switch.peerInfo.toRemotePeerInfo())
+    let dialResponse =
+      await node3.dialForPeerExchange(node1.switch.peerInfo.toRemotePeerInfo())

     check dialResponse.isOk
diff --git a/tests/node/test_wakunode_relay_rln.nim b/tests/node/test_wakunode_relay_rln.nim
index ff08da513..33aeb33c5 100644
--- a/tests/node/test_wakunode_relay_rln.nim
+++ b/tests/node/test_wakunode_relay_rln.nim
@@ -12,13 +12,8 @@ import
 from std/times import epochTime

 import
-  ../../../waku/[
-    node/waku_node,
-    node/peer_manager,
-    waku_core,
-    waku_node,
-    waku_rln_relay,
-  ],
+  ../../../waku/
+    [node/waku_node, node/peer_manager, waku_core, waku_node, waku_rln_relay],
   ../waku_store/store_utils,
   ../waku_archive/archive_utils,
   ../testlib/[wakucore, wakunode, testasync, futures],
@@ -30,7 +25,7 @@ proc setupRln(node: WakuNode, identifier: uint) {.async.} =
      rlnRelayDynamic: false,
      rlnRelayCredIndex: some(identifier),
      rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier),
-      rlnEpochSizeSec: 1
+      rlnEpochSizeSec: 1,
    )
  )
@@ -73,12 +68,11 @@ proc sendRlnMessageWithInvalidProof(
 ): Future[bool] {.async.} =
   let
     extraBytes: seq[byte] = @[byte(1), 2, 3]
-    rateLimitProofRes =
-      client.wakuRlnRelay.groupManager.generateProof(
-        concat(payload, extraBytes),
-        # we add extra bytes to invalidate proof verification against original payload
-        client.wakuRlnRelay.getCurrentEpoch()
-      )
+    rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof(
+      concat(payload, extraBytes),
+      # we add extra bytes to invalidate proof verification against original payload
+      client.wakuRlnRelay.getCurrentEpoch(),
+    )
     rateLimitProof = rateLimitProofRes.get().encode().buffer
     message =
       WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof)
@@ -127,10 +121,8 @@ suite "Waku RlnRelay - End to End":
       server.wakuRlnRelay == nil

     # When RlnRelay is mounted
-    let
-      catchRes =
-        catch:
-          await server.setupRln(1)
+    let catchRes = catch:
+      await server.setupRln(1)

     # Then Relay and RLN are not mounted, and the process fails
     check:
@@ -156,9 +148,8 @@ suite "Waku RlnRelay - End to End":
     var completionFuture = subscribeCompletionHandler(server, pubsubTopic)

     # When the client sends a valid RLN message
-    let
-      isCompleted1 =
-        await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)
+    let isCompleted1 =
+      await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)

     # Then the valid RLN message is relayed
     check:
@@ -167,11 +158,9 @@ suite "Waku RlnRelay - End to End":
     # When the client sends an invalid RLN message
     completionFuture = newBoolFuture()
-    let
-      isCompleted2 =
-        await sendRlnMessageWithInvalidProof(
-          client, pubsubTopic, contentTopic, completionFuture
-        )
+    let isCompleted2 = await sendRlnMessageWithInvalidProof(
+      client, pubsubTopic, contentTopic, completionFuture
+    )

     # Then the invalid RLN message is not relayed
     check:
@@ -191,9 +180,8 @@ suite "Waku RlnRelay - End to End":
     await sleepAsync(FUTURE_TIMEOUT)

     # When the client sends a valid RLN message
-    let
-      isCompleted1 =
-        await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)
+    let isCompleted1 =
+      await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)

     # Then the valid RLN message is relayed
     check:
@@ -202,11 +190,9 @@ suite "Waku RlnRelay - End to End":
     # When the client sends an invalid RLN message
     completionFuture = newBoolFuture()
-    let
-      isCompleted2 =
-        await sendRlnMessageWithInvalidProof(
-          client, pubsubTopic, contentTopic, completionFuture
-        )
+    let isCompleted2 = await sendRlnMessageWithInvalidProof(
+      client, pubsubTopic, contentTopic, completionFuture
+    )

     # Then the invalid RLN message is not relayed
     check:
@@ -250,18 +236,26 @@ suite "Waku RlnRelay - End to End":
         WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic)

     doAssert(
-      client.wakuRlnRelay.appendRLNProof(message1b, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 0).isOk()
+      client.wakuRlnRelay
+      .appendRLNProof(message1b, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 0)
+      .isOk()
     )
     doAssert(
-      client.wakuRlnRelay.appendRLNProof(message1kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 1).isOk()
+      client.wakuRlnRelay
+      .appendRLNProof(message1kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 1)
+      .isOk()
     )
     doAssert(
-      client.wakuRlnRelay.appendRLNProof(message150kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 2).isOk()
+      client.wakuRlnRelay
+      .appendRLNProof(message150kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 2)
+      .isOk()
     )
     doAssert(
-      client.wakuRlnRelay.appendRLNProof(
+      client.wakuRlnRelay
+      .appendRLNProof(
         message151kibPlus, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 3
-      ).isOk()
+      )
+      .isOk()
     )

     # When sending the 1B message
@@ -319,9 +313,8 @@ suite "Waku RlnRelay - End to End":
       overhead: uint64 = 419
       payload150kibPlus = getByteSequence((150 * 1024) - overhead + 1)

-    var
-      message151kibPlus =
-        WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic)
+    var message151kibPlus =
+      WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic)

     doAssert(
       client.wakuRlnRelay.appendRLNProof(
diff --git a/tests/node/test_wakunode_store.nim b/tests/node/test_wakunode_store.nim
index a98407b49..e6cea171a 100644
--- a/tests/node/test_wakunode_store.nim
+++ b/tests/node/test_wakunode_store.nim
@@ -17,7 +17,7 @@ import
     waku_store/client,
     waku_archive,
     waku_archive/driver/sqlite_driver,
-    common/databases/db_sqlite
+    common/databases/db_sqlite,
   ],
   ../waku_store/store_utils,
   ../waku_archive/archive_utils,
@@ -55,16 +55,15 @@ suite "Waku Store - End to End - Sorted Archive":
         fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
         fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
         fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
-        fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin))
+        fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
       ]

-      historyQuery =
-        HistoryQuery(
-          pubsubTopic: some(pubsubTopic),
-          contentTopics: contentTopicSeq,
-          direction: PagingDirection.Forward,
-          pageSize: 5,
-        )
+      historyQuery = HistoryQuery(
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.Forward,
+        pageSize: 5,
+      )

     let
       serverKey = generateSecp256k1Key()
@@ -95,26 +94,24 @@ suite "Waku Store - End to End - Sorted Archive":
       # Then the response contains the messages
       check:
-        queryResponse.get().messages == archiveMessages[0..<5]
+        queryResponse.get().messages == archiveMessages[0 ..< 5]

       # Given the next query
-      var
-        otherHistoryQuery =
-          HistoryQuery(
-            cursor: queryResponse.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 5,
-          )
+      var otherHistoryQuery = HistoryQuery(
+        cursor: queryResponse.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 5,
+      )

       # When making the next history query
-      let
-        otherQueryResponse = await client.query(otherHistoryQuery, serverRemotePeerInfo)
+      let otherQueryResponse =
+        await client.query(otherHistoryQuery, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        otherQueryResponse.get().messages == archiveMessages[5..<10]
+        otherQueryResponse.get().messages == archiveMessages[5 ..< 10]

     asyncTest "Backward Pagination":
       # Given the history query is backward
@@ -125,26 +122,24 @@ suite "Waku Store - End to End - Sorted Archive":
       # Then the response contains the messages
       check:
-        queryResponse.get().messages == archiveMessages[5..<10]
+        queryResponse.get().messages == archiveMessages[5 ..< 10]

       # Given the next query
-      var
-        nextHistoryQuery =
-          HistoryQuery(
-            cursor: queryResponse.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.BACKWARD,
-            pageSize: 5,
-          )
+      var nextHistoryQuery = HistoryQuery(
+        cursor: queryResponse.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.BACKWARD,
+        pageSize: 5,
+      )

       # When making the next history query
-      let
-        otherQueryResponse = await client.query(nextHistoryQuery, serverRemotePeerInfo)
+      let otherQueryResponse =
+        await client.query(nextHistoryQuery, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        otherQueryResponse.get().messages == archiveMessages[0..<5]
+        otherQueryResponse.get().messages == archiveMessages[0 ..< 5]

   suite "Pagination with Different Page Sizes":
     asyncTest "Pagination with Small Page Size":
@@ -156,79 +151,71 @@ suite "Waku Store - End to End - Sorted Archive":
       # Then the response contains the messages
       check:
-        queryResponse1.get().messages == archiveMessages[0..<2]
+        queryResponse1.get().messages == archiveMessages[0 ..< 2]

       # Given the next query (2/5)
-      let
-        historyQuery2 =
-          HistoryQuery(
-            cursor: queryResponse1.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 2,
-          )
+      let historyQuery2 = HistoryQuery(
+        cursor: queryResponse1.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 2,
+      )

       # When making the next history query
       let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        queryResponse2.get().messages == archiveMessages[2..<4]
+        queryResponse2.get().messages == archiveMessages[2 ..< 4]

       # Given the next query (3/5)
-      let
-        historyQuery3 =
-          HistoryQuery(
-            cursor: queryResponse2.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 2,
-          )
+      let historyQuery3 = HistoryQuery(
+        cursor: queryResponse2.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 2,
+      )

       # When making the next history query
       let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        queryResponse3.get().messages == archiveMessages[4..<6]
+        queryResponse3.get().messages == archiveMessages[4 ..< 6]

       # Given the next query (4/5)
-      let
-        historyQuery4 =
-          HistoryQuery(
-            cursor: queryResponse3.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 2,
-          )
+      let historyQuery4 = HistoryQuery(
+        cursor: queryResponse3.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 2,
+      )

       # When making the next history query
       let queryResponse4 = await client.query(historyQuery4, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        queryResponse4.get().messages == archiveMessages[6..<8]
+        queryResponse4.get().messages == archiveMessages[6 ..< 8]

       # Given the next query (5/5)
-      let
-        historyQuery5 =
-          HistoryQuery(
-            cursor: queryResponse4.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 2,
-          )
+      let historyQuery5 = HistoryQuery(
+        cursor: queryResponse4.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 2,
+      )

       # When making the next history query
       let queryResponse5 = await client.query(historyQuery5, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        queryResponse5.get().messages == archiveMessages[8..<10]
+        queryResponse5.get().messages == archiveMessages[8 ..< 10]

     asyncTest "Pagination with Large Page Size":
       # Given the first query (1/2)
@@ -239,25 +226,23 @@ suite "Waku Store - End to End - Sorted Archive":
       # Then the response contains the messages
       check:
-        queryResponse1.get().messages == archiveMessages[0..<8]
+        queryResponse1.get().messages == archiveMessages[0 ..< 8]

       # Given the next query (2/2)
-      let
-        historyQuery2 =
-          HistoryQuery(
-            cursor: queryResponse1.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 8,
-          )
+      let historyQuery2 = HistoryQuery(
+        cursor: queryResponse1.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 8,
+      )

       # When making the next history query
       let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        queryResponse2.get().messages == archiveMessages[8..<10]
+        queryResponse2.get().messages == archiveMessages[8 ..< 10]

     asyncTest "Pagination with Excessive Page Size":
       # Given the first query (1/1)
@@ -268,7 +253,7 @@ suite "Waku Store - End to End - Sorted Archive":
       # Then the response contains the messages
       check:
-        queryResponse1.get().messages == archiveMessages[0..<10]
+        queryResponse1.get().messages == archiveMessages[0 ..< 10]

     asyncTest "Pagination with Mixed Page Size":
       # Given the first query (1/3)
@@ -279,43 +264,39 @@ suite "Waku Store - End to End - Sorted Archive":
       # Then the response contains the messages
       check:
-        queryResponse1.get().messages == archiveMessages[0..<2]
+        queryResponse1.get().messages == archiveMessages[0 ..< 2]

       # Given the next query (2/3)
-      let
-        historyQuery2 =
-          HistoryQuery(
-            cursor: queryResponse1.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 4,
-          )
+      let historyQuery2 = HistoryQuery(
+        cursor: queryResponse1.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 4,
+      )

       # When making the next history query
       let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        queryResponse2.get().messages == archiveMessages[2..<6]
+        queryResponse2.get().messages == archiveMessages[2 ..< 6]

       # Given the next query (3/3)
-      let
-        historyQuery3 =
-          HistoryQuery(
-            cursor: queryResponse2.get().cursor,
-            pubsubTopic: some(pubsubTopic),
-            contentTopics: contentTopicSeq,
-            direction: PagingDirection.FORWARD,
-            pageSize: 6,
-          )
+      let historyQuery3 = HistoryQuery(
+        cursor: queryResponse2.get().cursor,
+        pubsubTopic: some(pubsubTopic),
+        contentTopics: contentTopicSeq,
+        direction: PagingDirection.FORWARD,
+        pageSize: 6,
+      )

       # When making the next history query
       let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo)

       # Then the response contains the messages
       check:
-        queryResponse3.get().messages == archiveMessages[6..<10]
+        queryResponse3.get().messages == archiveMessages[6 ..< 10]

     asyncTest "Pagination with Zero Page Size (Behaves as DefaultPageSize)":
       # Given a message list of size higher than the default page size
@@ -326,7 +307,7 @@ suite "Waku Store - End to End - Sorted Archive":
       let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp
       var extraMessages: seq[WakuMessage] = @[]
-      for i in 0..?`~"
-  EMOJI* = "😀 😃 😄 😁 😆 😅 🤣 😂 🙂 🙃 😉 😊 😇 🥰 😍 🤩 😘 😗 😚 😙"
+  ALPHANUMERIC_SPECIAL* =
+    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_+-=[]{}|;':\\\",./<>?`~"
+  EMOJI* =
+    "😀 😃 😄 😁 😆 😅 🤣 😂 🙂 🙃 😉 😊 😇 🥰 😍 🤩 😘 😗 😚 😙"
   CODE* = "def main():\n\tprint('Hello, world!')"
-  QUERY* = """
+  QUERY* =
+    """
   SELECT
     u.id,
     u.name,
@@ -28,7 +30,8 @@ const
     u.id = 1
   """
   TEXT_SMALL* = "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
-  TEXT_LARGE* = """
+  TEXT_LARGE* =
+    """
   Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras gravida vulputate semper.
   Proin eleifend varius cursus. Morbi lacinia posuere quam sit amet pretium. Sed non metus
   fermentum, venenatis nisl id, vestibulum eros. Quisque non lorem sit amet lectus faucibus
   elementum eu
@@ -40,35 +43,12 @@ const

 proc getSampleJsonDictionary*(): JsonNode =
   %*{
-    "shapes": [
-      {
-        "type": "circle",
-        "radius": 10
-      },
-      {
-        "type": "square",
-        "side": 10
-      }
-    ],
-    "colours": [
-      "red",
-      "green",
-      "blue"
-    ]
+    "shapes": [{"type": "circle", "radius": 10}, {"type": "square", "side": 10}],
+    "colours": ["red", "green", "blue"],
   }

-proc getSampleJsonList*(): JsonNode =
-  %*[
-    {
-      "type": "cat",
-      "name": "Salem"
-    },
-    {
-      "type": "dog",
-      "name": "Oberon"
-    },
-  ]
-
+proc getSampleJsonList*(): JsonNode =
+  %*[{"type": "cat", "name": "Salem"}, {"type": "dog", "name": "Oberon"}]

 proc getByteSequence*(bytesNumber: uint64): seq[byte] =
   result = newSeq[byte](bytesNumber)
diff --git a/tests/resources/pubsub_topics.nim b/tests/resources/pubsub_topics.nim
index c850fb0df..07ac74160 100644
--- a/tests/resources/pubsub_topics.nim
+++ b/tests/resources/pubsub_topics.nim
@@ -3,7 +3,6 @@ import std/strformat

 proc getPubsubTopic*(pubsubTopicName: string): string =
   return fmt"/waku/2/{pubsubTopicName}"
-
 const
   CURRENT* = getPubsubTopic("test")
   CURRENT_NESTED* = getPubsubTopic("test/nested")
diff --git a/tests/test_all.nim b/tests/test_all.nim
index 2138adc3b..a72cabaf2 100644
--- a/tests/test_all.nim
+++ b/tests/test_all.nim
@@ -1,6 +1,3 @@
 {.used.}

-import
-  ./all_tests_common,
-  ./all_tests_waku,
-  ./all_tests_wakunode2
+import ./all_tests_common, ./all_tests_waku, ./all_tests_wakunode2
diff --git a/tests/test_helpers.nim b/tests/test_helpers.nim
index a4d2fd286..bd1d837b6 100644
--- a/tests/test_helpers.nim
+++ b/tests/test_helpers.nim
@@ -1,6 +1,4 @@
-import
-  chronos, bearssl/rand,
-  eth/[keys, p2p]
+import chronos, bearssl/rand, eth/[keys, p2p]

 import libp2p/crypto/crypto

@@ -8,20 +6,23 @@ var nextPort = 30303

 proc localAddress*(port: int): Address =
   let port = Port(port)
-  result = Address(udpPort: port, tcpPort: port,
-                   ip: parseIpAddress("127.0.0.1"))
+  result = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("127.0.0.1"))

 proc setupTestNode*(
-    rng: ref HmacDrbgContext,
-    capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode =
+    rng: ref HmacDrbgContext, capabilities: varargs[ProtocolInfo, `protocolInfo`]
+): EthereumNode =
   let
     keys1 = keys.KeyPair.random(rng[])
     address = localAddress(nextPort)

-  result = newEthereumNode(keys1, address, NetworkId(1),
-                           addAllCapabilities = false,
-                           bindUdpPort = address.udpPort, # Assume same as external
-                           bindTcpPort = address.tcpPort, # Assume same as external
-                           rng = rng)
+  result = newEthereumNode(
+    keys1,
+    address,
+    NetworkId(1),
+    addAllCapabilities = false,
+    bindUdpPort = address.udpPort, # Assume same as external
+    bindTcpPort = address.tcpPort, # Assume same as external
+    rng = rng,
+  )
   nextPort.inc
   for capability in capabilities:
     result.addCapability capability
diff --git a/tests/test_message_cache.nim b/tests/test_message_cache.nim
index 98a0c997c..ae877f421 100644
--- a/tests/test_message_cache.nim
+++ b/tests/test_message_cache.nim
@@ -1,13 +1,7 @@
 {.used.}

-import
-  std/sets,
-  stew/[results, byteutils],
-  testutils/unittests
-import
-  ../../waku/waku_core,
-  ../../waku/waku_api/message_cache,
-  ./testlib/wakucore
+import std/sets, stew/[results, byteutils], testutils/unittests
+import ../../waku/waku_core, ../../waku/waku_api/message_cache, ./testlib/wakucore

 suite "MessageCache":
   setup:
@@ -77,7 +71,7 @@ suite "MessageCache":
     cache.addMessage(testPubsubTopic, testMessage)

     ## When
-    var res = cache.getMessages(testPubsubTopic, clear=true)
+    var res = cache.getMessages(testPubsubTopic, clear = true)
     require(res.isOk())

     res = cache.getMessages(testPubsubTopic)
@@ -121,15 +115,15 @@ suite "MessageCache":
     ## Then
     let res = cache.getMessages(testPubsubTopic)
     check:
-     res.isErr()
-     res.error() == "not subscribed to any pubsub topics"
+      res.isErr()
+      res.error() == "not subscribed to any pubsub topics"

   test "add messages beyond the capacity":
     ## Given
     var testMessages = @[fakeWakuMessage(toBytes("MSG-1"))]

     # Prevent duplicate message timestamps
-    for i in 0..<5:
+    for i in 0 ..< 5:
       var msg = fakeWakuMessage(toBytes("MSG-1"))

       while msg.timestamp <= testMessages[i].timestamp:
@@ -166,7 +160,7 @@ suite "MessageCache":
     check:
       getRes.isOk
       getRes.get() == @[fakeMessage]
-  
+
   test "add same message twice":
     cache.pubsubSubscribe(testPubsubTopic)

@@ -214,4 +208,4 @@ suite "MessageCache":
     # msg0 was deleted because no refs
     check:
-      cache.messagesCount() == 2
\ No newline at end of file
+      cache.messagesCount() == 2
diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim
index 8de5c696f..db7036be8 100644
--- a/tests/test_peer_manager.nim
+++ b/tests/test_peer_manager.nim
@@ -37,20 +37,28 @@ import

 procSuite "Peer Manager":
   asyncTest "connectRelay() works":
     # Create 2 nodes
-    let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
+    let nodes = toSeq(0 ..< 2).mapIt(
+      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+    )

     await allFutures(nodes.mapIt(it.start()))

-    let connOk = await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())
+    let connOk =
+      await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())
     await sleepAsync(chronos.milliseconds(500))

     check:
       connOk == true
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].peerInfo.peerId)
-      nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == Connectedness.Connected
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[1].peerInfo.peerId
+      )
+      nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
+        Connectedness.Connected

   asyncTest "dialPeer() works":
     # Create 2 nodes
-    let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
+    let nodes = toSeq(0 ..< 2).mapIt(
+      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+    )

     await allFutures(nodes.mapIt(it.start()))
     await allFutures(nodes.mapIt(it.mountRelay()))
@@ -58,7 +66,9 @@ procSuite "Peer Manager":
     await allFutures(nodes.mapIt(it.mountLegacyFilter()))

     # Dial node2 from node1
-    let conn = await nodes[0].peerManager.dialPeer(nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec)
+    let conn = await nodes[0].peerManager.dialPeer(
+      nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec
+    )
     await sleepAsync(chronos.milliseconds(500))

     # Check connection
@@ -69,32 +79,42 @@ procSuite "Peer Manager":

     # Check that node2 is being managed in node1
     check:
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].peerInfo.peerId)
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[1].peerInfo.peerId
+      )

     # Check connectedness
     check:
-      nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == Connectedness.Connected
+      nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
+        Connectedness.Connected

     await allFutures(nodes.mapIt(it.stop()))

   asyncTest "dialPeer() fails gracefully":
     # Create 2 nodes and start them
-    let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
+    let nodes = toSeq(0 ..< 2).mapIt(
+      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+    )
     await allFutures(nodes.mapIt(it.start()))
     await allFutures(nodes.mapIt(it.mountRelay()))

-    let nonExistentPeerRes = parsePeerInfo("/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e")
+    let nonExistentPeerRes = parsePeerInfo(
+      "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e"
+    )
     require nonExistentPeerRes.isOk()

     let nonExistentPeer = nonExistentPeerRes.value

     # Dial non-existent peer from node1
-    let conn1 = await nodes[0].peerManager.dialPeer(nonExistentPeer, WakuLegacyFilterCodec)
+    let conn1 =
+      await nodes[0].peerManager.dialPeer(nonExistentPeer, WakuLegacyFilterCodec)
     check:
       conn1.isNone()

     # Dial peer not supporting given protocol
-    let conn2 = await nodes[0].peerManager.dialPeer(nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec)
+    let conn2 = await nodes[0].peerManager.dialPeer(
+      nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec
+    )
     check:
       conn2.isNone()

@@ -102,7 +122,8 @@ procSuite "Peer Manager":
   asyncTest "Adding, selecting and filtering peers work":
     let
-      node = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+      node =
+        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))

       # Create filter peer
       filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
@@ -117,23 +138,29 @@ procSuite "Peer Manager":
     node.mountStoreClient()

     node.peerManager.addServicePeer(storePeer.toRemotePeerInfo(), WakuStoreCodec)
-    node.peerManager.addServicePeer(filterPeer.toRemotePeerInfo(), WakuLegacyFilterCodec)
+    node.peerManager.addServicePeer(
+      filterPeer.toRemotePeerInfo(), WakuLegacyFilterCodec
+    )

     # Check peers were successfully added to peer manager
     check:
       node.peerManager.peerStore.peers().len == 2
-      node.peerManager.peerStore.peers(WakuLegacyFilterCodec).allIt(it.peerId == filterPeer.peerId and
-        it.addrs.contains(filterLoc) and
-        it.protocols.contains(WakuLegacyFilterCodec))
-      node.peerManager.peerStore.peers(WakuStoreCodec).allIt(it.peerId == storePeer.peerId and
-        it.addrs.contains(storeLoc) and
-        it.protocols.contains(WakuStoreCodec))
+      node.peerManager.peerStore.peers(WakuLegacyFilterCodec).allIt(
+        it.peerId == filterPeer.peerId and it.addrs.contains(filterLoc) and
+          it.protocols.contains(WakuLegacyFilterCodec)
+      )
+      node.peerManager.peerStore.peers(WakuStoreCodec).allIt(
+        it.peerId == storePeer.peerId and it.addrs.contains(storeLoc) and
+          it.protocols.contains(WakuStoreCodec)
+      )

     await node.stop()

   asyncTest "Peer manager keeps track of connections":
connections": # Create 2 nodes - let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))) + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + ) await allFutures(nodes.mapIt(it.start())) await allFutures(nodes.mapIt(it.mountRelay())) @@ -142,10 +169,13 @@ procSuite "Peer Manager": nodes[0].peerManager.addPeer(nodes[1].peerInfo.toRemotePeerInfo()) check: # No information about node2's connectedness - nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == NotConnected + nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == + NotConnected # Failed connection - let nonExistentPeerRes = parsePeerInfo("/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e") + let nonExistentPeerRes = parsePeerInfo( + "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e" + ) require: nonExistentPeerRes.isOk() @@ -156,11 +186,13 @@ procSuite "Peer Manager": check: # Cannot connect to node2 - nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) == CannotConnect + nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) == + CannotConnect # Successful connection require: - (await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())) == true + (await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())) == + true await sleepAsync(chronos.milliseconds(500)) check: @@ -171,18 +203,23 @@ procSuite "Peer Manager": await nodes[0].stop() check: # Not currently connected to node2, but had recent, successful connection. - nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == CanConnect + nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == + CanConnect await nodes[1].stop() asyncTest "Peer manager updates failed peers correctly": # Create 2 nodes - let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))) + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + ) await allFutures(nodes.mapIt(it.start())) await allFutures(nodes.mapIt(it.mountRelay())) - let nonExistentPeerRes = parsePeerInfo("/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e") + let nonExistentPeerRes = parsePeerInfo( + "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e" + ) require nonExistentPeerRes.isOk() let nonExistentPeer = nonExistentPeerRes.value @@ -197,8 +234,10 @@ procSuite "Peer Manager": let conn1Ok = await nodes[0].peerManager.connectRelay(nonExistentPeer) check: # Cannot connect to node2 - nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) == CannotConnect - nodes[0].peerManager.peerStore[ConnectionBook][nonExistentPeer.peerId] == CannotConnect + nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) == + CannotConnect + nodes[0].peerManager.peerStore[ConnectionBook][nonExistentPeer.peerId] == + CannotConnect nodes[0].peerManager.peerStore[NumberFailedConnBook][nonExistentPeer.peerId] == 1 # Connection attempt failed @@ -216,7 +255,8 @@ procSuite "Peer Manager": # After a successful connection, the number of failed connections is reset nodes[0].peerManager.peerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] = 4 - let conn2Ok = await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo()) + let 
+      await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())
     check:
       conn2Ok == true
       nodes[0].peerManager.peerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] == 0
@@ -231,10 +271,12 @@ procSuite "Peer Manager":
         generateSecp256k1Key(),
         ValidIpAddress.init("127.0.0.1"),
         Port(44048),
-        peerStorage = storage
-      )
-      node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023))
-
+        peerStorage = storage,
+      )
+      node2 = newTestWakuNode(
+        generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023)
+      )
+
     node1.mountMetadata(0).expect("Mounted Waku Metadata")
     node2.mountMetadata(0).expect("Mounted Waku Metadata")

@@ -243,7 +285,7 @@ procSuite "Peer Manager":
     await node1.mountRelay()
     await node2.mountRelay()
-    
+
     let peerInfo2 = node2.switch.peerInfo
     var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
     remotePeerInfo2.enr = some(node2.enr)
@@ -252,7 +294,8 @@ procSuite "Peer Manager":
     assert is12Connected == true, "Node 1 and 2 not connected"

     check:
-      node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] == remotePeerInfo2.addrs
+      node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] ==
+        remotePeerInfo2.addrs

     # wait for the peer store update
     await sleepAsync(chronos.milliseconds(500))

@@ -268,8 +311,8 @@ procSuite "Peer Manager":
       generateSecp256k1Key(),
       ValidIpAddress.init("127.0.0.1"),
       Port(56037),
-      peerStorage = storage
-    )
+      peerStorage = storage,
+    )

     node3.mountMetadata(0).expect("Mounted Waku Metadata")

@@ -303,10 +346,12 @@ procSuite "Peer Manager":
         generateSecp256k1Key(),
         ValidIpAddress.init("127.0.0.1"),
         Port(44048),
-        peerStorage = storage
-      )
-      node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023))
-
+        peerStorage = storage,
+      )
+      node2 = newTestWakuNode(
+        generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023)
+      )
+
     node1.mountMetadata(0).expect("Mounted Waku Metadata")
     node2.mountMetadata(0).expect("Mounted Waku Metadata")

@@ -315,7 +360,7 @@ procSuite "Peer Manager":
     await node1.mountRelay()
     await node2.mountRelay()
-    
+
     let peerInfo2 = node2.switch.peerInfo
     var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
     remotePeerInfo2.enr = some(node2.enr)
@@ -324,7 +369,8 @@ procSuite "Peer Manager":
     assert is12Connected == true, "Node 1 and 2 not connected"

     check:
-      node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] == remotePeerInfo2.addrs
+      node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] ==
+        remotePeerInfo2.addrs

     # wait for the peer store update
     await sleepAsync(chronos.milliseconds(500))

@@ -340,8 +386,8 @@ procSuite "Peer Manager":
       generateSecp256k1Key(),
       ValidIpAddress.init("127.0.0.1"),
       Port(56037),
-      peerStorage = storage
-    )
+      peerStorage = storage,
+    )

     node3.mountMetadata(0).expect("Mounted Waku Metadata")

@@ -405,13 +451,19 @@ procSuite "Peer Manager":
     await allFutures([node1.start(), node2.start(), node3.start()])

     # 1->2 (fails)
-    let conn1 = await node1.peerManager.dialPeer(node2.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
+    let conn1 = await node1.peerManager.dialPeer(
+      node2.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
+    )
     # 1->3 (fails)
-    let conn2 = await node1.peerManager.dialPeer(node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
+    let conn2 = await node1.peerManager.dialPeer(
+      node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
+    )
     # 2->3 (succeeds)
-    let conn3 = await node2.peerManager.dialPeer(node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
+    let conn3 = await node2.peerManager.dialPeer(
+      node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
+    )

     check:
       conn1.isNone
@@ -423,8 +475,14 @@ procSuite "Peer Manager":
     let
       database = SqliteDatabase.new(":memory:")[]
       storage = WakuPeerStorage.new(database)[]
-      node1 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0), peerStorage = storage)
-      node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+      node1 = newTestWakuNode(
+        generateSecp256k1Key(),
+        ValidIpAddress.init("0.0.0.0"),
+        Port(0),
+        peerStorage = storage,
+      )
+      node2 =
+        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
       peerInfo2 = node2.switch.peerInfo
       betaCodec = "/vac/waku/relay/2.0.0-beta2"
       stableCodec = "/vac/waku/relay/2.0.0"
@@ -443,12 +501,18 @@ procSuite "Peer Manager":
       # Currently connected to node2
       node1.peerManager.peerStore.peers().len == 1
       node1.peerManager.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
-      node1.peerManager.peerStore.peers().anyIt(it.protocols.contains(node2.wakuRelay.codec))
+      node1.peerManager.peerStore.peers().anyIt(
+        it.protocols.contains(node2.wakuRelay.codec)
+      )
       node1.peerManager.peerStore.connectedness(peerInfo2.peerId) == Connected

     # Simulate restart by initialising a new node using the same storage
-    let
-      node3 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0), peerStorage = storage)
+    let node3 = newTestWakuNode(
+      generateSecp256k1Key(),
+      ValidIpAddress.init("0.0.0.0"),
+      Port(0),
+      peerStorage = storage,
+    )

     await node3.mountRelay()
     node3.wakuRelay.codec = stableCodec
@@ -476,16 +540,14 @@ procSuite "Peer Manager":
   asyncTest "Peer manager connects to all peers supporting a given protocol":
     # Create 4 nodes
-    let nodes =
-      toSeq(0..<4)
-      .mapIt(
-        newTestWakuNode(
-          nodeKey = generateSecp256k1Key(),
-          bindIp = ValidIpAddress.init("0.0.0.0"),
-          bindPort = Port(0),
-          wakuFlags = some(CapabilitiesBitfield.init(@[Relay]))
-        )
+    let nodes = toSeq(0 ..< 4).mapIt(
+      newTestWakuNode(
+        nodeKey = generateSecp256k1Key(),
+        bindIp = ValidIpAddress.init("0.0.0.0"),
+        bindPort = Port(0),
+        wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
       )
+    )

     # Start them
     discard nodes.mapIt(it.mountMetadata(0))
@@ -494,7 +556,7 @@ procSuite "Peer Manager":
     # Get all peer infos
     let peerInfos = collect:
-      for i in 0..nodes.high:
+      for i in 0 .. nodes.high:
         let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo()
         peerInfo.enr = some(nodes[i].enr)
         peerInfo
@@ -512,34 +574,47 @@ procSuite "Peer Manager":
       nodes[0].peerManager.peerStore.peers().len == 3

       # All peer ids are correct
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].switch.peerInfo.peerId)
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[2].switch.peerInfo.peerId)
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[3].switch.peerInfo.peerId)
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[1].switch.peerInfo.peerId
+      )
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[2].switch.peerInfo.peerId
+      )
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[3].switch.peerInfo.peerId
+      )

       # All peers support the relay protocol
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(WakuRelayCodec)
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(WakuRelayCodec)
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(WakuRelayCodec)
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )

       # All peers are connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] == Connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] == Connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] == Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] ==
+        Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] ==
+        Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] ==
+        Connected

     await allFutures(nodes.mapIt(it.stop()))

   asyncTest "Sharded peer manager connects to all peers supporting a given protocol":
     # Create 4 nodes
-    let nodes =
-      toSeq(0..<4)
-      .mapIt(
-        newTestWakuNode(
-          nodeKey = generateSecp256k1Key(),
-          bindIp = ValidIpAddress.init("0.0.0.0"),
-          bindPort = Port(0),
-          wakuFlags = some(CapabilitiesBitfield.init(@[Relay]))
-        )
+    let nodes = toSeq(0 ..< 4).mapIt(
+      newTestWakuNode(
+        nodeKey = generateSecp256k1Key(),
+        bindIp = ValidIpAddress.init("0.0.0.0"),
+        bindPort = Port(0),
+        wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
      )
+    )

     # Start them
     discard nodes.mapIt(it.mountMetadata(0))
@@ -548,7 +623,7 @@ procSuite "Peer Manager":
     # Get all peer infos
     let peerInfos = collect:
-      for i in 0..nodes.high:
+      for i in 0 .. nodes.high:
         let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo()
         peerInfo.enr = some(nodes[i].enr)
         peerInfo
@@ -566,25 +641,42 @@ procSuite "Peer Manager":
       nodes[0].peerManager.peerStore.peers().len == 3

       # All peer ids are correct
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].switch.peerInfo.peerId)
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[2].switch.peerInfo.peerId)
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[3].switch.peerInfo.peerId)
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[1].switch.peerInfo.peerId
+      )
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[2].switch.peerInfo.peerId
+      )
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[3].switch.peerInfo.peerId
+      )

       # All peers support the relay protocol
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(WakuRelayCodec)
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(WakuRelayCodec)
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(WakuRelayCodec)
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )

       # All peers are connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] == Connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] == Connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] == Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] ==
+        Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] ==
+        Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] ==
+        Connected

     await allFutures(nodes.mapIt(it.stop()))

   asyncTest "Peer store keeps track of incoming connections":
     # Create 4 nodes
-    let nodes = toSeq(0..<4).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
+    let nodes = toSeq(0 ..< 4).mapIt(
+      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+    )

     # Start them
     await allFutures(nodes.mapIt(it.start()))
@@ -616,29 +708,50 @@ procSuite "Peer Manager":
       nodes[3].peerManager.peerStore.getPeersByDirection(Outbound).len == 1

       # All peer ids are correct
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].switch.peerInfo.peerId)
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[2].switch.peerInfo.peerId)
-      nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[3].switch.peerInfo.peerId)
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[1].switch.peerInfo.peerId
+      )
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[2].switch.peerInfo.peerId
+      )
+      nodes[0].peerManager.peerStore.peers().anyIt(
+        it.peerId == nodes[3].switch.peerInfo.peerId
+      )

       # All peers support the relay protocol
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(WakuRelayCodec)
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(WakuRelayCodec)
-      nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(WakuRelayCodec)
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )
+      nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
+        WakuRelayCodec
+      )

       # All peers are connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] == Connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] == Connected
-      nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] == Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] ==
+        Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] ==
+        Connected
+      nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] ==
+        Connected

       # All peers are Inbound in peer 0
-      nodes[0].peerManager.peerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] == Inbound
-      nodes[0].peerManager.peerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] == Inbound
-      nodes[0].peerManager.peerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] == Inbound
+      nodes[0].peerManager.peerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] ==
+        Inbound
+      nodes[0].peerManager.peerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] ==
+        Inbound
+      nodes[0].peerManager.peerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] ==
+        Inbound

       # All peers have an Outbound connection with peer 0
-      nodes[1].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == Outbound
-      nodes[2].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == Outbound
-      nodes[3].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == Outbound
+      nodes[1].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
+        Outbound
+      nodes[2].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
+        Outbound
+      nodes[3].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
+        Outbound

     await allFutures(nodes.mapIt(it.stop()))

@@ -647,13 +760,12 @@ procSuite "Peer Manager":
     let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D"

     let
-      node = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
-      peers = toSeq(1..5)
-        .mapIt(
-          parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it)
-        )
-        .filterIt(it.isOk())
-        .mapIt(it.value)
+      node =
+        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+      peers = toSeq(1 .. 5)
+        .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
+        .filterIt(it.isOk())
+        .mapIt(it.value)

     require:
       peers.len == 5

@@ -689,7 +801,9 @@ procSuite "Peer Manager":
   asyncTest "connectedPeers() returns expected number of connections per protocol":
     # Create 4 nodes
-    let nodes = toSeq(0..<4).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
+    let nodes = toSeq(0 ..< 4).mapIt(
+      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+    )

     # Start them with relay + filter
     await allFutures(nodes.mapIt(it.start()))
@@ -706,12 +820,14 @@ procSuite "Peer Manager":
       (await nodes[0].peerManager.connectRelay(pInfos[2])) == true
       (await nodes[1].peerManager.connectRelay(pInfos[2])) == true

-      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
-      (await nodes[0].peerManager.dialPeer(pInfos[2], WakuLegacyFilterCodec)).isSome() == true
+      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
+        true
+      (await nodes[0].peerManager.dialPeer(pInfos[2], WakuLegacyFilterCodec)).isSome() ==
+        true

       # isolated dial creates a relay conn under the hood (libp2p behaviour)
-      (await nodes[2].peerManager.dialPeer(pInfos[3], WakuLegacyFilterCodec)).isSome() == true
-
+      (await nodes[2].peerManager.dialPeer(pInfos[3], WakuLegacyFilterCodec)).isSome() ==
+        true

     # assert physical connections
     check:
@@ -741,7 +857,9 @@ procSuite "Peer Manager":
   asyncTest "getNumStreams() returns expected number of connections per protocol":
     # Create 2 nodes
-    let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
+    let nodes = toSeq(0 ..< 2).mapIt(
+      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+    )

     # Start them with relay + filter
     await allFutures(nodes.mapIt(it.start()))
@@ -754,10 +872,14 @@ procSuite "Peer Manager":
     require:
       # multiple streams are multiplexed over a single connection.
       # note that a relay connection is created under the hood when dialing a peer (libp2p behaviour)
-      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
-      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
-      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
-      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
+      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
+        true
+      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
+        true
+      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
+        true
+      (await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
+        true

     check:
       nodes[0].peerManager.getNumStreams(WakuRelayCodec) == (1, 1)
@@ -773,19 +895,21 @@ procSuite "Peer Manager":
     # Create peer manager
     let pm = PeerManager.new(
       switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise().build(),
-      storage = nil)
+      storage = nil,
+    )

     # Create 3 peer infos
-    let peers = toSeq(1..3)
-      .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
-      .filterIt(it.isOk())
-      .mapIt(it.value)
3) + .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it)) + .filterIt(it.isOk()) + .mapIt(it.value) require: peers.len == 3 # Add a peer[0] to the peerstore pm.peerStore[AddressBook][peers[0].peerId] = peers[0].addrs - pm.peerStore[ProtoBook][peers[0].peerId] = @[WakuRelayCodec, WakuStoreCodec, WakuLegacyFilterCodec] + pm.peerStore[ProtoBook][peers[0].peerId] = + @[WakuRelayCodec, WakuStoreCodec, WakuLegacyFilterCodec] # When no service peers, we get one from the peerstore let selectedPeer1 = pm.selectPeer(WakuStoreCodec) @@ -826,28 +950,38 @@ procSuite "Peer Manager": expect(Defect): let pm = PeerManager.new( - switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise() - .withPeerStore(peerStoreSize) - .withMaxConnections(maxConnections) - .build(), - storage = nil) + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(peerStoreSize) + .withMaxConnections(maxConnections) + .build(), + storage = nil, + ) test "prunePeerStore() correctly removes peers to match max quota": # Create peer manager let pm = PeerManager.new( - switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise() - .withPeerStore(10) - .withMaxConnections(5) - .build(), + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(10) + .withMaxConnections(5) + .build(), maxFailedAttempts = 1, maxRelayPeers = some(5), - storage = nil) + storage = nil, + ) # Create 15 peers and add them to the peerstore - let peers = toSeq(1..15) - .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/0/p2p/" & $PeerId.random().get())) - .filterIt(it.isOk()) - .mapIt(it.value) + let peers = toSeq(1 .. 15) + .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/0/p2p/" & $PeerId.random().get())) + .filterIt(it.isOk()) + .mapIt(it.value) for p in peers: pm.addPeer(p) @@ -886,19 +1020,24 @@ procSuite "Peer Manager": asyncTest "canBeConnected() returns correct value": let pm = PeerManager.new( - switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise() - .withPeerStore(10) - .withMaxConnections(5) - .build(), - initialBackoffInSec = 1, # with InitialBackoffInSec = 1 backoffs are: 1, 2, 4, 8secs. + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(10) + .withMaxConnections(5) + .build(), + initialBackoffInSec = 1, + # with InitialBackoffInSec = 1 backoffs are: 1, 2, 4, 8secs. 
backoffFactor = 2, maxFailedAttempts = 10, maxRelayPeers = some(5), - storage = nil) + storage = nil, + ) var p1: PeerId require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW" & "1") - # new peer with no errors can be connected check: pm.canBeConnected(p1) == true @@ -938,37 +1077,54 @@ procSuite "Peer Manager": # Should result in overflow exception expect(Defect): let pm = PeerManager.new( - switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise() - .withPeerStore(10) - .withMaxConnections(5) - .build(), + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(10) + .withMaxConnections(5) + .build(), maxRelayPeers = some(5), maxFailedAttempts = 150, - storage = nil) + storage = nil, + ) # Should result in backoff > 1 week expect(Defect): let pm = PeerManager.new( - switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise() + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(10) + .withMaxConnections(5) + .build(), + maxFailedAttempts = 10, + maxRelayPeers = some(5), + storage = nil, + ) + + let pm = PeerManager.new( + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() .withPeerStore(10) .withMaxConnections(5) .build(), - maxFailedAttempts = 10, - maxRelayPeers = some(5), - storage = nil) - - let pm = PeerManager.new( - switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise() - .withPeerStore(10) - .withMaxConnections(5) - .build(), maxFailedAttempts = 5, maxRelayPeers = some(5), - storage = nil) + storage = nil, + ) asyncTest "colocationLimit is enforced by pruneConnsByIp()": # Create 5 nodes - let nodes = toSeq(0..<5).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))) + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + ) # Start them with relay + filter await allFutures(nodes.mapIt(it.start())) diff --git a/tests/test_peer_storage.nim b/tests/test_peer_storage.nim index a9150b6ab..5c183b6b6 100644 --- a/tests/test_peer_storage.nim +++ b/tests/test_peer_storage.nim @@ -1,10 +1,6 @@ {.used.} -import - std/options, - testutils/unittests, - eth/p2p/discoveryv5/enr, - libp2p/crypto/crypto +import std/options, testutils/unittests, eth/p2p/discoveryv5/enr, libp2p/crypto/crypto import ../../waku/common/databases/db_sqlite, ../../waku/node/peer_manager/peer_manager, @@ -12,9 +8,7 @@ import ../../waku/waku_enr, ./testlib/wakucore - suite "Peer Storage": - test "Store, replace and retrieve from persistent peer storage": let database = SqliteDatabase.new(":memory:").tryGet() @@ -28,22 +22,24 @@ suite "Peer Storage": connectedness = Connectedness.CanConnect disconn = 999999 topics = @["/waku/2/rs/2/0", "/waku/2/rs/2/1"] - + # Create ENR var enrBuilder = EnrBuilder.init(peerKey) enrBuilder.withShardedTopics(topics).expect("Valid topics") let record = enrBuilder.build().expect("Valid record") let stored = RemotePeerInfo( - peerId: peer.peerId, - addrs: @[peerLoc], - enr: some(record), - protocols: @[peerProto], - publicKey: peerKey.getPublicKey().tryGet(), - connectedness: connectedness, - disconnectTime: disconn) + peerId: peer.peerId, + addrs: @[peerLoc], + enr: some(record), + protocols: @[peerProto], + publicKey: peerKey.getPublicKey().tryGet(), + connectedness: connectedness, + disconnectTime: disconn, + ) - defer: storage.close() + defer: + storage.close() # Test insert and retrieve @@ -69,9 +65,10 @@ suite "Peer Storage": resStoredInfo.publicKey == 
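[Editor's note, not part of the patch] The canBeConnected() configuration above encodes an exponential reconnection backoff: with initialBackoffInSec = 1 and backoffFactor = 2, the test comment states the successive backoffs are 1, 2, 4 and 8 seconds. A minimal sketch of that arithmetic (backoffForAttempt is a hypothetical helper written for illustration, not part of nwaku's PeerManager API):

# Sketch only: reproduces the backoff progression described in the test comment,
# assuming backoff = initialBackoffInSec * backoffFactor ^ failedAttempts.
proc backoffForAttempt(initialBackoffInSec, backoffFactor, failedAttempts: int): int =
  result = initialBackoffInSec
  for _ in 1 .. failedAttempts:
    result *= backoffFactor

when isMainModule:
  for attempts in 0 .. 3:
    echo backoffForAttempt(1, 2, attempts) # prints 1, 2, 4, 8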
diff --git a/tests/test_peer_storage.nim b/tests/test_peer_storage.nim
index a9150b6ab..5c183b6b6 100644
--- a/tests/test_peer_storage.nim
+++ b/tests/test_peer_storage.nim
@@ -1,10 +1,6 @@
 {.used.}

-import
-  std/options,
-  testutils/unittests,
-  eth/p2p/discoveryv5/enr,
-  libp2p/crypto/crypto
+import std/options, testutils/unittests, eth/p2p/discoveryv5/enr, libp2p/crypto/crypto

 import
   ../../waku/common/databases/db_sqlite,
   ../../waku/node/peer_manager/peer_manager,
@@ -12,9 +8,7 @@ import
   ../../waku/waku_enr,
   ./testlib/wakucore

-
 suite "Peer Storage":
-
   test "Store, replace and retrieve from persistent peer storage":
     let database = SqliteDatabase.new(":memory:").tryGet()
@@ -28,22 +22,24 @@ suite "Peer Storage":
       connectedness = Connectedness.CanConnect
       disconn = 999999
       topics = @["/waku/2/rs/2/0", "/waku/2/rs/2/1"]
-
+
     # Create ENR
     var enrBuilder = EnrBuilder.init(peerKey)
     enrBuilder.withShardedTopics(topics).expect("Valid topics")
     let record = enrBuilder.build().expect("Valid record")

     let stored = RemotePeerInfo(
-      peerId: peer.peerId,
-      addrs: @[peerLoc],
-      enr: some(record),
-      protocols: @[peerProto],
-      publicKey: peerKey.getPublicKey().tryGet(),
-      connectedness: connectedness,
-      disconnectTime: disconn)
+      peerId: peer.peerId,
+      addrs: @[peerLoc],
+      enr: some(record),
+      protocols: @[peerProto],
+      publicKey: peerKey.getPublicKey().tryGet(),
+      connectedness: connectedness,
+      disconnectTime: disconn,
+    )

-    defer: storage.close()
+    defer:
+      storage.close()

     # Test insert and retrieve

@@ -69,9 +65,10 @@ suite "Peer Storage":
       resStoredInfo.publicKey == peerKey.getPublicKey().tryGet()
       resStoredInfo.connectedness == connectedness
       resStoredInfo.disconnectTime == disconn
-
+
     assert resStoredInfo.enr.isSome(), "The ENR info wasn't properly stored"
-    check: resStoredInfo.enr.get() == record
+    check:
+      resStoredInfo.enr.get() == record

     # Test replace and retrieve (update an existing entry)
     stored.connectedness = CannotConnect
diff --git a/tests/test_peer_store_extended.nim b/tests/test_peer_store_extended.nim
index db70cb51c..0686138f8 100644
--- a/tests/test_peer_store_extended.nim
+++ b/tests/test_peer_store_extended.nim
@@ -14,7 +14,6 @@ import
   ../../waku/waku_node,
   ./testlib/wakucore

-
 suite "Extended nim-libp2p Peer Store":
   # Valid peerId missing the last digit. Useful for creating new peerIds
   # basePeerId & "1"
@@ -64,7 +63,8 @@ suite "Extended nim-libp2p Peer Store":
   # Peer3: Connected
   peerStore[AddressBook][p3] = @[MultiAddress.init("/ip4/127.0.0.1/tcp/3").tryGet()]
-  peerStore[ProtoBook][p3] = @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
+  peerStore[ProtoBook][p3] =
+    @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
   peerStore[KeyBook][p3] = generateEcdsaKeyPair().pubkey
   peerStore[AgentBook][p3] = "gowaku"
   peerStore[ProtoVersionBook][p3] = "protoVersion3"
@@ -180,7 +180,8 @@ suite "Extended nim-libp2p Peer Store":
       # Only p3 supports that protocol
       lpPeers.len == 1
       lpPeers.anyIt(it.peerId == p3)
-      lpPeers[0].protocols == @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
+      lpPeers[0].protocols ==
+        @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]

   test "peers() returns all StoredInfo matching a given protocolMatcher":
     # When
@@ -197,15 +198,20 @@ suite "Extended nim-libp2p Peer Store":
       pMatcherStorePeers.anyIt(it.peerId == p5)

     check:
-      pMatcherStorePeers.filterIt(it.peerId == p1)[0].protocols == @["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"]
-      pMatcherStorePeers.filterIt(it.peerId == p2)[0].protocols == @["/vac/waku/relay/2.0.0", "/vac/waku/store/2.0.0"]
-      pMatcherStorePeers.filterIt(it.peerId == p3)[0].protocols == @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
-      pMatcherStorePeers.filterIt(it.peerId == p5)[0].protocols == @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]
+      pMatcherStorePeers.filterIt(it.peerId == p1)[0].protocols ==
+        @["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"]
+      pMatcherStorePeers.filterIt(it.peerId == p2)[0].protocols ==
+        @["/vac/waku/relay/2.0.0", "/vac/waku/store/2.0.0"]
+      pMatcherStorePeers.filterIt(it.peerId == p3)[0].protocols ==
+        @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
+      pMatcherStorePeers.filterIt(it.peerId == p5)[0].protocols ==
+        @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]

     check:
       pMatcherSwapPeers.len == 1
       pMatcherSwapPeers.anyIt(it.peerId == p5)
-      pMatcherSwapPeers[0].protocols == @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]
+      pMatcherSwapPeers[0].protocols ==
+        @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]

   test "toRemotePeerInfo() converts a StoredInfo to a RemotePeerInfo":
     # Given
diff --git a/tests/test_relay_peer_exchange.nim b/tests/test_relay_peer_exchange.nim
index 6f842245f..3ba5b878d 100644
--- a/tests/test_relay_peer_exchange.nim
+++ b/tests/test_relay_peer_exchange.nim
@@ -11,10 +11,7 @@ import
   libp2p/protocols/pubsub/gossipsub

 import
-  ../../waku/waku_core,
-  ../../waku/waku_node,
-  ./testlib/wakucore,
-  ./testlib/wakunode
+  ../../waku/waku_core, ../../waku/waku_node, ./testlib/wakucore, ./testlib/wakunode

 procSuite "Relay (GossipSub) Peer Exchange":
   asyncTest "Mount relay without peer exchange handler":
diff --git a/tests/test_utils_compat.nim b/tests/test_utils_compat.nim
index 79411b596..4184d651b 100644
--- a/tests/test_utils_compat.nim
+++ b/tests/test_utils_compat.nim
@@ -1,7 +1,6 @@
 {.used.}

-import
-  testutils/unittests
+import testutils/unittests
 import
   stew/results,
   ../../waku/waku_core/message,
@@ -9,7 +8,6 @@ import
   ./testlib/common

 suite "Waku Payload":
-
   test "Encode/Decode waku message with timestamp":
     ## Test encoding and decoding of the timestamp field of a WakuMessage

@@ -21,7 +19,7 @@ suite "Waku Payload":
       msg = WakuMessage(payload: payload, version: version, timestamp: timestamp)

     ## When
-    let pb =  msg.encode()
+    let pb = msg.encode()
     let msgDecoded = WakuMessage.decode(pb.buffer)

     ## Then
@@ -42,7 +40,7 @@ suite "Waku Payload":
       msg = WakuMessage(payload: payload, version: version)

     ## When
-    let pb =  msg.encode()
+    let pb = msg.encode()
     let msgDecoded = WakuMessage.decode(pb.buffer)

     ## Then
diff --git a/tests/test_waku_dnsdisc.nim b/tests/test_waku_dnsdisc.nim
index c27f4bc53..ceec17e43 100644
--- a/tests/test_waku_dnsdisc.nim
+++ b/tests/test_waku_dnsdisc.nim
@@ -42,9 +42,12 @@ suite "Waku DNS Discovery":
     await allFutures([node1.start(), node2.start(), node3.start()])

     # Build and sign tree
-    var tree = buildTree(1, # Seq no
-                         @[enr1, enr2, enr3], # ENR entries
-                         @[]).get() # No link entries
+    var tree = buildTree(
+        1, # Seq no
+        @[enr1, enr2, enr3], # ENR entries
+        @[],
+      )
+      .get() # No link entries

     let treeKeys = keys.KeyPair.random(rng[])
@@ -57,7 +60,8 @@ suite "Waku DNS Discovery":
       domain = "testnodes.aq"
       zoneTxts = tree.buildTXT(domain).get()
       username = Base32.encode(treeKeys.pubkey().toRawCompressed())
-      location = LinkPrefix & username & "@" & domain # See EIP-1459: https://eips.ethereum.org/EIPS/eip-1459
+      location = LinkPrefix & username & "@" & domain
+        # See EIP-1459: https://eips.ethereum.org/EIPS/eip-1459

     # Create a resolver for the domain

@@ -90,11 +94,20 @@ suite "Waku DNS Discovery":
     check:
       # We have successfully connected to all discovered nodes
-      node4.peerManager.peerStore.peers().anyIt(it.peerId == node1.switch.peerInfo.peerId)
-      node4.peerManager.peerStore.connectedness(node1.switch.peerInfo.peerId) == Connected
-      node4.peerManager.peerStore.peers().anyIt(it.peerId == node2.switch.peerInfo.peerId)
-      node4.peerManager.peerStore.connectedness(node2.switch.peerInfo.peerId) == Connected
-      node4.peerManager.peerStore.peers().anyIt(it.peerId == node3.switch.peerInfo.peerId)
-      node4.peerManager.peerStore.connectedness(node3.switch.peerInfo.peerId) == Connected
+      node4.peerManager.peerStore.peers().anyIt(
+        it.peerId == node1.switch.peerInfo.peerId
+      )
+      node4.peerManager.peerStore.connectedness(node1.switch.peerInfo.peerId) ==
+        Connected
+      node4.peerManager.peerStore.peers().anyIt(
+        it.peerId == node2.switch.peerInfo.peerId
+      )
+      node4.peerManager.peerStore.connectedness(node2.switch.peerInfo.peerId) ==
+        Connected
+      node4.peerManager.peerStore.peers().anyIt(
+        it.peerId == node3.switch.peerInfo.peerId
+      )
+      node4.peerManager.peerStore.connectedness(node3.switch.peerInfo.peerId) ==
+        Connected

     await allFutures([node1.stop(), node2.stop(), node3.stop(), node4.stop()])
diff --git a/tests/test_waku_enr.nim b/tests/test_waku_enr.nim
index 848fb64ea..0e794faf2 100644
--- a/tests/test_waku_enr.nim
+++ b/tests/test_waku_enr.nim
@@ -1,19 +1,12 @@
 {.used.}

-import
-  std/[options, sequtils],
-  stew/results,
-  testutils/unittests
-import
-  ../../waku/waku_core,
-  ../../waku/waku_enr,
-  ./testlib/wakucore
-
+import std/[options, sequtils], stew/results, testutils/unittests
+import ../../waku/waku_core, ../../waku/waku_enr, ./testlib/wakucore

 suite "Waku ENR - Capabilities bitfield":
   test "check capabilities support":
     ## Given
-    let bitfield: CapabilitiesBitfield = 0b0000_1101u8   # Lightpush, Filter, Relay
+    let bitfield: CapabilitiesBitfield = 0b0000_1101u8 # Lightpush, Filter, Relay

     ## Then
     check:
@@ -25,11 +18,8 @@ suite "Waku ENR - Capabilities bitfield":
   test "bitfield to capabilities list":
     ## Given
     let bitfield = CapabilitiesBitfield.init(
-      relay = true,
-      store = false,
-      lightpush = true,
-      filter = true
-    )
+      relay = true, store = false, lightpush = true, filter = true
+    )

     ## When
     let caps = bitfield.toCapabilities()
@@ -83,9 +73,10 @@ suite "Waku ENR - Capabilities bitfield":
   test "check capabilities on a waku node record":
     ## Given
-    let wakuRecord = "-Hy4QC73_E3B_FkZhsOakaD4pHe-U--UoGASdG9N0F3SFFUDY_jdQbud8" &
-      "EXVyrlOZ5pZ7VYFBDPMRCENwy87Lh74dFIBgmlkgnY0iXNlY3AyNTZrMaECvNt1jIWbWGp" &
-      "AWWdlLGYm1E1OjlkQk3ONoxDC5sfw8oOFd2FrdTID"
+    let wakuRecord =
+      "-Hy4QC73_E3B_FkZhsOakaD4pHe-U--UoGASdG9N0F3SFFUDY_jdQbud8" &
+      "EXVyrlOZ5pZ7VYFBDPMRCENwy87Lh74dFIBgmlkgnY0iXNlY3AyNTZrMaECvNt1jIWbWGp" &
+      "AWWdlLGYm1E1OjlkQk3ONoxDC5sfw8oOFd2FrdTID"

     ## When
     var record: Record
@@ -109,9 +100,10 @@ suite "Waku ENR - Capabilities bitfield":
   test "check capabilities on a non-waku node record":
     ## Given
     # non waku enr, i.e. Ethereum one
-    let nonWakuEnr = "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2G" &
-      "xb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNl" &
-      "Y3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA"
+    let nonWakuEnr =
+      "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2G" &
+      "xb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNl" &
+      "Y3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA"

     ## When
     var record: Record
@@ -131,26 +123,33 @@ suite "Waku ENR - Capabilities bitfield":
       record.supportsCapability(Capabilities.Filter) == false
       record.supportsCapability(Capabilities.Lightpush) == false

-
 suite "Waku ENR - Multiaddresses":
-
   test "decode record with multiaddrs field":
     ## Given
-    let enrUri = "enr:-QEnuEBEAyErHEfhiQxAVQoWowGTCuEF9fKZtXSd7H_PymHFhGJA3rGAYDVSH" &
-      "KCyJDGRLBGsloNbS8AZF33IVuefjOO6BIJpZIJ2NIJpcIQS39tkim11bHRpYWRkcn" &
-      "O4lgAvNihub2RlLTAxLmRvLWFtczMud2FrdXYyLnRlc3Quc3RhdHVzaW0ubmV0BgG" &
-      "73gMAODcxbm9kZS0wMS5hYy1jbi1ob25na29uZy1jLndha3V2Mi50ZXN0LnN0YXR1" &
-      "c2ltLm5ldAYBu94DACm9A62t7AQL4Ef5ZYZosRpQTzFVAB8jGjf1TER2wH-0zBOe1" &
-      "-MDBNLeA4lzZWNwMjU2azGhAzfsxbxyCkgCqq8WwYsVWH7YkpMLnU2Bw5xJSimxKa" &
-      "v-g3VkcIIjKA"
+    let enrUri =
+      "enr:-QEnuEBEAyErHEfhiQxAVQoWowGTCuEF9fKZtXSd7H_PymHFhGJA3rGAYDVSH" &
+      "KCyJDGRLBGsloNbS8AZF33IVuefjOO6BIJpZIJ2NIJpcIQS39tkim11bHRpYWRkcn" &
+      "O4lgAvNihub2RlLTAxLmRvLWFtczMud2FrdXYyLnRlc3Quc3RhdHVzaW0ubmV0BgG" &
+      "73gMAODcxbm9kZS0wMS5hYy1jbi1ob25na29uZy1jLndha3V2Mi50ZXN0LnN0YXR1" &
+      "c2ltLm5ldAYBu94DACm9A62t7AQL4Ef5ZYZosRpQTzFVAB8jGjf1TER2wH-0zBOe1" &
+      "-MDBNLeA4lzZWNwMjU2azGhAzfsxbxyCkgCqq8WwYsVWH7YkpMLnU2Bw5xJSimxKa" &
+      "v-g3VkcIIjKA"

     var record: Record
     require record.fromURI(enrUri)

     let
-      expectedAddr1 = MultiAddress.init("/dns4/node-01.do-ams3.wakuv2.test.statusim.net/tcp/443/wss").get()
-      expectedAddr2 = MultiAddress.init("/dns6/node-01.ac-cn-hongkong-c.wakuv2.test.statusim.net/tcp/443/wss").get()
-      expectedAddr3 = MultiAddress.init("/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234/wss").get()
+      expectedAddr1 = MultiAddress
+        .init("/dns4/node-01.do-ams3.wakuv2.test.statusim.net/tcp/443/wss")
+        .get()
+      expectedAddr2 = MultiAddress
+        .init("/dns6/node-01.ac-cn-hongkong-c.wakuv2.test.statusim.net/tcp/443/wss")
+        .get()
+      expectedAddr3 = MultiAddress
+        .init(
+          "/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234/wss"
+        )
+        .get()

     ## When
     let typedRecord = record.toTyped()
@@ -225,7 +224,11 @@ suite "Waku ENR - Multiaddresses":
       enrPrivKey = generatesecp256k1key()

     let
-      addr1 = MultiAddress.init("/ip4/127.0.0.1/tcp/80/ws/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr31iDQpSN5Qa882BCjjwgrD").get()
+      addr1 = MultiAddress
+        .init(
+          "/ip4/127.0.0.1/tcp/80/ws/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr31iDQpSN5Qa882BCjjwgrD"
+        )
+        .get()
       addr2 = MultiAddress.init("/ip4/127.0.0.1/tcp/443/wss").get()

     let expectedAddr1 = MultiAddress.init("/ip4/127.0.0.1/tcp/80/ws").get()
@@ -252,9 +255,7 @@ suite "Waku ENR - Multiaddresses":
       multiaddrs.contains(expectedAddr1)
       multiaddrs.contains(addr2)

-
 suite "Waku ENR - Relay static sharding":
-
   test "new relay shards object with single invalid shard id":
     ## Given
     let
@@ -374,7 +375,8 @@ suite "Waku ENR - Relay static sharding":
       enrSeqNum = 1u64
       enrPrivKey = generatesecp256k1key()

-    let shardsTopics = RelayShards.init(33, toSeq(0u16 ..< 64u16)).expect("Valid Shards")
+    let shardsTopics =
+      RelayShards.init(33, toSeq(0u16 ..< 64u16)).expect("Valid Shards")

     var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum)
     require builder.withWakuRelaySharding(shardsTopics).isOk()
@@ -402,9 +404,12 @@ suite "Waku ENR - Relay static sharding":
       enrPrivKey = generatesecp256k1key()

     let
-      relayShardsIndicesList = RelayShards.init(22, @[1u16, 1u16, 2u16, 3u16, 5u16, 8u16]).expect("Valid Shards")
-      relayShardsBitVector = RelayShards.init(33, @[13u16, 24u16, 37u16, 61u16, 98u16, 159u16]).expect("Valid Shards")
-
+      relayShardsIndicesList = RelayShards
+        .init(22, @[1u16, 1u16, 2u16, 3u16, 5u16, 8u16])
+        .expect("Valid Shards")
+      relayShardsBitVector = RelayShards
+        .init(33, @[13u16, 24u16, 37u16, 61u16, 98u16, 159u16])
+        .expect("Valid Shards")

     var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum)
     require builder.withWakuRelayShardingIndicesList(relayShardsIndicesList).isOk()
diff --git a/tests/test_waku_filter_legacy.nim b/tests/test_waku_filter_legacy.nim
index 36694b0d6..9cb222d3a 100644
--- a/tests/test_waku_filter_legacy.nim
+++ b/tests/test_waku_filter_legacy.nim
@@ -1,11 +1,7 @@
 {.used.}

 import
-  std/[options, tables],
-  testutils/unittests,
-  chronicles,
-  chronos,
-  libp2p/crypto/crypto
+  std/[options, tables], testutils/unittests, chronicles, chronos, libp2p/crypto/crypto
 import
   ../../waku/node/peer_manager,
   ../../waku/waku_core,
@@ -14,8 +10,9 @@ import
   ./testlib/common,
   ./testlib/wakucore

-
-proc newTestWakuFilterNode(switch: Switch, timeout: Duration = 2.hours): Future[WakuFilterLegacy] {.async.} =
+proc newTestWakuFilterNode(
+    switch: Switch, timeout: Duration = 2.hours
+): Future[WakuFilterLegacy] {.async.} =
   let
     peerManager = PeerManager.new(switch)
     proto = WakuFilterLegacy.new(peerManager, rng, timeout)
@@ -35,7 +32,6 @@ proc newTestWakuFilterClient(switch: Switch): Future[WakuFilterClientLegacy] {.a

   return proto

-
 # TODO: Extend test coverage
 suite "Waku Filter":
   asyncTest "should forward messages to client after subscribed":
@@ -54,16 +50,20 @@ suite "Waku Filter":
     let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

     let pushHandlerFuture = newFuture[(string, WakuMessage)]()
-    proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
+    proc pushHandler(
+        pubsubTopic: PubsubTopic, message: WakuMessage
+    ) {.async, gcsafe, closure.} =
       pushHandlerFuture.complete((pubsubTopic, message))

     let
       pubsubTopic = DefaultPubsubTopic
       contentTopic = "test-content-topic"
-      msg = fakeWakuMessage(contentTopic=contentTopic)
+      msg = fakeWakuMessage(contentTopic = contentTopic)

     ## When
-    require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
+    require (
+      await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
+    ).isOk()

     # WARN: Sleep necessary to avoid a race condition between the subscription and the handle message proc
     await sleepAsync(500.milliseconds)
@@ -97,16 +97,20 @@ suite "Waku Filter":
     let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

     var pushHandlerFuture = newFuture[void]()
-    proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
+    proc pushHandler(
+        pubsubTopic: PubsubTopic, message: WakuMessage
+    ) {.async, gcsafe, closure.} =
       pushHandlerFuture.complete()

     let
       pubsubTopic = DefaultPubsubTopic
       contentTopic = "test-content-topic"
-      msg = fakeWakuMessage(contentTopic=contentTopic)
+      msg = fakeWakuMessage(contentTopic = contentTopic)

     ## When
-    require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
+    require (
+      await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
+    ).isOk()

     # WARN: Sleep necessary to avoid a race condition between the subscription and the handle message proc
     await sleepAsync(500.milliseconds)
@@ -118,7 +122,7 @@ suite "Waku Filter":

     # Reset to test unsubscribe
     pushHandlerFuture = newFuture[void]()

-    require (await client.unsubscribe(pubsubTopic, contentTopic, peer=serverAddr)).isOk()
+    require (await client.unsubscribe(pubsubTopic, contentTopic, peer = serverAddr)).isOk()

     # WARN: Sleep necessary to avoid a race condition between the unsubscription and the handle message proc
     await sleepAsync(500.milliseconds)
@@ -126,7 +130,8 @@ suite "Waku Filter":
     await server.handleMessage(pubsubTopic, msg)

     ## Then
-    let handlerWasCalledAfterUnsubscription = await pushHandlerFuture.withTimeout(1.seconds)
+    let handlerWasCalledAfterUnsubscription =
+      await pushHandlerFuture.withTimeout(1.seconds)
     check:
       not handlerWasCalledAfterUnsubscription
@@ -142,23 +147,27 @@ suite "Waku Filter":
     await allFutures(serverSwitch.start(), clientSwitch.start())

     let
-      server = await newTestWakuFilterNode(serverSwitch, timeout=200.milliseconds)
+      server = await newTestWakuFilterNode(serverSwitch, timeout = 200.milliseconds)
       client = await newTestWakuFilterClient(clientSwitch)

     ## Given
     let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

     var pushHandlerFuture = newFuture[void]()
-    proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
+    proc pushHandler(
+        pubsubTopic: PubsubTopic, message: WakuMessage
+    ) {.async, gcsafe, closure.} =
      pushHandlerFuture.complete()

     let
       pubsubTopic = DefaultPubsubTopic
       contentTopic = "test-content-topic"
-      msg = fakeWakuMessage(contentTopic=contentTopic)
+      msg = fakeWakuMessage(contentTopic = contentTopic)

     ## When
-    require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
+    require (
+      await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
+    ).isOk()

     # WARN: Sleep necessary to avoid a race condition between the unsubscription and the handle message proc
     await sleepAsync(500.milliseconds)
@@ -207,23 +216,27 @@ suite "Waku Filter":
     await allFutures(serverSwitch.start(), clientSwitch.start())

     let
-      server = await newTestWakuFilterNode(serverSwitch, timeout=200.milliseconds)
+      server = await newTestWakuFilterNode(serverSwitch, timeout = 200.milliseconds)
       client = await newTestWakuFilterClient(clientSwitch)

     ## Given
     let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

     var pushHandlerFuture = newFuture[void]()
-    proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
+    proc pushHandler(
+        pubsubTopic: PubsubTopic, message: WakuMessage
+    ) {.async, gcsafe, closure.} =
       pushHandlerFuture.complete()

     let
       pubsubTopic = DefaultPubsubTopic
       contentTopic = "test-content-topic"
-      msg = fakeWakuMessage(contentTopic=contentTopic)
+      msg = fakeWakuMessage(contentTopic = contentTopic)

     ## When
-    require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
+    require (
+      await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
+    ).isOk()

     # WARN: Sleep necessary to avoid a race condition between the unsubscription and the handle message proc
     await sleepAsync(500.milliseconds)
@@ -255,8 +268,7 @@ suite "Waku Filter":

     # Start switch with same key as before
     let clientSwitch2 = newTestSwitch(
-      some(clientSwitch.peerInfo.privateKey),
-      some(clientSwitch.peerInfo.addrs[0])
+      some(clientSwitch.peerInfo.privateKey), some(clientSwitch.peerInfo.addrs[0])
     )
     await clientSwitch2.start()
     await client.start()
diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim
index 159fac682..47d9be6fd 100644
--- a/tests/test_waku_keepalive.nim
+++ b/tests/test_waku_keepalive.nim
@@ -12,14 +12,9 @@ import
   libp2p/stream/connection,
   libp2p/crypto/crypto

 import
-  ../../waku/waku_core,
-  ../../waku/waku_node,
-  ./testlib/wakucore,
-  ./testlib/wakunode
-
+  ../../waku/waku_core, ../../waku/waku_node, ./testlib/wakucore, ./testlib/wakunode

 suite "Waku Keepalive":
-
   asyncTest "handle ping keepalives":
     let
       nodeKey1 = generateSecp256k1Key()
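[Editor's note, not part of the patch] The filter tests above all share one assertion pattern: the push handler completes a future, and the test awaits it with a timeout to decide whether a message was (or was not) delivered. A self-contained chronos sketch of that pattern, with the protocol machinery stripped away:

import chronos

var pushHandlerFuture = newFuture[void]()

proc pushHandler() {.async.} =
  # stands in for the (pubsubTopic, message) handler in the tests
  pushHandlerFuture.complete()

proc demo() {.async.} =
  asyncSpawn pushHandler()
  # withTimeout returns true if the future completed within the deadline
  let handlerWasCalled = await pushHandlerFuture.withTimeout(1.seconds)
  assert handlerWasCalled

waitFor demo()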
diff --git a/tests/test_waku_keystore.nim b/tests/test_waku_keystore.nim
index 952f186eb..ceda34871 100644
--- a/tests/test_waku_keystore.nim
+++ b/tests/test_waku_keystore.nim
@@ -1,38 +1,30 @@
 {.used.}

-import
-  std/[os, json],
-  chronos,
-  testutils/unittests
-import
-  ../../waku/waku_keystore,
-  ./testlib/common
+import std/[os, json], chronos, testutils/unittests
+import ../../waku/waku_keystore, ./testlib/common

-from  ../../waku/waku_noise/noise_utils import randomSeqByte
+from ../../waku/waku_noise/noise_utils import randomSeqByte

 procSuite "Credentials test suite":
-
   let testAppInfo = AppInfo(application: "test", appIdentifier: "1234", version: "0.1")

   test "Create keystore":
-
     let filepath = "./testAppKeystore.txt"
-    defer: removeFile(filepath)
+    defer:
+      removeFile(filepath)

-    let keystoreRes = createAppKeystore(path = filepath,
-                                        appInfo = testAppInfo)
+    let keystoreRes = createAppKeystore(path = filepath, appInfo = testAppInfo)

     check:
       keystoreRes.isOk()

   test "Load keystore":
-
     let filepath = "./testAppKeystore.txt"
-    defer: removeFile(filepath)
+    defer:
+      removeFile(filepath)

     # If no keystore exists at filepath, a new one is created for appInfo and empty credentials
-    let keystoreRes = loadAppKeystore(path = filepath,
-                                      appInfo = testAppInfo)
+    let keystoreRes = loadAppKeystore(path = filepath, appInfo = testAppInfo)

     check:
       keystoreRes.isOk()
@@ -48,106 +40,134 @@ procSuite "Credentials test suite":
       keystore["credentials"].len() == 0

   test "Add credentials to keystore":
-
     let filepath = "./testAppKeystore.txt"
-    defer: removeFile(filepath)
+    defer:
+      removeFile(filepath)

     # We generate a random identity credential (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
     var
       idTrapdoor = randomSeqByte(rng[], 32)
-      idNullifier =  randomSeqByte(rng[], 32)
+      idNullifier = randomSeqByte(rng[], 32)
       idSecretHash = randomSeqByte(rng[], 32)
       idCommitment = randomSeqByte(rng[], 32)

-    var idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
+    var idCredential = IdentityCredential(
+      idTrapdoor: idTrapdoor,
+      idNullifier: idNullifier,
+      idSecretHash: idSecretHash,
+      idCommitment: idCommitment,
+    )

-    var contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
+    var contract = MembershipContract(
+      chainId: "5", address: "0x0123456789012345678901234567890123456789"
+    )
     var index = MembershipIndex(1)

-    let membershipCredential = KeystoreMembership(membershipContract: contract,
-                                                  treeIndex: index,
-                                                  identityCredential: idCredential)
+    let membershipCredential = KeystoreMembership(
+      membershipContract: contract, treeIndex: index, identityCredential: idCredential
+    )

     let password = "%m0um0ucoW%"

-    let keystoreRes = addMembershipCredentials(path = filepath,
-                                               membership = membershipCredential,
-                                               password = password,
-                                               appInfo = testAppInfo)
+    let keystoreRes = addMembershipCredentials(
+      path = filepath,
+      membership = membershipCredential,
+      password = password,
+      appInfo = testAppInfo,
+    )

     check:
       keystoreRes.isOk()

   test "Add/retrieve credentials in keystore":
-
     let filepath = "./testAppKeystore.txt"
-    defer: removeFile(filepath)
+    defer:
+      removeFile(filepath)

     # We generate two random identity credentials (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
     var
       idTrapdoor = randomSeqByte(rng[], 32)
-      idNullifier =  randomSeqByte(rng[], 32)
+      idNullifier = randomSeqByte(rng[], 32)
       idSecretHash = randomSeqByte(rng[], 32)
       idCommitment = randomSeqByte(rng[], 32)
-      idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
+      idCredential = IdentityCredential(
+        idTrapdoor: idTrapdoor,
+        idNullifier: idNullifier,
+        idSecretHash: idSecretHash,
+        idCommitment: idCommitment,
+      )

     # We generate two distinct membership groups
-    var contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
+    var contract = MembershipContract(
+      chainId: "5", address: "0x0123456789012345678901234567890123456789"
+    )
     var index = MembershipIndex(1)
-    var membershipCredential = KeystoreMembership(membershipContract: contract,
-                                                  treeIndex: index,
-                                                  identityCredential: idCredential)
-
+    var membershipCredential = KeystoreMembership(
+      membershipContract: contract, treeIndex: index, identityCredential: idCredential
+    )

     let password = "%m0um0ucoW%"

     # We add credentials to the keystore. Note that only 3 credentials should be effectively added, since rlnMembershipCredentials3 is equal to membershipCredentials2
-    let keystoreRes = addMembershipCredentials(path = filepath,
-                                               membership = membershipCredential,
-                                               password = password,
-                                               appInfo = testAppInfo)
+    let keystoreRes = addMembershipCredentials(
+      path = filepath,
+      membership = membershipCredential,
+      password = password,
+      appInfo = testAppInfo,
+    )

     check:
       keystoreRes.isOk()

     # We test retrieval of credentials.
     var expectedMembership = membershipCredential
-    let membershipQuery = KeystoreMembership(membershipContract: contract,
-                                             treeIndex: index)
+    let membershipQuery =
+      KeystoreMembership(membershipContract: contract, treeIndex: index)

-    var recoveredCredentialsRes = getMembershipCredentials(path = filepath,
-                                                           password = password,
-                                                           query = membershipQuery,
-                                                           appInfo = testAppInfo)
+    var recoveredCredentialsRes = getMembershipCredentials(
+      path = filepath,
+      password = password,
+      query = membershipQuery,
+      appInfo = testAppInfo,
+    )

     check:
       recoveredCredentialsRes.isOk()
       recoveredCredentialsRes.get() == expectedMembership

   test "if the keystore contains only one credential, fetch that irrespective of treeIndex":
-
     let filepath = "./testAppKeystore.txt"
-    defer: removeFile(filepath)
+    defer:
+      removeFile(filepath)

     # We generate random identity credentials (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
     let
       idTrapdoor = randomSeqByte(rng[], 32)
-      idNullifier =  randomSeqByte(rng[], 32)
+      idNullifier = randomSeqByte(rng[], 32)
       idSecretHash = randomSeqByte(rng[], 32)
       idCommitment = randomSeqByte(rng[], 32)
-      idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
+      idCredential = IdentityCredential(
+        idTrapdoor: idTrapdoor,
+        idNullifier: idNullifier,
+        idSecretHash: idSecretHash,
+        idCommitment: idCommitment,
+      )

-    let contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
+    let contract = MembershipContract(
+      chainId: "5", address: "0x0123456789012345678901234567890123456789"
+    )
     let index = MembershipIndex(1)
-    let membershipCredential = KeystoreMembership(membershipContract: contract,
-                                                  treeIndex: index,
-                                                  identityCredential: idCredential)
+    let membershipCredential = KeystoreMembership(
+      membershipContract: contract, treeIndex: index, identityCredential: idCredential
+    )

     let password = "%m0um0ucoW%"

-    let keystoreRes = addMembershipCredentials(path = filepath,
-                                               membership = membershipCredential,
-                                               password = password,
-                                               appInfo = testAppInfo)
+    let keystoreRes = addMembershipCredentials(
+      path = filepath,
+      membership = membershipCredential,
+      password = password,
+      appInfo = testAppInfo,
+    )

     assert(keystoreRes.isOk(), $keystoreRes.error)
@@ -155,56 +175,73 @@ procSuite "Credentials test suite":
     let expectedMembership = membershipCredential
     let membershipQuery = KeystoreMembership(membershipContract: contract)

-    let recoveredCredentialsRes = getMembershipCredentials(path = filepath,
-                                                           password = password,
-                                                           query = membershipQuery,
-                                                           appInfo = testAppInfo)
+    let recoveredCredentialsRes = getMembershipCredentials(
+      path = filepath,
+      password = password,
+      query = membershipQuery,
+      appInfo = testAppInfo,
+    )

     assert(recoveredCredentialsRes.isOk(), $recoveredCredentialsRes.error)
-    check: recoveredCredentialsRes.get() == expectedMembership
+    check:
+      recoveredCredentialsRes.get() == expectedMembership

   test "if the keystore contains multiple credentials, then error out if treeIndex has not been passed in":
     let filepath = "./testAppKeystore.txt"
-    defer: removeFile(filepath)
+    defer:
+      removeFile(filepath)

     # We generate random identity credentials (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
     let
       idTrapdoor = randomSeqByte(rng[], 32)
-      idNullifier =  randomSeqByte(rng[], 32)
+      idNullifier = randomSeqByte(rng[], 32)
       idSecretHash = randomSeqByte(rng[], 32)
       idCommitment = randomSeqByte(rng[], 32)
-      idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
+      idCredential = IdentityCredential(
+        idTrapdoor: idTrapdoor,
+        idNullifier: idNullifier,
+        idSecretHash: idSecretHash,
+        idCommitment: idCommitment,
+      )

     # We generate two distinct membership groups
-    let contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
+    let contract = MembershipContract(
+      chainId: "5", address: "0x0123456789012345678901234567890123456789"
+    )
     let index = MembershipIndex(1)
-    var membershipCredential = KeystoreMembership(membershipContract: contract,
-                                                  treeIndex: index,
-                                                  identityCredential: idCredential)
+    var membershipCredential = KeystoreMembership(
+      membershipContract: contract, treeIndex: index, identityCredential: idCredential
+    )

     let password = "%m0um0ucoW%"

-    let keystoreRes = addMembershipCredentials(path = filepath,
-                                               membership = membershipCredential,
-                                               password = password,
-                                               appInfo = testAppInfo)
+    let keystoreRes = addMembershipCredentials(
+      path = filepath,
+      membership = membershipCredential,
+      password = password,
+      appInfo = testAppInfo,
+    )

     assert(keystoreRes.isOk(), $keystoreRes.error)

     membershipCredential.treeIndex = MembershipIndex(2)
-    let keystoreRes2 = addMembershipCredentials(path = filepath,
-                                                membership = membershipCredential,
-                                                password = password,
-                                                appInfo = testAppInfo)
+    let keystoreRes2 = addMembershipCredentials(
+      path = filepath,
+      membership = membershipCredential,
+      password = password,
+      appInfo = testAppInfo,
+    )

     assert(keystoreRes2.isOk(), $keystoreRes2.error)

     # We test retrieval of credentials.
     let membershipQuery = KeystoreMembership(membershipContract: contract)

-    let recoveredCredentialsRes = getMembershipCredentials(path = filepath,
-                                                           password = password,
-                                                           query = membershipQuery,
-                                                           appInfo = testAppInfo)
+    let recoveredCredentialsRes = getMembershipCredentials(
+      path = filepath,
+      password = password,
+      query = membershipQuery,
+      appInfo = testAppInfo,
+    )

     check:
       recoveredCredentialsRes.isErr()
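[Editor's note, not part of the patch] The keystore tests above all follow one add-then-query flow. Condensed, using the same calls and named arguments as in the hunks (the path and password are the test placeholders; membershipCredential, contract and testAppInfo are built as shown above):

let addRes = addMembershipCredentials(
  path = "./testAppKeystore.txt",
  membership = membershipCredential, # a KeystoreMembership, as constructed above
  password = "%m0um0ucoW%",
  appInfo = testAppInfo,
)
assert addRes.isOk()

# Query by contract only: succeeds when a single credential is stored, and
# errors out when several credentials share the contract but no treeIndex is
# given - exactly the two cases the tests assert.
let recovered = getMembershipCredentials(
  path = "./testAppKeystore.txt",
  password = "%m0um0ucoW%",
  query = KeystoreMembership(membershipContract: contract),
  appInfo = testAppInfo,
)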
"517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2", }, - "id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6", - "version" : 3 + "id": "3198bc9c-6672-5ab3-d995-4942343ae5b6", + "version": 3, }, "name": "test1", "password": "testpassword", - "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d" + "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d", }, %*{ "keyfile": { "version": 3, "crypto": { - "ciphertext": "ee75456c006b1e468133c5d2a916bacd3cf515ced4d9b021b5c59978007d1e87", + "ciphertext": + "ee75456c006b1e468133c5d2a916bacd3cf515ced4d9b021b5c59978007d1e87", "version": 1, "kdf": "pbkdf2", "kdfparams": { "dklen": 32, "c": 262144, "prf": "hmac-sha256", - "salt": "504490577620f64f43d73f29479c2cf0" + "salt": "504490577620f64f43d73f29479c2cf0", }, "mac": "196815708465de9af7504144a1360d08874fc3c30bb0e648ce88fbc36830d35d", "cipherparams": {"iv": "514ccc8c4fb3e60e5538e0cf1e27c233"}, - "cipher": "aes-128-ctr" + "cipher": "aes-128-ctr", }, - "id": "98d193c7-5174-4c7c-5345-c1daf95477b5" + "id": "98d193c7-5174-4c7c-5345-c1daf95477b5", }, "name": "python_generated_test_with_odd_iv", "password": "foo", - "priv": "0101010101010101010101010101010101010101010101010101010101010101" + "priv": "0101010101010101010101010101010101010101010101010101010101010101", }, %*{ "keyfile": { "version": 3, "crypto": { - "ciphertext": "d69313b6470ac1942f75d72ebf8818a0d484ac78478a132ee081cd954d6bd7a9", + "ciphertext": + "d69313b6470ac1942f75d72ebf8818a0d484ac78478a132ee081cd954d6bd7a9", "cipherparams": {"iv": "ffffffffffffffffffffffffffffffff"}, "kdf": "pbkdf2", "kdfparams": { "dklen": 32, "c": 262144, "prf": "hmac-sha256", - "salt": "c82ef14476014cbf438081a42709e2ed" + "salt": "c82ef14476014cbf438081a42709e2ed", }, "mac": "cf6bfbcc77142a22c4a908784b4a16f1023a1d0e2aff404c20158fa4f1587177", "cipher": "aes-128-ctr", - "version": 1 + "version": 1, }, - "id": "abb67040-8dbe-0dad-fc39-2b082ef0ee5f" + "id": "abb67040-8dbe-0dad-fc39-2b082ef0ee5f", }, "name": "evilnonce", "password": "bar", - "priv": "0202020202020202020202020202020202020202020202020202020202020202" + "priv": "0202020202020202020202020202020202020202020202020202020202020202", }, %*{ "keyfile": { - "version" : 3, - "crypto" : { - "cipher" : "aes-128-ctr", - "cipherparams" : { - "iv" : "83dbcc02d8ccb40e466191a123791e0e" - }, - "ciphertext" : "d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c", - "kdf" : "scrypt", - "kdfparams" : { - "dklen" : 32, - "n" : 262144, - "r" : 1, - "p" : 8, - "salt" : "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19" - }, - "mac" : "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097" + "version": 3, + "crypto": { + "cipher": "aes-128-ctr", + "cipherparams": {"iv": "83dbcc02d8ccb40e466191a123791e0e"}, + "ciphertext": + "d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "n": 262144, + "r": 1, + "p": 8, + "salt": "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19", + }, + "mac": "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097", }, - "id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6" + "id": "3198bc9c-6672-5ab3-d995-4942343ae5b6", }, - "name" : "test2", + "name": "test2", "password": "testpassword", - "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d" + "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d", }, %*{ "keyfile": { "version": 3, "address": "460121576cc7df020759730751f92bd62fd78dd6", "crypto": { 
- "ciphertext": "54ae683c6287fa3d58321f09d56e26d94e58a00d4f90bdd95782ae0e4aab618b", - "cipherparams": { - "iv": "681679cdb125bba9495d068b002816a4" - }, - "cipher": "aes-128-ctr", - "kdf": "scrypt", - "kdfparams": { - "dklen": 32, - "salt": "c3407f363fce02a66e3c4bf4a8f6b7da1c1f54266cef66381f0625c251c32785", - "n": 8192, - "r": 8, - "p": 1 - }, - "mac": "dea6bdf22a2f522166ed82808c22a6311e84c355f4bbe100d4260483ff675a46" + "ciphertext": + "54ae683c6287fa3d58321f09d56e26d94e58a00d4f90bdd95782ae0e4aab618b", + "cipherparams": {"iv": "681679cdb125bba9495d068b002816a4"}, + "cipher": "aes-128-ctr", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "salt": "c3407f363fce02a66e3c4bf4a8f6b7da1c1f54266cef66381f0625c251c32785", + "n": 8192, + "r": 8, + "p": 1, + }, + "mac": "dea6bdf22a2f522166ed82808c22a6311e84c355f4bbe100d4260483ff675a46", }, - "id": "0eb785e0-340a-4290-9c42-90a11973ee47" + "id": "0eb785e0-340a-4290-9c42-90a11973ee47", }, "name": "mycrypto", "password": "foobartest121", - "priv": "05a4d3eb46c742cb8850440145ce70cbc80b59f891cf5f50fd3e9c280b50c4e4" + "priv": "05a4d3eb46c742cb8850440145ce70cbc80b59f891cf5f50fd3e9c280b50c4e4", }, %*{ "keyfile": { - "crypto": { - "cipher": "aes-128-ctr", - "cipherparams": { - "iv": "7e7b02d2b4ef45d6c98cb885e75f48d5", - }, - "ciphertext": "a7a5743a6c7eb3fa52396bd3fd94043b79075aac3ccbae8e62d3af94db00397c", - "kdf": "scrypt", - "kdfparams": { - "dklen": 32, - "n": 8192, - "p": 1, - "r": 8, - "salt": "247797c7a357b707a3bdbfaa55f4c553756bca09fec20ddc938e7636d21e4a20", - }, - "mac": "5a3ba5bebfda2c384586eda5fcda9c8397d37c9b0cc347fea86525cf2ea3a468", + "crypto": { + "cipher": "aes-128-ctr", + "cipherparams": {"iv": "7e7b02d2b4ef45d6c98cb885e75f48d5"}, + "ciphertext": + "a7a5743a6c7eb3fa52396bd3fd94043b79075aac3ccbae8e62d3af94db00397c", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "n": 8192, + "p": 1, + "r": 8, + "salt": "247797c7a357b707a3bdbfaa55f4c553756bca09fec20ddc938e7636d21e4a20", }, - "address": "0b6f2de3dee015a95d3330dcb7baf8e08aa0112d", - "id": "3c8efdd6-d538-47ec-b241-36783d3418b9", - "version": 3 + "mac": "5a3ba5bebfda2c384586eda5fcda9c8397d37c9b0cc347fea86525cf2ea3a468", + }, + "address": "0b6f2de3dee015a95d3330dcb7baf8e08aa0112d", + "id": "3c8efdd6-d538-47ec-b241-36783d3418b9", + "version": 3, }, "password": "moomoocow", "priv": "21eac69b9a52f466bfe9047f0f21c9caf3a5cdaadf84e2750a9b3265d450d481", - "name": "eth-keyfile-conftest" - } + "name": "eth-keyfile-conftest", + }, ] test "Testing nim-eth test vectors": - var secret: KfResult[seq[byte]] var expectedSecret: seq[byte] - for i in 0.. 
 bool:
+    storedMsg.all do(item: auto) -> bool:
       let (pubsubTopic, msg, _, _, hash) = item
-      msg.contentTopic == contentTopic and
-      pubsubTopic == DefaultPubsubTopic and
-      hash == msgHash
+      msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and
+        hash == msgHash

   ## Cleanup
   (waitFor driver.close()).expect("driver to close")
diff --git a/tests/waku_archive/test_driver_sqlite_query.nim b/tests/waku_archive/test_driver_sqlite_query.nim
index e20193276..f9d6a2746 100644
--- a/tests/waku_archive/test_driver_sqlite_query.nim
+++ b/tests/waku_archive/test_driver_sqlite_query.nim
@@ -1,10 +1,7 @@
 {.used.}

 import
-  std/[options, sequtils, random, algorithm],
-  testutils/unittests,
-  chronos,
-  chronicles
+  std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles

 import
   ../../../waku/common/databases/db_sqlite,
@@ -16,47 +13,48 @@ import
   ../testlib/wakucore,
   ../waku_archive/archive_utils

-
 logScope:
   topics = "test archive _driver"

-
 # Initialize the random number generator
 common.randomize()

-
 suite "SQLite driver - query by content topic":
-
   asyncTest "no content topic":
     ## Given
     const contentTopic = "test-content-topic"

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      fakeWakuMessage(@[byte 0], contentTopic=DefaultContentTopic, ts=ts(00)),
-      fakeWakuMessage(@[byte 1], contentTopic=DefaultContentTopic, ts=ts(10)),
-      fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20)),
-      fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30)),
-      fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40)),
-
-      fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50)),
-      fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60)),
-      fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70)),
-    ]
+    let expected =
+      @[
+        fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)),
+        fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)),
+        fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)),
+        fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)),
+        fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)),
+        fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)),
+        fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)),
+        fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload)
+    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
-      require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          DefaultPubsubTopic,
+          msg,
+          computeDigest(msg),
+          computeMessageHash(DefaultPubsubTopic, msg),
+          msg.timestamp,
+        )
+      ).isOk()

     ## When
-    let res = await driver.getMessages(
-      maxPageSize=5,
-      ascendingOrder=true
-    )
+    let res = await driver.getMessages(maxPageSize = 5, ascendingOrder = true)

     ## Then
     check:
@@ -64,7 +62,7 @@ suite "SQLite driver - query by content topic":

     let filteredMessages = res.tryGet().mapIt(it[1])
     check:
-      filteredMessages == expected[0..4]
+      filteredMessages == expected[0 .. 4]

     ## Cleanup
     (await driver.close()).expect("driver to close")
@@ -75,31 +73,36 @@ suite "SQLite driver - query by content topic":

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      fakeWakuMessage(@[byte 0], ts=ts(00)),
-      fakeWakuMessage(@[byte 1], ts=ts(10)),
-
-      fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20)),
-      fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30)),
-
-      fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40)),
-      fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50)),
-      fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60)),
-      fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70)),
-    ]
+    let expected =
+      @[
+        fakeWakuMessage(@[byte 0], ts = ts(00)),
+        fakeWakuMessage(@[byte 1], ts = ts(10)),
+        fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)),
+        fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)),
+        fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)),
+        fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)),
+        fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)),
+        fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload)
+    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
-      require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          DefaultPubsubTopic,
+          msg,
+          computeDigest(msg),
+          computeMessageHash(DefaultPubsubTopic, msg),
+          msg.timestamp,
+        )
+      ).isOk()

     ## When
     let res = await driver.getMessages(
-      contentTopic= @[contentTopic],
-      maxPageSize=2,
-      ascendingOrder=true
+      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
     )

     ## Then
@@ -108,7 +111,7 @@ suite "SQLite driver - query by content topic":

     let filteredMessages = res.tryGet().mapIt(it[1])
     check:
-      filteredMessages == expected[2..3]
+      filteredMessages == expected[2 .. 3]

     ## Cleanup
     (await driver.close()).expect("driver to close")
@@ -119,31 +122,36 @@ suite "SQLite driver - query by content topic":

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      fakeWakuMessage(@[byte 0], ts=ts(00)),
-      fakeWakuMessage(@[byte 1], ts=ts(10)),
-
-      fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20)),
-      fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30)),
-      fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40)),
-      fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50)),
-
-      fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60)),
-      fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70)),
-    ]
+    let expected =
+      @[
+        fakeWakuMessage(@[byte 0], ts = ts(00)),
+        fakeWakuMessage(@[byte 1], ts = ts(10)),
+        fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)),
+        fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)),
+        fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)),
+        fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)),
+        fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)),
+        fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload)
+    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
-      require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          DefaultPubsubTopic,
+          msg,
+          computeDigest(msg),
+          computeMessageHash(DefaultPubsubTopic, msg),
+          msg.timestamp,
+        )
+      ).isOk()

     ## When
     let res = await driver.getMessages(
-      contentTopic= @[contentTopic],
-      maxPageSize=2,
-      ascendingOrder=false
+      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false
     )

     ## Then
@@ -152,7 +160,7 @@ suite "SQLite driver - query by content topic":

     let filteredMessages = res.tryGet().mapIt(it[1])
     check:
-      filteredMessages == expected[6..7].reversed()
+      filteredMessages == expected[6 .. 7].reversed()

     ## Cleanup
     (await driver.close()).expect("driver to close")
@@ -165,31 +173,38 @@ suite "SQLite driver - query by content topic":

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      fakeWakuMessage(@[byte 0], ts=ts(00)),
-      fakeWakuMessage(@[byte 1], ts=ts(10)),
-
-      fakeWakuMessage(@[byte 2], contentTopic=contentTopic1, ts=ts(20)),
-      fakeWakuMessage(@[byte 3], contentTopic=contentTopic2, ts=ts(30)),
-
-      fakeWakuMessage(@[byte 4], contentTopic=contentTopic3, ts=ts(40)),
-      fakeWakuMessage(@[byte 5], contentTopic=contentTopic1, ts=ts(50)),
-      fakeWakuMessage(@[byte 6], contentTopic=contentTopic2, ts=ts(60)),
-      fakeWakuMessage(@[byte 7], contentTopic=contentTopic3, ts=ts(70)),
-    ]
+    let expected =
+      @[
+        fakeWakuMessage(@[byte 0], ts = ts(00)),
+        fakeWakuMessage(@[byte 1], ts = ts(10)),
+        fakeWakuMessage(@[byte 2], contentTopic = contentTopic1, ts = ts(20)),
+        fakeWakuMessage(@[byte 3], contentTopic = contentTopic2, ts = ts(30)),
+        fakeWakuMessage(@[byte 4], contentTopic = contentTopic3, ts = ts(40)),
+        fakeWakuMessage(@[byte 5], contentTopic = contentTopic1, ts = ts(50)),
+        fakeWakuMessage(@[byte 6], contentTopic = contentTopic2, ts = ts(60)),
+        fakeWakuMessage(@[byte 7], contentTopic = contentTopic3, ts = ts(70)),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload)
+    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
-      require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          DefaultPubsubTopic,
+          msg,
+          computeDigest(msg),
+          computeMessageHash(DefaultPubsubTopic, msg),
+          msg.timestamp,
+        )
+      ).isOk()

     ## When
     let res = await driver.getMessages(
-      contentTopic= @[contentTopic1, contentTopic2],
-      maxPageSize=2,
-      ascendingOrder=true
+      contentTopic = @[contentTopic1, contentTopic2],
+      maxPageSize = 2,
+      ascendingOrder = true,
     )

     ## Then
@@ -198,7 +213,7 @@ suite "SQLite driver - query by content topic":

     let filteredMessages = res.tryGet().mapIt(it[1])
     check:
-      filteredMessages == expected[2..3]
+      filteredMessages == expected[2 .. 3]

     ## Cleanup
     (await driver.close()).expect("driver to close")
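[Editor's note, not part of the patch] Every test in this suite is the same store-then-query shape. Distilled, with the driver API exactly as it appears in these hunks (values are the tests' own placeholders):

let driver = newSqliteArchiveDriver()
let msg = fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00))
require (
  await driver.put(
    DefaultPubsubTopic,
    msg,
    computeDigest(msg),
    computeMessageHash(DefaultPubsubTopic, msg),
    msg.timestamp,
  )
).isOk()
let res = await driver.getMessages(
  contentTopic = @[DefaultContentTopic], maxPageSize = 5, ascendingOrder = true
)
check res.isOk()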
@@ -209,26 +224,33 @@ suite "SQLite driver - query by content topic":

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      fakeWakuMessage(@[byte 0], contentTopic=DefaultContentTopic, ts=ts(00)),
-      fakeWakuMessage(@[byte 1], contentTopic=DefaultContentTopic, ts=ts(10)),
-      fakeWakuMessage(@[byte 2], contentTopic=DefaultContentTopic, ts=ts(20)),
-      fakeWakuMessage(@[byte 3], contentTopic=DefaultContentTopic, ts=ts(30)),
-      fakeWakuMessage(@[byte 4], contentTopic=DefaultContentTopic, ts=ts(40)),
-    ]
+    let expected =
+      @[
+        fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)),
+        fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)),
+        fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic, ts = ts(20)),
+        fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic, ts = ts(30)),
+        fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic, ts = ts(40)),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload)
+    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
-      require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          DefaultPubsubTopic,
+          msg,
+          computeDigest(msg),
+          computeMessageHash(DefaultPubsubTopic, msg),
+          msg.timestamp,
+        )
+      ).isOk()

     ## When
     let res = await driver.getMessages(
-      contentTopic= @[contentTopic],
-      maxPageSize=2,
-      ascendingOrder=true
+      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
     )

     ## Then
@@ -248,15 +270,23 @@ suite "SQLite driver - query by content topic":

     let driver = newSqliteArchiveDriver()

-    for t in 0..<40:
-      let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts=ts(t))
-      require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk()
+    for t in 0 ..< 40:
+      let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t))
+      require (
+        await driver.put(
+          DefaultPubsubTopic,
+          msg,
+          computeDigest(msg),
+          computeMessageHash(DefaultPubsubTopic, msg),
+          msg.timestamp,
+        )
+      ).isOk()

     ## When
     let res = await driver.getMessages(
-      contentTopic= @[DefaultContentTopic],
-      maxPageSize=pageSize,
-      ascendingOrder=true
+      contentTopic = @[DefaultContentTopic],
+      maxPageSize = pageSize,
+      ascendingOrder = true,
     )

     ## Then
@@ -270,9 +300,7 @@ suite "SQLite driver - query by content topic":
     ## Cleanup
     (await driver.close()).expect("driver to close")

-
 suite "SQLite driver - query by pubsub topic":
-
   asyncTest "pubsub topic":
     ## Given
     const contentTopic = "test-content-topic"
@@ -280,31 +308,52 @@ suite "SQLite driver - query by pubsub topic":

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30))),
-
-      (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40))),
-      (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50))),
-      (pubsubTopic, fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60))),
-      (pubsubTopic, fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70))),
-    ]
+    let expected =
+      @[
+        (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))),
+        (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))),
+        (
+          DefaultPubsubTopic,
+          fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)),
+        ),
+        (
+          DefaultPubsubTopic,
+          fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)),
+        ),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload)
+    debug "randomized message insertion sequence",
+      sequence = messages.mapIt(it[1].payload)

     for row in messages:
       let (topic, msg) = row
-      require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
+        )
+      ).isOk()

     ## When
     let res = await driver.getMessages(
-      pubsubTopic=some(pubsubTopic),
-      maxPageSize=2,
-      ascendingOrder=true
+      pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true
     )

     ## Then
@@ -314,7 +363,7 @@ suite "SQLite driver - query by pubsub topic":
     let expectedMessages = expected.mapIt(it[1])
     let filteredMessages = res.tryGet().mapIt(it[1])
     check:
-      filteredMessages == expectedMessages[4..5]
+      filteredMessages == expectedMessages[4 .. 5]

     ## Cleanup
     (await driver.close()).expect("driver to close")
@@ -326,31 +375,51 @@ suite "SQLite driver - query by pubsub topic":

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10))),
-
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30))),
-      (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40))),
-      (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50))),
-      (pubsubTopic, fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60))),
-      (pubsubTopic, fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70))),
-    ]
+    let expected =
+      @[
+        (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))),
+        (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))),
+        (
+          DefaultPubsubTopic,
+          fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)),
+        ),
+        (
+          DefaultPubsubTopic,
+          fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)),
+        ),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload)
+    debug "randomized message insertion sequence",
+      sequence = messages.mapIt(it[1].payload)

     for row in messages:
       let (topic, msg) = row
-      require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
+        )
+      ).isOk()

     ## When
-    let res = await driver.getMessages(
-      maxPageSize=2,
-      ascendingOrder=true
-    )
+    let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true)

     ## Then
     check:
@@ -359,7 +428,7 @@ suite "SQLite driver - query by pubsub topic":
     let expectedMessages = expected.mapIt(it[1])
     let filteredMessages = res.tryGet().mapIt(it[1])
     check:
-      filteredMessages == expectedMessages[0..1]
+      filteredMessages == expectedMessages[0 .. 1]

     ## Cleanup
     (await driver.close()).expect("driver to close")
@@ -371,33 +440,55 @@ suite "SQLite driver - query by pubsub topic":

     let driver = newSqliteArchiveDriver()

-    let expected = @[
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20))),
-      (DefaultPubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30))),
-
-      (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40))),
-      (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50))),
-
-      (pubsubTopic, fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60))),
-      (pubsubTopic, fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70))),
-    ]
+    let expected =
+      @[
+        (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))),
+        (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))),
+        (
+          DefaultPubsubTopic,
+          fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)),
+        ),
+        (
+          DefaultPubsubTopic,
+          fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)),
+        ),
+        (
+          pubsubTopic,
+          fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)),
+        ),
+      ]
     var messages = expected

     shuffle(messages)
-    debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload)
+    debug "randomized message insertion sequence",
+      sequence = messages.mapIt(it[1].payload)

     for row in messages:
       let (topic, msg) = row
-      require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk()
+      require (
+        await driver.put(
+          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
+        )
+      ).isOk()

     ## When
     let res = await driver.getMessages(
-      contentTopic= @[contentTopic],
-      pubsubTopic=some(pubsubTopic),
-      maxPageSize=2,
-      ascendingOrder=true
+      contentTopic = @[contentTopic],
+      pubsubTopic = some(pubsubTopic),
+      maxPageSize = 2,
+      ascendingOrder = true,
     )

     ## Then
@@ -407,47 +498,51 @@ suite "SQLite driver - query by pubsub topic":
     let expectedMessages = expected.mapIt(it[1])
     let filteredMessages = res.tryGet().mapIt(it[1])
     check:
-      filteredMessages == expectedMessages[4..5]
+      filteredMessages == expectedMessages[4 .. 5]
5] ## Cleanup (await driver.close()).expect("driver to close") - suite "SQLite driver - query by cursor": - asyncTest "only cursor": ## Given const contentTopic = "test-content-topic" let driver = newSqliteArchiveDriver() - let expected = @[ - fakeWakuMessage(@[byte 0], ts=ts(00)), - fakeWakuMessage(@[byte 1], ts=ts(10)), - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40)), # << cursor - - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60)), - - fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) ## When let res = await driver.getMessages( - cursor=some(cursor), - maxPageSize=2, - ascendingOrder=true + cursor = some(cursor), maxPageSize = 2, ascendingOrder = true ) ## Then @@ -456,7 +551,7 @@ suite "SQLite driver - query by cursor": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[5..6] + filteredMessages == expected[5 .. 
6] ## Cleanup (await driver.close()).expect("driver to close") @@ -467,33 +562,39 @@ suite "SQLite driver - query by cursor": let driver = newSqliteArchiveDriver() - let expected = @[ - fakeWakuMessage(@[byte 0], ts=ts(00)), - fakeWakuMessage(@[byte 1], ts=ts(10)), - - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30)), - - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40)), # << cursor - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60)), - fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) ## When let res = await driver.getMessages( - cursor=some(cursor), - maxPageSize=2, - ascendingOrder=false + cursor = some(cursor), maxPageSize = 2, ascendingOrder = false ) ## Then @@ -502,7 +603,7 @@ suite "SQLite driver - query by cursor": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[2..3].reversed() + filteredMessages == expected[2 .. 
3].reversed() ## Cleanup (await driver.close()).expect("driver to close") @@ -513,32 +614,42 @@ suite "SQLite driver - query by cursor": let driver = newSqliteArchiveDriver() - let expected = @[ - fakeWakuMessage(@[byte 0], ts=ts(00)), - fakeWakuMessage(@[byte 1], ts=ts(10)), - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40)), # << cursor - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60)), - fakeWakuMessage(@[byte 7], ts=ts(70)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], ts = ts(70)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - cursor=some(cursor), - maxPageSize=10, - ascendingOrder=true + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, ) ## Then @@ -547,7 +658,7 @@ suite "SQLite driver - query by cursor": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[5..6] + filteredMessages == expected[5 .. 
6] ## Cleanup (await driver.close()).expect("driver to close") @@ -558,32 +669,42 @@ suite "SQLite driver - query by cursor": let driver = newSqliteArchiveDriver() - let expected = @[ - fakeWakuMessage(@[byte 0], ts=ts(00)), - fakeWakuMessage(@[byte 1], ts=ts(10)), - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40)), - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60)), # << cursor - fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - cursor=some(cursor), - maxPageSize=10, - ascendingOrder=false + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, ) ## Then @@ -592,7 +713,7 @@ suite "SQLite driver - query by cursor": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[2..5].reversed() + filteredMessages == expected[2 .. 
5].reversed() ## Cleanup (await driver.close()).expect("driver to close") @@ -605,37 +726,81 @@ suite "SQLite driver - query by cursor": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin))), # << cursor - - (pubsubTopic, fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70, timeOrigin))), - - (DefaultPubsubTopic, fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin))), - ] + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) for row in messages: let (topic, msg) = row - require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() let cursor = computeArchiveCursor(expected[5][0], expected[5][1]) ## When let res = await driver.getMessages( - pubsubTopic=some(pubsubTopic), - cursor=some(cursor), - maxPageSize=10, - ascendingOrder=true + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, ) ## Then @@ -645,7 +810,7 @@ suite "SQLite driver - query by cursor": let expectedMessages = expected.mapIt(it[1]) let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expectedMessages[6..7] + filteredMessages == expectedMessages[6 .. 
7] ## Cleanup (await driver.close()).expect("driver to close") @@ -658,37 +823,81 @@ suite "SQLite driver - query by cursor": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin))), - - (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin))), - - (pubsubTopic, fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin))), # << cursor - (pubsubTopic, fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin))), - ] + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) for row in messages: let (topic, msg) = row - require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() let cursor = computeArchiveCursor(expected[6][0], expected[6][1]) ## When let res = await driver.getMessages( - pubsubTopic=some(pubsubTopic), - cursor=some(cursor), - maxPageSize=10, - ascendingOrder=false + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, ) ## Then @@ -698,14 +907,12 @@ suite "SQLite driver - query by cursor": let expectedMessages = expected.mapIt(it[1]) let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expectedMessages[4..5].reversed() + filteredMessages == expectedMessages[4 .. 
5].reversed() ## Cleanup (await driver.close()).expect("driver to close") - suite "SQLite driver - query by time range": - asyncTest "start time only": ## Given const contentTopic = "test-content-topic" @@ -713,29 +920,36 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - fakeWakuMessage(@[byte 0], contentTopic=contentTopic, ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 1], contentTopic=contentTopic, ts=ts(10, timeOrigin)), - # start_time - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() ## When let res = await driver.getMessages( - startTime=some(ts(15, timeOrigin)), - maxPageSize=10, - ascendingOrder=true + startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true ) ## Then @@ -744,7 +958,7 @@ suite "SQLite driver - query by time range": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[2..6] + filteredMessages == expected[2 .. 
6] ## Cleanup (await driver.close()).expect("driver to close") @@ -756,29 +970,36 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - fakeWakuMessage(@[byte 0], contentTopic=contentTopic, ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 1], contentTopic=contentTopic, ts=ts(10, timeOrigin)), - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin)), - # end_time - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() ## When let res = await driver.getMessages( - endTime=some(ts(45, timeOrigin)), - maxPageSize=10, - ascendingOrder=true + endTime = some(ts(45, timeOrigin)), maxPageSize = 10, ascendingOrder = true ) ## Then @@ -787,7 +1008,7 @@ suite "SQLite driver - query by time range": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[0..4] + filteredMessages == expected[0 .. 
4] ## Cleanup (await driver.close()).expect("driver to close") @@ -800,35 +1021,81 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin))), - # start_time - (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin))), - # end_time - (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin))), - ] + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # start_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) for row in messages: let (topic, msg) = row - require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() ## When let res = await driver.getMessages( - startTime=some(ts(15, timeOrigin)), - endTime=some(ts(45, timeOrigin)), - maxPageSize=10, - ascendingOrder=true + startTime = some(ts(15, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, ) ## Then @@ -838,7 +1105,7 @@ suite "SQLite driver - query by time range": let expectedMessages = expected.mapIt(it[1]) let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expectedMessages[2..4] + filteredMessages == expectedMessages[2 .. 
4] ## Cleanup (await driver.close()).expect("driver to close") @@ -850,32 +1117,41 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - fakeWakuMessage(@[byte 0], contentTopic=contentTopic, ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 1], contentTopic=contentTopic, ts=ts(10, timeOrigin)), - # start_time - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin)), - # end_time - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - startTime=some(ts(45, timeOrigin)), - endTime=some(ts(15, timeOrigin)), - maxPageSize=2, - ascendingOrder=true + contentTopic = @[contentTopic], + startTime = some(ts(45, timeOrigin)), + endTime = some(ts(15, timeOrigin)), + maxPageSize = 2, + ascendingOrder = true, ) check: @@ -895,30 +1171,39 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - fakeWakuMessage(@[byte 0], contentTopic=contentTopic, ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 1], contentTopic=contentTopic, ts=ts(10, timeOrigin)), - # start_time - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], 
contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - startTime=some(ts(15, timeOrigin)), - maxPageSize=10, - ascendingOrder=true + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, ) check: @@ -926,7 +1211,7 @@ suite "SQLite driver - query by time range": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[2..6] + filteredMessages == expected[2 .. 6] ## Cleanup (await driver.close()).expect("driver to close") @@ -938,33 +1223,42 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - fakeWakuMessage(@[byte 0], contentTopic=contentTopic, ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 1], contentTopic=contentTopic, ts=ts(10, timeOrigin)), - # start_time - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin)), - fakeWakuMessage(@[byte 7], ts=ts(70, timeOrigin)), - fakeWakuMessage(@[byte 8], ts=ts(80, timeOrigin)), - fakeWakuMessage(@[byte 9], ts=ts(90, timeOrigin)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - startTime=some(ts(15, 
timeOrigin)), - maxPageSize=10, - ascendingOrder=false + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, ) check: @@ -972,7 +1266,7 @@ suite "SQLite driver - query by time range": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[2..6].reversed() + filteredMessages == expected[2 .. 6].reversed() ## Cleanup (await driver.close()).expect("driver to close") @@ -984,36 +1278,46 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - fakeWakuMessage(@[byte 0], contentTopic=contentTopic, ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 1], contentTopic=contentTopic, ts=ts(10, timeOrigin)), - # start_time - fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin)), # << cursor - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin)), - fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70, timeOrigin)), - fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin)), - fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[3]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - cursor=some(cursor), - startTime=some(ts(15, timeOrigin)), - maxPageSize=10, - ascendingOrder=true + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, ) check: @@ -1021,7 +1325,7 @@ suite "SQLite driver - query by time range": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[4..9] + filteredMessages == expected[4 .. 
9] ## Cleanup (await driver.close()).expect("driver to close") @@ -1033,36 +1337,46 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - fakeWakuMessage(@[byte 0], contentTopic=contentTopic, ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 1], contentTopic=contentTopic, ts=ts(10, timeOrigin)), - # start_time - fakeWakuMessage(@[byte 2], ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 5], ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 6], contentTopic=contentTopic, ts=ts(60, timeOrigin)), # << cursor - fakeWakuMessage(@[byte 7], contentTopic=contentTopic, ts=ts(70, timeOrigin)), - fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin)), - fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin)), - ] + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) for msg in messages: - require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - cursor=some(cursor), - startTime=some(ts(15, timeOrigin)), - maxPageSize=10, - ascendingOrder=false + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, ) check: @@ -1070,7 +1384,7 @@ suite "SQLite driver - query by time range": let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expected[3..4].reversed() + filteredMessages == expected[3 .. 
4].reversed() ## Cleanup (await driver.close()).expect("driver to close") @@ -1083,40 +1397,77 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - # start_time - (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin))), # << cursor - (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin))), - # end_time - (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 6], ts=ts(60, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 7], ts=ts(70, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin))), - ] + let expected = + @[ + # start_time + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) for row in messages: let (topic, msg) = row - require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[1][1]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - pubsubTopic=some(pubsubTopic), - cursor=some(cursor), - startTime=some(ts(0, timeOrigin)), - endTime=some(ts(45, timeOrigin)), - maxPageSize=10, - ascendingOrder=true + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(0, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, ) check: @@ -1125,7 +1476,7 @@ suite "SQLite driver - query by time range": let expectedMessages = expected.mapIt(it[1]) let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expectedMessages[3..4] + filteredMessages == expectedMessages[3 .. 
4] ## Cleanup (await driver.close()).expect("driver to close") @@ -1138,40 +1489,76 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin))), - # start_time - (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 6], ts=ts(60, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 7], ts=ts(70, timeOrigin))), # << cursor - (DefaultPubsubTopic, fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin))), - # end_time - (DefaultPubsubTopic, fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin))), - ] + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) for row in messages: let (topic, msg) = row - require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() let cursor = computeArchiveCursor(expected[7][0], expected[7][1]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - pubsubTopic=some(pubsubTopic), - cursor=some(cursor), - startTime=some(ts(35, timeOrigin)), - endTime=some(ts(85, timeOrigin)), - maxPageSize=10, - ascendingOrder=false + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, ) check: @@ -1180,7 +1567,7 @@ suite "SQLite driver - query by time range": let expectedMessages = expected.mapIt(it[1]) let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expectedMessages[4..5].reversed() + filteredMessages == expectedMessages[4 .. 
5].reversed() ## Cleanup (await driver.close()).expect("driver to close") @@ -1193,40 +1580,77 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin))), # << cursor - (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin))), - # start_time - (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 6], ts=ts(60, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 7], ts=ts(70, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin))), - # end_time - (DefaultPubsubTopic, fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin))), - ] + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) for row in messages: let (topic, msg) = row - require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() let cursor = computeArchiveCursor(expected[1][0], expected[1][1]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - pubsubTopic=some(pubsubTopic), - cursor=some(cursor), - startTime=some(ts(35, timeOrigin)), - endTime=some(ts(85, timeOrigin)), - maxPageSize=10, - ascendingOrder=true + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, ) ## Then @@ -1236,7 +1660,7 @@ suite "SQLite driver - query by time range": let expectedMessages = expected.mapIt(it[1]) let filteredMessages = res.tryGet().mapIt(it[1]) check: - filteredMessages == expectedMessages[4..5] + filteredMessages == expectedMessages[4 .. 
5] ## Cleanup (await driver.close()).expect("driver to close") @@ -1249,40 +1673,77 @@ suite "SQLite driver - query by time range": let driver = newSqliteArchiveDriver() let timeOrigin = now() - let expected = @[ - (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin))), # << cursor - (DefaultPubsubTopic, fakeWakuMessage(@[byte 2], contentTopic=contentTopic, ts=ts(20, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 3], contentTopic=contentTopic, ts=ts(30, timeOrigin))), - # start_time - (pubsubTopic, fakeWakuMessage(@[byte 4], contentTopic=contentTopic, ts=ts(40, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 5], contentTopic=contentTopic, ts=ts(50, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 6], ts=ts(60, timeOrigin))), - (pubsubTopic, fakeWakuMessage(@[byte 7], ts=ts(70, timeOrigin))), - (DefaultPubsubTopic, fakeWakuMessage(@[byte 8], contentTopic=contentTopic, ts=ts(80, timeOrigin))), - # end_time - (DefaultPubsubTopic, fakeWakuMessage(@[byte 9], contentTopic=contentTopic, ts=ts(90, timeOrigin))), - ] + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] var messages = expected shuffle(messages) - debug "randomized message insertion sequence", sequence=messages.mapIt(it[1].payload) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) for row in messages: let (topic, msg) = row - require (await driver.put(topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp)).isOk() + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() let cursor = computeArchiveCursor(expected[1][0], expected[1][1]) ## When let res = await driver.getMessages( - contentTopic= @[contentTopic], - pubsubTopic=some(pubsubTopic), - cursor=some(cursor), - startTime=some(ts(35, timeOrigin)), - endTime=some(ts(85, timeOrigin)), - maxPageSize=10, - ascendingOrder=false, + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, ) ## Then diff --git a/tests/waku_archive/test_retention_policy.nim b/tests/waku_archive/test_retention_policy.nim index 26137bbab..f0d6f82e4 100644 --- a/tests/waku_archive/test_retention_policy.nim +++ b/tests/waku_archive/test_retention_policy.nim @@ -1,10 +1,6 @@ {.used.} -import - std/[sequtils,times], - 
stew/results, - testutils/unittests, - chronos +import std/[sequtils, times], stew/results, testutils/unittests, chronos import ../../../waku/common/databases/db_sqlite, ../../../waku/waku_core, @@ -18,9 +14,7 @@ import ../testlib/common, ../testlib/wakucore - suite "Waku Archive - Retention policy": - test "capacity retention policy - windowed message deletion": ## Given let @@ -29,14 +23,25 @@ suite "Waku Archive - Retention policy": let driver = newSqliteArchiveDriver() - let retentionPolicy: RetentionPolicy = CapacityRetentionPolicy.new(capacity=capacity) + let retentionPolicy: RetentionPolicy = + CapacityRetentionPolicy.new(capacity = capacity) var putFutures = newSeq[Future[ArchiveDriverResult[void]]]() ## When - for i in 1..capacity+excess: - let msg = fakeWakuMessage(payload= @[byte i], contentTopic=DefaultContentTopic, ts=Timestamp(i)) - putFutures.add(driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)) - + for i in 1 .. capacity + excess: + let msg = fakeWakuMessage( + payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i) + ) + putFutures.add( + driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ) + discard waitFor allFinished(putFutures) require (waitFor retentionPolicy.execute(driver)).isOk() @@ -51,17 +56,17 @@ suite "Waku Archive - Retention policy": ## Cleanup (waitFor driver.close()).expect("driver to close") - + test "size retention policy - windowed message deletion": ## Given let # in bytes - sizeLimit:int64 = 52428 + sizeLimit: int64 = 52428 excess = 325 let driver = newSqliteArchiveDriver() - let retentionPolicy: RetentionPolicy = SizeRetentionPolicy.new(size=sizeLimit) + let retentionPolicy: RetentionPolicy = SizeRetentionPolicy.new(size = sizeLimit) var putFutures = newSeq[Future[ArchiveDriverResult[void]]]() # make sure that the db is empty to before test begins @@ -69,16 +74,26 @@ suite "Waku Archive - Retention policy": # if there are messages in db, empty them if storedMsg.len > 0: let now = getNanosecondTime(getTime().toUnixFloat()) - require (waitFor driver.deleteMessagesOlderThanTimestamp(ts=now)).isOk() + require (waitFor driver.deleteMessagesOlderThanTimestamp(ts = now)).isOk() require (waitFor driver.performVacuum()).isOk() ## When ## # create a number of messages so that the size of the DB overshoots - for i in 1..excess: - let msg = fakeWakuMessage(payload= @[byte i], contentTopic=DefaultContentTopic, ts=Timestamp(i)) - putFutures.add(driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)) + for i in 1 .. excess: + let msg = fakeWakuMessage( + payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i) + ) + putFutures.add( + driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ) # waitFor is used to synchronously wait for the futures to complete. 
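# --- Editor's note: illustrative sketch, not part of the patch. Both retention
# tests above share one shape: batch the puts, settle them all, then run the
# policy once. Assuming the helpers already used in this diff
# (newSqliteArchiveDriver, fakeWakuMessage, computeDigest, computeMessageHash),
# a minimal standalone version of that shape would be:
#
#   let driver = newSqliteArchiveDriver()
#   let policy: RetentionPolicy = CapacityRetentionPolicy.new(capacity = 100)
#   var putFutures = newSeq[Future[ArchiveDriverResult[void]]]()
#   for i in 1 .. 125: # overshoot the capacity by 25 messages
#     let msg = fakeWakuMessage(payload = @[byte i], ts = Timestamp(i))
#     putFutures.add(
#       driver.put(
#         DefaultPubsubTopic, msg, computeDigest(msg),
#         computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp,
#       )
#     )
#   discard waitFor allFinished(putFutures) # settle every pending insert first
#   require (waitFor policy.execute(driver)).isOk() # prune back to the limit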
discard waitFor allFinished(putFutures) @@ -95,7 +110,7 @@ suite "Waku Archive - Retention policy": # execute policy provided the current db size overflows, results in rows deletion require (sizeDB >= sizeLimit) require (waitFor retentionPolicy.execute(driver)).isOk() - + # get the number of rows from database let rowCountAfterDeletion = (waitFor driver.getMessagesCount()).tryGet() @@ -115,33 +130,40 @@ suite "Waku Archive - Retention policy": let driver = newSqliteArchiveDriver() - retentionPolicy: RetentionPolicy = CapacityRetentionPolicy.new(capacity=capacity) + retentionPolicy: RetentionPolicy = + CapacityRetentionPolicy.new(capacity = capacity) - let messages = @[ - fakeWakuMessage(contentTopic=DefaultContentTopic, ts=ts(0)), - fakeWakuMessage(contentTopic=DefaultContentTopic, ts=ts(1)), - - fakeWakuMessage(contentTopic=contentTopic, ts=ts(2)), - fakeWakuMessage(contentTopic=contentTopic, ts=ts(3)), - fakeWakuMessage(contentTopic=contentTopic, ts=ts(4)), - fakeWakuMessage(contentTopic=contentTopic, ts=ts(5)), - fakeWakuMessage(contentTopic=contentTopic, ts=ts(6)) - ] + let messages = + @[ + fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(0)), + fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(1)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(2)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(3)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(4)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(5)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(6)), + ] ## When for msg in messages: - require (waitFor driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() require (waitFor retentionPolicy.execute(driver)).isOk() ## Then let storedMsg = (waitFor driver.getAllMessages()).tryGet() check: storedMsg.len == capacity - storedMsg.all do (item: auto) -> bool: + storedMsg.all do(item: auto) -> bool: let (pubsubTopic, msg, _, _, _) = item - msg.contentTopic == contentTopic and - pubsubTopic == DefaultPubsubTopic + msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic ## Cleanup (waitFor driver.close()).expect("driver to close") - diff --git a/tests/waku_archive/test_waku_archive.nim b/tests/waku_archive/test_waku_archive.nim index bd445839d..45947c76e 100644 --- a/tests/waku_archive/test_waku_archive.nim +++ b/tests/waku_archive/test_waku_archive.nim @@ -18,9 +18,7 @@ import ../testlib/common, ../testlib/wakucore - suite "Waku Archive - message handling": - test "it should archive a valid and non-ephemeral message": ## Setup let driver = newSqliteArchiveDriver() @@ -28,7 +26,7 @@ suite "Waku Archive - message handling": ## Given let validSenderTime = now() - let message = fakeWakuMessage(ephemeral=false, ts=validSenderTime) + let message = fakeWakuMessage(ephemeral = false, ts = validSenderTime) ## When waitFor archive.handleMessage(DefaultPubSubTopic, message) @@ -43,13 +41,14 @@ suite "Waku Archive - message handling": let archive = newWakuArchive(driver) ## Given - let msgList = @[ - fakeWakuMessage(ephemeral = false, payload = "1"), - fakeWakuMessage(ephemeral = true, payload = "2"), - fakeWakuMessage(ephemeral = true, payload = "3"), - fakeWakuMessage(ephemeral = true, payload = "4"), - fakeWakuMessage(ephemeral = false, payload = "5"), - ] + let msgList = + @[ + 
fakeWakuMessage(ephemeral = false, payload = "1"), + fakeWakuMessage(ephemeral = true, payload = "2"), + fakeWakuMessage(ephemeral = true, payload = "3"), + fakeWakuMessage(ephemeral = true, payload = "4"), + fakeWakuMessage(ephemeral = false, payload = "5"), + ] ## When for msg in msgList: @@ -66,7 +65,7 @@ suite "Waku Archive - message handling": ## Given let invalidSenderTime = 0 - let message = fakeWakuMessage(ts=invalidSenderTime) + let message = fakeWakuMessage(ts = invalidSenderTime) ## When waitFor archive.handleMessage(DefaultPubSubTopic, message) @@ -83,9 +82,10 @@ suite "Waku Archive - message handling": ## Given let now = now() - invalidSenderTime = now + MaxMessageTimestampVariance + 1_000_000_000 # 1 second over the max variance + invalidSenderTime = now + MaxMessageTimestampVariance + 1_000_000_000 + # 1 second over the max variance - let message = fakeWakuMessage(ts=invalidSenderTime) + let message = fakeWakuMessage(ts = invalidSenderTime) ## When waitFor archive.handleMessage(DefaultPubSubTopic, message) @@ -104,7 +104,7 @@ suite "Waku Archive - message handling": now = now() invalidSenderTime = now - MaxMessageTimestampVariance - 1 - let message = fakeWakuMessage(ts=invalidSenderTime) + let message = fakeWakuMessage(ts = invalidSenderTime) ## When waitFor archive.handleMessage(DefaultPubSubTopic, message) @@ -113,22 +113,42 @@ suite "Waku Archive - message handling": check: (waitFor driver.getMessagesCount()).tryGet() == 0 - procSuite "Waku Archive - find messages": ## Fixtures let timeOrigin = now() - let msgListA = @[ - fakeWakuMessage(@[byte 00], contentTopic=ContentTopic("2"), ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 01], contentTopic=ContentTopic("1"), ts=ts(10, timeOrigin)), - fakeWakuMessage(@[byte 02], contentTopic=ContentTopic("2"), ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 03], contentTopic=ContentTopic("1"), ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 04], contentTopic=ContentTopic("2"), ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 05], contentTopic=ContentTopic("1"), ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 06], contentTopic=ContentTopic("2"), ts=ts(60, timeOrigin)), - fakeWakuMessage(@[byte 07], contentTopic=ContentTopic("1"), ts=ts(70, timeOrigin)), - fakeWakuMessage(@[byte 08], contentTopic=ContentTopic("2"), ts=ts(80, timeOrigin)), - fakeWakuMessage(@[byte 09], contentTopic=ContentTopic("1"), ts=ts(90, timeOrigin)) - ] + let msgListA = + @[ + fakeWakuMessage( + @[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin) + ), + fakeWakuMessage( + @[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin) + ), + fakeWakuMessage( + @[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin) + ), + fakeWakuMessage( + @[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin) + ), + fakeWakuMessage( + @[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin) + ), + fakeWakuMessage( + @[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin) + ), + fakeWakuMessage( + @[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin) + ), + fakeWakuMessage( + @[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin) + ), + fakeWakuMessage( + @[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin) + ), + fakeWakuMessage( + @[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin) + ), + ] let archiveA = block: let @@ -136,7 +156,15 @@ procSuite "Waku Archive - find messages": archive = newWakuArchive(driver) for msg in 
msgListA: - require (waitFor driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() archive @@ -148,7 +176,7 @@ procSuite "Waku Archive - find messages": let topic = ContentTopic("1") let - msg1 = fakeWakuMessage(contentTopic=topic) + msg1 = fakeWakuMessage(contentTopic = topic) msg2 = fakeWakuMessage() waitFor archive.handleMessage("foo", msg1) @@ -181,9 +209,9 @@ procSuite "Waku Archive - find messages": topic3 = ContentTopic("3") let - msg1 = fakeWakuMessage(contentTopic=topic1) - msg2 = fakeWakuMessage(contentTopic=topic2) - msg3 = fakeWakuMessage(contentTopic=topic3) + msg1 = fakeWakuMessage(contentTopic = topic1) + msg2 = fakeWakuMessage(contentTopic = topic2) + msg3 = fakeWakuMessage(contentTopic = topic3) waitFor archive.handleMessage("foo", msg1) waitFor archive.handleMessage("foo", msg2) @@ -211,7 +239,7 @@ procSuite "Waku Archive - find messages": driver = newSqliteArchiveDriver() archive = newWakuArchive(driver) - let queryTopics = toSeq(1..15).mapIt(ContentTopic($it)) + let queryTopics = toSeq(1 .. 15).mapIt(ContentTopic($it)) ## Given let req = ArchiveQuery(contentTopics: queryTopics) @@ -244,9 +272,9 @@ procSuite "Waku Archive - find messages": contentTopic3 = ContentTopic("3") let - msg1 = fakeWakuMessage(contentTopic=contentTopic1) - msg2 = fakeWakuMessage(contentTopic=contentTopic2) - msg3 = fakeWakuMessage(contentTopic=contentTopic3) + msg1 = fakeWakuMessage(contentTopic = contentTopic1) + msg2 = fakeWakuMessage(contentTopic = contentTopic2) + msg3 = fakeWakuMessage(contentTopic = contentTopic3) waitFor archive.handleMessage(pubsubtopic1, msg1) waitFor archive.handleMessage(pubsubtopic2, msg2) @@ -255,8 +283,7 @@ procSuite "Waku Archive - find messages": ## Given # This query targets: pubsubtopic1 AND (contentTopic1 OR contentTopic3) let req = ArchiveQuery( - pubsubTopic: some(pubsubTopic1), - contentTopics: @[contentTopic1, contentTopic3] + pubsubTopic: some(pubsubTopic1), contentTopics: @[contentTopic1, contentTopic3] ) ## When @@ -313,9 +340,9 @@ procSuite "Waku Archive - find messages": let pubsubTopic = "queried-topic" let - msg1 = fakeWakuMessage(payload="TEST-1") - msg2 = fakeWakuMessage(payload="TEST-2") - msg3 = fakeWakuMessage(payload="TEST-3") + msg1 = fakeWakuMessage(payload = "TEST-1") + msg2 = fakeWakuMessage(payload = "TEST-2") + msg3 = fakeWakuMessage(payload = "TEST-3") waitFor archive.handleMessage(pubsubTopic, msg1) waitFor archive.handleMessage(pubsubTopic, msg2) @@ -340,10 +367,7 @@ procSuite "Waku Archive - find messages": test "handle query with forward pagination": ## Given - let req = ArchiveQuery( - pageSize: 4, - direction: PagingDirection.FORWARD - ) + let req = ArchiveQuery(pageSize: 4, direction: PagingDirection.FORWARD) ## When var nextReq = req # copy @@ -351,7 +375,7 @@ procSuite "Waku Archive - find messages": var pages = newSeq[seq[WakuMessage]](3) var cursors = newSeq[Option[ArchiveCursor]](3) - for i in 0..<3: + for i in 0 ..< 3: let res = waitFor archiveA.findMessages(nextReq) require res.isOk() @@ -370,16 +394,13 @@ procSuite "Waku Archive - find messages": cursors[2] == none(ArchiveCursor) check: - pages[0] == msgListA[0..3] - pages[1] == msgListA[4..7] - pages[2] == msgListA[8..9] + pages[0] == msgListA[0 .. 3] + pages[1] == msgListA[4 .. 7] + pages[2] == msgListA[8 .. 
9] test "handle query with backward pagination": ## Given - let req = ArchiveQuery( - pageSize: 4, - direction: PagingDirection.BACKWARD - ) + let req = ArchiveQuery(pageSize: 4, direction: PagingDirection.BACKWARD) ## When var nextReq = req # copy @@ -387,7 +408,7 @@ procSuite "Waku Archive - find messages": var pages = newSeq[seq[WakuMessage]](3) var cursors = newSeq[Option[ArchiveCursor]](3) - for i in 0..<3: + for i in 0 ..< 3: let res = waitFor archiveA.findMessages(nextReq) require res.isOk() @@ -406,9 +427,9 @@ procSuite "Waku Archive - find messages": cursors[2] == none(ArchiveCursor) check: - pages[0] == msgListA[6..9] - pages[1] == msgListA[2..5] - pages[2] == msgListA[0..1] + pages[0] == msgListA[6 .. 9] + pages[1] == msgListA[2 .. 5] + pages[2] == msgListA[0 .. 1] test "handle query with no paging info - auto-pagination": ## Setup @@ -416,21 +437,30 @@ procSuite "Waku Archive - find messages": driver = newSqliteArchiveDriver() archive = newWakuArchive(driver) - let msgList = @[ - fakeWakuMessage(@[byte 0], contentTopic=ContentTopic("2")), - fakeWakuMessage(@[byte 1], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 2], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 3], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 4], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 5], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 6], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 7], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 8], contentTopic=DefaultContentTopic), - fakeWakuMessage(@[byte 9], contentTopic=ContentTopic("2")) + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2")), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 5], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 6], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 7], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 8], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2")), ] for msg in msgList: - require (waitFor driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk() + require ( + waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() ## Given let req = ArchiveQuery(contentTopics: @[DefaultContentTopic]) @@ -455,7 +485,7 @@ procSuite "Waku Archive - find messages": contentTopics: @[ContentTopic("1")], startTime: some(ts(15, timeOrigin)), endTime: some(ts(55, timeOrigin)), - direction: PagingDirection.FORWARD + direction: PagingDirection.FORWARD, ) ## When @@ -475,7 +505,7 @@ procSuite "Waku Archive - find messages": let req = ArchiveQuery( contentTopics: @[ContentTopic("1")], startTime: some(Timestamp(2)), - endTime: some(Timestamp(2)) + endTime: some(Timestamp(2)), ) ## When @@ -494,7 +524,7 @@ procSuite "Waku Archive - find messages": let req = ArchiveQuery( contentTopics: @[ContentTopic("1")], startTime: some(Timestamp(5)), - endTime: some(Timestamp(2)) + endTime: some(Timestamp(2)), ) ## When diff --git a/tests/waku_core/test_all.nim b/tests/waku_core/test_all.nim index bd24a0bbe..f7f4fad38 100644 --- 
a/tests/waku_core/test_all.nim +++ b/tests/waku_core/test_all.nim @@ -1,6 +1,6 @@ {.used.} -import +import ./test_message_digest, ./test_namespaced_topics, ./test_peers, diff --git a/tests/waku_core/test_message_digest.nim b/tests/waku_core/test_message_digest.nim index 64f669698..ab6764782 100644 --- a/tests/waku_core/test_message_digest.nim +++ b/tests/waku_core/test_message_digest.nim @@ -1,16 +1,9 @@ {.used.} -import - std/sequtils, - stew/byteutils, - stew/endians2, - testutils/unittests -import - ../../../waku/waku_core, - ../testlib/wakucore +import std/sequtils, stew/byteutils, stew/endians2, testutils/unittests +import ../../../waku/waku_core, ../testlib/wakucore suite "Waku Message - Deterministic hashing": - test "digest computation - empty meta field": ## Test vector: ## @@ -23,25 +16,28 @@ suite "Waku Message - Deterministic hashing": ## message_hash = 0xa2554498b31f5bcdfcbf7fa58ad1c2d45f0254f3f8110a85588ec3cf10720fd8 ## Given - let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto + let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto let message = fakeWakuMessage( - contentTopic = DefaultContentTopic, # /waku/2/default-content/proto - payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), - meta = newSeq[byte](), - ts = getNanosecondTime(1681964442) # Apr 20 2023 04:20:42 - ) + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), + meta = newSeq[byte](), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 + ) ## When let messageHash = computeMessageHash(pubsubTopic, message) ## Then check: - byteutils.toHex(pubsubTopic.toBytes()) == "2f77616b752f322f64656661756c742d77616b752f70726f746f" - byteutils.toHex(message.contentTopic.toBytes()) == "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" + byteutils.toHex(pubsubTopic.toBytes()) == + "2f77616b752f322f64656661756c742d77616b752f70726f746f" + byteutils.toHex(message.contentTopic.toBytes()) == + "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" byteutils.toHex(message.payload) == "010203045445535405060708" byteutils.toHex(message.meta) == "" byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" - messageHash.toHex() == "a2554498b31f5bcdfcbf7fa58ad1c2d45f0254f3f8110a85588ec3cf10720fd8" + messageHash.toHex() == + "a2554498b31f5bcdfcbf7fa58ad1c2d45f0254f3f8110a85588ec3cf10720fd8" test "digest computation - meta field (12 bytes)": ## Test vector: @@ -55,25 +51,28 @@ suite "Waku Message - Deterministic hashing": ## message_hash = 0x64cce733fed134e83da02b02c6f689814872b1a0ac97ea56b76095c3c72bfe05 ## Given - let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto + let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto let message = fakeWakuMessage( - contentTopic = DefaultContentTopic, # /waku/2/default-content/proto - payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), - meta = "\x73\x75\x70\x65\x72\x2d\x73\x65\x63\x72\x65\x74".toBytes(), - ts = getNanosecondTime(1681964442) # Apr 20 2023 04:20:42 - ) + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), + meta = "\x73\x75\x70\x65\x72\x2d\x73\x65\x63\x72\x65\x74".toBytes(), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 + ) ## When let messageHash = computeMessageHash(pubsubTopic, message) ## Then check: - byteutils.toHex(pubsubTopic.toBytes()) == "2f77616b752f322f64656661756c742d77616b752f70726f746f" - 
byteutils.toHex(message.contentTopic.toBytes()) == "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" + byteutils.toHex(pubsubTopic.toBytes()) == + "2f77616b752f322f64656661756c742d77616b752f70726f746f" + byteutils.toHex(message.contentTopic.toBytes()) == + "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" byteutils.toHex(message.payload) == "010203045445535405060708" byteutils.toHex(message.meta) == "73757065722d736563726574" byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" - messageHash.toHex() == "64cce733fed134e83da02b02c6f689814872b1a0ac97ea56b76095c3c72bfe05" + messageHash.toHex() == + "64cce733fed134e83da02b02c6f689814872b1a0ac97ea56b76095c3c72bfe05" test "digest computation - meta field (64 bytes)": ## Test vector: @@ -87,12 +86,12 @@ suite "Waku Message - Deterministic hashing": ## message_hash = 0x7158b6498753313368b9af8f6e0a0a05104f68f972981da42a43bc53fb0c1b27" ## Given - let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto + let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto let message = fakeWakuMessage( - contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), - meta = toSeq(0.byte..63.byte), - ts = getNanosecondTime(1681964442) # Apr 20 2023 04:20:42 + meta = toSeq(0.byte .. 63.byte), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 ) ## When @@ -100,12 +99,16 @@ suite "Waku Message - Deterministic hashing": ## Then check: - byteutils.toHex(pubsubTopic.toBytes()) == "2f77616b752f322f64656661756c742d77616b752f70726f746f" - byteutils.toHex(message.contentTopic.toBytes()) == "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" + byteutils.toHex(pubsubTopic.toBytes()) == + "2f77616b752f322f64656661756c742d77616b752f70726f746f" + byteutils.toHex(message.contentTopic.toBytes()) == + "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" byteutils.toHex(message.payload) == "010203045445535405060708" - byteutils.toHex(message.meta) == "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f" + byteutils.toHex(message.meta) == + "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f" byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" - messageHash.toHex() == "7158b6498753313368b9af8f6e0a0a05104f68f972981da42a43bc53fb0c1b27" + messageHash.toHex() == + "7158b6498753313368b9af8f6e0a0a05104f68f972981da42a43bc53fb0c1b27" test "digest computation - zero length payload": ## Test vector: @@ -119,28 +122,28 @@ suite "Waku Message - Deterministic hashing": ## message_hash = 0x483ea950cb63f9b9d6926b262bb36194d3f40a0463ce8446228350bd44e96de4 ## Given - let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto + let pubsubTopic = DefaultPubsubTopic # /waku/2/default-waku/proto let message = fakeWakuMessage( - contentTopic = DefaultContentTopic, # /waku/2/default-content/proto - payload = newSeq[byte](), - meta = "\x73\x75\x70\x65\x72\x2d\x73\x65\x63\x72\x65\x74".toBytes(), - ts = getNanosecondTime(1681964442) # Apr 20 2023 04:20:42 - ) + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + payload = newSeq[byte](), + meta = "\x73\x75\x70\x65\x72\x2d\x73\x65\x63\x72\x65\x74".toBytes(), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 + ) ## When let 
messageHash = computeMessageHash(pubsubTopic, message) ## Then check: - messageHash.toHex() == "483ea950cb63f9b9d6926b262bb36194d3f40a0463ce8446228350bd44e96de4" + messageHash.toHex() == + "483ea950cb63f9b9d6926b262bb36194d3f40a0463ce8446228350bd44e96de4" - test "waku message - check meta size is enforced": # create message with meta size > 64 bytes (invalid) let message = fakeWakuMessage( contentTopic = DefaultContentTopic, payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), - meta = toSeq(0.byte..66.byte) # 67 bytes + meta = toSeq(0.byte .. 66.byte), # 67 bytes ) let encodedInvalidMsg = message.encode diff --git a/tests/waku_core/test_namespaced_topics.nim b/tests/waku_core/test_namespaced_topics.nim index 46145999a..3344b5631 100644 --- a/tests/waku_core/test_namespaced_topics.nim +++ b/tests/waku_core/test_namespaced_topics.nim @@ -1,14 +1,9 @@ {.used.} -import - std/options, - stew/results, - testutils/unittests -import - ../../../waku/waku_core/topics +import std/options, stew/results, testutils/unittests +import ../../../waku/waku_core/topics suite "Waku Message - Content topics namespacing": - test "Stringify namespaced content topic": ## Given var ns = NsContentTopic() @@ -89,7 +84,8 @@ suite "Waku Message - Content topics namespacing": let err = ns.tryError() check: err.kind == ParsingErrorKind.InvalidFormat - err.cause == "Invalid content topic structure. Expected either //// or /////" + err.cause == + "Invalid content topic structure. Expected either //// or /////" test "Parse content topic string - Invalid string: missing encoding part": ## Given @@ -104,7 +100,8 @@ suite "Waku Message - Content topics namespacing": let err = ns.tryError() check: err.kind == ParsingErrorKind.InvalidFormat - err.cause == "Invalid content topic structure. Expected either //// or /////" + err.cause == + "Invalid content topic structure. 
Expected either //// or /////" test "Parse content topic string - Invalid string: wrong extra parts": ## Given @@ -137,7 +134,6 @@ suite "Waku Message - Content topics namespacing": err.cause == "generation should be a numeric value" suite "Waku Message - Pub-sub topics namespacing": - test "Stringify named sharding pub-sub topic": ## Given var ns = NsPubsubTopic.named("waku-dev") @@ -151,7 +147,7 @@ suite "Waku Message - Pub-sub topics namespacing": test "Stringify static sharding pub-sub topic": ## Given - var ns = NsPubsubTopic.staticSharding(clusterId=0, shardId=2) + var ns = NsPubsubTopic.staticSharding(clusterId = 0, shardId = 2) ## When let topic = $ns diff --git a/tests/waku_core/test_peers.nim b/tests/waku_core/test_peers.nim index 76742f487..f886a9912 100644 --- a/tests/waku_core/test_peers.nim +++ b/tests/waku_core/test_peers.nim @@ -1,19 +1,14 @@ {.used.} import - stew/results, - testutils/unittests, - libp2p/multiaddress, - libp2p/peerid, - libp2p/errors -import - ../../waku/waku_core + stew/results, testutils/unittests, libp2p/multiaddress, libp2p/peerid, libp2p/errors +import ../../waku/waku_core suite "Waku Core - Peers": - test "Peer info parses correctly": ## Given - let address = "/ip4/127.0.0.1/tcp/65002/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + "/ip4/127.0.0.1/tcp/65002/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## When let remotePeerInfoRes = parsePeerInfo(address) @@ -29,7 +24,8 @@ suite "Waku Core - Peers": test "DNS multiaddrs parsing - dns peer": ## Given - let address = "/dns/localhost/tcp/65012/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + "/dns/localhost/tcp/65012/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## When let dnsPeerRes = parsePeerInfo(address) @@ -45,7 +41,8 @@ suite "Waku Core - Peers": test "DNS multiaddrs parsing - dnsaddr peer": ## Given - let address = "/dnsaddr/localhost/tcp/65022/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + "/dnsaddr/localhost/tcp/65022/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## When let dnsAddrPeerRes = parsePeerInfo(address) @@ -61,7 +58,8 @@ suite "Waku Core - Peers": test "DNS multiaddrs parsing - dns4 peer": ## Given - let address = "/dns4/localhost/tcp/65032/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + "/dns4/localhost/tcp/65032/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## When let dns4PeerRes = parsePeerInfo(address) @@ -77,7 +75,8 @@ suite "Waku Core - Peers": test "DNS multiaddrs parsing - dns6 peer": ## Given - let address = "/dns6/localhost/tcp/65042/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + "/dns6/localhost/tcp/65042/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## When let dns6PeerRes = parsePeerInfo(address) @@ -101,7 +100,8 @@ suite "Waku Core - Peers": test "Multiaddr parsing should fail with leading whitespace": ## Given - let address = " /ip4/127.0.0.1/tcp/65062/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + " /ip4/127.0.0.1/tcp/65062/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## Then check: @@ -109,7 +109,8 @@ suite "Waku Core - Peers": test "Multiaddr parsing should fail with trailing whitespace": ## Given - let address = "/ip4/127.0.0.1/tcp/65072/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc " + let address = + "/ip4/127.0.0.1/tcp/65072/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc " ## Then 
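# --- Editor's note: illustrative, not part of the patch. The whitespace and
# malformed-address tests in this suite all exercise the same Result-returning
# entry point; assuming parsePeerInfo behaves as these tests encode, the two
# paths look like:
#
#   let res = parsePeerInfo(
#     "/ip4/127.0.0.1/tcp/65002/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
#   )
#   if res.isOk():
#     discard res.value # parsed remote peer info (peer id plus multiaddress)
#   else:
#     discard res.error # e.g. surrounding whitespace, bad IP, unsupported transport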
check: @@ -117,7 +118,8 @@ suite "Waku Core - Peers": test "Multiaddress parsing should fail with invalid IP address": ## Given - let address = "/ip4/127.0.0.0.1/tcp/65082/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + "/ip4/127.0.0.0.1/tcp/65082/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## Then check: @@ -133,9 +135,9 @@ suite "Waku Core - Peers": test "Multiaddress parsing should fail with unsupported transport": ## Given - let address = "/ip4/127.0.0.1/udp/65102/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + let address = + "/ip4/127.0.0.1/udp/65102/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" ## Then check: parsePeerInfo(address).isErr() - diff --git a/tests/waku_core/test_published_address.nim b/tests/waku_core/test_published_address.nim index e437d864a..37f263ea0 100644 --- a/tests/waku_core/test_published_address.nim +++ b/tests/waku_core/test_published_address.nim @@ -1,27 +1,19 @@ {.used.} -import - stew/shims/net as stewNet, - std/strutils, - testutils/unittests -import - ../testlib/wakucore, - ../testlib/wakunode +import stew/shims/net as stewNet, std/strutils, testutils/unittests +import ../testlib/wakucore, ../testlib/wakunode suite "Waku Core - Published Address": - - test "Test IP 0.0.0.0": - let - node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress( - "0.0.0.0"),Port(0)) - + test "Test IP 0.0.0.0": + let node = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + check: ($node.announcedAddresses).contains("127.0.0.1") - test "Test custom IP": - let - node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress( - "8.8.8.8"),Port(0)) - + test "Test custom IP": + let node = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("8.8.8.8"), Port(0)) + check: - ($node.announcedAddresses).contains("8.8.8.8") \ No newline at end of file + ($node.announcedAddresses).contains("8.8.8.8") diff --git a/tests/waku_core/test_time.nim b/tests/waku_core/test_time.nim index d575e5727..0ec000bd2 100644 --- a/tests/waku_core/test_time.nim +++ b/tests/waku_core/test_time.nim @@ -1,21 +1,21 @@ {.used.} -import - testutils/unittests -import - ../../waku/waku_core/time +import testutils/unittests +import ../../waku/waku_core/time suite "Waku Core - Time": - test "Test timestamp conversion": ## Given let nanoseconds = 1676562429123456789.int64 secondsPart = nanoseconds div 1_000_000_000 nanosecondsPart = nanoseconds mod 1_000_000_000 - secondsFloat = secondsPart.float64 + (nanosecondsPart.float64 / 1_000_000_000.float64) - lowResTimestamp = Timestamp(secondsPart.int64 * 1_000_000_000.int64) # 1676562429000000000 - highResTimestamp = Timestamp(secondsFloat * 1_000_000_000.float64) # 1676562429123456789 + secondsFloat = + secondsPart.float64 + (nanosecondsPart.float64 / 1_000_000_000.float64) + lowResTimestamp = Timestamp(secondsPart.int64 * 1_000_000_000.int64) + # 1676562429000000000 + highResTimestamp = Timestamp(secondsFloat * 1_000_000_000.float64) + # 1676562429123456789 require highResTimestamp > lowResTimestamp # Sanity check diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim index cdcd9461d..2220646c8 100644 --- a/tests/waku_discv5/test_waku_discv5.nim +++ b/tests/waku_discv5/test_waku_discv5.nim @@ -26,10 +26,10 @@ procSuite "Waku Discovery v5": enrNoCapabilities = initRecord(1, pk1, {"rs": @[0.byte, 0.byte, 1.byte, 0.byte, 0.byte]}).value() - enrRelay = - initRecord( + enrRelay = initRecord( 1, pk2, {"waku2": @[1.byte], "rs": 
@[0.byte, 1.byte, 1.byte, 0.byte, 1.byte]} - ).value() + ) + .value() enrNoShardingInfo = initRecord(1, pk1, {"waku2": @[1.byte]}).value() suite "shardingPredicate": @@ -39,71 +39,65 @@ procSuite "Waku Discovery v5": recordCluster22Indices2 {.threadvar.}: Record asyncSetup: - recordCluster21 = - block: - let - enrSeqNum = 1u64 - enrPrivKey = generatesecp256k1key() + recordCluster21 = block: + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() - let - clusterId: uint16 = 21 - shardIds: seq[uint16] = @[1u16, 2u16, 5u16, 7u16, 9u16, 11u16] + let + clusterId: uint16 = 21 + shardIds: seq[uint16] = @[1u16, 2u16, 5u16, 7u16, 9u16, 11u16] - let - shardsTopics = - RelayShards.init(clusterId, shardIds).expect("Valid shardIds") + let shardsTopics = + RelayShards.init(clusterId, shardIds).expect("Valid shardIds") - var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) - require builder.withWakuRelaySharding(shardsTopics).isOk() - builder.withWakuCapabilities(Relay) + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + builder.withWakuCapabilities(Relay) - let recordRes = builder.build() - require recordRes.isOk() - recordRes.tryGet() + let recordRes = builder.build() + require recordRes.isOk() + recordRes.tryGet() - recordCluster22Indices1 = - block: - let - enrSeqNum = 1u64 - enrPrivKey = generatesecp256k1key() + recordCluster22Indices1 = block: + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() - let - clusterId: uint16 = 22 - shardIds: seq[uint16] = @[2u16, 4u16, 5u16, 8u16, 10u16, 12u16] + let + clusterId: uint16 = 22 + shardIds: seq[uint16] = @[2u16, 4u16, 5u16, 8u16, 10u16, 12u16] - let - shardsTopics = - RelayShards.init(clusterId, shardIds).expect("Valid shardIds") + let shardsTopics = + RelayShards.init(clusterId, shardIds).expect("Valid shardIds") - var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) - require builder.withWakuRelaySharding(shardsTopics).isOk() - builder.withWakuCapabilities(Relay) + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + builder.withWakuCapabilities(Relay) - let recordRes = builder.build() - require recordRes.isOk() - recordRes.tryGet() + let recordRes = builder.build() + require recordRes.isOk() + recordRes.tryGet() - recordCluster22Indices2 = - block: - let - enrSeqNum = 1u64 - enrPrivKey = generatesecp256k1key() + recordCluster22Indices2 = block: + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() - let - clusterId: uint16 = 22 - shardIds: seq[uint16] = @[1u16, 3u16, 6u16, 7u16, 9u16, 11u16] + let + clusterId: uint16 = 22 + shardIds: seq[uint16] = @[1u16, 3u16, 6u16, 7u16, 9u16, 11u16] - let - shardsTopics = - RelayShards.init(clusterId, shardIds).expect("Valid shardIds") + let shardsTopics = + RelayShards.init(clusterId, shardIds).expect("Valid shardIds") - var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) - require builder.withWakuRelaySharding(shardsTopics).isOk() - builder.withWakuCapabilities(Relay) + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + builder.withWakuCapabilities(Relay) - let recordRes = builder.build() - require recordRes.isOk() - recordRes.tryGet() + let recordRes = builder.build() + require recordRes.isOk() + recordRes.tryGet() asyncTest "filter peer per contained shard": # When @@ -157,9 +151,8 @@ procSuite "Waku Discovery v5": predicateRecord.isNone() 
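# --- Editor's note: illustrative, not part of the patch. The three fixture
# records built in the asyncSetup above differ only in their clusterId and
# shardIds; the recipe they share, assuming the ENR helpers used in this diff,
# is:
#
#   let enrPrivKey = generatesecp256k1key()
#   let shards = RelayShards.init(21, @[1u16, 2u16]).expect("Valid shardIds")
#   var builder = EnrBuilder.init(enrPrivKey, seqNum = 1u64)
#   require builder.withWakuRelaySharding(shards).isOk() # fills the "rs" field
#   builder.withWakuCapabilities(Relay)                  # fills the "waku2" field
#   let record = builder.build().tryGet() # the Record fed to shardingPredicate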
asyncTest "no relay sharding info": - let - predicateNoShardingInfo = - shardingPredicate(enrNoShardingInfo, @[enrNoShardingInfo]) + let predicateNoShardingInfo = + shardingPredicate(enrNoShardingInfo, @[enrNoShardingInfo]) check: predicateNoShardingInfo.isNone() @@ -176,51 +169,44 @@ procSuite "Waku Discovery v5": ): (WakuDiscoveryV5, Record) = let privKey = generateSecp256k1Key() - record = - newTestEnrRecord( - privKey = privKey, - extIp = extIp, - tcpPort = tcpPort, - udpPort = udpPort, - indices = indices, - flags = recordFlags, - ) - node = - newTestDiscv5( - privKey = privKey, - bindIp = bindIp, - tcpPort = tcpPort, - udpPort = udpPort, - record = record, - bootstrapRecords = bootstrapRecords, - ) + record = newTestEnrRecord( + privKey = privKey, + extIp = extIp, + tcpPort = tcpPort, + udpPort = udpPort, + indices = indices, + flags = recordFlags, + ) + node = newTestDiscv5( + privKey = privKey, + bindIp = bindIp, + tcpPort = tcpPort, + udpPort = udpPort, + record = record, + bootstrapRecords = bootstrapRecords, + ) (node, record) - let - filterForStore: WakuDiscv5Predicate = - proc(record: waku_enr.Record): bool = - let typedRecord = record.toTyped() - if typedRecord.isErr(): - return false + let filterForStore: WakuDiscv5Predicate = proc(record: waku_enr.Record): bool = + let typedRecord = record.toTyped() + if typedRecord.isErr(): + return false - let capabilities = typedRecord.value.waku2 - if capabilities.isNone(): - return false + let capabilities = typedRecord.value.waku2 + if capabilities.isNone(): + return false - return capabilities.get().supportsCapability(Capabilities.Store) + return capabilities.get().supportsCapability(Capabilities.Store) asyncTest "find random peers without predicate": # Given 3 nodes let (node1, record1) = buildNode(tcpPort = 61500u16, udpPort = 9000u16) (node2, record2) = buildNode(tcpPort = 61502u16, udpPort = 9002u16) - (node3, record3) = - buildNode( - tcpPort = 61504u16, - udpPort = 9004u16, - bootstrapRecords = @[record1, record2], - ) + (node3, record3) = buildNode( + tcpPort = 61504u16, udpPort = 9004u16, bootstrapRecords = @[record1, record2] + ) let res1 = await node1.start() assertResultOk res1 @@ -250,35 +236,31 @@ procSuite "Waku Discovery v5": asyncTest "find random peers with parameter predicate": # Given 4 nodes let - (node3, record3) = - buildNode( - tcpPort = 61504u16, - udpPort = 9004u16, - recordFlags = - some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Filter)), - ) - (node4, record4) = - buildNode( - tcpPort = 61506u16, - udpPort = 9006u16, - recordFlags = - some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), - ) - (node2, record2) = - buildNode( - tcpPort = 61502u16, - udpPort = 9002u16, - recordFlags = - some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), - bootstrapRecords = @[record3, record4], - ) - (node1, record1) = - buildNode( - tcpPort = 61500u16, - udpPort = 9000u16, - recordFlags = some(CapabilitiesBitfield.init(Capabilities.Relay)), - bootstrapRecords = @[record2], - ) + (node3, record3) = buildNode( + tcpPort = 61504u16, + udpPort = 9004u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Filter)), + ) + (node4, record4) = buildNode( + tcpPort = 61506u16, + udpPort = 9006u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), + ) + (node2, record2) = buildNode( + tcpPort = 61502u16, + udpPort = 9002u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, 
Capabilities.Store)), + bootstrapRecords = @[record3, record4], + ) + (node1, record1) = buildNode( + tcpPort = 61500u16, + udpPort = 9000u16, + recordFlags = some(CapabilitiesBitfield.init(Capabilities.Relay)), + bootstrapRecords = @[record2], + ) # Start nodes' discoveryV5 protocols let res1 = await node1.start() @@ -310,37 +292,32 @@ procSuite "Waku Discovery v5": ## Setup # Records let - (node3, record3) = - buildNode( - tcpPort = 61504u16, - udpPort = 9004u16, - recordFlags = - some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Filter)), - ) - (node4, record4) = - buildNode( - tcpPort = 61506u16, - udpPort = 9006u16, - recordFlags = - some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), - ) - (node2, record2) = - buildNode( - tcpPort = 61502u16, - udpPort = 9002u16, - recordFlags = - some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), - bootstrapRecords = @[record3, record4], - ) - let - (node1, record1) = - buildNode( - tcpPort = 61500u16, - udpPort = 9000u16, - recordFlags = some(CapabilitiesBitfield.init(Capabilities.Relay)), - indices = @[0u64, 0u64, 1u64, 0u64, 0u64], - bootstrapRecords = @[record2], - ) + (node3, record3) = buildNode( + tcpPort = 61504u16, + udpPort = 9004u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Filter)), + ) + (node4, record4) = buildNode( + tcpPort = 61506u16, + udpPort = 9006u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), + ) + (node2, record2) = buildNode( + tcpPort = 61502u16, + udpPort = 9002u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), + bootstrapRecords = @[record3, record4], + ) + let (node1, record1) = buildNode( + tcpPort = 61500u16, + udpPort = 9000u16, + recordFlags = some(CapabilitiesBitfield.init(Capabilities.Relay)), + indices = @[0u64, 0u64, 1u64, 0u64, 0u64], + bootstrapRecords = @[record2], + ) # Start nodes' discoveryV5 protocols let res1 = await node1.start() @@ -369,11 +346,10 @@ procSuite "Waku Discovery v5": await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop()) suite "addBoostrapNode": - let - validEnr = - "enr:-I-4QG3mX250ArniAs2DLpW-QHOLKSD5x_Ibp8AYcQZbz1HhHFJtl2dNDGcha" & - "U5ugLbDKRgtTDZH8NsxXlTXDpYAgzgBgmlkgnY0gnJzjwAVBgABAAIABQAHAAkAC4" & - "lzZWNwMjU2azGhA4_KwN0NRRmmfQ-B9B2h2PZjoJvBnaIOi6sR_b2UTQBBhXdha3U" & "yAQ" + let validEnr = + "enr:-I-4QG3mX250ArniAs2DLpW-QHOLKSD5x_Ibp8AYcQZbz1HhHFJtl2dNDGcha" & + "U5ugLbDKRgtTDZH8NsxXlTXDpYAgzgBgmlkgnY0gnJzjwAVBgABAAIABQAHAAkAC4" & + "lzZWNwMjU2azGhA4_KwN0NRRmmfQ-B9B2h2PZjoJvBnaIOi6sR_b2UTQBBhXdha3U" & "yAQ" asyncTest "address is valid": # Given an empty list of enrs @@ -388,7 +364,7 @@ procSuite "Waku Discovery v5": # Then the enr is added to the list check: enrs.len == 1 - enrs[0].toBase64() == validEnr[4..^1] + enrs[0].toBase64() == validEnr[4 ..^ 1] asyncTest "address is empty": # Given an empty list of enrs diff --git a/tests/waku_discv5/utils.nim b/tests/waku_discv5/utils.nim index 9a8464a4e..e2fd5a3bd 100644 --- a/tests/waku_discv5/utils.nim +++ b/tests/waku_discv5/utils.nim @@ -6,8 +6,7 @@ import eth/keys as eth_keys import - ../../../waku/[waku_core/topics, waku_enr, waku_discv5], - ../testlib/[common, wakucore] + ../../../waku/[waku_core/topics, waku_enr, waku_discv5], ../testlib/[common, wakucore] proc newTestDiscv5*( privKey: libp2p_keys.PrivateKey, @@ -18,19 +17,15 @@ proc newTestDiscv5*( bootstrapRecords = newSeq[waku_enr.Record](), queue = 
newAsyncEventQueue[SubscriptionEvent](30), ): WakuDiscoveryV5 = - let - config = - WakuDiscoveryV5Config( - privateKey: eth_keys.PrivateKey(privKey.skkey), - address: parseIpAddress(bindIp), - port: Port(udpPort), - bootstrapRecords: bootstrapRecords, - ) + let config = WakuDiscoveryV5Config( + privateKey: eth_keys.PrivateKey(privKey.skkey), + address: parseIpAddress(bindIp), + port: Port(udpPort), + bootstrapRecords: bootstrapRecords, + ) - let - discv5 = - WakuDiscoveryV5.new( - rng = rng(), conf = config, record = some(record), queue = queue - ) + let discv5 = WakuDiscoveryV5.new( + rng = rng(), conf = config, record = some(record), queue = queue + ) return discv5 diff --git a/tests/waku_enr/test_sharding.nim b/tests/waku_enr/test_sharding.nim index 547d2c62e..0c7b79f4c 100644 --- a/tests/waku_enr/test_sharding.nim +++ b/tests/waku_enr/test_sharding.nim @@ -20,19 +20,14 @@ suite "Sharding": ## Given let mixedTopics = @["/waku/2/thisisatest", "/waku/2/rs/0/2", "/waku/2/rs/0/8"] let shardedTopics = @["/waku/2/rs/0/2", "/waku/2/rs/0/4", "/waku/2/rs/0/8"] - let - namedTopics = - @["/waku/2/thisisatest", "/waku/2/atestthisis", "/waku/2/isthisatest"] - let - gibberish = - @["aedyttydcb/uioasduyio", "jhdfsjhlsdfjhk/sadjhk", "khfsd/hjfdsgjh/dfs"] + let namedTopics = + @["/waku/2/thisisatest", "/waku/2/atestthisis", "/waku/2/isthisatest"] + let gibberish = + @["aedyttydcb/uioasduyio", "jhdfsjhlsdfjhk/sadjhk", "khfsd/hjfdsgjh/dfs"] let empty: seq[string] = @[] - let - shardsTopics = - RelayShards.init(0, @[uint16(2), uint16(4), uint16(8)]).expect( - "Valid shardIds" - ) + let shardsTopics = + RelayShards.init(0, @[uint16(2), uint16(4), uint16(8)]).expect("Valid shardIds") ## When @@ -66,24 +61,20 @@ suite "Sharding": tcpPort = 61500u16 udpPort = 9000u16 - let - record = - newTestEnrRecord( - privKey = privKey, extIp = extIp, tcpPort = tcpPort, udpPort = udpPort - ) + let record = newTestEnrRecord( + privKey = privKey, extIp = extIp, tcpPort = tcpPort, udpPort = udpPort + ) let queue = newAsyncEventQueue[SubscriptionEvent](30) - let - node = - newTestDiscv5( - privKey = privKey, - bindIp = bindIp, - tcpPort = tcpPort, - udpPort = udpPort, - record = record, - queue = queue, - ) + let node = newTestDiscv5( + privKey = privKey, + bindIp = bindIp, + tcpPort = tcpPort, + udpPort = udpPort, + record = record, + queue = queue, + ) let res = await node.start() assert res.isOk(), res.error diff --git a/tests/waku_filter_v2/test_all.nim b/tests/waku_filter_v2/test_all.nim index a91b8ba4b..4f09f28c2 100644 --- a/tests/waku_filter_v2/test_all.nim +++ b/tests/waku_filter_v2/test_all.nim @@ -1,5 +1,3 @@ {.used.} -import - ./test_waku_client, - ./test_waku_filter_protocol +import ./test_waku_client, ./test_waku_filter_protocol diff --git a/tests/waku_filter_v2/test_waku_client.nim b/tests/waku_filter_v2/test_waku_client.nim index b0f2a653f..25d86d8c2 100644 --- a/tests/waku_filter_v2/test_waku_client.nim +++ b/tests/waku_filter_v2/test_waku_client.nim @@ -1,42 +1,18 @@ {.used.} import - std/[ - options, - tables, - sequtils, - strutils, - json - ], + std/[options, tables, sequtils, strutils, json], testutils/unittests, - stew/[ - results, - byteutils - ], + stew/[results, byteutils], chronos, chronicles, os, libp2p/peerstore import - ../../../waku/[ - node/peer_manager, - waku_core, - waku_filter/rpc_codec - ], - ../../../waku/waku_filter_v2/[ - common, - client, - subscriptions, - protocol - ], - ../testlib/[ - wakucore, - testasync, - testutils, - futures, - sequtils - ], + ../../../waku/[node/peer_manager, 
waku_core, waku_filter/rpc_codec], + ../../../waku/waku_filter_v2/[common, client, subscriptions, protocol], + ../testlib/[wakucore, testasync, testutils, futures, sequtils], ./waku_filter_utils, ../resources/payloads @@ -59,7 +35,7 @@ suite "Waku Filter - End to End": msgSeq = @[] pushHandlerFuture = newPushHandlerFuture() messagePushHandler = proc( - pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[void] {.async, closure, gcsafe.} = msgSeq.add((pubsubTopic, message)) pushHandlerFuture.complete((pubsubTopic, message)) @@ -78,15 +54,19 @@ suite "Waku Filter - End to End": clientPeerId = clientSwitch.peerInfo.toRemotePeerInfo().peerId asyncTeardown: - await allFutures(wakuFilter.stop(), wakuFilterClient.stop(), serverSwitch.stop(), clientSwitch.stop()) + await allFutures( + wakuFilter.stop(), + wakuFilterClient.stop(), + serverSwitch.stop(), + clientSwitch.stop(), + ) suite "Subscriber Ping": asyncTest "Active Subscription Identification": # Given - let - subscribeResponse = await wakuFilterClient.subscribe( - serverRemotePeerInfo, pubsubTopic, contentTopicSeq - ) + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) assert subscribeResponse.isOk(), $subscribeResponse.error check wakuFilter.subscriptions.isSubscribed(clientPeerId) @@ -107,10 +87,9 @@ suite "Waku Filter - End to End": unsubscribedPingResponse.error().kind == FilterSubscribeErrorKind.NOT_FOUND asyncTest "After Unsubscription": # Given - let - subscribeResponse = await wakuFilterClient.subscribe( - serverRemotePeerInfo, pubsubTopic, contentTopicSeq - ) + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) assert subscribeResponse.isOk(), $subscribeResponse.error check wakuFilter.subscriptions.isSubscribed(clientPeerId) @@ -132,7 +111,8 @@ suite "Waku Filter - End to End": asyncTest "Server remote peer info doesn't match an online server": # Given an offline service node let offlineServerSwitch = newStandardSwitch() - let offlineServerRemotePeerInfo = offlineServerSwitch.peerInfo.toRemotePeerInfo() + let offlineServerRemotePeerInfo = + offlineServerSwitch.peerInfo.toRemotePeerInfo() # When subscribing to the offline service node let subscribeResponse = await wakuFilterClient.subscribe( @@ -146,9 +126,8 @@ suite "Waku Filter - End to End": asyncTest "Subscribing to an empty content topic": # When subscribing to an empty content topic - let subscribeResponse = await wakuFilterClient.subscribe( - serverRemotePeerInfo, pubsubTopic, @[] - ) + let subscribeResponse = + await wakuFilterClient.subscribe(serverRemotePeerInfo, pubsubTopic, @[]) # Then the subscription is not successful check: @@ -169,10 +148,12 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) # When sending a message to the subscribed content topic - let msg1 = fakeWakuMessage(contentTopic=contentTopic) + let msg1 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg1) # Then the message is pushed to the client @@ -184,7 +165,7 @@ suite "Waku Filter - End to End": # When sending a message to a non-subscribed content topic (before unsubscription) pushHandlerFuture = 
newPushHandlerFuture() # Clear previous future - let msg2 = fakeWakuMessage(contentTopic=nonExistentContentTopic) + let msg2 = fakeWakuMessage(contentTopic = nonExistentContentTopic) await wakuFilter.handleMessage(pubsubTopic, msg2) # Then the message is not pushed to the client @@ -201,7 +182,7 @@ suite "Waku Filter - End to End": # When sending a message to the previously unsubscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg3 = fakeWakuMessage(contentTopic=contentTopic) + let msg3 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg3) # Then the message is not pushed to the client @@ -210,7 +191,7 @@ suite "Waku Filter - End to End": # When sending a message to a non-subscribed content topic (after unsubscription) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg4 = fakeWakuMessage(contentTopic=nonExistentContentTopic) + let msg4 = fakeWakuMessage(contentTopic = nonExistentContentTopic) await wakuFilter.handleMessage(pubsubTopic, msg4) # Then the message is not pushed to the client @@ -231,10 +212,12 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicsSeq) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicsSeq + ) # When sending a message to the one of the subscribed content topics - let msg1 = fakeWakuMessage(contentTopic=contentTopic) + let msg1 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg1) # Then the message is pushed to the client @@ -246,7 +229,7 @@ suite "Waku Filter - End to End": # When sending a message to the other subscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg2 = fakeWakuMessage(contentTopic=otherContentTopic) + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic) await wakuFilter.handleMessage(pubsubTopic, msg2) # Then the message is pushed to the client @@ -258,7 +241,7 @@ suite "Waku Filter - End to End": # When sending a message to a non-subscribed content topic (before unsubscription) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg3 = fakeWakuMessage(contentTopic=nonExistentContentTopic) + let msg3 = fakeWakuMessage(contentTopic = nonExistentContentTopic) await wakuFilter.handleMessage(pubsubTopic, msg3) # Then the message is not pushed to the client @@ -274,7 +257,7 @@ suite "Waku Filter - End to End": # When sending a message to the previously unsubscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg4 = fakeWakuMessage(contentTopic=contentTopic) + let msg4 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg4) # Then the message is not pushed to the client @@ -283,7 +266,7 @@ suite "Waku Filter - End to End": # When sending a message to the other previously unsubscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg5 = fakeWakuMessage(contentTopic=otherContentTopic) + let msg5 = fakeWakuMessage(contentTopic = otherContentTopic) await wakuFilter.handleMessage(pubsubTopic, msg5) # Then the message is not pushed to the client @@ -292,7 +275,7 @@ suite "Waku Filter - End to End": # When sending a message to a non-subscribed content topic (after unsubscription) 
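# --- Editor's note: illustrative, not part of the patch. Every push assertion
# in this filter suite repeats one three-step shape; assuming the suite
# fixtures defined above (pushHandlerFuture, wakuFilter, FUTURE_TIMEOUT):
#
#   pushHandlerFuture = newPushHandlerFuture() # reset before each delivery
#   let msg = fakeWakuMessage(contentTopic = contentTopic)
#   await wakuFilter.handleMessage(pubsubTopic, msg)
#   # subscribed topic   -> the future completes with (pubsubTopic, msg)
#   # unsubscribed topic -> withTimeout(FUTURE_TIMEOUT) returns false
#   check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT)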
pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg6 = fakeWakuMessage(contentTopic=nonExistentContentTopic) + let msg6 = fakeWakuMessage(contentTopic = nonExistentContentTopic) await wakuFilter.handleMessage(pubsubTopic, msg6) # Then the message is not pushed to the client @@ -315,7 +298,9 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) # When subscribing to a different pubsub topic let subscribeResponse2 = await wakuFilterClient.subscribe( @@ -327,10 +312,13 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq & otherContentTopicSeq) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + contentTopicSeq & otherContentTopicSeq, + ) # When sending a message to one of the subscribed content topics - let msg1 = fakeWakuMessage(contentTopic=contentTopic) + let msg1 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg1) # Then the message is pushed to the client @@ -342,7 +330,7 @@ suite "Waku Filter - End to End": # When sending a message to the other subscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg2 = fakeWakuMessage(contentTopic=otherContentTopic) + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic) await wakuFilter.handleMessage(otherPubsubTopic, msg2) # Then the message is pushed to the client @@ -354,7 +342,7 @@ suite "Waku Filter - End to End": # When sending a message to a non-subscribed content topic (before unsubscription) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg3 = fakeWakuMessage(contentTopic="non-existent-content-topic") + let msg3 = fakeWakuMessage(contentTopic = "non-existent-content-topic") await wakuFilter.handleMessage(pubsubTopic, msg3) # Then @@ -371,11 +359,13 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), otherContentTopicSeq) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), otherContentTopicSeq + ) # When sending a message to the previously subscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg4 = fakeWakuMessage(contentTopic=contentTopic) + let msg4 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg4) # Then the message is not pushed to the client @@ -384,7 +374,7 @@ suite "Waku Filter - End to End": # When sending a message to the still subscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg5 = fakeWakuMessage(contentTopic=otherContentTopic) + let msg5 = fakeWakuMessage(contentTopic = otherContentTopic) await wakuFilter.handleMessage(otherPubsubTopic, msg5) # Then the message is pushed to the client @@ -406,7 +396,7 @@ suite "Waku Filter - End to End": # When sending a message to the previously unsubscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg6 
= fakeWakuMessage(contentTopic=contentTopic) + let msg6 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg6) # Then the message is not pushed to the client @@ -429,7 +419,9 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) # When subscribing to a different content topic let subscribeResponse2 = await wakuFilterClient.subscribe( @@ -441,10 +433,13 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq & otherContentTopicSeq) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + contentTopicSeq & otherContentTopicSeq, + ) # When sending a message to one of the subscribed content topics - let msg1 = fakeWakuMessage(contentTopic=contentTopic) + let msg1 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg1) # Then the message is pushed to the client @@ -456,7 +451,7 @@ suite "Waku Filter - End to End": # When sending a message to the other subscribed content topic pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg2 = fakeWakuMessage(contentTopic=otherContentTopic) + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic) await wakuFilter.handleMessage(otherPubsubTopic, msg2) # Then the message is pushed to the client @@ -468,7 +463,7 @@ suite "Waku Filter - End to End": # When sending a message to a non-subscribed content topic (before unsubscription) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg3 = fakeWakuMessage(contentTopic="non-existent-content-topic") + let msg3 = fakeWakuMessage(contentTopic = "non-existent-content-topic") await wakuFilter.handleMessage(pubsubTopic, msg3) # Then @@ -476,7 +471,8 @@ suite "Waku Filter - End to End": not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) # When unsubscribing from one of the subscriptions - let unsubscribeResponse = await wakuFilterClient.unsubscribeAll(serverRemotePeerInfo) + let unsubscribeResponse = + await wakuFilterClient.unsubscribeAll(serverRemotePeerInfo) # Then the unsubscription is successful assert unsubscribeResponse.isOk(), $unsubscribeResponse.error @@ -485,8 +481,8 @@ suite "Waku Filter - End to End": # When sending a message the previously subscribed content topics pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg4 = fakeWakuMessage(contentTopic=contentTopic) - let msg5 = fakeWakuMessage(contentTopic=otherContentTopic) + let msg4 = fakeWakuMessage(contentTopic = contentTopic) + let msg5 = fakeWakuMessage(contentTopic = otherContentTopic) await wakuFilter.handleMessage(pubsubTopic, msg4) await wakuFilter.handleMessage(otherPubsubTopic, msg5) @@ -512,7 +508,9 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicsSeq1) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicsSeq1 + ) # When subscribing to a different pubsub topic let subscribeResponse2 = await 
wakuFilterClient.subscribe( @@ -524,10 +522,13 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicsSeq1 & contentTopicsSeq2) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + contentTopicsSeq1 & contentTopicsSeq2, + ) # When sending a message to (pubsubTopic, contentTopic) - let msg1 = fakeWakuMessage(contentTopic=contentTopic) + let msg1 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg1) # Then the message is pushed to the client @@ -539,7 +540,7 @@ suite "Waku Filter - End to End": # When sending a message to (pubsubTopic, otherContentTopic1) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg2 = fakeWakuMessage(contentTopic=otherContentTopic1) + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic1) await wakuFilter.handleMessage(pubsubTopic, msg2) # Then the message is pushed to the client @@ -551,7 +552,7 @@ suite "Waku Filter - End to End": # When sending a message to (otherPubsubTopic, contentTopic) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg3 = fakeWakuMessage(contentTopic=contentTopic) + let msg3 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(otherPubsubTopic, msg3) # Then the message is pushed to the client @@ -563,7 +564,7 @@ suite "Waku Filter - End to End": # When sending a message to (otherPubsubTopic, otherContentTopic2) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg4 = fakeWakuMessage(contentTopic=otherContentTopic2) + let msg4 = fakeWakuMessage(contentTopic = otherContentTopic2) await wakuFilter.handleMessage(otherPubsubTopic, msg4) # Then the message is pushed to the client @@ -587,11 +588,14 @@ suite "Waku Filter - End to End": check: wakuFilter.subscriptions.subscribedPeerCount() == 1 wakuFilter.subscriptions.isSubscribed(clientPeerId) - unorderedCompare(wakuFilter.getSubscribedContentTopics(clientPeerId), @[contentTopic, otherContentTopic2]) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + @[contentTopic, otherContentTopic2], + ) # When sending a message to (pubsubTopic, contentTopic) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg5 = fakeWakuMessage(contentTopic=contentTopic) + let msg5 = fakeWakuMessage(contentTopic = contentTopic) await wakuFilter.handleMessage(pubsubTopic, msg5) # Then the message is pushed to the client @@ -603,7 +607,7 @@ suite "Waku Filter - End to End": # When sending a message to (otherPubsubTopic, otherContentTopic2) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg6 = fakeWakuMessage(contentTopic=otherContentTopic2) + let msg6 = fakeWakuMessage(contentTopic = otherContentTopic2) await wakuFilter.handleMessage(otherPubsubTopic, msg6) # Then the message is pushed to the client @@ -615,9 +619,9 @@ suite "Waku Filter - End to End": # When sending a message to (pubsubTopic, otherContentTopic1) and (otherPubsubTopic, contentTopic) pushHandlerFuture = newPushHandlerFuture() # Clear previous future - let msg7 = fakeWakuMessage(contentTopic=otherContentTopic1) + let msg7 = fakeWakuMessage(contentTopic = otherContentTopic1) await wakuFilter.handleMessage(pubsubTopic, msg7) - let msg8 = fakeWakuMessage(contentTopic=contentTopic) + let msg8 = fakeWakuMessage(contentTopic = contentTopic) await 
wakuFilter.handleMessage(otherPubsubTopic, msg8)

     # Then the messages are not pushed to the client
@@ -626,31 +630,32 @@ suite "Waku Filter - End to End":
   asyncTest "Max Topic Size":
     # Given a topic list of 100 topics
-    var topicSeq: seq[string] = toSeq(0.. 0:
       let takeNumber = min(topicSeq.len, MaxContentTopicsPerRequest)
-      let topicSeqBatch = topicSeq[0.. Out of Max Size
+      msg1 =
+        fakeWakuMessage(contentTopic = contentTopic, payload = getByteSequence(1024))
+      # 1KiB
+      msg2 = fakeWakuMessage(
+        contentTopic = contentTopic, payload = getByteSequence(10 * 1024)
+      ) # 10KiB
+      msg3 = fakeWakuMessage(
+        contentTopic = contentTopic, payload = getByteSequence(100 * 1024)
+      ) # 100KiB
+      msg4 = fakeWakuMessage(
+        contentTopic = contentTopic,
+        payload = getByteSequence(MaxWakuMessageSize - sizeEmptyMsg - 38),
+      ) # Max Size (Inclusive Limit)
+      msg5 = fakeWakuMessage(
+        contentTopic = contentTopic,
+        payload = getByteSequence(MaxWakuMessageSize - sizeEmptyMsg - 37),
+      ) # Max Size (Exclusive Limit)
+      msg6 = fakeWakuMessage(
+        contentTopic = contentTopic, payload = getByteSequence(MaxWakuMessageSize)
+      ) # MaxWakuMessageSize -> Out of Max Size

     # Notice that the message is wrapped with more data in https://github.com/status-im/nim-libp2p/blob/3011ba4326fa55220a758838835797ff322619fc/libp2p/protocols/pubsub/gossipsub.nim#L627-L632
     # And therefore, we need to subtract the hard-coded values above (for msg4 & msg5), obtained empirically,
@@ -1049,7 +1084,7 @@ suite "Waku Relay":
       handlerFuture = newPushHandlerFuture()
       otherHandlerFuture = newPushHandlerFuture()
       discard await node.publish(pubsubTopic, msg3)
-
+
     # Then the message is received in both nodes
     check:
       await handlerFuture.withTimeout(FUTURE_TIMEOUT)
@@ -1108,13 +1143,17 @@ suite "Waku Relay":
     # Create a different handler than the default to include messages in a seq
     var thisHandlerFuture = newPushHandlerFuture()
     var thisMessageSeq: seq[(PubsubTopic, WakuMessage)] = @[]
-    proc thisSimpleFutureHandler(topic: PubsubTopic, message: WakuMessage) {.async, gcsafe.} =
+    proc thisSimpleFutureHandler(
+        topic: PubsubTopic, message: WakuMessage
+    ) {.async, gcsafe.} =
       thisMessageSeq.add((topic, message))
       thisHandlerFuture.complete((topic, message))

     var otherHandlerFuture = newPushHandlerFuture()
     var otherMessageSeq: seq[(PubsubTopic, WakuMessage)] = @[]
-    proc otherSimpleFutureHandler(topic: PubsubTopic, message: WakuMessage) {.async, gcsafe.} =
+    proc otherSimpleFutureHandler(
+        topic: PubsubTopic, message: WakuMessage
+    ) {.async, gcsafe.} =
       otherMessageSeq.add((topic, message))
       otherHandlerFuture.complete((topic, message))
@@ -1131,7 +1170,7 @@ suite "Waku Relay":
       msg2 = fakeWakuMessage("msg2", pubsubTopic)
       msg3 = fakeWakuMessage("msg3", pubsubTopic)
       msg4 = fakeWakuMessage("msg4", pubsubTopic)
-
+
     discard await node.publish(pubsubTopic, msg1)
     check await thisHandlerFuture.withTimeout(FUTURE_TIMEOUT)
     check await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT)
@@ -1151,19 +1190,21 @@ suite "Waku Relay":
     check:
       await thisHandlerFuture.withTimeout(FUTURE_TIMEOUT)
-      thisMessageSeq == @[
-        (pubsubTopic, msg1),
-        (pubsubTopic, msg2),
-        (pubsubTopic, msg3),
-        (pubsubTopic, msg4)
-      ]
+      thisMessageSeq ==
+        @[
+          (pubsubTopic, msg1),
+          (pubsubTopic, msg2),
+          (pubsubTopic, msg3),
+          (pubsubTopic, msg4),
+        ]
       await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT)
-      otherMessageSeq == @[
-        (pubsubTopic, msg1),
-        (pubsubTopic, msg2),
-        (pubsubTopic, msg3),
-        (pubsubTopic, msg4)
-      ]
+      otherMessageSeq ==
+        @[
+          (pubsubTopic, msg1),
+          (pubsubTopic, msg2),
+          (pubsubTopic, msg3),
+
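# The msg4/msg5 boundary above encodes: max payload = MaxWakuMessageSize -
# sizeEmptyMsg - empirical gossipsub wrapping overhead (38 bytes for the
# inclusive limit, 37 for the exclusive one). A sketch of that arithmetic
# with assumed constants, not the real protocol values:
const
  MaxWakuMessageSize = 150 * 1024 # assumption for illustration only
  sizeEmptyMsg = 112 # assumed size of an encoded empty WakuMessage
  wrapOverhead = 38 # the empirically obtained gossipsub framing cost

let maxPayload = MaxWakuMessageSize - sizeEmptyMsg - wrapOverhead
# a payload of maxPayload bytes still relays; one more byte crosses the limit
doAssert maxPayload + sizeEmptyMsg + wrapOverhead == MaxWakuMessageSize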
(pubsubTopic, msg4), + ] # Finally stop the other node await allFutures(otherSwitch.stop(), otherNode.stop()) @@ -1177,7 +1218,7 @@ suite "Waku Relay": otherNode = await newTestWakuRelay(otherSwitch) await allFutures(otherSwitch.start(), otherNode.start()) - let + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() otherPeerId = otherRemotePeerInfo.peerId @@ -1185,7 +1226,9 @@ suite "Waku Relay": # Given both are subscribed to the same pubsub topic var otherHandlerFuture = newPushHandlerFuture() - proc otherSimpleFutureHandler(topic: PubsubTopic, message: WakuMessage) {.async, gcsafe.} = + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = otherHandlerFuture.complete((topic, message)) discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) @@ -1234,7 +1277,7 @@ suite "Waku Relay": otherHandlerFuture = newPushHandlerFuture() let msg3 = fakeWakuMessage(testMessage, pubsubTopic) discard await node.publish(pubsubTopic, msg3) - + # Then the message is received in both nodes check: await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -1247,7 +1290,7 @@ suite "Waku Relay": otherHandlerFuture = newPushHandlerFuture() let msg4 = fakeWakuMessage(testMessage, pubsubTopic) discard await otherNode.publish(pubsubTopic, msg4) - + # Then the message is received in both nodes check: await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -1266,7 +1309,7 @@ suite "Waku Relay": otherNode = await newTestWakuRelay(otherSwitch) await allFutures(otherSwitch.start(), otherNode.start()) - let + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() otherPeerId = otherRemotePeerInfo.peerId @@ -1274,7 +1317,9 @@ suite "Waku Relay": # Given both are subscribed to the same pubsub topic var otherHandlerFuture = newPushHandlerFuture() - proc otherSimpleFutureHandler(topic: PubsubTopic, message: WakuMessage) {.async, gcsafe.} = + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = otherHandlerFuture.complete((topic, message)) discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) @@ -1282,7 +1327,7 @@ suite "Waku Relay": check: node.subscribedTopics == pubsubTopicSeq otherNode.subscribedTopics == pubsubTopicSeq - + await sleepAsync(500.millis) # Given other node is stopped without unsubscribing diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim index 15c1f09e6..1acb4819e 100644 --- a/tests/waku_relay/test_wakunode_relay.nim +++ b/tests/waku_relay/test_wakunode_relay.nim @@ -18,12 +18,13 @@ import ../testlib/wakucore, ../testlib/wakunode -template sourceDir: string = currentSourcePath.parentDir() +template sourceDir(): string = + currentSourcePath.parentDir() + const KEY_PATH = sourceDir / "resources/test_key.pem" const CERT_PATH = sourceDir / "resources/test_cert.pem" suite "WakuNode - Relay": - asyncTest "Relay protocol is started correctly": let nodeKey1 = generateSecp256k1Key() @@ -81,11 +82,13 @@ suite "WakuNode - Relay": await allFutures( node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]), - node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]), ) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = check: topic == pubSubTopic msg.contentTopic == contentTopic @@ -150,13 
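# The sourceDir template above resolves test resources relative to the test's
# own source file at compile time, so KEY_PATH/CERT_PATH work regardless of
# the working directory. A self-contained sketch of the same technique (the
# file name here is only an example):
import std/os

template resourceDir(): string =
  currentSourcePath.parentDir()

const certPath = resourceDir() / "resources/test_cert.pem"
echo certPath # absolute path next to this .nim file, independent of CWD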
+153,15 @@ suite "WakuNode - Relay": var completionFutValidatorRej = newFuture[bool]() # set a topic validator for pubSubTopic - proc validator(topic: string, msg: WakuMessage): Future[ValidationResult] {.async.} = + proc validator( + topic: string, msg: WakuMessage + ): Future[ValidationResult] {.async.} = ## the validator that only allows messages with contentTopic1 to be relayed check: topic == pubSubTopic # only relay messages with contentTopic1 - if msg.contentTopic != contentTopic1: + if msg.contentTopic != contentTopic1: completionFutValidatorRej.complete(true) return ValidationResult.Reject @@ -166,7 +171,9 @@ suite "WakuNode - Relay": node2.wakuRelay.addValidator(validator) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = check: topic == pubSubTopic # check that only messages with contentTopic1 is relayed (but not contentTopic2) @@ -179,13 +186,13 @@ suite "WakuNode - Relay": var res = await node1.publish(some(pubSubTopic), message1) assert res.isOk(), $res.error - + await sleepAsync(500.millis) # message2 never gets relayed because of the validator res = await node1.publish(some(pubSubTopic), message2) assert res.isOk(), $res.error - + await sleepAsync(500.millis) check: @@ -200,14 +207,18 @@ suite "WakuNode - Relay": # TODO: Add a function to validate the WakuMessage integrity xasyncTest "Stats of peer sending wrong WakuMessages are updated": # Create 2 nodes - let nodes = toSeq(0..1).mapIt(newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))) + let nodes = toSeq(0 .. 1).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) # Start all the nodes and mount relay with await allFutures(nodes.mapIt(it.start())) await allFutures(nodes.mapIt(it.mountRelay())) # Connect nodes - let connOk = await nodes[0].peerManager.connectRelay(nodes[1].switch.peerInfo.toRemotePeerInfo()) + let connOk = await nodes[0].peerManager.connectRelay( + nodes[1].switch.peerInfo.toRemotePeerInfo() + ) require: connOk == true @@ -216,8 +227,9 @@ suite "WakuNode - Relay": await sleepAsync(500.millis) # Node 0 publishes 5 messages not compliant with WakuMessage (aka random bytes) - for i in 0..4: - discard await nodes[0].wakuRelay.publish(DefaultPubsubTopic, urandom(1*(10^2))) + for i in 0 .. 
4: + discard + await nodes[0].wakuRelay.publish(DefaultPubsubTopic, urandom(1 * (10 ^ 2))) # Wait for gossip await sleepAsync(500.millis) @@ -226,18 +238,29 @@ suite "WakuNode - Relay": # meaning that message validity is enforced to gossip messages var peerStats = nodes[1].wakuRelay.peerStats check: - peerStats[nodes[0].switch.peerInfo.peerId].topicInfos[DefaultPubsubTopic].invalidMessageDeliveries == 5.0 + peerStats[nodes[0].switch.peerInfo.peerId].topicInfos[DefaultPubsubTopic].invalidMessageDeliveries == + 5.0 await allFutures(nodes.mapIt(it.stop())) asyncTest "Messages are relayed between two websocket nodes": let nodeKey1 = generateSecp256k1Key() - node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), - bindPort = Port(0), wsBindPort = Port(0), wsEnabled = true) + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) nodeKey2 = generateSecp256k1Key() - node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), - bindPort = Port(0), wsBindPort = Port(0), wsEnabled = true) + node2 = newTestWakuNode( + nodeKey2, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) pubSubTopic = "test" contentTopic = ContentTopic("/waku/2/default-content/proto") payload = "hello world".toBytes() @@ -252,7 +275,9 @@ suite "WakuNode - Relay": await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = check: topic == pubSubTopic msg.contentTopic == contentTopic @@ -264,9 +289,8 @@ suite "WakuNode - Relay": let res = await node2.publish(some(pubSubTopic), message) assert res.isOk(), $res.error - - await sleepAsync(500.millis) + await sleepAsync(500.millis) check: (await completionFut.withTimeout(5.seconds)) == true @@ -276,11 +300,15 @@ suite "WakuNode - Relay": asyncTest "Messages are relayed between nodes with multiple transports (TCP and Websockets)": let nodeKey1 = generateSecp256k1Key() - node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), - bindPort = Port(0), wsBindPort = Port(0), wsEnabled = true) + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) nodeKey2 = generateSecp256k1Key() - node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), - bindPort = Port(0)) + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), bindPort = Port(0)) pubSubTopic = "test" contentTopic = ContentTopic("/waku/2/default-content/proto") payload = "hello world".toBytes() @@ -295,7 +323,9 @@ suite "WakuNode - Relay": await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = check: topic == pubSubTopic msg.contentTopic == contentTopic @@ -310,7 +340,6 @@ suite "WakuNode - Relay": await sleepAsync(500.millis) - check: (await completionFut.withTimeout(5.seconds)) == true await node1.stop() @@ -319,11 +348,15 @@ suite "WakuNode - Relay": asyncTest "Messages relaying fails with non-overlapping transports (TCP or Websockets)": let nodeKey1 = generateSecp256k1Key() - node1 = newTestWakuNode(nodeKey1, 
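# The "wrong WakuMessages" published in the stats test above are plain random
# bytes that cannot decode as a WakuMessage protobuf; urandom(1 * (10 ^ 2)) is
# simply 100 bytes. A sketch of the payload generation, assuming std/sysrand
# provides the urandom used here:
import std/[sysrand, math]

let junk = urandom(1 * (10 ^ 2)) # 100 random bytes, never a valid protobuf
doAssert junk.len == 100 # ^ is integer exponentiation from std/math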
parseIpAddress("0.0.0.0"), - bindPort = Port(0)) + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), bindPort = Port(0)) nodeKey2 = generateSecp256k1Key() - node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), - bindPort = Port(0), wsBindPort = Port(0), wsEnabled = true) + node2 = newTestWakuNode( + nodeKey2, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) pubSubTopic = "test" contentTopic = ContentTopic("/waku/2/default-content/proto") payload = "hello world".toBytes() @@ -342,7 +375,9 @@ suite "WakuNode - Relay": await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = check: topic == pubSubTopic msg.contentTopic == contentTopic @@ -365,11 +400,17 @@ suite "WakuNode - Relay": asyncTest "Messages are relayed between nodes with multiple transports (TCP and secure Websockets)": let nodeKey1 = generateSecp256k1Key() - node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), - bindPort = Port(0), wsBindPort = Port(0), wssEnabled = true, secureKey = KEY_PATH, secureCert = CERT_PATH) + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wssEnabled = true, + secureKey = KEY_PATH, + secureCert = CERT_PATH, + ) nodeKey2 = generateSecp256k1Key() - node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), - bindPort = Port(0)) + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), bindPort = Port(0)) pubSubTopic = "test" contentTopic = ContentTopic("/waku/2/default-content/proto") payload = "hello world".toBytes() @@ -384,7 +425,9 @@ suite "WakuNode - Relay": await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = check: topic == pubSubTopic msg.contentTopic == contentTopic @@ -407,9 +450,23 @@ suite "WakuNode - Relay": asyncTest "Messages are relayed between nodes with multiple transports (websocket and secure Websockets)": let nodeKey1 = generateSecp256k1Key() - node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), bindPort = Port(0), wsBindPort = Port(0), wssEnabled = true, secureKey = KEY_PATH, secureCert = CERT_PATH) + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wssEnabled = true, + secureKey = KEY_PATH, + secureCert = CERT_PATH, + ) nodeKey2 = generateSecp256k1Key() - node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), bindPort = Port(0),wsBindPort = Port(0), wsEnabled = true ) + node2 = newTestWakuNode( + nodeKey2, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) let pubSubTopic = "test" @@ -426,7 +483,9 @@ suite "WakuNode - Relay": await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = check: topic == pubSubTopic msg.contentTopic == contentTopic @@ -438,9 +497,8 @@ suite "WakuNode - Relay": let 
res = await node2.publish(some(pubSubTopic), message) assert res.isOk(), $res.error - - await sleepAsync(500.millis) + await sleepAsync(500.millis) check: (await completionFut.withTimeout(5.seconds)) == true @@ -449,40 +507,45 @@ suite "WakuNode - Relay": asyncTest "Bad peers with low reputation are disconnected": # Create 5 nodes - let nodes = toSeq(0..<5).mapIt(newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))) + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) await allFutures(nodes.mapIt(it.start())) await allFutures(nodes.mapIt(it.mountRelay())) # subscribe all nodes to a topic let topic = "topic" - for node in nodes: discard node.wakuRelay.subscribe(topic, nil) + for node in nodes: + discard node.wakuRelay.subscribe(topic, nil) await sleepAsync(500.millis) # connect nodes in full mesh - for i in 0..<5: - for j in 0..<5: + for i in 0 ..< 5: + for j in 0 ..< 5: if i == j: continue - let connOk = await nodes[i].peerManager.connectRelay(nodes[j].switch.peerInfo.toRemotePeerInfo()) + let connOk = await nodes[i].peerManager.connectRelay( + nodes[j].switch.peerInfo.toRemotePeerInfo() + ) require connOk # connection triggers different actions, wait for them await sleepAsync(1.seconds) # all peers are connected in a mesh, 4 conns each - for i in 0..<5: + for i in 0 ..< 5: check: nodes[i].peerManager.switch.connManager.getConnections().len == 4 # node[0] publishes wrong messages (random bytes not decoding into WakuMessage) - for j in 0..<50: - discard await nodes[0].wakuRelay.publish(topic, urandom(1*(10^3))) + for j in 0 ..< 50: + discard await nodes[0].wakuRelay.publish(topic, urandom(1 * (10 ^ 3))) # long wait, must be higher than the configured decayInterval (how often score is updated) await sleepAsync(20.seconds) # all nodes lower the score of nodes[0] (will change if gossipsub params or amount of msg changes) - for i in 1..<5: + for i in 1 ..< 5: check: nodes[i].wakuRelay.peerStats[nodes[0].switch.peerInfo.peerId].score == -249999.9 @@ -491,7 +554,7 @@ suite "WakuNode - Relay": nodes[0].peerManager.switch.connManager.getConnections().len == 0 # the rest of the nodes now have 1 conn less (kicked nodes[0] out) - for i in 1..<5: + for i in 1 ..< 5: check: nodes[i].peerManager.switch.connManager.getConnections().len == 3 @@ -514,16 +577,17 @@ suite "WakuNode - Relay": contentTopicA = DefaultContentTopic contentTopicB = ContentTopic("/waku/2/default-content1/proto") contentTopicC = ContentTopic("/waku/2/default-content2/proto") - handler: WakuRelayHandler = - proc( - pubsubTopic: PubsubTopic, - message: WakuMessage - ): Future[void] {.gcsafe, raises: [Defect].} = - discard pubsubTopic - discard message - assert shard == node.wakuSharding.getShard(contentTopicA).expect("Valid Topic"), "topic must use the same shard" - assert shard == node.wakuSharding.getShard(contentTopicB).expect("Valid Topic"), "topic must use the same shard" - assert shard == node.wakuSharding.getShard(contentTopicC).expect("Valid Topic"), "topic must use the same shard" + handler: WakuRelayHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.gcsafe, raises: [Defect].} = + discard pubsubTopic + discard message + assert shard == node.wakuSharding.getShard(contentTopicA).expect("Valid Topic"), + "topic must use the same shard" + assert shard == node.wakuSharding.getShard(contentTopicB).expect("Valid Topic"), + "topic must use the same shard" + assert shard == 
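# The sharding handler below asserts that every content topic used in the
# test maps onto one and the same shard. A toy, hash-based stand-in for that
# mapping (the real logic lives in wakuSharding.getShard; this only shows the
# invariant being asserted):
import std/hashes

proc toyShard(contentTopic: string, shardCount = 8): int =
  abs(hash(contentTopic)) mod shardCount # deterministic topic -> shard

let shard = toyShard("/waku/2/default-content/proto")
doAssert shard == toyShard("/waku/2/default-content/proto") # stable mapping
doAssert shard >= 0 and shard < 8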
node.wakuSharding.getShard(contentTopicC).expect("Valid Topic"), + "topic must use the same shard" ## When node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)) @@ -541,4 +605,4 @@ suite "WakuNode - Relay": check not node.wakuRelay.isSubscribed(shard) ## Cleanup - await node.stop() \ No newline at end of file + await node.stop() diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim index bb394ada2..3bdb1d43f 100644 --- a/tests/waku_relay/utils.nim +++ b/tests/waku_relay/utils.nim @@ -1,21 +1,14 @@ {.used.} -import - std/[strutils], - stew/shims/net as stewNet, - chronos - -import - ../../../waku/waku_relay, - ../../../waku/waku_core, - ../testlib/wakucore +import std/[strutils], stew/shims/net as stewNet, chronos +import ../../../waku/waku_relay, ../../../waku/waku_core, ../testlib/wakucore proc noopRawHandler*(): WakuRelayHandler = - var handler: WakuRelayHandler - handler = proc(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = discard - handler - + var handler: WakuRelayHandler + handler = proc(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + discard + handler proc newTestWakuRelay*(switch = newTestSwitch()): Future[WakuRelay] {.async.} = let proto = WakuRelay.new(switch).tryGet() diff --git a/tests/waku_rln_relay/rln/buffer_utils.nim b/tests/waku_rln_relay/rln/buffer_utils.nim index 740766ffb..82814341d 100644 --- a/tests/waku_rln_relay/rln/buffer_utils.nim +++ b/tests/waku_rln_relay/rln/buffer_utils.nim @@ -5,7 +5,7 @@ proc `==`*(a: Buffer, b: seq[uint8]): bool = return false let bufferArray = cast[ptr UncheckedArray[uint8]](a.ptr) - for i in 0.. epoch2": # check edge cases @@ -607,40 +624,42 @@ suite "Waku rln relay": # create some dummy nullifiers and secret shares var nullifier1: Nullifier - for index, x in nullifier1.mpairs: nullifier1[index] = 1 + for index, x in nullifier1.mpairs: + nullifier1[index] = 1 var shareX1: MerkleNode - for index, x in shareX1.mpairs: shareX1[index] = 1 + for index, x in shareX1.mpairs: + shareX1[index] = 1 let shareY1 = shareX1 var nullifier2: Nullifier - for index, x in nullifier2.mpairs: nullifier2[index] = 2 + for index, x in nullifier2.mpairs: + nullifier2[index] = 2 var shareX2: MerkleNode - for index, x in shareX2.mpairs: shareX2[index] = 2 + for index, x in shareX2.mpairs: + shareX2[index] = 2 let shareY2 = shareX2 let nullifier3 = nullifier1 var shareX3: MerkleNode - for index, x in shareX3.mpairs: shareX3[index] = 3 + for index, x in shareX3.mpairs: + shareX3[index] = 3 let shareY3 = shareX3 proc encodeAndGetBuf(proof: RateLimitProof): seq[byte] = return proof.encode().buffer let - proof1 = RateLimitProof(epoch: epoch, - nullifier: nullifier1, - shareX: shareX1, - shareY: shareY1) + proof1 = RateLimitProof( + epoch: epoch, nullifier: nullifier1, shareX: shareX1, shareY: shareY1 + ) wm1 = WakuMessage(proof: proof1.encodeAndGetBuf()) - proof2 = RateLimitProof(epoch: epoch, - nullifier: nullifier2, - shareX: shareX2, - shareY: shareY2) + proof2 = RateLimitProof( + epoch: epoch, nullifier: nullifier2, shareX: shareX2, shareY: shareY2 + ) wm2 = WakuMessage(proof: proof2.encodeAndGetBuf()) - proof3 = RateLimitProof(epoch: epoch, - nullifier: nullifier3, - shareX: shareX3, - shareY: shareY3) + proof3 = RateLimitProof( + epoch: epoch, nullifier: nullifier3, shareX: shareX3, shareY: shareY3 + ) wm3 = WakuMessage(proof: proof3.encodeAndGetBuf()) # check whether hasDuplicate correctly finds records with the same nullifiers but different secret shares @@ -662,7 +681,9 @@ 
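# hasDuplicate in the tests below flags double signaling: two proofs that
# reuse a nullifier within one epoch while revealing different secret shares.
# A toy model of just that comparison (the real types live in waku_rln_relay):
type ProofMeta = tuple[nullifier, shareX, shareY: int]

proc isDoubleSignal(a, b: ProofMeta): bool =
  # same nullifier but different shares => two messages in the same epoch
  a.nullifier == b.nullifier and (a.shareX, a.shareY) != (b.shareX, b.shareY)

doAssert isDoubleSignal((1, 1, 1), (1, 3, 3)) # like proof1 vs proof3 below
doAssert not isDoubleSignal((1, 1, 1), (2, 2, 2)) # distinct nullifiers are fine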
suite "Waku rln relay": discard wakuRlnRelay.updateLog(epoch, proofMetadata2) # proof3 has the same nullifier as proof1 but different secret shares, it should be detected as duplicate - let isDuplicate3 = wakuRlnRelay.hasDuplicate(epoch, proof3.extractMetadata().tryGet()).valueOr: + let isDuplicate3 = wakuRlnRelay.hasDuplicate( + epoch, proof3.extractMetadata().tryGet() + ).valueOr: raiseAssert $error # it is a duplicate assert isDuplicate3, "duplicate should be found" @@ -671,16 +692,20 @@ suite "Waku rln relay": let index = MembershipIndex(5) when defined(rln_v2): - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2")) + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"), + ) else: - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2")) + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"), + ) let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: raiseAssert $error @@ -700,7 +725,7 @@ suite "Waku rln relay": raiseAssert $error wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr: raiseAssert $error - wakuRlnRelay.unsafeAppendRLNProof(wm3, time+float64(wakuRlnRelay.rlnEpochSizeSec)).isOkOr: + wakuRlnRelay.unsafeAppendRLNProof(wm3, time + float64(wakuRlnRelay.rlnEpochSizeSec)).isOkOr: raiseAssert $error # validate messages @@ -714,7 +739,6 @@ suite "Waku rln relay": # wm4 has no rln proof and should not be validated msgValidate4 = wakuRlnRelay.validateMessageAndUpdateLog(wm4, some(time)) - check: msgValidate1 == MessageValidationResult.Valid msgValidate2 == MessageValidationResult.Spam @@ -726,31 +750,39 @@ suite "Waku rln relay": let index2 = MembershipIndex(6) when defined(rln_v2): - let rlnConf1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index1), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3")) + let rlnConf1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index1), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"), + ) else: - let rlnConf1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index1), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3")) - + let rlnConf1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index1), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"), + ) + let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr: raiseAssert "failed to create waku rln relay: " & $error when defined(rln_v2): - let rlnConf2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index2), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4")) + let rlnConf2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index2), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"), + 
) else: - let rlnConf2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index2), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4")) + let rlnConf2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index2), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"), + ) let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr: raiseAssert "failed to create waku rln relay: " & $error # get the current epoch time @@ -762,7 +794,6 @@ suite "Waku rln relay": # another message in the same epoch as wm1, it will break the messaging rate limit wm2 = WakuMessage(payload: "Valid message from sender 2".toBytes()) - wakuRlnRelay1.appendRLNProof(wm1, time).isOkOr: raiseAssert $error wakuRlnRelay2.appendRLNProof(wm2, time).isOkOr: @@ -831,8 +862,7 @@ suite "Waku rln relay": let keystoreMembership = KeystoreMembership( membershipContract: MembershipContract( - chainId: "5", - address: "0x0123456789012345678901234567890123456789" + chainId: "5", address: "0x0123456789012345678901234567890123456789" ), treeIndex: index, identityCredential: idCredential, @@ -840,24 +870,30 @@ suite "Waku rln relay": let password = "%m0um0ucoW%" let filepath = "./testRLNCredentials.txt" - defer: removeFile(filepath) + defer: + removeFile(filepath) # Write RLN credentials require: - addMembershipCredentials(path = filepath, - membership = keystoreMembership, - password = password, - appInfo = RLNAppInfo).isOk() + addMembershipCredentials( + path = filepath, + membership = keystoreMembership, + password = password, + appInfo = RLNAppInfo, + ) + .isOk() - let readKeystoreRes = getMembershipCredentials(path = filepath, - password = password, - # here the query would not include - # the identityCredential, - # since it is not part of the query - # but have used the same value - # to avoid re-declaration - query = keystoreMembership, - appInfo = RLNAppInfo) + let readKeystoreRes = getMembershipCredentials( + path = filepath, + password = password, + # here the query would not include + # the identityCredential, + # since it is not part of the query + # but have used the same value + # to avoid re-declaration + query = keystoreMembership, + appInfo = RLNAppInfo, + ) assert readKeystoreRes.isOk(), $readKeystoreRes.error # getMembershipCredentials returns the credential in the keystore which matches diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index f3399909c..d9f705945 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -22,7 +22,6 @@ from std/times import epochTime procSuite "WakuNode - RLN relay": # NOTE: we set the rlnRelayUserMessageLimit to 1 to make the tests easier to reason about asyncTest "testing rln-relay with valid proof": - let # publisher node nodeKey1 = generateSecp256k1Key() @@ -42,16 +41,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + ) else: - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: 
some(1.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + ) await node1.mountRlnRelay(wakuRlnConfig1) await node1.start() @@ -60,16 +63,20 @@ procSuite "WakuNode - RLN relay": await node2.mountRelay(@[DefaultPubsubTopic]) # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"), + ) else: - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"), + ) await node2.mountRlnRelay(wakuRlnConfig2) await node2.start() @@ -78,16 +85,20 @@ procSuite "WakuNode - RLN relay": await node3.mountRelay(@[DefaultPubsubTopic]) when defined(rln_v2): - let wakuRlnConfig3 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3")) + let wakuRlnConfig3 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(3.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"), + ) else: - let wakuRlnConfig3 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3")) + let wakuRlnConfig3 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(3.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"), + ) await node3.mountRlnRelay(wakuRlnConfig3) await node3.start() @@ -97,7 +108,9 @@ procSuite "WakuNode - RLN relay": await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = debug "The received topic:", topic if topic == DefaultPubsubTopic: completionFut.complete(true) @@ -113,14 +126,12 @@ procSuite "WakuNode - RLN relay": var message = WakuMessage(payload: @payload, contentTopic: contentTopic) doAssert(node1.wakuRlnRelay.unsafeAppendRLNProof(message, epochTime()).isOk()) - ## node1 publishes a message with a rate limit proof, the message is then relayed to node2 which in turn ## verifies the rate limit proof of the message and relays the message to node3 ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc discard await node1.publish(some(DefaultPubsubTopic), message) await sleepAsync(2000.millis) - check: (await completionFut.withTimeout(10.seconds)) == true @@ -129,17 +140,22 @@ procSuite "WakuNode - RLN relay": await node3.stop() asyncTest "testing rln-relay is applied in 
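# With rlnRelayUserMessageLimit = 1 and rlnEpochSizeSec = 1, these tests allow
# one message per one-second epoch; a second message in the same epoch is
# spam. Toy epoch bookkeeping of that rule (the real nullifier log is richer):
import std/tables

var perEpochCount = initTable[int64, int]()

proc accept(epoch: int64, limit = 1): bool =
  perEpochCount.mgetOrPut(epoch, 0).inc
  perEpochCount[epoch] <= limit

doAssert accept(100) # first message in epoch 100: valid
doAssert not accept(100) # second message, same epoch: spam
doAssert accept(101) # next epoch: valid again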
all rln pubsub/content topics": - # create 3 nodes - let nodes = toSeq(0..<3).mapIt(newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))) + let nodes = toSeq(0 ..< 3).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) await allFutures(nodes.mapIt(it.start())) - let pubsubTopics = @[ + let pubsubTopics = + @[ PubsubTopic("/waku/2/pubsubtopic-a/proto"), - PubsubTopic("/waku/2/pubsubtopic-b/proto")] - let contentTopics = @[ + PubsubTopic("/waku/2/pubsubtopic-b/proto"), + ] + let contentTopics = + @[ ContentTopic("/waku/2/content-topic-a/proto"), - ContentTopic("/waku/2/content-topic-b/proto")] + ContentTopic("/waku/2/content-topic-b/proto"), + ] # set up three nodes await allFutures(nodes.mapIt(it.mountRelay(pubsubTopics))) @@ -147,16 +163,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode for index, node in nodes: when defined(rln_v2): - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index.uint + 1), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index+1))) + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index.uint + 1), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)), + ) else: - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(index.uint + 1), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index+1))) + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(index.uint + 1), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)), + ) await node.mountRlnRelay(wakuRlnConfig) # start them @@ -168,7 +188,9 @@ procSuite "WakuNode - RLN relay": var rxMessagesTopic1 = 0 var rxMessagesTopic2 = 0 - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = info "relayHandler. 
The received topic:", topic if topic == pubsubTopics[0]: rxMessagesTopic1 = rxMessagesTopic1 + 1 @@ -188,22 +210,28 @@ procSuite "WakuNode - RLN relay": let epochTime = epochTime() - for i in 0..<3: - var message = WakuMessage(payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[0]) + for i in 0 ..< 3: + var message = WakuMessage( + payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[0] + ) nodes[0].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr: raiseAssert $error messages1.add(message) - for i in 0..<3: - var message = WakuMessage(payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[1]) + for i in 0 ..< 3: + var message = WakuMessage( + payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[1] + ) nodes[1].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr: raiseAssert $error messages2.add(message) # publish 3 messages from node[0] (last 2 are spam, window is 10 secs) # publish 3 messages from node[1] (last 2 are spam, window is 10 secs) - for msg in messages1: discard await nodes[0].publish(some(pubsubTopics[0]), msg) - for msg in messages2: discard await nodes[1].publish(some(pubsubTopics[1]), msg) + for msg in messages1: + discard await nodes[0].publish(some(pubsubTopics[0]), msg) + for msg in messages2: + discard await nodes[1].publish(some(pubsubTopics[1]), msg) # wait for gossip to propagate await sleepAsync(5000.millis) @@ -236,16 +264,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"), + ) else: - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"), + ) await node1.mountRlnRelay(wakuRlnConfig1) await node1.start() @@ -254,16 +286,20 @@ procSuite "WakuNode - RLN relay": await node2.mountRelay(@[DefaultPubsubTopic]) # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"), + ) else: - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"), + ) await node2.mountRlnRelay(wakuRlnConfig2) await node2.start() @@ -272,16 +308,20 @@ procSuite "WakuNode - RLN relay": await node3.mountRelay(@[DefaultPubsubTopic]) when defined(rln_v2): - let wakuRlnConfig3 = 
WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6")) + let wakuRlnConfig3 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(3.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"), + ) else: - let wakuRlnConfig3 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6")) + let wakuRlnConfig3 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(3.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"), + ) await node3.mountRlnRelay(wakuRlnConfig3) await node3.start() @@ -291,7 +331,9 @@ procSuite "WakuNode - RLN relay": # define a custom relay handler var completionFut = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = debug "The received topic:", topic if topic == DefaultPubsubTopic: completionFut.complete(true) @@ -310,23 +352,25 @@ procSuite "WakuNode - RLN relay": let contentTopicBytes = contentTopic.toBytes input = concat(payload, contentTopicBytes) - extraBytes: seq[byte] = @[byte(1),2,3] + extraBytes: seq[byte] = @[byte(1), 2, 3] when defined(rln_v2): let nonceManager = node1.wakuRlnRelay.nonceManager - let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(concat(input, extraBytes), - epoch, - MessageId(0)) + let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof( + concat(input, extraBytes), epoch, MessageId(0) + ) else: - let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(concat(input, extraBytes), # we add extra bytes to invalidate proof verification against original payload - epoch) - assert rateLimitProofRes.isOk(), $rateLimitProofRes.error # check the proof is generated correctly outside when block to avoid duplication + let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof( + concat(input, extraBytes), + # we add extra bytes to invalidate proof verification against original payload + epoch, + ) + assert rateLimitProofRes.isOk(), $rateLimitProofRes.error + # check the proof is generated correctly outside when block to avoid duplication let rateLimitProof = rateLimitProofRes.get().encode().buffer - let message = WakuMessage(payload: @payload, - contentTopic: contentTopic, - proof: rateLimitProof) - + let message = + WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof) ## node1 publishes a message with an invalid rln proof, the message is then relayed to node2 which in turn ## attempts to verify the rate limit proof and fails hence does not relay the message to node3, thus the relayHandler of node3 @@ -344,7 +388,6 @@ procSuite "WakuNode - RLN relay": await node3.stop() asyncTest "testing rln-relay double-signaling detection": - let # publisher node nodeKey1 = generateSecp256k1Key() @@ -364,16 +407,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + 
rlnRelayCredIndex: some(1.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"), + ) else: - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"), + ) await node1.mountRlnRelay(wakuRlnConfig1) await node1.start() @@ -383,16 +430,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"), + ) else: - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"), + ) await node2.mountRlnRelay(wakuRlnConfig2) await node2.start() @@ -401,16 +452,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig3 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9")) + let wakuRlnConfig3 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(3.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"), + ) else: - let wakuRlnConfig3 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9")) + let wakuRlnConfig3 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(3.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"), + ) await node3.mountRlnRelay(wakuRlnConfig3) await node3.start() @@ -429,12 +484,14 @@ procSuite "WakuNode - RLN relay": # wm3 points to the next epoch wm3 = WakuMessage(payload: "message 3".toBytes(), contentTopic: contentTopic) wm4 = WakuMessage(payload: "message 4".toBytes(), contentTopic: contentTopic) - + node3.wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr: raiseAssert $error node3.wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr: raiseAssert $error - node3.wakuRlnRelay.unsafeAppendRLNProof(wm3, time+float64(node3.wakuRlnRelay.rlnEpochSizeSec)).isOkOr: + node3.wakuRlnRelay.unsafeAppendRLNProof( + wm3, time + float64(node3.wakuRlnRelay.rlnEpochSizeSec) + ).isOkOr: raiseAssert $error # relay handler for node3 @@ -442,7 +499,9 @@ procSuite "WakuNode - RLN relay": var completionFut2 = newFuture[bool]() var completionFut3 = newFuture[bool]() var completionFut4 = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = debug "The received 
topic:", topic if topic == DefaultPubsubTopic: if msg == wm1: @@ -454,7 +513,6 @@ procSuite "WakuNode - RLN relay": if msg == wm4: completionFut4.complete(true) - # mount the relay handler for node3 node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) await sleepAsync(2000.millis) @@ -476,7 +534,8 @@ procSuite "WakuNode - RLN relay": res2 = await completionFut2.withTimeout(10.seconds) check: - (res1 and res2) == false # either of the wm1 and wm2 is found as spam hence not relayed + (res1 and res2) == false + # either of the wm1 and wm2 is found as spam hence not relayed (await completionFut3.withTimeout(10.seconds)) == true (await completionFut4.withTimeout(10.seconds)) == false @@ -485,7 +544,6 @@ procSuite "WakuNode - RLN relay": await node3.stop() asyncTest "clearNullifierLog: should clear epochs > MaxEpochGap": - let # publisher node nodeKey1 = generateSecp256k1Key() @@ -505,16 +563,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"), + ) else: - let wakuRlnConfig1 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10")) + let wakuRlnConfig1 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"), + ) await node1.mountRlnRelay(wakuRlnConfig1) await node1.start() @@ -524,16 +586,20 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode when defined(rln_v2): - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"), + ) else: - let wakuRlnConfig2 = WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11")) + let wakuRlnConfig2 = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(2.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"), + ) await node2.mountRlnRelay(wakuRlnConfig2) await node2.start() @@ -554,14 +620,18 @@ procSuite "WakuNode - RLN relay": raiseAssert $error node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr: raiseAssert $error - node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, time + float64(node1.wakuRlnRelay.rlnEpochSizeSec * 2)).isOkOr: + node1.wakuRlnRelay.unsafeAppendRLNProof( + wm3, time + float64(node1.wakuRlnRelay.rlnEpochSizeSec * 2) + ).isOkOr: raiseAssert $error # relay handler for node2 var completionFut1 = newFuture[bool]() var completionFut2 = newFuture[bool]() var completionFut3 = newFuture[bool]() - proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): 
Future[void] {.async, gcsafe.} = debug "The received topic:", topic if topic == DefaultPubsubTopic: if msg == wm1: diff --git a/tests/waku_rln_relay/utils.nim b/tests/waku_rln_relay/utils.nim index aab7579df..548414fb7 100644 --- a/tests/waku_rln_relay/utils.nim +++ b/tests/waku_rln_relay/utils.nim @@ -1,6 +1,8 @@ import web3, chronos, options, stint -proc deployContract*(web3: Web3, code: string, gasPrice = 0, contractInput = ""): Future[ReceiptObject] {.async.} = +proc deployContract*( + web3: Web3, code: string, gasPrice = 0, contractInput = "" +): Future[ReceiptObject] {.async.} = # the contract input is the encoded version of contract constructor's input # use nim-web3/encoding.nim module to find the appropriate encoding procedure for different argument types # e.g., consider the following contract constructor in solidity diff --git a/tests/waku_store/store_utils.nim b/tests/waku_store/store_utils.nim index 7ed346fe3..9f9fa7402 100644 --- a/tests/waku_store/store_utils.nim +++ b/tests/waku_store/store_utils.nim @@ -1,24 +1,14 @@ {.used.} -import - std/options, - chronos, - chronicles, - libp2p/crypto/crypto +import std/options, chronos, chronicles, libp2p/crypto/crypto import - ../../../waku/[ - node/peer_manager, - waku_core, - waku_store, - waku_store/client, - ], - ../testlib/[ - common, - wakucore - ] + ../../../waku/[node/peer_manager, waku_core, waku_store, waku_store/client], + ../testlib/[common, wakucore] -proc newTestWakuStore*(switch: Switch, handler: HistoryQueryHandler): Future[WakuStore] {.async.} = +proc newTestWakuStore*( + switch: Switch, handler: HistoryQueryHandler +): Future[WakuStore] {.async.} = let peerManager = PeerManager.new(switch) proto = WakuStore.new(peerManager, rng, handler) @@ -32,11 +22,12 @@ proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient = let peerManager = PeerManager.new(switch) WakuStoreClient.new(peerManager, rng) - -proc computeHistoryCursor*(pubsubTopic: PubsubTopic, message: WakuMessage): HistoryCursor = +proc computeHistoryCursor*( + pubsubTopic: PubsubTopic, message: WakuMessage +): HistoryCursor = HistoryCursor( pubsubTopic: pubsubTopic, senderTime: message.timestamp, storeTime: message.timestamp, - digest: waku_store.computeDigest(message) + digest: waku_store.computeDigest(message), ) diff --git a/tests/waku_store/test_client.nim b/tests/waku_store/test_client.nim index b8386dfeb..9b675106b 100644 --- a/tests/waku_store/test_client.nim +++ b/tests/waku_store/test_client.nim @@ -1,27 +1,11 @@ {.used.} -import - std/options, - testutils/unittests, - chronos, - chronicles, - libp2p/crypto/crypto - +import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto import - ../../../waku/[ - node/peer_manager, - waku_core, - waku_store, - waku_store/client, - common/paging - ], - ../testlib/[ - common, - wakucore, - testasync, - futures - ], + ../../../waku/ + [node/peer_manager, waku_core, waku_store, waku_store/client, common/paging], + ../testlib/[common, wakucore, testasync, futures], ./store_utils suite "Store Client": @@ -43,26 +27,24 @@ suite "Store Client": var clientPeerInfo {.threadvar.}: RemotePeerInfo asyncSetup: - message1 = fakeWakuMessage(contentTopic=DefaultContentTopic) - message2 = fakeWakuMessage(contentTopic=DefaultContentTopic) - message3 = fakeWakuMessage(contentTopic=DefaultContentTopic) + message1 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message2 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message3 = fakeWakuMessage(contentTopic = DefaultContentTopic) messageSeq 
= @[message1, message2, message3] handlerFuture = newHistoryFuture() - handler = proc( - req: HistoryQuery - ): Future[HistoryResult] {.async, gcsafe.} = + handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} = handlerFuture.complete(req) return ok(HistoryResponse(messages: messageSeq)) historyQuery = HistoryQuery( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], - direction: PagingDirection.FORWARD + direction: PagingDirection.FORWARD, ) serverSwitch = newTestSwitch() clientSwitch = newTestSwitch() - server = await newTestWakuStore(serverSwitch, handler=handler) + server = await newTestWakuStore(serverSwitch, handler = handler) client = newTestWakuStoreClient(clientSwitch) await allFutures(serverSwitch.start(), clientSwitch.start()) @@ -72,11 +54,11 @@ suite "Store Client": asyncTeardown: await allFutures(serverSwitch.stop(), clientSwitch.stop()) - + suite "HistoryQuery Creation and Execution": asyncTest "Valid Queries": # When a valid query is sent to the server - let queryResponse = await client.query(historyQuery, peer=serverPeerInfo) + let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) # Then the query is processed successfully assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -90,42 +72,42 @@ suite "Store Client": # proper coverage we'd need an example implementation. # Given some invalid queries - let + let invalidQuery1 = HistoryQuery( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[], - direction: PagingDirection.FORWARD + direction: PagingDirection.FORWARD, ) invalidQuery2 = HistoryQuery( pubsubTopic: PubsubTopic.none(), contentTopics: @[DefaultContentTopic], - direction: PagingDirection.FORWARD + direction: PagingDirection.FORWARD, ) invalidQuery3 = HistoryQuery( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], - pageSize: 0 + pageSize: 0, ) invalidQuery4 = HistoryQuery( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], - pageSize: 0 + pageSize: 0, ) invalidQuery5 = HistoryQuery( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], startTime: some(0.Timestamp), - endTime: some(0.Timestamp) + endTime: some(0.Timestamp), ) invalidQuery6 = HistoryQuery( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], startTime: some(0.Timestamp), - endTime: some(-1.Timestamp) + endTime: some(-1.Timestamp), ) # When the query is sent to the server - let queryResponse1 = await client.query(invalidQuery1, peer=serverPeerInfo) + let queryResponse1 = await client.query(invalidQuery1, peer = serverPeerInfo) # Then the query is not processed assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -135,47 +117,47 @@ suite "Store Client": # When the query is sent to the server handlerFuture = newHistoryFuture() - let queryResponse2 = await client.query(invalidQuery2, peer=serverPeerInfo) + let queryResponse2 = await client.query(invalidQuery2, peer = serverPeerInfo) # Then the query is not processed assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) check: handlerFuture.read() == invalidQuery2 queryResponse2.get().messages == messageSeq - + # When the query is sent to the server handlerFuture = newHistoryFuture() - let queryResponse3 = await client.query(invalidQuery3, peer=serverPeerInfo) + let queryResponse3 = await client.query(invalidQuery3, peer = serverPeerInfo) # Then the query is not processed assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) check: handlerFuture.read() == invalidQuery3 
queryResponse3.get().messages == messageSeq - + # When the query is sent to the server handlerFuture = newHistoryFuture() - let queryResponse4 = await client.query(invalidQuery4, peer=serverPeerInfo) + let queryResponse4 = await client.query(invalidQuery4, peer = serverPeerInfo) # Then the query is not processed assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) check: handlerFuture.read() == invalidQuery4 queryResponse4.get().messages == messageSeq - + # When the query is sent to the server handlerFuture = newHistoryFuture() - let queryResponse5 = await client.query(invalidQuery5, peer=serverPeerInfo) + let queryResponse5 = await client.query(invalidQuery5, peer = serverPeerInfo) # Then the query is not processed assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) check: handlerFuture.read() == invalidQuery5 queryResponse5.get().messages == messageSeq - + # When the query is sent to the server handlerFuture = newHistoryFuture() - let queryResponse6 = await client.query(invalidQuery6, peer=serverPeerInfo) + let queryResponse6 = await client.query(invalidQuery6, peer = serverPeerInfo) # Then the query is not processed assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -186,7 +168,7 @@ suite "Store Client": suite "Verification of HistoryResponse Payload": asyncTest "Positive Responses": # When a valid query is sent to the server - let queryResponse = await client.query(historyQuery, peer=serverPeerInfo) + let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) # Then the query is processed successfully, and is of the expected type check: @@ -198,9 +180,9 @@ suite "Store Client": let otherServerSwitch = newTestSwitch() otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo() - + # When a query is sent to the stopped peer - let queryResponse = await client.query(historyQuery, peer=otherServerPeerInfo) + let queryResponse = await client.query(historyQuery, peer = otherServerPeerInfo) # Then the query is not processed check: diff --git a/tests/waku_store/test_resume.nim b/tests/waku_store/test_resume.nim index 9ec58eb14..c15aa175b 100644 --- a/tests/waku_store/test_resume.nim +++ b/tests/waku_store/test_resume.nim @@ -19,46 +19,99 @@ import ./testlib/common, ./testlib/switch - procSuite "Waku Store - resume store": ## Fixtures let storeA = block: - let store = newTestMessageStore() - let msgList = @[ - fakeWakuMessage(payload= @[byte 0], contentTopic=ContentTopic("2"), ts=ts(0)), - fakeWakuMessage(payload= @[byte 1], contentTopic=ContentTopic("1"), ts=ts(1)), - fakeWakuMessage(payload= @[byte 2], contentTopic=ContentTopic("2"), ts=ts(2)), - fakeWakuMessage(payload= @[byte 3], contentTopic=ContentTopic("1"), ts=ts(3)), - fakeWakuMessage(payload= @[byte 4], contentTopic=ContentTopic("2"), ts=ts(4)), - fakeWakuMessage(payload= @[byte 5], contentTopic=ContentTopic("1"), ts=ts(5)), - fakeWakuMessage(payload= @[byte 6], contentTopic=ContentTopic("2"), ts=ts(6)), - fakeWakuMessage(payload= @[byte 7], contentTopic=ContentTopic("1"), ts=ts(7)), - fakeWakuMessage(payload= @[byte 8], contentTopic=ContentTopic("2"), ts=ts(8)), - fakeWakuMessage(payload= @[byte 9], contentTopic=ContentTopic("1"), ts=ts(9)) + let store = newTestMessageStore() + let msgList = + @[ + fakeWakuMessage( + payload = @[byte 0], contentTopic = ContentTopic("2"), ts = ts(0) + ), + fakeWakuMessage( + payload = @[byte 1], contentTopic = ContentTopic("1"), ts = ts(1) + ), + fakeWakuMessage( + payload = @[byte 2], contentTopic = ContentTopic("2"), ts = ts(2) + ), + fakeWakuMessage( + payload = 
@[byte 3], contentTopic = ContentTopic("1"), ts = ts(3) + ), + fakeWakuMessage( + payload = @[byte 4], contentTopic = ContentTopic("2"), ts = ts(4) + ), + fakeWakuMessage( + payload = @[byte 5], contentTopic = ContentTopic("1"), ts = ts(5) + ), + fakeWakuMessage( + payload = @[byte 6], contentTopic = ContentTopic("2"), ts = ts(6) + ), + fakeWakuMessage( + payload = @[byte 7], contentTopic = ContentTopic("1"), ts = ts(7) + ), + fakeWakuMessage( + payload = @[byte 8], contentTopic = ContentTopic("2"), ts = ts(8) + ), + fakeWakuMessage( + payload = @[byte 9], contentTopic = ContentTopic("1"), ts = ts(9) + ), ] - for msg in msgList: - require store.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp).isOk() + for msg in msgList: + require store + .put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + .isOk() - store + store let storeB = block: - let store = newTestMessageStore() - let msgList2 = @[ - fakeWakuMessage(payload= @[byte 0], contentTopic=ContentTopic("2"), ts=ts(0)), - fakeWakuMessage(payload= @[byte 11], contentTopic=ContentTopic("1"), ts=ts(1)), - fakeWakuMessage(payload= @[byte 12], contentTopic=ContentTopic("2"), ts=ts(2)), - fakeWakuMessage(payload= @[byte 3], contentTopic=ContentTopic("1"), ts=ts(3)), - fakeWakuMessage(payload= @[byte 4], contentTopic=ContentTopic("2"), ts=ts(4)), - fakeWakuMessage(payload= @[byte 5], contentTopic=ContentTopic("1"), ts=ts(5)), - fakeWakuMessage(payload= @[byte 13], contentTopic=ContentTopic("2"), ts=ts(6)), - fakeWakuMessage(payload= @[byte 14], contentTopic=ContentTopic("1"), ts=ts(7)) + let store = newTestMessageStore() + let msgList2 = + @[ + fakeWakuMessage( + payload = @[byte 0], contentTopic = ContentTopic("2"), ts = ts(0) + ), + fakeWakuMessage( + payload = @[byte 11], contentTopic = ContentTopic("1"), ts = ts(1) + ), + fakeWakuMessage( + payload = @[byte 12], contentTopic = ContentTopic("2"), ts = ts(2) + ), + fakeWakuMessage( + payload = @[byte 3], contentTopic = ContentTopic("1"), ts = ts(3) + ), + fakeWakuMessage( + payload = @[byte 4], contentTopic = ContentTopic("2"), ts = ts(4) + ), + fakeWakuMessage( + payload = @[byte 5], contentTopic = ContentTopic("1"), ts = ts(5) + ), + fakeWakuMessage( + payload = @[byte 13], contentTopic = ContentTopic("2"), ts = ts(6) + ), + fakeWakuMessage( + payload = @[byte 14], contentTopic = ContentTopic("1"), ts = ts(7) + ), ] - for msg in msgList2: - require store.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp).isOk() + for msg in msgList2: + require store + .put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + .isOk() - store + store asyncTest "multiple query to multiple peers with pagination": ## Setup @@ -70,15 +123,16 @@ procSuite "Waku Store - resume store": await allFutures(serverSwitchA.start(), serverSwitchB.start(), clientSwitch.start()) let - serverA = await newTestWakuStoreNode(serverSwitchA, store=testStore) - serverB = await newTestWakuStoreNode(serverSwitchB, store=testStore) + serverA = await newTestWakuStoreNode(serverSwitchA, store = testStore) + serverB = await newTestWakuStoreNode(serverSwitchB, store = testStore) client = newTestWakuStoreClient(clientSwitch) ## Given - let peers = @[ - serverSwitchA.peerInfo.toRemotePeerInfo(), - serverSwitchB.peerInfo.toRemotePeerInfo() - ] + let peers = + @[ + 
serverSwitchA.peerInfo.toRemotePeerInfo(), + serverSwitchB.peerInfo.toRemotePeerInfo(), + ] let req = HistoryQuery(contentTopics: @[DefaultContentTopic], pageSize: 5) ## When @@ -104,7 +158,7 @@ procSuite "Waku Store - resume store": await allFutures(serverSwitch.start(), clientSwitch.start()) let - server = await newTestWakuStore(serverSwitch, store=storeA) + server = await newTestWakuStore(serverSwitch, store = storeA) client = await newTestWakuStore(clientSwitch) client.setPeer(serverSwitch.peerInfo.toRemotePeerInfo()) @@ -157,16 +211,17 @@ procSuite "Waku Store - resume store": await allFutures(serverASwitch.start(), serverBSwitch.start(), clientSwitch.start()) let - serverA = await newTestWakuStore(serverASwitch, store=storeA) - serverB = await newTestWakuStore(serverBSwitch, store=storeB) + serverA = await newTestWakuStore(serverASwitch, store = storeA) + serverB = await newTestWakuStore(serverBSwitch, store = storeB) client = await newTestWakuStore(clientSwitch) ## Given - let peers = @[ - offlineSwitch.peerInfo.toRemotePeerInfo(), - serverASwitch.peerInfo.toRemotePeerInfo(), - serverBSwitch.peerInfo.toRemotePeerInfo() - ] + let peers = + @[ + offlineSwitch.peerInfo.toRemotePeerInfo(), + serverASwitch.peerInfo.toRemotePeerInfo(), + serverBSwitch.peerInfo.toRemotePeerInfo(), + ] ## When let res = await client.resume(some(peers)) @@ -187,8 +242,6 @@ procSuite "Waku Store - resume store": ## Cleanup await allFutures(serverASwitch.stop(), serverBSwitch.stop(), clientSwitch.stop()) - - suite "WakuNode - waku store": asyncTest "Resume proc fetches the history": ## Setup @@ -205,8 +258,8 @@ suite "WakuNode - waku store": await server.mountStore() let clientStore = StoreQueueRef.new() - await client.mountStore(store=clientStore) - client.mountStoreClient(store=clientStore) + await client.mountStore(store = clientStore) + client.mountStoreClient(store = clientStore) ## Given let message = fakeWakuMessage() @@ -233,18 +286,24 @@ suite "WakuNode - waku store": client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) await allFutures(server.start(), client.start()) - await server.mountStore(store=StoreQueueRef.new()) + await server.mountStore(store = StoreQueueRef.new()) let clientStore = StoreQueueRef.new() - await client.mountStore(store=clientStore) - client.mountStoreClient(store=clientStore) + await client.mountStore(store = clientStore) + client.mountStoreClient(store = clientStore) ## Given let timeOrigin = now() let - msg1 = fakeWakuMessage(payload="hello world1", ts=(timeOrigin + getNanoSecondTime(1))) - msg2 = fakeWakuMessage(payload="hello world2", ts=(timeOrigin + getNanoSecondTime(2))) - msg3 = fakeWakuMessage(payload="hello world3", ts=(timeOrigin + getNanoSecondTime(3))) + msg1 = fakeWakuMessage( + payload = "hello world1", ts = (timeOrigin + getNanoSecondTime(1)) + ) + msg2 = fakeWakuMessage( + payload = "hello world2", ts = (timeOrigin + getNanoSecondTime(2)) + ) + msg3 = fakeWakuMessage( + payload = "hello world3", ts = (timeOrigin + getNanoSecondTime(3)) + ) require server.wakuStore.store.put(DefaultPubsubTopic, msg1).isOk() require server.wakuStore.store.put(DefaultPubsubTopic, msg2).isOk() @@ -253,8 +312,12 @@ suite "WakuNode - waku store": let receivedTime3 = now() + getNanosecondTime(10) digest3 = computeDigest(msg3) - require server.wakuStore.store.put(DefaultPubsubTopic, msg3, digest3, receivedTime3).isOk() - require client.wakuStore.store.put(DefaultPubsubTopic, msg3, digest3, receivedTime3).isOk() + require server.wakuStore.store + 
.put(DefaultPubsubTopic, msg3, digest3, receivedTime3) + .isOk() + require client.wakuStore.store + .put(DefaultPubsubTopic, msg3, digest3, receivedTime3) + .isOk() let serverPeer = server.peerInfo.toRemotePeerInfo() diff --git a/tests/waku_store/test_rpc_codec.nim b/tests/waku_store/test_rpc_codec.nim index c06354297..d8b3e28a6 100644 --- a/tests/waku_store/test_rpc_codec.nim +++ b/tests/waku_store/test_rpc_codec.nim @@ -1,9 +1,6 @@ {.used.} -import - std/options, - testutils/unittests, - chronos +import std/options, testutils/unittests, chronos import ../../../waku/common/protobuf, ../../../waku/common/paging, @@ -13,13 +10,12 @@ import ../testlib/common, ../testlib/wakucore - - procSuite "Waku Store - RPC codec": - test "PagingIndexRPC protobuf codec": ## Given - let index = PagingIndexRPC.compute(fakeWakuMessage(), receivedTime=ts(), pubsubTopic=DefaultPubsubTopic) + let index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) ## When let encodedIndex = index.encode() @@ -53,8 +49,14 @@ procSuite "Waku Store - RPC codec": test "PagingInfoRPC protobuf codec": ## Given let - index = PagingIndexRPC.compute(fakeWakuMessage(), receivedTime=ts(), pubsubTopic=DefaultPubsubTopic) - pagingInfo = PagingInfoRPC(pageSize: some(1'u64), cursor: some(index), direction: some(PagingDirection.FORWARD)) + index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.FORWARD), + ) ## When let pb = pagingInfo.encode() @@ -88,13 +90,23 @@ procSuite "Waku Store - RPC codec": test "HistoryQueryRPC protobuf codec": ## Given let - index = PagingIndexRPC.compute(fakeWakuMessage(), receivedTime=ts(), pubsubTopic=DefaultPubsubTopic) - pagingInfo = PagingInfoRPC(pageSize: some(1'u64), cursor: some(index), direction: some(PagingDirection.BACKWARD)) + index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.BACKWARD), + ) query = HistoryQueryRPC( - contentFilters: @[HistoryContentFilterRPC(contentTopic: DefaultContentTopic), HistoryContentFilterRPC(contentTopic: DefaultContentTopic)], + contentFilters: + @[ + HistoryContentFilterRPC(contentTopic: DefaultContentTopic), + HistoryContentFilterRPC(contentTopic: DefaultContentTopic), + ], pagingInfo: some(pagingInfo), startTime: some(Timestamp(10)), - endTime: some(Timestamp(11)) + endTime: some(Timestamp(11)), ) ## When @@ -129,9 +141,19 @@ procSuite "Waku Store - RPC codec": ## Given let message = fakeWakuMessage() - index = PagingIndexRPC.compute(message, receivedTime=ts(), pubsubTopic=DefaultPubsubTopic) - pagingInfo = PagingInfoRPC(pageSize: some(1'u64), cursor: some(index), direction: some(PagingDirection.BACKWARD)) - res = HistoryResponseRPC(messages: @[message], pagingInfo: some(pagingInfo), error: HistoryResponseErrorRPC.INVALID_CURSOR) + index = PagingIndexRPC.compute( + message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.BACKWARD), + ) + res = HistoryResponseRPC( + messages: @[message], + pagingInfo: some(pagingInfo), + error: HistoryResponseErrorRPC.INVALID_CURSOR, + ) ## When let pb = res.encode() diff --git a/tests/waku_store/test_waku_store.nim 
b/tests/waku_store/test_waku_store.nim index 0436ba6a4..da70fa1c3 100644 --- a/tests/waku_store/test_waku_store.nim +++ b/tests/waku_store/test_waku_store.nim @@ -1,29 +1,14 @@ {.used.} -import - std/options, - testutils/unittests, - chronos, - chronicles, - libp2p/crypto/crypto +import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto import - ../../../waku/[ - common/paging, - node/peer_manager, - waku_core, - waku_store, - waku_store/client, - ], - ../testlib/[ - common, - wakucore - ], + ../../../waku/ + [common/paging, node/peer_manager, waku_core, waku_store, waku_store/client], + ../testlib/[common, wakucore], ./store_utils - suite "Waku Store - query handler": - asyncTest "history query handler should be called": ## Setup let @@ -35,22 +20,26 @@ suite "Waku Store - query handler": ## Given let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() - let msg = fakeWakuMessage(contentTopic=DefaultContentTopic) + let msg = fakeWakuMessage(contentTopic = DefaultContentTopic) var queryHandlerFut = newFuture[(HistoryQuery)]() - let queryHandler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} = - queryHandlerFut.complete(req) - return ok(HistoryResponse(messages: @[msg])) + let queryHandler = proc( + req: HistoryQuery + ): Future[HistoryResult] {.async, gcsafe.} = + queryHandlerFut.complete(req) + return ok(HistoryResponse(messages: @[msg])) let - server = await newTestWakuStore(serverSwitch, handler=queryhandler) + server = await newTestWakuStore(serverSwitch, handler = queryhandler) client = newTestWakuStoreClient(clientSwitch) - let req = HistoryQuery(contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD) + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD + ) ## When - let queryRes = await client.query(req, peer=serverPeerInfo) + let queryRes = await client.query(req, peer = serverPeerInfo) ## Then check: @@ -81,18 +70,22 @@ suite "Waku Store - query handler": let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() var queryHandlerFut = newFuture[(HistoryQuery)]() - let queryHandler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} = - queryHandlerFut.complete(req) - return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST)) + let queryHandler = proc( + req: HistoryQuery + ): Future[HistoryResult] {.async, gcsafe.} = + queryHandlerFut.complete(req) + return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST)) let - server = await newTestWakuStore(serverSwitch, handler=queryhandler) + server = await newTestWakuStore(serverSwitch, handler = queryhandler) client = newTestWakuStoreClient(clientSwitch) - let req = HistoryQuery(contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD) + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD + ) ## When - let queryRes = await client.query(req, peer=serverPeerInfo) + let queryRes = await client.query(req, peer = serverPeerInfo) ## Then check: @@ -108,4 +101,4 @@ suite "Waku Store - query handler": error.kind == HistoryErrorKind.BAD_REQUEST ## Cleanup - await allFutures(serverSwitch.stop(), clientSwitch.stop()) \ No newline at end of file + await allFutures(serverSwitch.stop(), clientSwitch.stop()) diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim index 9ef9aadab..e404d9165 100644 --- a/tests/waku_store/test_wakunode_store.nim +++ b/tests/waku_store/test_wakunode_store.nim @@ -29,22 +29,22 @@ 
import ../testlib/wakucore, ../testlib/wakunode - procSuite "WakuNode - Store": ## Fixtures let timeOrigin = now() - let msgListA = @[ - fakeWakuMessage(@[byte 00], ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 01], ts=ts(10, timeOrigin)), - fakeWakuMessage(@[byte 02], ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 03], ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 04], ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 05], ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 06], ts=ts(60, timeOrigin)), - fakeWakuMessage(@[byte 07], ts=ts(70, timeOrigin)), - fakeWakuMessage(@[byte 08], ts=ts(80, timeOrigin)), - fakeWakuMessage(@[byte 09], ts=ts(90, timeOrigin)) - ] + let msgListA = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] let archiveA = block: let driver = newSqliteArchiveDriver() @@ -52,7 +52,9 @@ procSuite "WakuNode - Store": for msg in msgListA: let msg_digest = waku_archive.computeDigest(msg) let msg_hash = computeMessageHash(DefaultPubsubTopic, msg) - require (waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp)).isOk() + require ( + waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp) + ).isOk() driver @@ -78,7 +80,7 @@ procSuite "WakuNode - Store": let serverPeer = server.peerInfo.toRemotePeerInfo() ## When - let queryRes = waitFor client.query(req, peer=serverPeer) + let queryRes = waitFor client.query(req, peer = serverPeer) ## Then check queryRes.isOk() @@ -108,7 +110,11 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Given - let req = HistoryQuery(contentTopics: @[DefaultContentTopic], pageSize: 7, direction: PagingDirection.FORWARD) + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + pageSize: 7, + direction: PagingDirection.FORWARD, + ) let serverPeer = server.peerInfo.toRemotePeerInfo() ## When @@ -117,8 +123,8 @@ procSuite "WakuNode - Store": var pages = newSeq[seq[WakuMessage]](2) var cursors = newSeq[Option[HistoryCursor]](2) - for i in 0..<2: - let res = waitFor client.query(nextReq, peer=serverPeer) + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) require res.isOk() # Keep query response content @@ -135,8 +141,8 @@ procSuite "WakuNode - Store": cursors[1] == none(HistoryCursor) check: - pages[0] == msgListA[0..6] - pages[1] == msgListA[7..9] + pages[0] == msgListA[0 .. 6] + pages[1] == msgListA[7 .. 
9] # Cleanup waitFor allFutures(client.stop(), server.stop()) @@ -159,7 +165,11 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Given - let req = HistoryQuery(contentTopics: @[DefaultContentTopic], pageSize: 7, direction: PagingDirection.BACKWARD) + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + pageSize: 7, + direction: PagingDirection.BACKWARD, + ) let serverPeer = server.peerInfo.toRemotePeerInfo() ## When @@ -168,8 +178,8 @@ procSuite "WakuNode - Store": var pages = newSeq[seq[WakuMessage]](2) var cursors = newSeq[Option[HistoryCursor]](2) - for i in 0..<2: - let res = waitFor client.query(nextReq, peer=serverPeer) + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) require res.isOk() # Keep query response content @@ -186,8 +196,8 @@ procSuite "WakuNode - Store": cursors[1] == none(HistoryCursor) check: - pages[0] == msgListA[3..9] - pages[1] == msgListA[0..2] + pages[0] == msgListA[3 .. 9] + pages[1] == msgListA[0 .. 2] # Cleanup waitFor allFutures(client.stop(), server.stop()) @@ -197,7 +207,8 @@ procSuite "WakuNode - Store": ## Setup let filterSourceKey = generateSecp256k1Key() - filterSource = newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0)) + filterSource = + newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0)) serverKey = generateSecp256k1Key() server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) clientKey = generateSecp256k1Key() @@ -224,10 +235,17 @@ procSuite "WakuNode - Store": ## Then let filterFut = newFuture[(PubsubTopic, WakuMessage)]() - proc filterHandler(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async, gcsafe, closure.} = + proc filterHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = filterFut.complete((pubsubTopic, msg)) - waitFor server.legacyFilterSubscribe(some(DefaultPubsubTopic), DefaultContentTopic, filterHandler, peer=filterSourcePeer) + waitFor server.legacyFilterSubscribe( + some(DefaultPubsubTopic), + DefaultContentTopic, + filterHandler, + peer = filterSourcePeer, + ) waitFor sleepAsync(100.millis) @@ -237,7 +255,9 @@ procSuite "WakuNode - Store": # Wait for the server filter to receive the push message require waitFor filterFut.withTimeout(5.seconds) - let res = waitFor client.query(HistoryQuery(contentTopics: @[DefaultContentTopic]), peer=serverPeer) + let res = waitFor client.query( + HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer + ) ## Then check res.isOk() @@ -273,20 +293,23 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Forcing a bad cursor with empty digest data - var data: array[32, byte] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + var data: array[32, byte] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + ] let cursor = HistoryCursor( - pubsubTopic: "pubsubTopic", - senderTime: now(), - storeTime: now(), - digest: waku_archive.MessageDigest(data: data) - ) + pubsubTopic: "pubsubTopic", + senderTime: now(), + storeTime: now(), + digest: waku_archive.MessageDigest(data: data), + ) ## Given let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor)) let serverPeer = server.peerInfo.toRemotePeerInfo() ## When - let queryRes = waitFor client.query(req, peer=serverPeer) + let queryRes = waitFor client.query(req, peer = serverPeer) ## Then check not queryRes.isOk() diff --git a/tests/wakunode2/test_all.nim 
b/tests/wakunode2/test_all.nim index 911c6a81d..76bc9aaad 100644 --- a/tests/wakunode2/test_all.nim +++ b/tests/wakunode2/test_all.nim @@ -1,5 +1,3 @@ {.used.} -import - ./test_app, - ./test_validators +import ./test_app, ./test_validators diff --git a/tests/wakunode2/test_app.nim b/tests/wakunode2/test_app.nim index 4e6283598..c61cc1b3c 100644 --- a/tests/wakunode2/test_app.nim +++ b/tests/wakunode2/test_app.nim @@ -9,13 +9,9 @@ import libp2p/crypto/secp, libp2p/multiaddress, libp2p/switch -import - ../testlib/common, - ../testlib/wakucore, - ../testlib/wakunode +import ../testlib/common, ../testlib/wakucore, ../testlib/wakunode -include - ../../apps/wakunode2/app +include ../../apps/wakunode2/app suite "Wakunode2 - App": test "compilation version should be reported": @@ -23,7 +19,7 @@ suite "Wakunode2 - App": let conf = defaultTestWakuNodeConf() let wakunode2 = App.init(conf).valueOr: - raiseAssert error + raiseAssert error ## When let version = wakunode2.version @@ -39,7 +35,7 @@ suite "Wakunode2 - App initialization": conf.peerPersistence = true let wakunode2 = App.init(conf).valueOr: - raiseAssert error + raiseAssert error check: not wakunode2.node.peerManager.storage.isNil() @@ -83,7 +79,7 @@ suite "Wakunode2 - App initialization": raiseAssert error ## Then - let + let node = wakunode2.node typedNodeEnr = node.enr.toTypedRecord() diff --git a/tests/wakunode2/test_validators.nim b/tests/wakunode2/test_validators.nim index 2d6c826d6..233103675 100644 --- a/tests/wakunode2/test_validators.nim +++ b/tests/wakunode2/test_validators.nim @@ -23,14 +23,17 @@ import ../testlib/wakunode suite "WakuNode2 - Validators": - asyncTest "Spam protected topic accepts signed messages": # Create 5 nodes - let nodes = toSeq(0..<5).mapIt(newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))) + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) # Protected topic and key to sign let spamProtectedTopic = PubSubTopic("some-spam-protected-topic") - let secretKey = SkSecretKey.fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6").expect("valid key") + let secretKey = SkSecretKey + .fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6") + .expect("valid key") let publicKey = secretKey.toPublicKey() let topicsPrivateKeys = {spamProtectedTopic: secretKey}.toTable let topicsPublicKeys = {spamProtectedTopic: publicKey}.toTable @@ -43,17 +46,19 @@ suite "WakuNode2 - Validators": # Add signed message validator to all nodes. 
They will only route signed messages for node in nodes: - var signedTopics : seq[ProtectedTopic] + var signedTopics: seq[ProtectedTopic] for topic, publicKey in topicsPublicKeys: signedTopics.add(ProtectedTopic(topic: topic, key: publicKey)) node.wakuRelay.addSignedTopicsValidator(signedTopics) # Connect the nodes in a full mesh - for i in 0..<5: - for j in 0..<5: + for i in 0 ..< 5: + for j in 0 ..< 5: if i == j: continue - let connOk = await nodes[i].peerManager.connectRelay(nodes[j].switch.peerInfo.toRemotePeerInfo()) + let connOk = await nodes[i].peerManager.connectRelay( + nodes[j].switch.peerInfo.toRemotePeerInfo() + ) require connOk # Connection triggers different actions, wait for them @@ -64,18 +69,24 @@ suite "WakuNode2 - Validators": msgReceived += 1 # Subscribe all nodes to the same topic/handler - for node in nodes: discard node.wakuRelay.subscribe(spamProtectedTopic, handler) + for node in nodes: + discard node.wakuRelay.subscribe(spamProtectedTopic, handler) await sleepAsync(500.millis) # Each node publishes 10 signed messages - for i in 0..<5: - for j in 0..<10: + for i in 0 ..< 5: + for j in 0 ..< 10: var msg = WakuMessage( - payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic, - version: 2, timestamp: now(), ephemeral: true) + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedTopic, + version: 2, + timestamp: now(), + ephemeral: true, + ) # Include signature - msg.meta = secretKey.sign(SkMessage(spamProtectedTopic.msgHash(msg))).toRaw()[0..63] + msg.meta = + secretKey.sign(SkMessage(spamProtectedTopic.msgHash(msg))).toRaw()[0 .. 63] discard await nodes[i].publish(some(spamProtectedTopic), msg) @@ -87,7 +98,7 @@ suite "WakuNode2 - Validators": msgReceived == 250 # No invalid messages were received by any peer - for i in 0..<5: + for i in 0 ..< 5: for k, v in nodes[i].wakuRelay.peerStats.mpairs: check: v.topicInfos[spamProtectedTopic].invalidMessageDeliveries == 0.0 @@ -97,17 +108,23 @@ suite "WakuNode2 - Validators": asyncTest "Spam protected topic rejects non-signed/wrongly-signed/no-timestamp messages": # Create 5 nodes - let nodes = toSeq(0..<5).mapIt(newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))) + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) # Protected topic and key to sign let spamProtectedTopic = PubSubTopic("some-spam-protected-topic") - let secretKey = SkSecretKey.fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6").expect("valid key") + let secretKey = SkSecretKey + .fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6") + .expect("valid key") let publicKey = secretKey.toPublicKey() let topicsPrivateKeys = {spamProtectedTopic: secretKey}.toTable let topicsPublicKeys = {spamProtectedTopic: publicKey}.toTable # Non whitelisted secret key - let wrongSecretKey = SkSecretKey.fromHex("32ad0cc8edeb9f8a3e8635c5fe5bd200b9247a33da5e7171bd012691805151f3").expect("valid key") + let wrongSecretKey = SkSecretKey + .fromHex("32ad0cc8edeb9f8a3e8635c5fe5bd200b9247a33da5e7171bd012691805151f3") + .expect("valid key") # Start all the nodes and mount relay with protected topic await allFutures(nodes.mapIt(it.start())) @@ -117,17 +134,19 @@ suite "WakuNode2 - Validators": # Add signed message validator to all nodes. 
They will only route signed messages
    for node in nodes:
-      var signedTopics : seq[ProtectedTopic]
+      var signedTopics: seq[ProtectedTopic]
      for topic, publicKey in topicsPublicKeys:
        signedTopics.add(ProtectedTopic(topic: topic, key: publicKey))
 
      node.wakuRelay.addSignedTopicsValidator(signedTopics)
 
    # Connect the nodes in a full mesh
-    for i in 0..<5:
-      for j in 0..<5:
+    for i in 0 ..< 5:
+      for j in 0 ..< 5:
        if i == j:
          continue
-        let connOk = await nodes[i].peerManager.connectRelay(nodes[j].switch.peerInfo.toRemotePeerInfo())
+        let connOk = await nodes[i].peerManager.connectRelay(
+          nodes[j].switch.peerInfo.toRemotePeerInfo()
+        )
        require connOk
 
    var msgReceived = 0
@@ -138,69 +157,92 @@
    await sleepAsync(500.millis)
 
    # Subscribe all nodes to the same topic/handler
-    for node in nodes: discard node.wakuRelay.subscribe(spamProtectedTopic, handler)
+    for node in nodes:
+      discard node.wakuRelay.subscribe(spamProtectedTopic, handler)
    await sleepAsync(500.millis)
 
    # Each node sends 5 messages, signed but with a non-whitelisted key (total = 25)
-    for i in 0..<5:
-      for j in 0..<5:
+    for i in 0 ..< 5:
+      for j in 0 ..< 5:
        var msg = WakuMessage(
-          payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic,
-          version: 2, timestamp: now(), ephemeral: true)
+          payload: urandom(1 * (10 ^ 3)),
+          contentTopic: spamProtectedTopic,
+          version: 2,
+          timestamp: now(),
+          ephemeral: true,
+        )
 
        # Sign the message with a wrong key
-        msg.meta = wrongSecretKey.sign(SkMessage(spamProtectedTopic.msgHash(msg))).toRaw()[0..63]
+        msg.meta = wrongSecretKey.sign(SkMessage(spamProtectedTopic.msgHash(msg))).toRaw()[
+          0 .. 63
+        ]
        discard await nodes[i].publish(some(spamProtectedTopic), msg)
 
    # Each node sends 5 messages that are not signed (total = 25)
-    for i in 0..<5:
-      for j in 0..<5:
+    for i in 0 ..< 5:
+      for j in 0 ..< 5:
        let unsignedMessage = WakuMessage(
-          payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic,
-          version: 2, timestamp: now(), ephemeral: true)
+          payload: urandom(1 * (10 ^ 3)),
+          contentTopic: spamProtectedTopic,
+          version: 2,
+          timestamp: now(),
+          ephemeral: true,
+        )
        discard await nodes[i].publish(some(spamProtectedTopic), unsignedMessage)
 
    # Each node sends 5 messages that don't contain a timestamp (total = 25)
-    for i in 0..<5:
-      for j in 0..<5:
+    for i in 0 ..< 5:
+      for j in 0 ..< 5:
        let unsignedMessage = WakuMessage(
-          payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic,
-          version: 2, timestamp: 0, ephemeral: true)
+          payload: urandom(1 * (10 ^ 3)),
+          contentTopic: spamProtectedTopic,
+          version: 2,
+          timestamp: 0,
+          ephemeral: true,
+        )
        discard await nodes[i].publish(some(spamProtectedTopic), unsignedMessage)
 
    # Each node sends 5 messages way BEFORE the current timestamp (total = 25)
-    for i in 0..<5:
-      for j in 0..<5:
-        let beforeTimestamp = now() - getNanosecondTime(6*60)
+    for i in 0 ..< 5:
+      for j in 0 ..< 5:
+        let beforeTimestamp = now() - getNanosecondTime(6 * 60)
        let unsignedMessage = WakuMessage(
-          payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic,
-          version: 2, timestamp: beforeTimestamp, ephemeral: true)
+          payload: urandom(1 * (10 ^ 3)),
+          contentTopic: spamProtectedTopic,
+          version: 2,
+          timestamp: beforeTimestamp,
+          ephemeral: true,
+        )
        discard await nodes[i].publish(some(spamProtectedTopic), unsignedMessage)
 
    # Each node sends 5 messages way LATER than the current timestamp (total = 25)
-    for i in 0..<5:
-      for j in 0..<5:
-        let afterTimestamp = now() - getNanosecondTime(6*60)
+    for i in 0 ..< 5:
+      for j in 0 ..< 5:
+        let afterTimestamp = now() + getNanosecondTime(6 * 60)
        let unsignedMessage = WakuMessage(
-          payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic,
-          version: 2, timestamp: afterTimestamp, ephemeral: true)
+          payload: urandom(1 * (10 ^ 3)),
+          contentTopic: spamProtectedTopic,
+          version: 2,
+          timestamp: afterTimestamp,
+          ephemeral: true,
+        )
        discard await nodes[i].publish(some(spamProtectedTopic), unsignedMessage)
 
    # Since we have a full mesh with 5 nodes and each one publishes 25+25+25+25+25 msgs
    # there are 625 messages being sent.
    # 125 are received ok in the handler (first hop)
    # 500 are wrong so rejected (rejected, not relayed)
-
+
    var msgRejected = 0
-
+
    # Active wait for the messages to be delivered across the mesh
-    for i in 0..<100:
+    for i in 0 ..< 100:
      msgRejected = 0
-      for i in 0..<5:
+      for i in 0 ..< 5:
        for k, v in nodes[i].wakuRelay.peerStats.mpairs:
          msgRejected += v.topicInfos[spamProtectedTopic].invalidMessageDeliveries.int
-
+
      if msgReceived == 125 and msgRejected == 500:
        break
      else:
@@ -208,23 +250,29 @@
 
    check:
      msgReceived == 125
-      msgRejected == 500 
+      msgRejected == 500
 
    await allFutures(nodes.mapIt(it.stop()))
 
  asyncTest "Spam protected topic rejects a spammer node":
    # Create 5 nodes
-    let nodes = toSeq(0..<5).mapIt(newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)))
+    let nodes = toSeq(0 ..< 5).mapIt(
+      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
+    )
 
    # Protected topic and key to sign
    let spamProtectedTopic = PubSubTopic("some-spam-protected-topic")
-    let secretKey = SkSecretKey.fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6").expect("valid key")
+    let secretKey = SkSecretKey
+      .fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6")
+      .expect("valid key")
    let publicKey = secretKey.toPublicKey()
    let topicsPrivateKeys = {spamProtectedTopic: secretKey}.toTable
    let topicsPublicKeys = {spamProtectedTopic: publicKey}.toTable
 
    # Non whitelisted secret key
-    let wrongSecretKey = SkSecretKey.fromHex("32ad0cc8edeb9f8a3e8635c5fe5bd200b9247a33da5e7171bd012691805151f3").expect("valid key")
+    let wrongSecretKey = SkSecretKey
+      .fromHex("32ad0cc8edeb9f8a3e8635c5fe5bd200b9247a33da5e7171bd012691805151f3")
+      .expect("valid key")
 
    # Start all the nodes and mount relay with protected topic
    await allFutures(nodes.mapIt(it.start()))
@@ -237,45 +285,59 @@
      msgReceived += 1
 
    # Subscribe all nodes to the same topic/handler
-    for node in nodes: discard node.wakuRelay.subscribe(spamProtectedTopic, handler)
+    for node in nodes:
+      discard node.wakuRelay.subscribe(spamProtectedTopic, handler)
    await sleepAsync(500.millis)
 
    # Add signed message validator to all nodes. 
They will only route signed messages for node in nodes: - var signedTopics : seq[ProtectedTopic] + var signedTopics: seq[ProtectedTopic] for topic, publicKey in topicsPublicKeys: signedTopics.add(ProtectedTopic(topic: topic, key: publicKey)) node.wakuRelay.addSignedTopicsValidator(signedTopics) # nodes[0] is connected only to nodes[1] - let connOk1 = await nodes[0].peerManager.connectRelay(nodes[1].switch.peerInfo.toRemotePeerInfo()) + let connOk1 = await nodes[0].peerManager.connectRelay( + nodes[1].switch.peerInfo.toRemotePeerInfo() + ) require connOk1 # rest of nodes[1..4] are connected in a full mesh - for i in 1..<5: - for j in 1..<5: + for i in 1 ..< 5: + for j in 1 ..< 5: if i == j: continue - let connOk2 = await nodes[i].peerManager.connectRelay(nodes[j].switch.peerInfo.toRemotePeerInfo()) + let connOk2 = await nodes[i].peerManager.connectRelay( + nodes[j].switch.peerInfo.toRemotePeerInfo() + ) require connOk2 # Connection triggers different actions, wait for them await sleepAsync(500.millis) # nodes[0] spams 50 non signed messages (nodes[0] just knows of nodes[1]) - for j in 0..<50: + for j in 0 ..< 50: let unsignedMessage = WakuMessage( - payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic, - version: 2, timestamp: now(), ephemeral: true) + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedTopic, + version: 2, + timestamp: now(), + ephemeral: true, + ) discard await nodes[0].publish(some(spamProtectedTopic), unsignedMessage) # nodes[0] spams 50 wrongly signed messages (nodes[0] just knows of nodes[1]) - for j in 0..<50: + for j in 0 ..< 50: var msg = WakuMessage( - payload: urandom(1*(10^3)), contentTopic: spamProtectedTopic, - version: 2, timestamp: now(), ephemeral: true) + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedTopic, + version: 2, + timestamp: now(), + ephemeral: true, + ) # Sign the message with a wrong key - msg.meta = wrongSecretKey.sign(SkMessage(spamProtectedTopic.msgHash(msg))).toRaw()[0..63] + msg.meta = + wrongSecretKey.sign(SkMessage(spamProtectedTopic.msgHash(msg))).toRaw()[0 .. 
63] discard await nodes[0].publish(some(spamProtectedTopic), msg) # Wait for gossip @@ -289,10 +351,11 @@ suite "WakuNode2 - Validators": # peer1 got invalid messages from peer0 let p0Id = nodes[0].peerInfo.peerId check: - nodes[1].wakuRelay.peerStats[p0Id].topicInfos[spamProtectedTopic].invalidMessageDeliveries == 100.0 + nodes[1].wakuRelay.peerStats[p0Id].topicInfos[spamProtectedTopic].invalidMessageDeliveries == + 100.0 # peer1 did not gossip further, so no other node rx invalid messages - for i in 0..<5: + for i in 0 ..< 5: for k, v in nodes[i].wakuRelay.peerStats.mpairs: if k == p0Id and i == 1: continue @@ -305,18 +368,22 @@ suite "WakuNode2 - Validators": asyncTest "Tests vectors": # keys let privateKey = "5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6" - let publicKey = "049c5fac802da41e07e6cdf51c3b9a6351ad5e65921527f2df5b7d59fd9b56ab02bab736cdcfc37f25095e78127500da371947217a8cd5186ab890ea866211c3f6" + let publicKey = + "049c5fac802da41e07e6cdf51c3b9a6351ad5e65921527f2df5b7d59fd9b56ab02bab736cdcfc37f25095e78127500da371947217a8cd5186ab890ea866211c3f6" # message let contentTopic = "content-topic" let pubsubTopic = "pubsub-topic" - let payload = "1A12E077D0E89F9CAC11FBBB6A676C86120B5AD3E248B1F180E98F15EE43D2DFCF62F00C92737B2FF6F59B3ABA02773314B991C41DC19ADB0AD8C17C8E26757B" + let payload = + "1A12E077D0E89F9CAC11FBBB6A676C86120B5AD3E248B1F180E98F15EE43D2DFCF62F00C92737B2FF6F59B3ABA02773314B991C41DC19ADB0AD8C17C8E26757B" let timestamp = 1683208172339052800 let ephemeral = true # expected values - let expectedMsgAppHash = "662F8C20A335F170BD60ABC1F02AD66F0C6A6EE285DA2A53C95259E7937C0AE9" - let expectedSignature = "127FA211B2514F0E974A055392946DC1A14052182A6ABEFB8A6CD7C51DA1BF2E40595D28EF1A9488797C297EED3AAC45430005FB3A7F037BDD9FC4BD99F59E63" + let expectedMsgAppHash = + "662F8C20A335F170BD60ABC1F02AD66F0C6A6EE285DA2A53C95259E7937C0AE9" + let expectedSignature = + "127FA211B2514F0E974A055392946DC1A14052182A6ABEFB8A6CD7C51DA1BF2E40595D28EF1A9488797C297EED3AAC45430005FB3A7F037BDD9FC4BD99F59E63" let secretKey = SkSecretKey.fromHex(privateKey).expect("valid key") @@ -325,8 +392,12 @@ suite "WakuNode2 - Validators": secretKey.toHex() == privateKey var msg = WakuMessage( - payload: payload.fromHex(), contentTopic: contentTopic, - version: 2, timestamp: timestamp, ephemeral: ephemeral) + payload: payload.fromHex(), + contentTopic: contentTopic, + version: 2, + timestamp: timestamp, + ephemeral: ephemeral, + ) let msgAppHash = pubsubTopic.msgHash(msg) let signature = secretKey.sign(SkMessage(msgAppHash)).toRaw() diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim index 628359ab3..4d65051a8 100644 --- a/tests/wakunode_rest/test_rest_admin.nim +++ b/tests/wakunode_rest/test_rest_admin.nim @@ -1,10 +1,11 @@ {.used.} import - std/[sequtils,strformat], + std/[sequtils, strformat], stew/shims/net, testutils/unittests, - presto, presto/client as presto_client, + presto, + presto/client as presto_client, libp2p/crypto/crypto import @@ -31,14 +32,17 @@ suite "Waku v2 Rest API - Admin": var peerInfo2 {.threadvar.}: RemotePeerInfo var peerInfo3 {.threadvar.}: RemotePeerInfo var restServer {.threadvar.}: WakuRestServerRef - var client{.threadvar.}: RestClientRef + var client {.threadvar.}: RestClientRef asyncSetup: - node1 = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("127.0.0.1"), Port(60600)) + node1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("127.0.0.1"), Port(60600)) peerInfo1 = node1.switch.peerInfo - node2 = 
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("127.0.0.1"), Port(60602)) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("127.0.0.1"), Port(60602)) peerInfo2 = node2.switch.peerInfo - node3 = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("127.0.0.1"), Port(60604)) + node3 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("127.0.0.1"), Port(60604)) peerInfo3 = node3.switch.peerInfo await allFutures(node1.start(), node2.start(), node3.start()) @@ -61,8 +65,9 @@ suite "Waku v2 Rest API - Admin": asyncTest "Set and get remote peers": # Connect to nodes 2 and 3 using the Admin API - let postRes = await client.postPeers(@[constructMultiaddrStr(peerInfo2), - constructMultiaddrStr(peerInfo3)]) + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)] + ) check: postRes.status == 200 @@ -75,14 +80,19 @@ suite "Waku v2 Rest API - Admin": $getRes.contentType == $MIMETYPE_JSON getRes.data.len() == 2 # Check peer 2 - getRes.data.anyIt(it.protocols.find(WakuRelayCodec) >= 0 and - it.multiaddr == constructMultiaddrStr(peerInfo2)) + getRes.data.anyIt( + it.protocols.find(WakuRelayCodec) >= 0 and + it.multiaddr == constructMultiaddrStr(peerInfo2) + ) # Check peer 3 - getRes.data.anyIt(it.protocols.find(WakuRelayCodec) >= 0 and - it.multiaddr == constructMultiaddrStr(peerInfo3)) + getRes.data.anyIt( + it.protocols.find(WakuRelayCodec) >= 0 and + it.multiaddr == constructMultiaddrStr(peerInfo3) + ) asyncTest "Set wrong peer": - let nonExistentPeer = "/ip4/0.0.0.0/tcp/10000/p2p/16Uiu2HAm6HZZr7aToTvEBPpiys4UxajCTU97zj5v7RNR2gbniy1D" + let nonExistentPeer = + "/ip4/0.0.0.0/tcp/10000/p2p/16Uiu2HAm6HZZr7aToTvEBPpiys4UxajCTU97zj5v7RNR2gbniy1D" let postRes = await client.postPeers(@[nonExistentPeer]) check: @@ -99,7 +109,9 @@ suite "Waku v2 Rest API - Admin": getRes.data.len() == 0 asyncTest "Get filter data": - await allFutures(node1.mountFilter(), node2.mountFilterClient(), node3.mountFilterClient()) + await allFutures( + node1.mountFilter(), node2.mountFilterClient(), node3.mountFilterClient() + ) let contentFiltersNode2 = @[DefaultContentTopic, ContentTopic("2"), ContentTopic("3")] @@ -127,7 +139,8 @@ suite "Waku v2 Rest API - Admin": let peers = @[getRes.data[0].peerId, getRes.data[1].peerId] - numCriteria = @[getRes.data[0].filterCriteria.len, getRes.data[1].filterCriteria.len] + numCriteria = + @[getRes.data[0].filterCriteria.len, getRes.data[1].filterCriteria.len] check: $peerInfo2 in peers diff --git a/tests/wakunode_rest/test_rest_cors.nim b/tests/wakunode_rest/test_rest_cors.nim index 3ac166f07..aa6160f38 100644 --- a/tests/wakunode_rest/test_rest_cors.nim +++ b/tests/wakunode_rest/test_rest_cors.nim @@ -20,9 +20,7 @@ import ../testlib/wakucore, ../testlib/wakunode - -type - TestResponseTuple = tuple[status: int, data: string, headers: HttpTable] +type TestResponseTuple = tuple[status: int, data: string, headers: HttpTable] proc testWakuNode(): WakuNode = let @@ -33,8 +31,9 @@ proc testWakuNode(): WakuNode = newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) -proc fetchWithHeader(request: HttpClientRequestRef): Future[TestResponseTuple] - {.async: (raises: [CancelledError, HttpError]).} = +proc fetchWithHeader( + request: HttpClientRequestRef +): Future[TestResponseTuple] {.async: (raises: [CancelledError, HttpError]).} = var response: HttpClientResponseRef try: response = await request.send() @@ -45,50 +44,55 @@ proc fetchWithHeader(request: HttpClientRequestRef): Future[TestResponseTuple] 
response = nil
      return (status, buffer.bytesToString(), headers)
  except HttpError as exc:
-    if not(isNil(response)): await response.closeWait()
+    if not (isNil(response)):
+      await response.closeWait()
    assert false
  except CancelledError as exc:
-    if not(isNil(response)): await response.closeWait()
+    if not (isNil(response)):
+      await response.closeWait()
    assert false

proc issueRequest(
-    address: HttpAddress,
-    reqOrigin: Option[string] = none(string)
-  ): Future[TestResponseTuple] {.async.} =
-
+  address: HttpAddress, reqOrigin: Option[string] = none(string)
+): Future[TestResponseTuple] {.async.} =
  var
    session = HttpSessionRef.new({HttpClientFlag.Http11Pipeline})
    data: TestResponseTuple

-  var originHeader : seq[HttpHeaderTuple]
+  var originHeader: seq[HttpHeaderTuple]
  if reqOrigin.isSome():
    originHeader.insert(("Origin", reqOrigin.get()))

-  var
-    request = HttpClientRequestRef.new(session,
-      address,
-      version = HttpVersion11,
-      headers = originHeader)
+  var request = HttpClientRequestRef.new(
+    session, address, version = HttpVersion11, headers = originHeader
+  )
  try:
    data = await request.fetchWithHeader()
  finally:
    await request.closeWait()

  return data

-proc checkResponse(response: TestResponseTuple,
-                   expectedStatus : int,
-                   expectedOrigin : Option[string]): bool =
+proc checkResponse(
+    response: TestResponseTuple, expectedStatus: int, expectedOrigin: Option[string]
+): bool =
  if response.status != expectedStatus:
-    echo(" -> check failed: expected status" & $expectedStatus &
-      " got " & $response.status)
+    echo(
+      " -> check failed: expected status " & $expectedStatus & " got " & $response.status
+    )
    return false

-  if not (expectedOrigin.isNone() or
-    (expectedOrigin.isSome() and
-     response.headers.contains("Access-Control-Allow-Origin") and
-     response.headers.getLastString("Access-Control-Allow-Origin") == expectedOrigin.get())):
-    echo(" -> check failed: expected origin " & $expectedOrigin & " got " &
-      response.headers.getLastString("Access-Control-Allow-Origin"))
+  if not (
+    expectedOrigin.isNone() or (
+      expectedOrigin.isSome() and
+      response.headers.contains("Access-Control-Allow-Origin") and
+      response.headers.getLastString("Access-Control-Allow-Origin") ==
+        expectedOrigin.get()
+    )
+  ):
+    echo(
+      " -> check failed: expected origin " & $expectedOrigin & " got " &
+        response.headers.getLastString("Access-Control-Allow-Origin")
+    )
    return false

  return true
@@ -102,10 +106,14 @@ suite "Waku v2 REST API CORS Handling":
    let restPort = Port(58001)
    let restAddress = parseIpAddress("0.0.0.0")
-    let restServer = WakuRestServerRef.init(restAddress,
-      restPort,
-      allowedOrigin=some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*")
-    ).tryGet()
+    let restServer = WakuRestServerRef
+      .init(
+        restAddress,
+        restPort,
+        allowedOrigin =
+          some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*"),
+      )
+      .tryGet()

    installDebugApiHandlers(restServer.router, node)
    restServer.start()
@@ -150,10 +158,14 @@ suite "Waku v2 REST API CORS Handling":
    let restPort = Port(58001)
    let restAddress = parseIpAddress("0.0.0.0")
-    let restServer = WakuRestServerRef.init(restAddress,
-      restPort,
-      allowedOrigin=some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*")
-    ).tryGet()
+    let restServer = WakuRestServerRef
+      .init(
+        restAddress,
+        restPort,
+        allowedOrigin =
+          some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*"),
+      )
+      .tryGet()

    installDebugApiHandlers(restServer.router, node)
    restServer.start()
@@ -201,10 +213,8 @@ 
suite "Waku v2 REST API CORS Handling": let restPort = Port(58001) let restAddress = parseIpAddress("0.0.0.0") - let restServer = WakuRestServerRef.init(restAddress, - restPort, - allowedOrigin=some("*") - ).tryGet() + let restServer = + WakuRestServerRef.init(restAddress, restPort, allowedOrigin = some("*")).tryGet() installDebugApiHandlers(restServer.router, node) restServer.start() @@ -249,10 +259,14 @@ suite "Waku v2 REST API CORS Handling": let restPort = Port(58001) let restAddress = parseIpAddress("0.0.0.0") - let restServer = WakuRestServerRef.init(restAddress, - restPort, - allowedOrigin=some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*") - ).tryGet() + let restServer = WakuRestServerRef + .init( + restAddress, + restPort, + allowedOrigin = + some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*"), + ) + .tryGet() installDebugApiHandlers(restServer.router, node) restServer.start() diff --git a/tests/wakunode_rest/test_rest_debug.nim b/tests/wakunode_rest/test_rest_debug.nim index 4f1c4ac6a..29b954edf 100644 --- a/tests/wakunode_rest/test_rest_debug.nim +++ b/tests/wakunode_rest/test_rest_debug.nim @@ -10,7 +10,8 @@ import libp2p/crypto/crypto import ../../waku/waku_node, - ../../waku/node/waku_node as waku_node2, # TODO: Remove after moving `git_version` to the app code. + ../../waku/node/waku_node as waku_node2, + # TODO: Remove after moving `git_version` to the app code. ../../waku/waku_api/rest/server, ../../waku/waku_api/rest/client, ../../waku/waku_api/rest/responses, @@ -20,7 +21,6 @@ import ../testlib/wakucore, ../testlib/wakunode - proc testWakuNode(): WakuNode = let privkey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet() @@ -30,7 +30,6 @@ proc testWakuNode(): WakuNode = newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) - suite "Waku v2 REST API - Debug": asyncTest "Get node info - GET /debug/v1/info": # Given @@ -53,7 +52,8 @@ suite "Waku v2 REST API - Debug": check: response.status == 200 $response.contentType == $MIMETYPE_JSON - response.data.listenAddresses == @[$node.switch.peerInfo.addrs[^1] & "/p2p/" & $node.switch.peerInfo.peerId] + response.data.listenAddresses == + @[$node.switch.peerInfo.addrs[^1] & "/p2p/" & $node.switch.peerInfo.peerId] await restServer.stop() await restServer.closeWait() diff --git a/tests/wakunode_rest/test_rest_debug_serdes.nim b/tests/wakunode_rest/test_rest_debug_serdes.nim index fb0e57292..7b44f579b 100644 --- a/tests/wakunode_rest/test_rest_debug_serdes.nim +++ b/tests/wakunode_rest/test_rest_debug_serdes.nim @@ -1,17 +1,9 @@ {.used.} -import - stew/results, - stew/byteutils, - testutils/unittests, - json_serialization -import - ../../waku/waku_api/rest/serdes, - ../../waku/waku_api/rest/debug/types - +import stew/results, stew/byteutils, testutils/unittests, json_serialization +import ../../waku/waku_api/rest/serdes, ../../waku/waku_api/rest/debug/types suite "Waku v2 REST API - Debug - serialization": - suite "DebugWakuInfo - decode": test "optional field is not provided": # Given diff --git a/tests/wakunode_rest/test_rest_filter.nim b/tests/wakunode_rest/test_rest_filter.nim index 10a2d5b8f..ab6c9c0ba 100644 --- a/tests/wakunode_rest/test_rest_filter.nim +++ b/tests/wakunode_rest/test_rest_filter.nim @@ -4,7 +4,8 @@ import stew/byteutils, stew/shims/net, testutils/unittests, - presto, presto/client as presto_client, + presto, + presto/client as presto_client, libp2p/crypto/crypto import ../../waku/waku_api/message_cache, @@ -25,7 +26,6 @@ import ../testlib/wakucore, 
../testlib/wakunode - proc testWakuNode(): WakuNode = let privkey = generateSecp256k1Key() @@ -35,7 +35,6 @@ proc testWakuNode(): WakuNode = return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) - type RestFilterTest = object serviceNode: WakuNode subscriberNode: WakuNode @@ -45,7 +44,6 @@ type RestFilterTest = object client: RestClientRef clientTwdServiceNode: RestClientRef - proc init(T: type RestFilterTest): Future[T] {.async.} = var testSetup = RestFilterTest() testSetup.serviceNode = testWakuNode() @@ -57,31 +55,38 @@ proc init(T: type RestFilterTest): Future[T] {.async.} = await testSetup.serviceNode.mountFilter() await testSetup.subscriberNode.mountFilterClient() - testSetup.subscriberNode.peerManager.addServicePeer(testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuFilterSubscribeCodec) + testSetup.subscriberNode.peerManager.addServicePeer( + testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuFilterSubscribeCodec + ) let restPort = Port(58011) let restAddress = parseIpAddress("127.0.0.1") testSetup.restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() let restPort2 = Port(58012) - testSetup.restServerForService = WakuRestServerRef.init(restAddress, restPort2).tryGet() + testSetup.restServerForService = + WakuRestServerRef.init(restAddress, restPort2).tryGet() # through this one we will see if messages are pushed according to our content topic sub testSetup.messageCache = MessageCache.init() - installFilterRestApiHandlers(testSetup.restServer.router, testSetup.subscriberNode, testSetup.messageCache) + installFilterRestApiHandlers( + testSetup.restServer.router, testSetup.subscriberNode, testSetup.messageCache + ) let topicCache = MessageCache.init() - installRelayApiHandlers(testSetup.restServerForService.router, testSetup.serviceNode, topicCache) + installRelayApiHandlers( + testSetup.restServerForService.router, testSetup.serviceNode, topicCache + ) testSetup.restServer.start() testSetup.restServerForService.start() testSetup.client = newRestHttpClient(initTAddress(restAddress, restPort)) - testSetup.clientTwdServiceNode = newRestHttpClient(initTAddress(restAddress, restPort2)) + testSetup.clientTwdServiceNode = + newRestHttpClient(initTAddress(restAddress, restPort2)) return testSetup - proc shutdown(self: RestFilterTest) {.async.} = await self.restServer.stop() await self.restServer.closeWait() @@ -89,7 +94,6 @@ proc shutdown(self: RestFilterTest) {.async.} = await self.restServerForService.closeWait() await allFutures(self.serviceNode.stop(), self.subscriberNode.stop()) - suite "Waku v2 Rest API - Filter V2": asyncTest "Subscribe a node to an array of topics - POST /filter/v2/subscriptions": # Given @@ -97,23 +101,30 @@ suite "Waku v2 Rest API - Filter V2": let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId # When - let contentFilters = @[DefaultContentTopic - ,ContentTopic("2") - ,ContentTopic("3") - ,ContentTopic("4") - ] + let contentFilters = + @[DefaultContentTopic, ContentTopic("2"), ContentTopic("3"), ContentTopic("4")] - let requestBody = FilterSubscribeRequest(requestId: "1234", - contentFilters: contentFilters, - pubsubTopic: some(DefaultPubsubTopic)) + let requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: contentFilters, + pubsubTopic: some(DefaultPubsubTopic), + ) let response = await restFilterTest.client.filterPostSubscriptions(requestBody) echo "response", $response - let subscribedPeer1 = 
restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, DefaultContentTopic) - let subscribedPeer2 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, "2") - let subscribedPeer3 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, "3") - let subscribedPeer4 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, "4") + let subscribedPeer1 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, DefaultContentTopic + ) + let subscribedPeer2 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "2" + ) + let subscribedPeer3 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "3" + ) + let subscribedPeer4 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "4" + ) # Then check: @@ -127,8 +138,11 @@ suite "Waku v2 Rest API - Filter V2": subPeerId in subscribedPeer4 # When - error case - let badRequestBody = FilterSubscribeRequest(requestId: "4567", contentFilters: @[], pubsubTopic: none(string)) - let badRequestResp = await restFilterTest.client.filterPostSubscriptions(badRequestBody) + let badRequestBody = FilterSubscribeRequest( + requestId: "4567", contentFilters: @[], pubsubTopic: none(string) + ) + let badRequestResp = + await restFilterTest.client.filterPostSubscriptions(badRequestBody) check: badRequestResp.status == 400 @@ -146,30 +160,41 @@ suite "Waku v2 Rest API - Filter V2": subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId # When - var requestBody = FilterSubscribeRequest(requestId: "1234", - contentFilters: @[ContentTopic("1") - ,ContentTopic("2") - ,ContentTopic("3") - ,ContentTopic("4") - ], - pubsubTopic: some(DefaultPubsubTopic)) + var requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: + @[ContentTopic("1"), ContentTopic("2"), ContentTopic("3"), ContentTopic("4")], + pubsubTopic: some(DefaultPubsubTopic), + ) discard await restFilterTest.client.filterPostSubscriptions(requestBody) - let contentFilters = @[ContentTopic("1") - ,ContentTopic("2") - ,ContentTopic("3") - # ,ContentTopic("4") # Keep this subscription for check - ] + let contentFilters = + @[ + ContentTopic("1"), + ContentTopic("2"), + ContentTopic("3"), # ,ContentTopic("4") # Keep this subscription for check + ] - let requestBodyUnsub = FilterUnsubscribeRequest(requestId: "4321", - contentFilters: contentFilters, - pubsubTopic: some(DefaultPubsubTopic)) - let response = await restFilterTest.client.filterDeleteSubscriptions(requestBodyUnsub) + let requestBodyUnsub = FilterUnsubscribeRequest( + requestId: "4321", + contentFilters: contentFilters, + pubsubTopic: some(DefaultPubsubTopic), + ) + let response = + await restFilterTest.client.filterDeleteSubscriptions(requestBodyUnsub) - let subscribedPeer1 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, DefaultContentTopic) - let subscribedPeer2 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, "2") - let subscribedPeer3 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, "3") - let subscribedPeer4 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, "4") + let subscribedPeer1 = 
restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, DefaultContentTopic + ) + let subscribedPeer2 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "2" + ) + let subscribedPeer3 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "3" + ) + let subscribedPeer4 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "4" + ) # Then check: @@ -185,9 +210,12 @@ suite "Waku v2 Rest API - Filter V2": # When - error case let requestBodyUnsubAll = FilterUnsubscribeAllRequest(requestId: "2143") - let responseUnsubAll = await restFilterTest.client.filterDeleteAllSubscriptions(requestBodyUnsubAll) + let responseUnsubAll = + await restFilterTest.client.filterDeleteAllSubscriptions(requestBodyUnsubAll) - let subscribedPeer = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers(DefaultPubsubTopic, "4") + let subscribedPeer = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "4" + ) check: responseUnsubAll.status == 200 @@ -204,9 +232,11 @@ suite "Waku v2 Rest API - Filter V2": subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId # When - var requestBody = FilterSubscribeRequest(requestId: "1234", - contentFilters: @[ContentTopic("1")], - pubsubTopic: some(DefaultPubsubTopic)) + var requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: @[ContentTopic("1")], + pubsubTopic: some(DefaultPubsubTopic), + ) discard await restFilterTest.client.filterPostSubscriptions(requestBody) let pingResponse = await restFilterTest.client.filterSubscriberPing("9999") @@ -220,7 +250,8 @@ suite "Waku v2 Rest API - Filter V2": # When - error case let requestBodyUnsubAll = FilterUnsubscribeAllRequest(requestId: "9988") - discard await restFilterTest.client.filterDeleteAllSubscriptions(requestBodyUnsubAll) + discard + await restFilterTest.client.filterDeleteAllSubscriptions(requestBodyUnsubAll) let pingResponseFail = await restFilterTest.client.filterSubscriberPing("9977") @@ -243,9 +274,11 @@ suite "Waku v2 Rest API - Filter V2": restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) # When - var requestBody = FilterSubscribeRequest(requestId: "1234", - contentFilters: @[ContentTopic("1")], - pubsubTopic: some(DefaultPubsubTopic)) + var requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: @[ContentTopic("1")], + pubsubTopic: some(DefaultPubsubTopic), + ) discard await restFilterTest.client.filterPostSubscriptions(requestBody) let pingResponse = await restFilterTest.client.filterSubscriberPing("9999") @@ -259,16 +292,15 @@ suite "Waku v2 Rest API - Filter V2": # When - message push let testMessage = WakuMessage( - payload: "TEST-PAYLOAD-MUST-RECEIVE".toBytes(), - contentTopic: "1", - timestamp: int64(2022), - meta: "test-meta".toBytes() - ) + payload: "TEST-PAYLOAD-MUST-RECEIVE".toBytes(), + contentTopic: "1", + timestamp: int64(2022), + meta: "test-meta".toBytes(), + ) let postMsgResponse = await restFilterTest.clientTwdServiceNode.relayPostMessagesV1( - DefaultPubsubTopic, - toRelayWakuMessage(testMessage) - ) + DefaultPubsubTopic, toRelayWakuMessage(testMessage) + ) # Then let messages = restFilterTest.messageCache.getAutoMessages("1").tryGet() diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim index 5937b8dba..ffa409693 100644 --- 
a/tests/wakunode_rest/test_rest_health.nim +++ b/tests/wakunode_rest/test_rest_health.nim @@ -11,7 +11,8 @@ import libp2p/crypto/crypto import ../../waku/waku_node, - ../../waku/node/waku_node as waku_node2, # TODO: Remove after moving `git_version` to the app code. + ../../waku/node/waku_node as waku_node2, + # TODO: Remove after moving `git_version` to the app code. ../../waku/waku_api/rest/server, ../../waku/waku_api/rest/client, ../../waku/waku_api/rest/responses, @@ -23,7 +24,6 @@ import ../testlib/wakucore, ../testlib/wakunode - proc testWakuNode(): WakuNode = let privkey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet() @@ -33,7 +33,6 @@ proc testWakuNode(): WakuNode = newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) - suite "Waku v2 REST API - health": # TODO: better test for health xasyncTest "Get node health info - GET /health": @@ -60,11 +59,14 @@ suite "Waku v2 REST API - health": response.data == "Node is not ready" # now kick in rln (currently the only check for health) - await node.mountRlnRelay(WakuRlnConfig(rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), - )) + await node.mountRlnRelay( + WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + ) + ) # When response = await client.healthCheck() diff --git a/tests/wakunode_rest/test_rest_legacy_filter.nim b/tests/wakunode_rest/test_rest_legacy_filter.nim index f7de83372..4f2fb3841 100644 --- a/tests/wakunode_rest/test_rest_legacy_filter.nim +++ b/tests/wakunode_rest/test_rest_legacy_filter.nim @@ -5,7 +5,8 @@ import stew/byteutils, stew/shims/net, testutils/unittests, - presto, presto/client as presto_client, + presto, + presto/client as presto_client, libp2p/crypto/crypto import ../../waku/waku_api/message_cache, @@ -24,7 +25,6 @@ import ../testlib/wakucore, ../testlib/wakunode - proc testWakuNode(): WakuNode = let privkey = generateSecp256k1Key() @@ -34,7 +34,6 @@ proc testWakuNode(): WakuNode = return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) - type RestFilterTest = object filterNode: WakuNode clientNode: WakuNode @@ -42,7 +41,6 @@ type RestFilterTest = object messageCache: MessageCache client: RestClientRef - proc setupRestFilter(): Future[RestFilterTest] {.async.} = result.filterNode = testWakuNode() result.clientNode = testWakuNode() @@ -53,17 +51,18 @@ proc setupRestFilter(): Future[RestFilterTest] {.async.} = await result.filterNode.mountLegacyFilter() await result.clientNode.mountFilterClient() - result.clientNode.peerManager.addServicePeer(result.filterNode.peerInfo.toRemotePeerInfo() - ,WakuLegacyFilterCodec) + result.clientNode.peerManager.addServicePeer( + result.filterNode.peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec + ) let restPort = Port(58011) let restAddress = parseIpAddress("0.0.0.0") result.restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() result.messageCache = MessageCache.init() - installLegacyFilterRestApiHandlers(result.restServer.router - ,result.clientNode - ,result.messageCache) + installLegacyFilterRestApiHandlers( + result.restServer.router, result.clientNode, result.messageCache + ) result.restServer.start() @@ -71,27 +70,23 @@ proc setupRestFilter(): Future[RestFilterTest] {.async.} = return result - proc shutdown(self: RestFilterTest) {.async.} = await self.restServer.stop() await self.restServer.closeWait() await 
allFutures(self.filterNode.stop(), self.clientNode.stop()) - suite "Waku v2 Rest API - Filter": asyncTest "Subscribe a node to an array of topics - POST /filter/v1/subscriptions": # Given - let restFilterTest: RestFilterTest = await setupRestFilter() + let restFilterTest: RestFilterTest = await setupRestFilter() # When - let contentFilters = @[DefaultContentTopic - ,ContentTopic("2") - ,ContentTopic("3") - ,ContentTopic("4") - ] + let contentFilters = + @[DefaultContentTopic, ContentTopic("2"), ContentTopic("3"), ContentTopic("4")] - let requestBody = FilterLegacySubscribeRequest(contentFilters: contentFilters, - pubsubTopic: some(DefaultPubsubTopic)) + let requestBody = FilterLegacySubscribeRequest( + contentFilters: contentFilters, pubsubTopic: some(DefaultPubsubTopic) + ) let response = await restFilterTest.client.filterPostSubscriptionsV1(requestBody) # Then @@ -107,23 +102,22 @@ suite "Waku v2 Rest API - Filter": restFilterTest.messageCache.isContentSubscribed("4") # When - error case - let badRequestBody = FilterLegacySubscribeRequest(contentFilters: @[] - ,pubsubTopic: none(string)) - let badResponse = await restFilterTest.client.filterPostSubscriptionsV1(badRequestBody) + let badRequestBody = + FilterLegacySubscribeRequest(contentFilters: @[], pubsubTopic: none(string)) + let badResponse = + await restFilterTest.client.filterPostSubscriptionsV1(badRequestBody) check: badResponse.status == 400 $badResponse.contentType == $MIMETYPE_TEXT - badResponse.data == "Invalid content body, could not decode. Unable to deserialize data" - + badResponse.data == + "Invalid content body, could not decode. Unable to deserialize data" await restFilterTest.shutdown() - asyncTest "Unsubscribe a node from an array of topics - DELETE /filter/v1/subscriptions": # Given - let - restFilterTest: RestFilterTest = await setupRestFilter() + let restFilterTest: RestFilterTest = await setupRestFilter() # When restFilterTest.messageCache.contentSubscribe("1") @@ -131,15 +125,17 @@ suite "Waku v2 Rest API - Filter": restFilterTest.messageCache.contentSubscribe("3") restFilterTest.messageCache.contentSubscribe("4") - let contentFilters = @[ContentTopic("1") - ,ContentTopic("2") - ,ContentTopic("3") - # ,ContentTopic("4") # Keep this subscription for check - ] + let contentFilters = + @[ + ContentTopic("1"), + ContentTopic("2"), + ContentTopic("3"), # ,ContentTopic("4") # Keep this subscription for check + ] # When - let requestBody = FilterLegacySubscribeRequest(contentFilters: contentFilters, - pubsubTopic: some(DefaultPubsubTopic)) + let requestBody = FilterLegacySubscribeRequest( + contentFilters: contentFilters, pubsubTopic: some(DefaultPubsubTopic) + ) let response = await restFilterTest.client.filterDeleteSubscriptionsV1(requestBody) # Then @@ -159,22 +155,22 @@ suite "Waku v2 Rest API - Filter": asyncTest "Get the latest messages for topic - GET /filter/v1/messages/{contentTopic}": # Given - let - restFilterTest = await setupRestFilter() + let restFilterTest = await setupRestFilter() let pubSubTopic = "/waku/2/default-waku/proto" - let contentTopic = ContentTopic( "content-topic-x" ) + let contentTopic = ContentTopic("content-topic-x") - var messages = @[ - fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1")) - ] + var messages = + @[fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1"))] # Prevent duplicate messages - for i in 0..<2: - var msg = fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1")) + for i in 0 ..< 2: + var msg = + 
fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1")) while msg == messages[i]: - msg = fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1")) + msg = + fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1")) messages.add(msg) @@ -190,10 +186,9 @@ suite "Waku v2 Rest API - Filter": response.status == 200 $response.contentType == $MIMETYPE_JSON response.data.len == 3 - response.data.all do (msg: FilterWakuMessage) -> bool: + response.data.all do(msg: FilterWakuMessage) -> bool: msg.payload == base64.encode("TEST-1") and - msg.contentTopic.get().string == "content-topic-x" and - msg.version.get() == 2 and - msg.timestamp.get() != Timestamp(0) + msg.contentTopic.get().string == "content-topic-x" and msg.version.get() == 2 and + msg.timestamp.get() != Timestamp(0) await restFilterTest.shutdown() diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim index 34dc40dd2..b9a321a82 100644 --- a/tests/wakunode_rest/test_rest_lightpush.nim +++ b/tests/wakunode_rest/test_rest_lightpush.nim @@ -5,7 +5,8 @@ import stew/byteutils, stew/shims/net, testutils/unittests, - presto, presto/client as presto_client, + presto, + presto/client as presto_client, libp2p/crypto/crypto import @@ -24,7 +25,6 @@ import ../testlib/wakucore, ../testlib/wakunode - proc testWakuNode(): WakuNode = let privkey = generateSecp256k1Key() @@ -34,7 +34,6 @@ proc testWakuNode(): WakuNode = return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) - type RestLightPushTest = object serviceNode: WakuNode pushNode: WakuNode @@ -42,32 +41,34 @@ type RestLightPushTest = object restServer: WakuRestServerRef client: RestClientRef - proc init(T: type RestLightPushTest): Future[T] {.async.} = var testSetup = RestLightPushTest() testSetup.serviceNode = testWakuNode() testSetup.pushNode = testWakuNode() testSetup.consumerNode = testWakuNode() - await allFutures(testSetup.serviceNode.start(), - testSetup.pushNode.start(), - testSetup.consumerNode.start()) + await allFutures( + testSetup.serviceNode.start(), + testSetup.pushNode.start(), + testSetup.consumerNode.start(), + ) await testSetup.consumerNode.mountRelay() await testSetup.serviceNode.mountRelay() await testSetup.serviceNode.mountLightPush() testSetup.pushNode.mountLightPushClient() - testSetup.serviceNode.peerManager.addServicePeer( - testSetup.consumerNode.peerInfo.toRemotePeerInfo(), - WakuRelayCodec) + testSetup.consumerNode.peerInfo.toRemotePeerInfo(), WakuRelayCodec + ) - await testSetup.serviceNode.connectToNodes(@[testSetup.consumerNode.peerInfo.toRemotePeerInfo()]) + await testSetup.serviceNode.connectToNodes( + @[testSetup.consumerNode.peerInfo.toRemotePeerInfo()] + ) testSetup.pushNode.peerManager.addServicePeer( - testSetup.serviceNode.peerInfo.toRemotePeerInfo(), - WakuLightPushCodec) + testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec + ) let restPort = Port(58011) let restAddress = parseIpAddress("127.0.0.1") @@ -81,29 +82,33 @@ proc init(T: type RestLightPushTest): Future[T] {.async.} = return testSetup - proc shutdown(self: RestLightPushTest) {.async.} = await self.restServer.stop() await self.restServer.closeWait() await allFutures(self.serviceNode.stop(), self.pushNode.stop()) - suite "Waku v2 Rest API - lightpush": asyncTest "Push message request": # Given let restLightPushTest = await RestLightPushTest.init() - restLightPushTest.consumerNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) - 
restLightPushTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + restLightPushTest.consumerNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 # When - let message : RelayWakuMessage = fakeWakuMessage(contentTopic = DefaultContentTopic, - payload = toBytes("TEST-1")).toRelayWakuMessage() + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() - let requestBody = PushRequest(pubsubTopic: some(DefaultPubsubTopic), - message: message) + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) let response = await restLightPushTest.client.sendPushRequest(requestBody) echo "response", $response @@ -119,23 +124,27 @@ suite "Waku v2 Rest API - lightpush": # Given let restLightPushTest = await RestLightPushTest.init() - restLightPushTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 # When - let badMessage1 : RelayWakuMessage = fakeWakuMessage(contentTopic = DefaultContentTopic, - payload = toBytes("")).toRelayWakuMessage() - let badRequestBody1 = PushRequest(pubsubTopic: some(DefaultPubsubTopic), - message: badMessage1) + let badMessage1: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("") + ) + .toRelayWakuMessage() + let badRequestBody1 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage1) - let badMessage2 : RelayWakuMessage = fakeWakuMessage(contentTopic = "", - payload = toBytes("Sthg")).toRelayWakuMessage() - let badRequestBody2 = PushRequest(pubsubTopic: some(DefaultPubsubTopic), - message: badMessage2) + let badMessage2: RelayWakuMessage = + fakeWakuMessage(contentTopic = "", payload = toBytes("Sthg")).toRelayWakuMessage() + let badRequestBody2 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage2) - let badRequestBody3 = PushRequest(pubsubTopic: none(PubsubTopic), - message: badMessage2) + let badRequestBody3 = + PushRequest(pubsubTopic: none(PubsubTopic), message: badMessage2) var response: RestResponse[string] @@ -149,7 +158,6 @@ suite "Waku v2 Rest API - lightpush": $response.contentType == $MIMETYPE_TEXT response.data.startsWith("Invalid content body") - # when response = await restLightPushTest.client.sendPushRequest(badRequestBody2) diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim index 2e516cc0d..ab60be098 100644 --- a/tests/wakunode_rest/test_rest_relay.nim +++ b/tests/wakunode_rest/test_rest_relay.nim @@ -5,7 +5,8 @@ import stew/byteutils, stew/shims/net, testutils/unittests, - presto, presto/client as presto_client, + presto, + presto/client as presto_client, libp2p/crypto/crypto import ../../waku/common/base64, @@ -33,7 +34,6 @@ proc testWakuNode(): WakuNode = newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) - suite "Waku v2 Rest API - Relay": asyncTest "Subscribe a node to an array of pubsub topics - POST /relay/v1/subscriptions": # Given @@ -52,11 +52,12 @@ suite "Waku v2 Rest API - Relay": installRelayApiHandlers(restServer.router, node, cache) restServer.start() - let pubSubTopics = @[ - 
PubSubTopic("pubsub-topic-1"), - PubSubTopic("pubsub-topic-2"), - PubSubTopic("pubsub-topic-3") - ] + let pubSubTopics = + @[ + PubSubTopic("pubsub-topic-1"), + PubSubTopic("pubsub-topic-2"), + PubSubTopic("pubsub-topic-3"), + ] # When let client = newRestHttpClient(initTAddress(restAddress, restPort)) @@ -84,12 +85,9 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay(@[ - "pubsub-topic-1", - "pubsub-topic-2", - "pubsub-topic-3", - "pubsub-topic-x", - ]) + await node.mountRelay( + @["pubsub-topic-1", "pubsub-topic-2", "pubsub-topic-3", "pubsub-topic-x"] + ) var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -106,12 +104,13 @@ suite "Waku v2 Rest API - Relay": installRelayApiHandlers(restServer.router, node, cache) restServer.start() - let pubSubTopics = @[ - PubSubTopic("pubsub-topic-1"), - PubSubTopic("pubsub-topic-2"), - PubSubTopic("pubsub-topic-3"), - PubSubTopic("pubsub-topic-y") - ] + let pubSubTopics = + @[ + PubSubTopic("pubsub-topic-1"), + PubSubTopic("pubsub-topic-2"), + PubSubTopic("pubsub-topic-3"), + PubSubTopic("pubsub-topic-y"), + ] # When let client = newRestHttpClient(initTAddress(restAddress, restPort)) @@ -153,19 +152,32 @@ suite "Waku v2 Rest API - Relay": let pubSubTopic = "/waku/2/default-waku/proto" - var messages = @[ - fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1"), - meta = toBytes("test-meta"), ephemeral = true) - ] + var messages = + @[ + fakeWakuMessage( + contentTopic = "content-topic-x", + payload = toBytes("TEST-1"), + meta = toBytes("test-meta"), + ephemeral = true, + ) + ] # Prevent duplicate messages - for i in 0..<2: - var msg = fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1"), - meta = toBytes("test-meta"), ephemeral = true) + for i in 0 ..< 2: + var msg = fakeWakuMessage( + contentTopic = "content-topic-x", + payload = toBytes("TEST-1"), + meta = toBytes("test-meta"), + ephemeral = true, + ) while msg == messages[i]: - msg = fakeWakuMessage(contentTopic = "content-topic-x", payload = toBytes("TEST-1"), - meta = toBytes("test-meta"), ephemeral = true) + msg = fakeWakuMessage( + contentTopic = "content-topic-x", + payload = toBytes("TEST-1"), + meta = toBytes("test-meta"), + ephemeral = true, + ) messages.add(msg) @@ -187,13 +199,11 @@ suite "Waku v2 Rest API - Relay": response.status == 200 $response.contentType == $MIMETYPE_JSON response.data.len == 3 - response.data.all do (msg: RelayWakuMessage) -> bool: + response.data.all do(msg: RelayWakuMessage) -> bool: msg.payload == base64.encode("TEST-1") and - msg.contentTopic.get() == "content-topic-x" and - msg.version.get() == 2 and - msg.timestamp.get() != Timestamp(0) and - msg.meta.get() == base64.encode("test-meta") and - msg.ephemeral.get() == true + msg.contentTopic.get() == "content-topic-x" and msg.version.get() == 2 and + msg.timestamp.get() != Timestamp(0) and + msg.meta.get() == base64.encode("test-meta") and msg.ephemeral.get() == true check: cache.isPubsubSubscribed(pubSubTopic) @@ -210,16 +220,20 @@ suite "Waku v2 Rest API - Relay": await node.start() await node.mountRelay() when defined(rln_v2): - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnRelayUserMessageLimit: 20, rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) else: - let wakuRlnConfig = 
WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) await node.mountRlnRelay(wakuRlnConfig) # RPC server setup @@ -241,11 +255,14 @@ suite "Waku v2 Rest API - Relay": toSeq(node.wakuRelay.subscribedTopics).len == 1 # When - let response = await client.relayPostMessagesV1(DefaultPubsubTopic, RelayWakuMessage( - payload: base64.encode("TEST-PAYLOAD"), - contentTopic: some(DefaultContentTopic), - timestamp: some(int64(2022)) - )) + let response = await client.relayPostMessagesV1( + DefaultPubsubTopic, + RelayWakuMessage( + payload: base64.encode("TEST-PAYLOAD"), + contentTopic: some(DefaultContentTopic), + timestamp: some(int64(2022)), + ), + ) # Then check: @@ -277,11 +294,12 @@ suite "Waku v2 Rest API - Relay": installRelayApiHandlers(restServer.router, node, cache) restServer.start() - let contentTopics = @[ - ContentTopic("/app-1/2/default-content/proto"), - ContentTopic("/app-2/2/default-content/proto"), - ContentTopic("/app-3/2/default-content/proto") - ] + let contentTopics = + @[ + ContentTopic("/app-1/2/default-content/proto"), + ContentTopic("/app-2/2/default-content/proto"), + ContentTopic("/app-3/2/default-content/proto"), + ] # When let client = newRestHttpClient(initTAddress(restAddress, restPort)) @@ -300,7 +318,8 @@ suite "Waku v2 Rest API - Relay": check: # Node should be subscribed to all shards - node.wakuRelay.subscribedTopics == @["/waku/2/rs/1/7", "/waku/2/rs/1/2", "/waku/2/rs/1/5"] + node.wakuRelay.subscribedTopics == + @["/waku/2/rs/1/7", "/waku/2/rs/1/2", "/waku/2/rs/1/5"] await restServer.stop() await restServer.closeWait() @@ -318,12 +337,13 @@ suite "Waku v2 Rest API - Relay": restPort = restServer.httpServer.address.port # update with bound port for client use - let contentTopics = @[ - ContentTopic("/waku/2/default-content1/proto"), - ContentTopic("/waku/2/default-content2/proto"), - ContentTopic("/waku/2/default-content3/proto"), - ContentTopic("/waku/2/default-contentX/proto") - ] + let contentTopics = + @[ + ContentTopic("/waku/2/default-content1/proto"), + ContentTopic("/waku/2/default-content2/proto"), + ContentTopic("/waku/2/default-content3/proto"), + ContentTopic("/waku/2/default-contentX/proto"), + ] let cache = MessageCache.init() cache.contentSubscribe(contentTopics[0]) @@ -368,16 +388,20 @@ suite "Waku v2 Rest API - Relay": let contentTopic = DefaultContentTopic - var messages = @[ - fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")) - ] + var messages = + @[ + fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")) + ] # Prevent duplicate messages - for i in 0..<2: - var msg = fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")) + for i in 0 ..< 2: + var msg = + fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")) while msg == messages[i]: - msg = fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")) + msg = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) messages.add(msg) @@ -399,15 +423,15 @@ suite "Waku v2 Rest API - Relay": response.status == 200 $response.contentType == $MIMETYPE_JSON response.data.len == 3 - response.data.all do (msg: RelayWakuMessage) -> bool: + response.data.all do(msg: RelayWakuMessage) -> bool: msg.payload == 
base64.encode("TEST-1") and - msg.contentTopic.get() == DefaultContentTopic and - msg.version.get() == 2 and - msg.timestamp.get() != Timestamp(0) + msg.contentTopic.get() == DefaultContentTopic and msg.version.get() == 2 and + msg.timestamp.get() != Timestamp(0) check: cache.isContentSubscribed(contentTopic) - cache.getAutoMessages(contentTopic).tryGet().len == 0 # The cache is cleared when getMessage is called + cache.getAutoMessages(contentTopic).tryGet().len == 0 + # The cache is cleared when getMessage is called await restServer.stop() await restServer.closeWait() @@ -420,16 +444,20 @@ suite "Waku v2 Rest API - Relay": await node.start() await node.mountRelay() when defined(rln_v2): - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnRelayUserMessageLimit: 20, rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) else: - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) await node.mountRlnRelay(wakuRlnConfig) # RPC server setup @@ -450,11 +478,13 @@ suite "Waku v2 Rest API - Relay": toSeq(node.wakuRelay.subscribedTopics).len == 1 # When - let response = await client.relayPostAutoMessagesV1(RelayWakuMessage( - payload: base64.encode("TEST-PAYLOAD"), - contentTopic: some(DefaultContentTopic), - timestamp: some(int64(2022)) - )) + let response = await client.relayPostAutoMessagesV1( + RelayWakuMessage( + payload: base64.encode("TEST-PAYLOAD"), + contentTopic: some(DefaultContentTopic), + timestamp: some(int64(2022)), + ) + ) # Then check: @@ -473,16 +503,20 @@ suite "Waku v2 Rest API - Relay": await node.start() await node.mountRelay() when defined(rln_v2): - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnRelayUserMessageLimit: 20, rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) else: - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) await node.mountRlnRelay(wakuRlnConfig) # RPC server setup @@ -499,17 +533,20 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) # When - let response = await client.relayPostAutoMessagesV1(RelayWakuMessage( - payload: base64.encode("TEST-PAYLOAD"), - contentTopic: some("invalidContentTopic"), - timestamp: some(int64(2022)) - )) + let response = await client.relayPostAutoMessagesV1( + RelayWakuMessage( + payload: base64.encode("TEST-PAYLOAD"), + contentTopic: some("invalidContentTopic"), + timestamp: some(int64(2022)), + ) + ) # Then check: response.status == 400 $response.contentType == $MIMETYPE_TEXT - response.data == "Failed to publish. Autosharding error: invalid format: topic must start with slash" + response.data == + "Failed to publish. 
Autosharding error: invalid format: topic must start with slash" await restServer.stop() await restServer.closeWait() @@ -521,16 +558,20 @@ suite "Waku v2 Rest API - Relay": await node.start() await node.mountRelay() when defined(rln_v2): - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnRelayUserMessageLimit: 20, rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) else: - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) await node.mountRlnRelay(wakuRlnConfig) # RPC server setup @@ -552,17 +593,22 @@ suite "Waku v2 Rest API - Relay": toSeq(node.wakuRelay.subscribedTopics).len == 1 # When - let response = await client.relayPostMessagesV1(DefaultPubsubTopic, RelayWakuMessage( - payload: base64.encode(getByteSequence(MaxWakuMessageSize)), # Message will be bigger than the max size - contentTopic: some(DefaultContentTopic), - timestamp: some(int64(2022)) - )) + let response = await client.relayPostMessagesV1( + DefaultPubsubTopic, + RelayWakuMessage( + payload: base64.encode(getByteSequence(MaxWakuMessageSize)), + # Message will be bigger than the max size + contentTopic: some(DefaultContentTopic), + timestamp: some(int64(2022)), + ), + ) # Then check: response.status == 400 $response.contentType == $MIMETYPE_TEXT - response.data == fmt"Failed to publish: Message size exceeded maximum of {MaxWakuMessageSize} bytes" + response.data == + fmt"Failed to publish: Message size exceeded maximum of {MaxWakuMessageSize} bytes" await restServer.stop() await restServer.closeWait() @@ -574,16 +620,20 @@ suite "Waku v2 Rest API - Relay": await node.start() await node.mountRelay() when defined(rln_v2): - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnRelayUserMessageLimit: 20, rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) else: - let wakuRlnConfig = WakuRlnConfig(rlnRelayDynamic: false, + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1")) + rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + ) await node.mountRlnRelay(wakuRlnConfig) # RPC server setup @@ -605,17 +655,21 @@ suite "Waku v2 Rest API - Relay": toSeq(node.wakuRelay.subscribedTopics).len == 1 # When - let response = await client.relayPostAutoMessagesV1(RelayWakuMessage( - payload: base64.encode(getByteSequence(MaxWakuMessageSize)), # Message will be bigger than the max size - contentTopic: some(DefaultContentTopic), - timestamp: some(int64(2022)) - )) + let response = await client.relayPostAutoMessagesV1( + RelayWakuMessage( + payload: base64.encode(getByteSequence(MaxWakuMessageSize)), + # Message will be bigger than the max size + contentTopic: some(DefaultContentTopic), + timestamp: some(int64(2022)), + ) + ) # Then check: response.status == 400 $response.contentType == $MIMETYPE_TEXT - response.data == fmt"Failed to publish: Message size exceeded maximum of 
{MaxWakuMessageSize} bytes" + response.data == + fmt"Failed to publish: Message size exceeded maximum of {MaxWakuMessageSize} bytes" await restServer.stop() await restServer.closeWait() diff --git a/tests/wakunode_rest/test_rest_relay_serdes.nim b/tests/wakunode_rest/test_rest_relay_serdes.nim index f35e2d350..78255689e 100644 --- a/tests/wakunode_rest/test_rest_relay_serdes.nim +++ b/tests/wakunode_rest/test_rest_relay_serdes.nim @@ -1,28 +1,23 @@ {.used.} -import - stew/[results, byteutils], - chronicles, - unittest2, - json_serialization +import stew/[results, byteutils], chronicles, unittest2, json_serialization import ../../waku/common/base64, ../../waku/waku_api/rest/serdes, ../../waku/waku_api/rest/relay/types, ../../waku/waku_core - - suite "Waku v2 Rest API - Relay - serialization": - suite "RelayWakuMessage - decode": test "optional fields are not provided": # Given let payload = base64.encode("MESSAGE") - let jsonBytes = toBytes("{\"payload\":\"" & $payload & "\",\"contentTopic\":\"some/topic\"}") + let jsonBytes = + toBytes("{\"payload\":\"" & $payload & "\",\"contentTopic\":\"some/topic\"}") # When - let res = decodeFromJsonBytes(RelayWakuMessage, jsonBytes, requireAllFields = true) + let res = + decodeFromJsonBytes(RelayWakuMessage, jsonBytes, requireAllFields = true) # Then require(res.isOk()) @@ -43,7 +38,7 @@ suite "Waku v2 Rest API - Relay - serialization": contentTopic: none(ContentTopic), version: none(Natural), timestamp: none(int64), - ephemeral: none(bool) + ephemeral: none(bool), ) # When diff --git a/tests/wakunode_rest/test_rest_serdes.nim b/tests/wakunode_rest/test_rest_serdes.nim index 725bf9379..94e8ab89b 100644 --- a/tests/wakunode_rest/test_rest_serdes.nim +++ b/tests/wakunode_rest/test_rest_serdes.nim @@ -1,19 +1,11 @@ {.used.} -import - stew/[results, byteutils], - chronicles, - unittest2, - json_serialization -import - ../../waku/waku_api/rest/serdes, - ../../waku/waku_api/rest/debug/types - +import stew/[results, byteutils], chronicles, unittest2, json_serialization +import ../../waku/waku_api/rest/serdes, ../../waku/waku_api/rest/debug/types # TODO: Decouple this test suite from the `debug_api` module by defining # private custom types for this test suite module suite "Waku v2 Rest API - Serdes": - suite "decode": test "decodeFromJsonString - use the corresponding readValue template": # Given @@ -68,4 +60,4 @@ suite "Waku v2 Rest API - Serdes": require(res.isOk) let value = res.get() check: - value == toBytes("""{"listenAddresses":["ABC"]}""" ) + value == toBytes("""{"listenAddresses":["ABC"]}""") diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim index 908eec571..6f10ce07a 100644 --- a/tests/wakunode_rest/test_rest_store.nim +++ b/tests/wakunode_rest/test_rest_store.nim @@ -6,7 +6,8 @@ import chronicles, testutils/unittests, eth/keys, - presto, presto/client as presto_client, + presto, + presto/client as presto_client, libp2p/crypto/crypto import ../../../waku/waku_core/message, @@ -32,12 +33,17 @@ import logScope: topics = "waku node rest store_api test" -proc put(store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage): Future[Result[void, string]] = +proc put( + store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage +): Future[Result[void, string]] = let digest = waku_archive.computeDigest(message) msgHash = computeMessageHash(pubsubTopic, message) - receivedTime = if message.timestamp > 0: message.timestamp - else: getNanosecondTime(getTime().toUnixFloat()) + receivedTime = + if 
message.timestamp > 0: + message.timestamp + else: + getNanosecondTime(getTime().toUnixFloat()) store.put(pubsubTopic, message, digest, msgHash, receivedTime) @@ -55,12 +61,10 @@ proc testWakuNode(): WakuNode = # Beginning of the tests ################################################################################ procSuite "Waku v2 Rest API - Store": - asyncTest "MessageDigest <-> string conversions": # Validate MessageDigest conversion from a WakuMessage obj let wakuMsg = WakuMessage( - contentTopic: "Test content topic", - payload: @[byte('H'), byte('i'), byte('!')] + contentTopic: "Test content topic", payload: @[byte('H'), byte('i'), byte('!')] ) let messageDigest = waku_store.computeDigest(wakuMsg) @@ -103,39 +107,38 @@ procSuite "Waku v2 Rest API - Store": peerSwitch.mount(node.wakuStore) # Now prime it with some history before tests - let msgList = @[ - fakeWakuMessage(@[byte 0], contentTopic=ContentTopic("ct1"), ts=0), - fakeWakuMessage(@[byte 1], ts=1), - fakeWakuMessage(@[byte 1, byte 2], ts=2), - fakeWakuMessage(@[byte 1], ts=3), - fakeWakuMessage(@[byte 1], ts=4), - fakeWakuMessage(@[byte 1], ts=5), - fakeWakuMessage(@[byte 1], ts=6), - fakeWakuMessage(@[byte 9], contentTopic=ContentTopic("c2"), ts=9) - ] + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 1, byte 2], ts = 2), + fakeWakuMessage(@[byte 1], ts = 3), + fakeWakuMessage(@[byte 1], ts = 4), + fakeWakuMessage(@[byte 1], ts = 5), + fakeWakuMessage(@[byte 1], ts = 6), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("c2"), ts = 9), + ] for msg in msgList: require (waitFor driver.put(DefaultPubsubTopic, msg)).isOk() let client = newRestHttpClient(initTAddress(restAddress, restPort)) let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() - let fullAddr = $remotePeerInfo.addrs[0] & - "/p2p/" & $remotePeerInfo.peerId + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId # Apply filter by start and end timestamps - var response = - await client.getStoreMessagesV1( - encodeUrl(fullAddr), - encodeUrl(DefaultPubsubTopic), - "", # empty content topics. Don't filter by this field - "3", # start time - "6", # end time - "", # sender time - "", # store time - "", # base64-encoded digest - "", # empty implies default page size - "true" # ascending - ) + var response = await client.getStoreMessagesV1( + encodeUrl(fullAddr), + encodeUrl(DefaultPubsubTopic), + "", # empty content topics. 
Don't filter by this field + "3", # start time + "6", # end time + "", # sender time + "", # store time + "", # base64-encoded digest + "", # empty implies default page size + "true", # ascending + ) check: response.status == 200 @@ -174,26 +177,26 @@ procSuite "Waku v2 Rest API - Store": # Now prime it with some history before tests let timeOrigin = wakucore.now() - let msgList = @[ - fakeWakuMessage(@[byte 00], ts=ts(00, timeOrigin)), - fakeWakuMessage(@[byte 01], ts=ts(10, timeOrigin)), - fakeWakuMessage(@[byte 02], ts=ts(20, timeOrigin)), - fakeWakuMessage(@[byte 03], ts=ts(30, timeOrigin)), - fakeWakuMessage(@[byte 04], ts=ts(40, timeOrigin)), - fakeWakuMessage(@[byte 05], ts=ts(50, timeOrigin)), - fakeWakuMessage(@[byte 06], ts=ts(60, timeOrigin)), - fakeWakuMessage(@[byte 07], ts=ts(70, timeOrigin)), - fakeWakuMessage(@[byte 08], ts=ts(80, timeOrigin)), - fakeWakuMessage(@[byte 09], ts=ts(90, timeOrigin)) - ] + let msgList = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] for msg in msgList: require (waitFor driver.put(DefaultPubsubTopic, msg)).isOk() let client = newRestHttpClient(initTAddress(restAddress, restPort)) let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() - let fullAddr = $remotePeerInfo.addrs[0] & - "/p2p/" & $remotePeerInfo.peerId + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId var pages = newSeq[seq[WakuMessage]](2) @@ -203,23 +206,23 @@ procSuite "Waku v2 Rest API - Store": var reqStoreTime = Timestamp(0) var reqDigest = waku_store.MessageDigest() - for i in 0..<2: - let response = - await client.getStoreMessagesV1( - encodeUrl(fullAddr), - encodeUrl(reqPubsubTopic), - "", # content topics. Empty ignores the field. - "", # start time. Empty ignores the field. - "", # end time. Empty ignores the field. - encodeUrl($reqSenderTime), # sender time - encodeUrl($reqStoreTime), # store time - reqDigest.toRestStringMessageDigest(), # base64-encoded digest. Empty ignores the field. - "7", # page size. Empty implies default page size. - "true" # ascending - ) + for i in 0 ..< 2: + let response = await client.getStoreMessagesV1( + encodeUrl(fullAddr), + encodeUrl(reqPubsubTopic), + "", # content topics. Empty ignores the field. + "", # start time. Empty ignores the field. + "", # end time. Empty ignores the field. + encodeUrl($reqSenderTime), # sender time + encodeUrl($reqStoreTime), # store time + reqDigest.toRestStringMessageDigest(), + # base64-encoded digest. Empty ignores the field. + "7", # page size. Empty implies default page size. + "true", # ascending + ) var wakuMessages = newSeq[WakuMessage](0) - for j in 0.. 0: @@ -23,15 +18,13 @@ proc check*(db: DbConn): Result[void, string] = return ok() -proc open*(connString: string): - Result[DbConn, string] = +proc open*(connString: string): Result[DbConn, string] = ## Opens a new connection. 
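  # `open("", "", "", connString)` below leans on documented libpq behaviour:
  # a dbname argument containing `=` is parsed as a full conninfo string, so
  # host, port, user and password all travel inside `connString`.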
var conn: DbConn = nil try: - conn = open("","", "", connString) + conn = open("", "", "", connString) except DbError: - return err("exception opening new connection: " & - getCurrentExceptionMsg()) + return err("exception opening new connection: " & getCurrentExceptionMsg()) if conn.status != CONNECTION_OK: let checkRes = conn.check() @@ -42,10 +35,9 @@ proc open*(connString: string): ok(conn) -proc sendQuery(db: DbConn, - query: SqlQuery, - args: seq[string]): - Future[Result[void, string]] {.async.} = +proc sendQuery( + db: DbConn, query: SqlQuery, args: seq[string] +): Future[Result[void, string]] {.async.} = ## This proc can be used directly for queries that don't retrieve values back. if db.status != CONNECTION_OK: @@ -58,8 +50,7 @@ proc sendQuery(db: DbConn, try: wellFormedQuery = dbFormat(query, args) except DbError: - return err("exception formatting the query: " & - getCurrentExceptionMsg()) + return err("exception formatting the query: " & getCurrentExceptionMsg()) let success = db.pqsendQuery(cstring(wellFormedQuery)) if success != 1: @@ -71,17 +62,18 @@ proc sendQuery(db: DbConn, return ok() proc sendQueryPrepared( - db: DbConn, - stmtName: string, - paramValues: openArray[string], - paramLengths: openArray[int32], - paramFormats: openArray[int32]): - Result[void, string] {.raises: [].} = + db: DbConn, + stmtName: string, + paramValues: openArray[string], + paramLengths: openArray[int32], + paramFormats: openArray[int32], +): Result[void, string] {.raises: [].} = ## This proc can be used directly for queries that don't retrieve values back. if paramValues.len != paramLengths.len or paramValues.len != paramFormats.len or - paramLengths.len != paramFormats.len: - let lengthsErrMsg = $paramValues.len & " " & $paramLengths.len & " " & $paramFormats.len + paramLengths.len != paramFormats.len: + let lengthsErrMsg = + $paramValues.len & " " & $paramLengths.len & " " & $paramFormats.len return err("lengths discrepancies in sendQueryPrepared: " & $lengthsErrMsg) if db.status != CONNECTION_OK: @@ -91,18 +83,21 @@ proc sendQueryPrepared( return err("unknown reason") var cstrArrayParams = allocCStringArray(paramValues) - defer: deallocCStringArray(cstrArrayParams) + defer: + deallocCStringArray(cstrArrayParams) let nParams = cast[int32](paramValues.len) const ResultFormat = 0 ## 0 for text format, 1 for binary format. - let success = db.pqsendQueryPrepared(stmtName, - nParams, - cstrArrayParams, - unsafeAddr paramLengths[0], - unsafeAddr paramFormats[0], - ResultFormat) + let success = db.pqsendQueryPrepared( + stmtName, + nParams, + cstrArrayParams, + unsafeAddr paramLengths[0], + unsafeAddr paramFormats[0], + ResultFormat, + ) if success != 1: db.check().isOkOr: return err("failed pqsendQueryPrepared: " & $error) @@ -111,9 +106,9 @@ proc sendQueryPrepared( return ok() -proc waitQueryToFinish(db: DbConn, - rowCallback: DataProc = nil): - Future[Result[void, string]] {.async.} = +proc waitQueryToFinish( + db: DbConn, rowCallback: DataProc = nil +): Future[Result[void, string]] {.async.} = ## The 'rowCallback' param is != nil when the underlying query wants to retrieve results (SELECT.) ## For other queries, like "INSERT", 'rowCallback' should be nil. 
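For orientation, a minimal usage sketch of this query path (a sketch only: it
assumes `DataProc` is this module's callback type receiving each raw
`ptr PGresult`, and the connection string and table name are placeholders):

  import db_postgres, postgres, chronos, stew/results
  import ./dbconn

  proc countMessages() {.async.} =
    # dbconn.open is the Result-returning variant defined above
    let conn = dbconn.open("host=localhost port=5432 dbname=waku user=waku").tryGet()
    var rows = 0
    let onResult: DataProc = proc(res: ptr PGresult) =
      rows += int(pqntuples(res)) # each retrieved result set lands here
    (await conn.dbConnQuery(sql"SELECT * FROM messages", @[], onResult)).isOkOr:
      echo "query failed: ", error
      return
    echo "stored messages: ", rows

  waitFor countMessages()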
@@ -150,12 +145,9 @@ proc waitQueryToFinish(db: DbConn, pqclear(pqResult) -proc dbConnQuery*(db: DbConn, - query: SqlQuery, - args: seq[string], - rowCallback: DataProc): - Future[Result[void, string]] {.async, gcsafe.} = - +proc dbConnQuery*( + db: DbConn, query: SqlQuery, args: seq[string], rowCallback: DataProc +): Future[Result[void, string]] {.async, gcsafe.} = (await db.sendQuery(query, args)).isOkOr: return err("error in dbConnQuery calling sendQuery: " & $error) @@ -164,15 +156,15 @@ proc dbConnQuery*(db: DbConn, return ok() -proc dbConnQueryPrepared*(db: DbConn, - stmtName: string, - paramValues: seq[string], - paramLengths: seq[int32], - paramFormats: seq[int32], - rowCallback: DataProc): - Future[Result[void, string]] {.async, gcsafe.} = - - db.sendQueryPrepared(stmtName, paramValues , paramLengths, paramFormats).isOkOr: +proc dbConnQueryPrepared*( + db: DbConn, + stmtName: string, + paramValues: seq[string], + paramLengths: seq[int32], + paramFormats: seq[int32], + rowCallback: DataProc, +): Future[Result[void, string]] {.async, gcsafe.} = + db.sendQueryPrepared(stmtName, paramValues, paramLengths, paramFormats).isOkOr: return err("error in dbConnQueryPrepared calling sendQuery: " & $error) (await db.waitQueryToFinish(rowCallback)).isOkOr: diff --git a/waku/common/databases/db_postgres/pgasyncpool.nim b/waku/common/databases/db_postgres/pgasyncpool.nim index b38589f6e..bf20625dc 100644 --- a/waku/common/databases/db_postgres/pgasyncpool.nim +++ b/waku/common/databases/db_postgres/pgasyncpool.nim @@ -5,25 +5,19 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/[sequtils,nre,strformat,sets], - stew/results, - chronos -import - ./dbconn, - ../common +import std/[sequtils, nre, strformat, sets], stew/results, chronos +import ./dbconn, ../common type PgAsyncPoolState {.pure.} = enum - Closed, - Live, - Closing + Closed + Live + Closing -type - PgDbConn = object - dbConn: DbConn - open: bool - busy: bool - preparedStmts: HashSet[string] ## [stmtName's] +type PgDbConn = object + dbConn: DbConn + open: bool + busy: bool + preparedStmts: HashSet[string] ## [stmtName's] type # Database connection pool @@ -34,32 +28,28 @@ type state: PgAsyncPoolState conns: seq[PgDbConn] -proc new*(T: type PgAsyncPool, - dbUrl: string, - maxConnections: int): - DatabaseResult[T] = - +proc new*(T: type PgAsyncPool, dbUrl: string, maxConnections: int): DatabaseResult[T] = var connString: string try: let regex = re("""^postgres:\/\/([^:]+):([^@]+)@([^:]+):(\d+)\/(.+)$""") - let matches = find(dbUrl,regex).get.captures + let matches = find(dbUrl, regex).get.captures let user = matches[0] - let password = matches[1] + let password = matches[1] let host = matches[2] let port = matches[3] let dbName = matches[4] connString = fmt"user={user} host={host} port={port} dbname={dbName} password={password}" - except KeyError,InvalidUnicodeError, RegexInternalError, ValueError, - StudyError, SyntaxError: + except KeyError, InvalidUnicodeError, RegexInternalError, ValueError, StudyError, + SyntaxError: return err("could not parse postgres string: " & getCurrentExceptionMsg()) let pool = PgAsyncPool( connString: connString, maxConnections: maxConnections, state: PgAsyncPoolState.Live, - conns: newSeq[PgDbConn](0) + conns: newSeq[PgDbConn](0), ) return ok(pool) @@ -70,8 +60,7 @@ func isLive(pool: PgAsyncPool): bool = func isBusy(pool: PgAsyncPool): bool = pool.conns.mapIt(it.busy).allIt(it) -proc close*(pool: PgAsyncPool): - Future[Result[void, string]] {.async.} = +proc close*(pool: 
PgAsyncPool): Future[Result[void, string]] {.async.} = ## Gracefully wait and close all openned connections if pool.state == PgAsyncPoolState.Closing: @@ -86,7 +75,7 @@ proc close*(pool: PgAsyncPool): while pool.conns.anyIt(it.busy): await sleepAsync(0.milliseconds) - for i in 0.. 0: @@ -97,7 +96,6 @@ proc readValue*[T](r: var EnvvarReader, value: var T) {.raises: [SerializationEr if reader != nil: reader(value, r) discard r.key.pop() - else: const typeName = typetraits.name(T) {.fatal: "Failed to convert from Envvar an unsupported type: " & typeName.} diff --git a/waku/common/envvar_serialization/utils.nim b/waku/common/envvar_serialization/utils.nim index 9129a826d..724da903c 100644 --- a/waku/common/envvar_serialization/utils.nim +++ b/waku/common/envvar_serialization/utils.nim @@ -3,22 +3,21 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/[os, strutils], - stew/byteutils, - stew/ptrops +import std/[os, strutils], stew/byteutils, stew/ptrops - -type - SomePrimitives* = SomeInteger | enum | bool | SomeFloat | char +type SomePrimitives* = SomeInteger | enum | bool | SomeFloat | char proc setValue*[T: SomePrimitives](key: string, val: openArray[T]) = - os.putEnv(key, byteutils.toHex(makeOpenArray(val[0].unsafeAddr, byte, val.len*sizeof(T)))) + os.putEnv( + key, byteutils.toHex(makeOpenArray(val[0].unsafeAddr, byte, val.len * sizeof(T))) + ) proc setValue*(key: string, val: SomePrimitives) = os.putEnv(key, byteutils.toHex(makeOpenArray(val.unsafeAddr, byte, sizeof(val)))) -proc decodePaddedHex(hex: string, res: ptr UncheckedArray[byte], outputLen: int) {.raises: [ValueError].} = +proc decodePaddedHex( + hex: string, res: ptr UncheckedArray[byte], outputLen: int +) {.raises: [ValueError].} = # make it an even length let inputLen = hex.len and not 0x01 @@ -30,11 +29,12 @@ proc decodePaddedHex(hex: string, res: ptr UncheckedArray[byte], outputLen: int) offO = outputLen - maxLen for i in 0 ..< maxLen: - res[i + offO] = hex[2*i + offI].readHexChar shl 4 or hex[2*i + 1 + offI].readHexChar + res[i + offO] = + hex[2 * i + offI].readHexChar shl 4 or hex[2 * i + 1 + offI].readHexChar # write single nibble from odd length hex if (offO > 0) and (offI > 0): - res[offO-1] = hex[offI-1].readHexChar + res[offO - 1] = hex[offI - 1].readHexChar proc getValue*(key: string, outVal: var string) {.raises: [ValueError].} = let hex = os.getEnv(key) @@ -58,38 +58,22 @@ proc getValue*(key: string, outVal: var SomePrimitives) {.raises: [ValueError].} decodePaddedHex(hex, cast[ptr UncheckedArray[byte]](outVal.addr), sizeof(outVal)) template uTypeIsPrimitives*[T](_: type seq[T]): bool = - when T is SomePrimitives: - true - else: - false + when T is SomePrimitives: true else: false template uTypeIsPrimitives*[N, T](_: type array[N, T]): bool = - when T is SomePrimitives: - true - else: - false + when T is SomePrimitives: true else: false template uTypeIsPrimitives*[T](_: type openArray[T]): bool = - when T is SomePrimitives: - true - else: - false + when T is SomePrimitives: true else: false template uTypeIsRecord*(_: typed): bool = false template uTypeIsRecord*[T](_: type seq[T]): bool = - when T is (object or tuple): - true - else: - false + when T is (object or tuple): true else: false template uTypeIsRecord*[N, T](_: type array[N, T]): bool = - when T is (object or tuple): - true - else: - false - + when T is (object or tuple): true else: false func constructKey*(prefix: string, keys: openArray[string]): string = var newKey: string @@ -97,7 +81,6 @@ func constructKey*(prefix: string, 
keys: openArray[string]): string = let envvarPrefix = prefix.strip().toUpper().multiReplace(("-", "_"), (" ", "_")) newKey.add(envvarPrefix) - for k in keys: newKey.add("_") diff --git a/waku/common/envvar_serialization/writer.nim b/waku/common/envvar_serialization/writer.nim index 2b160e1fc..c497b5496 100644 --- a/waku/common/envvar_serialization/writer.nim +++ b/waku/common/envvar_serialization/writer.nim @@ -1,11 +1,8 @@ -import - typetraits, options, tables, os, - serialization, ./utils +import typetraits, options, tables, os, serialization, ./utils -type - EnvvarWriter* = object - prefix: string - key: seq[string] +type EnvvarWriter* = object + prefix: string + key: seq[string] proc init*(T: type EnvvarWriter, prefix: string): T = result.prefix = prefix @@ -17,30 +14,24 @@ proc writeValue*(w: var EnvvarWriter, value: auto) = when value is string: let key = constructKey(w.prefix, w.key) os.putEnv(key, value) - elif value is (SomePrimitives or range): let key = constructKey(w.prefix, w.key) setValue(key, value) - elif value is Option: if value.isSome: w.writeValue value.get - elif value is (seq or array or openArray): when uTypeIsPrimitives(type value): let key = constructKey(w.prefix, w.key) setValue(key, value) - elif uTypeIsRecord(type value): let key = w.key[^1] - for i in 0..= 2 and value[0] == '0' and value[1] in {'x', 'X'}: true - else: false + if value.len >= 2 and value[0] == '0' and value[1] in {'x', 'X'}: true else: false template isHexChar(c: char): bool = - if c notin {'0'..'9'} and - c notin {'a'..'f'} and - c notin {'A'..'F'}: false - else: true + if c notin {'0' .. '9'} and c notin {'a' .. 'f'} and c notin {'A' .. 'F'}: + false + else: + true func isValidHexQuantity*(value: string): bool = if not hasHexHeader(value): @@ -30,7 +27,7 @@ func isValidHexQuantity*(value: string): bool = if value.len < 3 or (value.len > 3 and value[2] == '0'): return false - for i in 2.. 
itv: - info "Missed multiple heartbeats", heartbeat = name, delay = delay, hinterval = itv + info "Missed multiple heartbeats", + heartbeat = name, delay = delay, hinterval = itv else: debug "Missed heartbeat", heartbeat = name, delay = delay, hinterval = itv diff --git a/waku/common/paging.nim b/waku/common/paging.nim index 287ee63a8..c963a24bc 100644 --- a/waku/common/paging.nim +++ b/waku/common/paging.nim @@ -1,35 +1,28 @@ import std/options -type - PagingDirection* {.pure.} = enum - ## PagingDirection determines the direction of pagination - BACKWARD = uint32(0) - FORWARD = uint32(1) - +type PagingDirection* {.pure.} = enum + ## PagingDirection determines the direction of pagination + BACKWARD = uint32(0) + FORWARD = uint32(1) proc default*(): PagingDirection {.inline.} = PagingDirection.FORWARD - proc into*(b: bool): PagingDirection = PagingDirection(b) - proc into*(b: Option[bool]): PagingDirection = if b.isNone(): return default() b.get().into() - proc into*(d: PagingDirection): bool = d == PagingDirection.FORWARD - proc into*(d: Option[PagingDirection]): bool = if d.isNone(): return false d.get().into() - proc into*(s: string): PagingDirection = (s == "true").into() diff --git a/waku/common/protobuf.nim b/waku/common/protobuf.nim index 9e012e0a9..4086124de 100644 --- a/waku/common/protobuf.nim +++ b/waku/common/protobuf.nim @@ -5,15 +5,9 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options, - libp2p/protobuf/minprotobuf, - libp2p/varint - -export - minprotobuf, - varint +import std/options, libp2p/protobuf/minprotobuf, libp2p/varint +export minprotobuf, varint ## Custom errors @@ -32,22 +26,19 @@ type ProtobufResult*[T] = Result[T, ProtobufError] - converter toProtobufError*(err: minprotobuf.ProtoError): ProtobufError = - case err: + case err of minprotobuf.ProtoError.RequiredFieldMissing: ProtobufError(kind: ProtobufErrorKind.MissingRequiredField, field: "unknown") else: ProtobufError(kind: ProtobufErrorKind.DecodeFailure, error: err) - proc missingRequiredField*(T: type ProtobufError, field: string): T = ProtobufError(kind: ProtobufErrorKind.MissingRequiredField, field: field) proc invalidLengthField*(T: type ProtobufError, field: string): T = ProtobufError(kind: ProtobufErrorKind.InvalidLengthField, field: field) - ## Extension methods proc write3*(proto: var ProtoBuffer, field: int, value: auto) = @@ -69,9 +60,9 @@ proc `==`*(a: zint64, b: zint64): bool = int64(a) == int64(b) proc `$`*(err: ProtobufError): string = - case err.kind: + case err.kind of DecodeFailure: - case err.error: + case err.error of VarintDecode: return "VarintDecode" of MessageIncomplete: @@ -90,4 +81,3 @@ proc `$`*(err: ProtobufError): string = return "MissingRequiredField " & err.field of InvalidLengthField: return "InvalidLengthField " & err.field - diff --git a/waku/common/utils/matterbridge_client.nim b/waku/common/utils/matterbridge_client.nim index 9771dc01d..634a58368 100644 --- a/waku/common/utils/matterbridge_client.nim +++ b/waku/common/utils/matterbridge_client.nim @@ -3,9 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/[httpclient, json, uri, options], - stew/results +import std/[httpclient, json, uri, options], stew/results const # Resource locators @@ -22,16 +20,14 @@ type host*: Uri gateway*: string -proc new*(T: type MatterbridgeClient, - hostUri: string, - gateway = "gateway1"): MatterbridgeClient - {.raises: [Defect, KeyError].} = - +proc new*( + T: type MatterbridgeClient, hostUri: string, gateway = "gateway1" +): 
MatterbridgeClient {.raises: [Defect, KeyError].} = let mbClient = MatterbridgeClient() mbClient.hostClient = newHttpClient() - mbClient.hostClient.headers = newHttpHeaders({ "Content-Type": "application/json" }) - + mbClient.hostClient.headers = newHttpHeaders({"Content-Type": "application/json"}) + mbClient.host = parseUri(hostUri) mbClient.gateway = gateway @@ -54,18 +50,17 @@ proc getMessages*(mb: MatterbridgeClient): MatterbridgeResult[seq[JsonNode]] = proc postMessage*(mb: MatterbridgeClient, msg: JsonNode): MatterbridgeResult[bool] = var response: Response try: - response = mb.hostClient.request($(mb.host / message), - httpMethod = HttpPost, - body = $msg) + response = + mb.hostClient.request($(mb.host / message), httpMethod = HttpPost, body = $msg) except Exception as e: return err("post request failed: " & e.msg) ok(response.status == "200 OK") -proc postMessage*(mb: MatterbridgeClient, text: string, username: string): MatterbridgeResult[bool] = - let jsonNode = %* {"text": text, - "username": username, - "gateway": mb.gateway} +proc postMessage*( + mb: MatterbridgeClient, text: string, username: string +): MatterbridgeResult[bool] = + let jsonNode = %*{"text": text, "username": username, "gateway": mb.gateway} return mb.postMessage(jsonNode) diff --git a/waku/common/utils/nat.nim b/waku/common/utils/nat.nim index 75883057f..9c69fce9e 100644 --- a/waku/common/utils/nat.nim +++ b/waku/common/utils/nat.nim @@ -1,36 +1,29 @@ - when (NimMajor, NimMinor) < (1, 4): {.push raises: [Defect].} else: {.push raises: [].} -import - std/[options, strutils] -import - chronicles, - eth/net/nat, - stew/results, - stew/shims/net, - nativesockets +import std/[options, strutils] +import chronicles, eth/net/nat, stew/results, stew/shims/net, nativesockets logScope: topics = "nat" -proc setupNat*(natConf, clientId: string, - tcpPort, udpPort: Port): - Result[tuple[ip: Option[IpAddress], - tcpPort: Option[Port], - udpPort: Option[Port]], string] - {.gcsafe.} = +proc setupNat*( + natConf, clientId: string, tcpPort, udpPort: Port +): Result[ + tuple[ip: Option[IpAddress], tcpPort: Option[Port], udpPort: Option[Port]], string +] {.gcsafe.} = + let strategy = + case natConf.toLowerAscii() + of "any": NatAny + of "none": NatNone + of "upnp": NatUpnp + of "pmp": NatPmp + else: NatNone - let strategy = case natConf.toLowerAscii(): - of "any": NatAny - of "none": NatNone - of "upnp": NatUpnp - of "pmp": NatPmp - else: NatNone - - var endpoint: tuple[ip: Option[IpAddress], tcpPort: Option[Port], udpPort: Option[Port]] + var endpoint: + tuple[ip: Option[IpAddress], tcpPort: Option[Port], udpPort: Option[Port]] if strategy != NatNone: let extIp = getExternalIP(strategy) @@ -40,9 +33,10 @@ proc setupNat*(natConf, clientId: string, # because it obtains the address of a non-gcsafe proc? var extPorts: Option[(Port, Port)] try: - extPorts = ({.gcsafe.}: redirectPorts(tcpPort = tcpPort, - udpPort = udpPort, - description = clientId)) + extPorts = ( + {.gcsafe.}: + redirectPorts(tcpPort = tcpPort, udpPort = udpPort, description = clientId) + ) except CatchableError: # TODO: nat.nim Error: can raise an unlisted exception: Exception. Isolate here for now. 
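The setupNat hunk above reformats the strategy dispatch into a single case expression. A minimal, self-contained sketch of just that dispatch, using a local stand-in enum rather than the real eth/net/nat types and omitting the actual UPnP/NAT-PMP port mapping (both simplifications for illustration):

import std/[net, options, strutils]

type NatStrategy = enum # stand-in for eth/net/nat's NatAny/NatNone/NatUpnp/NatPmp
  NatAny
  NatNone
  NatUpnp
  NatPmp

proc parseNatConf(
    natConf: string
): tuple[strategy: NatStrategy, extIp: Option[IpAddress]] =
  ## Mirrors the dispatch in setupNat: named strategies first,
  ## otherwise fall back to NatNone plus an explicit "extip:<address>".
  let strategy =
    case natConf.toLowerAscii()
    of "any": NatAny
    of "none": NatNone
    of "upnp": NatUpnp
    of "pmp": NatPmp
    else: NatNone

  var extIp = none(IpAddress)
  if strategy == NatNone and natConf.startsWith("extip:"):
    # same slice as the original code: drop the "extip:" prefix; the real
    # proc turns the ValueError from parseIpAddress into an err() instead
    extIp = some(parseIpAddress(natConf[6 ..^ 1]))

  (strategy, extIp)

when isMainModule:
  doAssert parseNatConf("upnp").strategy == NatUpnp
  doAssert $parseNatConf("extip:192.168.1.10").extIp.get() == "192.168.1.10"

For the non-extip strategies the real proc goes on to call getExternalIP and redirectPorts, as the surrounding hunk shows.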
error "unable to determine external ports" @@ -52,16 +46,14 @@ proc setupNat*(natConf, clientId: string, let (extTcpPort, extUdpPort) = extPorts.get() endpoint.tcpPort = some(extTcpPort) endpoint.udpPort = some(extUdpPort) - else: # NatNone if not natConf.startsWith("extip:"): return err("not a valid NAT mechanism: " & $natConf) try: # any required port redirection is assumed to be done by hand - endpoint.ip = some(parseIpAddress(natConf[6..^1])) + endpoint.ip = some(parseIpAddress(natConf[6 ..^ 1])) except ValueError: - return err("not a valid IP address: " & $natConf[6..^1]) + return err("not a valid IP address: " & $natConf[6 ..^ 1]) return ok(endpoint) - diff --git a/waku/common/utils/parse_size_units.nim b/waku/common/utils/parse_size_units.nim index af6308dce..20b3e35fe 100644 --- a/waku/common/utils/parse_size_units.nim +++ b/waku/common/utils/parse_size_units.nim @@ -1,8 +1,4 @@ - -import - std/strutils, - stew/results, - regex +import std/strutils, stew/results, regex proc parseMsgSize*(input: string): Result[uint64, string] = ## Parses size strings such as "1.2 KiB" or "3Kb" and returns the equivalent number of bytes @@ -20,13 +16,15 @@ proc parseMsgSize*(input: string): Result[uint64, string] = try: value = parseFloat(input[m.captures[0]].replace(",", ".")) except ValueError: - return err("invalid size in parseSize: " & getCurrentExceptionMsg() & - " error parsing: " & input[m.captures[0]] & " KKK : " & $m) + return err( + "invalid size in parseSize: " & getCurrentExceptionMsg() & " error parsing: " & + input[m.captures[0]] & " KKK : " & $m + ) let units = input[m.captures[2]].toLowerAscii() # units is "kib", or "kb", or "b". var multiplier: float - case units: + case units of "kb": multiplier = 1000 of "kib": diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim index 9bc985281..d07f7277e 100644 --- a/waku/factory/builder.nim +++ b/waku/factory/builder.nim @@ -12,15 +12,10 @@ import libp2p/builders, libp2p/nameresolving/nameresolver, libp2p/transports/wstransport -import - ../waku_enr, - ../waku_discv5, - ../waku_node, - ../node/peer_manager +import ../waku_enr, ../waku_discv5, ../waku_node, ../node/peer_manager type - WakuNodeBuilder* = object - # General + WakuNodeBuilder* = object # General nodeRng: Option[ref crypto.HmacDrbgContext] nodeKey: Option[crypto.PrivateKey] netConfig: Option[NetConfig] @@ -45,13 +40,11 @@ type WakuNodeBuilderResult* = Result[void, string] - ## Init proc init*(T: type WakuNodeBuilder): WakuNodeBuilder = WakuNodeBuilder() - ## General proc withRng*(builder: var WakuNodeBuilder, rng: ref crypto.HmacDrbgContext) = @@ -66,61 +59,67 @@ proc withRecord*(builder: var WakuNodeBuilder, record: enr.Record) = proc withNetworkConfiguration*(builder: var WakuNodeBuilder, config: NetConfig) = builder.netConfig = some(config) -proc withNetworkConfigurationDetails*(builder: var WakuNodeBuilder, - bindIp: IpAddress, - bindPort: Port, - extIp = none(IpAddress), - extPort = none(Port), - extMultiAddrs = newSeq[MultiAddress](), - wsBindPort: Port = Port(8000), - wsEnabled: bool = false, - wssEnabled: bool = false, - wakuFlags = none(CapabilitiesBitfield), - dns4DomainName = none(string)): WakuNodeBuilderResult {. - deprecated: "use 'builder.withNetworkConfiguration()' instead".} = - let netConfig = ? 
NetConfig.init( - bindIp = bindIp, - bindPort = bindPort, - extIp = extIp, - extPort = extPort, - extMultiAddrs = extMultiAddrs, - wsBindPort = wsBindPort, - wsEnabled = wsEnabled, - wssEnabled = wssEnabled, - wakuFlags = wakuFlags, - dns4DomainName = dns4DomainName, - ) +proc withNetworkConfigurationDetails*( + builder: var WakuNodeBuilder, + bindIp: IpAddress, + bindPort: Port, + extIp = none(IpAddress), + extPort = none(Port), + extMultiAddrs = newSeq[MultiAddress](), + wsBindPort: Port = Port(8000), + wsEnabled: bool = false, + wssEnabled: bool = false, + wakuFlags = none(CapabilitiesBitfield), + dns4DomainName = none(string), +): WakuNodeBuilderResult {. + deprecated: "use 'builder.withNetworkConfiguration()' instead" +.} = + let netConfig = + ?NetConfig.init( + bindIp = bindIp, + bindPort = bindPort, + extIp = extIp, + extPort = extPort, + extMultiAddrs = extMultiAddrs, + wsBindPort = wsBindPort, + wsEnabled = wsEnabled, + wssEnabled = wssEnabled, + wakuFlags = wakuFlags, + dns4DomainName = dns4DomainName, + ) builder.withNetworkConfiguration(netConfig) ok() - ## Peer storage and peer manager -proc withPeerStorage*(builder: var WakuNodeBuilder, peerStorage: PeerStorage, capacity = none(int)) = +proc withPeerStorage*( + builder: var WakuNodeBuilder, peerStorage: PeerStorage, capacity = none(int) +) = if not peerStorage.isNil(): builder.peerStorage = some(peerStorage) builder.peerStorageCapacity = capacity -proc withPeerManagerConfig*(builder: var WakuNodeBuilder, - maxRelayPeers = none(int), - shardAware = false) = +proc withPeerManagerConfig*( + builder: var WakuNodeBuilder, maxRelayPeers = none(int), shardAware = false +) = builder.maxRelayPeers = maxRelayPeers builder.shardAware = shardAware -proc withColocationLimit*(builder: var WakuNodeBuilder, - colocationLimit: int) = +proc withColocationLimit*(builder: var WakuNodeBuilder, colocationLimit: int) = builder.colocationLimit = colocationLimit ## Waku switch -proc withSwitchConfiguration*(builder: var WakuNodeBuilder, - maxConnections = none(int), - nameResolver: NameResolver = nil, - sendSignedPeerRecord = false, - secureKey = none(string), - secureCert = none(string), - agentString = none(string)) = +proc withSwitchConfiguration*( + builder: var WakuNodeBuilder, + maxConnections = none(int), + nameResolver: NameResolver = nil, + sendSignedPeerRecord = false, + secureKey = none(string), + secureCert = none(string), + agentString = none(string), +) = builder.switchMaxConnections = maxConnections builder.switchSendSignedPeerRecord = some(sendSignedPeerRecord) builder.switchSslSecureKey = secureKey diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim index d4040c4f1..a31f31afa 100644 --- a/waku/factory/external_config.nim +++ b/waku/factory/external_config.nim @@ -20,14 +20,9 @@ import ../waku_enr, ../node/peer_manager -include - ../waku_core/message/default_values +include ../waku_core/message/default_values -export - confTomlDefs, - confTomlNet, - confEnvvarDefs, - confEnvvarNet +export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet type ConfResult*[T] = Result[T, string] type ProtectedTopic* = object @@ -39,491 +34,567 @@ type ShardIdx = distinct uint16 type EthRpcUrl = distinct string type StartUpCommand* = enum - noCommand # default, runs waku - generateRlnKeystore # generates a new RLN keystore - inspectRlnDb # Inspects a given RLN tree db, providing essential db stats + noCommand # default, runs waku + generateRlnKeystore # generates a new RLN keystore + inspectRlnDb # Inspects a given RLN 
tree db, providing essential db stats -type - WakuNodeConf* = object - configFile* {. - desc: "Loads configuration from a TOML file (cmd-line parameters take precedence)" - name: "config-file" }: Option[InputFile] +type WakuNodeConf* = object + configFile* {. + desc: "Loads configuration from a TOML file (cmd-line parameters take precedence)", + name: "config-file" + .}: Option[InputFile] - ## Log configuration - logLevel* {. - desc: "Sets the log level for process. Supported levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL", - defaultValue: logging.LogLevel.INFO, - name: "log-level" .}: logging.LogLevel + ## Log configuration + logLevel* {. + desc: + "Sets the log level for process. Supported levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL", + defaultValue: logging.LogLevel.INFO, + name: "log-level" + .}: logging.LogLevel - logFormat* {. - desc: "Specifies what kind of logs should be written to stdout. Suported formats: TEXT, JSON", - defaultValue: logging.LogFormat.TEXT, - name: "log-format" .}: logging.LogFormat + logFormat* {. + desc: + "Specifies what kind of logs should be written to stdout. Suported formats: TEXT, JSON", + defaultValue: logging.LogFormat.TEXT, + name: "log-format" + .}: logging.LogFormat - rlnRelayCredPath* {. - desc: "The path for peristing rln-relay credential", + rlnRelayCredPath* {. + desc: "The path for peristing rln-relay credential", + defaultValue: "", + name: "rln-relay-cred-path" + .}: string + + rlnRelayEthClientAddress* {. + desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", + defaultValue: "http://localhost:8540/", + name: "rln-relay-eth-client-address" + .}: EthRpcUrl + + rlnRelayEthContractAddress* {. + desc: "Address of membership contract on an Ethereum testnet", + defaultValue: "", + name: "rln-relay-eth-contract-address" + .}: string + + rlnRelayCredPassword* {. + desc: "Password for encrypting RLN credentials", + defaultValue: "", + name: "rln-relay-cred-password" + .}: string + + rlnRelayEthPrivateKey* {. + desc: "Private key for broadcasting transactions", + defaultValue: "", + name: "rln-relay-eth-private-key" + .}: string + + rlnRelayUserMessageLimit* {. + desc: + "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", + defaultValue: 1, + name: "rln-relay-user-message-limit" + .}: uint64 + + rlnEpochSizeSec* {. + desc: + "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.", + defaultValue: 1, + name: "rln-relay-epoch-sec" + .}: uint64 + + maxMessageSize* {. + desc: + "Maximum message size. Accepted units: KiB, KB, and B. e.g. 1024KiB; 1500 B; etc.", + defaultValue: DefaultMaxWakuMessageSizeStr, + name: "max-msg-size" + .}: string + + case cmd* {.command, defaultValue: noCommand.}: StartUpCommand + of inspectRlnDb: + # have to change the name here since it counts as a duplicate, within noCommand + treePath* {. + desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", defaultValue: "", - name: "rln-relay-cred-path" }: string + name: "rln-relay-tree-path" + .}: string + of generateRlnKeystore: + execute* {. + desc: "Runs the registration function on-chain. By default, a dry-run will occur", + defaultValue: false, + name: "execute" + .}: bool + of noCommand: + ## Application-level configuration + protectedTopics* {. + desc: + "Topics and its public key to be used for message validation, topic:pubkey. 
Argument may be repeated.", + defaultValue: newSeq[ProtectedTopic](0), + name: "protected-topic" + .}: seq[ProtectedTopic] - rlnRelayEthClientAddress* {. - desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", - defaultValue: "http://localhost:8540/", - name: "rln-relay-eth-client-address" }: EthRpcUrl + ## General node config + clusterId* {. + desc: + "Cluster id that the node is running in. Node in a different cluster id is disconnected.", + defaultValue: 0, + name: "cluster-id" + .}: uint32 - rlnRelayEthContractAddress* {. - desc: "Address of membership contract on an Ethereum testnet", + agentString* {. + defaultValue: "nwaku", + desc: "Node agent string which is used as identifier in network", + name: "agent-string" + .}: string + + nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}: + Option[PrivateKey] + + listenAddress* {. + defaultValue: defaultListenAddress(), + desc: "Listening address for LibP2P (and Discovery v5, if enabled) traffic.", + name: "listen-address" + .}: IpAddress + + tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}: + Port + + portsShift* {. + desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift" + .}: uint16 + + nat* {. + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:.", + defaultValue: "any" + .}: string + + extMultiAddrs* {. + desc: + "External multiaddresses to advertise to the network. Argument may be repeated.", + name: "ext-multiaddr" + .}: seq[string] + + extMultiAddrsOnly* {. + desc: "Only announce external multiaddresses", + defaultValue: false, + name: "ext-multiaddr-only" + .}: bool + + maxConnections* {. + desc: "Maximum allowed number of libp2p connections.", + defaultValue: 50, + name: "max-connections" + .}: uint16 + + colocationLimit* {. + desc: + "Max num allowed peers from the same IP. Set it to 0 to remove the limitation.", + defaultValue: defaultColocationLimit(), + name: "ip-colocation-limit" + .}: int + + maxRelayPeers* {. + desc: "Maximum allowed number of relay peers.", name: "max-relay-peers" + .}: Option[int] + + peerStoreCapacity* {. + desc: "Maximum stored peers in the peerstore.", name: "peer-store-capacity" + .}: Option[int] + + peerPersistence* {. + desc: "Enable peer persistence.", defaultValue: false, name: "peer-persistence" + .}: bool + + ## DNS addrs config + dnsAddrs* {. + desc: "Enable resolution of `dnsaddr`, `dns4` or `dns6` multiaddrs", + defaultValue: true, + name: "dns-addrs" + .}: bool + + dnsAddrsNameServers* {. + desc: + "DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated.", + defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], + name: "dns-addrs-name-server" + .}: seq[IpAddress] + + dns4DomainName* {. + desc: "The domain name resolving to the node's public IPv4 address", defaultValue: "", - name: "rln-relay-eth-contract-address" }: string + name: "dns4-domain-name" + .}: string - rlnRelayCredPassword* {. - desc: "Password for encrypting RLN credentials", + ## Relay config + relay* {. + desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay" + .}: bool + + relayPeerExchange* {. + desc: "Enable gossipsub peer exchange in relay protocol: true|false", + defaultValue: false, + name: "relay-peer-exchange" + .}: bool + + relayShardedPeerManagement* {. 
+ desc: + "Enable experimental shard aware peer manager for relay protocol: true|false", + defaultValue: false, + name: "relay-shard-manager" + .}: bool + + rlnRelay* {. + desc: "Enable spam protection through rln-relay: true|false", + defaultValue: false, + name: "rln-relay" + .}: bool + + rlnRelayCredIndex* {. + desc: "the index of the onchain commitment to use", + name: "rln-relay-membership-index" + .}: Option[uint] + + rlnRelayDynamic* {. + desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false", + defaultValue: false, + name: "rln-relay-dynamic" + .}: bool + + rlnRelayIdKey* {. + desc: "Rln relay identity secret key as a Hex string", defaultValue: "", - name: "rln-relay-cred-password" }: string + name: "rln-relay-id-key" + .}: string - rlnRelayEthPrivateKey* {. - desc: "Private key for broadcasting transactions", + rlnRelayIdCommitmentKey* {. + desc: "Rln relay identity commitment key as a Hex string", defaultValue: "", - name: "rln-relay-eth-private-key" }: string + name: "rln-relay-id-commitment-key" + .}: string - rlnRelayUserMessageLimit* {. - desc: "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", + rlnRelayTreePath* {. + desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", + defaultValue: "", + name: "rln-relay-tree-path" + .}: string + + rlnRelayBandwidthThreshold* {. + desc: "Message rate in bytes/sec after which verification of proofs should happen", + defaultValue: 0, # to maintain backwards compatibility + name: "rln-relay-bandwidth-threshold" + .}: int + + staticnodes* {. + desc: "Peer multiaddr to directly connect with. Argument may be repeated.", + name: "staticnode" + .}: seq[string] + + keepAlive* {. + desc: "Enable keep-alive for idle connections: true|false", + defaultValue: false, + name: "keep-alive" + .}: bool + + topics* {. + desc: + "Default topic to subscribe to. Argument may be repeated. Deprecated! Please use pubsub-topic and/or content-topic instead.", + defaultValue: @["/waku/2/default-waku/proto"], + name: "topic" + .}: seq[string] + + pubsubTopics* {. + desc: "Default pubsub topic to subscribe to. Argument may be repeated.", + name: "pubsub-topic" + .}: seq[string] + + shards* {. + desc: "Shards index to subscribe to [0..MAX_SHARDS-1]. Argument may be repeated.", + defaultValue: @[], + name: "shard" + .}: seq[ShardIdx] + + contentTopics* {. + desc: "Default content topic to subscribe to. Argument may be repeated.", + name: "content-topic" + .}: seq[string] + + ## Store and message store config + store* {. + desc: "Enable/disable waku store protocol", defaultValue: false, name: "store" + .}: bool + + storenode* {. + desc: "Peer multiaddress to query for storage", + defaultValue: "", + name: "storenode" + .}: string + + storeMessageRetentionPolicy* {. + desc: + "Message store retention policy. Time retention policy: 'time:'. Capacity retention policy: 'capacity:'. Size retention policy: 'size:'. Set to 'none' to disable.", + defaultValue: "time:" & $2.days.seconds, + name: "store-message-retention-policy" + .}: string + + storeMessageDbUrl* {. + desc: "The database connection URL for peristent storage.", + defaultValue: "sqlite://store.sqlite3", + name: "store-message-db-url" + .}: string + + storeMessageDbVacuum* {. + desc: + "Enable database vacuuming at start. Only supported by SQLite database engine.", + defaultValue: false, + name: "store-message-db-vacuum" + .}: bool + + storeMessageDbMigration* {. 
+ desc: "Enable database migration at start.", + defaultValue: true, + name: "store-message-db-migration" + .}: bool + + storeMaxNumDbConnections* {. + desc: "Maximum number of simultaneous Postgres connections.", + defaultValue: 50, + name: "store-max-num-db-connections" + .}: int + + ## Filter config + filter* {. + desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter" + .}: bool + + filternode* {. + desc: "Peer multiaddr to request content filtering of messages.", + defaultValue: "", + name: "filternode" + .}: string + + filterTimeout* {. + desc: + "Filter clients will be wiped out if not able to receive push messages within this timeout. In seconds.", + defaultValue: 14400, # 4 hours + name: "filter-timeout" + .}: int64 + + filterSubscriptionTimeout* {. + desc: + "Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.", + defaultValue: 300, # 5 minutes + name: "filter-subscription-timeout" + .}: int64 + + filterMaxPeersToServe* {. + desc: "Maximum number of peers to serve at a time. Only for v2 filter protocol.", + defaultValue: 1000, + name: "filter-max-peers-to-serve" + .}: uint32 + + filterMaxCriteria* {. + desc: + "Maximum number of pubsub- and content topic combination per peers at a time. Only for v2 filter protocol.", + defaultValue: 1000, + name: "filter-max-criteria" + .}: uint32 + + ## Lightpush config + lightpush* {. + desc: "Enable lightpush protocol: true|false", + defaultValue: false, + name: "lightpush" + .}: bool + + lightpushnode* {. + desc: "Peer multiaddr to request lightpush of published messages.", + defaultValue: "", + name: "lightpushnode" + .}: string + + ## REST HTTP config + rest* {. + desc: "Enable Waku REST HTTP server: true|false", + defaultValue: false, + name: "rest" + .}: bool + + restAddress* {. + desc: "Listening address of the REST HTTP server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "rest-address" + .}: IpAddress + + restPort* {. + desc: "Listening port of the REST HTTP server.", + defaultValue: 8645, + name: "rest-port" + .}: uint16 + + restRelayCacheCapacity* {. + desc: "Capacity of the Relay REST API message cache.", + defaultValue: 30, + name: "rest-relay-cache-capacity" + .}: uint32 + + restAdmin* {. + desc: "Enable access to REST HTTP Admin API: true|false", + defaultValue: false, + name: "rest-admin" + .}: bool + + restPrivate* {. + desc: "Enable access to REST HTTP Private API: true|false", + defaultValue: false, + name: "rest-private" + .}: bool + + restAllowOrigin* {. + desc: + "Allow cross-origin requests from the specified origin." & + "Argument may be repeated." & "Wildcards: * or ? allowed." & + "Ex.: \"localhost:*\" or \"127.0.0.1:8080\"", + defaultValue: newSeq[string](), + name: "rest-allow-origin" + .}: seq[string] + + ## Metrics config + metricsServer* {. + desc: "Enable the metrics server: true|false", + defaultValue: false, + name: "metrics-server" + .}: bool + + metricsServerAddress* {. + desc: "Listening address of the metrics server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress + + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server.", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 + + metricsLogging* {. + desc: "Enable metrics logging: true|false", + defaultValue: true, + name: "metrics-logging" + .}: bool + + ## DNS discovery config + dnsDiscovery* {. 
+ desc: "Enable discovering nodes via DNS", + defaultValue: false, + name: "dns-discovery" + .}: bool + + dnsDiscoveryUrl* {. + desc: "URL for DNS node list in format 'enrtree://@'", + defaultValue: "", + name: "dns-discovery-url" + .}: string + + dnsDiscoveryNameServers* {. + desc: "DNS name server IPs to query. Argument may be repeated.", + defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], + name: "dns-discovery-name-server" + .}: seq[IpAddress] + + ## Discovery v5 config + discv5Discovery* {. + desc: "Enable discovering nodes via Node Discovery v5", + defaultValue: false, + name: "discv5-discovery" + .}: bool + + discv5UdpPort* {. + desc: "Listening UDP port for Node Discovery v5.", + defaultValue: 9000, + name: "discv5-udp-port" + .}: Port + + discv5BootstrapNodes* {. + desc: + "Text-encoded ENR for bootstrap node. Used when connecting to the network. Argument may be repeated.", + name: "discv5-bootstrap-node" + .}: seq[string] + + discv5EnrAutoUpdate* {. + desc: + "Discovery can automatically update its ENR with the IP address " & + "and UDP port as seen by other nodes it communicates with. " & + "This option allows to enable/disable this functionality", + defaultValue: false, + name: "discv5-enr-auto-update" + .}: bool + + discv5TableIpLimit* {. + hidden, + desc: "Maximum amount of nodes with the same IP in discv5 routing tables", + defaultValue: 10, + name: "discv5-table-ip-limit" + .}: uint + + discv5BucketIpLimit* {. + hidden, + desc: "Maximum amount of nodes with the same IP in discv5 routing table buckets", + defaultValue: 2, + name: "discv5-bucket-ip-limit" + .}: uint + + discv5BitsPerHop* {. + hidden, + desc: "Kademlia's b variable, increase for less hops per lookup", defaultValue: 1, - name: "rln-relay-user-message-limit" .}: uint64 - - rlnEpochSizeSec* {. - desc: "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.", - defaultValue: 1 - name: "rln-relay-epoch-sec" .}: uint64 - - maxMessageSize* {. - desc: "Maximum message size. Accepted units: KiB, KB, and B. e.g. 1024KiB; 1500 B; etc." - defaultValue: DefaultMaxWakuMessageSizeStr - name: "max-msg-size" }: string - - case cmd* {. - command - defaultValue: noCommand }: StartUpCommand - - of inspectRlnDb: - # have to change the name here since it counts as a duplicate, within noCommand - treePath* {. - desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", - defaultValue: "" - name: "rln-relay-tree-path" .}: string - - of generateRlnKeystore: - execute* {. - desc: "Runs the registration function on-chain. By default, a dry-run will occur", - defaultValue: false, - name: "execute" .}: bool - - - of noCommand: - ## Application-level configuration - protectedTopics* {. - desc: "Topics and its public key to be used for message validation, topic:pubkey. Argument may be repeated." - defaultValue: newSeq[ProtectedTopic](0) - name: "protected-topic" .}: seq[ProtectedTopic] - - ## General node config - clusterId* {. - desc: "Cluster id that the node is running in. Node in a different cluster id is disconnected." - defaultValue: 0 - name: "cluster-id" }: uint32 - - agentString* {. - defaultValue: "nwaku", - desc: "Node agent string which is used as identifier in network" - name: "agent-string" .}: string - - nodekey* {. - desc: "P2P node private key as 64 char hex string.", - name: "nodekey" }: Option[PrivateKey] - - listenAddress* {. - defaultValue: defaultListenAddress() - desc: "Listening address for LibP2P (and Discovery v5, if enabled) traffic." 
- name: "listen-address"}: IpAddress - - tcpPort* {. - desc: "TCP listening port." - defaultValue: 60000 - name: "tcp-port" }: Port - - portsShift* {. - desc: "Add a shift to all port numbers." - defaultValue: 0 - name: "ports-shift" }: uint16 - - nat* {. - desc: "Specify method to use for determining public address. " & - "Must be one of: any, none, upnp, pmp, extip:." - defaultValue: "any" }: string - - extMultiAddrs* {. - desc: "External multiaddresses to advertise to the network. Argument may be repeated." - name: "ext-multiaddr" }: seq[string] - - extMultiAddrsOnly* {. - desc: "Only announce external multiaddresses", - defaultValue: false, - name: "ext-multiaddr-only" }: bool - - maxConnections* {. - desc: "Maximum allowed number of libp2p connections." - defaultValue: 50 - name: "max-connections" }: uint16 - - colocationLimit* {. - desc: "Max num allowed peers from the same IP. Set it to 0 to remove the limitation." - defaultValue: defaultColocationLimit() - name: "ip-colocation-limit" }: int - - maxRelayPeers* {. - desc: "Maximum allowed number of relay peers." - name: "max-relay-peers" }: Option[int] - - peerStoreCapacity* {. - desc: "Maximum stored peers in the peerstore." - name: "peer-store-capacity" }: Option[int] - - peerPersistence* {. - desc: "Enable peer persistence.", - defaultValue: false, - name: "peer-persistence" }: bool - - ## DNS addrs config - - dnsAddrs* {. - desc: "Enable resolution of `dnsaddr`, `dns4` or `dns6` multiaddrs" - defaultValue: true - name: "dns-addrs" }: bool - - dnsAddrsNameServers* {. - desc: "DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated." - defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] - name: "dns-addrs-name-server" }: seq[IpAddress] - - dns4DomainName* {. - desc: "The domain name resolving to the node's public IPv4 address", - defaultValue: "" - name: "dns4-domain-name" }: string - - ## Relay config - - relay* {. - desc: "Enable relay protocol: true|false", - defaultValue: true - name: "relay" }: bool - - relayPeerExchange* {. - desc: "Enable gossipsub peer exchange in relay protocol: true|false", - defaultValue: false - name: "relay-peer-exchange" }: bool - - relayShardedPeerManagement* {. - desc: "Enable experimental shard aware peer manager for relay protocol: true|false", - defaultValue: false - name: "relay-shard-manager" }: bool - - rlnRelay* {. - desc: "Enable spam protection through rln-relay: true|false", - defaultValue: false - name: "rln-relay" }: bool - - rlnRelayCredIndex* {. - desc: "the index of the onchain commitment to use", - name: "rln-relay-membership-index" }: Option[uint] - - rlnRelayDynamic* {. - desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false", - defaultValue: false - name: "rln-relay-dynamic" }: bool - - rlnRelayIdKey* {. - desc: "Rln relay identity secret key as a Hex string", - defaultValue: "" - name: "rln-relay-id-key" }: string - - rlnRelayIdCommitmentKey* {. - desc: "Rln relay identity commitment key as a Hex string", - defaultValue: "" - name: "rln-relay-id-commitment-key" }: string - - rlnRelayTreePath* {. - desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", - defaultValue: "" - name: "rln-relay-tree-path" }: string - - rlnRelayBandwidthThreshold* {. - desc: "Message rate in bytes/sec after which verification of proofs should happen", - defaultValue: 0 # to maintain backwards compatibility - name: "rln-relay-bandwidth-threshold" }: int - - staticnodes* {. 
- desc: "Peer multiaddr to directly connect with. Argument may be repeated." - name: "staticnode" }: seq[string] - - keepAlive* {. - desc: "Enable keep-alive for idle connections: true|false", - defaultValue: false - name: "keep-alive" }: bool - - topics* {. - desc: "Default topic to subscribe to. Argument may be repeated. Deprecated! Please use pubsub-topic and/or content-topic instead." - defaultValue: @["/waku/2/default-waku/proto"] - name: "topic" .}: seq[string] - - pubsubTopics* {. - desc: "Default pubsub topic to subscribe to. Argument may be repeated." - name: "pubsub-topic" .}: seq[string] - - shards* {. - desc: "Shards index to subscribe to [0..MAX_SHARDS-1]. Argument may be repeated." - defaultValue: @[] - name: "shard" .}: seq[ShardIdx] - - contentTopics* {. - desc: "Default content topic to subscribe to. Argument may be repeated." - name: "content-topic" .}: seq[string] - - ## Store and message store config - - store* {. - desc: "Enable/disable waku store protocol", - defaultValue: false, - name: "store" }: bool - - storenode* {. - desc: "Peer multiaddress to query for storage", - defaultValue: "", - name: "storenode" }: string - - storeMessageRetentionPolicy* {. - desc: "Message store retention policy. Time retention policy: 'time:'. Capacity retention policy: 'capacity:'. Size retention policy: 'size:'. Set to 'none' to disable.", - defaultValue: "time:" & $2.days.seconds, - name: "store-message-retention-policy" }: string - - storeMessageDbUrl* {. - desc: "The database connection URL for peristent storage.", - defaultValue: "sqlite://store.sqlite3", - name: "store-message-db-url" }: string - - storeMessageDbVacuum* {. - desc: "Enable database vacuuming at start. Only supported by SQLite database engine.", - defaultValue: false, - name: "store-message-db-vacuum" }: bool - - storeMessageDbMigration* {. - desc: "Enable database migration at start.", - defaultValue: true, - name: "store-message-db-migration" }: bool - - storeMaxNumDbConnections* {. - desc: "Maximum number of simultaneous Postgres connections.", - defaultValue: 50, - name: "store-max-num-db-connections" }: int - - ## Filter config - - filter* {. - desc: "Enable filter protocol: true|false", - defaultValue: false - name: "filter" }: bool - - filternode* {. - desc: "Peer multiaddr to request content filtering of messages.", - defaultValue: "" - name: "filternode" }: string - - filterTimeout* {. - desc: "Filter clients will be wiped out if not able to receive push messages within this timeout. In seconds.", - defaultValue: 14400 # 4 hours - name: "filter-timeout" }: int64 - - filterSubscriptionTimeout* {. - desc: "Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.", - defaultValue: 300 # 5 minutes - name: "filter-subscription-timeout" }: int64 - - filterMaxPeersToServe* {. - desc: "Maximum number of peers to serve at a time. Only for v2 filter protocol.", - defaultValue: 1000 - name: "filter-max-peers-to-serve" }: uint32 - - filterMaxCriteria* {. - desc: "Maximum number of pubsub- and content topic combination per peers at a time. Only for v2 filter protocol.", - defaultValue: 1000 - name: "filter-max-criteria" }: uint32 - - ## Lightpush config - - lightpush* {. - desc: "Enable lightpush protocol: true|false", - defaultValue: false - name: "lightpush" }: bool - - lightpushnode* {. - desc: "Peer multiaddr to request lightpush of published messages.", - defaultValue: "" - name: "lightpushnode" }: string - - ## REST HTTP config - - rest* {. 
- desc: "Enable Waku REST HTTP server: true|false", - defaultValue: false - name: "rest" }: bool - - restAddress* {. - desc: "Listening address of the REST HTTP server.", - defaultValue: parseIpAddress("127.0.0.1") - name: "rest-address" }: IpAddress - - restPort* {. - desc: "Listening port of the REST HTTP server.", - defaultValue: 8645 - name: "rest-port" }: uint16 - - restRelayCacheCapacity* {. - desc: "Capacity of the Relay REST API message cache.", - defaultValue: 30 - name: "rest-relay-cache-capacity" }: uint32 - - restAdmin* {. - desc: "Enable access to REST HTTP Admin API: true|false", - defaultValue: false - name: "rest-admin" }: bool - - restPrivate* {. - desc: "Enable access to REST HTTP Private API: true|false", - defaultValue: false - name: "rest-private" }: bool - - restAllowOrigin* {. - desc: "Allow cross-origin requests from the specified origin." & - "Argument may be repeated." & - "Wildcards: * or ? allowed." & - "Ex.: \"localhost:*\" or \"127.0.0.1:8080\"", - defaultValue: newSeq[string]() - name: "rest-allow-origin" }: seq[string] - - ## Metrics config - - metricsServer* {. - desc: "Enable the metrics server: true|false" - defaultValue: false - name: "metrics-server" }: bool - - metricsServerAddress* {. - desc: "Listening address of the metrics server." - defaultValue: parseIpAddress("127.0.0.1") - name: "metrics-server-address" }: IpAddress - - metricsServerPort* {. - desc: "Listening HTTP port of the metrics server." - defaultValue: 8008 - name: "metrics-server-port" }: uint16 - - metricsLogging* {. - desc: "Enable metrics logging: true|false" - defaultValue: true - name: "metrics-logging" }: bool - - ## DNS discovery config - - dnsDiscovery* {. - desc: "Enable discovering nodes via DNS" - defaultValue: false - name: "dns-discovery" }: bool - - dnsDiscoveryUrl* {. - desc: "URL for DNS node list in format 'enrtree://@'", - defaultValue: "" - name: "dns-discovery-url" }: string - - dnsDiscoveryNameServers* {. - desc: "DNS name server IPs to query. Argument may be repeated." - defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] - name: "dns-discovery-name-server" }: seq[IpAddress] - - ## Discovery v5 config - - discv5Discovery* {. - desc: "Enable discovering nodes via Node Discovery v5" - defaultValue: false - name: "discv5-discovery" }: bool - - discv5UdpPort* {. - desc: "Listening UDP port for Node Discovery v5." - defaultValue: 9000 - name: "discv5-udp-port" }: Port - - discv5BootstrapNodes* {. - desc: "Text-encoded ENR for bootstrap node. Used when connecting to the network. Argument may be repeated." - name: "discv5-bootstrap-node" }: seq[string] - - discv5EnrAutoUpdate* {. - desc: "Discovery can automatically update its ENR with the IP address " & - "and UDP port as seen by other nodes it communicates with. " & - "This option allows to enable/disable this functionality" - defaultValue: false - name: "discv5-enr-auto-update" .}: bool - - discv5TableIpLimit* {. - hidden - desc: "Maximum amount of nodes with the same IP in discv5 routing tables" - defaultValue: 10 - name: "discv5-table-ip-limit" .}: uint - - discv5BucketIpLimit* {. - hidden - desc: "Maximum amount of nodes with the same IP in discv5 routing table buckets" - defaultValue: 2 - name: "discv5-bucket-ip-limit" .}: uint - - discv5BitsPerHop* {. - hidden - desc: "Kademlia's b variable, increase for less hops per lookup" - defaultValue: 1 - name: "discv5-bits-per-hop" .}: int - - ## waku peer exchange config - peerExchange* {. 
- desc: "Enable waku peer exchange protocol (responder side): true|false", - defaultValue: false - name: "peer-exchange" }: bool - - peerExchangeNode* {. - desc: "Peer multiaddr to send peer exchange requests to. (enables peer exchange protocol requester side)", - defaultValue: "" - name: "peer-exchange-node" }: string - - ## websocket config - websocketSupport* {. - desc: "Enable websocket: true|false", - defaultValue: false - name: "websocket-support"}: bool - - websocketPort* {. - desc: "WebSocket listening port." - defaultValue: 8000 - name: "websocket-port" }: Port - - websocketSecureSupport* {. - desc: "Enable secure websocket: true|false", - defaultValue: false - name: "websocket-secure-support"}: bool - - websocketSecureKeyPath* {. - desc: "Secure websocket key path: '/path/to/key.txt' ", - defaultValue: "" - name: "websocket-secure-key-path"}: string - - websocketSecureCertPath* {. - desc: "Secure websocket Certificate path: '/path/to/cert.txt' ", - defaultValue: "" - name: "websocket-secure-cert-path"}: string + name: "discv5-bits-per-hop" + .}: int + + ## waku peer exchange config + peerExchange* {. + desc: "Enable waku peer exchange protocol (responder side): true|false", + defaultValue: false, + name: "peer-exchange" + .}: bool + + peerExchangeNode* {. + desc: + "Peer multiaddr to send peer exchange requests to. (enables peer exchange protocol requester side)", + defaultValue: "", + name: "peer-exchange-node" + .}: string + + ## websocket config + websocketSupport* {. + desc: "Enable websocket: true|false", + defaultValue: false, + name: "websocket-support" + .}: bool + + websocketPort* {. + desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port" + .}: Port + + websocketSecureSupport* {. + desc: "Enable secure websocket: true|false", + defaultValue: false, + name: "websocket-secure-support" + .}: bool + + websocketSecureKeyPath* {. + desc: "Secure websocket key path: '/path/to/key.txt' ", + defaultValue: "", + name: "websocket-secure-key-path" + .}: string + + websocketSecureCertPath* {. 
+ desc: "Secure websocket Certificate path: '/path/to/cert.txt' ", + defaultValue: "", + name: "websocket-secure-cert-path" + .}: string ## Parsing @@ -541,7 +612,9 @@ proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] = proc parseCmdArg*(T: type ProtectedTopic, p: string): T = let elements = p.split(":") if elements.len != 2: - raise newException(ValueError, "Invalid format for protected topic expected topic:publickey") + raise newException( + ValueError, "Invalid format for protected topic expected topic:publickey" + ) let publicKey = secp256k1.SkPublicKey.fromHex(elements[1]) if publicKey.isErr: @@ -613,59 +686,79 @@ proc parseCmdArg*(T: type EthRpcUrl, s: string): T = ## https://url:port/path?query ## disallowed patterns: ## any valid/invalid ws or wss url - var httpPattern = re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" - var wsPattern = re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var httpPattern = + re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var wsPattern = + re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" if regex.match(s, wsPattern): - raise newException(ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL") + raise newException( + ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL" + ) if not regex.match(s, httpPattern): raise newException(ValueError, "Invalid HTTP RPC URL") return EthRpcUrl(s) ## Load -proc readValue*(r: var TomlReader, value: var crypto.PrivateKey) {.raises: [SerializationError].} = +proc readValue*( + r: var TomlReader, value: var crypto.PrivateKey +) {.raises: [SerializationError].} = try: value = parseCmdArg(crypto.PrivateKey, r.readValue(string)) except CatchableError: raise newException(SerializationError, getCurrentExceptionMsg()) -proc readValue*(r: var EnvvarReader, value: var crypto.PrivateKey) {.raises: [SerializationError].} = +proc readValue*( + r: var EnvvarReader, value: var crypto.PrivateKey +) {.raises: [SerializationError].} = try: value = parseCmdArg(crypto.PrivateKey, r.readValue(string)) except CatchableError: raise newException(SerializationError, getCurrentExceptionMsg()) -proc readValue*(r: var TomlReader, value: var ProtectedTopic) {.raises: [SerializationError].} = +proc readValue*( + r: var TomlReader, value: var ProtectedTopic +) {.raises: [SerializationError].} = try: value = parseCmdArg(ProtectedTopic, r.readValue(string)) except CatchableError: raise newException(SerializationError, getCurrentExceptionMsg()) -proc readValue*(r: var EnvvarReader, value: var ProtectedTopic) {.raises: [SerializationError].} = +proc readValue*( + r: var EnvvarReader, value: var ProtectedTopic +) {.raises: [SerializationError].} = try: value = parseCmdArg(ProtectedTopic, r.readValue(string)) except CatchableError: raise newException(SerializationError, getCurrentExceptionMsg()) -proc readValue*(r: var TomlReader, value: var ShardIdx) {.raises: [SerializationError].} = +proc readValue*( + r: var TomlReader, value: var ShardIdx +) {.raises: [SerializationError].} = try: value = parseCmdArg(ShardIdx, r.readValue(string)) except CatchableError: raise newException(SerializationError, getCurrentExceptionMsg()) -proc readValue*(r: var EnvvarReader, value: var ShardIdx) {.raises: [SerializationError].} = +proc readValue*( + r: var 
EnvvarReader, value: var ShardIdx +) {.raises: [SerializationError].} = try: value = parseCmdArg(ShardIdx, r.readValue(string)) except CatchableError: raise newException(SerializationError, getCurrentExceptionMsg()) -proc readValue*(r: var TomlReader, value: var EthRpcUrl) {.raises: [SerializationError].} = +proc readValue*( + r: var TomlReader, value: var EthRpcUrl +) {.raises: [SerializationError].} = try: value = parseCmdArg(EthRpcUrl, r.readValue(string)) except CatchableError: raise newException(SerializationError, getCurrentExceptionMsg()) -proc readValue*(r: var EnvvarReader, value: var EthRpcUrl) {.raises: [SerializationError].} = +proc readValue*( + r: var EnvvarReader, value: var EthRpcUrl +) {.raises: [SerializationError].} = try: value = parseCmdArg(EthRpcUrl, r.readValue(string)) except CatchableError: @@ -673,16 +766,18 @@ proc readValue*(r: var EnvvarReader, value: var EthRpcUrl) {.raises: [Serializat {.push warning[ProveInit]: off.} -proc load*(T: type WakuNodeConf, version=""): ConfResult[T] = +proc load*(T: type WakuNodeConf, version = ""): ConfResult[T] = try: let conf = WakuNodeConf.load( - version=version, - secondarySources = proc (conf: WakuNodeConf, sources: auto) - {.gcsafe, raises: [ConfigurationError].} = + version = version, + secondarySources = proc( + conf: WakuNodeConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = sources.addConfigFile(Envvar, InputFile("wakunode2")) if conf.configFile.isSome(): sources.addConfigFile(Toml, conf.configFile.get()) + , ) ok(conf) except CatchableError: diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim index 50cb36160..51829a39b 100644 --- a/waku/factory/internal_config.nim +++ b/waku/factory/internal_config.nim @@ -15,15 +15,13 @@ import ../waku_enr, ../waku_core -proc enrConfiguration*(conf: WakuNodeConf, netConfig: NetConfig, key: crypto.PrivateKey): - Result[enr.Record, string] = - +proc enrConfiguration*( + conf: WakuNodeConf, netConfig: NetConfig, key: crypto.PrivateKey +): Result[enr.Record, string] = var enrBuilder = EnrBuilder.init(key) enrBuilder.withIpAddressAndPorts( - netConfig.enrIp, - netConfig.enrPort, - netConfig.discv5UdpPort + netConfig.enrIp, netConfig.enrPort, netConfig.discv5UdpPort ) if netConfig.wakuFlags.isSome(): @@ -33,32 +31,37 @@ proc enrConfiguration*(conf: WakuNodeConf, netConfig: NetConfig, key: crypto.Pri let shards: seq[uint16] = # no shards configured - if conf.shards.len == 0: toSeq(0.. 
0): - let extMultiAddrsValidationRes = - validateExtMultiAddrs(conf.extMultiAddrs) - if extMultiAddrsValidationRes.isErr(): - return err("invalid external multiaddress: " & - $extMultiAddrsValidationRes.error) - else: - extMultiAddrsValidationRes.get() - else: - @[] + extMultiAddrs = + if (conf.extMultiAddrs.len > 0): + let extMultiAddrsValidationRes = validateExtMultiAddrs(conf.extMultiAddrs) + if extMultiAddrsValidationRes.isErr(): + return + err("invalid external multiaddress: " & $extMultiAddrsValidationRes.error) + else: + extMultiAddrsValidationRes.get() + else: + @[] wakuFlags = CapabilitiesBitfield.init( - lightpush = conf.lightpush, - filter = conf.filter, - store = conf.store, - relay = conf.relay - ) + lightpush = conf.lightpush, + filter = conf.filter, + store = conf.store, + relay = conf.relay, + ) # Resolve and use DNS domain IP if dns4DomainName.isSome() and extIp.isNone(): @@ -135,25 +142,26 @@ proc networkConfiguration*(conf: WakuNodeConf, extIp = some(parseIpAddress(dnsRes.get())) except CatchableError: - return err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg()) + return + err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg()) # Wrap in none because NetConfig does not have a default constructor # TODO: We could change bindIp in NetConfig to be something less restrictive # than IpAddress, which doesn't allow default construction let netConfigRes = NetConfig.init( - clusterId = conf.clusterId, - bindIp = conf.listenAddress, - bindPort = Port(uint16(conf.tcpPort) + conf.portsShift), - extIp = extIp, - extPort = extPort, - extMultiAddrs = extMultiAddrs, - extMultiAddrsOnly = conf.extMultiAddrsOnly, - wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift), - wsEnabled = conf.websocketSupport, - wssEnabled = conf.websocketSecureSupport, - dns4DomainName = dns4DomainName, - discv5UdpPort = discv5UdpPort, - wakuFlags = some(wakuFlags), - ) + clusterId = conf.clusterId, + bindIp = conf.listenAddress, + bindPort = Port(uint16(conf.tcpPort) + conf.portsShift), + extIp = extIp, + extPort = extPort, + extMultiAddrs = extMultiAddrs, + extMultiAddrsOnly = conf.extMultiAddrsOnly, + wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift), + wsEnabled = conf.websocketSupport, + wssEnabled = conf.websocketSecureSupport, + dns4DomainName = dns4DomainName, + discv5UdpPort = discv5UdpPort, + wakuFlags = some(wakuFlags), + ) return netConfigRes diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index cc5360bdf..624014681 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -34,9 +34,9 @@ import const PeerPersistenceDbUrl = "peers.db" proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] = - let db = ? SqliteDatabase.new(PeerPersistenceDbUrl) + let db = ?SqliteDatabase.new(PeerPersistenceDbUrl) - ? 
peer_store_sqlite_migrations.migrate(db) + ?peer_store_sqlite_migrations.migrate(db) let res = WakuPeerStorage.new(db) if res.isErr(): @@ -46,14 +46,15 @@ proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] = ## Init waku node instance -proc initNode(conf: WakuNodeConf, - netConfig: NetConfig, - rng: ref HmacDrbgContext, - nodeKey: crypto.PrivateKey, - record: enr.Record, - peerStore: Option[WakuPeerStorage], - dynamicBootstrapNodes: openArray[RemotePeerInfo] = @[]): Result[WakuNode, string] = - +proc initNode( + conf: WakuNodeConf, + netConfig: NetConfig, + rng: ref HmacDrbgContext, + nodeKey: crypto.PrivateKey, + record: enr.Record, + peerStore: Option[WakuPeerStorage], + dynamicBootstrapNodes: openArray[RemotePeerInfo] = @[], +): Result[WakuNode, string] = ## Setup a basic Waku v2 node based on a supplied configuration ## file. Optionally include persistent peer storage. ## No protocols are mounted yet. @@ -69,8 +70,11 @@ proc initNode(conf: WakuNodeConf, var node: WakuNode - let pStorage = if peerStore.isNone(): nil - else: peerStore.get() + let pStorage = + if peerStore.isNone(): + nil + else: + peerStore.get() # Build waku node instance var builder = WakuNodeBuilder.init() @@ -80,28 +84,32 @@ proc initNode(conf: WakuNodeConf, builder.withNetworkConfiguration(netConfig) builder.withPeerStorage(pStorage, capacity = conf.peerStoreCapacity) builder.withSwitchConfiguration( - maxConnections = some(conf.maxConnections.int), - secureKey = some(conf.websocketSecureKeyPath), - secureCert = some(conf.websocketSecureCertPath), - nameResolver = dnsResolver, - sendSignedPeerRecord = conf.relayPeerExchange, # We send our own signed peer record when peer exchange enabled - agentString = some(conf.agentString) + maxConnections = some(conf.maxConnections.int), + secureKey = some(conf.websocketSecureKeyPath), + secureCert = some(conf.websocketSecureCertPath), + nameResolver = dnsResolver, + sendSignedPeerRecord = conf.relayPeerExchange, + # We send our own signed peer record when peer exchange enabled + agentString = some(conf.agentString), ) builder.withColocationLimit(conf.colocationLimit) builder.withPeerManagerConfig( - maxRelayPeers = conf.maxRelayPeers, - shardAware = conf.relayShardedPeerManagement,) + maxRelayPeers = conf.maxRelayPeers, shardAware = conf.relayShardedPeerManagement + ) - node = ? builder.build().mapErr(proc (err: string): string = "failed to create waku node instance: " & err) + node = + ?builder.build().mapErr( + proc(err: string): string = + "failed to create waku node instance: " & err + ) ok(node) ## Mount protocols -proc setupProtocols(node: WakuNode, - conf: WakuNodeConf, - nodeKey: crypto.PrivateKey): - Future[Result[void, string]] {.async.} = +proc setupProtocols( + node: WakuNode, conf: WakuNodeConf, nodeKey: crypto.PrivateKey +): Future[Result[void, string]] {.async.} = ## Setup configured protocols on an existing Waku v2 node. ## Optionally include persistent message storage. ## No protocols are started yet. 
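Several of the node_factory.nim hunks above (and the dbconn.nim ones earlier) lean on nim-results combinators, and the reformat also tightens the error-propagation operator from `? expr` to `?expr`. For readers skimming the diff, a small self-contained sketch of the three recurring shapes: `?` for early return, `.isOkOr:` for handling the error branch, and `.valueOr:` for unwrap-or-bail. The safeDiv and compute procs are hypothetical, for illustration only:

import stew/results # the same Result helpers used throughout these hunks

proc safeDiv(a, b: int): Result[int, string] =
  if b == 0:
    return err("division by zero")
  ok(a div b)

proc compute(): Result[int, string] =
  # `?` unwraps the ok value or propagates the error to our caller,
  # as in `let db = ?SqliteDatabase.new(PeerPersistenceDbUrl)` above.
  let x = ?safeDiv(10, 2)

  # `.isOkOr:` runs its branch, with `error` in scope, when the Result
  # is an err; dbConnQuery earlier in this diff uses the same shape.
  safeDiv(x, 1).isOkOr:
    return err("inner call failed: " & error)

  ok(x * 2)

when isMainModule:
  # `.valueOr:` yields the ok value or evaluates the branch (again with
  # `error` available), as node_factory does with parseMsgSize.
  let v = compute().valueOr:
    quit("compute failed: " & error)
  doAssert v == 10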
@@ -115,14 +123,17 @@ proc setupProtocols(node: WakuNode, # Mount relay on all nodes var peerExchangeHandler = none(RoutingRecordsHandler) if conf.relayPeerExchange: - proc handlePeerExchange(peer: PeerId, topic: string, - peers: seq[RoutingRecordsPair]) {.gcsafe.} = + proc handlePeerExchange( + peer: PeerId, topic: string, peers: seq[RoutingRecordsPair] + ) {.gcsafe.} = ## Handle peers received via gossipsub peer exchange # TODO: Only consider peers on pubsub topics we subscribe to - let exchangedPeers = peers.filterIt(it.record.isSome()) # only peers with populated records - .mapIt(toRemotePeerInfo(it.record.get())) + let exchangedPeers = peers.filterIt(it.record.isSome()) + # only peers with populated records + .mapIt(toRemotePeerInfo(it.record.get())) - debug "connecting to exchanged peers", src=peer, topic=topic, numPeers=exchangedPeers.len + debug "connecting to exchanged peers", + src = peer, topic = topic, numPeers = exchangedPeers.len # asyncSpawn, as we don't want to block here asyncSpawn node.connectToNodes(exchangedPeers, "peer exchange") @@ -134,7 +145,8 @@ proc setupProtocols(node: WakuNode, if conf.pubsubTopics.len > 0 or conf.contentTopics.len > 0: # TODO autoshard content topics only once. # Already checked for errors in app.init - let shards = conf.contentTopics.mapIt(node.wakuSharding.getShard(it).expect("Valid Shard")) + let shards = + conf.contentTopics.mapIt(node.wakuSharding.getShard(it).expect("Valid Shard")) conf.pubsubTopics & shards else: conf.topics @@ -142,30 +154,36 @@ proc setupProtocols(node: WakuNode, let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr: return err("failed to parse 'max-num-bytes-msg-size' param: " & $error) - debug "Setting max message size", num_bytes=parsedMaxMsgSize + debug "Setting max message size", num_bytes = parsedMaxMsgSize try: - await mountRelay(node, pubsubTopics, peerExchangeHandler = peerExchangeHandler, - int(parsedMaxMsgSize)) + await mountRelay( + node, + pubsubTopics, + peerExchangeHandler = peerExchangeHandler, + int(parsedMaxMsgSize), + ) except CatchableError: return err("failed to mount waku relay protocol: " & getCurrentExceptionMsg()) # Add validation keys to protected topics - var subscribedProtectedTopics : seq[ProtectedTopic] + var subscribedProtectedTopics: seq[ProtectedTopic] for topicKey in conf.protectedTopics: if topicKey.topic notin pubsubTopics: warn "protected topic not in subscribed pubsub topics, skipping adding validator", - protectedTopic=topicKey.topic, subscribedTopics=pubsubTopics + protectedTopic = topicKey.topic, subscribedTopics = pubsubTopics continue subscribedProtectedTopics.add(topicKey) - notice "routing only signed traffic", protectedTopic=topicKey.topic, publicKey=topicKey.key + notice "routing only signed traffic", + protectedTopic = topicKey.topic, publicKey = topicKey.key node.wakuRelay.addSignedTopicsValidator(subscribedProtectedTopics) # Enable Rendezvous Discovery protocol when Relay is enabled try: await mountRendezvous(node) except CatchableError: - return err("failed to mount waku rendezvous protocol: " & getCurrentExceptionMsg()) + return + err("failed to mount waku rendezvous protocol: " & getCurrentExceptionMsg()) # Keepalive mounted on all nodes try: @@ -213,11 +231,10 @@ proc setupProtocols(node: WakuNode, if conf.store: # Archive setup - let archiveDriverRes = waitFor ArchiveDriver.new(conf.storeMessageDbUrl, - conf.storeMessageDbVacuum, - conf.storeMessageDbMigration, - conf.storeMaxNumDbConnections, - onFatalErrorAction) + let archiveDriverRes = waitFor 
ArchiveDriver.new( + conf.storeMessageDbUrl, conf.storeMessageDbVacuum, conf.storeMessageDbMigration, + conf.storeMaxNumDbConnections, onFatalErrorAction, + ) if archiveDriverRes.isErr(): return err("failed to setup archive driver: " & archiveDriverRes.error) @@ -225,8 +242,7 @@ proc setupProtocols(node: WakuNode, if retPolicyRes.isErr(): return err("failed to create retention policy: " & retPolicyRes.error) - let mountArcRes = node.mountArchive(archiveDriverRes.get(), - retPolicyRes.get()) + let mountArcRes = node.mountArchive(archiveDriverRes.get(), retPolicyRes.get()) if mountArcRes.isErr(): return err("failed to mount waku archive protocol: " & mountArcRes.error) @@ -264,13 +280,16 @@ proc setupProtocols(node: WakuNode, try: await mountLegacyFilter(node, filterTimeout = chronos.seconds(conf.filterTimeout)) except CatchableError: - return err("failed to mount waku legacy filter protocol: " & getCurrentExceptionMsg()) + return + err("failed to mount waku legacy filter protocol: " & getCurrentExceptionMsg()) try: - await mountFilter(node, - subscriptionTimeout = chronos.seconds(conf.filterSubscriptionTimeout), - maxFilterPeers = conf.filterMaxPeersToServe, - maxFilterCriteriaPerPeer = conf.filterMaxCriteria) + await mountFilter( + node, + subscriptionTimeout = chronos.seconds(conf.filterSubscriptionTimeout), + maxFilterPeers = conf.filterMaxPeersToServe, + maxFilterCriteriaPerPeer = conf.filterMaxCriteria, + ) except CatchableError: return err("failed to mount waku filter protocol: " & getCurrentExceptionMsg()) @@ -282,7 +301,9 @@ proc setupProtocols(node: WakuNode, node.peerManager.addServicePeer(filterNode.value, WakuLegacyFilterCodec) node.peerManager.addServicePeer(filterNode.value, WakuFilterSubscribeCodec) except CatchableError: - return err("failed to mount waku filter client protocol: " & getCurrentExceptionMsg()) + return err( + "failed to mount waku filter client protocol: " & getCurrentExceptionMsg() + ) else: return err("failed to set node waku filter peer: " & filterNode.error) @@ -291,21 +312,24 @@ proc setupProtocols(node: WakuNode, try: await mountPeerExchange(node) except CatchableError: - return err("failed to mount waku peer-exchange protocol: " & getCurrentExceptionMsg()) + return + err("failed to mount waku peer-exchange protocol: " & getCurrentExceptionMsg()) if conf.peerExchangeNode != "": let peerExchangeNode = parsePeerInfo(conf.peerExchangeNode) if peerExchangeNode.isOk(): node.peerManager.addServicePeer(peerExchangeNode.value, WakuPeerExchangeCodec) else: - return err("failed to set node waku peer-exchange peer: " & peerExchangeNode.error) + return + err("failed to set node waku peer-exchange peer: " & peerExchangeNode.error) return ok() ## Start node -proc startNode*(node: WakuNode, conf: WakuNodeConf, - dynamicBootstrapNodes: seq[RemotePeerInfo] = @[]): Future[Result[void, string]] {.async.} = +proc startNode*( + node: WakuNode, conf: WakuNodeConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[] +): Future[Result[void, string]] {.async.} = ## Start a configured node and all mounted protocols. ## Connect to static nodes and start ## keep-alive, if configured. 
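Note: the mount* call sites in setupProtocols all share one shape after reformatting: a raising (async) call is fenced with try/except and converted into the string-error `Result` the proc returns. A compilable sketch of that shape under chronos (the `flakyMount` proc is invented for illustration; chronos and stew/results are assumed):

    import chronos
    import stew/results

    proc flakyMount() {.async.} =
      raise newException(CatchableError, "mount failed")

    proc safeMount(): Future[Result[void, string]] {.async.} =
      try:
        await flakyMount()
      except CatchableError:
        return err("failed to mount waku protocol: " & getCurrentExceptionMsg())
      return ok()

    when isMainModule:
      let res = waitFor safeMount()
      echo res # prints the Err branch with the exception message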
@@ -328,13 +352,14 @@ proc startNode*(node: WakuNode, conf: WakuNodeConf, try: await connectToNodes(node, dynamicBootstrapNodes, "dynamic bootstrap") except CatchableError: - return err("failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()) + return + err("failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()) # retrieve px peers and add them to the peer store if conf.peerExchangeNode != "": let desiredOutDegree = node.wakuRelay.parameters.d.uint64() (await node.fetchPeerExchangePeers(desiredOutDegree)).isOkOr: - error "error while fetching peers from peer exchange", error = error + error "error while fetching peers from peer exchange", error = error quit(QuitFailure) # Start keepalive, if enabled @@ -347,54 +372,59 @@ proc startNode*(node: WakuNode, conf: WakuNodeConf, return ok() -proc setupNode*(conf: WakuNodeConf, rng: Option[ref HmacDrbgContext] = none(ref HmacDrbgContext)): - Result[WakuNode, string] = - var nodeRng = if rng.isSome(): rng.get() else: crypto.newRng() +proc setupNode*( + conf: WakuNodeConf, rng: Option[ref HmacDrbgContext] = none(ref HmacDrbgContext) +): Result[WakuNode, string] = + var nodeRng = + if rng.isSome(): + rng.get() + else: + crypto.newRng() - # Use provided key only if corresponding rng is also provided - let key = - if conf.nodeKey.isSome() and rng.isSome(): - conf.nodeKey.get() - else: - warn "missing key or rng, generating new set" - crypto.PrivateKey.random(Secp256k1, nodeRng[]).valueOr: - error "Failed to generate key", error=error - return err("Failed to generate key: " & $error) - - let netConfig = networkConfiguration(conf, clientId).valueOr: - error "failed to create internal config", error=error - return err("failed to create internal config: " & error) + # Use provided key only if corresponding rng is also provided + let key = + if conf.nodeKey.isSome() and rng.isSome(): + conf.nodeKey.get() + else: + warn "missing key or rng, generating new set" + crypto.PrivateKey.random(Secp256k1, nodeRng[]).valueOr: + error "Failed to generate key", error = error + return err("Failed to generate key: " & $error) - let record = enrConfiguration(conf, netConfig, key).valueOr: - error "failed to create record", error=error - return err("failed to create record: " & error) + let netConfig = networkConfiguration(conf, clientId).valueOr: + error "failed to create internal config", error = error + return err("failed to create internal config: " & error) - if isClusterMismatched(record, conf.clusterId): - error "cluster id mismatch configured shards" - return err("cluster id mismatch configured shards") + let record = enrConfiguration(conf, netConfig, key).valueOr: + error "failed to create record", error = error + return err("failed to create record: " & error) - debug "Setting up storage" + if isClusterMismatched(record, conf.clusterId): + error "cluster id mismatch configured shards" + return err("cluster id mismatch configured shards") - ## Peer persistence - var peerStore: Option[WakuPeerStorage] - if conf.peerPersistence: - peerStore = setupPeerStorage().valueOr: - error "Setting up storage failed", error = "failed to setup peer store " & error - return err("Setting up storage failed: " & error) + debug "Setting up storage" - debug "Initializing node" + ## Peer persistence + var peerStore: Option[WakuPeerStorage] + if conf.peerPersistence: + peerStore = setupPeerStorage().valueOr: + error "Setting up storage failed", error = "failed to setup peer store " & error + return err("Setting up storage failed: " & error) - let
node = initNode(conf, netConfig, nodeRng, key, record, peerStore).valueOr: - error "Initializing node failed", error = error - return err("Initializing node failed: " & error) + debug "Initializing node" - debug "Mounting protocols" + let node = initNode(conf, netConfig, nodeRng, key, record, peerStore).valueOr: + error "Initializing node failed", error = error + return err("Initializing node failed: " & error) - try: - (waitFor node.setupProtocols(conf, key)).isOkOr: - error "Mounting protocols failed", error = error - return err("Mounting protocols failed: " & error) - except CatchableError: - return err("Exception setting up protocols: " & getCurrentExceptionMsg()) + debug "Mounting protocols" - return ok(node) \ No newline at end of file + try: + (waitFor node.setupProtocols(conf, key)).isOkOr: + error "Mounting protocols failed", error = error + return err("Mounting protocols failed: " & error) + except CatchableError: + return err("Exception setting up protocols: " & getCurrentExceptionMsg()) + + return ok(node) diff --git a/waku/factory/validator_signed.nim b/waku/factory/validator_signed.nim index 14901b70c..ddb45102f 100644 --- a/waku/factory/validator_signed.nim +++ b/waku/factory/validator_signed.nim @@ -16,26 +16,30 @@ import nimcrypto/sha2, secp256k1 -const MessageWindowInSec = 5*60 # +- 5 minutes +const MessageWindowInSec = 5 * 60 # +- 5 minutes -import - ./external_config, - ../waku_relay/protocol, - ../waku_core +import ./external_config, ../waku_relay/protocol, ../waku_core -declarePublicCounter waku_msg_validator_signed_outcome, "number of messages for each validation outcome", ["result"] +declarePublicCounter waku_msg_validator_signed_outcome, + "number of messages for each validation outcome", ["result"] # Application level message hash proc msgHash*(pubSubTopic: string, msg: WakuMessage): array[32, byte] = var ctx: sha256 ctx.init() - defer: ctx.clear() + defer: + ctx.clear() ctx.update(pubsubTopic.toBytes()) ctx.update(msg.payload) ctx.update(msg.contentTopic.toBytes()) ctx.update(msg.timestamp.uint64.toBytes(Endianness.littleEndian)) - ctx.update(if msg.ephemeral: @[1.byte] else: @[0.byte]) + ctx.update( + if msg.ephemeral: + @[1.byte] + else: + @[0.byte] + ) return ctx.finish() @@ -53,12 +57,13 @@ proc withinTimeWindow*(msg: WakuMessage): bool = proc addSignedTopicsValidator*(w: WakuRelay, protectedTopics: seq[ProtectedTopic]) = debug "adding validator to signed topics" - proc validator(topic: string, msg: WakuMessage): Future[errors.ValidationResult] {.async.} = + proc validator( + topic: string, msg: WakuMessage + ): Future[errors.ValidationResult] {.async.} = var outcome = errors.ValidationResult.Reject - - for protectedTopic in protectedTopics: - if(protectedTopic.topic == topic): + for protectedTopic in protectedTopics: + if (protectedTopic.topic == topic): if msg.timestamp != 0: if msg.withinTimeWindow(): let msgHash = SkMessage(topic.msgHash(msg)) @@ -68,10 +73,11 @@ proc addSignedTopicsValidator*(w: WakuRelay, protectedTopics: seq[ProtectedTopic outcome = errors.ValidationResult.Accept if outcome != errors.ValidationResult.Accept: - debug "signed topic validation failed", topic=topic, publicTopicKey=protectedTopic.key + debug "signed topic validation failed", + topic = topic, publicTopicKey = protectedTopic.key waku_msg_validator_signed_outcome.inc(labelValues = [$outcome]) return outcome return errors.ValidationResult.Accept - w.addValidator(validator, "signed topic validation failed") \ No newline at end of file + w.addValidator(validator, "signed topic 
validation failed") diff --git a/waku/node/config.nim b/waku/node/config.nim index 3d153d549..3e0650622 100644 --- a/waku/node/config.nim +++ b/waku/node/config.nim @@ -8,11 +8,8 @@ import stew/results, stew/shims/net, libp2p/[multiaddress, multicodec] -import - ../../waku/waku_core/peers -import - ../waku_enr - +import ../../waku/waku_core/peers +import ../waku_enr type NetConfig* = object hostAddress*: MultiAddress @@ -36,7 +33,6 @@ type NetConfig* = object type NetConfigResult* = Result[NetConfig, string] - template ip4TcpEndPoint(address, port): MultiAddress = MultiAddress.init(address, tcpProtocol, port) @@ -50,14 +46,15 @@ template dns4TcpEndPoint(dns4DomainName: string, port: Port): MultiAddress = dns4Ma(dns4DomainName) & tcpPortMa(port) template wsFlag(wssEnabled: bool): MultiAddress = - if wssEnabled: MultiAddress.init("/wss").tryGet() - else: MultiAddress.init("/ws").tryGet() - + if wssEnabled: + MultiAddress.init("/wss").tryGet() + else: + MultiAddress.init("/ws").tryGet() proc formatListenAddress(inputMultiAdd: MultiAddress): MultiAddress = - let inputStr = $inputMultiAdd - # If MultiAddress contains "0.0.0.0", replace it with "127.0.0.1" - return MultiAddress.init(inputStr.replace("0.0.0.0", "127.0.0.1")).get() + let inputStr = $inputMultiAdd + # If MultiAddress contains "0.0.0.0", replace it with "127.0.0.1" + return MultiAddress.init(inputStr.replace("0.0.0.0", "127.0.0.1")).get() proc isWsAddress*(ma: MultiAddress): bool = let @@ -67,9 +64,10 @@ proc isWsAddress*(ma: MultiAddress): bool = return isWs or isWss proc containsWsAddress(extMultiAddrs: seq[MultiAddress]): bool = - return extMultiAddrs.filterIt( it.isWsAddress() ).len > 0 + return extMultiAddrs.filterIt(it.isWsAddress()).len > 0 -proc init*(T: type NetConfig, +proc init*( + T: type NetConfig, bindIp: IpAddress, bindPort: Port, extIp = none(IpAddress), @@ -82,7 +80,8 @@ proc init*(T: type NetConfig, dns4DomainName = none(string), discv5UdpPort = none(Port), clusterId: uint32 = 0, - wakuFlags = none(CapabilitiesBitfield)): NetConfigResult = + wakuFlags = none(CapabilitiesBitfield), +): NetConfigResult = ## Initialize and validate waku node network configuration # Bind addresses @@ -95,8 +94,16 @@ proc init*(T: type NetConfig, except CatchableError: return err(getCurrentExceptionMsg()) - let enrIp = if extIp.isSome(): extIp else: some(bindIp) - let enrPort = if extPort.isSome(): extPort else: some(bindPort) + let enrIp = + if extIp.isSome(): + extIp + else: + some(bindIp) + let enrPort = + if extPort.isSome(): + extPort + else: + some(bindPort) # Setup external addresses, if available var hostExtAddress, wsExtAddress = none(MultiAddress) @@ -110,7 +117,8 @@ proc init*(T: type NetConfig, if wsHostAddress.isSome(): try: - wsExtAddress = some(dns4TcpEndPoint(dns4DomainName.get(), wsBindPort) & wsFlag(wssEnabled)) + wsExtAddress = + some(dns4TcpEndPoint(dns4DomainName.get(), wsBindPort) & wsFlag(wssEnabled)) except CatchableError: return err(getCurrentExceptionMsg()) else: @@ -120,7 +128,8 @@ proc init*(T: type NetConfig, if wsHostAddress.isSome(): try: - wsExtAddress = some(ip4TcpEndPoint(extIp.get(), wsBindPort) & wsFlag(wssEnabled)) + wsExtAddress = + some(ip4TcpEndPoint(extIp.get(), wsBindPort) & wsFlag(wssEnabled)) except CatchableError: return err(getCurrentExceptionMsg()) @@ -130,14 +139,15 @@ proc init*(T: type NetConfig, if hostExtAddress.isSome(): announcedAddresses.add(hostExtAddress.get()) else: - announcedAddresses.add(formatListenAddress(hostAddress)) # We always have at least a bind address for the host +
announcedAddresses.add(formatListenAddress(hostAddress)) + # We always have at least a bind address for the host if wsExtAddress.isSome(): announcedAddresses.add(wsExtAddress.get()) elif wsHostAddress.isSome() and not containsWsAddress(extMultiAddrs): # Only publish wsHostAddress if a WS address is not set in extMultiAddrs announcedAddresses.add(wsHostAddress.get()) - + # External multiaddrs that the operator may have configured if extMultiAddrs.len > 0: announcedAddresses.add(extMultiAddrs) @@ -145,28 +155,30 @@ proc init*(T: type NetConfig, let # enrMultiaddrs are just addresses which cannot be represented in ENR, as described in # https://rfc.vac.dev/spec/31/#many-connection-types - enrMultiaddrs = announcedAddresses.filterIt(it.hasProtocol("dns4") or - it.hasProtocol("dns6") or - it.hasProtocol("ws") or - it.hasProtocol("wss")) + enrMultiaddrs = announcedAddresses.filterIt( + it.hasProtocol("dns4") or it.hasProtocol("dns6") or it.hasProtocol("ws") or + it.hasProtocol("wss") + ) - ok(NetConfig( - hostAddress: hostAddress, - clusterId: clusterId, - wsHostAddress: wsHostAddress, - hostExtAddress: hostExtAddress, - wsExtAddress: wsExtAddress, - extIp: extIp, - extPort: extPort, - wssEnabled: wssEnabled, - dns4DomainName: dns4DomainName, - announcedAddresses: announcedAddresses, - extMultiAddrs: extMultiAddrs, - enrMultiaddrs: enrMultiaddrs, - enrIp: enrIp, - enrPort: enrPort, - discv5UdpPort: discv5UdpPort, - bindIp: bindIp, - bindPort: bindPort, - wakuFlags: wakuFlags - )) + ok( + NetConfig( + hostAddress: hostAddress, + clusterId: clusterId, + wsHostAddress: wsHostAddress, + hostExtAddress: hostExtAddress, + wsExtAddress: wsExtAddress, + extIp: extIp, + extPort: extPort, + wssEnabled: wssEnabled, + dns4DomainName: dns4DomainName, + announcedAddresses: announcedAddresses, + extMultiAddrs: extMultiAddrs, + enrMultiaddrs: enrMultiaddrs, + enrIp: enrIp, + enrPort: enrPort, + discv5UdpPort: discv5UdpPort, + bindIp: bindIp, + bindPort: bindPort, + wakuFlags: wakuFlags, + ) + ) diff --git a/waku/node/peer_manager.nim b/waku/node/peer_manager.nim index 16d0be283..16cb06c1f 100644 --- a/waku/node/peer_manager.nim +++ b/waku/node/peer_manager.nim @@ -1,5 +1,3 @@ -import - ./peer_manager/peer_manager +import ./peer_manager/peer_manager -export - peer_manager +export peer_manager diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim index 9f652688d..65df13697 100644 --- a/waku/node/peer_manager/peer_manager.nim +++ b/waku/node/peer_manager/peer_manager.nim @@ -3,7 +3,6 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} - import std/[options, sets, sequtils, times, strutils, math, random], chronos, @@ -27,12 +26,17 @@ export waku_peer_store, peer_storage, peers declareCounter waku_peers_dials, "Number of peer dials", ["outcome"] # TODO: Populate from PeerStore.Source when ready -declarePublicCounter waku_node_conns_initiated, "Number of connections initiated", ["source"] +declarePublicCounter waku_node_conns_initiated, + "Number of connections initiated", ["source"] declarePublicGauge waku_peers_errors, "Number of peer manager errors", ["type"] -declarePublicGauge waku_connected_peers, "Number of physical connections per direction and protocol", labels = ["direction", "protocol"] -declarePublicGauge waku_streams_peers, "Number of streams per direction and protocol", labels = ["direction", "protocol"] +declarePublicGauge waku_connected_peers, + "Number of physical connections per direction and protocol", + labels = ["direction", "protocol"] 
+declarePublicGauge waku_streams_peers, + "Number of streams per direction and protocol", labels = ["direction", "protocol"] declarePublicGauge waku_peer_store_size, "Number of peers managed by the peer store" -declarePublicGauge waku_service_peers, "Service peer protocol and multiaddress ", labels = ["protocol", "peerId"] +declarePublicGauge waku_service_peers, + "Service peer protocol and multiaddress ", labels = ["protocol", "peerId"] logScope: topics = "waku node peer_manager" @@ -67,23 +71,22 @@ const # Max peers that we allow from the same IP DefaultColocationLimit* = 5 -type - PeerManager* = ref object of RootObj - switch*: Switch - peerStore*: PeerStore - wakuMetadata*: WakuMetadata - initialBackoffInSec*: int - backoffFactor*: int - maxFailedAttempts*: int - storage*: PeerStorage - serviceSlots*: Table[string, RemotePeerInfo] - maxRelayPeers*: int - outRelayPeersTarget: int - inRelayPeersTarget: int - ipTable*: Table[string, seq[PeerId]] - colocationLimit*: int - started: bool - shardedPeerManagement: bool # temp feature flag +type PeerManager* = ref object of RootObj + switch*: Switch + peerStore*: PeerStore + wakuMetadata*: WakuMetadata + initialBackoffInSec*: int + backoffFactor*: int + maxFailedAttempts*: int + storage*: PeerStorage + serviceSlots*: Table[string, RemotePeerInfo] + maxRelayPeers*: int + outRelayPeersTarget: int + inRelayPeersTarget: int + ipTable*: Table[string, seq[PeerId]] + colocationLimit*: int + started: bool + shardedPeerManagement: bool # temp feature flag proc protocolMatcher*(codec: string): Matcher = ## Returns a protocol matcher function for the provided codec @@ -95,12 +98,12 @@ proc protocolMatcher*(codec: string): Matcher = return match -proc calculateBackoff(initialBackoffInSec: int, - backoffFactor: int, - failedAttempts: int): timer.Duration = +proc calculateBackoff( + initialBackoffInSec: int, backoffFactor: int, failedAttempts: int +): timer.Duration = if failedAttempts == 0: return chronos.seconds(0) - return chronos.seconds(initialBackoffInSec*(backoffFactor^(failedAttempts-1))) + return chronos.seconds(initialBackoffInSec * (backoffFactor ^ (failedAttempts - 1))) #################### # Helper functions # @@ -121,20 +124,21 @@ proc addPeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, origin = UnknownO return if pm.peerStore[AddressBook][remotePeerInfo.peerId] == remotePeerInfo.addrs and - pm.peerStore[KeyBook][remotePeerInfo.peerId] == remotePeerInfo.publicKey and - pm.peerStore[ENRBook][remotePeerInfo.peerId].raw.len > 0: + pm.peerStore[KeyBook][remotePeerInfo.peerId] == remotePeerInfo.publicKey and + pm.peerStore[ENRBook][remotePeerInfo.peerId].raw.len > 0: # Peer already managed and ENR info is already saved return - trace "Adding peer to manager", peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs - + trace "Adding peer to manager", + peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs + pm.peerStore[AddressBook][remotePeerInfo.peerId] = remotePeerInfo.addrs pm.peerStore[KeyBook][remotePeerInfo.peerId] = remotePeerInfo.publicKey pm.peerStore[SourceBook][remotePeerInfo.peerId] = origin - + if remotePeerInfo.protocols.len > 0: pm.peerStore[ProtoBook][remotePeerInfo.peerId] = remotePeerInfo.protocols - + if remotePeerInfo.enr.isSome(): pm.peerStore[ENRBook][remotePeerInfo.peerId] = remotePeerInfo.enr.get() @@ -147,11 +151,12 @@ proc addPeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, origin = UnknownO # Connects to a given node. Note that this function uses `connect` and # does not provide a protocol. 
Streams for relay (gossipsub) are created # automatically without needing to dial. -proc connectRelay*(pm: PeerManager, - peer: RemotePeerInfo, - dialTimeout = DefaultDialTimeout, - source = "api"): Future[bool] {.async.} = - +proc connectRelay*( + pm: PeerManager, + peer: RemotePeerInfo, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[bool] {.async.} = let peerId = peer.peerId # Do not attempt to dial self @@ -162,39 +167,43 @@ proc connectRelay*(pm: PeerManager, pm.addPeer(peer) let failedAttempts = pm.peerStore[NumberFailedConnBook][peerId] - trace "Connecting to relay peer", wireAddr=peer.addrs, peerId=peerId, failedAttempts=failedAttempts + trace "Connecting to relay peer", + wireAddr = peer.addrs, peerId = peerId, failedAttempts = failedAttempts var deadline = sleepAsync(dialTimeout) let workfut = pm.switch.connect(peerId, peer.addrs) - - # Can't use catch: with .withTimeout() in this case - let res = catch: await workfut or deadline - let reasonFailed = + # Can't use catch: with .withTimeout() in this case + let res = catch: + await workfut or deadline + + let reasonFailed = if not workfut.finished(): await workfut.cancelAndWait() "timed out" - elif res.isErr(): res.error.msg - else: + elif res.isErr(): + res.error.msg + else: if not deadline.finished(): await deadline.cancelAndWait() - + waku_peers_dials.inc(labelValues = ["successful"]) waku_node_conns_initiated.inc(labelValues = [source]) pm.peerStore[NumberFailedConnBook][peerId] = 0 return true - + # Dial failed - pm.peerStore[NumberFailedConnBook][peerId] = pm.peerStore[NumberFailedConnBook][peerId] + 1 + pm.peerStore[NumberFailedConnBook][peerId] = + pm.peerStore[NumberFailedConnBook][peerId] + 1 pm.peerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second) pm.peerStore[ConnectionBook][peerId] = CannotConnect trace "Connecting relay peer failed", - peerId = peerId, - reason = reasonFailed, - failedAttempts = pm.peerStore[NumberFailedConnBook][peerId] + peerId = peerId, + reason = reasonFailed, + failedAttempts = pm.peerStore[NumberFailedConnBook][peerId] waku_peers_dials.inc(labelValues = [reasonFailed]) return false @@ -202,13 +211,14 @@ proc connectRelay*(pm: PeerManager, # Dialing should be used only for protocols that require a stream to write and read # This shall not be used to dial Relay protocols, since that would create # unnecessary unused streams.
-proc dialPeer(pm: PeerManager, - peerId: PeerID, - addrs: seq[MultiAddress], - proto: string, - dialTimeout = DefaultDialTimeout, - source = "api"): Future[Option[Connection]] {.async.} = - +proc dialPeer( + pm: PeerManager, + peerId: PeerID, + addrs: seq[MultiAddress], + proto: string, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[Option[Connection]] {.async.} = if peerId == pm.switch.peerInfo.peerId: error "could not dial self" return none(Connection) @@ -217,7 +227,7 @@ proc dialPeer(pm: PeerManager, error "dial shall not be used to connect to relays" return none(Connection) - trace "Dialing peer", wireAddr=addrs, peerId=peerId, proto=proto + trace "Dialing peer", wireAddr = addrs, peerId = peerId, proto = proto # Dial Peer let dialFut = pm.switch.dial(peerId, addrs, proto) @@ -225,26 +235,25 @@ proc dialPeer(pm: PeerManager, let res = catch: if await dialFut.withTimeout(dialTimeout): return some(dialFut.read()) - else: await cancelAndWait(dialFut) + else: + await cancelAndWait(dialFut) - let reasonFailed = - if res.isOk: "timed out" - else: res.error.msg + let reasonFailed = if res.isOk: "timed out" else: res.error.msg - trace "Dialing peer failed", peerId=peerId, reason=reasonFailed, proto=proto + trace "Dialing peer failed", peerId = peerId, reason = reasonFailed, proto = proto return none(Connection) proc loadFromStorage(pm: PeerManager) = ## Load peers from storage, if available - + trace "loading peers from storage" - + var amount = 0 proc onData(remotePeerInfo: RemotePeerInfo) = let peerId = remotePeerInfo.peerId - + if pm.switch.peerInfo.peerId == peerId: # Do not manage self return @@ -264,10 +273,10 @@ proc loadFromStorage(pm: PeerManager) = pm.peerStore[ProtoVersionBook][peerId] = remotePeerInfo.protoVersion # custom books - pm.peerStore[ConnectionBook][peerId] = NotConnected # Reset connectedness state + pm.peerStore[ConnectionBook][peerId] = NotConnected # Reset connectedness state pm.peerStore[DisconnectBook][peerId] = remotePeerInfo.disconnectTime pm.peerStore[SourceBook][peerId] = remotePeerInfo.origin - + if remotePeerInfo.enr.isSome(): pm.peerStore[ENRBook][peerId] = remotePeerInfo.enr.get() @@ -280,8 +289,7 @@ proc loadFromStorage(pm: PeerManager) = trace "recovered peers from storage", amount = amount -proc canBeConnected*(pm: PeerManager, - peerId: PeerId): bool = +proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool = # Returns if we can try to connect to this peer, based on past failed attempts # It uses an exponential backoff. Each connection attempt makes us # wait more before trying again. 
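Note: the reconnection backoff reformatted above follows backoff = initialBackoffInSec * backoffFactor^(failedAttempts - 1), with zero wait while no attempt has failed yet. A worked sketch of that arithmetic (the numbers below are purely illustrative, not defaults taken from this diff):

    import std/math
    import chronos

    proc backoffFor(initialSec, factor, failedAttempts: int): chronos.Duration =
      # Mirrors calculateBackoff: no wait before the first attempt,
      # then exponential growth per failed attempt.
      if failedAttempts == 0:
        return chronos.seconds(0)
      chronos.seconds(initialSec * (factor ^ (failedAttempts - 1)))

    when isMainModule:
      for failed in 0 .. 3:
        echo failed, " failed attempt(s) -> ", backoffFor(120, 4, failed)
      # e.g. 0 -> 0 s, 1 -> 120 s, 2 -> 480 s, 3 -> 1920 s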
@@ -299,8 +307,9 @@ proc canBeConnected, # the more failed attempts, the greater the backoff since last attempt let now = Moment.init(getTime().toUnix, Second) let lastFailed = pm.peerStore[LastFailedConnBook][peerId] - let backoff = calculateBackoff(pm.initialBackoffInSec, pm.backoffFactor, failedAttempts) - + let backoff = + calculateBackoff(pm.initialBackoffInSec, pm.backoffFactor, failedAttempts) + return now >= (lastFailed + backoff) ################## @@ -319,17 +328,17 @@ proc getPeerIp(pm: PeerManager, peerId: PeerId): Option[string] = return none(string) # TODO: think if circuit relay ips should be handled differently - + return some(obAddr.getHostname()) # called when a connection i) is created or ii) is closed proc onConnEvent(pm: PeerManager, peerId: PeerID, event: ConnEvent) {.async.} = case event.kind - of ConnEventKind.Connected: - #let direction = if event.incoming: Inbound else: Outbound - discard - of ConnEventKind.Disconnected: - discard + of ConnEventKind.Connected: + #let direction = if event.incoming: Inbound else: Outbound + discard + of ConnEventKind.Disconnected: + discard proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = # To prevent metadata protocol from breaking prev nodes, by now we only @@ -337,14 +346,15 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = if pm.wakuMetadata.clusterId == 0: return - let res = catch: await pm.switch.dial(peerId, WakuMetadataCodec) + let res = catch: + await pm.switch.dial(peerId, WakuMetadataCodec) var reason: string block guardClauses: let conn = res.valueOr: reason = "dial failed: " & error.msg break guardClauses - + let metadata = (await pm.wakuMetadata.request(conn)).valueOr: reason = "waku metadata request failed: " & error break guardClauses @@ -354,7 +364,9 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = if pm.wakuMetadata.clusterId != clusterId: - reason = "different clusterId reported: " & $pm.wakuMetadata.clusterId & " vs " & $clusterId + reason = + "different clusterId reported: " & $pm.wakuMetadata.clusterId & " vs " + $clusterId break guardClauses if not metadata.shards.anyIt(pm.wakuMetadata.shards.contains(it)): @@ -362,8 +374,8 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = break guardClauses return - - info "disconnecting from peer", peerId=peerId, reason=reason + + info "disconnecting from peer", peerId = peerId, reason = reason asyncSpawn(pm.switch.disconnect(peerId)) pm.peerStore.delete(peerId) @@ -375,34 +387,34 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = var direction: PeerDirection var connectedness: Connectedness - case event.kind: - of Joined: - direction = if event.initiator: Outbound else: Inbound - connectedness = Connected + case event.kind + of Joined: + direction = if event.initiator: Outbound else: Inbound + connectedness = Connected - if (let ip = pm.getPeerIp(peerId); ip.isSome()): - pm.ipTable.mgetOrPut(ip.get, newSeq[PeerId]()).add(peerId) + if (let ip = pm.getPeerIp(peerId); ip.isSome()): + pm.ipTable.mgetOrPut(ip.get, newSeq[PeerId]()).add(peerId) - # in theory this should always be one, but just in case - let peersBehindIp = pm.ipTable[ip.get] - - # pm.colocationLimit == 0 disables the ip colocation limit - if pm.colocationLimit != 0 and peersBehindIp.len > pm.colocationLimit: - for peerId in peersBehindIp[0..<(peersBehindIp.len - pm.colocationLimit)]: - debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip -
asyncSpawn(pm.switch.disconnect(peerId)) - pm.peerStore.delete(peerId) - of Left: - direction = UnknownDirection - connectedness = CanConnect + # in theory this should always be one, but just in case + let peersBehindIp = pm.ipTable[ip.get] - # note we can't access the peerId ip here as the connection was already closed - for ip, peerIds in pm.ipTable.pairs: - if peerIds.contains(peerId): - pm.ipTable[ip] = pm.ipTable[ip].filterIt(it != peerId) - if pm.ipTable[ip].len == 0: - pm.ipTable.del(ip) - break + # pm.colocationLimit == 0 disables the ip colocation limit + if pm.colocationLimit != 0 and peersBehindIp.len > pm.colocationLimit: + for peerId in peersBehindIp[0 ..< (peersBehindIp.len - pm.colocationLimit)]: + debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip + asyncSpawn(pm.switch.disconnect(peerId)) + pm.peerStore.delete(peerId) + of Left: + direction = UnknownDirection + connectedness = CanConnect + + # note we can't access the peerId ip here as the connection was already closed + for ip, peerIds in pm.ipTable.pairs: + if peerIds.contains(peerId): + pm.ipTable[ip] = pm.ipTable[ip].filterIt(it != peerId) + if pm.ipTable[ip].len == 0: + pm.ipTable.del(ip) + break pm.peerStore[ConnectionBook][peerId] = connectedness pm.peerStore[DirectionBook][peerId] = direction @@ -413,37 +425,40 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = pm.storage.insertOrReplace(remotePeerInfo) -proc new*(T: type PeerManager, - switch: Switch, - wakuMetadata: WakuMetadata = nil, - maxRelayPeers: Option[int] = none(int), - storage: PeerStorage = nil, - initialBackoffInSec = InitialBackoffInSec, - backoffFactor = BackoffFactor, - maxFailedAttempts = MaxFailedAttempts, - colocationLimit = DefaultColocationLimit, - shardedPeerManagement = false): PeerManager = - +proc new*( + T: type PeerManager, + switch: Switch, + wakuMetadata: WakuMetadata = nil, + maxRelayPeers: Option[int] = none(int), + storage: PeerStorage = nil, + initialBackoffInSec = InitialBackoffInSec, + backoffFactor = BackoffFactor, + maxFailedAttempts = MaxFailedAttempts, + colocationLimit = DefaultColocationLimit, + shardedPeerManagement = false, +): PeerManager = let capacity = switch.peerStore.capacity let maxConnections = switch.connManager.inSema.size if maxConnections > capacity: error "Max number of connections can't be greater than PeerManager capacity", - capacity = capacity, - maxConnections = maxConnections - raise newException(Defect, "Max number of connections can't be greater than PeerManager capacity") + capacity = capacity, maxConnections = maxConnections + raise newException( + Defect, "Max number of connections can't be greater than PeerManager capacity" + ) var maxRelayPeersValue = 0 if maxRelayPeers.isSome(): if maxRelayPeers.get() > maxConnections: error "Max number of relay peers can't be greater than the max amount of connections", - maxConnections = maxConnections, - maxRelayPeers = maxRelayPeers.get() - raise newException(Defect, "Max number of relay peers can't be greater than the max amount of connections") + maxConnections = maxConnections, maxRelayPeers = maxRelayPeers.get() + raise newException( + Defect, + "Max number of relay peers can't be greater than the max amount of connections", + ) if maxRelayPeers.get() == maxConnections: warn "Max number of relay peers is equal to max amount of connections, peer won't be contributing to service peers", - maxConnections = maxConnections, - maxRelayPeers = maxRelayPeers.get() + maxConnections = maxConnections,
maxRelayPeers = maxRelayPeers.get() maxRelayPeersValue = maxRelayPeers.get() else: # Leave by default 20% of connections for service peers @@ -452,24 +467,25 @@ proc new*(T: type PeerManager, # attempt to calculate max backoff to prevent potential overflows or unreasonably high values let backoff = calculateBackoff(initialBackoffInSec, backoffFactor, maxFailedAttempts) if backoff.weeks() > 1: - error "Max backoff time can't be over 1 week", - maxBackoff=backoff + error "Max backoff time can't be over 1 week", maxBackoff = backoff raise newException(Defect, "Max backoff time can't be over 1 week") let outRelayPeersTarget = max(maxRelayPeersValue div 3, 10) - let pm = PeerManager(switch: switch, - wakuMetadata: wakuMetadata, - peerStore: switch.peerStore, - storage: storage, - initialBackoffInSec: initialBackoffInSec, - backoffFactor: backoffFactor, - outRelayPeersTarget: outRelayPeersTarget, - inRelayPeersTarget: maxRelayPeersValue - outRelayPeersTarget, - maxRelayPeers: maxRelayPeersValue, - maxFailedAttempts: maxFailedAttempts, - colocationLimit: colocationLimit, - shardedPeerManagement: shardedPeerManagement,) + let pm = PeerManager( + switch: switch, + wakuMetadata: wakuMetadata, + peerStore: switch.peerStore, + storage: storage, + initialBackoffInSec: initialBackoffInSec, + backoffFactor: backoffFactor, + outRelayPeersTarget: outRelayPeersTarget, + inRelayPeersTarget: maxRelayPeersValue - outRelayPeersTarget, + maxRelayPeers: maxRelayPeersValue, + maxFailedAttempts: maxFailedAttempts, + colocationLimit: colocationLimit, + shardedPeerManagement: shardedPeerManagement, + ) proc connHook(peerId: PeerID, event: ConnEvent): Future[void] {.gcsafe.} = onConnEvent(pm, peerId, event) @@ -511,41 +527,50 @@ proc addServicePeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, proto: str warn "Can't add relay peer to service peers slots" return - info "Adding peer to service slots", peerId = remotePeerInfo.peerId, addr = remotePeerInfo.addrs[0], service = proto + info "Adding peer to service slots", + peerId = remotePeerInfo.peerId, addr = remotePeerInfo.addrs[0], service = proto waku_service_peers.set(1, labelValues = [$proto, $remotePeerInfo.addrs[0]]) - # Set peer for service slot + # Set peer for service slot pm.serviceSlots[proto] = remotePeerInfo pm.addPeer(remotePeerInfo) -proc reconnectPeers*(pm: PeerManager, - proto: string, - backoff: chronos.Duration = chronos.seconds(0)) {.async.} = +proc reconnectPeers*( + pm: PeerManager, proto: string, backoff: chronos.Duration = chronos.seconds(0) +) {.async.} = ## Reconnect to peers registered for this protocol. This will update connectedness. ## Especially useful to resume connections from persistent storage after a restart. - trace "Reconnecting peers", proto=proto + trace "Reconnecting peers", proto = proto # Proto is not persisted, we need to iterate over all peers. for peerInfo in pm.peerStore.peers(protocolMatcher(proto)): # Check that the peer can be connected if peerInfo.connectedness == CannotConnect: - error "Not reconnecting to unreachable or non-existing peer", peerId=peerInfo.peerId + error "Not reconnecting to unreachable or non-existing peer", + peerId = peerInfo.peerId continue # Respect optional backoff period where applicable. 
let # TODO: Add method to peerStore (eg isBackoffExpired()) - disconnectTime = Moment.init(peerInfo.disconnectTime, Second) # Convert - currentTime = Moment.init(getTime().toUnix, Second) # Current time comparable to persisted value - backoffTime = disconnectTime + backoff - currentTime # Consider time elapsed since last disconnect + disconnectTime = Moment.init(peerInfo.disconnectTime, Second) # Convert + currentTime = Moment.init(getTime().toUnix, Second) + # Current time comparable to persisted value + backoffTime = disconnectTime + backoff - currentTime + # Consider time elapsed since last disconnect - trace "Respecting backoff", backoff=backoff, disconnectTime=disconnectTime, currentTime=currentTime, backoffTime=backoffTime + trace "Respecting backoff", + backoff = backoff, + disconnectTime = disconnectTime, + currentTime = currentTime, + backoffTime = backoffTime # TODO: This blocks the whole function. Try to connect to another peer in the meantime. if backoffTime > ZeroDuration: - trace "Backing off before reconnect...", peerId=peerInfo.peerId, backoffTime=backoffTime + trace "Backing off before reconnect...", + peerId = peerInfo.peerId, backoffTime = backoffTime # We disconnected recently and still need to wait for a backoff period before connecting await sleepAsync(backoffTime) @@ -555,39 +580,46 @@ proc reconnectPeers*(pm: PeerManager, # Dialer interface # #################### -proc dialPeer*(pm: PeerManager, - remotePeerInfo: RemotePeerInfo, - proto: string, - dialTimeout = DefaultDialTimeout, - source = "api", - ): Future[Option[Connection]] {.async.} = +proc dialPeer*( + pm: PeerManager, + remotePeerInfo: RemotePeerInfo, + proto: string, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[Option[Connection]] {.async.} = # Dial a given peer and add it to the list of known peers # TODO: check peer validity and score before continuing. Limit number of peers to be managed. # First add dialed peer info to peer store, if it does not exist yet. # TODO: nim libp2p peerstore already adds them if not pm.peerStore.hasPeer(remotePeerInfo.peerId, proto): - trace "Adding newly dialed peer to manager", peerId= $remotePeerInfo.peerId, address= $remotePeerInfo.addrs[0], proto= proto + trace "Adding newly dialed peer to manager", + peerId = $remotePeerInfo.peerId, address = $remotePeerInfo.addrs[0], proto = proto pm.addPeer(remotePeerInfo) - return await pm.dialPeer(remotePeerInfo.peerId,remotePeerInfo.addrs, proto, dialTimeout, source) + return await pm.dialPeer( + remotePeerInfo.peerId, remotePeerInfo.addrs, proto, dialTimeout, source + ) -proc dialPeer*(pm: PeerManager, - peerId: PeerID, - proto: string, - dialTimeout = DefaultDialTimeout, - source = "api", - ): Future[Option[Connection]] {.async.} = +proc dialPeer*( + pm: PeerManager, + peerId: PeerID, + proto: string, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[Option[Connection]] {.async.} = # Dial an existing peer by looking up its existing addrs in the switch's peerStore # TODO: check peer validity and score before continuing. Limit number of peers to be managed.
let addrs = pm.switch.peerStore[AddressBook][peerId] return await pm.dialPeer(peerId, addrs, proto, dialTimeout, source) -proc connectToNodes*(pm: PeerManager, - nodes: seq[string]|seq[RemotePeerInfo], - dialTimeout = DefaultDialTimeout, - source = "api") {.async.} = +proc connectToNodes*( + pm: PeerManager, + nodes: seq[string] | seq[RemotePeerInfo], + dialTimeout = DefaultDialTimeout, + source = "api", +) {.async.} = if nodes.len == 0: return @@ -604,7 +636,8 @@ proc connectToNodes*(pm: PeerManager, await allFutures(futConns) let successfulConns = futConns.mapIt(it.read()).countIt(it == true) - info "Finished dialing multiple peers", successfulConns=successfulConns, attempted=nodes.len + info "Finished dialing multiple peers", + successfulConns = successfulConns, attempted = nodes.len # The issue seems to be around peers not being fully connected when # trying to subscribe. So what we do is sleep to guarantee nodes are @@ -617,7 +650,7 @@ proc connectToNodes*(pm: PeerManager, proc connectedPeers*(pm: PeerManager, protocol: string): (seq[PeerId], seq[PeerId]) = ## Returns the peerIds of physical connections (in and out) ## containing at least one stream with the given protocol. - + var inPeers: seq[PeerId] var outPeers: seq[PeerId] @@ -638,22 +671,22 @@ proc getNumStreams*(pm: PeerManager, protocol: string): (int, int) = numStreamsOut = 0 for peerId, muxers in pm.switch.connManager.getConnections(): for peerConn in muxers: - for stream in peerConn.getStreams(): - if stream.protocol == protocol: - if stream.dir == Direction.In: - numStreamsIn += 1 - elif stream.dir == Direction.Out: - numStreamsOut += 1 + for stream in peerConn.getStreams(): + if stream.protocol == protocol: + if stream.dir == Direction.In: + numStreamsIn += 1 + elif stream.dir == Direction.Out: + numStreamsOut += 1 return (numStreamsIn, numStreamsOut) proc pruneInRelayConns(pm: PeerManager, amount: int) {.async.} = if amount <= 0: return - + let (inRelayPeers, _) = pm.connectedPeers(WakuRelayCodec) let connsToPrune = min(amount, inRelayPeers.len) - for p in inRelayPeers[0..= pm.outRelayPeersTarget: return - let notConnectedPeers = pm.peerStore.getNotConnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs)) + let notConnectedPeers = + pm.peerStore.getNotConnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs)) let outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId)) let numPeersToConnect = min(outsideBackoffPeers.len, MaxParallelDials) - await pm.connectToNodes(outsideBackoffPeers[0..= pruningCount: break @@ -823,13 +859,16 @@ proc prunePeerStore*(pm: PeerManager) = let afterNumPeers = pm.peerStore[AddressBook].book.len - trace "Finished pruning peer store", beforeNumPeers = numPeers, - afterNumPeers = afterNumPeers, - capacity = capacity, - pruned = peersToPrune.len + trace "Finished pruning peer store", + beforeNumPeers = numPeers, + afterNumPeers = afterNumPeers, + capacity = capacity, + pruned = peersToPrune.len -proc selectPeer*(pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic)): Option[RemotePeerInfo] = - trace "Selecting peer from peerstore", protocol=proto +proc selectPeer*( + pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic) +): Option[RemotePeerInfo] = + trace "Selecting peer from peerstore", protocol = proto # Selects the best peer for a given protocol var peers = pm.peerStore.getPeersByProtocol(proto) @@ -841,25 +880,28 @@ proc selectPeer*(pm: PeerManager, proto: string, shard: Option[PubsubTopic] = no if proto == 
WakuRelayCodec: # TODO: proper heuristic here that compares peer scores and selects "best" one. For now the first peer for the given protocol is returned if peers.len > 0: - trace "Got peer from peerstore", peerId=peers[0].peerId, multi=peers[0].addrs[0], protocol=proto + trace "Got peer from peerstore", + peerId = peers[0].peerId, multi = peers[0].addrs[0], protocol = proto return some(peers[0]) - trace "No peer found for protocol", protocol=proto + trace "No peer found for protocol", protocol = proto return none(RemotePeerInfo) # For other protocols, we select the peer that is slotted for the given protocol pm.serviceSlots.withValue(proto, serviceSlot): - trace "Got peer from service slots", peerId=serviceSlot[].peerId, multi=serviceSlot[].addrs[0], protocol=proto + trace "Got peer from service slots", + peerId = serviceSlot[].peerId, multi = serviceSlot[].addrs[0], protocol = proto return some(serviceSlot[]) # If not slotted, we select a random peer for the given protocol if peers.len > 0: - trace "Got peer from peerstore", peerId=peers[0].peerId, multi=peers[0].addrs[0], protocol=proto + trace "Got peer from peerstore", + peerId = peers[0].peerId, multi = peers[0].addrs[0], protocol = proto return some(peers[0]) - trace "No peer found for protocol", protocol=proto + trace "No peer found for protocol", protocol = proto return none(RemotePeerInfo) # Prunes peers from peerstore to remove old/stale ones -proc prunePeerStoreLoop(pm: PeerManager) {.async.} = +proc prunePeerStoreLoop(pm: PeerManager) {.async.} = trace "Starting prune peerstore loop" while pm.started: pm.prunePeerStore() @@ -871,7 +913,8 @@ proc relayConnectivityLoop*(pm: PeerManager) {.async.} = while pm.started: if pm.shardedPeerManagement: await pm.manageRelayPeers() - else: await pm.connectToRelayPeers() + else: + await pm.connectToRelayPeers() await sleepAsync(ConnectivityLoopInterval) proc logAndMetrics(pm: PeerManager) {.async.} = @@ -879,7 +922,9 @@ proc logAndMetrics(pm: PeerManager) {.async.} = # log metrics let (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) let maxConnections = pm.switch.connManager.inSema.size - let notConnectedPeers = pm.peerStore.getNotConnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs)) + let notConnectedPeers = pm.peerStore.getNotConnectedPeers().mapIt( + RemotePeerInfo.init(it.peerId, it.addrs) + ) let outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId)) let totalConnections = pm.switch.connManager.getConnections().len @@ -894,10 +939,18 @@ proc logAndMetrics(pm: PeerManager) {.async.} = for proto in pm.peerStore.getWakuProtos(): let (protoConnsIn, protoConnsOut) = pm.connectedPeers(proto) let (protoStreamsIn, protoStreamsOut) = pm.getNumStreams(proto) - waku_connected_peers.set(protoConnsIn.len.float64, labelValues = [$Direction.In, proto]) - waku_connected_peers.set(protoConnsOut.len.float64, labelValues = [$Direction.Out, proto]) - waku_streams_peers.set(protoStreamsIn.float64, labelValues = [$Direction.In, proto]) - waku_streams_peers.set(protoStreamsOut.float64, labelValues = [$Direction.Out, proto]) + waku_connected_peers.set( + protoConnsIn.len.float64, labelValues = [$Direction.In, proto] + ) + waku_connected_peers.set( + protoConnsOut.len.float64, labelValues = [$Direction.Out, proto] + ) + waku_streams_peers.set( + protoStreamsIn.float64, labelValues = [$Direction.In, proto] + ) + waku_streams_peers.set( + protoStreamsOut.float64, labelValues = [$Direction.Out, proto] + ) proc start*(pm: PeerManager) = pm.started = true @@ 
-906,4 +959,4 @@ proc start*(pm: PeerManager) = asyncSpawn pm.logAndMetrics() proc stop*(pm: PeerManager) = - pm.started = false \ No newline at end of file + pm.started = false diff --git a/waku/node/peer_manager/peer_store/migrations.nim b/waku/node/peer_manager/peer_store/migrations.nim index 7c375bd7d..abb628d19 100644 --- a/waku/node/peer_manager/peer_store/migrations.nim +++ b/waku/node/peer_manager/peer_store/migrations.nim @@ -3,25 +3,18 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} - -import - std/[tables, strutils, os], - stew/results, - chronicles -import - ../../../common/databases/db_sqlite, - ../../../common/databases/common - +import std/[tables, strutils, os], stew/results, chronicles +import ../../../common/databases/db_sqlite, ../../../common/databases/common logScope: topics = "waku node peer_manager" - const SchemaVersion* = 1 # increase this when there is an update in the database schema -template projectRoot: string = currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".." / ".." +template projectRoot(): string = + currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".." / ".." -const PeerStoreMigrationPath: string = projectRoot / "migrations" / "peer_store" +const PeerStoreMigrationPath: string = projectRoot / "migrations" / "peer_store" proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult[void] = ## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then @@ -34,9 +27,10 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult ## NOTE: Down migration is not currently supported debug "starting peer store's sqlite database migration" - let migrationRes = migrate(db, targetVersion, migrationsScriptsDir=PeerStoreMigrationPath) + let migrationRes = + migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath) if migrationRes.isErr(): return err("failed to execute migration scripts: " & migrationRes.error) debug "finished peer store's sqlite database migration" - ok() \ No newline at end of file + ok() diff --git a/waku/node/peer_manager/peer_store/peer_storage.nim b/waku/node/peer_manager/peer_store/peer_storage.nim index 5f5a37c12..c3b377f51 100644 --- a/waku/node/peer_manager/peer_store/peer_storage.nim +++ b/waku/node/peer_manager/peer_store/peer_storage.nim @@ -3,12 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} - -import - stew/results -import - ../../../waku_core, - ../waku_peer_store +import stew/results +import ../../../waku_core, ../waku_peer_store ## This module defines a peer storage interface.
Implementations of ## PeerStorage are used to store and retrieve peers @@ -22,9 +18,8 @@ type # PeerStorage interface method put*( - db: PeerStorage, - remotePeerInfo: RemotePeerInfo - ): PeerStorageResult[void] {.base.} = + db: PeerStorage, remotePeerInfo: RemotePeerInfo +): PeerStorageResult[void] {.base.} = return err("Unimplemented") method getAll*(db: PeerStorage, onData: DataProc): PeerStorageResult[void] {.base.} = diff --git a/waku/node/peer_manager/peer_store/waku_peer_storage.nim b/waku/node/peer_manager/peer_store/waku_peer_storage.nim index 8609f165c..2ba3006ed 100644 --- a/waku/node/peer_manager/peer_store/waku_peer_storage.nim +++ b/waku/node/peer_manager/peer_store/waku_peer_storage.nim @@ -3,7 +3,6 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} - import std/[sets, options], stew/results, @@ -18,10 +17,9 @@ import export db_sqlite -type - WakuPeerStorage* = ref object of PeerStorage - database*: SqliteDatabase - replaceStmt: SqliteStmt[(seq[byte], seq[byte]), void] +type WakuPeerStorage* = ref object of PeerStorage + database*: SqliteDatabase + replaceStmt: SqliteStmt[(seq[byte], seq[byte]), void] ########################## # Protobuf Serialisation # @@ -38,13 +36,13 @@ proc decode*(T: type RemotePeerInfo, buffer: seq[byte]): ProtoResult[T] = var pb = initProtoBuffer(buffer) - discard ? pb.getField(1, storedInfo.peerId) - discard ? pb.getRepeatedField(2, multiaddrSeq) - discard ? pb.getRepeatedField(3, protoSeq) - discard ? pb.getField(4, storedInfo.publicKey) - discard ? pb.getField(5, connectedness) - discard ? pb.getField(6, disconnectTime) - let hasENR = ? pb.getField(7, rlpBytes) + discard ?pb.getField(1, storedInfo.peerId) + discard ?pb.getRepeatedField(2, multiaddrSeq) + discard ?pb.getRepeatedField(3, protoSeq) + discard ?pb.getField(4, storedInfo.publicKey) + discard ?pb.getField(5, connectedness) + discard ?pb.getField(6, disconnectTime) + let hasENR = ?pb.getField(7, rlpBytes) storedInfo.addrs = multiaddrSeq storedInfo.protocols = protoSeq @@ -70,7 +68,8 @@ proc encode*(remotePeerInfo: RemotePeerInfo): PeerStorageResult[ProtoBuffer] = for proto in remotePeerInfo.protocols.items: pb.write(3, proto) - let catchRes = catch: pb.write(4, remotePeerInfo.publicKey) + let catchRes = catch: + pb.write(4, remotePeerInfo.publicKey) if catchRes.isErr(): return err("Encoding public key failed: " & catchRes.error.msg) @@ -96,16 +95,17 @@ proc new*(T: type WakuPeerStorage, db: SqliteDatabase): PeerStorageResult[T] = # It contains: # - peer id as primary key, stored as a blob # - stored info (serialised protobuf), stored as a blob - let createStmt = db.prepareStmt( - """ + let createStmt = db + .prepareStmt( + """ CREATE TABLE IF NOT EXISTS Peer ( peerId BLOB PRIMARY KEY, storedInfo BLOB ) WITHOUT ROWID; """, - NoParams, - void - ).expect("Valid statement") + NoParams, void, + ) + .expect("Valid statement") createStmt.exec(()).isOkOr: return err("failed to exec") @@ -114,11 +114,13 @@ proc new*(T: type WakuPeerStorage, db: SqliteDatabase): PeerStorageResult[T] = createStmt.dispose() # Reusable prepared statements - let replaceStmt = db.prepareStmt( - "REPLACE INTO Peer (peerId, storedInfo) VALUES (?, ?);", - (seq[byte], seq[byte]), - void - ).expect("Valid statement") + let replaceStmt = db + .prepareStmt( + "REPLACE INTO Peer (peerId, storedInfo) VALUES (?, ?);", + (seq[byte], seq[byte]), + void, + ) + .expect("Valid statement") # General initialization let ps = WakuPeerStorage(database: db, replaceStmt: replaceStmt) @@ -126,11 +128,10 @@ proc new*(T: type
WakuPeerStorage, db: SqliteDatabase): PeerStorageResult[T] = return ok(ps) method put*( - db: WakuPeerStorage, - remotePeerInfo: RemotePeerInfo - ): PeerStorageResult[void] = + db: WakuPeerStorage, remotePeerInfo: RemotePeerInfo +): PeerStorageResult[void] = ## Adds a peer to storage or replaces existing entry if it already exists - + let encoded = remotePeerInfo.encode().valueOr: return err("peer info encoding failed: " & error) @@ -140,11 +141,10 @@ method put*( return ok() method getAll*( - db: WakuPeerStorage, - onData: peer_storage.DataProc - ): PeerStorageResult[void] = + db: WakuPeerStorage, onData: peer_storage.DataProc +): PeerStorageResult[void] = ## Retrieves all peers from storage - + proc peer(s: ptr sqlite3_stmt) {.raises: [ResultError[ProtoError]].} = let # Stored Info @@ -154,12 +154,14 @@ method getAll*( onData(storedInfo) - let catchRes = catch: db.database.query("SELECT peerId, storedInfo FROM Peer", peer) + let catchRes = catch: + db.database.query("SELECT peerId, storedInfo FROM Peer", peer) let queryRes = if catchRes.isErr(): return err("failed to extract peer from query result: " & catchRes.error.msg) - else: catchRes.get() + else: + catchRes.get() if queryRes.isErr(): return err("peer storage query failed: " & queryRes.error) @@ -168,6 +170,6 @@ method getAll*( proc close*(db: WakuPeerStorage) = ## Closes the database. - + db.replaceStmt.dispose() db.database.close() diff --git a/waku/node/peer_manager/waku_peer_store.nim b/waku/node/peer_manager/waku_peer_store.nim index 579ae395a..e8b99f85c 100644 --- a/waku/node/peer_manager/waku_peer_store.nim +++ b/waku/node/peer_manager/waku_peer_store.nim @@ -45,18 +45,21 @@ type # Peer Store API # ################## -proc delete*(peerStore: PeerStore, - peerId: PeerId) = +proc delete*(peerStore: PeerStore, peerId: PeerId) = # Delete all the information of a given peer. peerStore.del(peerId) -proc get*(peerStore: PeerStore, - peerId: PeerID): RemotePeerInfo = +proc get*(peerStore: PeerStore, peerId: PeerID): RemotePeerInfo = ## Get the stored information of a given peer. RemotePeerInfo( peerId: peerId, addrs: peerStore[AddressBook][peerId], - enr: if peerStore[ENRBook][peerId] != default(enr.Record): some(peerStore[ENRBook][peerId]) else: none(enr.Record), + enr: + if peerStore[ENRBook][peerId] != default(enr.Record): + some(peerStore[ENRBook][peerId]) + else: + none(enr.Record) + , protocols: peerStore[ProtoBook][peerId], agent: peerStore[AgentBook][peerId], protoVersion: peerStore[ProtoVersionBook][peerId], @@ -68,23 +71,26 @@ proc get*(peerStore: PeerStore, origin: peerStore[SourceBook][peerId], direction: peerStore[DirectionBook][peerId], lastFailedConn: peerStore[LastFailedConnBook][peerId], - numberFailedConn: peerStore[NumberFailedConnBook][peerId] + numberFailedConn: peerStore[NumberFailedConnBook][peerId], ) proc getWakuProtos*(peerStore: PeerStore): seq[string] = ## Get the waku protocols of all the stored peers. let wakuProtocols = toSeq(peerStore[ProtoBook].book.values()) - .flatten() - .deduplicate() - .filterIt(it.startsWith("/vac/waku")) + .flatten() + .deduplicate() + .filterIt(it.startsWith("/vac/waku")) return wakuProtocols # TODO: Rename peers() to getPeersByProtocol() proc peers*(peerStore: PeerStore): seq[RemotePeerInfo] = ## Get all the stored information of every peer. 
-  let allKeys = concat(toSeq(peerStore[AddressBook].book.keys()),
-                       toSeq(peerStore[ProtoBook].book.keys()),
-                       toSeq(peerStore[KeyBook].book.keys())).toHashSet()
+  let allKeys = concat(
+    toSeq(peerStore[AddressBook].book.keys()),
+    toSeq(peerStore[ProtoBook].book.keys()),
+    toSeq(peerStore[KeyBook].book.keys()),
+  )
+  .toHashSet()

   return allKeys.mapIt(peerStore.get(it))
@@ -122,7 +128,9 @@ proc hasPeers*(peerStore: PeerStore, protocolMatcher: Matcher): bool =
   # Returns `true` if the peerstore has any peer matching the protocolMatcher
   toSeq(peerStore[ProtoBook].book.values()).anyIt(it.anyIt(protocolMatcher(it)))

-proc getPeersByDirection*(peerStore: PeerStore, direction: PeerDirection): seq[RemotePeerInfo] =
+proc getPeersByDirection*(
+    peerStore: PeerStore, direction: PeerDirection
+): seq[RemotePeerInfo] =
   return peerStore.peers.filterIt(it.direction == direction)

 proc getNotConnectedPeers*(peerStore: PeerStore): seq[RemotePeerInfo] =
@@ -135,10 +143,19 @@ proc getPeersByProtocol*(peerStore: PeerStore, proto: string): seq[RemotePeerInf
   return peerStore.peers.filterIt(it.protocols.contains(proto))

 proc getReachablePeers*(peerStore: PeerStore): seq[RemotePeerInfo] =
-  return peerStore.peers.filterIt(it.connectedness == CanConnect or it.connectedness == Connected)
+  return peerStore.peers.filterIt(
+    it.connectedness == CanConnect or it.connectedness == Connected
+  )

-proc getPeersByShard*(peerStore: PeerStore, cluster, shard: uint16): seq[RemotePeerInfo] =
-  return peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().containsShard(cluster, shard))
+proc getPeersByShard*(
+    peerStore: PeerStore, cluster, shard: uint16
+): seq[RemotePeerInfo] =
+  return peerStore.peers.filterIt(
+    it.enr.isSome() and it.enr.get().containsShard(cluster, shard)
+  )

-proc getPeersByCapability*(peerStore: PeerStore, cap: Capabilities): seq[RemotePeerInfo] =
-  return peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap))
\ No newline at end of file
+proc getPeersByCapability*(
+    peerStore: PeerStore, cap: Capabilities
+): seq[RemotePeerInfo] =
+  return
+    peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap))
diff --git a/waku/node/waku_metrics.nim b/waku/node/waku_metrics.nim
index 91aff2ca5..145835f41 100644
--- a/waku/node/waku_metrics.nim
+++ b/waku/node/waku_metrics.nim
@@ -3,15 +3,10 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}

-import
-  chronicles,
-  chronos,
-  metrics,
-  metrics/chronos_httpserver
+import chronicles, chronos, metrics, metrics/chronos_httpserver
 import
   ../waku_filter/protocol_metrics as filter_metrics,
   ../waku_rln_relay/protocol_metrics as rln_metrics,
-  ../utils/collector,
   ./peer_manager,
   ./waku_node
@@ -31,13 +26,13 @@ proc startMetricsLog*() =
   logMetrics = CallbackFunc(
     proc(udata: pointer) {.gcsafe.} =
-
       # TODO: libp2p_pubsub_peers is not public, so we need to make this either
       # public in libp2p or do our own peer counting after all.
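# A hedged sketch of the accumulation pattern this callback relies on
# (`parseAndAccumulate`, defined in waku/utils/collector.nim, folds the
# counter's current total into the cumulative variable and yields the fresh
# delta since the previous tick; the variable names below simply mirror the
# surrounding code):
#
#   var cumulativeConns = 0.float64
#   # once per logging interval:
#   let freshConns = parseAndAccumulate(waku_node_conns_initiated, cumulativeConns)
#   info "Total connections initiated", count = $freshConns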
# track cumulative values
       let freshErrorCount = parseAndAccumulate(waku_node_errors, cumulativeErrors)
-      let freshConnCount = parseAndAccumulate(waku_node_conns_initiated, cumulativeConns)
+      let freshConnCount =
+        parseAndAccumulate(waku_node_conns_initiated, cumulativeConns)

       let totalMessages = collectorAsF64(waku_node_messages)
       let storePeers = collectorAsF64(waku_store_peers)
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index 47a0c8735..dbd985dda 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -5,7 +5,9 @@ else:

 import
   std/[hashes, options, sugar, tables, strutils, sequtils, os],
-  chronos, chronicles, metrics,
+  chronos,
+  chronicles,
+  metrics,
   stew/results,
   stew/byteutils,
   stew/shims/net as stewNet,
@@ -31,8 +33,10 @@ import
   ../waku_archive,
   ../waku_store,
   ../waku_store/client as store_client,
-  ../waku_filter as legacy_filter, #TODO: support for legacy filter protocol will be removed
-  ../waku_filter/client as legacy_filter_client, #TODO: support for legacy filter protocol will be removed
+  ../waku_filter as legacy_filter,
+  #TODO: support for legacy filter protocol will be removed
+  ../waku_filter/client as legacy_filter_client,
+  #TODO: support for legacy filter protocol will be removed
   ../waku_filter_v2,
   ../waku_filter_v2/client as filter_client,
   ../waku_filter_v2/subscriptions as filter_subscriptions,
@@ -47,17 +51,19 @@ import
   ./config,
   ./peer_manager

-
 declarePublicCounter waku_node_messages, "number of messages received", ["type"]
-declarePublicHistogram waku_histogram_message_size, "message size histogram in kB",
+declarePublicHistogram waku_histogram_message_size,
+  "message size histogram in kB",
   buckets = [0.0, 5.0, 15.0, 50.0, 75.0, 100.0, 125.0, 150.0, 300.0, 700.0, 1000.0, Inf]

-declarePublicGauge waku_version, "Waku version info (in git describe format)", ["version"]
+declarePublicGauge waku_version,
+  "Waku version info (in git describe format)", ["version"]
 declarePublicGauge waku_node_errors, "number of wakunode errors", ["type"]
 declarePublicGauge waku_lightpush_peers, "number of lightpush peers"
 declarePublicGauge waku_filter_peers, "number of filter peers"
 declarePublicGauge waku_store_peers, "number of store peers"
-declarePublicGauge waku_px_peers, "number of peers (in the node's peerManager) supporting the peer exchange protocol"
+declarePublicGauge waku_px_peers,
+  "number of peers (in the node's peerManager) supporting the peer exchange protocol"

 logScope:
   topics = "waku node"
@@ -74,11 +80,9 @@ const WakuNodeVersionString* = "version / git commit hash: " & git_version
   # key and crypto modules different
 type
   # TODO: Move to application instance (e.g., `WakuNode2`)
-  WakuInfo* = object
-    # NOTE One for simplicity, can extend later as needed
+  WakuInfo* = object # NOTE One for simplicity, can extend later as needed
     listenAddresses*: seq[string]
-    enrUri*: string
-    #multiaddrStrings*: seq[string]
+    enrUri*: string #multiaddrStrings*: seq[string]

   # NOTE based on Eth2Node in NBC eth2_network.nim
   WakuNode* = ref object
@@ -90,8 +94,10 @@ type
     wakuStoreClient*: WakuStoreClient
     wakuFilter*: waku_filter_v2.WakuFilter
     wakuFilterClient*: filter_client.WakuFilterClient
-    wakuFilterLegacy*: legacy_filter.WakuFilterLegacy #TODO: support for legacy filter protocol will be removed
-    wakuFilterClientLegacy*: legacy_filter_client.WakuFilterClientLegacy #TODO: support for legacy filter protocol will be removed
+    wakuFilterLegacy*: legacy_filter.WakuFilterLegacy
+    #TODO: support for legacy filter protocol will be removed
+    wakuFilterClientLegacy*: legacy_filter_client.WakuFilterClientLegacy
+    #TODO: support for legacy filter protocol will be removed
     wakuRlnRelay*: WakuRLNRelay
     wakuLightPush*: WakuLightPush
     wakuLightpushClient*: WakuLightPushClient
@@ -102,7 +108,7 @@ type
     libp2pPing*: Ping
     rng*: ref rand.HmacDrbgContext
     rendezvous*: RendezVous
-    announcedAddresses* : seq[MultiAddress]
+    announcedAddresses*: seq[MultiAddress]
     started*: bool # Indicates that node has started listening
     topicSubscriptionQueue*: AsyncEventQueue[SubscriptionEvent]
     contentTopicHandlers: Table[ContentTopic, TopicHandler]
@@ -120,29 +126,32 @@ proc getAutonatService*(rng: ref HmacDrbgContext): AutonatService =
     askNewConnectedPeers = false,
     numPeersToAsk = 3,
     maxQueueSize = 3,
-    minConfidence = 0.7)
+    minConfidence = 0.7,
+  )

-  proc statusAndConfidenceHandler(networkReachability: NetworkReachability,
-                                  confidence: Opt[float]):
-                                  Future[void] {.gcsafe, async.} =
+  proc statusAndConfidenceHandler(
+      networkReachability: NetworkReachability, confidence: Opt[float]
+  ): Future[void] {.gcsafe, async.} =
     if confidence.isSome():
-      info "Peer reachability status", networkReachability=networkReachability, confidence=confidence.get()
+      info "Peer reachability status",
+        networkReachability = networkReachability, confidence = confidence.get()

   autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)

   return autonatService

-proc new*(T: type WakuNode,
-          netConfig: NetConfig,
-          enr: enr.Record,
-          switch: Switch,
-          peerManager: PeerManager,
-          # TODO: make this argument required after tests are updated
-          rng: ref HmacDrbgContext = crypto.newRng()
-          ): T {.raises: [Defect, LPError, IOError, TLSStreamProtocolError].} =
+proc new*(
+    T: type WakuNode,
+    netConfig: NetConfig,
+    enr: enr.Record,
+    switch: Switch,
+    peerManager: PeerManager,
+    # TODO: make this argument required after tests are updated
+    rng: ref HmacDrbgContext = crypto.newRng(),
+): T {.raises: [Defect, LPError, IOError, TLSStreamProtocolError].} =
   ## Creates a Waku Node instance.
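# A hedged construction sketch for the signature above (the netConfig, record,
# switch and peerManager values are assumed to come from the caller's setup
# code; only the parameter shape is taken from this file):
#
#   let node = WakuNode.new(
#     netConfig = netConfig,
#     enr = record,
#     switch = switch,
#     peerManager = peerManager,
#     rng = crypto.newRng(),
#   )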
- info "Initializing networking", addrs= $netConfig.announcedAddresses + info "Initializing networking", addrs = $netConfig.announcedAddresses let queue = newAsyncEventQueue[SubscriptionEvent](30) @@ -152,7 +161,7 @@ proc new*(T: type WakuNode, rng: rng, enr: enr, announcedAddresses: netConfig.announcedAddresses, - topicSubscriptionQueue: queue + topicSubscriptionQueue: queue, ) return node @@ -170,7 +179,7 @@ proc info*(node: WakuNode): WakuInfo = let peerInfo = node.switch.peerInfo - var listenStr : seq[string] + var listenStr: seq[string] for address in node.announcedAddresses: var fulladdr = $address & "/p2p/" & $peerInfo.peerId listenStr &= fulladdr @@ -178,10 +187,12 @@ proc info*(node: WakuNode): WakuInfo = let wakuInfo = WakuInfo(listenAddresses: listenStr, enrUri: enrUri) return wakuInfo -proc connectToNodes*(node: WakuNode, nodes: seq[RemotePeerInfo] | seq[string], source = "api") {.async.} = +proc connectToNodes*( + node: WakuNode, nodes: seq[RemotePeerInfo] | seq[string], source = "api" +) {.async.} = ## `source` indicates source of node addrs (static config, api call, discovery, etc) # NOTE Connects to the node without a give protocol, which automatically creates streams for relay - await peer_manager.connectToNodes(node.peerManager, nodes, source=source) + await peer_manager.connectToNodes(node.peerManager, nodes, source = source) ## Waku Metadata @@ -194,15 +205,18 @@ proc mountMetadata*(node: WakuNode, clusterId: uint32): Result[void, string] = node.wakuMetadata = metadata node.peerManager.wakuMetadata = metadata - let catchRes = catch: node.switch.mount(node.wakuMetadata, protocolMatcher(WakuMetadataCodec)) + let catchRes = catch: + node.switch.mount(node.wakuMetadata, protocolMatcher(WakuMetadataCodec)) if catchRes.isErr(): return err(catchRes.error.msg) return ok() ## Waku Sharding -proc mountSharding*(node: WakuNode, clusterId: uint32, shardCount: uint32): Result[void, string] = - info "mounting sharding", clusterId=clusterId, shardCount=shardCount +proc mountSharding*( + node: WakuNode, clusterId: uint32, shardCount: uint32 +): Result[void, string] = + info "mounting sharding", clusterId = clusterId, shardCount = shardCount node.wakuSharding = Sharding(clusterId: clusterId, shardCountGenZero: shardCount) return ok() @@ -214,13 +228,13 @@ proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) = proc traceHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = trace "waku.relay received", - peerId=node.peerId, - pubsubTopic=topic, - hash=topic.computeMessageHash(msg).to0xHex(), - receivedTime=getNowInNanosecondTime(), - payloadSizeBytes=msg.payload.len + peerId = node.peerId, + pubsubTopic = topic, + hash = topic.computeMessageHash(msg).to0xHex(), + receivedTime = getNowInNanosecondTime(), + payloadSizeBytes = msg.payload.len - let msgSizeKB = msg.payload.len/1000 + let msgSizeKB = msg.payload.len / 1000 waku_node_messages.inc(labelValues = ["relay"]) waku_histogram_message_size.observe(msgSizeKB) @@ -243,15 +257,18 @@ proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) = await node.wakuArchive.handleMessage(topic, msg) - - let defaultHandler = proc(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + let defaultHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = await traceHandler(topic, msg) await filterHandler(topic, msg) await archiveHandler(topic, msg) discard node.wakuRelay.subscribe(topic, defaultHandler) -proc subscribe*(node: WakuNode, subscription: 
SubscriptionEvent, handler = none(WakuRelayHandler)) = +proc subscribe*( + node: WakuNode, subscription: SubscriptionEvent, handler = none(WakuRelayHandler) +) = ## Subscribes to a PubSub or Content topic. Triggers handler when receiving messages on ## this topic. WakuRelayHandler is a method that takes a topic and a Waku message. @@ -260,21 +277,23 @@ proc subscribe*(node: WakuNode, subscription: SubscriptionEvent, handler = none( return let (pubsubTopic, contentTopicOp) = - case subscription.kind: - of ContentSub: - let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: - error "Autosharding error", error=error - return + case subscription.kind + of ContentSub: + let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: + error "Autosharding error", error = error + return - (shard, some(subscription.topic)) - of PubsubSub: (subscription.topic, none(ContentTopic)) - else: return + (shard, some(subscription.topic)) + of PubsubSub: + (subscription.topic, none(ContentTopic)) + else: + return if contentTopicOp.isSome() and node.contentTopicHandlers.hasKey(contentTopicOp.get()): error "Invalid API call to `subscribe`. Was already subscribed" return - debug "subscribe", pubsubTopic=pubsubTopic + debug "subscribe", pubsubTopic = pubsubTopic node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic)) node.registerRelayDefaultHandler(pubsubTopic) @@ -293,15 +312,17 @@ proc unsubscribe*(node: WakuNode, subscription: SubscriptionEvent) = return let (pubsubTopic, contentTopicOp) = - case subscription.kind: - of ContentUnsub: - let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: - error "Autosharding error", error=error - return + case subscription.kind + of ContentUnsub: + let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: + error "Autosharding error", error = error + return - (shard, some(subscription.topic)) - of PubsubUnsub: (subscription.topic, none(ContentTopic)) - else: return + (shard, some(subscription.topic)) + of PubsubUnsub: + (subscription.topic, none(ContentTopic)) + else: + return if not node.wakuRelay.isSubscribed(pubsubTopic): error "Invalid API call to `unsubscribe`. Was not subscribed" @@ -311,45 +332,44 @@ proc unsubscribe*(node: WakuNode, subscription: SubscriptionEvent) = # Remove this handler only var handler: TopicHandler if node.contentTopicHandlers.pop(contentTopicOp.get(), handler): - debug "unsubscribe", contentTopic=contentTopicOp.get() + debug "unsubscribe", contentTopic = contentTopicOp.get() node.wakuRelay.unsubscribe(pubsubTopic, handler) if contentTopicOp.isNone() or node.wakuRelay.topics.getOrDefault(pubsubTopic).len == 1: # Remove all handlers - debug "unsubscribe", pubsubTopic=pubsubTopic + debug "unsubscribe", pubsubTopic = pubsubTopic node.wakuRelay.unsubscribeAll(pubsubTopic) node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic)) proc publish*( - node: WakuNode, - pubsubTopicOp: Option[PubsubTopic], - message: WakuMessage - ) : Future[Result[void, string]] {.async, gcsafe.} = + node: WakuNode, pubsubTopicOp: Option[PubsubTopic], message: WakuMessage +): Future[Result[void, string]] {.async, gcsafe.} = ## Publish a `WakuMessage`. Pubsub topic contains; none, a named or static shard. ## `WakuMessage` should contain a `contentTopic` field for light node functionality. ## It is also used to determine the shard. if node.wakuRelay.isNil(): - let msg = "Invalid API call to `publish`. WakuRelay not mounted. Try `lightpush` instead." 
- error "publish error", msg=msg + let msg = + "Invalid API call to `publish`. WakuRelay not mounted. Try `lightpush` instead." + error "publish error", msg = msg # TODO: Improve error handling return err(msg) let pubsubTopic = pubsubTopicOp.valueOr: node.wakuSharding.getShard(message.contentTopic).valueOr: let msg = "Autosharding error: " & error - error "publish error", msg=msg + error "publish error", msg = msg return err(msg) #TODO instead of discard return error when 0 peers received the message discard await node.wakuRelay.publish(pubsubTopic, message) trace "waku.relay published", - peerId=node.peerId, - pubsubTopic=pubsubTopic, - hash=pubsubTopic.computeMessageHash(message).to0xHex(), - publishTime=getNowInNanosecondTime() - + peerId = node.peerId, + pubsubTopic = pubsubTopic, + hash = pubsubTopic.computeMessageHash(message).to0xHex(), + publishTime = getNowInNanosecondTime() + return ok() proc startRelay*(node: WakuNode) {.async.} = @@ -367,20 +387,22 @@ proc startRelay*(node: WakuNode) {.async.} = info "Found previous WakuRelay peers. Reconnecting." # Reconnect to previous relay peers. This will respect a backoff period, if necessary - let backoffPeriod = node.wakuRelay.parameters.pruneBackoff + chronos.seconds(BackoffSlackTime) + let backoffPeriod = + node.wakuRelay.parameters.pruneBackoff + chronos.seconds(BackoffSlackTime) - await node.peerManager.reconnectPeers(WakuRelayCodec, - backoffPeriod) + await node.peerManager.reconnectPeers(WakuRelayCodec, backoffPeriod) # Start the WakuRelay protocol await node.wakuRelay.start() info "relay started successfully" -proc mountRelay*(node: WakuNode, - pubsubTopics: seq[string] = @[], - peerExchangeHandler = none(RoutingRecordsHandler), - maxMessageSize = int(MaxWakuMessageSize)) {.async, gcsafe.} = +proc mountRelay*( + node: WakuNode, + pubsubTopics: seq[string] = @[], + peerExchangeHandler = none(RoutingRecordsHandler), + maxMessageSize = int(MaxWakuMessageSize), +) {.async, gcsafe.} = if not node.wakuRelay.isNil(): error "wakuRelay already mounted, skipping" return @@ -390,14 +412,15 @@ proc mountRelay*(node: WakuNode, let initRes = WakuRelay.new(node.switch, maxMessageSize) if initRes.isErr(): - error "failed mounting relay protocol", error=initRes.error + error "failed mounting relay protocol", error = initRes.error return node.wakuRelay = initRes.value ## Add peer exchange handler if peerExchangeHandler.isSome(): - node.wakuRelay.parameters.enablePX = true # Feature flag for peer exchange in nim-libp2p + node.wakuRelay.parameters.enablePX = true + # Feature flag for peer exchange in nim-libp2p node.wakuRelay.routingRecordsHandler.add(peerExchangeHandler.get()) if node.started: @@ -413,49 +436,52 @@ proc mountRelay*(node: WakuNode, ## Waku filter -proc mountLegacyFilter*(node: WakuNode, filterTimeout: Duration = WakuLegacyFilterTimeout) - {.async, raises: [Defect, LPError]} = +proc mountLegacyFilter*( + node: WakuNode, filterTimeout: Duration = WakuLegacyFilterTimeout +) {.async, raises: [Defect, LPError].} = ## Mounting legacy filter protocol with separation from new v2 filter protocol for easier removal later ## TODO: remove legacy filter protocol info "mounting legacy filter protocol" - node.wakuFilterLegacy = WakuFilterLegacy.new(node.peerManager, node.rng, filterTimeout) + node.wakuFilterLegacy = + WakuFilterLegacy.new(node.peerManager, node.rng, filterTimeout) if node.started: await node.wakuFilterLegacy.start() #TODO: remove legacy node.switch.mount(node.wakuFilterLegacy, protocolMatcher(WakuLegacyFilterCodec)) -proc 
mountFilter*(node: WakuNode, - subscriptionTimeout: Duration = filter_subscriptions.DefaultSubscriptionTimeToLiveSec, - maxFilterPeers: uint32 = filter_subscriptions.MaxFilterPeers, - maxFilterCriteriaPerPeer: uint32 = filter_subscriptions.MaxFilterCriteriaPerPeer) - {.async, raises: [Defect, LPError]} = +proc mountFilter*( + node: WakuNode, + subscriptionTimeout: Duration = + filter_subscriptions.DefaultSubscriptionTimeToLiveSec, + maxFilterPeers: uint32 = filter_subscriptions.MaxFilterPeers, + maxFilterCriteriaPerPeer: uint32 = filter_subscriptions.MaxFilterCriteriaPerPeer, +) {.async, raises: [Defect, LPError].} = ## Mounting filter v2 protocol info "mounting filter protocol" - node.wakuFilter = WakuFilter.new(node.peerManager, - subscriptionTimeout, - maxFilterPeers, - maxFilterCriteriaPerPeer) + node.wakuFilter = WakuFilter.new( + node.peerManager, subscriptionTimeout, maxFilterPeers, maxFilterCriteriaPerPeer + ) if node.started: await node.wakuFilter.start() node.switch.mount(node.wakuFilter, protocolMatcher(WakuFilterSubscribeCodec)) -proc filterHandleMessage*(node: WakuNode, - pubsubTopic: PubsubTopic, - message: WakuMessage) - {.async.}= - +proc filterHandleMessage*( + node: WakuNode, pubsubTopic: PubsubTopic, message: WakuMessage +) {.async.} = if node.wakuFilter.isNil() or node.wakuFilterLegacy.isNil(): - error "cannot handle filter message", error = "waku filter and waku filter legacy are both required" + error "cannot handle filter message", + error = "waku filter and waku filter legacy are both required" return - await allFutures(node.wakuFilter.handleMessage(pubsubTopic, message), - node.wakuFilterLegacy.handleMessage(pubsubTopic, message) #TODO: remove legacy - ) + await allFutures( + node.wakuFilter.handleMessage(pubsubTopic, message), + node.wakuFilterLegacy.handleMessage(pubsubTopic, message), #TODO: remove legacy + ) proc mountFilterClient*(node: WakuNode) {.async, raises: [Defect, LPError].} = ## Mounting both filter clients v1 - legacy and v2. @@ -472,17 +498,18 @@ proc mountFilterClient*(node: WakuNode) {.async, raises: [Defect, LPError].} = node.switch.mount(node.wakuFilterClient, protocolMatcher(WakuFilterSubscribeCodec)) node.switch.mount(node.wakuFilterClientLegacy, protocolMatcher(WakuLegacyFilterCodec)) -proc legacyFilterSubscribe*(node: WakuNode, - pubsubTopic: Option[PubsubTopic], - contentTopics: ContentTopic|seq[ContentTopic], - handler: FilterPushHandler, - peer: RemotePeerInfo|string) - {.async, gcsafe, raises: [Defect, ValueError].} = - +proc legacyFilterSubscribe*( + node: WakuNode, + pubsubTopic: Option[PubsubTopic], + contentTopics: ContentTopic | seq[ContentTopic], + handler: FilterPushHandler, + peer: RemotePeerInfo | string, +) {.async, gcsafe, raises: [Defect, ValueError].} = ## Registers for messages that match a specific filter. ## Triggers the handler whenever a message is received. if node.wakuFilterClientLegacy.isNil(): - error "cannot register filter subscription to topic", error="waku legacy filter client is not set up" + error "cannot register filter subscription to topic", + error = "waku legacy filter client is not set up" return let remotePeerRes = parsePeerInfo(peer) @@ -496,48 +523,50 @@ proc legacyFilterSubscribe*(node: WakuNode, # TODO: Move this logic to wakunode2 app # FIXME: This part needs refactoring. It seems possible that in special cases archiver will store same message multiple times. 
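# The `handlerWrapper` just below composes the caller's handler with archiving
# when the node runs store without relay; conceptually (placeholder names,
# hedged sketch only):
#
#   let wrapped: FilterPushHandler =
#     proc(topic: string, msg: WakuMessage) {.async, gcsafe, closure.} =
#       await allFutures(
#         node.wakuArchive.handleMessage(topic, msg), userHandler(topic, msg)
#       )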
let handlerWrapper: FilterPushHandler =
-    if node.wakuRelay.isNil() and not node.wakuStore.isNil():
-      proc(pubsubTopic: string, message: WakuMessage) {.async, gcsafe, closure.} =
-        await allFutures(node.wakuArchive.handleMessage(pubSubTopic, message),
-                         handler(pubsubTopic, message))
-    else:
-      handler
+    if node.wakuRelay.isNil() and not node.wakuStore.isNil():
+      proc(pubsubTopic: string, message: WakuMessage) {.async, gcsafe, closure.} =
+        await allFutures(
+          node.wakuArchive.handleMessage(pubSubTopic, message),
+          handler(pubsubTopic, message),
+        )
+    else:
+      handler

   if pubsubTopic.isSome():
     info "registering legacy filter subscription to content",
-      pubsubTopic=pubsubTopic.get(),
-      contentTopics=contentTopics,
-      peer=remotePeer.peerId
+      pubsubTopic = pubsubTopic.get(),
+      contentTopics = contentTopics,
+      peer = remotePeer.peerId

-    let res = await node.wakuFilterClientLegacy.subscribe(pubsubTopic.get(),
-                                                          contentTopics,
-                                                          handlerWrapper,
-                                                          peer=remotePeer)
+    let res = await node.wakuFilterClientLegacy.subscribe(
+      pubsubTopic.get(), contentTopics, handlerWrapper, peer = remotePeer
+    )

     if res.isOk():
-      info "subscribed to topic", pubsubTopic=pubsubTopic.get(),
-                                  contentTopics=contentTopics
+      info "subscribed to topic",
+        pubsubTopic = pubsubTopic.get(), contentTopics = contentTopics
     else:
-      error "failed legacy filter subscription", error=res.error
+      error "failed legacy filter subscription", error = res.error
       waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])
   else:
     let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics)

     let topicMap =
       if topicMapRes.isErr():
-        error "can't get shard", error=topicMapRes.error
+        error "can't get shard", error = topicMapRes.error
         return
-      else: topicMapRes.get()
+      else:
+        topicMapRes.get()

     var futures = collect(newSeq):
       for pubsub, topics in topicMap.pairs:
         info "registering legacy filter subscription to content",
-          pubsubTopic=pubsub,
-          contentTopics=topics,
-          peer=remotePeer.peerId
+          pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
         let content = topics.mapIt($it)
-        node.wakuFilterClientLegacy.subscribe($pubsub, content, handlerWrapper, peer=remotePeer)
+        node.wakuFilterClientLegacy.subscribe(
+          $pubsub, content, handlerWrapper, peer = remotePeer
+        )

     let finished = await allFinished(futures)
@@ -545,24 +574,22 @@ proc legacyFilterSubscribe*(node: WakuNode,
       let res = fut.read()

       if res.isErr():
-        error "failed legacy filter subscription", error=res.error
+        error "failed legacy filter subscription", error = res.error
         waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])

     for pubsub, topics in topicMap.pairs:
-      info "subscribed to topic", pubsubTopic=pubsub, contentTopics=topics
-
-proc filterSubscribe*(node: WakuNode,
-                      pubsubTopic: Option[PubsubTopic],
-                      contentTopics: ContentTopic|seq[ContentTopic],
-                      peer: RemotePeerInfo|string):
-
-  Future[FilterSubscribeResult]
-
-  {.async, gcsafe, raises: [Defect, ValueError].} =
+      info "subscribed to topic", pubsubTopic = pubsub, contentTopics = topics

+proc filterSubscribe*(
+    node: WakuNode,
+    pubsubTopic: Option[PubsubTopic],
+    contentTopics: ContentTopic | seq[ContentTopic],
+    peer: RemotePeerInfo | string,
+): Future[FilterSubscribeResult] {.async, gcsafe, raises: [Defect, ValueError].} =
   ## Registers for messages that match a specific filter. Triggers the handler whenever a message is received.
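# Illustrative client call for the proc above; the peer multiaddress and the
# content topic are placeholders, a real deployment supplies its own:
#
#   let res = await node.filterSubscribe(
#     some(DefaultPubsubTopic),
#     "/my-app/1/chat/proto",
#     "/ip4/192.0.2.1/tcp/60000/p2p/16Uiu2HAm...",
#   )
#   if res.isErr():
#     error "filter subscription failed", error = res.error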
if node.wakuFilterClient.isNil():
-    error "cannot register filter subscription to topic", error="waku filter client is not set up"
+    error "cannot register filter subscription to topic",
+      error = "waku filter client is not set up"
     return err(FilterSubscribeError.serviceUnavailable())

   let remotePeerRes = parsePeerInfo(peer)
@@ -573,16 +600,22 @@ proc filterSubscribe*(node: WakuNode,
   let remotePeer = remotePeerRes.value

   if pubsubTopic.isSome():
-    info "registering filter subscription to content", pubsubTopic=pubsubTopic.get(), contentTopics=contentTopics, peer=remotePeer.peerId
+    info "registering filter subscription to content",
+      pubsubTopic = pubsubTopic.get(),
+      contentTopics = contentTopics,
+      peer = remotePeer.peerId

-    let subRes = await node.wakuFilterClient.subscribe(remotePeer, pubsubTopic.get(), contentTopics)
+    let subRes = await node.wakuFilterClient.subscribe(
+      remotePeer, pubsubTopic.get(), contentTopics
+    )
     if subRes.isOk():
-      info "v2 subscribed to topic", pubsubTopic=pubsubTopic, contentTopics=contentTopics
+      info "v2 subscribed to topic",
+        pubsubTopic = pubsubTopic, contentTopics = contentTopics

       # Purpose is to update Waku Metadata
       node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic.get()))
     else:
-      error "failed filter v2 subscription", error=subRes.error
+      error "failed filter v2 subscription", error = subRes.error
       waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])

     return subRes
@@ -591,13 +624,15 @@ proc filterSubscribe*(node: WakuNode,

     let topicMap =
       if topicMapRes.isErr():
-        error "can't get shard", error=topicMapRes.error
+        error "can't get shard", error = topicMapRes.error
         return err(FilterSubscribeError.badResponse("can't get shard"))
-      else: topicMapRes.get()
+      else:
+        topicMapRes.get()

     var futures = collect(newSeq):
       for pubsub, topics in topicMap.pairs:
-        info "registering filter subscription to content", pubsubTopic=pubsub, contentTopics=topics, peer=remotePeer.peerId
+        info "registering filter subscription to content",
+          pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
         let content = topics.mapIt($it)
         node.wakuFilterClient.subscribe(remotePeer, $pubsub, content)
@@ -608,12 +643,12 @@ proc filterSubscribe*(node: WakuNode,
       let res = fut.read()

       if res.isErr():
-        error "failed filter subscription", error=res.error
+        error "failed filter subscription", error = res.error
         waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])
         subRes = FilterSubscribeResult.err(res.error)

     for pubsub, topics in topicMap.pairs:
-      info "subscribed to topic", pubsubTopic=pubsub, contentTopics=topics
+      info "subscribed to topic", pubsubTopic = pubsub, contentTopics = topics

       # Purpose is to update Waku Metadata
       node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: $pubsub))
@@ -621,14 +656,16 @@ proc filterSubscribe*(node: WakuNode,
     # return the last error or ok
     return subRes

-proc legacyFilterUnsubscribe*(node: WakuNode,
-                              pubsubTopic: Option[PubsubTopic],
-                              contentTopics: ContentTopic|seq[ContentTopic],
-                              peer: RemotePeerInfo|string)
-  {.async, gcsafe, raises: [Defect, ValueError].} =
+proc legacyFilterUnsubscribe*(
+    node: WakuNode,
+    pubsubTopic: Option[PubsubTopic],
+    contentTopics: ContentTopic | seq[ContentTopic],
+    peer: RemotePeerInfo | string,
+) {.async, gcsafe, raises: [Defect, ValueError].} =
   ## Unsubscribe from a content legacy filter.
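# When `pubsubTopic` is `none`, the subscribe/unsubscribe procs in this file
# resolve shards through autosharding; a hedged sketch of that fan-out,
# mirroring the loops below:
#
#   let topicMap = node.wakuSharding.parseSharding(pubsubTopic, contentTopics).get()
#   for pubsub, topics in topicMap.pairs:
#     # one request per shard, covering all content topics mapped onto it
#     discard node.wakuFilterClientLegacy.unsubscribe(
#       $pubsub, topics.mapIt($it), peer = remotePeer
#     )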
if node.wakuFilterClientLegacy.isNil():
-    error "cannot unregister filter subscription to content", error="waku filter client is nil"
+    error "cannot unregister filter subscription to content",
+      error = "waku filter client is nil"
     return

   let remotePeerRes = parsePeerInfo(peer)
@@ -639,14 +676,20 @@ proc legacyFilterUnsubscribe*(node: WakuNode,
   let remotePeer = remotePeerRes.value

   if pubsubTopic.isSome():
-    info "deregistering legacy filter subscription to content", pubsubTopic=pubsubTopic.get(), contentTopics=contentTopics, peer=remotePeer.peerId
+    info "deregistering legacy filter subscription to content",
+      pubsubTopic = pubsubTopic.get(),
+      contentTopics = contentTopics,
+      peer = remotePeer.peerId

-    let res = await node.wakuFilterClientLegacy.unsubscribe(pubsubTopic.get(), contentTopics, peer=remotePeer)
+    let res = await node.wakuFilterClientLegacy.unsubscribe(
+      pubsubTopic.get(), contentTopics, peer = remotePeer
+    )

     if res.isOk():
-      info "unsubscribed from topic", pubsubTopic=pubsubTopic.get(), contentTopics=contentTopics
+      info "unsubscribed from topic",
+        pubsubTopic = pubsubTopic.get(), contentTopics = contentTopics
     else:
-      error "failed filter unsubscription", error=res.error
+      error "failed filter unsubscription", error = res.error
       waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])
   else:
     let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics)
@@ -655,13 +698,15 @@ proc legacyFilterUnsubscribe*(node: WakuNode,
       if topicMapRes.isErr():
         error "can't get shard", error = topicMapRes.error
         return
-      else: topicMapRes.get()
+      else:
+        topicMapRes.get()

     var futures = collect(newSeq):
       for pubsub, topics in topicMap.pairs:
-        info "deregistering filter subscription to content", pubsubTopic=pubsub, contentTopics=topics, peer=remotePeer.peerId
+        info "deregistering filter subscription to content",
+          pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
         let content = topics.mapIt($it)
-        node.wakuFilterClientLegacy.unsubscribe($pubsub, content, peer=remotePeer)
+        node.wakuFilterClientLegacy.unsubscribe($pubsub, content, peer = remotePeer)

     let finished = await allFinished(futures)
@@ -669,24 +714,22 @@ proc legacyFilterUnsubscribe*(node: WakuNode,
       let res = fut.read()

       if res.isErr():
-        error "failed filter unsubscription", error=res.error
+        error "failed filter unsubscription", error = res.error
         waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])

     for pubsub, topics in topicMap.pairs:
-      info "unsubscribed from topic", pubsubTopic=pubsub, contentTopics=topics
-
-proc filterUnsubscribe*(node: WakuNode,
-                        pubsubTopic: Option[PubsubTopic],
-                        contentTopics: seq[ContentTopic],
-                        peer: RemotePeerInfo|string):
-
-  Future[FilterSubscribeResult]
-
-  {.async, gcsafe, raises: [Defect, ValueError].} =
+      info "unsubscribed from topic", pubsubTopic = pubsub, contentTopics = topics

+proc filterUnsubscribe*(
+    node: WakuNode,
+    pubsubTopic: Option[PubsubTopic],
+    contentTopics: seq[ContentTopic],
+    peer: RemotePeerInfo | string,
+): Future[FilterSubscribeResult] {.async, gcsafe, raises: [Defect, ValueError].} =
   ## Unsubscribe from a content filter V2.

   if node.wakuFilterClientLegacy.isNil():
-    error "cannot unregister filter subscription to content", error="waku filter client is nil"
+    error "cannot unregister filter subscription to content",
+      error = "waku filter client is nil"
     return err(FilterSubscribeError.serviceUnavailable())

   let remotePeerRes = parsePeerInfo(peer)
@@ -697,20 +740,25 @@ proc filterUnsubscribe*(node: WakuNode,
   let remotePeer = remotePeerRes.value

   if pubsubTopic.isSome():
-    info "deregistering filter subscription to content", pubsubTopic=pubsubTopic.get(), contentTopics=contentTopics, peer=remotePeer.peerId
+    info "deregistering filter subscription to content",
+      pubsubTopic = pubsubTopic.get(),
+      contentTopics = contentTopics,
+      peer = remotePeer.peerId

-    let unsubRes = await node.wakuFilterClient.unsubscribe(remotePeer, pubsubTopic.get(), contentTopics)
+    let unsubRes = await node.wakuFilterClient.unsubscribe(
+      remotePeer, pubsubTopic.get(), contentTopics
+    )
     if unsubRes.isOk():
-      info "unsubscribed from topic", pubsubTopic=pubsubTopic.get(), contentTopics=contentTopics
-
+      info "unsubscribed from topic",
+        pubsubTopic = pubsubTopic.get(), contentTopics = contentTopics
+
       # Purpose is to update Waku Metadata
       node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic.get()))
     else:
-      error "failed filter unsubscription", error=unsubRes.error
+      error "failed filter unsubscription", error = unsubRes.error
       waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])

     return unsubRes
-
   else: # pubsubTopic.isNone
     let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics)
@@ -718,11 +766,13 @@ proc filterUnsubscribe*(node: WakuNode,
       if topicMapRes.isErr():
         error "can't get shard", error = topicMapRes.error
         return err(FilterSubscribeError.badResponse("can't get shard"))
-      else: topicMapRes.get()
+      else:
+        topicMapRes.get()

     var futures = collect(newSeq):
       for pubsub, topics in topicMap.pairs:
-        info "deregistering filter subscription to content", pubsubTopic=pubsub, contentTopics=topics, peer=remotePeer.peerId
+        info "deregistering filter subscription to content",
+          pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
         let content = topics.mapIt($it)
         node.wakuFilterClient.unsubscribe(remotePeer, $pubsub, content)
@@ -733,12 +783,12 @@ proc filterUnsubscribe*(node: WakuNode,
       let res = fut.read()

       if res.isErr():
-        error "failed filter unsubscription", error=res.error
+        error "failed filter unsubscription", error = res.error
         waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])
         unsubRes = FilterSubscribeResult.err(res.error)

     for pubsub, topics in topicMap.pairs:
-      info "unsubscribed from topic", pubsubTopic=pubsub, contentTopics=topics
+      info "unsubscribed from topic", pubsubTopic = pubsub, contentTopics = topics

       # Purpose is to update Waku Metadata
       node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: $pubsub))
@@ -746,16 +796,13 @@ proc filterUnsubscribe*(node: WakuNode,
     # return the last error or ok
     return unsubRes

-proc filterUnsubscribeAll*(node: WakuNode,
-                           peer: RemotePeerInfo|string):
-
-  Future[FilterSubscribeResult]
-
-  {.async, gcsafe, raises: [Defect, ValueError].} =
-
+proc filterUnsubscribeAll*(
+    node: WakuNode, peer: RemotePeerInfo | string
+): Future[FilterSubscribeResult] {.async, gcsafe, raises: [Defect, ValueError].} =
   ## Unsubscribe from a content filter V2.
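# Teardown sketch for the proc above (`servicePeer` is a placeholder for the
# same filter service peer used when subscribing):
#
#   let res = await node.filterUnsubscribeAll(servicePeer)
#   if res.isOk():
#     info "removed all filter criteria from peer"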
if node.wakuFilterClientLegacy.isNil():
-    error "cannot unregister filter subscription to content", error="waku filter client is nil"
+    error "cannot unregister filter subscription to content",
+      error = "waku filter client is nil"
     return err(FilterSubscribeError.serviceUnavailable())

   let remotePeerRes = parsePeerInfo(peer)
@@ -765,13 +812,13 @@ proc filterUnsubscribeAll*(node: WakuNode,

   let remotePeer = remotePeerRes.value

-  info "deregistering all filter subscription to content", peer=remotePeer.peerId
+  info "deregistering all filter subscription to content", peer = remotePeer.peerId

   let unsubRes = await node.wakuFilterClient.unsubscribeAll(remotePeer)
   if unsubRes.isOk():
-    info "unsubscribed from all content-topic", peerId=remotePeer.peerId
+    info "unsubscribed from all content-topic", peerId = remotePeer.peerId
   else:
-    error "failed filter unsubscription from all content-topic", error=unsubRes.error
+    error "failed filter unsubscription from all content-topic", error = unsubRes.error
     waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])

   return unsubRes
@@ -780,14 +827,10 @@ proc filterUnsubscribeAll*(node: WakuNode,
   # yet incompatible to handle both type of filters - use specific filter registration instead

## Waku archive

-proc mountArchive*(node: WakuNode,
-                   driver: ArchiveDriver,
-                   retentionPolicy = none(RetentionPolicy)):
-                   Result[void, string] =
-  node.wakuArchive = WakuArchive.new(
-      driver = driver,
-      retentionPolicy = retentionPolicy,
-    ).valueOr:
+proc mountArchive*(
+    node: WakuNode, driver: ArchiveDriver, retentionPolicy = none(RetentionPolicy)
+): Result[void, string] =
+  node.wakuArchive = WakuArchive.new(driver = driver, retentionPolicy = retentionPolicy).valueOr:
     return err("error in mountArchive: " & error)

   node.wakuArchive.start()
@@ -801,49 +844,65 @@ proc toArchiveQuery(request: HistoryQuery): ArchiveQuery =
   ArchiveQuery(
     pubsubTopic: request.pubsubTopic,
     contentTopics: request.contentTopics,
-    cursor: request.cursor.map(proc(cursor: HistoryCursor): ArchiveCursor = ArchiveCursor(pubsubTopic: cursor.pubsubTopic, senderTime: cursor.senderTime, storeTime: cursor.storeTime, digest: cursor.digest)),
+    cursor: request.cursor.map(
+      proc(cursor: HistoryCursor): ArchiveCursor =
+        ArchiveCursor(
+          pubsubTopic: cursor.pubsubTopic,
+          senderTime: cursor.senderTime,
+          storeTime: cursor.storeTime,
+          digest: cursor.digest,
+        )
+    ),
     startTime: request.startTime,
     endTime: request.endTime,
     pageSize: request.pageSize.uint,
-    direction: request.direction
+    direction: request.direction,
   )

 # TODO: Review this mapping logic. Maybe, move it to the application code
 proc toHistoryResult*(res: ArchiveResult): HistoryResult =
   if res.isErr():
     let error = res.error
-    case res.error.kind:
+    case res.error.kind
     of ArchiveErrorKind.DRIVER_ERROR, ArchiveErrorKind.INVALID_QUERY:
-      err(HistoryError(
-        kind: HistoryErrorKind.BAD_REQUEST,
-        cause: res.error.cause
-      ))
+      err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: res.error.cause))
     else:
       err(HistoryError(kind: HistoryErrorKind.UNKNOWN))
-
   else:
     let response = res.get()
-    ok(HistoryResponse(
-      messages: response.messages,
-      cursor: response.cursor.map(proc(cursor: ArchiveCursor): HistoryCursor = HistoryCursor(pubsubTopic: cursor.pubsubTopic, senderTime: cursor.senderTime, storeTime: cursor.storeTime, digest: cursor.digest)),
-    ))
+    ok(
+      HistoryResponse(
+        messages: response.messages,
+        cursor: response.cursor.map(
+          proc(cursor: ArchiveCursor): HistoryCursor =
+            HistoryCursor(
+              pubsubTopic: cursor.pubsubTopic,
+              senderTime: cursor.senderTime,
+              storeTime: cursor.storeTime,
+              digest: cursor.digest,
+            )
+        ),
+      )
+    )

 proc mountStore*(node: WakuNode) {.async, raises: [Defect, LPError].} =
   info "mounting waku store protocol"

   if node.wakuArchive.isNil():
-    error "failed to mount waku store protocol", error="waku archive not set"
+    error "failed to mount waku store protocol", error = "waku archive not set"
     return

   # TODO: Review this handler logic. Maybe, move it to the application code
-  let queryHandler: HistoryQueryHandler = proc(request: HistoryQuery): Future[HistoryResult] {.async.} =
-    if request.cursor.isSome():
-      request.cursor.get().checkHistCursor().isOkOr:
-        return err(error)
+  let queryHandler: HistoryQueryHandler = proc(
+      request: HistoryQuery
+  ): Future[HistoryResult] {.async.} =
+    if request.cursor.isSome():
+      request.cursor.get().checkHistCursor().isOkOr:
+        return err(error)

-    let request = request.toArchiveQuery()
-    let response = await node.wakuArchive.findMessages(request)
-    return response.toHistoryResult()
+    let request = request.toArchiveQuery()
+    let response = await node.wakuArchive.findMessages(request)
+    return response.toHistoryResult()

   node.wakuStore = WakuStore.new(node.peerManager, node.rng, queryHandler)
@@ -858,7 +917,9 @@ proc mountStoreClient*(node: WakuNode) =

   node.wakuStoreClient = WakuStoreClient.new(node.peerManager, node.rng)

-proc query*(node: WakuNode, query: HistoryQuery, peer: RemotePeerInfo): Future[WakuStoreResult[HistoryResponse]] {.async, gcsafe.} =
+proc query*(
+    node: WakuNode, query: HistoryQuery, peer: RemotePeerInfo
+): Future[WakuStoreResult[HistoryResponse]] {.async, gcsafe.} =
   ## Queries known nodes for historical messages
   if node.wakuStoreClient.isNil():
     return err("waku store client is nil")
@@ -872,8 +933,11 @@ proc query*(node: WakuNode, query: HistoryQuery, peer: RemotePeerInfo): Future[W
   return ok(response)

 # TODO: Move to application module (e.g., wakunode2.nim)
-proc query*(node: WakuNode, query: HistoryQuery): Future[WakuStoreResult[HistoryResponse]] {.async, gcsafe,
-  deprecated: "Use 'node.query()' with peer destination instead".} =
+proc query*(
+    node: WakuNode, query: HistoryQuery
+): Future[WakuStoreResult[HistoryResponse]] {.
+    async, gcsafe, deprecated: "Use 'node.query()' with peer destination instead"
+.} =
   ## Queries known nodes for historical messages
   if node.wakuStoreClient.isNil():
     return err("waku store client is nil")
@@ -887,7 +951,9 @@ proc query*(node: WakuNode, query: HistoryQuery): Future[WakuStoreResult[History

 when defined(waku_exp_store_resume):
   # TODO: Move to application module (e.g., wakunode2.nim)
-  proc resume*(node: WakuNode, peerList: Option[seq[RemotePeerInfo]] = none(seq[RemotePeerInfo])) {.async, gcsafe.} =
+  proc resume*(
+      node: WakuNode, peerList: Option[seq[RemotePeerInfo]] = none(seq[RemotePeerInfo])
+  ) {.async, gcsafe.} =
     ## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku node has been online
     ## for resume to work properly the waku node must have the store protocol mounted in the full mode (i.e., persisting messages)
     ## messages are stored in the wakuStore's messages field and in the message db
@@ -901,11 +967,11 @@ when defined(waku_exp_store_resume):

     let retrievedMessages = await node.wakuStoreClient.resume(peerList)
     if retrievedMessages.isErr():
-      error "failed to resume store", error=retrievedMessages.error
+      error "failed to resume store", error = retrievedMessages.error
       return

-    info "the number of retrieved messages since the last online time: ", number=retrievedMessages.value
-
+    info "the number of retrieved messages since the last online time: ",
+      number = retrievedMessages.value

## Waku lightpush

@@ -915,11 +981,16 @@ proc mountLightPush*(node: WakuNode) {.async.} =
   var pushHandler: PushMessageHandler
   if node.wakuRelay.isNil():
     debug "mounting lightpush without relay (nil)"
-    pushHandler = proc(peer: PeerId, pubsubTopic: string, message: WakuMessage): Future[WakuLightPushResult[void]] {.async.} =
+    pushHandler = proc(
+        peer: PeerId, pubsubTopic: string, message: WakuMessage
+    ): Future[WakuLightPushResult[void]] {.async.} =
       return err("no waku relay found")
   else:
-    pushHandler = proc(peer: PeerId, pubsubTopic: string, message: WakuMessage): Future[WakuLightPushResult[void]] {.async.} =
-      let publishedCount = await node.wakuRelay.publish(pubsubTopic, message.encode().buffer)
+    pushHandler = proc(
+        peer: PeerId, pubsubTopic: string, message: WakuMessage
+    ): Future[WakuLightPushResult[void]] {.async.} =
+      let publishedCount =
+        await node.wakuRelay.publish(pubsubTopic, message.encode().buffer)

       if publishedCount == 0:
         ## Agreed change expected to the lightpush protocol to better handle such case. https://github.com/waku-org/pm/issues/93
@@ -936,13 +1007,17 @@ proc mountLightPush*(node: WakuNode) {.async.} =

   node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec))

-
 proc mountLightPushClient*(node: WakuNode) =
   info "mounting light push client"

   node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng)

-proc lightpushPublish*(node: WakuNode, pubsubTopic: Option[PubsubTopic], message: WakuMessage, peer: RemotePeerInfo): Future[WakuLightPushResult[void]] {.async, gcsafe.} =
+proc lightpushPublish*(
+    node: WakuNode,
+    pubsubTopic: Option[PubsubTopic],
+    message: WakuMessage,
+    peer: RemotePeerInfo,
+): Future[WakuLightPushResult[void]] {.async, gcsafe.} =
   ## Pushes a `WakuMessage` to a node which relays it further on PubSub topic.
   ## Returns whether relaying was successful or not.
   ## `WakuMessage` should contain a `contentTopic` field for light node
@@ -951,7 +1026,10 @@ proc lightpushPublish*(node: WakuNode, pubsubTopic: Option[PubsubTopic], message
     return err("waku lightpush client is nil")

   if pubsubTopic.isSome():
-    debug "publishing message with lightpush", pubsubTopic=pubsubTopic.get(), contentTopic=message.contentTopic, peer=peer.peerId
+    debug "publishing message with lightpush",
+      pubsubTopic = pubsubTopic.get(),
+      contentTopic = message.contentTopic,
+      peer = peer.peerId
     return await node.wakuLightpushClient.publish(pubsubTopic.get(), message, peer)

   let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, message.contentTopic)
@@ -959,55 +1037,64 @@ proc lightpushPublish*(node: WakuNode, pubsubTopic: Option[PubsubTopic], message
   let topicMap =
     if topicMapRes.isErr():
       return err(topicMapRes.error)
-    else: topicMapRes.get()
+    else:
+      topicMapRes.get()

   for pubsub, _ in topicMap.pairs: # There's only one pair anyway
-    debug "publishing message with lightpush", pubsubTopic=pubsub, contentTopic=message.contentTopic, peer=peer.peerId
+    debug "publishing message with lightpush",
+      pubsubTopic = pubsub, contentTopic = message.contentTopic, peer = peer.peerId

   return await node.wakuLightpushClient.publish($pubsub, message, peer)

 # TODO: Move to application module (e.g., wakunode2.nim)
-proc lightpushPublish*(node: WakuNode, pubsubTopic: Option[PubsubTopic], message: WakuMessage): Future[WakuLightPushResult[void]] {.async, gcsafe,
-  deprecated: "Use 'node.lightpushPublish()' instead".} =
+proc lightpushPublish*(
+    node: WakuNode, pubsubTopic: Option[PubsubTopic], message: WakuMessage
+): Future[WakuLightPushResult[void]] {.
+    async, gcsafe, deprecated: "Use 'node.lightpushPublish()' instead"
+.} =
   if node.wakuLightpushClient.isNil():
     let msg = "waku lightpush client is nil"
-    error "failed to publish message", msg=msg
+    error "failed to publish message", msg = msg
     return err(msg)

   let peerOpt = node.peerManager.selectPeer(WakuLightPushCodec)
   if peerOpt.isNone():
     let msg = "no suitable remote peers"
-    error "failed to publish message", msg=msg
+    error "failed to publish message", msg = msg
     return err(msg)

-  let publishRes = await node.lightpushPublish(pubsubTopic, message, peer=peerOpt.get())
-
+  let publishRes =
+    await node.lightpushPublish(pubsubTopic, message, peer = peerOpt.get())
+
   if publishRes.isErr():
-    error "failed to publish message", error=publishRes.error
-
+    error "failed to publish message", error = publishRes.error
+
   return publishRes

-
## Waku RLN Relay

-proc mountRlnRelay*(node: WakuNode,
-                    rlnConf: WakuRlnConfig,
-                    spamHandler = none(SpamHandler),
-                    registrationHandler = none(RegistrationHandler)) {.async.} =
+proc mountRlnRelay*(
+    node: WakuNode,
+    rlnConf: WakuRlnConfig,
+    spamHandler = none(SpamHandler),
+    registrationHandler = none(RegistrationHandler),
+) {.async.} =
   info "mounting rln relay"

   if node.wakuRelay.isNil():
-    raise newException(CatchableError, "WakuRelay protocol is not mounted, cannot mount WakuRlnRelay")
+    raise newException(
+      CatchableError, "WakuRelay protocol is not mounted, cannot mount WakuRlnRelay"
+    )

-  let rlnRelayRes = waitFor WakuRlnRelay.new(rlnConf,
-                                             registrationHandler)
+  let rlnRelayRes = waitFor WakuRlnRelay.new(rlnConf, registrationHandler)
   if rlnRelayRes.isErr():
-    raise newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error)
+    raise
+      newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error)
   let rlnRelay = rlnRelayRes.get()
   let validator = generateRlnValidator(rlnRelay, spamHandler)

   # register rln validator as default validator
   debug "Registering RLN validator"
   node.wakuRelay.addValidator(validator, "RLN validation failed")
-
+
   node.wakuRlnRelay = rlnRelay

## Waku peer-exchange

@@ -1048,12 +1135,14 @@ proc fetchPeerExchangePeers*(
     return err("Peer exchange failure: " & $pxPeersRes.error)

 # TODO: Move to application module (e.g., wakunode2.nim)
-proc setPeerExchangePeer*(node: WakuNode, peer: RemotePeerInfo | MultiAddress | string) =
+proc setPeerExchangePeer*(
+    node: WakuNode, peer: RemotePeerInfo | MultiAddress | string
+) =
   if node.wakuPeerExchange.isNil():
     error "could not set peer, waku peer-exchange is nil"
     return

-  info "Set peer-exchange peer", peer=peer
+  info "Set peer-exchange peer", peer = peer

   let remotePeerRes = parsePeerInfo(peer)
   if remotePeerRes.isErr():
@@ -1063,7 +1152,6 @@ proc setPeerExchangePeer*(node: WakuNode, peer: RemotePeerInfo | MultiAddress |
   node.peerManager.addPeer(remotePeerRes.value, PeerExchange)
   waku_px_peers.inc()

-
## Other protocols

 proc mountLibp2pPing*(node: WakuNode) {.async, raises: [Defect, LPError].} =
@@ -1089,8 +1177,8 @@ proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
     trace "Running keepalive"

     # First get a list of connected peer infos
-    let peers = node.peerManager.peerStore.peers()
-                                          .filterIt(it.connectedness == Connected)
+    let peers =
+      node.peerManager.peerStore.peers().filterIt(it.connectedness == Connected)

     for peer in peers:
       try:
@@ -1104,7 +1192,7 @@ proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
 proc startKeepalive*(node: WakuNode) =
   let defaultKeepalive = 2.minutes # 20% of the default chronosstream timeout duration

-  info "starting keepalive", keepalive=defaultKeepalive
+  info "starting keepalive", keepalive = defaultKeepalive

   asyncSpawn node.keepaliveLoop(defaultKeepalive)
@@ -1134,7 +1222,7 @@ proc printNodeNetworkInfo*(node: WakuNode): void =
   try:
     localIp = $getPrimaryIPAddr()
   except Exception as e:
-    warn "Could not retrieve localIp", msg=e.msg
+    warn "Could not retrieve localIp", msg = e.msg

   info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
@@ -1156,8 +1244,8 @@ proc start*(node: WakuNode) {.async.} =
   ## Starts a created Waku Node and
   ## all its mounted protocols.
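# End-to-end lifecycle sketch tying the procs in this file together (hedged;
# the mount calls take the defaults shown above):
#
#   waitFor node.mountRelay()
#   waitFor node.start()    # starts the switch and all mounted protocols
#   node.startKeepalive()   # periodic pings, default every 2 minutes
#   # ... run ...
#   waitFor node.stop()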
- waku_version.set(1, labelValues=[git_version]) - info "Starting Waku node", version=git_version + waku_version.set(1, labelValues = [git_version]) + info "Starting Waku node", version = git_version var zeroPortPresent = false for address in node.announcedAddresses: @@ -1173,9 +1261,10 @@ proc start*(node: WakuNode) {.async.} = ## The switch uses this mapper to update peer info addrs ## with announced addrs after start - let addressMapper = - proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} = - return node.announcedAddresses + let addressMapper = proc( + listenAddrs: seq[MultiAddress] + ): Future[seq[MultiAddress]] {.async.} = + return node.announcedAddresses node.switch.peerInfo.addressMappers.add(addressMapper) ## The switch will update addresses after start using the addressMapper @@ -1204,7 +1293,7 @@ proc stop*(node: WakuNode) {.async.} = try: await node.wakuRlnRelay.stop() ## this can raise an exception except Exception: - error "exception stopping the node", error=getCurrentExceptionMsg() + error "exception stopping the node", error = getCurrentExceptionMsg() if not node.wakuArchive.isNil(): await node.wakuArchive.stopWait() diff --git a/waku/node/waku_switch.nim b/waku/node/waku_switch.nim index 56b984a2e..9e33c56b2 100644 --- a/waku/node/waku_switch.nim +++ b/waku/node/waku_switch.nim @@ -6,7 +6,8 @@ else: import std/[options, math], - chronos, chronicles, + chronos, + chronicles, eth/keys, libp2p/crypto/crypto, libp2p/protocols/pubsub/gossipsub, @@ -20,49 +21,49 @@ import const MaxConnectionsPerPeer* = 1 proc withWsTransport*(b: SwitchBuilder): SwitchBuilder = - b.withTransport(proc(upgr: Upgrade): Transport = WsTransport.new(upgr)) + b.withTransport( + proc(upgr: Upgrade): Transport = + WsTransport.new(upgr) + ) -proc getSecureKey(path : string): TLSPrivateKey - {.raises: [Defect, IOError].} = - - trace "Key path is.", path=path +proc getSecureKey(path: string): TLSPrivateKey {.raises: [Defect, IOError].} = + trace "Key path is.", path = path let stringkey: string = readFile(path) try: let key = TLSPrivateKey.init(stringkey) return key except TLSStreamProtocolError as exc: - debug "exception raised from getSecureKey", msg=exc.msg + debug "exception raised from getSecureKey", msg = exc.msg -proc getSecureCert(path : string): TLSCertificate - {.raises: [Defect, IOError].} = - - trace "Certificate path is.", path=path +proc getSecureCert(path: string): TLSCertificate {.raises: [Defect, IOError].} = + trace "Certificate path is.", path = path let stringCert: string = readFile(path) try: - let cert = TLSCertificate.init(stringCert) + let cert = TLSCertificate.init(stringCert) return cert except TLSStreamProtocolError as exc: - debug "exception raised from getSecureCert", msg=exc.msg + debug "exception raised from getSecureCert", msg = exc.msg -proc withWssTransport*(b: SwitchBuilder, - secureKeyPath: string, - secureCertPath: string): SwitchBuilder - {.raises: [Defect, IOError].} = - - let key : TLSPrivateKey = getSecureKey(secureKeyPath) - let cert : TLSCertificate = getSecureCert(secureCertPath) - b.withTransport(proc(upgr: Upgrade): Transport = WsTransport.new(upgr, - tlsPrivateKey = key, - tlsCertificate = cert, - {TLSFlags.NoVerifyHost, TLSFlags.NoVerifyServerName})) +proc withWssTransport*( + b: SwitchBuilder, secureKeyPath: string, secureCertPath: string +): SwitchBuilder {.raises: [Defect, IOError].} = + let key: TLSPrivateKey = getSecureKey(secureKeyPath) + let cert: TLSCertificate = getSecureCert(secureCertPath) + b.withTransport( + proc(upgr: 
Upgrade): Transport = + WsTransport.new( + upgr, + tlsPrivateKey = key, + tlsCertificate = cert, + {TLSFlags.NoVerifyHost, TLSFlags.NoVerifyServerName}, + ) + ) proc newWakuSwitch*( privKey = none(crypto.PrivateKey), address = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(), wsAddress = none(MultiAddress), - secureManagers: openarray[SecureProtocol] = [ - SecureProtocol.Noise, - ], + secureManagers: openarray[SecureProtocol] = [SecureProtocol.Noise], transportFlags: set[ServerFlags] = {}, rng: ref HmacDrbgContext, inTimeout: Duration = 5.minutes, @@ -76,53 +77,50 @@ proc newWakuSwitch*( wssEnabled: bool = false, secureKeyPath: string = "", secureCertPath: string = "", - agentString = none(string), # defaults to nim-libp2p version + agentString = none(string), # defaults to nim-libp2p version peerStoreCapacity = none(int), # defaults to 1.25 maxConnections services: seq[switch.Service] = @[], rendezvous: RendezVous = nil, - ): Switch - {.raises: [Defect, IOError, LPError].} = +): Switch {.raises: [Defect, IOError, LPError].} = + var b = SwitchBuilder + .new() + .withRng(rng) + .withMaxConnections(maxConnections) + .withMaxIn(maxIn) + .withMaxOut(maxOut) + .withMaxConnsPerPeer(maxConnsPerPeer) + .withYamux() + .withMplex(inTimeout, outTimeout) + .withNoise() + .withTcpTransport(transportFlags) + .withNameResolver(nameResolver) + .withSignedPeerRecord(sendSignedPeerRecord) + .withCircuitRelay() + .withAutonat() - var b = SwitchBuilder - .new() - .withRng(rng) - .withMaxConnections(maxConnections) - .withMaxIn(maxIn) - .withMaxOut(maxOut) - .withMaxConnsPerPeer(maxConnsPerPeer) - .withYamux() - .withMplex(inTimeout, outTimeout) - .withNoise() - .withTcpTransport(transportFlags) - .withNameResolver(nameResolver) - .withSignedPeerRecord(sendSignedPeerRecord) - .withCircuitRelay() - .withAutonat() + if peerStoreCapacity.isSome(): + b = b.withPeerStore(peerStoreCapacity.get()) + else: + let defaultPeerStoreCapacity = int(maxConnections) * 5 + b = b.withPeerStore(defaultPeerStoreCapacity) + if agentString.isSome(): + b = b.withAgentVersion(agentString.get()) + if privKey.isSome(): + b = b.withPrivateKey(privKey.get()) + if wsAddress.isSome(): + b = b.withAddresses(@[wsAddress.get(), address]) - if peerStoreCapacity.isSome(): - b = b.withPeerStore(peerStoreCapacity.get()) + if wssEnabled: + b = b.withWssTransport(secureKeyPath, secureCertPath) else: - let defaultPeerStoreCapacity = int(maxConnections)*5 - b = b.withPeerStore(defaultPeerStoreCapacity) - if agentString.isSome(): - b = b.withAgentVersion(agentString.get()) - if privKey.isSome(): - b = b.withPrivateKey(privKey.get()) - if wsAddress.isSome(): - b = b.withAddresses(@[wsAddress.get(), address]) + b = b.withWsTransport() + else: + b = b.withAddress(address) - if wssEnabled: - b = b.withWssTransport(secureKeyPath, secureCertPath) - else: - b = b.withWsTransport() + if services.len > 0: + b = b.withServices(services) - else : - b = b.withAddress(address) + if not rendezvous.isNil(): + b = b.withRendezVous(rendezvous) - if services.len > 0: - b = b.withServices(services) - - if not rendezvous.isNil(): - b = b.withRendezVous(rendezvous) - - b.build() + b.build() diff --git a/waku/utils/collector.nim b/waku/utils/collector.nim index 9bb26b8a1..c85aee052 100644 --- a/waku/utils/collector.nim +++ b/waku/utils/collector.nim @@ -3,10 +3,11 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - metrics +import metrics -proc parseCollectorIntoF64(collector: SimpleCollector): float64 {.gcsafe, raises: [Defect] } = +proc 
parseCollectorIntoF64( + collector: SimpleCollector +): float64 {.gcsafe, raises: [Defect].} = {.gcsafe.}: var total = 0.float64 for metrics in collector.metrics: @@ -31,4 +32,4 @@ template collectorAsF64*(collector: Collector): float64 = ## Serves as a wrapper for parseCollectorIntoF64 which is gcsafe {.gcsafe.}: let total = parseCollectorIntoF64(collector) - total \ No newline at end of file + total diff --git a/waku/utils/noise.nim b/waku/utils/noise.nim index 2da3943c5..77ea86b81 100644 --- a/waku/utils/noise.nim +++ b/waku/utils/noise.nim @@ -3,18 +3,14 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - stew/results -import - ../waku_core, - ../waku_noise/noise_types, - ../waku_noise/noise_utils - +import stew/results +import ../waku_core, ../waku_noise/noise_types, ../waku_noise/noise_utils # Decodes a WakuMessage to a PayloadV2 # Currently, this is just a wrapper over deserializePayloadV2 and encryption/decryption is done on top (no KeyInfo) -proc decodePayloadV2*(message: WakuMessage): Result[PayloadV2, cstring] - {.raises: [NoiseMalformedHandshake, NoisePublicKeyError].} = +proc decodePayloadV2*( + message: WakuMessage +): Result[PayloadV2, cstring] {.raises: [NoiseMalformedHandshake, NoisePublicKeyError].} = # We check message version (only 2 is supported in this proc) case message.version of 2: @@ -27,18 +23,21 @@ proc decodePayloadV2*(message: WakuMessage): Result[PayloadV2, cstring] else: return err("Wrong message version while decoding payload") - # Encodes a PayloadV2 to a WakuMessage # Currently, this is just a wrapper over serializePayloadV2 and encryption/decryption is done on top (no KeyInfo) -proc encodePayloadV2*(payload2: PayloadV2, contentTopic: ContentTopic = default(ContentTopic)): Result[WakuMessage, cstring] - {.raises: [NoiseMalformedHandshake, NoisePublicKeyError].} = - +proc encodePayloadV2*( + payload2: PayloadV2, contentTopic: ContentTopic = default(ContentTopic) +): Result[WakuMessage, cstring] {. 
+ raises: [NoiseMalformedHandshake, NoisePublicKeyError] +.} = # We attempt to encode the PayloadV2 let serializedPayload2 = serializePayloadV2(payload2) if not serializedPayload2.isOk(): return err("Failed to encode PayloadV2") # If successful, we create and return a WakuMessage - let msg = WakuMessage(payload: serializedPayload2.get(), version: 2, contentTopic: contentTopic) + let msg = WakuMessage( + payload: serializedPayload2.get(), version: 2, contentTopic: contentTopic + ) return ok(msg) diff --git a/waku/utils/tableutils.nim b/waku/utils/tableutils.nim index 1bdcce941..a490534be 100644 --- a/waku/utils/tableutils.nim +++ b/waku/utils/tableutils.nim @@ -1,13 +1,11 @@ -import std/tables, - stew/objects, - stew/templateutils +import std/tables, stew/objects, stew/templateutils template keepItIf*[A, B](tableParam: var Table[A, B], itPredicate: untyped) = bind evalTemplateParamOnce evalTemplateParamOnce(tableParam, t): var itemsToDelete: seq[A] - var key {.inject.} : A - var val {.inject.} : B + var key {.inject.}: A + var val {.inject.}: B for k, v in t.mpairs(): key = k @@ -22,8 +20,8 @@ template keepItIf*[A, B](tableParam: var TableRef[A, B], itPredicate: untyped) = bind evalTemplateParamOnce evalTemplateParamOnce(tableParam, t): var itemsToDelete: seq[A] - let key {.inject.} : A - let val {.inject.} : B + let key {.inject.}: A + let val {.inject.}: B for k, v in t[].mpairs(): key = k diff --git a/waku/waku_api.nim b/waku/waku_api.nim index e412d135c..b584bfa2f 100644 --- a/waku/waku_api.nim +++ b/waku/waku_api.nim @@ -1,8 +1,3 @@ -import - ./waku_api/message_cache, - ./waku_api/rest, - ./waku_api/json_rpc +import ./waku_api/message_cache, ./waku_api/rest, ./waku_api/json_rpc -export - message_cache, - rest \ No newline at end of file +export message_cache, rest diff --git a/waku/waku_api/handlers.nim b/waku/waku_api/handlers.nim index dfbfca76d..16c99998a 100644 --- a/waku/waku_api/handlers.nim +++ b/waku/waku_api/handlers.nim @@ -3,26 +3,21 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - chronos, - chronicles, - std/[options, sequtils], - stew/results -import - ../waku_discv5, - ../waku_relay, - ../waku_core, - ./message_cache +import chronos, chronicles, std/[options, sequtils], stew/results +import ../waku_discv5, ../waku_relay, ../waku_core, ./message_cache ### Discovery -type DiscoveryHandler* = proc(): Future[Result[Option[RemotePeerInfo], string]] {.async, closure.} +type DiscoveryHandler* = + proc(): Future[Result[Option[RemotePeerInfo], string]] {.async, closure.} -proc defaultDiscoveryHandler*(discv5: WakuDiscoveryV5, cap: Capabilities): DiscoveryHandler = +proc defaultDiscoveryHandler*( + discv5: WakuDiscoveryV5, cap: Capabilities +): DiscoveryHandler = proc(): Future[Result[Option[RemotePeerInfo], string]] {.async, closure.} = #Discv5 is already filtering peers by shards no need to pass a predicate. 
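# An aside sketching how a DiscoveryHandler is consumed, assuming the
# Result/Option shape declared above and a chronicles logger in scope;
# the proc name `pickServicePeer` is hypothetical:
proc pickServicePeer(handler: DiscoveryHandler): Future[void] {.async.} =
  let res = await handler()
  if res.isErr():
    error "peer discovery failed", error = res.error
    return
  let peerOpt = res.get()
  if peerOpt.isSome():
    info "discovered service peer", peer = $peerOpt.get()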
let findPeers = discv5.findRandomPeers() - + if not await findPeers.withTimeout(60.seconds): return err("discovery process timed out!") @@ -42,4 +37,4 @@ proc defaultDiscoveryHandler*(discv5: WakuDiscoveryV5, cap: Capabilities): Disco proc messageCacheHandler*(cache: MessageCache): WakuRelayHandler = return proc(pubsubTopic: string, msg: WakuMessage): Future[void] {.async, closure.} = - cache.addMessage(pubsubTopic, msg) \ No newline at end of file + cache.addMessage(pubsubTopic, msg) diff --git a/waku/waku_api/message_cache.nim b/waku/waku_api/message_cache.nim index fe2bbd77c..7f97cbf8c 100644 --- a/waku/waku_api/message_cache.nim +++ b/waku/waku_api/message_cache.nim @@ -9,8 +9,7 @@ import chronicles, chronos, libp2p/protocols/pubsub -import - ../waku_core +import ../waku_core logScope: topics = "waku node message_cache" @@ -29,16 +28,12 @@ type MessageCache* = ref object capacity: int func `$`*(self: MessageCache): string = - "Messages: " & $self.messages.len & - " \nPubsubTopics: " & $self.pubsubTopics & - " \nContentTopics: " & $self.contentTopics & - " \nPubsubIndex: " & $self.pubsubIndex & - " \nContentIndex: " & $self.contentIndex + "Messages: " & $self.messages.len & " \nPubsubTopics: " & $self.pubsubTopics & + " \nContentTopics: " & $self.contentTopics & " \nPubsubIndex: " & $self.pubsubIndex & + " \nContentIndex: " & $self.contentIndex -func init*(T: type MessageCache, capacity=DefaultMessageCacheCapacity): T = - MessageCache( - capacity: capacity - ) +func init*(T: type MessageCache, capacity = DefaultMessageCacheCapacity): T = + MessageCache(capacity: capacity) proc messagesCount*(self: MessageCache): int = self.messages.len @@ -51,7 +46,7 @@ proc contentTopicCount*(self: MessageCache): int = proc pubsubSearch(self: MessageCache, pubsubTopic: PubsubTopic): Option[int] = # Return some with the index if found none otherwise. - + for i, topic in self.pubsubTopics: if topic == pubsubTopic: return some(i) @@ -60,7 +55,7 @@ proc pubsubSearch(self: MessageCache, pubsubTopic: PubsubTopic): Option[int] = proc contentSearch(self: MessageCache, contentTopic: ContentTopic): Option[int] = # Return some with the index if found none otherwise. - + for i, topic in self.contentTopics: if topic == contentTopic: return some(i) @@ -84,9 +79,9 @@ proc contentSubscribe*(self: MessageCache, contentTopic: ContentTopic) = proc removeMessage(self: MessageCache, idx: int) = # get last index because del() is a swap let lastIndex = self.messages.high - + self.messages.del(idx) - + # update indices var j = self.pubsubIndex.high while j > -1: @@ -114,18 +109,20 @@ proc pubsubUnsubscribe*(self: MessageCache, pubsubTopic: PubsubTopic) = let pubsubIdxOp = self.pubsubSearch(pubsubTopic) let pubsubIdx = - if pubsubIdxOp.isSome(): pubsubIdxOp.get() - else: return + if pubsubIdxOp.isSome(): + pubsubIdxOp.get() + else: + return let lastIndex = self.pubsubTopics.high self.pubsubTopics.del(pubsubIdx) - + var msgIndices = newSeq[int](0) var j = self.pubsubIndex.high while j > -1: let (pId, mId) = self.pubsubIndex[j] - + if pId == pubsubIdx: # remove index for this topic self.pubsubIndex.del(j) @@ -133,9 +130,9 @@ proc pubsubUnsubscribe*(self: MessageCache, pubsubTopic: PubsubTopic) = elif pId == lastIndex: # swap the index because pubsubTopics.del() is a swap self.pubsubIndex[j] = (pubsubIdx, mId) - + dec(j) - + # check if messages on this pubsub topic are indexed by any content topic, if not remove them. 
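# Two asides on the cache mechanics above, as a standalone sketch. The index
# fix-up relies on system.del's swap semantics for seq (the last element is
# moved into the deleted slot, nothing is shifted); the cache calls below
# assume the MessageCache API in this file, with illustrative topic strings:
when isMainModule:
  var s = @[10, 20, 30, 40]
  s.del(1)
  doAssert s == @[10, 40, 30]

  let cache = MessageCache.init(capacity = 3)
  cache.contentSubscribe("/waku/2/demo/proto")
  cache.addMessage("/waku/2/rs/0/0", WakuMessage(contentTopic: "/waku/2/demo/proto"))
  doAssert cache.getAutoMessages("/waku/2/demo/proto", clear = true).isOk()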
for mId in msgIndices: if not self.contentIndex.anyIt(it.msgIdx == mId): @@ -144,13 +141,15 @@ proc pubsubUnsubscribe*(self: MessageCache, pubsubTopic: PubsubTopic) = proc contentUnsubscribe*(self: MessageCache, contentTopic: ContentTopic) = let contentIdxOP = self.contentSearch(contentTopic) - let contentIdx = - if contentIdxOP.isSome(): contentIdxOP.get() - else: return + let contentIdx = + if contentIdxOP.isSome(): + contentIdxOP.get() + else: + return let lastIndex = self.contentTopics.high self.contentTopics.del(contentIdx) - + var msgIndices = newSeq[int](0) var j = self.contentIndex.high @@ -164,7 +163,7 @@ proc contentUnsubscribe*(self: MessageCache, contentTopic: ContentTopic) = elif cId == lastIndex: # swap the indices because contentTopics.del() is a swap self.contentIndex[j] = (contentIdx, mId) - + dec(j) # check if messages on this content topic are indexed by any pubsub topic, if not remove them. @@ -179,13 +178,9 @@ proc reset*(self: MessageCache) = self.pubsubIndex.setLen(0) self.contentIndex.setLen(0) -proc addMessage*( - self: MessageCache, - pubsubTopic: PubsubTopic, - msg: WakuMessage - ) = +proc addMessage*(self: MessageCache, pubsubTopic: PubsubTopic, msg: WakuMessage) = ## Idempotent message addition. - + var oldestTime = int64.high var oldestMsg = int.high for i, message in self.messages.reversed: @@ -193,8 +188,8 @@ proc addMessage*( return if message.timestamp < oldestTime: - oldestTime = message.timestamp - oldestMsg = i + oldestTime = message.timestamp + oldestMsg = i # reverse index oldestMsg = self.messages.high - oldestMsg @@ -222,7 +217,7 @@ proc addMessage*( # add the message, make space if needed if self.messages.len >= self.capacity: self.removeMessage(oldestMsg) - + let msgIdx = self.messages.len self.messages.add(msg) @@ -230,10 +225,8 @@ proc addMessage*( self.contentIndex.add((contentIdx, msgIdx)) proc getMessages*( - self: MessageCache, - pubsubTopic: PubsubTopic, - clear=false - ): Result[seq[WakuMessage], string] = + self: MessageCache, pubsubTopic: PubsubTopic, clear = false +): Result[seq[WakuMessage], string] = ## Return all messages on this pubsub topic if self.pubsubTopics.len == 0: @@ -243,7 +236,8 @@ proc getMessages*( let pubsubIdx = if pubsubIdxOp.isNone: return err("not subscribed to this pubsub topic") - else: pubsubIdxOp.get() + else: + pubsubIdxOp.get() let msgIndices = collect: for (pId, mId) in self.pubsubIndex: @@ -255,14 +249,12 @@ proc getMessages*( if clear: for idx in msgIndices.reversed: self.removeMessage(idx) - + return ok(messages) proc getAutoMessages*( - self: MessageCache, - contentTopic: ContentTopic, - clear=false - ): Result[seq[WakuMessage], string] = + self: MessageCache, contentTopic: ContentTopic, clear = false +): Result[seq[WakuMessage], string] = ## Return all messages on this content topic if self.contentTopics.len == 0: @@ -272,7 +264,8 @@ proc getAutoMessages*( let contentIdx = if contentIdxOp.isNone(): return err("not subscribed to this content topic") - else: contentIdxOp.get() + else: + contentIdxOp.get() let msgIndices = collect: for (cId, mId) in self.contentIndex: @@ -284,5 +277,5 @@ proc getAutoMessages*( if clear: for idx in msgIndices.reversed: self.removeMessage(idx) - - return ok(messages) \ No newline at end of file + + return ok(messages) diff --git a/waku/waku_api/rest/admin/client.nim b/waku/waku_api/rest/admin/client.nim index 976853656..6c9d92472 100644 --- a/waku/waku_api/rest/admin/client.nim +++ b/waku/waku_api/rest/admin/client.nim @@ -10,34 +10,30 @@ import presto/[route, client], 
stew/byteutils -import - ../serdes, - ../responses, - ../rest_serdes, - ./types +import ../serdes, ../responses, ../rest_serdes, ./types export types - logScope: topics = "waku node rest admin api" -proc encodeBytes*(value: seq[string], - contentType: string): RestResult[seq[byte]] = +proc encodeBytes*(value: seq[string], contentType: string): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) -proc getPeers*(): - RestResponse[seq[WakuPeer]] - {.rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodGet.} +proc getPeers*(): RestResponse[seq[WakuPeer]] {. + rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodGet +.} -proc postPeers*(body: seq[string]): - RestResponse[string] - {.rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodPost.} +proc postPeers*( + body: seq[string] +): RestResponse[string] {. + rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodPost +.} -proc getFilterSubscriptions*(): - RestResponse[seq[FilterSubscription]] - {.rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet.} +proc getFilterSubscriptions*(): RestResponse[seq[FilterSubscription]] {. + rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet +.} -proc getFilterSubscriptionsFilterNotMounted*(): - RestResponse[string] - {.rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet.} +proc getFilterSubscriptionsFilterNotMounted*(): RestResponse[string] {. + rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet +.} diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index ca85b61c6..06bf5376b 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -4,7 +4,7 @@ else: {.push raises: [].} import - std/[strformat,sequtils,sets,tables], + std/[strformat, sequtils, sets, tables], stew/byteutils, chronicles, json_serialization, @@ -39,99 +39,120 @@ proc tuplesToWakuPeers(peers: var WakuPeers, peersTup: seq[PeerProtocolTuple]) = for peer in peersTup: peers.add(peer.multiaddr, peer.protocol, peer.connected) - proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = - router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do () -> RestApiResponse: - var peers: WakuPeers = @[] + router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse: + var peers: WakuPeers = @[] if not node.wakuRelay.isNil(): # Map managed peers to WakuPeers and add to return list - let relayPeers = node.peerManager - .peerStore.peers(WakuRelayCodec) - .mapIt(( - multiaddr: constructMultiaddrStr(it), - protocol: WakuRelayCodec, - connected: it.connectedness == Connectedness.Connected) - ) + let relayPeers = node.peerManager.peerStore.peers(WakuRelayCodec).mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: WakuRelayCodec, + connected: it.connectedness == Connectedness.Connected, + ) + ) tuplesToWakuPeers(peers, relayPeers) if not node.wakuFilterLegacy.isNil(): # Map WakuFilter peers to WakuPeers and add to return list - let filterPeers = node.peerManager.peerStore.peers(WakuLegacyFilterCodec) - .mapIt((multiaddr: constructMultiaddrStr(it), - protocol: WakuLegacyFilterCodec, - connected: it.connectedness == Connectedness.Connected)) + let filterPeers = node.peerManager.peerStore.peers(WakuLegacyFilterCodec).mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: WakuLegacyFilterCodec, + connected: it.connectedness == Connectedness.Connected, + ) + ) tuplesToWakuPeers(peers, filterPeers) if not node.wakuFilter.isNil(): # 
Map WakuFilter peers to WakuPeers and add to return list - let filterV2Peers = node.peerManager.peerStore.peers(WakuFilterSubscribeCodec) - .mapIt((multiaddr: constructMultiaddrStr(it), - protocol: WakuFilterSubscribeCodec, - connected: it.connectedness == Connectedness.Connected)) + let filterV2Peers = node.peerManager.peerStore + .peers(WakuFilterSubscribeCodec) + .mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: WakuFilterSubscribeCodec, + connected: it.connectedness == Connectedness.Connected, + ) + ) tuplesToWakuPeers(peers, filterV2Peers) if not node.wakuStore.isNil(): # Map WakuStore peers to WakuPeers and add to return list - let storePeers = node.peerManager.peerStore - .peers(WakuStoreCodec) - .mapIt((multiaddr: constructMultiaddrStr(it), - protocol: WakuStoreCodec, - connected: it.connectedness == Connectedness.Connected)) + let storePeers = node.peerManager.peerStore.peers(WakuStoreCodec).mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: WakuStoreCodec, + connected: it.connectedness == Connectedness.Connected, + ) + ) tuplesToWakuPeers(peers, storePeers) if not node.wakuLightPush.isNil(): # Map WakuStore peers to WakuPeers and add to return list - let lightpushPeers = node.peerManager.peerStore - .peers(WakuLightPushCodec) - .mapIt((multiaddr: constructMultiaddrStr(it), - protocol: WakuLightPushCodec, - connected: it.connectedness == Connectedness.Connected)) + let lightpushPeers = node.peerManager.peerStore.peers(WakuLightPushCodec).mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: WakuLightPushCodec, + connected: it.connectedness == Connectedness.Connected, + ) + ) tuplesToWakuPeers(peers, lightpushPeers) - let resp = RestApiResponse.jsonResponse(peers, status=Http200) + let resp = RestApiResponse.jsonResponse(peers, status = Http200) if resp.isErr(): - error "An error ocurred while building the json respose: ", error=resp.error - return RestApiResponse.internalServerError(fmt("An error ocurred while building the json respose: {resp.error}")) + error "An error ocurred while building the json respose: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error ocurred while building the json respose: {resp.error}") + ) return resp.get() proc installAdminV1PostPeersHandler(router: var RestRouter, node: WakuNode) = - router.api(MethodPost, ROUTE_ADMIN_V1_PEERS) do (contentBody: Option[ContentBody]) -> RestApiResponse: - + router.api(MethodPost, ROUTE_ADMIN_V1_PEERS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: let peers: seq[string] = decodeRequestBody[seq[string]](contentBody).valueOr: return RestApiResponse.badRequest(fmt("Failed to decode request: {error}")) for i, peer in peers: let peerInfo = parsePeerInfo(peer).valueOr: - return RestApiResponse.badRequest(fmt("Couldn't parse remote peer info: {error}")) + return + RestApiResponse.badRequest(fmt("Couldn't parse remote peer info: {error}")) - if not (await node.peerManager.connectRelay(peerInfo, source="rest")): - return RestApiResponse.badRequest(fmt("Failed to connect to peer at index: {i} - {peer}")) + if not (await node.peerManager.connectRelay(peerInfo, source = "rest")): + return RestApiResponse.badRequest( + fmt("Failed to connect to peer at index: {i} - {peer}") + ) return RestApiResponse.ok() proc installAdminV1GetFilterSubsHandler(router: var RestRouter, node: WakuNode) = - router.api(MethodGet, ROUTE_ADMIN_V1_FILTER_SUBS) do () -> RestApiResponse: - + router.api(MethodGet, ROUTE_ADMIN_V1_FILTER_SUBS) do() -> RestApiResponse: if 
node.wakuFilter.isNil(): - return RestApiResponse.badRequest("Error: Filter Protocol is not mounted to the node") + return + RestApiResponse.badRequest("Error: Filter Protocol is not mounted to the node") var subscriptions: seq[FilterSubscription] = @[] filterCriteria: seq[FilterTopic] for peerId in node.wakuFilter.subscriptions.peersSubscribed.keys: - filterCriteria = node.wakuFilter.subscriptions.getPeerSubscriptions(peerId) - .mapIt(FilterTopic(pubsubTopic: it[0], - contentTopic: it[1])) + filterCriteria = node.wakuFilter.subscriptions.getPeerSubscriptions(peerId).mapIt( + FilterTopic(pubsubTopic: it[0], contentTopic: it[1]) + ) - subscriptions.add(FilterSubscription(peerId: $peerId, filterCriteria: filterCriteria)) + subscriptions.add( + FilterSubscription(peerId: $peerId, filterCriteria: filterCriteria) + ) - let resp = RestApiResponse.jsonResponse(subscriptions, status=Http200) + let resp = RestApiResponse.jsonResponse(subscriptions, status = Http200) if resp.isErr(): - error "An error ocurred while building the json respose: ", error=resp.error - return RestApiResponse.internalServerError(fmt("An error ocurred while building the json respose: {resp.error}")) + error "An error ocurred while building the json respose: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error ocurred while building the json respose: {resp.error}") + ) return resp.get() diff --git a/waku/waku_api/rest/admin/types.nim b/waku/waku_api/rest/admin/types.nim index 1e051e633..6ee1ed86a 100644 --- a/waku/waku_api/rest/admin/types.nim +++ b/waku/waku_api/rest/admin/types.nim @@ -8,65 +8,65 @@ import json_serialization, json_serialization/std/options, json_serialization/lexer -import - ../serdes +import ../serdes #### Types -type - ProtocolState* = object - protocol*: string - connected*: bool +type ProtocolState* = object + protocol*: string + connected*: bool -type - WakuPeer* = object - multiaddr*: string - protocols*: seq[ProtocolState] +type WakuPeer* = object + multiaddr*: string + protocols*: seq[ProtocolState] type WakuPeers* = seq[WakuPeer] -type - FilterTopic* = object - pubsubTopic*: string - contentTopic*: string +type FilterTopic* = object + pubsubTopic*: string + contentTopic*: string -type - FilterSubscription* = object - peerId*: string - filterCriteria*: seq[FilterTopic] +type FilterSubscription* = object + peerId*: string + filterCriteria*: seq[FilterTopic] #### Serialization and deserialization -proc writeValue*(writer: var JsonWriter[RestJson], value: ProtocolState) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: ProtocolState +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("protocol", value.protocol) writer.writeField("connected", value.connected) writer.endRecord() -proc writeValue*(writer: var JsonWriter[RestJson], value: WakuPeer) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: WakuPeer +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("multiaddr", value.multiaddr) writer.writeField("protocols", value.protocols) writer.endRecord() -proc writeValue*(writer: var JsonWriter[RestJson], value: FilterTopic) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterTopic +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("pubsubTopic", value.pubsubTopic) writer.writeField("contentTopic", value.contentTopic) writer.endRecord() -proc writeValue*(writer: var JsonWriter[RestJson], value: FilterSubscription) - 
{.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterSubscription +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("peerId", value.peerId) writer.writeField("filterCriteria", value.filterCriteria) writer.endRecord() -proc readValue*(reader: var JsonReader[RestJson], value: var ProtocolState) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader[RestJson], value: var ProtocolState +) {.gcsafe, raises: [SerializationError, IOError].} = var protocol: Option[string] connected: Option[bool] @@ -79,7 +79,9 @@ proc readValue*(reader: var JsonReader[RestJson], value: var ProtocolState) protocol = some(reader.readValue(string)) of "connected": if connected.isSome(): - reader.raiseUnexpectedField("Multiple `connected` fields found", "ProtocolState") + reader.raiseUnexpectedField( + "Multiple `connected` fields found", "ProtocolState" + ) connected = some(reader.readValue(bool)) else: unrecognizedFieldWarning() @@ -90,13 +92,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var ProtocolState) if protocol.isNone(): reader.raiseUnexpectedValue("Field `protocol` is missing") - value = ProtocolState( - protocol: protocol.get(), - connected: connected.get() - ) + value = ProtocolState(protocol: protocol.get(), connected: connected.get()) -proc readValue*(reader: var JsonReader[RestJson], value: var WakuPeer) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader[RestJson], value: var WakuPeer +) {.gcsafe, raises: [SerializationError, IOError].} = var multiaddr: Option[string] protocols: Option[seq[ProtocolState]] @@ -120,13 +120,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var WakuPeer) if protocols.isNone(): reader.raiseUnexpectedValue("Field `protocols` are missing") - value = WakuPeer( - multiaddr: multiaddr.get(), - protocols: protocols.get() - ) + value = WakuPeer(multiaddr: multiaddr.get(), protocols: protocols.get()) -proc readValue*(reader: var JsonReader[RestJson], value: var FilterTopic) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterTopic +) {.gcsafe, raises: [SerializationError, IOError].} = var pubsubTopic: Option[string] contentTopic: Option[string] @@ -135,11 +133,15 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterTopic) case fieldName of "pubsubTopic": if pubsubTopic.isSome(): - reader.raiseUnexpectedField("Multiple `pubsubTopic` fields found", "FilterTopic") + reader.raiseUnexpectedField( + "Multiple `pubsubTopic` fields found", "FilterTopic" + ) pubsubTopic = some(reader.readValue(string)) of "contentTopic": if contentTopic.isSome(): - reader.raiseUnexpectedField("Multiple `contentTopic` fields found", "FilterTopic") + reader.raiseUnexpectedField( + "Multiple `contentTopic` fields found", "FilterTopic" + ) contentTopic = some(reader.readValue(string)) else: unrecognizedFieldWarning() @@ -150,13 +152,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterTopic) if contentTopic.isNone(): reader.raiseUnexpectedValue("Field `contentTopic` are missing") - value = FilterTopic( - pubsubTopic: pubsubTopic.get(), - contentTopic: contentTopic.get() - ) + value = FilterTopic(pubsubTopic: pubsubTopic.get(), contentTopic: contentTopic.get()) -proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscription) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var 
JsonReader[RestJson], value: var FilterSubscription +) {.gcsafe, raises: [SerializationError, IOError].} = var peerId: Option[string] filterCriteria: Option[seq[FilterTopic]] @@ -165,11 +165,15 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscription) case fieldName of "peerId": if peerId.isSome(): - reader.raiseUnexpectedField("Multiple `peerId` fields found", "FilterSubscription") + reader.raiseUnexpectedField( + "Multiple `peerId` fields found", "FilterSubscription" + ) peerId = some(reader.readValue(string)) of "filterCriteria": if filterCriteria.isSome(): - reader.raiseUnexpectedField("Multiple `filterCriteria` fields found", "FilterSubscription") + reader.raiseUnexpectedField( + "Multiple `filterCriteria` fields found", "FilterSubscription" + ) filterCriteria = some(reader.readValue(seq[FilterTopic])) else: unrecognizedFieldWarning() @@ -180,10 +184,7 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscription) if filterCriteria.isNone(): reader.raiseUnexpectedValue("Field `filterCriteria` are missing") - value = FilterSubscription( - peerId: peerId.get(), - filterCriteria: filterCriteria.get() - ) + value = FilterSubscription(peerId: peerId.get(), filterCriteria: filterCriteria.get()) ## Utility for populating WakuPeers and ProtocolState func `==`*(a, b: ProtocolState): bool {.inline.} = @@ -196,21 +197,13 @@ func `==`*(a, b: WakuPeer): bool {.inline.} = return a.multiaddr == b.multiaddr proc add*(peers: var WakuPeers, multiaddr: string, protocol: string, connected: bool) = - var - peer: WakuPeer = WakuPeer( - multiaddr: multiaddr, - protocols: @[ProtocolState( - protocol: protocol, - connected: connected - )] - ) + var peer: WakuPeer = WakuPeer( + multiaddr: multiaddr, + protocols: @[ProtocolState(protocol: protocol, connected: connected)], + ) let idx = peers.find(peer) if idx < 0: peers.add(peer) else: - peers[idx].protocols.add(ProtocolState( - protocol: protocol, - connected: connected - )) - + peers[idx].protocols.add(ProtocolState(protocol: protocol, connected: connected)) diff --git a/waku/waku_api/rest/debug/client.nim b/waku/waku_api/rest/debug/client.nim index f086fd351..1929b28bf 100644 --- a/waku/waku_api/rest/debug/client.nim +++ b/waku/waku_api/rest/debug/client.nim @@ -4,24 +4,20 @@ else: {.push raises: [].} import - chronicles, - json_serialization, - json_serialization/std/options, - presto/[route, client] -import - ../serdes, - ../responses, - ../rest_serdes, - ./types + chronicles, json_serialization, json_serialization/std/options, presto/[route, client] +import ../serdes, ../responses, ../rest_serdes, ./types export types - logScope: topics = "waku node rest debug_api" # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) -proc debugInfoV1*(): RestResponse[DebugWakuInfo] {.rest, endpoint: "/debug/v1/info", meth: HttpMethod.MethodGet.} +proc debugInfoV1*(): RestResponse[DebugWakuInfo] {. + rest, endpoint: "/debug/v1/info", meth: HttpMethod.MethodGet +.} # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) -proc debugVersionV1*(): RestResponse[string] {.rest, endpoint: "/debug/v1/version", meth: HttpMethod.MethodGet.} +proc debugVersionV1*(): RestResponse[string] {. 
+ rest, endpoint: "/debug/v1/version", meth: HttpMethod.MethodGet +.} diff --git a/waku/waku_api/rest/debug/handlers.nim b/waku/waku_api/rest/debug/handlers.nim index 95ba6f353..52cd60152 100644 --- a/waku/waku_api/rest/debug/handlers.nim +++ b/waku/waku_api/rest/debug/handlers.nim @@ -3,31 +3,22 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - chronicles, - json_serialization, - presto/route -import - ../../../waku_node, - ../responses, - ../serdes, - ./types +import chronicles, json_serialization, presto/route +import ../../../waku_node, ../responses, ../serdes, ./types export types - logScope: topics = "waku node rest debug_api" - const ROUTE_DEBUG_INFOV1* = "/debug/v1/info" proc installDebugInfoV1Handler(router: var RestRouter, node: WakuNode) = - router.api(MethodGet, ROUTE_DEBUG_INFOV1) do () -> RestApiResponse: + router.api(MethodGet, ROUTE_DEBUG_INFOV1) do() -> RestApiResponse: let info = node.info().toDebugWakuInfo() - let resp = RestApiResponse.jsonResponse(info, status=Http200) + let resp = RestApiResponse.jsonResponse(info, status = Http200) if resp.isErr(): - debug "An error occurred while building the json respose", error=resp.error + debug "An error occurred while building the json respose", error = resp.error return RestApiResponse.internalServerError() return resp.get() @@ -35,10 +26,8 @@ proc installDebugInfoV1Handler(router: var RestRouter, node: WakuNode) = const ROUTE_DEBUG_VERSIONV1* = "/debug/v1/version" proc installDebugVersionV1Handler(router: var RestRouter, node: WakuNode) = - router.api(MethodGet, ROUTE_DEBUG_VERSIONV1) do () -> RestApiResponse: - - return RestApiResponse.textResponse(git_version, status=Http200) - + router.api(MethodGet, ROUTE_DEBUG_VERSIONV1) do() -> RestApiResponse: + return RestApiResponse.textResponse(git_version, status = Http200) proc installDebugApiHandlers*(router: var RestRouter, node: WakuNode) = installDebugInfoV1Handler(router, node) diff --git a/waku/waku_api/rest/debug/types.nim b/waku/waku_api/rest/debug/types.nim index 76ea1b9fc..1f5949f75 100644 --- a/waku/waku_api/rest/debug/types.nim +++ b/waku/waku_api/rest/debug/types.nim @@ -3,43 +3,36 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - chronicles, - json_serialization, - json_serialization/std/options -import - ../../../waku_node, - ../serdes +import chronicles, json_serialization, json_serialization/std/options +import ../../../waku_node, ../serdes #### Types -type - DebugWakuInfo* = object - listenAddresses*: seq[string] - enrUri*: Option[string] - +type DebugWakuInfo* = object + listenAddresses*: seq[string] + enrUri*: Option[string] #### Type conversion proc toDebugWakuInfo*(nodeInfo: WakuInfo): DebugWakuInfo = - DebugWakuInfo( - listenAddresses: nodeInfo.listenAddresses, - enrUri: some(nodeInfo.enrUri) - ) - + DebugWakuInfo( + listenAddresses: nodeInfo.listenAddresses, enrUri: some(nodeInfo.enrUri) + ) #### Serialization and deserialization -proc writeValue*(writer: var JsonWriter[RestJson], value: DebugWakuInfo) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: DebugWakuInfo +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("listenAddresses", value.listenAddresses) if value.enrUri.isSome(): writer.writeField("enrUri", value.enrUri.get()) writer.endRecord() -proc readValue*(reader: var JsonReader[RestJson], value: var DebugWakuInfo) - {.raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader[RestJson], value: var DebugWakuInfo +) 
{.raises: [SerializationError, IOError].} = var listenAddresses: Option[seq[string]] enrUri: Option[string] @@ -48,7 +41,9 @@ proc readValue*(reader: var JsonReader[RestJson], value: var DebugWakuInfo) case fieldName of "listenAddresses": if listenAddresses.isSome(): - reader.raiseUnexpectedField("Multiple `listenAddresses` fields found", "DebugWakuInfo") + reader.raiseUnexpectedField( + "Multiple `listenAddresses` fields found", "DebugWakuInfo" + ) listenAddresses = some(reader.readValue(seq[string])) of "enrUri": if enrUri.isSome(): @@ -60,7 +55,4 @@ proc readValue*(reader: var JsonReader[RestJson], value: var DebugWakuInfo) if listenAddresses.isNone(): reader.raiseUnexpectedValue("Field `listenAddresses` is missing") - value = DebugWakuInfo( - listenAddresses: listenAddresses.get, - enrUri: enrUri - ) + value = DebugWakuInfo(listenAddresses: listenAddresses.get, enrUri: enrUri) diff --git a/waku/waku_api/rest/filter/client.nim b/waku/waku_api/rest/filter/client.nim index 7be43153b..abd530c41 100644 --- a/waku/waku_api/rest/filter/client.nim +++ b/waku/waku_api/rest/filter/client.nim @@ -25,42 +25,58 @@ export types logScope: topics = "waku node rest client v2" -proc encodeBytes*(value: FilterSubscribeRequest, - contentType: string): RestResult[seq[byte]] = +proc encodeBytes*( + value: FilterSubscribeRequest, contentType: string +): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) -proc encodeBytes*(value: FilterSubscriberPing, - contentType: string): RestResult[seq[byte]] = +proc encodeBytes*( + value: FilterSubscriberPing, contentType: string +): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) -proc encodeBytes*(value: FilterUnsubscribeRequest, - contentType: string): RestResult[seq[byte]] = +proc encodeBytes*( + value: FilterUnsubscribeRequest, contentType: string +): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) -proc encodeBytes*(value: FilterUnsubscribeAllRequest, - contentType: string): RestResult[seq[byte]] = +proc encodeBytes*( + value: FilterUnsubscribeAllRequest, contentType: string +): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) -proc filterSubscriberPing*(requestId: string): - RestResponse[FilterSubscriptionResponse] - {.rest, endpoint: "/filter/v2/subscriptions/{requestId}", meth: HttpMethod.MethodGet.} +proc filterSubscriberPing*( + requestId: string +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions/{requestId}", meth: HttpMethod.MethodGet +.} -proc filterPostSubscriptions*(body: FilterSubscribeRequest): - RestResponse[FilterSubscriptionResponse] - {.rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodPost.} +proc filterPostSubscriptions*( + body: FilterSubscribeRequest +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodPost +.} -proc filterPutSubscriptions*(body: FilterSubscribeRequest): - RestResponse[FilterSubscriptionResponse] - {.rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodPut.} +proc filterPutSubscriptions*( + body: FilterSubscribeRequest +): RestResponse[FilterSubscriptionResponse] {. 
+ rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodPut +.} -proc filterDeleteSubscriptions*(body: FilterUnsubscribeRequest): - RestResponse[FilterSubscriptionResponse] - {.rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodDelete.} +proc filterDeleteSubscriptions*( + body: FilterUnsubscribeRequest +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodDelete +.} -proc filterDeleteAllSubscriptions*(body: FilterUnsubscribeAllRequest): - RestResponse[FilterSubscriptionResponse] - {.rest, endpoint: "/filter/v2/subscriptions/all", meth: HttpMethod.MethodDelete.} +proc filterDeleteAllSubscriptions*( + body: FilterUnsubscribeAllRequest +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions/all", meth: HttpMethod.MethodDelete +.} -proc filterGetMessagesV1*(contentTopic: string): - RestResponse[FilterGetMessagesResponse] - {.rest, endpoint: "/filter/v2/messages/{contentTopic}", meth: HttpMethod.MethodGet.} +proc filterGetMessagesV1*( + contentTopic: string +): RestResponse[FilterGetMessagesResponse] {. + rest, endpoint: "/filter/v2/messages/{contentTopic}", meth: HttpMethod.MethodGet +.} diff --git a/waku/waku_api/rest/filter/handlers.nim b/waku/waku_api/rest/filter/handlers.nim index af2f77b25..3d5966b5d 100644 --- a/waku/waku_api/rest/filter/handlers.nim +++ b/waku/waku_api/rest/filter/handlers.nim @@ -40,126 +40,159 @@ const ROUTE_FILTER_SUBSCRIPTIONS* = "/filter/v2/subscriptions" const ROUTE_FILTER_ALL_SUBSCRIPTIONS* = "/filter/v2/subscriptions/all" -func decodeRequestBody[T](contentBody: Option[ContentBody]) : Result[T, RestApiResponse] = +func decodeRequestBody[T]( + contentBody: Option[ContentBody] +): Result[T, RestApiResponse] = if contentBody.isNone(): return err(RestApiResponse.badRequest("Missing content body")) let reqBodyContentType = MediaType.init($contentBody.get().contentType) if reqBodyContentType != MIMETYPE_JSON: - return err(RestApiResponse.badRequest("Wrong Content-Type, expected application/json")) + return + err(RestApiResponse.badRequest("Wrong Content-Type, expected application/json")) let reqBodyData = contentBody.get().data let requestResult = decodeFromJsonBytes(T, reqBodyData) if requestResult.isErr(): - return err(RestApiResponse.badRequest("Invalid content body, could not decode. " & - $requestResult.error)) + return err( + RestApiResponse.badRequest( + "Invalid content body, could not decode. 
" & $requestResult.error + ) + ) return ok(requestResult.get()) proc getErrorCause(err: filter_protocol_type.FilterSubscribeError): string = ## Retrieve proper error cause of FilterSubscribeError - due stringify make some parts of text double - case err.kind: + case err.kind of FilterSubscribeErrorKind.PEER_DIAL_FAILURE: err.address of FilterSubscribeErrorKind.BAD_RESPONSE, FilterSubscribeErrorKind.BAD_REQUEST, - FilterSubscribeErrorKind.NOT_FOUND, FilterSubscribeErrorKind.SERVICE_UNAVAILABLE: + FilterSubscribeErrorKind.NOT_FOUND, FilterSubscribeErrorKind.SERVICE_UNAVAILABLE: err.cause of FilterSubscribeErrorKind.UNKNOWN: "UNKNOWN" -proc convertResponse(T: type FilterSubscriptionResponse, requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeResult): T = +proc convertResponse( + T: type FilterSubscriptionResponse, + requestId: string, + protocolClientRes: filter_protocol_type.FilterSubscribeResult, +): T = ## Properly convert filter protocol's response to rest response if protocolClientRes.isErr(): return FilterSubscriptionResponse( - requestId: requestId, - statusCode: uint32(protocolClientRes.error().kind), - statusDesc: getErrorCause(protocolClientRes.error()) - ) + requestId: requestId, + statusCode: uint32(protocolClientRes.error().kind), + statusDesc: getErrorCause(protocolClientRes.error()), + ) else: - return FilterSubscriptionResponse( - requestId: requestId, - statusCode: 0, - statusDesc: "" - ) + return + FilterSubscriptionResponse(requestId: requestId, statusCode: 0, statusDesc: "") -proc convertResponse(T: type FilterSubscriptionResponse, requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeError): T = +proc convertResponse( + T: type FilterSubscriptionResponse, + requestId: string, + protocolClientRes: filter_protocol_type.FilterSubscribeError, +): T = ## Properly convert filter protocol's response to rest response in case of error return FilterSubscriptionResponse( - requestId: requestId, - statusCode: uint32(protocolClientRes.kind), - statusDesc: $protocolClientRes - ) + requestId: requestId, + statusCode: uint32(protocolClientRes.kind), + statusDesc: $protocolClientRes, + ) -proc convertErrorKindToHttpStatus(kind: filter_protocol_type.FilterSubscribeErrorKind): HttpCode = +proc convertErrorKindToHttpStatus( + kind: filter_protocol_type.FilterSubscribeErrorKind +): HttpCode = ## Filter protocol's error code is not directly convertible to HttpCodes hence this converter - case kind: - of filter_protocol_type.FilterSubscribeErrorKind.UNKNOWN: - return Http200 - of filter_protocol_type.FilterSubscribeErrorKind.PEER_DIAL_FAILURE: - return Http504 #gateway timout - of filter_protocol_type.FilterSubscribeErrorKind.BAD_RESPONSE: - return Http500 # internal server error - of filter_protocol_type.FilterSubscribeErrorKind.BAD_REQUEST: - return Http400 - of filter_protocol_type.FilterSubscribeErrorKind.NOT_FOUND: - return Http404 - of filter_protocol_type.FilterSubscribeErrorKind.SERVICE_UNAVAILABLE: - return Http503 - else: - return Http500 + case kind + of filter_protocol_type.FilterSubscribeErrorKind.UNKNOWN: + return Http200 + of filter_protocol_type.FilterSubscribeErrorKind.PEER_DIAL_FAILURE: + return Http504 #gateway timout + of filter_protocol_type.FilterSubscribeErrorKind.BAD_RESPONSE: + return Http500 # internal server error + of filter_protocol_type.FilterSubscribeErrorKind.BAD_REQUEST: + return Http400 + of filter_protocol_type.FilterSubscribeErrorKind.NOT_FOUND: + return Http404 + of 
filter_protocol_type.FilterSubscribeErrorKind.SERVICE_UNAVAILABLE:
+    return Http503
+  else:
+    return Http500

-proc makeRestResponse(requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeResult): RestApiResponse =
-  let filterSubscriptionResponse = FilterSubscriptionResponse.convertResponse(requestId, protocolClientRes)
+proc makeRestResponse(
+    requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeResult
+): RestApiResponse =
+  let filterSubscriptionResponse =
+    FilterSubscriptionResponse.convertResponse(requestId, protocolClientRes)

-  var httpStatus : HttpCode = Http200
+  var httpStatus: HttpCode = Http200

   if protocolClientRes.isErr():
-    httpStatus = convertErrorKindToHttpStatus(protocolClientRes.error().kind) # TODO: convert status codes!
+    httpStatus = convertErrorKindToHttpStatus(protocolClientRes.error().kind)
+      # TODO: convert status codes!

-  let resp = RestApiResponse.jsonResponse(filterSubscriptionResponse, status=httpStatus)
+  let resp =
+    RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus)
   if resp.isErr():
-    error "An error ocurred while building the json respose: ", error=resp.error
-    return RestApiResponse.internalServerError(fmt("An error ocurred while building the json respose: {resp.error}"))
+    error "An error occurred while building the json response: ", error = resp.error
+    return RestApiResponse.internalServerError(
+      fmt("An error occurred while building the json response: {resp.error}")
+    )

   return resp.get()

-proc makeRestResponse(requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeError): RestApiResponse =
-  let filterSubscriptionResponse = FilterSubscriptionResponse.convertResponse(requestId, protocolClientRes)
+proc makeRestResponse(
+    requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeError
+): RestApiResponse =
+  let filterSubscriptionResponse =
+    FilterSubscriptionResponse.convertResponse(requestId, protocolClientRes)

-  let httpStatus = convertErrorKindToHttpStatus(protocolClientRes.kind) # TODO: convert status codes!
+  let httpStatus = convertErrorKindToHttpStatus(protocolClientRes.kind)
+    # TODO: convert status codes!
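# Quick spot-checks of the error-kind mapping above, written as a sketch
# (assumes this module's imports; expected values taken from the case
# branches in convertErrorKindToHttpStatus):
when isMainModule:
  doAssert convertErrorKindToHttpStatus(
    filter_protocol_type.FilterSubscribeErrorKind.BAD_REQUEST
  ) == Http400
  doAssert convertErrorKindToHttpStatus(
    filter_protocol_type.FilterSubscribeErrorKind.NOT_FOUND
  ) == Http404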
-  let resp = RestApiResponse.jsonResponse(filterSubscriptionResponse, status=httpStatus)
+  let resp =
+    RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus)
   if resp.isErr():
-    error "An error ocurred while building the json respose: ", error=resp.error
-    return RestApiResponse.internalServerError(fmt("An error ocurred while building the json respose: {resp.error}"))
+    error "An error occurred while building the json response: ", error = resp.error
+    return RestApiResponse.internalServerError(
+      fmt("An error occurred while building the json response: {resp.error}")
+    )

   return resp.get()

 const NoPeerNoDiscoError = FilterSubscribeError.serviceUnavailable(
-  "No suitable service peer & no discovery method")
+  "No suitable service peer & no discovery method"
+)

-const NoPeerNoneFoundError = FilterSubscribeError.serviceUnavailable(
-  "No suitable service peer & none discovered")
+const NoPeerNoneFoundError =
+  FilterSubscribeError.serviceUnavailable("No suitable service peer & none discovered")

 proc filterPostPutSubscriptionRequestHandler(
-    node: WakuNode,
-    contentBody: Option[ContentBody],
-    cache: MessageCache,
-    discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler),
-  ): Future[RestApiResponse] {.async.} =
+    node: WakuNode,
+    contentBody: Option[ContentBody],
+    cache: MessageCache,
+    discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler),
+): Future[RestApiResponse] {.async.} =
   ## handles any filter subscription requests, adds or modifies.

   let decodedBody = decodeRequestBody[FilterSubscribeRequest](contentBody)

   if decodedBody.isErr():
-    return makeRestResponse("unknown", FilterSubscribeError.badRequest(fmt("Failed to decode request: {decodedBody.error}")))
+    return makeRestResponse(
+      "unknown",
+      FilterSubscribeError.badRequest(
+        fmt("Failed to decode request: {decodedBody.error}")
+      ),
+    )

   let req: FilterSubscribeRequest = decodedBody.value()

@@ -177,7 +210,10 @@ proc filterPostPutSubscriptionRequestHandler(
     if not await subFut.withTimeout(futTimeoutForSubscriptionProcessing):
       error "Failed to subscribe to contentFilters do to timeout!"
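# Standalone chronos sketch of the timeout guard these handlers use:
# withTimeout returns false when the deadline fires before the future
# completes, and read() is only safe once the future has actually
# completed. `slowOp` is illustrative only.
import chronos

proc slowOp(): Future[int] {.async.} =
  await sleepAsync(20.milliseconds)
  return 7

proc demo() {.async.} =
  let fut = slowOp()
  if not await fut.withTimeout(5.milliseconds):
    echo "timed out; the future may still complete later"
  else:
    echo "result: ", fut.read()

waitFor demo()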
- return makeRestResponse(req.requestId, FilterSubscribeError.serviceUnavailable("Subscription request timed out")) + return makeRestResponse( + req.requestId, + FilterSubscribeError.serviceUnavailable("Subscription request timed out"), + ) # Successfully subscribed to all content filters for cTopic in req.contentFilters: @@ -186,44 +222,58 @@ proc filterPostPutSubscriptionRequestHandler( return makeRestResponse(req.requestId, subFut.read()) proc installFilterPostSubscriptionsHandler( - router: var RestRouter, - node: WakuNode, - cache: MessageCache, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = - router.api(MethodPost, ROUTE_FILTER_SUBSCRIPTIONS) do (contentBody: Option[ContentBody]) -> RestApiResponse: + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPost, ROUTE_FILTER_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Subscribes a node to a list of contentTopics of a pubsubTopic debug "post", ROUTE_FILTER_SUBSCRIPTIONS, contentBody - return await filterPostPutSubscriptionRequestHandler(node, contentBody, cache, discHandler) + return await filterPostPutSubscriptionRequestHandler( + node, contentBody, cache, discHandler + ) proc installFilterPutSubscriptionsHandler( - router: var RestRouter, - node: WakuNode, - cache: MessageCache, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = - router.api(MethodPut, ROUTE_FILTER_SUBSCRIPTIONS) do (contentBody: Option[ContentBody]) -> RestApiResponse: + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPut, ROUTE_FILTER_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Modifies a subscribtion of a node to a list of contentTopics of a pubsubTopic debug "put", ROUTE_FILTER_SUBSCRIPTIONS, contentBody - return await filterPostPutSubscriptionRequestHandler(node, contentBody, cache, discHandler) + return await filterPostPutSubscriptionRequestHandler( + node, contentBody, cache, discHandler + ) proc installFilterDeleteSubscriptionsHandler( - router: var RestRouter, - node: WakuNode, - cache: MessageCache, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = - router.api(MethodDelete, ROUTE_FILTER_SUBSCRIPTIONS) do (contentBody: Option[ContentBody]) -> RestApiResponse: + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodDelete, ROUTE_FILTER_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Subscribes a node to a list of contentTopics of a PubSub topic debug "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody let decodedBody = decodeRequestBody[FilterUnsubscribeRequest](contentBody) if decodedBody.isErr(): - return makeRestResponse("unknown", - FilterSubscribeError.badRequest(fmt("Failed to decode request: {decodedBody.error}"))) + return makeRestResponse( + "unknown", + FilterSubscribeError.badRequest( + fmt("Failed to decode request: {decodedBody.error}") + ), + ) let req: FilterUnsubscribeRequest = decodedBody.value() @@ -241,9 +291,12 @@ proc installFilterDeleteSubscriptionsHandler( if not await unsubFut.withTimeout(futTimeoutForSubscriptionProcessing): error "Failed to unsubscribe from contentFilters due to timeout!" 
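# The route-registration shape shared by all of these installers, reduced
# to a minimal sketch; the "/health" path and text body are illustrative:
proc installHealthHandler(router: var RestRouter) =
  router.api(MethodGet, "/health") do() -> RestApiResponse:
    return RestApiResponse.textResponse("ok", status = Http200)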
- return makeRestResponse(req.requestId, - FilterSubscribeError.serviceUnavailable( - "Failed to unsubscribe from contentFilters due to timeout!")) + return makeRestResponse( + req.requestId, + FilterSubscribeError.serviceUnavailable( + "Failed to unsubscribe from contentFilters due to timeout!" + ), + ) # Successfully subscribed to all content filters for cTopic in req.contentFilters: @@ -253,20 +306,26 @@ proc installFilterDeleteSubscriptionsHandler( return makeRestResponse(req.requestId, unsubFut.read()) proc installFilterDeleteAllSubscriptionsHandler( - router: var RestRouter, - node: WakuNode, - cache: MessageCache, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = - router.api(MethodDelete, ROUTE_FILTER_ALL_SUBSCRIPTIONS) do (contentBody: Option[ContentBody]) -> RestApiResponse: + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodDelete, ROUTE_FILTER_ALL_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Subscribes a node to a list of contentTopics of a PubSub topic debug "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody let decodedBody = decodeRequestBody[FilterUnsubscribeAllRequest](contentBody) if decodedBody.isErr(): - return makeRestResponse("unknown", - FilterSubscribeError.badRequest(fmt("Failed to decode request: {decodedBody.error}"))) + return makeRestResponse( + "unknown", + FilterSubscribeError.badRequest( + fmt("Failed to decode request: {decodedBody.error}") + ), + ) let req: FilterUnsubscribeAllRequest = decodedBody.value() @@ -284,9 +343,12 @@ proc installFilterDeleteAllSubscriptionsHandler( if not await unsubFut.withTimeout(futTimeoutForSubscriptionProcessing): error "Failed to unsubscribe from contentFilters due to timeout!" - return makeRestResponse(req.requestId, - FilterSubscribeError.serviceUnavailable( - "Failed to unsubscribe from all contentFilters due to timeout!")) + return makeRestResponse( + req.requestId, + FilterSubscribeError.serviceUnavailable( + "Failed to unsubscribe from all contentFilters due to timeout!" + ), + ) cache.reset() @@ -296,11 +358,13 @@ proc installFilterDeleteAllSubscriptionsHandler( const ROUTE_FILTER_SUBSCRIBER_PING* = "/filter/v2/subscriptions/{requestId}" proc installFilterPingSubscriberHandler( - router: var RestRouter, - node: WakuNode, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = - router.api(MethodGet, ROUTE_FILTER_SUBSCRIBER_PING) do (requestId: string) -> RestApiResponse: + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodGet, ROUTE_FILTER_SUBSCRIBER_PING) do( + requestId: string + ) -> RestApiResponse: ## Checks if a node has valid subscription or not. debug "get", ROUTE_FILTER_SUBSCRIBER_PING, requestId @@ -318,54 +382,57 @@ proc installFilterPingSubscriberHandler( if not await pingFutRes.withTimeout(futTimeoutForSubscriptionProcessing): error "Failed to ping filter service peer due to timeout!" 
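# Client-side sketch of exercising the ping route via the generated REST
# client in filter/client.nim; the RestClientRef setup and the requestId
# value are assumptions, and the RestResponse field names follow nim-presto:
proc pingOnce(client: RestClientRef): Future[void] {.async.} =
  let resp = await client.filterSubscriberPing("0123456789abcdef")
  if resp.data.statusCode != 0:
    echo "ping failed: ", resp.data.statusDesc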
- return makeRestResponse(requestId.get(), - FilterSubscribeError.serviceUnavailable("Ping timed out")) + return makeRestResponse( + requestId.get(), FilterSubscribeError.serviceUnavailable("Ping timed out") + ) return makeRestResponse(requestId.get(), pingFutRes.read()) const ROUTE_FILTER_MESSAGES* = "/filter/v2/messages/{contentTopic}" -proc installFilterGetMessagesHandler(router: var RestRouter, - node: WakuNode, - cache: MessageCache) = - - - let pushHandler : FilterPushHandler = proc (pubsubTopic: PubsubTopic, - msg: WakuMessage) - {.async, gcsafe, closure.} = +proc installFilterGetMessagesHandler( + router: var RestRouter, node: WakuNode, cache: MessageCache +) = + let pushHandler: FilterPushHandler = proc( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = cache.addMessage(pubsubTopic, msg) node.wakuFilterClient.registerPushHandler(pushHandler) - router.api(MethodGet, ROUTE_FILTER_MESSAGES) do (contentTopic: string) -> RestApiResponse: + router.api(MethodGet, ROUTE_FILTER_MESSAGES) do( + contentTopic: string + ) -> RestApiResponse: ## Returns all WakuMessages received on a specified content topic since the ## last time this method was called ## TODO: ability to specify a return message limit, maybe use cursor to control paging response. - debug "get", ROUTE_FILTER_MESSAGES, contentTopic=contentTopic + debug "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic if contentTopic.isErr(): return RestApiResponse.badRequest("Missing contentTopic") let contentTopic = contentTopic.get() - let msgRes = cache.getAutoMessages(contentTopic, clear=true) + let msgRes = cache.getAutoMessages(contentTopic, clear = true) if msgRes.isErr(): return RestApiResponse.badRequest("Not subscribed to topic: " & contentTopic) let data = FilterGetMessagesResponse(msgRes.get().map(toFilterWakuMessage)) - let resp = RestApiResponse.jsonResponse(data, status=Http200) + let resp = RestApiResponse.jsonResponse(data, status = Http200) if resp.isErr(): - error "An error ocurred while building the json respose: ", error=resp.error - return RestApiResponse.internalServerError("An error ocurred while building the json respose") + error "An error ocurred while building the json respose: ", error = resp.error + return RestApiResponse.internalServerError( + "An error ocurred while building the json respose" + ) return resp.get() proc installFilterRestApiHandlers*( - router: var RestRouter, - node: WakuNode, - cache: MessageCache, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = installFilterPingSubscriberHandler(router, node, discHandler) installFilterPostSubscriptionsHandler(router, node, cache, discHandler) installFilterPutSubscriptionsHandler(router, node, cache, discHandler) diff --git a/waku/waku_api/rest/filter/legacy_client.nim b/waku/waku_api/rest/filter/legacy_client.nim index b8d7168e4..41fb55f55 100644 --- a/waku/waku_api/rest/filter/legacy_client.nim +++ b/waku/waku_api/rest/filter/legacy_client.nim @@ -10,33 +10,35 @@ import json_serialization, json_serialization/std/options, presto/[route, client, common] -import - ../../../waku_core, - ../serdes, - ../responses, - ../rest_serdes, - ./types +import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types export types logScope: topics = "waku node rest client v1" -proc encodeBytes*(value: FilterLegacySubscribeRequest, - contentType: string): 
RestResult[seq[byte]] = +proc encodeBytes*( + value: FilterLegacySubscribeRequest, contentType: string +): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) -proc filterPostSubscriptionsV1*(body: FilterLegacySubscribeRequest): - RestResponse[string] - {.rest, endpoint: "/filter/v1/subscriptions", meth: HttpMethod.MethodPost.} +proc filterPostSubscriptionsV1*( + body: FilterLegacySubscribeRequest +): RestResponse[string] {. + rest, endpoint: "/filter/v1/subscriptions", meth: HttpMethod.MethodPost +.} # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) -proc filterDeleteSubscriptionsV1*(body: FilterLegacySubscribeRequest): - RestResponse[string] - {.rest, endpoint: "/filter/v1/subscriptions", meth: HttpMethod.MethodDelete.} +proc filterDeleteSubscriptionsV1*( + body: FilterLegacySubscribeRequest +): RestResponse[string] {. + rest, endpoint: "/filter/v1/subscriptions", meth: HttpMethod.MethodDelete +.} # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) -proc filterGetMessagesV1*(contentTopic: string): - RestResponse[FilterGetMessagesResponse] - {.rest, endpoint: "/filter/v1/messages/{contentTopic}", meth: HttpMethod.MethodGet.} +proc filterGetMessagesV1*( + contentTopic: string +): RestResponse[FilterGetMessagesResponse] {. + rest, endpoint: "/filter/v1/messages/{contentTopic}", meth: HttpMethod.MethodGet +.} diff --git a/waku/waku_api/rest/filter/legacy_handlers.nim b/waku/waku_api/rest/filter/legacy_handlers.nim index c1c2ae91c..f926e7027 100644 --- a/waku/waku_api/rest/filter/legacy_handlers.nim +++ b/waku/waku_api/rest/filter/legacy_handlers.nim @@ -34,32 +34,40 @@ const futTimeoutForSubscriptionProcessing* = 5.seconds const ROUTE_FILTER_SUBSCRIPTIONSV1* = "/filter/v1/subscriptions" -func decodeRequestBody[T](contentBody: Option[ContentBody]) : Result[T, RestApiResponse] = +func decodeRequestBody[T]( + contentBody: Option[ContentBody] +): Result[T, RestApiResponse] = if contentBody.isNone(): return err(RestApiResponse.badRequest("Missing content body")) let reqBodyContentType = MediaType.init($contentBody.get().contentType) if reqBodyContentType != MIMETYPE_JSON: - return err(RestApiResponse.badRequest("Wrong Content-Type, expected application/json")) + return + err(RestApiResponse.badRequest("Wrong Content-Type, expected application/json")) let reqBodyData = contentBody.get().data let requestResult = decodeFromJsonBytes(T, reqBodyData) if requestResult.isErr(): - return err(RestApiResponse.badRequest("Invalid content body, could not decode. " & - $requestResult.error)) + return err( + RestApiResponse.badRequest( + "Invalid content body, could not decode. 
" & $requestResult.error + ) + ) return ok(requestResult.get()) -proc installFilterV1PostSubscriptionsV1Handler*(router: var RestRouter, - node: WakuNode, - cache: MessageCache) = - let pushHandler: FilterPushHandler = - proc(pubsubTopic: PubsubTopic, - msg: WakuMessage) {.async, gcsafe, closure.} = - cache.addMessage(pubsubTopic, msg) +proc installFilterV1PostSubscriptionsV1Handler*( + router: var RestRouter, node: WakuNode, cache: MessageCache +) = + let pushHandler: FilterPushHandler = proc( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = + cache.addMessage(pubsubTopic, msg) - router.api(MethodPost, ROUTE_FILTER_SUBSCRIPTIONSV1) do (contentBody: Option[ContentBody]) -> RestApiResponse: + router.api(MethodPost, ROUTE_FILTER_SUBSCRIPTIONSV1) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Subscribes a node to a list of contentTopics of a pubsubTopic debug "post", ROUTE_FILTER_SUBSCRIPTIONSV1, contentBody @@ -75,14 +83,14 @@ proc installFilterV1PostSubscriptionsV1Handler*(router: var RestRouter, if peerOpt.isNone(): return RestApiResponse.internalServerError("No suitable remote filter peers") - let subFut = node.legacyFilterSubscribe(req.pubsubTopic, - req.contentFilters, - pushHandler, - peerOpt.get()) + let subFut = node.legacyFilterSubscribe( + req.pubsubTopic, req.contentFilters, pushHandler, peerOpt.get() + ) if not await subFut.withTimeout(futTimeoutForSubscriptionProcessing): error "Failed to subscribe to contentFilters do to timeout!" - return RestApiResponse.internalServerError("Failed to subscribe to contentFilters") + return + RestApiResponse.internalServerError("Failed to subscribe to contentFilters") # Successfully subscribed to all content filters for cTopic in req.contentFilters: @@ -90,10 +98,12 @@ proc installFilterV1PostSubscriptionsV1Handler*(router: var RestRouter, return RestApiResponse.ok() -proc installFilterV1DeleteSubscriptionsV1Handler*(router: var RestRouter, - node: WakuNode, - cache: MessageCache) = - router.api(MethodDelete, ROUTE_FILTER_SUBSCRIPTIONSV1) do (contentBody: Option[ContentBody]) -> RestApiResponse: +proc installFilterV1DeleteSubscriptionsV1Handler*( + router: var RestRouter, node: WakuNode, cache: MessageCache +) = + router.api(MethodDelete, ROUTE_FILTER_SUBSCRIPTIONSV1) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Subscribes a node to a list of contentTopics of a PubSub topic debug "delete", ROUTE_FILTER_SUBSCRIPTIONSV1, contentBody @@ -109,10 +119,12 @@ proc installFilterV1DeleteSubscriptionsV1Handler*(router: var RestRouter, if peerOpt.isNone(): return RestApiResponse.internalServerError("No suitable remote filter peers") - let unsubFut = node.legacyFilterUnsubscribe(req.pubsubTopic, req.contentFilters, peerOpt.get()) + let unsubFut = + node.legacyFilterUnsubscribe(req.pubsubTopic, req.contentFilters, peerOpt.get()) if not await unsubFut.withTimeout(futTimeoutForSubscriptionProcessing): error "Failed to unsubscribe from contentFilters due to timeout!" 
diff --git a/waku/waku_api/rest/filter/types.nim b/waku/waku_api/rest/filter/types.nim
index e106ee059..e37b5c4a8 100644
--- a/waku/waku_api/rest/filter/types.nim
+++ b/waku/waku_api/rest/filter/types.nim
@@ -10,48 +10,45 @@ import
 json_serialization/std/options,
 presto/[route, client, common],
 libp2p/peerid
-import
- ../../../common/base64,
- ../../../waku_core,
- ../serdes
+import ../../../common/base64, ../../../waku_core, ../serdes

 #### Types

 type FilterWakuMessage* = object
- payload*: Base64String
- contentTopic*: Option[ContentTopic]
- version*: Option[Natural]
- timestamp*: Option[int64]
- meta*: Option[Base64String]
- ephemeral*: Option[bool]
+ payload*: Base64String
+ contentTopic*: Option[ContentTopic]
+ version*: Option[Natural]
+ timestamp*: Option[int64]
+ meta*: Option[Base64String]
+ ephemeral*: Option[bool]

 type FilterGetMessagesResponse* = seq[FilterWakuMessage]

 type FilterLegacySubscribeRequest* = object
- # 
Subscription request for legacy filter support + pubsubTopic*: Option[PubSubTopic] + contentFilters*: seq[ContentTopic] type FilterSubscriberPing* = object - requestId*: string + requestId*: string type FilterSubscribeRequest* = object - requestId*: string - pubsubTopic*: Option[PubSubTopic] - contentFilters*: seq[ContentTopic] + requestId*: string + pubsubTopic*: Option[PubSubTopic] + contentFilters*: seq[ContentTopic] type FilterUnsubscribeRequest* = object - requestId*: string - pubsubTopic*: Option[PubSubTopic] - contentFilters*: seq[ContentTopic] + requestId*: string + pubsubTopic*: Option[PubSubTopic] + contentFilters*: seq[ContentTopic] type FilterUnsubscribeAllRequest* = object - requestId*: string + requestId*: string type FilterSubscriptionResponse* = object - requestId*: string - statusCode*: uint32 - statusDesc*: string + requestId*: string + statusCode*: uint32 + statusDesc*: string #### Type conversion @@ -61,8 +58,13 @@ proc toFilterWakuMessage*(msg: WakuMessage): FilterWakuMessage = contentTopic: some(msg.contentTopic), version: some(Natural(msg.version)), timestamp: some(msg.timestamp), - meta: if msg.meta.len > 0: some(base64.encode(msg.meta)) else: none(Base64String), - ephemeral: some(msg.ephemeral) + meta: + if msg.meta.len > 0: + some(base64.encode(msg.meta)) + else: + none(Base64String) + , + ephemeral: some(msg.ephemeral), ) proc toWakuMessage*(msg: FilterWakuMessage, version = 0): Result[WakuMessage, string] = @@ -74,13 +76,22 @@ proc toWakuMessage*(msg: FilterWakuMessage, version = 0): Result[WakuMessage, st meta = ?msg.meta.get(Base64String("")).decode() ephemeral = msg.ephemeral.get(false) - ok(WakuMessage(payload: payload, contentTopic: contentTopic, version: version, - timestamp: timestamp, meta: meta, ephemeral: ephemeral)) + ok( + WakuMessage( + payload: payload, + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + meta: meta, + ephemeral: ephemeral, + ) + ) #### Serialization and deserialization -proc writeValue*(writer: var JsonWriter[RestJson], value: FilterWakuMessage) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterWakuMessage +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("payload", value.payload) if value.contentTopic.isSome(): @@ -95,23 +106,26 @@ proc writeValue*(writer: var JsonWriter[RestJson], value: FilterWakuMessage) writer.writeField("ephemeral", value.ephemeral.get()) writer.endRecord() -proc writeValue*(writer: var JsonWriter, value: FilterLegacySubscribeRequest) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter, value: FilterLegacySubscribeRequest +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("pubsubTopic", value.pubsubTopic) writer.writeField("contentFilters", value.contentFilters) writer.endRecord() -proc writeValue*(writer: var JsonWriter[RestJson], value: FilterSubscriptionResponse) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterSubscriptionResponse +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("requestId", value.requestId) writer.writeField("statusCode", value.statusCode) writer.writeField("statusDesc", value.statusDesc) writer.endRecord() -proc writeValue*(writer: var JsonWriter[RestJson], value: FilterSubscribeRequest) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterSubscribeRequest +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("requestId", value.requestId) if 
value.pubsubTopic.isSome():
@@ -119,14 +133,16 @@ proc writeValue*(writer: var JsonWriter[RestJson], value: FilterSubscribeRequest
 writer.writeField("contentFilters", value.contentFilters)
 writer.endRecord()

-proc writeValue*(writer: var JsonWriter[RestJson], value: FilterSubscriberPing)
- {.raises: [IOError].} =
+proc writeValue*(
+ writer: var JsonWriter[RestJson], value: FilterSubscriberPing
+) {.raises: [IOError].} =
 writer.beginRecord()
 writer.writeField("requestId", value.requestId)
 writer.endRecord()

-proc writeValue*(writer: var JsonWriter[RestJson], value: FilterUnsubscribeRequest)
- {.raises: [IOError].} =
+proc writeValue*(
+ writer: var JsonWriter[RestJson], value: FilterUnsubscribeRequest
+) {.raises: [IOError].} =
 writer.beginRecord()
 writer.writeField("requestId", value.requestId)
 if value.pubsubTopic.isSome():
@@ -134,14 +150,16 @@ proc writeValue*(writer: var JsonWriter[RestJson], value: FilterUnsubscribeReque
 writer.writeField("contentFilters", value.contentFilters)
 writer.endRecord()

-proc writeValue*(writer: var JsonWriter[RestJson], value: FilterUnsubscribeAllRequest)
- {.raises: [IOError].} =
+proc writeValue*(
+ writer: var JsonWriter[RestJson], value: FilterUnsubscribeAllRequest
+) {.raises: [IOError].} =
 writer.beginRecord()
 writer.writeField("requestId", value.requestId)
 writer.endRecord()

-proc readValue*(reader: var JsonReader[RestJson], value: var FilterWakuMessage)
- {.raises: [SerializationError, IOError].} =
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var FilterWakuMessage
+) {.raises: [SerializationError, IOError].} =
 var
 payload = none(Base64String)
 contentTopic = none(ContentTopic)
@@ -154,8 +172,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterWakuMessage)
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "FilterWakuMessage")

 case fieldName
@@ -186,8 +207,9 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterWakuMessage)
 ephemeral: ephemeral,
 )

-proc readValue*(reader: var JsonReader[RestJson], value: var FilterLegacySubscribeRequest)
- {.raises: [SerializationError, IOError].} =
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var FilterLegacySubscribeRequest
+) {.raises: [SerializationError, IOError].} =
 var
 pubsubTopic = none(PubsubTopic)
 contentFilters = none(seq[ContentTopic])
@@ -196,8 +218,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterLegacySubscri
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "FilterLegacySubscribeRequest")

 case fieldName
@@ -215,21 +240,29 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterLegacySubscri
 reader.raiseUnexpectedValue("Field `contentFilters` is empty")

 value = FilterLegacySubscribeRequest(
- pubsubTopic: if pubsubTopic.isNone() or pubsubTopic.get() == "": none(string) else: some(pubsubTopic.get()),
- contentFilters: 
contentFilters.get()
+ pubsubTopic:
+ if pubsubTopic.isNone() or pubsubTopic.get() == "":
+ none(string)
+ else:
+ some(pubsubTopic.get())
+ ,
+ contentFilters: contentFilters.get(),
 )

-proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscriberPing)
- {.raises: [SerializationError, IOError].} =
- var
- requestId = none(string)
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var FilterSubscriberPing
+) {.raises: [SerializationError, IOError].} =
+ var requestId = none(string)

 var keys = initHashSet[string]()
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "FilterSubscriberPing")

 case fieldName
@@ -241,12 +274,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscriberPin
 if requestId.isNone():
 reader.raiseUnexpectedValue("Field `requestId` is missing")

- value = FilterSubscriberPing(
- requestId: requestId.get()
- )
+ value = FilterSubscriberPing(requestId: requestId.get())

-proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscribeRequest)
- {.raises: [SerializationError, IOError].} =
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var FilterSubscribeRequest
+) {.raises: [SerializationError, IOError].} =
 var
 requestId = none(string)
 pubsubTopic = none(PubsubTopic)
@@ -256,8 +288,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscribeRequ
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "FilterSubscribeRequest")

 case fieldName
@@ -281,12 +316,18 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscribeRequ
 value = FilterSubscribeRequest(
 requestId: requestId.get(),
- pubsubTopic: if pubsubTopic.isNone() or pubsubTopic.get() == "": none(string) else: some(pubsubTopic.get()),
- contentFilters: contentFilters.get()
+ pubsubTopic:
+ if pubsubTopic.isNone() or pubsubTopic.get() == "":
+ none(string)
+ else:
+ some(pubsubTopic.get())
+ ,
+ contentFilters: contentFilters.get(),
 )

-proc readValue*(reader: var JsonReader[RestJson], value: var FilterUnsubscribeRequest)
- {.raises: [SerializationError, IOError].} =
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var FilterUnsubscribeRequest
+) {.raises: [SerializationError, IOError].} =
 var
 requestId = none(string)
 pubsubTopic = none(PubsubTopic)
@@ -296,8 +337,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterUnsubscribeRe
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "FilterUnsubscribeRequest")

 case fieldName
@@ -321,21 +365,29 @@ proc readValue*(reader: 
var JsonReader[RestJson], value: var FilterUnsubscribeRe
 value = FilterUnsubscribeRequest(
 requestId: requestId.get(),
- pubsubTopic: if pubsubTopic.isNone() or pubsubTopic.get() == "": none(string) else: some(pubsubTopic.get()),
- contentFilters: contentFilters.get()
+ pubsubTopic:
+ if pubsubTopic.isNone() or pubsubTopic.get() == "":
+ none(string)
+ else:
+ some(pubsubTopic.get())
+ ,
+ contentFilters: contentFilters.get(),
 )

-proc readValue*(reader: var JsonReader[RestJson], value: var FilterUnsubscribeAllRequest)
- {.raises: [SerializationError, IOError].} =
- var
- requestId = none(string)
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var FilterUnsubscribeAllRequest
+) {.raises: [SerializationError, IOError].} =
+ var requestId = none(string)

 var keys = initHashSet[string]()
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "FilterUnsubscribeAllRequest")

 case fieldName
@@ -347,12 +399,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterUnsubscribeAl
 if requestId.isNone():
 reader.raiseUnexpectedValue("Field `requestId` is missing")

- value = FilterUnsubscribeAllRequest(
- requestId: requestId.get(),
- )
+ value = FilterUnsubscribeAllRequest(requestId: requestId.get())

-proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscriptionResponse)
- {.raises: [SerializationError, IOError].} =
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var FilterSubscriptionResponse
+) {.raises: [SerializationError, IOError].} =
 var
 requestId = none(string)
 statusCode = none(uint32)
@@ -362,8 +413,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscriptionR
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "FilterSubscriptionResponse")

 case fieldName
@@ -382,5 +436,5 @@ proc readValue*(reader: var JsonReader[RestJson], value: var FilterSubscriptionR
 value = FilterSubscriptionResponse(
 requestId: requestId.get(),
 statusCode: statusCode.get(),
- statusDesc: statusDesc.get("")
+ statusDesc: statusDesc.get(""),
 )
diff --git a/waku/waku_api/rest/health/client.nim b/waku/waku_api/rest/health/client.nim
index 309af9803..fcae0fb6e 100644
--- a/waku/waku_api/rest/health/client.nim
+++ b/waku/waku_api/rest/health/client.nim
@@ -4,16 +4,12 @@ else:
 {.push raises: [].}

 import
- chronicles,
- json_serialization,
- json_serialization/std/options,
- presto/[route, client]
-import
- ../serdes,
- ../responses,
- ../rest_serdes
+ chronicles, json_serialization, json_serialization/std/options, presto/[route, client]
+import ../serdes, ../responses, ../rest_serdes

 logScope:
 topics = "waku node rest health_api"

-proc healthCheck*(): RestResponse[string] {.rest, endpoint: "/health", meth: HttpMethod.MethodGet.}
+proc healthCheck*(): RestResponse[string] {.
+ rest, endpoint: "/health", meth: HttpMethod.MethodGet
+.}
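
The generated healthCheck client pairs with the /health handler installed in handlers.nim below. A quick sketch; RestClientRef.new, the node address, and the import path follow presto and repo conventions and are assumptions:

import chronos, stew/results, presto/client
import ./waku/waku_api/rest/health/client

let client = RestClientRef.new("http://127.0.0.1:8645").tryGet()
# GET /health; a ready node answers 200 with the plain-text body "Node is healthy".
let resp = waitFor client.healthCheck()
echo resp.status, " ", resp.data
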
+ rest, endpoint: "/health", meth: HttpMethod.MethodGet +.} diff --git a/waku/waku_api/rest/health/handlers.nim b/waku/waku_api/rest/health/handlers.nim index c8c5b1ee7..ea5d658b6 100644 --- a/waku/waku_api/rest/health/handlers.nim +++ b/waku/waku_api/rest/health/handlers.nim @@ -3,14 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - chronicles, - json_serialization, - presto/route -import - ../../../waku_node, - ../responses, - ../serdes +import chronicles, json_serialization, presto/route +import ../../../waku_node, ../responses, ../serdes logScope: topics = "waku node rest health_api" @@ -25,12 +19,11 @@ proc installHealthApiHandler*(router: var RestRouter, node: WakuNode) = ## TODO: Leter to extend it to a broader information about each subsystem state ## report. Rest response to change to JSON structure that can hold exact detailed ## information. - - router.api(MethodGet, ROUTE_HEALTH) do () -> RestApiResponse: + router.api(MethodGet, ROUTE_HEALTH) do() -> RestApiResponse: let isReadyStateFut = node.isReady() if not await isReadyStateFut.withTimeout(FutIsReadyTimout): - return RestApiResponse.internalServerError("Health check timed out") + return RestApiResponse.internalServerError("Health check timed out") var msg = "Node is healthy" var status = Http200 diff --git a/waku/waku_api/rest/lightpush/client.nim b/waku/waku_api/rest/lightpush/client.nim index 9a49fbd04..4c18175a4 100644 --- a/waku/waku_api/rest/lightpush/client.nim +++ b/waku/waku_api/rest/lightpush/client.nim @@ -12,23 +12,18 @@ import json_serialization, json_serialization/std/options, presto/[route, client, common] -import - ../../../waku_core, - ../serdes, - ../responses, - ../rest_serdes, - ./types +import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types export types logScope: topics = "waku node rest client v2" -proc encodeBytes*(value: PushRequest, - contentType: string): RestResult[seq[byte]] = +proc encodeBytes*(value: PushRequest, contentType: string): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) - -proc sendPushRequest*(body: PushRequest): - RestResponse[string] - {.rest, endpoint: "/lightpush/v1/message", meth: HttpMethod.MethodPost.} +proc sendPushRequest*( + body: PushRequest +): RestResponse[string] {. 
+ rest, endpoint: "/lightpush/v1/message", meth: HttpMethod.MethodPost +.} diff --git a/waku/waku_api/rest/lightpush/handlers.nim b/waku/waku_api/rest/lightpush/handlers.nim index 20a6a1b1a..f34195077 100644 --- a/waku/waku_api/rest/lightpush/handlers.nim +++ b/waku/waku_api/rest/lightpush/handlers.nim @@ -29,23 +29,24 @@ logScope: const futTimeoutForPushRequestProcessing* = 5.seconds -const NoPeerNoDiscoError = RestApiResponse.serviceUnavailable( - "No suitable service peer & no discovery method") +const NoPeerNoDiscoError = + RestApiResponse.serviceUnavailable("No suitable service peer & no discovery method") -const NoPeerNoneFoundError = RestApiResponse.serviceUnavailable( - "No suitable service peer & none discovered") +const NoPeerNoneFoundError = + RestApiResponse.serviceUnavailable("No suitable service peer & none discovered") #### Request handlers const ROUTE_LIGHTPUSH* = "/lightpush/v1/message" proc installLightPushRequestHandler*( - router: var RestRouter, - node: WakuNode, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = - - router.api(MethodPost, ROUTE_LIGHTPUSH) do (contentBody: Option[ContentBody]) -> RestApiResponse: + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPost, ROUTE_LIGHTPUSH) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Send a request to push a waku message debug "post", ROUTE_LIGHTPUSH, contentBody diff --git a/waku/waku_api/rest/lightpush/types.nim b/waku/waku_api/rest/lightpush/types.nim index d3711e803..44ddbef25 100644 --- a/waku/waku_api/rest/lightpush/types.nim +++ b/waku/waku_api/rest/lightpush/types.nim @@ -11,31 +11,30 @@ import presto/[route, client, common] import - ../../../common/base64, - ../../../waku_core, - ../relay/types as relay_types, - ../serdes + ../../../common/base64, ../../../waku_core, ../relay/types as relay_types, ../serdes export relay_types #### Types type PushRequest* = object - pubsubTopic*: Option[PubSubTopic] - message*: RelayWakuMessage + pubsubTopic*: Option[PubSubTopic] + message*: RelayWakuMessage #### Serialization and deserialization -proc writeValue*(writer: var JsonWriter[RestJson], value: PushRequest) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: PushRequest +) {.raises: [IOError].} = writer.beginRecord() if value.pubsubTopic.isSome(): writer.writeField("pubsubTopic", value.pubsubTopic.get()) writer.writeField("message", value.message) writer.endRecord() -proc readValue*(reader: var JsonReader[RestJson], value: var PushRequest) - {.raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader[RestJson], value: var PushRequest +) {.raises: [SerializationError, IOError].} = var pubsubTopic = none(PubsubTopic) message = none(RelayWakuMessage) @@ -44,8 +43,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var PushRequest) for fieldName in readObjectFields(reader): # Check for reapeated keys if keys.containsOrIncl(fieldName): - let err = try: fmt"Multiple `{fieldName}` fields found" - except CatchableError: "Multiple fields with the same name found" + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" reader.raiseUnexpectedField(err, "PushRequest") case fieldName @@ -60,6 +62,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var PushRequest) reader.raiseUnexpectedValue("Field `message` is missing") value = PushRequest( - 
pubsubTopic: if pubsubTopic.isNone() or pubsubTopic.get() == "": none(string) else: some(pubsubTopic.get()),
- message: message.get()
+ pubsubTopic:
+ if pubsubTopic.isNone() or pubsubTopic.get() == "":
+ none(string)
+ else:
+ some(pubsubTopic.get())
+ ,
+ message: message.get(),
 )
diff --git a/waku/waku_api/rest/origin_handler.nim b/waku/waku_api/rest/origin_handler.nim
index fa3453a06..e57d2fe3a 100644
--- a/waku/waku_api/rest/origin_handler.nim
+++ b/waku/waku_api/rest/origin_handler.nim
@@ -11,11 +11,9 @@ import
 chronos,
 chronos/apps/http/httpserver

-type
- OriginHandlerMiddlewareRef* = ref object of HttpServerMiddlewareRef
- allowedOriginMatcher: Option[Regex]
- everyOriginAllowed: bool
-
+type OriginHandlerMiddlewareRef* = ref object of HttpServerMiddlewareRef
+ allowedOriginMatcher: Option[Regex]
+ everyOriginAllowed: bool

 proc isEveryOriginAllowed(maybeAllowedOrigin: Option[string]): bool =
 return maybeAllowedOrigin.isSome() and maybeAllowedOrigin.get() == "*"
@@ -30,7 +28,7 @@ proc compileOriginMatcher(maybeAllowedOrigin: Option[string]): Option[Regex] =
 return none(Regex)

 try:
- var matchOrigin : string
+ var matchOrigin: string

 if allowedOrigin == "*":
 matchOrigin = r".*"
@@ -38,9 +36,9 @@ proc compileOriginMatcher(maybeAllowedOrigin: Option[string]): Option[Regex] =

 let allowedOrigins = allowedOrigin.split(",")

- var matchExpressions : seq[string] = @[]
+ var matchExpressions: seq[string] = @[]

- var prefix : string
+ var prefix: string
 for allowedOrigin in allowedOrigins:
 if allowedOrigin.startsWith("http://"):
 prefix = r"http:\/\/"
@@ -63,22 +61,22 @@ proc compileOriginMatcher(maybeAllowedOrigin: Option[string]): Option[Regex] =
 return some(re(finalExpression, {reIgnoreCase, reExtended}))
 except RegexError:
 var msg = getCurrentExceptionMsg()
- error "Failed to compile regex", source=allowedOrigin, err=msg
+ error "Failed to compile regex", source = allowedOrigin, err = msg
 return none(Regex)

-proc originsMatch(originHandler: OriginHandlerMiddlewareRef,
- requestOrigin: string): bool =
-
+proc originsMatch(
+ originHandler: OriginHandlerMiddlewareRef, requestOrigin: string
+): bool =
 if originHandler.allowedOriginMatcher.isNone():
 return false

 return requestOrigin.match(originHandler.allowedOriginMatcher.get())

 proc originMiddlewareProc(
- middleware: HttpServerMiddlewareRef,
- reqfence: RequestFence,
- nextHandler: HttpProcessCallback2
- ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =
+ middleware: HttpServerMiddlewareRef,
+ reqfence: RequestFence,
+ nextHandler: HttpProcessCallback2,
+): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =
 if reqfence.isErr():
 # Ignore request errors detected before our middleware.
 # Let the final handler deal with it.
@@ -106,7 +104,9 @@ proc originMiddlewareProc(
 elif origin.len == 0:
 discard
 elif origin.len > 1:
- return await request.respond(Http400, "Only a single Origin header must be specified")
+ return await request.respond(
+ Http400, "Only a single Origin header must be specified"
+ )
 except HttpWriteError as exc:
 # We use the default error handler if we are unable to send a response.
 return defaultResponse(exc)
@@ -114,12 +114,13 @@ proc originMiddlewareProc(
 # Calling next handler.
 return await nextHandler(reqfence)

-proc new*(t: typedesc[OriginHandlerMiddlewareRef],
- allowedOrigin: Option[string] = none(string)
- ): HttpServerMiddlewareRef =
-
- let middleware =
- OriginHandlerMiddlewareRef(allowedOriginMatcher: compileOriginMatcher(allowedOrigin),
- everyOriginAllowed: isEveryOriginAllowed(allowedOrigin),
- handler: originMiddlewareProc)
+proc new*(
+ t: typedesc[OriginHandlerMiddlewareRef],
+ allowedOrigin: Option[string] = none(string),
+): HttpServerMiddlewareRef =
+ let middleware = OriginHandlerMiddlewareRef(
+ allowedOriginMatcher: compileOriginMatcher(allowedOrigin),
+ everyOriginAllowed: isEveryOriginAllowed(allowedOrigin),
+ handler: originMiddlewareProc,
+ )
 return HttpServerMiddlewareRef(middleware)
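
compileOriginMatcher collapses the comma-separated allowed-origin list into one case-insensitive regex, which originsMatch then applies to the request's Origin header. A simplified stand-in with std/re; the exact alternation built in the elided finalExpression is an assumption:

import std/re

# Stand-in for a matcher compiled from "http://localhost:8080,https://app.example.com".
let matcher =
  re(r"^(http:\/\/localhost:8080|https:\/\/app\.example\.com)$", {reIgnoreCase, reExtended})
doAssert "HTTP://localhost:8080".match(matcher) # reIgnoreCase at work
doAssert not "https://evil.example.com".match(matcher)
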
diff --git a/waku/waku_api/rest/relay/client.nim b/waku/waku_api/rest/relay/client.nim
index 4b84210b2..c6f58fa95 100644
--- a/waku/waku_api/rest/relay/client.nim
+++ b/waku/waku_api/rest/relay/client.nim
@@ -10,40 +10,69 @@ import
 json_serialization,
 json_serialization/std/options,
 presto/[route, client, common]
-import
- ../../../waku_core,
- ../serdes,
- ../responses,
- ../rest_serdes,
- ./types
+import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types

 export types

-
 logScope:
 topics = "waku node rest client"

-
-proc encodeBytes*(value: seq[PubSubTopic],
- contentType: string): RestResult[seq[byte]] =
+proc encodeBytes*(value: seq[PubSubTopic], contentType: string): RestResult[seq[byte]] =
 return encodeBytesOf(value, contentType)

 # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto)
-proc relayPostSubscriptionsV1*(body: seq[PubsubTopic]): RestResponse[string] {.rest, endpoint: "/relay/v1/subscriptions", meth: HttpMethod.MethodPost.}
-proc relayPostAutoSubscriptionsV1*(body: seq[ContentTopic]): RestResponse[string] {.rest, endpoint: "/relay/v1/auto/subscriptions", meth: HttpMethod.MethodPost.}
+proc relayPostSubscriptionsV1*(
+ body: seq[PubsubTopic]
+): RestResponse[string] {.
+ rest, endpoint: "/relay/v1/subscriptions", meth: HttpMethod.MethodPost
+.}
+
+proc relayPostAutoSubscriptionsV1*(
+ body: seq[ContentTopic]
+): RestResponse[string] {.
+ rest, endpoint: "/relay/v1/auto/subscriptions", meth: HttpMethod.MethodPost
+.}

 # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto)
-proc relayDeleteSubscriptionsV1*(body: seq[PubsubTopic]): RestResponse[string] {.rest, endpoint: "/relay/v1/subscriptions", meth: HttpMethod.MethodDelete.}
-proc relayDeleteAutoSubscriptionsV1*(body: seq[ContentTopic]): RestResponse[string] {.rest, endpoint: "/relay/v1/auto/subscriptions", meth: HttpMethod.MethodDelete.}
+proc relayDeleteSubscriptionsV1*(
+ body: seq[PubsubTopic]
+): RestResponse[string] {.
+ rest, endpoint: "/relay/v1/subscriptions", meth: HttpMethod.MethodDelete
+.}

-proc encodeBytes*(value: RelayPostMessagesRequest,
- contentType: string): RestResult[seq[byte]] =
+proc relayDeleteAutoSubscriptionsV1*(
+ body: seq[ContentTopic]
+): RestResponse[string] {.
+ rest, endpoint: "/relay/v1/auto/subscriptions", meth: HttpMethod.MethodDelete +.} + +proc encodeBytes*( + value: RelayPostMessagesRequest, contentType: string +): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) -proc relayGetMessagesV1*(pubsubTopic: string): RestResponse[RelayGetMessagesResponse] {.rest, endpoint: "/relay/v1/messages/{pubsubTopic}", meth: HttpMethod.MethodGet.} -proc relayGetAutoMessagesV1*(contentTopic: string): RestResponse[RelayGetMessagesResponse] {.rest, endpoint: "/relay/v1/auto/messages/{contentTopic}", meth: HttpMethod.MethodGet.} +proc relayGetMessagesV1*( + pubsubTopic: string +): RestResponse[RelayGetMessagesResponse] {. + rest, endpoint: "/relay/v1/messages/{pubsubTopic}", meth: HttpMethod.MethodGet +.} + +proc relayGetAutoMessagesV1*( + contentTopic: string +): RestResponse[RelayGetMessagesResponse] {. + rest, endpoint: "/relay/v1/auto/messages/{contentTopic}", meth: HttpMethod.MethodGet +.} # TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) -proc relayPostMessagesV1*(pubsubTopic: string, body: RelayPostMessagesRequest): RestResponse[string] {.rest, endpoint: "/relay/v1/messages/{pubsubTopic}", meth: HttpMethod.MethodPost.} -proc relayPostAutoMessagesV1*(body: RelayPostMessagesRequest): RestResponse[string] {.rest, endpoint: "/relay/v1/auto/messages", meth: HttpMethod.MethodPost.} +proc relayPostMessagesV1*( + pubsubTopic: string, body: RelayPostMessagesRequest +): RestResponse[string] {. + rest, endpoint: "/relay/v1/messages/{pubsubTopic}", meth: HttpMethod.MethodPost +.} + +proc relayPostAutoMessagesV1*( + body: RelayPostMessagesRequest +): RestResponse[string] {. 
+ rest, endpoint: "/relay/v1/auto/messages", meth: HttpMethod.MethodPost +.} diff --git a/waku/waku_api/rest/relay/handlers.nim b/waku/waku_api/rest/relay/handlers.nim index fca9f17a1..1e8994b5d 100644 --- a/waku/waku_api/rest/relay/handlers.nim +++ b/waku/waku_api/rest/relay/handlers.nim @@ -43,10 +43,14 @@ const ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1* = "/relay/v1/auto/subscriptions" const ROUTE_RELAY_AUTO_MESSAGESV1* = "/relay/v1/auto/messages/{contentTopic}" const ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC* = "/relay/v1/auto/messages" -proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: MessageCache) = - router.api(MethodPost, ROUTE_RELAY_SUBSCRIPTIONSV1) do (contentBody: Option[ContentBody]) -> RestApiResponse: +proc installRelayApiHandlers*( + router: var RestRouter, node: WakuNode, cache: MessageCache +) = + router.api(MethodPost, ROUTE_RELAY_SUBSCRIPTIONSV1) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: ## Subscribes a node to a list of PubSub topics - + debug "post_waku_v2_relay_v1_subscriptions" # Check the request body @@ -61,11 +65,15 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes for pubsubTopic in newTopics: cache.pubsubSubscribe(pubsubTopic) - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(messageCacheHandler(cache))) + node.subscribe( + (kind: PubsubSub, topic: pubsubTopic), some(messageCacheHandler(cache)) + ) return RestApiResponse.ok() - router.api(MethodDelete, ROUTE_RELAY_SUBSCRIPTIONSV1) do (contentBody: Option[ContentBody]) -> RestApiResponse: + router.api(MethodDelete, ROUTE_RELAY_SUBSCRIPTIONSV1) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: # ## Subscribes a node to a list of PubSub topics # debug "delete_waku_v2_relay_v1_subscriptions" @@ -79,12 +87,14 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes # Unsubscribe all handlers from requested topics for pubsubTopic in req: cache.pubsubUnsubscribe(pubsubTopic) - node.unsubscribe((kind: PubsubUnsub, topic: pubsubTopic)) + node.unsubscribe((kind: PubsubUnsub, topic: pubsubTopic)) # Successfully unsubscribed from all requested topics return RestApiResponse.ok() - router.api(MethodGet, ROUTE_RELAY_MESSAGESV1) do (pubsubTopic: string) -> RestApiResponse: + router.api(MethodGet, ROUTE_RELAY_MESSAGESV1) do( + pubsubTopic: string + ) -> RestApiResponse: # ## Returns all WakuMessages received on a PubSub topic since the # ## last time this method was called # ## TODO: ability to specify a return message limit @@ -94,20 +104,22 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes return RestApiResponse.badRequest() let pubSubTopic = pubsubTopic.get() - let messages = cache.getMessages(pubSubTopic, clear=true) + let messages = cache.getMessages(pubSubTopic, clear = true) if messages.isErr(): - debug "Not subscribed to topic", topic=pubSubTopic + debug "Not subscribed to topic", topic = pubSubTopic return RestApiResponse.notFound() let data = RelayGetMessagesResponse(messages.get().map(toRelayWakuMessage)) - let resp = RestApiResponse.jsonResponse(data, status=Http200) + let resp = RestApiResponse.jsonResponse(data, status = Http200) if resp.isErr(): - debug "An error ocurred while building the json respose", error=resp.error + debug "An error ocurred while building the json respose", error = resp.error return RestApiResponse.internalServerError() return resp.get() - router.api(MethodPost, ROUTE_RELAY_MESSAGESV1) do (pubsubTopic: string, contentBody: 
Option[ContentBody]) -> RestApiResponse:
+ router.api(MethodPost, ROUTE_RELAY_MESSAGESV1) do(
+ pubsubTopic: string, contentBody: Option[ContentBody]
+ ) -> RestApiResponse:
 if pubsubTopic.isErr():
 return RestApiResponse.badRequest()
 let pubSubTopic = pubsubTopic.get()
@@ -115,13 +127,17 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes
 # ensure the node is subscribed to the topic. otherwise it risks publishing
 # to a topic with no connected peers
 if pubSubTopic notin node.wakuRelay.subscribedTopics():
- return RestApiResponse.badRequest("Failed to publish: Node not subscribed to topic: " & pubsubTopic)
+ return RestApiResponse.badRequest(
+ "Failed to publish: Node not subscribed to topic: " & pubsubTopic
+ )

 # Check the request body
 if contentBody.isNone():
 return RestApiResponse.badRequest()

- let reqWakuMessage: RelayWakuMessage = decodeRequestBody[RelayWakuMessage](contentBody).valueOr:
+ let reqWakuMessage: RelayWakuMessage = decodeRequestBody[RelayWakuMessage](
+ contentBody
+ ).valueOr:
 return error

 var message: WakuMessage = reqWakuMessage.toWakuMessage(version = 0).valueOr:
@@ -130,26 +146,31 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes
 # if RLN is mounted, append the proof to the message
 if not node.wakuRlnRelay.isNil():
 # append the proof to the message
- node.wakuRlnRelay.appendRLNProof(message,
- float64(getTime().toUnix())).isOkOr:
- return RestApiResponse.internalServerError("Failed to publish: error appending RLN proof to message: " & $error)
+
+ node.wakuRlnRelay.appendRLNProof(message, float64(getTime().toUnix())).isOkOr:
+ return RestApiResponse.internalServerError(
+ "Failed to publish: error appending RLN proof to message: " & $error
+ )

 (await node.wakuRelay.validateMessage(pubsubTopic, message)).isOkOr:
 return RestApiResponse.badRequest("Failed to publish: " & error)

 # if we reach here it's either a non-RLN message or a RLN message with a valid proof
- debug "Publishing message", pubSubTopic=pubSubTopic, rln=not node.wakuRlnRelay.isNil()
+ debug "Publishing message",
+ pubSubTopic = pubSubTopic, rln = not node.wakuRlnRelay.isNil()

 if not (waitFor node.publish(some(pubSubTopic), message).withTimeout(futTimeout)):
- error "Failed to publish message to topic", pubSubTopic=pubSubTopic
+ error "Failed to publish message to topic", pubSubTopic = pubSubTopic
 return RestApiResponse.internalServerError("Failed to publish: timed out")

 return RestApiResponse.ok()

 # Autosharding API
- router.api(MethodPost, ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1) do (contentBody: Option[ContentBody]) -> RestApiResponse:
+ router.api(MethodPost, ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1) do(
+ contentBody: Option[ContentBody]
+ ) -> RestApiResponse:
 ## Subscribes a node to a list of content topics.
-
+
 debug "post_waku_v2_relay_v1_auto_subscriptions"

 let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr:
@@ -160,13 +181,17 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes

 for contentTopic in newTopics:
 cache.contentSubscribe(contentTopic)
- node.subscribe((kind: ContentSub, topic: contentTopic), some(messageCacheHandler(cache)))
+ node.subscribe(
+ (kind: ContentSub, topic: contentTopic), some(messageCacheHandler(cache))
+ )

 return RestApiResponse.ok()
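
The auto routes let a client deal only in content topics; the node resolves the pubsub shard itself (wakuSharding.getShard in the publish handler below). A client-side sketch under the same assumptions as the earlier ones (RestClientRef.new, node address, placeholder topic):

import chronos, stew/results, presto/client
import ./waku/waku_api/rest/relay/client

let client = RestClientRef.new("http://127.0.0.1:8645").tryGet()
# POST /relay/v1/auto/subscriptions with a JSON array of content topics.
discard waitFor client.relayPostAutoSubscriptionsV1(@["/toy-chat/2/huilong/proto"])
# GET /relay/v1/auto/messages/{contentTopic} drains the cache for that topic.
let msgs = waitFor client.relayGetAutoMessagesV1("/toy-chat/2/huilong/proto")
echo msgs.data.len, " message(s) received"
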

- router.api(MethodDelete, ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1) do (contentBody: Option[ContentBody]) -> RestApiResponse:
+ router.api(MethodDelete, ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1) do(
+ contentBody: Option[ContentBody]
+ ) -> RestApiResponse:
 ## Unsubscribes a node from a list of content topics.
-
+
 debug "delete_waku_v2_relay_v1_auto_subscriptions"

 let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr:
@@ -178,26 +203,30 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes

 return RestApiResponse.ok()

- router.api(MethodGet, ROUTE_RELAY_AUTO_MESSAGESV1) do (contentTopic: string) -> RestApiResponse:
+ router.api(MethodGet, ROUTE_RELAY_AUTO_MESSAGESV1) do(
+ contentTopic: string
+ ) -> RestApiResponse:
 ## Returns all WakuMessages received on a content topic since the
 ## last time this method was called.
-
- debug "get_waku_v2_relay_v1_auto_messages", contentTopic=contentTopic
+
+ debug "get_waku_v2_relay_v1_auto_messages", contentTopic = contentTopic

 let contentTopic = contentTopic.valueOr:
 return RestApiResponse.badRequest($error)

- let messages = cache.getAutoMessages(contentTopic, clear=true).valueOr:
- debug "Not subscribed to topic", topic=contentTopic
+ let messages = cache.getAutoMessages(contentTopic, clear = true).valueOr:
+ debug "Not subscribed to topic", topic = contentTopic
 return RestApiResponse.notFound(contentTopic)

 let data = RelayGetMessagesResponse(messages.map(toRelayWakuMessage))

- return RestApiResponse.jsonResponse(data, status=Http200).valueOr:
+ return RestApiResponse.jsonResponse(data, status = Http200).valueOr:
 debug "An error occurred while building the json response", error = error
 return RestApiResponse.internalServerError($error)

- router.api(MethodPost, ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC) do (contentBody: Option[ContentBody]) -> RestApiResponse:
+ router.api(MethodPost, ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC) do(
+ contentBody: Option[ContentBody]
+ ) -> RestApiResponse:
 # Check the request body
 if contentBody.isNone():
 return RestApiResponse.badRequest()
@@ -213,24 +242,26 @@ proc installRelayApiHandlers*(router: var RestRouter, node: WakuNode, cache: Mes

 let pubsubTopic = node.wakuSharding.getShard(message.contentTopic).valueOr:
 let msg = "Autosharding error: " & error
- error "publish error", msg=msg
+ error "publish error", msg = msg
 return RestApiResponse.badRequest("Failed to publish. 
" & msg) - + # if RLN is mounted, append the proof to the message if not node.wakuRlnRelay.isNil(): node.wakuRlnRelay.appendRLNProof(message, float64(getTime().toUnix())).isOkOr: return RestApiResponse.internalServerError( - "Failed to publish: error appending RLN proof to message: " & $error) + "Failed to publish: error appending RLN proof to message: " & $error + ) (await node.wakuRelay.validateMessage(pubsubTopic, message)).isOkOr: return RestApiResponse.badRequest("Failed to publish: " & error) - + # if we reach here its either a non-RLN message or a RLN message with a valid proof - debug "Publishing message", contentTopic=message.contentTopic, rln=not node.wakuRlnRelay.isNil() + debug "Publishing message", + contentTopic = message.contentTopic, rln = not node.wakuRlnRelay.isNil() var publishFut = node.publish(some(pubsubTopic), message) if not await publishFut.withTimeout(futTimeout): - return RestApiResponse.internalServerError("Failed to publish: timedout") + return RestApiResponse.internalServerError("Failed to publish: timedout") var res = publishFut.read() diff --git a/waku/waku_api/rest/relay/types.nim b/waku/waku_api/rest/relay/types.nim index aeead801c..c39ea683a 100644 --- a/waku/waku_api/rest/relay/types.nim +++ b/waku/waku_api/rest/relay/types.nim @@ -9,21 +9,17 @@ import json_serialization, json_serialization/std/options, presto/[route, client, common] -import - ../../../common/base64, - ../../../waku_core, - ../serdes - +import ../../../common/base64, ../../../waku_core, ../serdes #### Types type RelayWakuMessage* = object - payload*: Base64String - contentTopic*: Option[ContentTopic] - version*: Option[Natural] - timestamp*: Option[int64] - meta*: Option[Base64String] - ephemeral*: Option[bool] + payload*: Base64String + contentTopic*: Option[ContentTopic] + version*: Option[Natural] + timestamp*: Option[int64] + meta*: Option[Base64String] + ephemeral*: Option[bool] type RelayGetMessagesResponse* = seq[RelayWakuMessage] @@ -37,8 +33,13 @@ proc toRelayWakuMessage*(msg: WakuMessage): RelayWakuMessage = contentTopic: some(msg.contentTopic), version: some(Natural(msg.version)), timestamp: some(msg.timestamp), - meta: if msg.meta.len > 0: some(base64.encode(msg.meta)) else: none(Base64String), - ephemeral: some(msg.ephemeral) + meta: + if msg.meta.len > 0: + some(base64.encode(msg.meta)) + else: + none(Base64String) + , + ephemeral: some(msg.ephemeral), ) proc toWakuMessage*(msg: RelayWakuMessage, version = 0): Result[WakuMessage, string] = @@ -54,13 +55,22 @@ proc toWakuMessage*(msg: RelayWakuMessage, version = 0): Result[WakuMessage, str if timestamp == 0: timestamp = getNanosecondTime(getTime().toUnixFloat()) - return ok(WakuMessage(payload: payload, contentTopic: contentTopic, version: version, - timestamp: timestamp, meta: meta, ephemeral: ephemeral)) + return ok( + WakuMessage( + payload: payload, + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + meta: meta, + ephemeral: ephemeral, + ) + ) #### Serialization and deserialization -proc writeValue*(writer: var JsonWriter[RestJson], value: RelayWakuMessage) - {.raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter[RestJson], value: RelayWakuMessage +) {.raises: [IOError].} = writer.beginRecord() writer.writeField("payload", value.payload) if value.contentTopic.isSome(): @@ -75,8 +85,9 @@ proc writeValue*(writer: var JsonWriter[RestJson], value: RelayWakuMessage) writer.writeField("ephemeral", value.ephemeral.get()) writer.endRecord() -proc readValue*(reader: var JsonReader[RestJson], 
value: var RelayWakuMessage)
- {.raises: [SerializationError, IOError].} =
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var RelayWakuMessage
+) {.raises: [SerializationError, IOError].} =
 var
 payload = none(Base64String)
 contentTopic = none(ContentTopic)
@@ -89,8 +100,11 @@ proc readValue*(reader: var JsonReader[RestJson], value: var RelayWakuMessage)
 for fieldName in readObjectFields(reader):
 # Check for repeated keys
 if keys.containsOrIncl(fieldName):
- let err = try: fmt"Multiple `{fieldName}` fields found"
- except CatchableError: "Multiple fields with the same name found"
+ let err =
+ try:
+ fmt"Multiple `{fieldName}` fields found"
+ except CatchableError:
+ "Multiple fields with the same name found"
 reader.raiseUnexpectedField(err, "RelayWakuMessage")

 case fieldName
@@ -121,5 +135,5 @@ proc readValue*(reader: var JsonReader[RestJson], value: var RelayWakuMessage)
 version: version,
 timestamp: timestamp,
 meta: meta,
- ephemeral: ephemeral
+ ephemeral: ephemeral,
 )
diff --git a/waku/waku_api/rest/responses.nim b/waku/waku_api/rest/responses.nim
index f5b244fea..7bb83f2db 100644
--- a/waku/waku_api/rest/responses.nim
+++ b/waku/waku_api/rest/responses.nim
@@ -3,51 +3,43 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
 {.push raises: [].}

-import
- std/typetraits,
- stew/results,
- chronicles,
- presto/common
-import
- ./serdes
-
+import std/typetraits, stew/results, chronicles, presto/common
+import ./serdes

 const MIMETYPE_JSON* = MediaType.init("application/json")
 const MIMETYPE_TEXT* = MediaType.init("text/plain")

-
 proc ok*(t: typedesc[RestApiResponse]): RestApiResponse =
 RestApiResponse.response("OK", Http200, $MIMETYPE_TEXT)

-proc internalServerError*(t: typedesc[RestApiResponse],
- msg: string = ""):
- RestApiResponse =
+proc internalServerError*(
+ t: typedesc[RestApiResponse], msg: string = ""
+): RestApiResponse =
 RestApiResponse.error(Http500, msg, $MIMETYPE_TEXT)

-proc serviceUnavailable*(t: typedesc[RestApiResponse],
- msg: string = ""):
- RestApiResponse =
+proc serviceUnavailable*(
+ t: typedesc[RestApiResponse], msg: string = ""
+): RestApiResponse =
 RestApiResponse.error(Http503, msg, $MIMETYPE_TEXT)

-proc badRequest*(t: typedesc[RestApiResponse],
- msg: string = ""):
- RestApiResponse =
+proc badRequest*(t: typedesc[RestApiResponse], msg: string = ""): RestApiResponse =
 RestApiResponse.error(Http400, msg, $MIMETYPE_TEXT)

-proc notFound*(t: typedesc[RestApiResponse],
- msg: string = ""):
- RestApiResponse =
+proc notFound*(t: typedesc[RestApiResponse], msg: string = ""): RestApiResponse =
 RestApiResponse.error(Http404, msg, $MIMETYPE_TEXT)

-proc preconditionFailed*(t: typedesc[RestApiResponse],
- msg: string = ""):
- RestApiResponse =
+proc preconditionFailed*(
+ t: typedesc[RestApiResponse], msg: string = ""
+): RestApiResponse =
 RestApiResponse.error(Http412, msg, $MIMETYPE_TEXT)

-
-proc jsonResponse*(t: typedesc[RestApiResponse], data: auto, status: HttpCode = Http200): SerdesResult[RestApiResponse] =
+proc jsonResponse*(
+ t: typedesc[RestApiResponse], data: auto, status: HttpCode = Http200
+): SerdesResult[RestApiResponse] =
 let encoded = ?encodeIntoJsonBytes(data)
 ok(RestApiResponse.response(encoded, status, $MIMETYPE_JSON))

-proc textResponse*(t: typedesc[RestApiResponse], data: string, status: HttpCode = Http200): RestApiResponse =
+proc textResponse*(
+ t: typedesc[RestApiResponse], data: string, status: HttpCode = Http200
+): RestApiResponse =
 RestApiResponse.response(data, status, $MIMETYPE_TEXT)
diff --git a/waku/waku_api/rest/rest_serdes.nim 
b/waku/waku_api/rest/rest_serdes.nim index b9251a14d..0e2ee77a9 100644 --- a/waku/waku_api/rest/rest_serdes.nim +++ b/waku/waku_api/rest/rest_serdes.nim @@ -15,44 +15,52 @@ import json_serialization/std/sets, presto/common -import - ./serdes, - ./responses +import ./serdes, ./responses logScope: topics = "waku node rest" -proc encodeBytesOf*[T](value: T, - contentType: string): RestResult[seq[byte]]= +proc encodeBytesOf*[T](value: T, contentType: string): RestResult[seq[byte]] = let reqContentType = MediaType.init(contentType) if reqContentType != MIMETYPE_JSON: - error "Unsupported contentType value", contentType = contentType, typ = value.type.name + error "Unsupported contentType value", + contentType = contentType, typ = value.type.name return err("Unsupported contentType") let encoded = ?encodeIntoJsonBytes(value) return ok(encoded) -func decodeRequestBody*[T](contentBody: Option[ContentBody]) : Result[T, RestApiResponse] = +func decodeRequestBody*[T]( + contentBody: Option[ContentBody] +): Result[T, RestApiResponse] = if contentBody.isNone(): return err(RestApiResponse.badRequest("Missing content body")) let reqBodyContentType = contentBody.get().contentType.mediaType if reqBodyContentType != MIMETYPE_JSON and reqBodyContentType != MIMETYPE_TEXT: - return err(RestApiResponse.badRequest("Wrong Content-Type, expected application/json or text/plain")) + return err( + RestApiResponse.badRequest( + "Wrong Content-Type, expected application/json or text/plain" + ) + ) let reqBodyData = contentBody.get().data let requestResult = decodeFromJsonBytes(T, reqBodyData) if requestResult.isErr(): - return err(RestApiResponse.badRequest("Invalid content body, could not decode. " & - $requestResult.error)) + return err( + RestApiResponse.badRequest( + "Invalid content body, could not decode. 
" & $requestResult.error + ) + ) return ok(requestResult.get()) -proc decodeBytes*(t: typedesc[string], value: openarray[byte], - contentType: Opt[ContentTypeData]): RestResult[string] = +proc decodeBytes*( + t: typedesc[string], value: openarray[byte], contentType: Opt[ContentTypeData] +): RestResult[string] = if MediaType.init($contentType) != MIMETYPE_TEXT: error "Unsupported contentType value", contentType = contentType return err("Unsupported contentType") @@ -63,15 +71,15 @@ proc decodeBytes*(t: typedesc[string], value: openarray[byte], copyMem(addr res[0], unsafeAddr value[0], len(value)) return ok(res) -proc decodeBytes*[T](t: typedesc[T], - data: openArray[byte], - contentType: Opt[ContentTypeData]): RestResult[T] = - - let reqContentType = contentType.valueOr(): +proc decodeBytes*[T]( + t: typedesc[T], data: openArray[byte], contentType: Opt[ContentTypeData] +): RestResult[T] = + let reqContentType = contentType.valueOr: error "Unsupported response, missing contentType value" return err("Unsupported response, missing contentType") - if reqContentType.mediaType != MIMETYPE_JSON and reqContentType.mediaType != MIMETYPE_TEXT: + if reqContentType.mediaType != MIMETYPE_JSON and + reqContentType.mediaType != MIMETYPE_TEXT: error "Unsupported response contentType value", contentType = contentType return err("Unsupported response contentType") diff --git a/waku/waku_api/rest/serdes.nim b/waku/waku_api/rest/serdes.nim index d68c89316..54be180d5 100644 --- a/waku/waku_api/rest/serdes.nim +++ b/waku/waku_api/rest/serdes.nim @@ -14,53 +14,59 @@ import json_serialization/std/net, json_serialization/std/sets, presto/common -import - ../../common/base64 +import ../../common/base64 logScope: topics = "waku node rest" createJsonFlavor RestJson -Json.setWriter JsonWriter, - PreferredOutput = string +Json.setWriter JsonWriter, PreferredOutput = string -template unrecognizedFieldWarning* = +template unrecognizedFieldWarning*() = # TODO: There should be a different notification mechanism for informing the # caller of a deserialization routine for unexpected fields. # The chonicles import in this module should be removed. debug "JSON field not recognized by the current version of nwaku. 
Consider upgrading", - fieldName, typeName = typetraits.name(typeof value) - + fieldName, typeName = typetraits.name(typeof value) type SerdesResult*[T] = Result[T, cstring] -proc writeValue*(writer: var JsonWriter, value: Base64String) - {.gcsafe, raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter, value: Base64String +) {.gcsafe, raises: [IOError].} = writer.writeValue(string(value)) -proc readValue*(reader: var JsonReader, value: var Base64String) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader, value: var Base64String +) {.gcsafe, raises: [SerializationError, IOError].} = value = Base64String(reader.readValue(string)) -proc decodeFromJsonString*[T](t: typedesc[T], - data: JsonString, - requireAllFields = true): SerdesResult[T] = +proc decodeFromJsonString*[T]( + t: typedesc[T], data: JsonString, requireAllFields = true +): SerdesResult[T] = try: - ok(RestJson.decode(string(data), T, - requireAllFields = requireAllFields, - allowUnknownFields = true)) + ok( + RestJson.decode( + string(data), T, requireAllFields = requireAllFields, allowUnknownFields = true + ) + ) except SerializationError: # TODO: Do better error reporting here err("Unable to deserialize data") -proc decodeFromJsonBytes*[T](t: typedesc[T], - data: openArray[byte], - requireAllFields = true): SerdesResult[T] = +proc decodeFromJsonBytes*[T]( + t: typedesc[T], data: openArray[byte], requireAllFields = true +): SerdesResult[T] = try: - ok(RestJson.decode(string.fromBytes(data), T, - requireAllFields = requireAllFields, - allowUnknownFields = true)) + ok( + RestJson.decode( + string.fromBytes(data), + T, + requireAllFields = requireAllFields, + allowUnknownFields = true, + ) + ) except SerializationError: # TODO: Do better error reporting here err("Unable to deserialize data") @@ -91,7 +97,6 @@ proc encodeIntoJsonBytes*(value: auto): SerdesResult[seq[byte]] = ok(encoded) - #### helpers proc encodeString*(value: string): RestResult[string] = diff --git a/waku/waku_api/rest/server.nim b/waku/waku_api/rest/server.nim index 95a76d848..b76b7c203 100644 --- a/waku/waku_api/rest/server.nim +++ b/waku/waku_api/rest/server.nim @@ -13,9 +13,7 @@ import presto/middleware, presto/servercommon -import - ./origin_handler - +import ./origin_handler type RestServerResult*[T] = Result[T, string] @@ -29,81 +27,89 @@ type ### Configuration type RestServerConf* = object - cacheSize*: Natural ## \ - ## The maximum number of recently accessed states that are kept in \ - ## memory. Speeds up requests obtaining information for consecutive - ## slots or epochs. + cacheSize*: Natural + ## \ + ## The maximum number of recently accessed states that are kept in \ + ## memory. Speeds up requests obtaining information for consecutive + ## slots or epochs. 
- cacheTtl*: Natural ## \ - ## The number of seconds to keep recently accessed states in memory + cacheTtl*: Natural + ## \ + ## The number of seconds to keep recently accessed states in memory - requestTimeout*: Natural ## \ - ## The number of seconds to wait until complete REST request will be received + requestTimeout*: Natural + ## \ + ## The number of seconds to wait until complete REST request will be received - maxRequestBodySize*: Natural ## \ - ## Maximum size of REST request body (kilobytes) + maxRequestBodySize*: Natural + ## \ + ## Maximum size of REST request body (kilobytes) - maxRequestHeadersSize*: Natural ## \ - ## Maximum size of REST request headers (kilobytes) + maxRequestHeadersSize*: Natural + ## \ + ## Maximum size of REST request headers (kilobytes) proc default*(T: type RestServerConf): T = RestServerConf( - cacheSize: 3, - cacheTtl: 60, - requestTimeout: 0, - maxRequestBodySize: 16_384, - maxRequestHeadersSize: 64 + cacheSize: 3, + cacheTtl: 60, + requestTimeout: 0, + maxRequestBodySize: 16_384, + maxRequestHeadersSize: 64, ) - ### Initialization -proc new*(t: typedesc[WakuRestServerRef], - router: RestRouter, - address: TransportAddress, - serverIdent: string = PrestoIdent, - serverFlags = {HttpServerFlags.NotifyDisconnect}, - socketFlags: set[ServerFlags] = {ReuseAddr}, - serverUri = Uri(), - maxConnections: int = -1, - backlogSize: int = DefaultBacklogSize, - bufferSize: int = 4096, - httpHeadersTimeout = 10.seconds, - maxHeadersSize: int = 8192, - maxRequestBodySize: int = 1_048_576, - requestErrorHandler: RestRequestErrorHandler = nil, - dualstack = DualStackType.Auto, - allowedOrigin: Option[string] = none(string) - ): RestServerResult[WakuRestServerRef] = +proc new*( + t: typedesc[WakuRestServerRef], + router: RestRouter, + address: TransportAddress, + serverIdent: string = PrestoIdent, + serverFlags = {HttpServerFlags.NotifyDisconnect}, + socketFlags: set[ServerFlags] = {ReuseAddr}, + serverUri = Uri(), + maxConnections: int = -1, + backlogSize: int = DefaultBacklogSize, + bufferSize: int = 4096, + httpHeadersTimeout = 10.seconds, + maxHeadersSize: int = 8192, + maxRequestBodySize: int = 1_048_576, + requestErrorHandler: RestRequestErrorHandler = nil, + dualstack = DualStackType.Auto, + allowedOrigin: Option[string] = none(string), +): RestServerResult[WakuRestServerRef] = var server = WakuRestServerRef(router: router) - let restMiddleware = RestServerMiddlewareRef.new(router = server.router, errorHandler = requestErrorHandler) + let restMiddleware = RestServerMiddlewareRef.new( + router = server.router, errorHandler = requestErrorHandler + ) let originHandlerMiddleware = OriginHandlerMiddlewareRef.new(allowedOrigin) - let middlewares = [originHandlerMiddleware, - restMiddleware] + let middlewares = [originHandlerMiddleware, restMiddleware] ## This must be empty and needed only to confirm original initialization requirements of ## the RestHttpServer now combining old and new middleware approach. - proc defaultProcessCallback(rf: RequestFence): Future[HttpResponseRef] {. 
- async: (raises: [CancelledError]).} = + proc defaultProcessCallback( + rf: RequestFence + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = discard - - let sres = HttpServerRef.new(address - , defaultProcessCallback - , serverFlags - , socketFlags - , serverUri - , serverIdent - , maxConnections - , bufferSize - , backlogSize - , httpHeadersTimeout - , maxHeadersSize - , maxRequestBodySize - , dualstack = dualstack - , middlewares = middlewares) + let sres = HttpServerRef.new( + address, + defaultProcessCallback, + serverFlags, + socketFlags, + serverUri, + serverIdent, + maxConnections, + bufferSize, + backlogSize, + httpHeadersTimeout, + maxHeadersSize, + maxRequestBodySize, + dualstack = dualstack, + middlewares = middlewares, + ) if sres.isOk(): server.httpServer = sres.get() ok(server) @@ -115,26 +121,29 @@ proc getRouter(): RestRouter = proc validate(pattern: string, value: string): int = ## This is rough validation procedure which should be simple and fast, ## because it will be used for query routing. - if pattern.startsWith("{") and pattern.endsWith("}"): 0 - else: 1 + if pattern.startsWith("{") and pattern.endsWith("}"): 0 else: 1 # disable allowed origin handling by presto, we add our own handling as middleware RestRouter.init(validate, allowedOrigin = none(string)) -proc init*(T: type WakuRestServerRef, - ip: IpAddress, port: Port, - allowedOrigin=none(string), - conf=RestServerConf.default(), - requestErrorHandler: RestRequestErrorHandler = nil): RestServerResult[T] = +proc init*( + T: type WakuRestServerRef, + ip: IpAddress, + port: Port, + allowedOrigin = none(string), + conf = RestServerConf.default(), + requestErrorHandler: RestRequestErrorHandler = nil, +): RestServerResult[T] = let address = initTAddress(ip, port) - let serverFlags = { - HttpServerFlags.QueryCommaSeparatedArray, - HttpServerFlags.NotifyDisconnect - } + let serverFlags = + {HttpServerFlags.QueryCommaSeparatedArray, HttpServerFlags.NotifyDisconnect} let - headersTimeout = if conf.requestTimeout == 0: chronos.InfiniteDuration - else: seconds(int64(conf.requestTimeout)) + headersTimeout = + if conf.requestTimeout == 0: + chronos.InfiniteDuration + else: + seconds(int64(conf.requestTimeout)) maxHeadersSize = conf.maxRequestHeadersSize * 1024 maxRequestBodySize = conf.maxRequestBodySize * 1024 @@ -142,23 +151,25 @@ proc init*(T: type WakuRestServerRef, try: return WakuRestServerRef.new( - router, - address, - serverFlags = serverFlags, - httpHeadersTimeout = headersTimeout, - maxHeadersSize = maxHeadersSize, - maxRequestBodySize = maxRequestBodySize, - requestErrorHandler = requestErrorHandler, - allowedOrigin = allowedOrigin - ) + router, + address, + serverFlags = serverFlags, + httpHeadersTimeout = headersTimeout, + maxHeadersSize = maxHeadersSize, + maxRequestBodySize = maxRequestBodySize, + requestErrorHandler = requestErrorHandler, + allowedOrigin = allowedOrigin, + ) except CatchableError: return err(getCurrentExceptionMsg()) -proc newRestHttpServer*(ip: IpAddress, port: Port, - allowedOrigin=none(string), - conf=RestServerConf.default(), - requestErrorHandler: RestRequestErrorHandler = nil): - RestServerResult[WakuRestServerRef] = +proc newRestHttpServer*( + ip: IpAddress, + port: Port, + allowedOrigin = none(string), + conf = RestServerConf.default(), + requestErrorHandler: RestRequestErrorHandler = nil, +): RestServerResult[WakuRestServerRef] = WakuRestServerRef.init(ip, port, allowedOrigin, conf, requestErrorHandler) proc localAddress*(rs: WakuRestServerRef): TransportAddress = @@ 
-168,12 +179,9 @@ proc localAddress*(rs: WakuRestServerRef): TransportAddress = proc state*(rs: WakuRestServerRef): RestServerState = ## Returns current REST server's state. case rs.httpServer.state - of HttpServerState.ServerClosed: - RestServerState.Closed - of HttpServerState.ServerStopped: - RestServerState.Stopped - of HttpServerState.ServerRunning: - RestServerState.Running + of HttpServerState.ServerClosed: RestServerState.Closed + of HttpServerState.ServerStopped: RestServerState.Stopped + of HttpServerState.ServerRunning: RestServerState.Running proc start*(rs: WakuRestServerRef) = ## Starts REST server. @@ -185,8 +193,7 @@ proc stop*(rs: WakuRestServerRef) {.async: (raises: []).} = await rs.httpServer.stop() notice "REST service stopped", address = $rs.localAddress() -proc drop*(rs: WakuRestServerRef): Future[void] {. - async: (raw: true, raises: []).} = +proc drop*(rs: WakuRestServerRef): Future[void] {.async: (raw: true, raises: []).} = ## Drop all pending connections. rs.httpServer.drop() @@ -195,7 +202,8 @@ proc closeWait*(rs: WakuRestServerRef) {.async: (raises: []).} = await rs.httpServer.closeWait() notice "REST service closed", address = $rs.localAddress() -proc join*(rs: WakuRestServerRef): Future[void] {. - async: (raw: true, raises: [CancelledError]).} = +proc join*( + rs: WakuRestServerRef +): Future[void] {.async: (raw: true, raises: [CancelledError]).} = ## Wait until REST server will not be closed. rs.httpServer.join() diff --git a/waku/waku_api/rest/store/client.nim b/waku/waku_api/rest/store/client.nim index 3e5c9dd80..66eaea9a0 100644 --- a/waku/waku_api/rest/store/client.nim +++ b/waku/waku_api/rest/store/client.nim @@ -4,29 +4,19 @@ else: {.push raises: [].} import - chronicles, - json_serialization, - json_serialization/std/options, - presto/[route, client] -import - ../../../waku_store/common, - ../serdes, - ../responses, - ./types + chronicles, json_serialization, json_serialization/std/options, presto/[route, client] +import ../../../waku_store/common, ../serdes, ../responses, ./types export types - logScope: topics = "waku node rest store_api" - -proc decodeBytes*(t: typedesc[StoreResponseRest], - data: openArray[byte], - contentType: Opt[ContentTypeData]): - - RestResult[StoreResponseRest] = - +proc decodeBytes*( + t: typedesc[StoreResponseRest], + data: openArray[byte], + contentType: Opt[ContentTypeData], +): RestResult[StoreResponseRest] = if MediaType.init($contentType) == MIMETYPE_JSON: let decoded = ?decodeFromJsonBytes(StoreResponseRest, data) return ok(decoded) @@ -37,59 +27,52 @@ proc decodeBytes*(t: typedesc[StoreResponseRest], res = newString(len(data)) copyMem(addr res[0], unsafeAddr data[0], len(data)) - return ok(StoreResponseRest( - messages: newSeq[StoreWakuMessage](0), - cursor: none(HistoryCursorRest), - # field that contain error information - errorMessage: some(res) - )) + return ok( + StoreResponseRest( + messages: newSeq[StoreWakuMessage](0), + cursor: none(HistoryCursorRest), + # field that contain error information + errorMessage: some(res), + ) + ) # If everything goes wrong return err(cstring("Unsupported contentType " & $contentType)) +proc getStoreMessagesV1*( + # URL-encoded reference to the store-node + peerAddr: string = "", + pubsubTopic: string = "", + # URL-encoded comma-separated list of content topics + contentTopics: string = "", + startTime: string = "", + endTime: string = "", + + # Optional cursor fields + senderTime: string = "", + storeTime: string = "", + digest: string = "", # base64-encoded digest + pageSize: 
string = "", + ascending: string = "", +): RestResponse[StoreResponseRest] {. + rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet +.} proc getStoreMessagesV1*( - # URL-encoded reference to the store-node - peerAddr: string = "", - pubsubTopic: string = "", - # URL-encoded comma-separated list of content topics - contentTopics: string = "", - startTime: string = "", - endTime: string = "", + # URL-encoded reference to the store-node + peerAddr: Option[string], + pubsubTopic: string = "", + # URL-encoded comma-separated list of content topics + contentTopics: string = "", + startTime: string = "", + endTime: string = "", - # Optional cursor fields - senderTime: string = "", - storeTime: string = "", - digest: string = "", # base64-encoded digest - - pageSize: string = "", - ascending: string = "" - ): - RestResponse[StoreResponseRest] - - {.rest, - endpoint: "/store/v1/messages", - meth: HttpMethod.MethodGet.} - -proc getStoreMessagesV1*( - # URL-encoded reference to the store-node - peerAddr: Option[string], - pubsubTopic: string = "", - # URL-encoded comma-separated list of content topics - contentTopics: string = "", - startTime: string = "", - endTime: string = "", - - # Optional cursor fields - senderTime: string = "", - storeTime: string = "", - digest: string = "", # base64-encoded digest - - pageSize: string = "", - ascending: string = "" - ): - RestResponse[StoreResponseRest] - - {.rest, - endpoint: "/store/v1/messages", - meth: HttpMethod.MethodGet.} \ No newline at end of file + # Optional cursor fields + senderTime: string = "", + storeTime: string = "", + digest: string = "", # base64-encoded digest + pageSize: string = "", + ascending: string = "", +): RestResponse[StoreResponseRest] {. + rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet +.} diff --git a/waku/waku_api/rest/store/handlers.nim b/waku/waku_api/rest/store/handlers.nim index 8cdeef903..01f5259de 100644 --- a/waku/waku_api/rest/store/handlers.nim +++ b/waku/waku_api/rest/store/handlers.nim @@ -3,13 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/strformat, - stew/results, - chronicles, - uri, - json_serialization, - presto/route +import std/strformat, stew/results, chronicles, uri, json_serialization, presto/route import ../../../waku_core, ../../../waku_store/common, @@ -29,17 +23,14 @@ logScope: const futTimeout* = 5.seconds # Max time to wait for futures -const NoPeerNoDiscError* = RestApiResponse.preconditionFailed( - "No suitable service peer & no discovery method") +const NoPeerNoDiscError* = + RestApiResponse.preconditionFailed("No suitable service peer & no discovery method") # Queries the store-node with the query parameters and # returns a RestApiResponse that is sent back to the api client. 
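# ---------------------------------------------------------------------------
# [Editor's note - illustrative sketch, not part of the patch] The proc below
# guards every store query with chronos' `withTimeout` so that a slow or
# unreachable store peer cannot stall the REST handler past `futTimeout`
# (5 seconds). The same guard pattern, reduced to a self-contained helper
# (the name `readWithTimeout` is hypothetical):

import chronos, stew/results

proc readWithTimeout[T](
    fut: Future[T], timeout: Duration
): Future[Result[T, string]] {.async.} =
  ## Wait for `fut`, but give up after `timeout`.
  if not await fut.withTimeout(timeout):
    return err("no response received (timeout)")
  # The future is finished at this point; read() returns its value
  # (and raises if the future itself failed).
  return ok(fut.read())
# ---------------------------------------------------------------------------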
-proc performHistoryQuery(selfNode: WakuNode, - histQuery: HistoryQuery, - storePeer: RemotePeerInfo): - - Future[RestApiResponse] {.async.} = - +proc performHistoryQuery( + selfNode: WakuNode, histQuery: HistoryQuery, storePeer: RemotePeerInfo +): Future[RestApiResponse] {.async.} = let queryFut = selfNode.query(histQuery, storePeer) if not await queryFut.withTimeout(futTimeout): const msg = "No history response received (timeout)" @@ -49,42 +40,38 @@ proc performHistoryQuery(selfNode: WakuNode, let res = queryFut.read() if res.isErr(): const msg = "Error occurred in queryFut.read()" - error msg, error=res.error - return RestApiResponse.internalServerError( - fmt("{msg} [{res.error}]")) + error msg, error = res.error + return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]")) let storeResp = res.value.toStoreResponseRest() - let resp = RestApiResponse.jsonResponse(storeResp, status=Http200) + let resp = RestApiResponse.jsonResponse(storeResp, status = Http200) if resp.isErr(): const msg = "Error building the json respose" - error msg, error=resp.error - return RestApiResponse.internalServerError( - fmt("{msg} [{resp.error}]")) + error msg, error = resp.error + return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]")) return resp.get() # Converts a string time representation into an Option[Timestamp]. # Only positive time is considered a valid Timestamp in the request -proc parseTime(input: Option[string]): - Result[Option[Timestamp], string] = +proc parseTime(input: Option[string]): Result[Option[Timestamp], string] = if input.isSome() and input.get() != "": try: let time = parseInt(input.get()) if time > 0: return ok(some(Timestamp(time))) except ValueError: - return err("Problem parsing time [" & - getCurrentExceptionMsg() & "]") + return err("Problem parsing time [" & getCurrentExceptionMsg() & "]") return ok(none(Timestamp)) # Generates a history query cursor as per the given params -proc parseCursor(parsedPubsubTopic: Option[string], - senderTime: Option[string], - storeTime: Option[string], - digest: Option[string]): - Result[Option[HistoryCursor], string] = - +proc parseCursor( + parsedPubsubTopic: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], +): Result[Option[HistoryCursor], string] = # Parse sender time let parsedSenderTime = parseTime(senderTime) if not parsedSenderTime.isOk(): @@ -101,34 +88,33 @@ proc parseCursor(parsedPubsubTopic: Option[string], return err(parsedMsgDigest.error) # Parse cursor information - if parsedPubsubTopic.isSome() and - parsedSenderTime.value.isSome() and - parsedStoreTime.value.isSome() and - parsedMsgDigest.value.isSome(): - - return ok(some( - HistoryCursor( - pubsubTopic: parsedPubsubTopic.get(), - senderTime: parsedSenderTime.value.get(), - storeTime: parsedStoreTime.value.get(), - digest: parsedMsgDigest.value.get()) - )) + if parsedPubsubTopic.isSome() and parsedSenderTime.value.isSome() and + parsedStoreTime.value.isSome() and parsedMsgDigest.value.isSome(): + return ok( + some( + HistoryCursor( + pubsubTopic: parsedPubsubTopic.get(), + senderTime: parsedSenderTime.value.get(), + storeTime: parsedStoreTime.value.get(), + digest: parsedMsgDigest.value.get(), + ) + ) + ) else: return ok(none(HistoryCursor)) # Creates a HistoryQuery from the given params -proc createHistoryQuery(pubsubTopic: Option[string], - contentTopics: Option[string], - senderTime: Option[string], - storeTime: Option[string], - digest: Option[string], - startTime: Option[string], - endTime: 
Option[string], - pageSize: Option[string], - direction: Option[string]): - - Result[HistoryQuery, string] = - +proc createHistoryQuery( + pubsubTopic: Option[string], + contentTopics: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], + startTime: Option[string], + endTime: Option[string], + pageSize: Option[string], + direction: Option[string], +): Result[HistoryQuery, string] = # Parse pubsubTopic parameter var parsedPubsubTopic = none(string) if pubsubTopic.isSome(): @@ -145,10 +131,7 @@ proc createHistoryQuery(pubsubTopic: Option[string], parsedContentTopics.add(ct) # Parse cursor information - let parsedCursor = ? parseCursor(parsedPubsubTopic, - senderTime, - storeTime, - digest) + let parsedCursor = ?parseCursor(parsedPubsubTopic, senderTime, storeTime, digest) # Parse page size field var parsedPagedSize = DefaultPageSize @@ -156,14 +139,13 @@ proc createHistoryQuery(pubsubTopic: Option[string], try: parsedPagedSize = uint64(parseInt(pageSize.get())) except CatchableError: - return err("Problem parsing page size [" & - getCurrentExceptionMsg() & "]") + return err("Problem parsing page size [" & getCurrentExceptionMsg() & "]") # Parse start time - let parsedStartTime = ? parseTime(startTime) + let parsedStartTime = ?parseTime(startTime) # Parse end time - let parsedEndTime = ? parseTime(endTime) + let parsedEndTime = ?parseTime(endTime) # Parse ascending field var parsedDirection = default() @@ -171,14 +153,16 @@ proc createHistoryQuery(pubsubTopic: Option[string], parsedDirection = direction.get().into() return ok( - HistoryQuery(pubsubTopic: parsedPubsubTopic, - contentTopics: parsedContentTopics, - startTime: parsedStartTime, - endTime: parsedEndTime, - direction: parsedDirection, - pageSize: parsedPagedSize, - cursor: parsedCursor - )) + HistoryQuery( + pubsubTopic: parsedPubsubTopic, + contentTopics: parsedContentTopics, + startTime: parsedStartTime, + endTime: parsedEndTime, + direction: parsedDirection, + pageSize: parsedPagedSize, + cursor: parsedCursor, + ) + ) # Simple type conversion. The "Option[Result[string, cstring]]" # type is used by the nim-presto library. @@ -188,8 +172,9 @@ proc toOpt(self: Option[Result[string, cstring]]): Option[string] = if self.isSome() and self.get().value != "": return some(self.get().value) -proc retrieveMsgsFromSelfNode(self: WakuNode, histQuery: HistoryQuery): - Future[RestApiResponse] {.async.} = +proc retrieveMsgsFromSelfNode( + self: WakuNode, histQuery: HistoryQuery +): Future[RestApiResponse] {.async.} = ## Performs a "store" request to the local node (self node.) ## Notice that this doesn't follow the regular store libp2p channel because a node ## it is not allowed to libp2p-dial a node to itself, by default. 
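# ---------------------------------------------------------------------------
# [Editor's note - illustration, not part of the patch] All parameters of the
# GET handler installed below arrive URL-encoded and optional; `senderTime`,
# `storeTime` and `digest` together replay the paging cursor returned by a
# previous response, and timestamps are nanoseconds since the UNIX epoch
# (Waku `Timestamp`). A query might look like this (host, port and content
# topic are made up for illustration):
#
#   GET http://127.0.0.1:8645/store/v1/messages
#       ?contentTopics=%2Fmy-app%2F1%2Fchat%2Fproto
#       &startTime=1680000000000000000
#       &pageSize=20
#       &ascending=true
# ---------------------------------------------------------------------------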
@@ -199,35 +184,32 @@ proc retrieveMsgsFromSelfNode(self: WakuNode, histQuery: HistoryQuery): return RestApiResponse.internalServerError($error) let storeResp = selfResp.toStoreResponseRest() - let resp = RestApiResponse.jsonResponse(storeResp, status=Http200).valueOr: + let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr: const msg = "Error building the json respose" - error msg, error=error + error msg, error = error return RestApiResponse.internalServerError(fmt("{msg} [{error}]")) return resp # Subscribes the rest handler to attend "/store/v1/messages" requests proc installStoreApiHandlers*( - router: var RestRouter, - node: WakuNode, - discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), - ) = - + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = # Handles the store-query request according to the passed parameters - router.api(MethodGet, - "/store/v1/messages") do ( - peerAddr: Option[string], - pubsubTopic: Option[string], - contentTopics: Option[string], - senderTime: Option[string], - storeTime: Option[string], - digest: Option[string], - startTime: Option[string], - endTime: Option[string], - pageSize: Option[string], - ascending: Option[string] - ) -> RestApiResponse: - + router.api(MethodGet, "/store/v1/messages") do( + peerAddr: Option[string], + pubsubTopic: Option[string], + contentTopics: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], + startTime: Option[string], + endTime: Option[string], + pageSize: Option[string], + ascending: Option[string] + ) -> RestApiResponse: debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr # All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding) @@ -236,16 +218,16 @@ proc installStoreApiHandlers*( # Parse the rest of the parameters and create a HistoryQuery let histQuery = createHistoryQuery( - pubsubTopic.toOpt(), - contentTopics.toOpt(), - senderTime.toOpt(), - storeTime.toOpt(), - digest.toOpt(), - startTime.toOpt(), - endTime.toOpt(), - pageSize.toOpt(), - ascending.toOpt() - ) + pubsubTopic.toOpt(), + contentTopics.toOpt(), + senderTime.toOpt(), + storeTime.toOpt(), + digest.toOpt(), + startTime.toOpt(), + endTime.toOpt(), + pageSize.toOpt(), + ascending.toOpt(), + ) if not histQuery.isOk(): return RestApiResponse.badRequest(histQuery.error) @@ -270,6 +252,7 @@ proc installStoreApiHandlers*( peerOp.valueOr: return RestApiResponse.preconditionFailed( - "No suitable service peer & none discovered") + "No suitable service peer & none discovered" + ) - return await node.performHistoryQuery(histQuery.value, peerAddr) \ No newline at end of file + return await node.performHistoryQuery(histQuery.value, peerAddr) diff --git a/waku/waku_api/rest/store/types.nim b/waku/waku_api/rest/store/types.nim index b4d3f76b9..8d08e8693 100644 --- a/waku/waku_api/rest/store/types.nim +++ b/waku/waku_api/rest/store/types.nim @@ -16,7 +16,6 @@ import ../../../waku_core, ../serdes - #### Types type @@ -44,8 +43,7 @@ type ephemeral*: Option[bool] meta*: Option[Base64String] - StoreResponseRest* = object - # inspired by https://rfc.vac.dev/spec/16/#storeresponse + StoreResponseRest* = object # inspired by https://rfc.vac.dev/spec/16/#storeresponse messages*: seq[StoreWakuMessage] cursor*: Option[HistoryCursorRest] # field that contains error information @@ -53,15 +51,12 @@ type createJsonFlavor RestJson -Json.setWriter JsonWriter, - PreferredOutput = string +Json.setWriter 
JsonWriter, PreferredOutput = string #### Type conversion # Converts a URL-encoded-base64 string into a 'MessageDigest' -proc parseMsgDigest*(input: Option[string]): - Result[Option[MessageDigest], string] = - +proc parseMsgDigest*(input: Option[string]): Result[Option[MessageDigest], string] = if not input.isSome() or input.get() == "": return ok(none(MessageDigest)) @@ -76,11 +71,11 @@ proc parseMsgDigest*(input: Option[string]): # Next snippet inspired by "nwaku/waku/waku_archive/archive.nim" # TODO: Improve coherence of MessageDigest type messageDigest = block: - var data: array[32, byte] - for i in 0.. 0: some(base64.encode(message.meta)) else: none(Base64String) + meta: + if message.meta.len > 0: + some(base64.encode(message.meta)) + else: + none(Base64String) + , ) var storeWakuMsgs: seq[StoreWakuMessage] @@ -122,23 +121,22 @@ proc toStoreResponseRest*(histResp: HistoryResponse): StoreResponseRest = var cursor = none(HistoryCursorRest) if histResp.cursor.isSome: - cursor = some(HistoryCursorRest( - pubsubTopic: histResp.cursor.get().pubsubTopic, - senderTime: histResp.cursor.get().senderTime, - storeTime: histResp.cursor.get().storeTime, - digest: histResp.cursor.get().digest - )) + cursor = some( + HistoryCursorRest( + pubsubTopic: histResp.cursor.get().pubsubTopic, + senderTime: histResp.cursor.get().senderTime, + storeTime: histResp.cursor.get().storeTime, + digest: histResp.cursor.get().digest, + ) + ) - StoreResponseRest( - messages: storeWakuMsgs, - cursor: cursor - ) + StoreResponseRest(messages: storeWakuMsgs, cursor: cursor) ## Beginning of StoreWakuMessage serde -proc writeValue*(writer: var JsonWriter, - value: StoreWakuMessage) - {.gcsafe, raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter, value: StoreWakuMessage +) {.gcsafe, raises: [IOError].} = writer.beginRecord() writer.writeField("payload", $value.payload) if value.contentTopic.isSome(): @@ -153,9 +151,9 @@ proc writeValue*(writer: var JsonWriter, writer.writeField("meta", value.meta.get()) writer.endRecord() -proc readValue*(reader: var JsonReader, - value: var StoreWakuMessage) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader, value: var StoreWakuMessage +) {.gcsafe, raises: [SerializationError, IOError].} = var payload = none(Base64String) contentTopic = none(ContentTopic) @@ -168,8 +166,11 @@ proc readValue*(reader: var JsonReader, for fieldName in readObjectFields(reader): # Check for reapeated keys if keys.containsOrIncl(fieldName): - let err = try: fmt"Multiple `{fieldName}` fields found" - except CatchableError: "Multiple fields with the same name found" + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" reader.raiseUnexpectedField(err, "StoreWakuMessage") case fieldName @@ -197,25 +198,24 @@ proc readValue*(reader: var JsonReader, version: version, timestamp: timestamp, ephemeral: ephemeral, - meta: meta + meta: meta, ) ## End of StoreWakuMessage serde ## Beginning of MessageDigest serde -proc writeValue*(writer: var JsonWriter, - value: MessageDigest) - {.gcsafe, raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter, value: MessageDigest +) {.gcsafe, raises: [IOError].} = writer.beginRecord() writer.writeField("data", base64.encode(value.data)) writer.endRecord() -proc readValue*(reader: var JsonReader, - value: var MessageDigest) - {.gcsafe, raises: [SerializationError, IOError].} = - var - data = none(seq[byte]) +proc readValue*( + reader: var 
JsonReader, value: var MessageDigest +) {.gcsafe, raises: [SerializationError, IOError].} = + var data = none(seq[byte]) for fieldName in readObjectFields(reader): case fieldName @@ -232,16 +232,16 @@ proc readValue*(reader: var JsonReader, if data.isNone(): reader.raiseUnexpectedValue("Field `data` is missing") - for i in 0..<32: + for i in 0 ..< 32: value.data[i] = data.get()[i] ## End of MessageDigest serde ## Beginning of HistoryCursorRest serde -proc writeValue*(writer: var JsonWriter, - value: HistoryCursorRest) - {.gcsafe, raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter, value: HistoryCursorRest +) {.gcsafe, raises: [IOError].} = writer.beginRecord() writer.writeField("pubsub_topic", value.pubsubTopic) writer.writeField("sender_time", value.senderTime) @@ -249,9 +249,9 @@ proc writeValue*(writer: var JsonWriter, writer.writeField("digest", value.digest) writer.endRecord() -proc readValue*(reader: var JsonReader, - value: var HistoryCursorRest) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader, value: var HistoryCursorRest +) {.gcsafe, raises: [SerializationError, IOError].} = var pubsubTopic = none(PubsubTopic) senderTime = none(Timestamp) @@ -262,19 +262,27 @@ proc readValue*(reader: var JsonReader, case fieldName of "pubsub_topic": if pubsubTopic.isSome(): - reader.raiseUnexpectedField("Multiple `pubsub_topic` fields found", "HistoryCursorRest") + reader.raiseUnexpectedField( + "Multiple `pubsub_topic` fields found", "HistoryCursorRest" + ) pubsubTopic = some(reader.readValue(PubsubTopic)) of "sender_time": if senderTime.isSome(): - reader.raiseUnexpectedField("Multiple `sender_time` fields found", "HistoryCursorRest") + reader.raiseUnexpectedField( + "Multiple `sender_time` fields found", "HistoryCursorRest" + ) senderTime = some(reader.readValue(Timestamp)) of "store_time": if storeTime.isSome(): - reader.raiseUnexpectedField("Multiple `store_time` fields found", "HistoryCursorRest") + reader.raiseUnexpectedField( + "Multiple `store_time` fields found", "HistoryCursorRest" + ) storeTime = some(reader.readValue(Timestamp)) of "digest": if digest.isSome(): - reader.raiseUnexpectedField("Multiple `digest` fields found", "HistoryCursorRest") + reader.raiseUnexpectedField( + "Multiple `digest` fields found", "HistoryCursorRest" + ) digest = some(reader.readValue(MessageDigest)) else: reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) @@ -295,16 +303,16 @@ proc readValue*(reader: var JsonReader, pubsubTopic: pubsubTopic.get(), senderTime: senderTime.get(), storeTime: storeTime.get(), - digest: digest.get() + digest: digest.get(), ) ## End of HistoryCursorRest serde ## Beginning of StoreResponseRest serde -proc writeValue*(writer: var JsonWriter, - value: StoreResponseRest) - {.gcsafe, raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter, value: StoreResponseRest +) {.gcsafe, raises: [IOError].} = writer.beginRecord() writer.writeField("messages", value.messages) if value.cursor.isSome(): @@ -313,9 +321,9 @@ proc writeValue*(writer: var JsonWriter, writer.writeField("error_message", value.errorMessage.get()) writer.endRecord() -proc readValue*(reader: var JsonReader, - value: var StoreResponseRest) - {.gcsafe, raises: [SerializationError, IOError].} = +proc readValue*( + reader: var JsonReader, value: var StoreResponseRest +) {.gcsafe, raises: [SerializationError, IOError].} = var messages = none(seq[StoreWakuMessage]) cursor = none(HistoryCursorRest) @@ -325,15 +333,21 @@ proc 
readValue*(reader: var JsonReader, case fieldName of "messages": if messages.isSome(): - reader.raiseUnexpectedField("Multiple `messages` fields found", "StoreResponseRest") + reader.raiseUnexpectedField( + "Multiple `messages` fields found", "StoreResponseRest" + ) messages = some(reader.readValue(seq[StoreWakuMessage])) of "cursor": if cursor.isSome(): - reader.raiseUnexpectedField("Multiple `cursor` fields found", "StoreResponseRest") + reader.raiseUnexpectedField( + "Multiple `cursor` fields found", "StoreResponseRest" + ) cursor = some(reader.readValue(HistoryCursorRest)) of "error_message": if errorMessage.isSome(): - reader.raiseUnexpectedField("Multiple `error_message` fields found", "StoreResponseRest") + reader.raiseUnexpectedField( + "Multiple `error_message` fields found", "StoreResponseRest" + ) errorMessage = some(reader.readValue(string)) else: reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) @@ -342,19 +356,16 @@ proc readValue*(reader: var JsonReader, reader.raiseUnexpectedValue("Field `messages` is missing") value = StoreResponseRest( - messages: messages.get(), - cursor: cursor, - errorMessage: errorMessage + messages: messages.get(), cursor: cursor, errorMessage: errorMessage ) ## End of StoreResponseRest serde ## Beginning of StoreRequestRest serde -proc writeValue*(writer: var JsonWriter, - value: StoreRequestRest) - {.gcsafe, raises: [IOError].} = - +proc writeValue*( + writer: var JsonWriter, value: StoreRequestRest +) {.gcsafe, raises: [IOError].} = writer.beginRecord() if value.pubsubTopic.isSome(): writer.writeField("pubsub_topic", value.pubsubTopic.get()) @@ -368,4 +379,3 @@ proc writeValue*(writer: var JsonWriter, writer.endRecord() ## End of StoreRequestRest serde - diff --git a/waku/waku_archive.nim b/waku/waku_archive.nim index 9d716c948..14c7b6071 100644 --- a/waku/waku_archive.nim +++ b/waku/waku_archive.nim @@ -4,8 +4,4 @@ import ./waku_archive/driver, ./waku_archive/retention_policy -export - common, - archive, - driver, - retention_policy +export common, archive, driver, retention_policy diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim index 2c81e103b..903852a2b 100644 --- a/waku/waku_archive/archive.nim +++ b/waku/waku_archive/archive.nim @@ -25,17 +25,18 @@ const DefaultPageSize*: uint = 20 MaxPageSize*: uint = 100 -# Retention policy + # Retention policy WakuArchiveDefaultRetentionPolicyInterval* = chronos.minutes(30) -# Metrics reporting + # Metrics reporting WakuArchiveDefaultMetricsReportInterval* = chronos.minutes(1) -# Message validation -# 20 seconds maximum allowable sender timestamp "drift" - MaxMessageTimestampVariance* = getNanoSecondTime(20) + # Message validation + # 20 seconds maximum allowable sender timestamp "drift" + MaxMessageTimestampVariance* = getNanoSecondTime(20) -type MessageValidator* = proc(msg: WakuMessage): Result[void, string] {.closure, gcsafe, raises: [].} +type MessageValidator* = + proc(msg: WakuMessage): Result[void, string] {.closure, gcsafe, raises: [].} ## Archive @@ -53,7 +54,7 @@ proc validate*(msg: WakuMessage): Result[void, string] = if msg.ephemeral: # Ephemeral message, do not store return - + if msg.timestamp == 0: return ok() @@ -70,26 +71,23 @@ proc validate*(msg: WakuMessage): Result[void, string] = return ok() -proc new*(T: type WakuArchive, - driver: ArchiveDriver, - validator: MessageValidator = validate, - retentionPolicy = none(RetentionPolicy)): - Result[T, string] = +proc new*( + T: type WakuArchive, + driver: ArchiveDriver, + validator: 
MessageValidator = validate, + retentionPolicy = none(RetentionPolicy), +): Result[T, string] = if driver.isNil(): return err("archive driver is Nil") let archive = - WakuArchive( - driver: driver, - validator: validator, - retentionPolicy: retentionPolicy, - ) + WakuArchive(driver: driver, validator: validator, retentionPolicy: retentionPolicy) return ok(archive) -proc handleMessage*(self: WakuArchive, - pubsubTopic: PubsubTopic, - msg: WakuMessage) {.async.} = +proc handleMessage*( + self: WakuArchive, pubsubTopic: PubsubTopic, msg: WakuMessage +) {.async.} = self.validator(msg).isOkOr: waku_archive_errors.inc(labelValues = [error]) return @@ -97,17 +95,20 @@ proc handleMessage*(self: WakuArchive, let msgDigest = computeDigest(msg) msgHash = computeMessageHash(pubsubTopic, msg) - msgTimestamp = if msg.timestamp > 0: msg.timestamp - else: getNanosecondTime(getTime().toUnixFloat()) + msgTimestamp = + if msg.timestamp > 0: + msg.timestamp + else: + getNanosecondTime(getTime().toUnixFloat()) trace "handling message", - pubsubTopic=pubsubTopic, - contentTopic=msg.contentTopic, - msgTimestamp=msg.timestamp, - usedTimestamp=msgTimestamp, - digest=toHex(msgDigest.data), - messageHash=toHex(msgHash) - + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + msgTimestamp = msg.timestamp, + usedTimestamp = msgTimestamp, + digest = toHex(msgDigest.data), + messageHash = toHex(msgHash) + let insertStartTime = getTime().toUnixFloat() (await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr: @@ -116,22 +117,24 @@ proc handleMessage*(self: WakuArchive, # In that case, the message cannot be inserted but is an expected "insert error" # and therefore we reduce its visibility by having the log in trace level. if "duplicate key value violates unique constraint" in error: - trace "failed to insert message", err=error + trace "failed to insert message", err = error else: - debug "failed to insert message", err=error - + debug "failed to insert message", err = error + let insertDuration = getTime().toUnixFloat() - insertStartTime waku_archive_insert_duration_seconds.observe(insertDuration) -proc findMessages*(self: WakuArchive, query: ArchiveQuery): Future[ArchiveResult] {.async, gcsafe.} = +proc findMessages*( + self: WakuArchive, query: ArchiveQuery +): Future[ArchiveResult] {.async, gcsafe.} = ## Search the archive to return a single page of messages matching the query criteria - + let maxPageSize = if query.pageSize <= 0: DefaultPageSize else: min(query.pageSize, MaxPageSize) - + let isAscendingOrder = query.direction.into() if query.contentTopics.len > 10: @@ -139,16 +142,18 @@ proc findMessages*(self: WakuArchive, query: ArchiveQuery): Future[ArchiveResult let queryStartTime = getTime().toUnixFloat() - let rows = (await self.driver.getMessages( - contentTopic = query.contentTopics, - pubsubTopic = query.pubsubTopic, - cursor = query.cursor, - startTime = query.startTime, - endTime = query.endTime, - hashes = query.hashes, - maxPageSize = maxPageSize + 1, - ascendingOrder = isAscendingOrder - )).valueOr: + let rows = ( + await self.driver.getMessages( + contentTopic = query.contentTopics, + pubsubTopic = query.pubsubTopic, + cursor = query.cursor, + startTime = query.startTime, + endTime = query.endTime, + hashes = query.hashes, + maxPageSize = maxPageSize + 1, + ascendingOrder = isAscendingOrder, + ) + ).valueOr: return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error)) let queryDuration = getTime().toUnixFloat() - queryStartTime @@ -157,34 +162,36 @@ 
proc findMessages*(self: WakuArchive, query: ArchiveQuery): Future[ArchiveResult var hashes = newSeq[WakuMessageHash]() var messages = newSeq[WakuMessage]() var cursor = none(ArchiveCursor) - + if rows.len == 0: return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor)) ## Messages let pageSize = min(rows.len, int(maxPageSize)) - + #TODO once store v2 is removed, unzip instead of 2x map - messages = rows[0.. int(maxPageSize): ## Build last message cursor ## The cursor is built from the last message INCLUDED in the response ## (i.e. the second last message in the rows list) - + #TODO Once Store v2 is removed keep only message and hash let (pubsubTopic, message, digest, storeTimestamp, hash) = rows[^2] #TODO Once Store v2 is removed, the cursor becomes the hash of the last message - cursor = some(ArchiveCursor( - digest: MessageDigest.fromBytes(digest), - storeTime: storeTimestamp, - sendertime: message.timestamp, - pubsubTopic: pubsubTopic, - hash: hash, - )) + cursor = some( + ArchiveCursor( + digest: MessageDigest.fromBytes(digest), + storeTime: storeTimestamp, + sendertime: message.timestamp, + pubsubTopic: pubsubTopic, + hash: hash, + ) + ) # All messages MUST be returned in chronological order if not isAscendingOrder: @@ -201,7 +208,7 @@ proc periodicRetentionPolicy(self: WakuArchive) {.async.} = while true: (await policy.execute(self.driver)).isOkOr: waku_archive_errors.inc(labelValues = [retPolicyFailure]) - error "failed execution of retention policy", error=error + error "failed execution of retention policy", error = error await sleepAsync(WakuArchiveDefaultRetentionPolicyInterval) @@ -209,7 +216,8 @@ proc periodicMetricReport(self: WakuArchive) {.async.} = while true: let countRes = (await self.driver.getMessagesCount()) if countRes.isErr(): - error "loopReportStoredMessagesMetric failed to get messages count", error=countRes.error + error "loopReportStoredMessagesMetric failed to get messages count", + error = countRes.error else: let count = countRes.get() waku_archive_messages.set(count, labelValues = ["stored"]) @@ -231,4 +239,4 @@ proc stopWait*(self: WakuArchive) {.async.} = if not self.metricsHandle.isNil: futures.add(self.metricsHandle.cancelAndWait()) - await noCancel(allFutures(futures)) \ No newline at end of file + await noCancel(allFutures(futures)) diff --git a/waku/waku_archive/archive_metrics.nim b/waku/waku_archive/archive_metrics.nim index 52fbd45fd..08b15b0a6 100644 --- a/waku/waku_archive/archive_metrics.nim +++ b/waku/waku_archive/archive_metrics.nim @@ -5,14 +5,13 @@ else: import metrics - declarePublicGauge waku_archive_messages, "number of historical messages", ["type"] declarePublicGauge waku_archive_errors, "number of store protocol errors", ["type"] declarePublicGauge waku_archive_queries, "number of store queries received" -declarePublicHistogram waku_archive_insert_duration_seconds, "message insertion duration" +declarePublicHistogram waku_archive_insert_duration_seconds, + "message insertion duration" declarePublicHistogram waku_archive_query_duration_seconds, "history query duration" - # Error types (metric label values) const invalidMessageOld* = "invalid_message_too_old" diff --git a/waku/waku_archive/common.nim b/waku/waku_archive/common.nim index b679e63f8..0d469ce0e 100644 --- a/waku/waku_archive/common.nim +++ b/waku/waku_archive/common.nim @@ -3,24 +3,16 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options, - stew/results, - stew/byteutils, - stew/arrayops, - nimcrypto/sha2 -import - 
../waku_core, - ../common/paging +import std/options, stew/results, stew/byteutils, stew/arrayops, nimcrypto/sha2 +import ../waku_core, ../common/paging ## Waku message digest type MessageDigest* = MDigest[256] proc fromBytes*(T: type MessageDigest, src: seq[byte]): T = - var data: array[32, byte] - + let byteCount = copyFrom[byte](data, src) assert byteCount == 32 @@ -30,7 +22,8 @@ proc fromBytes*(T: type MessageDigest, src: seq[byte]): T = proc computeDigest*(msg: WakuMessage): MessageDigest = var ctx: sha256 ctx.init() - defer: ctx.clear() + defer: + ctx.clear() ctx.update(msg.contentTopic.toBytes()) ctx.update(msg.payload) @@ -79,9 +72,8 @@ type ArchiveResult* = Result[ArchiveResponse, ArchiveError] - proc `$`*(err: ArchiveError): string = - case err.kind: + case err.kind of ArchiveErrorKind.DRIVER_ERROR: "DIRVER_ERROR: " & err.cause of ArchiveErrorKind.INVALID_QUERY: diff --git a/waku/waku_archive/driver.nim b/waku/waku_archive/driver.nim index 6ca21b24b..e91a98ff7 100644 --- a/waku/waku_archive/driver.nim +++ b/waku/waku_archive/driver.nim @@ -3,13 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options, - stew/results, - chronos -import - ../waku_core, - ./common +import std/options, stew/results, chronos +import ../waku_core, ./common const DefaultPageSize*: uint = 25 @@ -22,65 +17,90 @@ type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageH # ArchiveDriver interface -method put*(driver: ArchiveDriver, - pubsubTopic: PubsubTopic, - message: WakuMessage, - digest: MessageDigest, - messageHash: WakuMessageHash, - receivedTime: Timestamp): - Future[ArchiveDriverResult[void]] {.base, async.} = discard +method put*( + driver: ArchiveDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard -method getAllMessages*(driver: ArchiveDriver): - Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = discard +method getAllMessages*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = + discard -method getMessages*(driver: ArchiveDriver, - contentTopic = newSeq[ContentTopic](0), - pubsubTopic = none(PubsubTopic), - cursor = none(ArchiveCursor), - startTime = none(Timestamp), - endTime = none(Timestamp), - hashes = newSeq[WakuMessageHash](0), - maxPageSize = DefaultPageSize, - ascendingOrder = true): - Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = discard +method getMessages*( + driver: ArchiveDriver, + contentTopic = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = + discard -method getMessagesCount*(driver: ArchiveDriver): - Future[ArchiveDriverResult[int64]] {.base, async.} = discard +method getMessagesCount*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard -method getPagesCount*(driver: ArchiveDriver): - Future[ArchiveDriverResult[int64]] {.base, async.} = discard +method getPagesCount*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard -method getPagesSize*(driver: ArchiveDriver): - Future[ArchiveDriverResult[int64]] {.base, async.} = discard +method getPagesSize*( + driver: 
ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard -method getDatabaseSize*(driver: ArchiveDriver): - Future[ArchiveDriverResult[int64]] {.base, async.} = discard +method getDatabaseSize*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard -method performVacuum*(driver: ArchiveDriver): - Future[ArchiveDriverResult[void]] {.base, async.} = discard +method performVacuum*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard -method getOldestMessageTimestamp*(driver: ArchiveDriver): - Future[ArchiveDriverResult[Timestamp]] {.base, async.} = discard +method getOldestMessageTimestamp*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[Timestamp]] {.base, async.} = + discard -method getNewestMessageTimestamp*(driver: ArchiveDriver): - Future[ArchiveDriverResult[Timestamp]] {.base, async.} = discard +method getNewestMessageTimestamp*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[Timestamp]] {.base, async.} = + discard -method deleteMessagesOlderThanTimestamp*(driver: ArchiveDriver, - ts: Timestamp): - Future[ArchiveDriverResult[void]] {.base, async.} = discard +method deleteMessagesOlderThanTimestamp*( + driver: ArchiveDriver, ts: Timestamp +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard -method deleteOldestMessagesNotWithinLimit*(driver: ArchiveDriver, - limit: int): - Future[ArchiveDriverResult[void]] {.base, async.} = discard +method deleteOldestMessagesNotWithinLimit*( + driver: ArchiveDriver, limit: int +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard -method decreaseDatabaseSize*(driver: ArchiveDriver, - targetSizeInBytes: int64, - forceRemoval: bool = false): - Future[ArchiveDriverResult[void]] {.base, async.} = discard +method decreaseDatabaseSize*( + driver: ArchiveDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard -method close*(driver: ArchiveDriver): - Future[ArchiveDriverResult[void]] {.base, async.} = discard - -method existsTable*(driver: ArchiveDriver, tableName: string): - Future[ArchiveDriverResult[bool]] {.base, async.} = discard +method close*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard +method existsTable*( + driver: ArchiveDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.base, async.} = + discard diff --git a/waku/waku_archive/driver/builder.nim b/waku/waku_archive/driver/builder.nim index e32dce201..1768774d2 100644 --- a/waku/waku_archive/driver/builder.nim +++ b/waku/waku_archive/driver/builder.nim @@ -1,13 +1,9 @@ - when (NimMajor, NimMinor) < (1, 4): {.push raises: [Defect].} else: {.push raises: [].} -import - stew/results, - chronicles, - chronos +import stew/results, chronicles, chronos import ../driver, ../../common/databases/dburl, @@ -17,23 +13,22 @@ import ./sqlite_driver/migrations as archive_driver_sqlite_migrations, ./queue_driver -export - sqlite_driver, - queue_driver +export sqlite_driver, queue_driver when defined(postgres): import ## These imports add dependency with an external libpq library - ./postgres_driver/migrations as archive_postgres_driver_migrations, - ./postgres_driver + ./postgres_driver/migrations as archive_postgres_driver_migrations, + ./postgres_driver export postgres_driver -proc new*(T: type ArchiveDriver, - url: string, - vacuum: bool, - migrate: bool, - maxNumConn: int, - onFatalErrorAction: OnFatalErrorHandler): - Future[Result[T, string]] 
{.async.} = +proc new*( + T: type ArchiveDriver, + url: string, + vacuum: bool, + migrate: bool, + maxNumConn: int, + onFatalErrorAction: OnFatalErrorHandler, +): Future[Result[T, string]] {.async.} = ## url - string that defines the database ## vacuum - if true, a cleanup operation will be applied to the database ## migrate - if true, the database schema will be updated @@ -42,13 +37,11 @@ proc new*(T: type ArchiveDriver, let dbUrlValidationRes = dburl.validateDbUrl(url) if dbUrlValidationRes.isErr(): - return err("DbUrl failure in ArchiveDriver.new: " & - dbUrlValidationRes.error) + return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error) let engineRes = dburl.getDbEngine(url) if engineRes.isErr(): - return err("error getting db engine in setupWakuArchiveDriver: " & - engineRes.error) + return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error) let engine = engineRes.get() @@ -70,9 +63,8 @@ proc new*(T: type ArchiveDriver, return err("error while gathering sqlite stats: " & $sqliteStatsRes.error) let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get() - debug "sqlite database page stats", pageSize = pageSize, - pages = pageCount, - freePages = freelistCount + debug "sqlite database page stats", + pageSize = pageSize, pages = pageCount, freePages = freelistCount if vacuum and (pageCount > 0 and freelistCount > 0): let vacuumRes = db.performSqliteVacuum() @@ -91,12 +83,13 @@ proc new*(T: type ArchiveDriver, return err("failed to init sqlite archive driver: " & res.error) return ok(res.get()) - of "postgres": when defined(postgres): - let res = PostgresDriver.new(dbUrl = url, - maxConnections = maxNumConn, - onFatalErrorAction = onFatalErrorAction) + let res = PostgresDriver.new( + dbUrl = url, + maxConnections = maxNumConn, + onFatalErrorAction = onFatalErrorAction, + ) if res.isErr(): return err("failed to init postgres archive driver: " & res.error) @@ -113,7 +106,7 @@ proc new*(T: type ArchiveDriver, asyncSpawn driver.startPartitionFactory(onFatalErrorAction) info "waiting for a partition to be created" - for i in 0..<100: + for i in 0 ..< 100: if driver.containsAnyPartition(): break await sleepAsync(chronos.milliseconds(100)) @@ -122,12 +115,11 @@ proc new*(T: type ArchiveDriver, onFatalErrorAction("a partition could not be created") return ok(driver) - else: - return err("Postgres has been configured but not been compiled. Check compiler definitions.") - + return err( + "Postgres has been configured but not been compiled. Check compiler definitions." 
+      )
   else:
     debug "setting up in-memory waku archive driver"
-    let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
+    let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
     return ok(driver)
-
diff --git a/waku/waku_archive/driver/postgres_driver.nim b/waku/waku_archive/driver/postgres_driver.nim
index c7e908344..a106eb2c4 100644
--- a/waku/waku_archive/driver/postgres_driver.nim
+++ b/waku/waku_archive/driver/postgres_driver.nim
@@ -8,8 +8,4 @@ import
   ./postgres_driver/partitions_manager,
   ./postgres_driver/postgres_healthcheck
 
-export
-  postgres_driver,
-  partitions_manager,
-  postgres_healthcheck
-
+export postgres_driver, partitions_manager, postgres_healthcheck
diff --git a/waku/waku_archive/driver/postgres_driver/migrations.nim b/waku/waku_archive/driver/postgres_driver/migrations.nim
index 92f658db9..c4b25228a 100644
--- a/waku/waku_archive/driver/postgres_driver/migrations.nim
+++ b/waku/waku_archive/driver/postgres_driver/migrations.nim
@@ -1,10 +1,6 @@
 {.push raises: [].}
 
-import
-  std/[tables, strutils, os],
-  stew/results,
-  chronicles,
-  chronos
+import std/[tables, strutils, os], stew/results, chronicles, chronos
 import
   ../../../common/databases/common,
   ../../../../migrations/message_store_postgres/pg_migration_manager,
@@ -38,7 +34,6 @@ proc breakIntoStatements*(script: string): seq[string] =
         plSqlStatement = ""
         insidePlSqlScript = false
         continue
-
       else:
         plSqlStatement &= line & "\n"
 
@@ -58,10 +53,9 @@
 
   return statements
 
-proc migrate*(driver: PostgresDriver,
-              targetVersion = SchemaVersion):
-              Future[DatabaseResult[void]] {.async.} =
-
+proc migrate*(
+    driver: PostgresDriver, targetVersion = SchemaVersion
+): Future[DatabaseResult[void]] {.async.} =
   debug "starting message store's postgres database migration"
 
   let currentVersion = (await driver.getCurrentVersion()).valueOr:
@@ -69,27 +63,27 @@ proc migrate*(driver: PostgresDriver,
 
   if currentVersion == targetVersion:
     debug "database schema is up to date",
-      currentVersion=currentVersion, targetVersion=targetVersion
+      currentVersion = currentVersion, targetVersion = targetVersion
     return ok()
 
-  info "database schema is outdated", currentVersion=currentVersion, targetVersion=targetVersion
+  info "database schema is outdated",
+    currentVersion = currentVersion, targetVersion = targetVersion
 
   # Load migration scripts
   let scripts = pg_migration_manager.getMigrationScripts(currentVersion, targetVersion)
 
   # Run the migration scripts
   for script in scripts:
-
     for statement in script.breakIntoStatements():
-      debug "executing migration statement", statement=statement
+      debug "executing migration statement", statement = statement
 
       (await driver.performWriteQuery(statement)).isOkOr:
-        error "failed to execute migration statement", statement=statement, error=error
+        error "failed to execute migration statement",
+          statement = statement, error = error
         return err("failed to execute migration statement")
 
-      debug "migration statement executed succesfully", statement=statement
+      debug "migration statement executed successfully", statement = statement
 
   debug "finished message store's postgres database migration"
   return ok()
-
diff --git a/waku/waku_archive/driver/postgres_driver/partitions_manager.nim b/waku/waku_archive/driver/postgres_driver/partitions_manager.nim
index a9d90d8e9..2e718473b 100644
--- a/waku/waku_archive/driver/postgres_driver/partitions_manager.nim
+++ b/waku/waku_archive/driver/postgres_driver/partitions_manager.nim
@@ -1,15 +1,11 @@
-
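# ---------------------------------------------------------------------------
# [Editor's note - illustrative sketch, not part of the patch] The module
# below keeps one partition table per time window, referenced by `storedAt`
# seconds. Window membership is a half-open interval test,
# `beginning <= t < end`, which is what `containsMoment` implements further
# down. The same check, reduced to a self-contained example (the local
# `TimeRange` type and the sample values are illustrative only):

type TimeRange = tuple[beginning: int64, `end`: int64]

func containsMoment(window: TimeRange, time: int64): bool =
  ## true iff `time` (seconds since epoch) falls inside `window`
  window.beginning <= time and time < window.`end`

doAssert containsMoment((beginning: 0'i64, `end`: 3600'i64), 1800'i64)
doAssert not containsMoment((beginning: 0'i64, `end`: 3600'i64), 3600'i64)
# ---------------------------------------------------------------------------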
## This module is aimed to handle the creation and truncation of partition tables ## in order to limit the space occupied in disk by the database. ## ## The created partitions are referenced by the 'storedAt' field. ## -import - std/deques -import - chronos, - chronicles +import std/deques +import chronos, chronicles logScope: topics = "waku archive partitions_manager" @@ -23,14 +19,15 @@ type timeRange: TimeRange PartitionManager* = ref object - partitions: Deque[Partition] # FIFO of partition table names. The first is the oldest partition + partitions: Deque[Partition] + # FIFO of partition table names. The first is the oldest partition proc new*(T: type PartitionManager): T = return PartitionManager() -proc getPartitionFromDateTime*(self: PartitionManager, - targetMoment: int64): - Result[Partition, string] = +proc getPartitionFromDateTime*( + self: PartitionManager, targetMoment: int64 +): Result[Partition, string] = ## Returns the partition name that might store a message containing the passed timestamp. ## In order words, it simply returns the partition name which contains the given timestamp. ## targetMoment - represents the time of interest, measured in seconds since epoch. @@ -63,10 +60,9 @@ proc getOldestPartition*(self: PartitionManager): Result[Partition, string] = let oldestPartition = self.partitions.peekFirst return ok(oldestPartition) -proc addPartitionInfo*(self: PartitionManager, - partitionName: string, - beginning: int64, - `end`: int64) = +proc addPartitionInfo*( + self: PartitionManager, partitionName: string, beginning: int64, `end`: int64 +) = ## The given partition range has seconds resolution. ## We just store information of the new added partition merely to keep track of it. let partitionInfo = Partition(name: partitionName, timeRange: (beginning, `end`)) @@ -91,8 +87,7 @@ proc containsMoment*(partition: Partition, time: int64): bool = ## Returns true if the given moment is contained within the partition window, ## 'false' otherwise. 
## time - number of seconds since epoch - if partition.timeRange.beginning <= time and - time < partition.timeRange.`end`: + if partition.timeRange.beginning <= time and time < partition.timeRange.`end`: return true return false @@ -102,4 +97,3 @@ proc getName*(partition: Partition): string = func `==`*(a, b: Partition): bool {.inline.} = return a.name == b.name - diff --git a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim index 9e66c182b..e62634b08 100644 --- a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim @@ -29,14 +29,13 @@ type PostgresDriver* = ref object of ArchiveDriver futLoopPartitionFactory: Future[void] const InsertRowStmtName = "InsertRow" -const InsertRowStmtDefinition = - # TODO: get the sql queries from a file - """INSERT INTO messages (id, messageHash, storedAt, contentTopic, payload, pubsubTopic, +const InsertRowStmtDefinition = # TODO: get the sql queries from a file + """INSERT INTO messages (id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp) VALUES ($1, $2, $3, $4, $5, $6, $7, $8);""" const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc" const SelectNoCursorAscStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 AND storedAt >= $3 AND @@ -45,7 +44,7 @@ const SelectNoCursorAscStmtDef = const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc" const SelectNoCursorDescStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 AND storedAt >= $3 AND @@ -54,7 +53,7 @@ const SelectNoCursorDescStmtDef = const SelectWithCursorDescStmtName = "SelectWithCursorDesc" const SelectWithCursorDescStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 AND (storedAt, id) < ($3,$4) AND @@ -64,7 +63,7 @@ const SelectWithCursorDescStmtDef = const SelectWithCursorAscStmtName = "SelectWithCursorAsc" const SelectWithCursorAscStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 AND (storedAt, id) > ($3,$4) AND @@ -74,12 +73,12 @@ const SelectWithCursorAscStmtDef = const DefaultMaxNumConns = 50 -proc new*(T: type PostgresDriver, - dbUrl: string, - maxConnections = DefaultMaxNumConns, - onFatalErrorAction: OnFatalErrorHandler = nil): - ArchiveDriverResult[T] = - +proc new*( + T: type PostgresDriver, + dbUrl: string, + maxConnections = DefaultMaxNumConns, + onFatalErrorAction: OnFatalErrorHandler = nil, +): ArchiveDriverResult[T] = ## Very simplistic split of max connections let maxNumConnOnEachPool = int(maxConnections / 2) @@ -95,9 +94,11 @@ proc new*(T: type PostgresDriver, if not isNil(onFatalErrorAction): asyncSpawn 
checkConnectivity(writeConnPool, onFatalErrorAction) - let driver = PostgresDriver(writeConnPool: writeConnPool, - readConnPool: readConnPool, - partitionMngr: PartitionManager.new()) + let driver = PostgresDriver( + writeConnPool: writeConnPool, + readConnPool: readConnPool, + partitionMngr: PartitionManager.new(), + ) return ok(driver) proc reset*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} = @@ -108,9 +109,9 @@ proc reset*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} = return ret proc rowCallbackImpl( - pqResult: ptr PGresult, - outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)], - ) = + pqResult: ptr PGresult, + outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)], +) = ## Proc aimed to contain the logic of the callback passed to the `psasyncpool`. ## That callback is used in "SELECT" queries. ## @@ -122,8 +123,7 @@ proc rowCallbackImpl( error "Wrong number of fields" return - for iRow in 0..= newestPartition.getLastMoment(): debug "creating a new partition to contain current messages" @@ -681,28 +695,32 @@ proc loopPartitionFactory(self: PostgresDriver, await sleepAsync(DefaultDatabasePartitionCheckTimeInterval) -proc startPartitionFactory*(self: PostgresDriver, - onFatalError: OnFatalErrorHandler) {.async.} = - +proc startPartitionFactory*( + self: PostgresDriver, onFatalError: OnFatalErrorHandler +) {.async.} = self.futLoopPartitionFactory = self.loopPartitionFactory(onFatalError) -proc getTableSize*(self: PostgresDriver, - tableName: string): Future[ArchiveDriverResult[string]] {.async.} = +proc getTableSize*( + self: PostgresDriver, tableName: string +): Future[ArchiveDriverResult[string]] {.async.} = ## Returns a human-readable representation of the size for the requested table. ## tableName - table of interest. - let tableSize = (await self.getStr(fmt""" + let tableSize = ( + await self.getStr( + fmt""" SELECT pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size" FROM pg_class C - where relname = '{tableName}'""")).valueOr: + where relname = '{tableName}'""" + ) + ).valueOr: return err("error in getDatabaseSize: " & error) return ok(tableSize) -proc removeOldestPartition(self: PostgresDriver, - forceRemoval: bool = false, ## To allow cleanup in tests - ): - Future[ArchiveDriverResult[void]] {.async.} = +proc removeOldestPartition( + self: PostgresDriver, forceRemoval: bool = false, ## To allow cleanup in tests +): Future[ArchiveDriverResult[void]] {.async.} = ## Indirectly called from the retention policy let oldestPartition = self.partitionMngr.getOldestPartition().valueOr: @@ -726,7 +744,8 @@ proc removeOldestPartition(self: PostgresDriver, ## In the following lines is where the partition removal happens. 
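
# A minimal sketch of the two-step removal performed below, with a
# hypothetical partition name; the DROP statement mirrors the
# dropPartitionQuery referenced further down and is an assumption here.
# DETACH ... CONCURRENTLY avoids locking the parent `messages` table,
# but requires PostgreSQL 14 or newer.
let partitionName = "messages_1700000000" # hypothetical
let detachQuery =
  "ALTER TABLE messages DETACH PARTITION " & partitionName & " CONCURRENTLY;"
let dropQuery = "DROP TABLE " & partitionName & ";"
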
## Detach and remove the partition concurrently to not block the parent table (messages) let detachPartitionQuery = - "ALTER TABLE messages DETACH PARTITION " & oldestPartition.getName() & " CONCURRENTLY;" + "ALTER TABLE messages DETACH PARTITION " & oldestPartition.getName() & + " CONCURRENTLY;" debug "removeOldestPartition", query = detachPartitionQuery (await self.performWriteQuery(detachPartitionQuery)).isOkOr: return err(fmt"error in {detachPartitionQuery}: " & $error) @@ -737,7 +756,8 @@ proc removeOldestPartition(self: PostgresDriver, (await self.performWriteQuery(dropPartitionQuery)).isOkOr: return err(fmt"error in {dropPartitionQuery}: " & $error) - debug "removed partition", partition_name = oldestPartition.getName(), partition_size = partSize + debug "removed partition", + partition_name = oldestPartition.getName(), partition_size = partSize self.partitionMngr.removeOldestPartitionName() return ok() @@ -745,10 +765,9 @@ proc removeOldestPartition(self: PostgresDriver, proc containsAnyPartition*(self: PostgresDriver): bool = return not self.partitionMngr.isEmpty() -method decreaseDatabaseSize*(driver: PostgresDriver, - targetSizeInBytes: int64, - forceRemoval: bool = false): - Future[ArchiveDriverResult[void]] {.async.} = +method decreaseDatabaseSize*( + driver: PostgresDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.async.} = var dbSize = (await driver.getDatabaseSize()).valueOr: return err("decreaseDatabaseSize failed to get database size: " & $error) @@ -758,14 +777,18 @@ method decreaseDatabaseSize*(driver: PostgresDriver, if totalSizeOfDB <= targetSizeInBytes: return ok() - debug "start reducing database size", targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB + debug "start reducing database size", + targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition(): (await driver.removeOldestPartition(forceRemoval)).isOkOr: - return err("decreaseDatabaseSize inside loop failed to remove oldest partition: " & $error) + return err( + "decreaseDatabaseSize inside loop failed to remove oldest partition: " & $error + ) dbSize = (await driver.getDatabaseSize()).valueOr: - return err("decreaseDatabaseSize inside loop failed to get database size: " & $error) + return + err("decreaseDatabaseSize inside loop failed to get database size: " & $error) let newCurrentSize = int64(dbSize) if newCurrentSize == totalSizeOfDB: @@ -773,13 +796,16 @@ method decreaseDatabaseSize*(driver: PostgresDriver, totalSizeOfDB = newCurrentSize - debug "reducing database size", targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB + debug "reducing database size", + targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB return ok() -method existsTable*(s: PostgresDriver, tableName: string): - Future[ArchiveDriverResult[bool]] {.async.} = - let query: string = fmt""" +method existsTable*( + s: PostgresDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = + let query: string = + fmt""" SELECT EXISTS ( SELECT FROM pg_tables @@ -805,9 +831,9 @@ method existsTable*(s: PostgresDriver, tableName: string): return ok(exists == "t") -proc getCurrentVersion*(s: PostgresDriver): - Future[ArchiveDriverResult[int64]] {.async.} = - +proc getCurrentVersion*( + s: PostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = let existsVersionTable = (await s.existsTable("version")).valueOr: return err("error in getCurrentVersion-existsTable: " 
& $error) @@ -818,5 +844,3 @@ proc getCurrentVersion*(s: PostgresDriver): return err("error in getMessagesCount: " & $error) return ok(res) - - diff --git a/waku/waku_archive/driver/postgres_driver/postgres_healthcheck.nim b/waku/waku_archive/driver/postgres_driver/postgres_healthcheck.nim index 1559db2a5..28eac59aa 100644 --- a/waku/waku_archive/driver/postgres_driver/postgres_healthcheck.nim +++ b/waku/waku_archive/driver/postgres_driver/postgres_healthcheck.nim @@ -3,13 +3,9 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} +import chronos, stew/results import - chronos, - stew/results -import - ../../driver, - ../../../common/databases/db_postgres, - ../../../common/error_handling + ../../driver, ../../../common/databases/db_postgres, ../../../common/error_handling ## Simple query to validate that the postgres is working and attending requests const HealthCheckQuery = "SELECT version();" @@ -17,13 +13,11 @@ const CheckConnectivityInterval = 60.seconds const MaxNumTrials = 20 const TrialInterval = 1.seconds -proc checkConnectivity*(connPool: PgAsyncPool, - onFatalErrorAction: OnFatalErrorHandler) {.async.} = - +proc checkConnectivity*( + connPool: PgAsyncPool, onFatalErrorAction: OnFatalErrorHandler +) {.async.} = while true: - (await connPool.pgQuery(HealthCheckQuery)).isOkOr: - ## The connection failed once. Let's try reconnecting for a while. ## Notice that the 'exec' proc tries to establish a new connection. diff --git a/waku/waku_archive/driver/queue_driver.nim b/waku/waku_archive/driver/queue_driver.nim index 14b5d3cef..1ea8a29d3 100644 --- a/waku/waku_archive/driver/queue_driver.nim +++ b/waku/waku_archive/driver/queue_driver.nim @@ -3,10 +3,6 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - ./queue_driver/queue_driver, - ./queue_driver/index +import ./queue_driver/queue_driver, ./queue_driver/index -export - queue_driver, - index +export queue_driver, index diff --git a/waku/waku_archive/driver/queue_driver/index.nim b/waku/waku_archive/driver/queue_driver/index.nim index 2cfd54006..c01862a4c 100644 --- a/waku/waku_archive/driver/queue_driver/index.nim +++ b/waku/waku_archive/driver/queue_driver/index.nim @@ -3,12 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - stew/byteutils, - nimcrypto/sha2 -import - ../../../waku_core, - ../../common +import stew/byteutils, nimcrypto/sha2 +import ../../../waku_core, ../../common type Index* = object ## This type contains the description of an Index used in the pagination of WakuMessages @@ -18,7 +14,9 @@ type Index* = object digest*: MessageDigest # calculated over payload and content topic hash*: WakuMessageHash -proc compute*(T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic): T = +proc compute*( + T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic +): T = ## Takes a WakuMessage with received timestamp and returns its Index. 
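
# A small sketch of the ordering semantics implemented by `cmp` below,
# assuming the Index type from this module; the field values are made up.
# When senderTime is unset (0) the comparison falls back to receiverTime,
# so messages without a sender timestamp still sort by arrival time.
let
  a = Index(senderTime: 0, receiverTime: 10, pubsubTopic: "/waku/2/default-waku/proto")
  b = Index(senderTime: 12, receiverTime: 11, pubsubTopic: "/waku/2/default-waku/proto")
assert cmp(a, b) < 0 # a sorts first: 10 (receiver fallback) < 12 (sender time)
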
   let digest = computeDigest(msg)
@@ -53,9 +51,8 @@ proc toIndex*(index: ArchiveCursor): Index =
 
 proc `==`*(x, y: Index): bool =
   ## receiverTime plays no role in index equality
-  return
-    (x.senderTime == y.senderTime) and
-    (x.digest == y.digest) and
+  return
+    (x.senderTime == y.senderTime) and (x.digest == y.digest) and
     (x.pubsubTopic == y.pubsubTopic)
 
 proc cmp*(x, y: Index): int =
@@ -77,10 +74,8 @@ proc cmp*(x, y: Index): int =
   # Timestamp has a higher priority for comparison
   let
     # Use receiverTime where senderTime is unset
-    xTimestamp = if x.senderTime == 0: x.receiverTime
-                 else: x.senderTime
-    yTimestamp = if y.senderTime == 0: y.receiverTime
-                 else: y.senderTime
+    xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime
+    yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime
 
   let timecmp = cmp(xTimestamp, yTimestamp)
   if timecmp != 0:
@@ -91,4 +86,4 @@ proc cmp*(x, y: Index): int =
   if digestcmp != 0:
     return digestcmp
 
-  return cmp(x.pubsubTopic, y.pubsubTopic)
\ No newline at end of file
+  return cmp(x.pubsubTopic, y.pubsubTopic)
diff --git a/waku/waku_archive/driver/queue_driver/queue_driver.nim b/waku/waku_archive/driver/queue_driver/queue_driver.nim
index a84ad9256..bec9253d2 100644
--- a/waku/waku_archive/driver/queue_driver/queue_driver.nim
+++ b/waku/waku_archive/driver/queue_driver/queue_driver.nim
@@ -3,17 +3,8 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}
 
-import
-  std/options,
-  stew/results,
-  stew/sorted_set,
-  chronicles,
-  chronos
-import
-  ../../../waku_core,
-  ../../common,
-  ../../driver,
-  ./index
+import std/options, stew/results, stew/sorted_set, chronicles, chronos
+import ../../../waku_core, ../../common, ../../driver, ./index
 
 logScope:
   topics = "waku archive queue_store"
@@ -32,7 +23,7 @@ type
     ## item will be removed to make space for the new one.
     ## This implies both a `delete` and `add` operation
     ## for new items.
-
+    # TODO: a circular/ring buffer may be a more efficient implementation
     items: SortedSet[Index, WakuMessage] # sorted set of stored messages
     capacity: int # Maximum amount of messages to keep
@@ -43,20 +34,22 @@ type
   QueueDriverGetPageResult = Result[seq[ArchiveRow], QueueDriverErrorKind]
 
 proc `$`(error: QueueDriverErrorKind): string =
-  case error:
-  of INVALID_CURSOR:
-    "invalid_cursor"
+  case error
+  of INVALID_CURSOR: "invalid_cursor"
 
 ### Helpers
 
-proc walkToCursor(w: SortedSetWalkRef[Index, WakuMessage],
-                  startCursor: Index,
-                  forward: bool): SortedSetResult[Index, WakuMessage] =
+proc walkToCursor(
+    w: SortedSetWalkRef[Index, WakuMessage], startCursor: Index, forward: bool
+): SortedSetResult[Index, WakuMessage] =
   ## Walk until we find the cursor
   ## TODO: Improve performance here with a binary/tree search
-  var nextItem = if forward: w.first()
-                 else: w.last()
+  var nextItem =
+    if forward:
+      w.first()
+    else:
+      w.last()
 
   ## Fast forward until we reach the startCursor
   while nextItem.isOk():
@@ -64,8 +57,11 @@ proc walkToCursor(w: SortedSetWalkRef[Index, WakuMessage],
       break
 
     # Not yet at cursor. Continue advancing
-    nextItem = if forward: w.next()
-               else: w.prev()
+    nextItem =
+      if forward:
+        w.next()
+      else:
+        w.prev()
 
   return nextItem
 
@@ -82,11 +78,13 @@ proc contains*(driver: QueueDriver, index: Index): bool =
 proc len*(driver: QueueDriver): int {.noSideEffect.} =
   return driver.items.len
 
-proc getPage(driver: QueueDriver,
-             pageSize: uint = 0,
-             forward: bool = true,
-             cursor: Option[Index] = none(Index),
-             predicate: QueryFilterMatcher = nil): QueueDriverGetPageResult =
+proc getPage(
+    driver: QueueDriver,
+    pageSize: uint = 0,
+    forward: bool = true,
+    cursor: Option[Index] = none(Index),
+    predicate: QueryFilterMatcher = nil,
+): QueueDriverGetPageResult =
   ## Populate a single page in forward direction
   ## Start at the `startCursor` (exclusive), or first entry (inclusive) if not defined.
   ## Page size must not exceed `maxPageSize`
@@ -94,7 +92,8 @@ proc getPage(
   var outSeq: seq[ArchiveRow]
 
   var w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
-  defer: w.destroy()
+  defer:
+    w.destroy()
 
   var currentEntry: SortedSetResult[Index, WakuMessage]
 
@@ -105,14 +104,20 @@ proc getPage(
       return err(QueueDriverErrorKind.INVALID_CURSOR)
 
     # Advance walker once more
-    currentEntry = if forward: w.next()
-                   else: w.prev()
+    currentEntry =
+      if forward:
+        w.next()
+      else:
+        w.prev()
   else:
     # Start from the beginning of the queue
-    currentEntry = if forward: w.first()
-                   else: w.last()
+    currentEntry =
+      if forward:
+        w.first()
+      else:
+        w.last()
 
-  trace "Starting page query", currentEntry=currentEntry
+  trace "Starting page query", currentEntry = currentEntry
 
   ## This loop walks forward over the queue:
   ## 1. from the given cursor (or first/last entry, if not provided)
@@ -120,7 +125,8 @@ proc getPage(
   ## 3. until either the end of the queue or maxPageSize is reached
   var numberOfItems: uint = 0
   while currentEntry.isOk() and numberOfItems < pageSize:
-    trace "Continuing page query", currentEntry=currentEntry, numberOfItems=numberOfItems
+    trace "Continuing page query",
+      currentEntry = currentEntry, numberOfItems = numberOfItems
 
     let key = currentEntry.value.key
 
@@ -129,16 +135,20 @@ proc getPage(
     if predicate.isNil() or predicate(key, data):
       numberOfItems += 1
 
-      outSeq.add((key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash))
+      outSeq.add(
+        (key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash)
+      )
 
-    currentEntry = if forward: w.next()
-                   else: w.prev()
+    currentEntry =
+      if forward:
+        w.next()
+      else:
+        w.prev()
 
-  trace "Successfully retrieved page", len=outSeq.len
+  trace "Successfully retrieved page", len = outSeq.len
 
   return ok(outSeq)
 
-
 ## --- SortedSet accessors ---
 
 iterator fwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
@@ -189,12 +199,14 @@ proc last*(driver: QueueDriver): ArchiveDriverResult[Index] =
 
 ## --- Queue API ---
 
-proc add*(driver: QueueDriver, index: Index, msg: WakuMessage): ArchiveDriverResult[void] =
+proc add*(
+    driver: QueueDriver, index: Index, msg: WakuMessage
+): ArchiveDriverResult[void] =
   ## Add a message to the queue
   ##
   ## If we're at capacity, we will remove the oldest (first) item
   if driver.contains(index):
-    trace "could not add item to store queue.
Index already exists", index = index return err("duplicate") # TODO: the below delete block can be removed if we convert to circular buffer @@ -215,62 +227,66 @@ proc add*(driver: QueueDriver, index: Index, msg: WakuMessage): ArchiveDriverRes return ok() -method put*(driver: QueueDriver, - pubsubTopic: PubsubTopic, - message: WakuMessage, - digest: MessageDigest, - messageHash: WakuMessageHash, - receivedTime: Timestamp): - Future[ArchiveDriverResult[void]] {.async.} = +method put*( + driver: QueueDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.async.} = let index = Index( pubsubTopic: pubsubTopic, senderTime: message.timestamp, receiverTime: receivedTime, digest: digest, hash: messageHash, - ) - + ) + return driver.add(index, message) -method getAllMessages*(driver: QueueDriver): - Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = +method getAllMessages*( + driver: QueueDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = # TODO: Implement this message_store method return err("interface method not implemented") -method existsTable*(driver: QueueDriver, tableName: string): - Future[ArchiveDriverResult[bool]] {.async.} = +method existsTable*( + driver: QueueDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = return err("interface method not implemented") method getMessages*( - driver: QueueDriver, - contentTopic: seq[ContentTopic] = @[], - pubsubTopic = none(PubsubTopic), - cursor = none(ArchiveCursor), - startTime = none(Timestamp), - endTime = none(Timestamp), - hashes: seq[WakuMessageHash] = @[], - maxPageSize = DefaultPageSize, - ascendingOrder = true, - ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + driver: QueueDriver, + contentTopic: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes: seq[WakuMessageHash] = @[], + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = let cursor = cursor.map(toIndex) - let matchesQuery: QueryFilterMatcher = func(index: Index, msg: WakuMessage): bool = - if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get(): - return false + let matchesQuery: QueryFilterMatcher = + func (index: Index, msg: WakuMessage): bool = + if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get(): + return false - if contentTopic.len > 0 and msg.contentTopic notin contentTopic: - return false + if contentTopic.len > 0 and msg.contentTopic notin contentTopic: + return false - if startTime.isSome() and msg.timestamp < startTime.get(): - return false + if startTime.isSome() and msg.timestamp < startTime.get(): + return false - if endTime.isSome() and msg.timestamp > endTime.get(): - return false + if endTime.isSome() and msg.timestamp > endTime.get(): + return false - if hashes.len > 0 and index.hash notin hashes: - return false + if hashes.len > 0 and index.hash notin hashes: + return false - return true + return true var pageRes: QueueDriverGetPageResult try: @@ -283,52 +299,63 @@ method getMessages*( return ok(pageRes.value) -method getMessagesCount*(driver: QueueDriver): - Future[ArchiveDriverResult[int64]] {.async} = +method getMessagesCount*( + driver: QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = return ok(int64(driver.len())) -method getPagesCount*(driver: QueueDriver): - 
Future[ArchiveDriverResult[int64]] {.async} = +method getPagesCount*( + driver: QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = return ok(int64(driver.len())) -method getPagesSize*(driver: QueueDriver): - Future[ArchiveDriverResult[int64]] {.async} = +method getPagesSize*( + driver: QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = return ok(int64(driver.len())) -method getDatabaseSize*(driver: QueueDriver): - Future[ArchiveDriverResult[int64]] {.async} = +method getDatabaseSize*( + driver: QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = return ok(int64(driver.len())) -method performVacuum*(driver: QueueDriver): - Future[ArchiveDriverResult[void]] {.async.} = +method performVacuum*( + driver: QueueDriver +): Future[ArchiveDriverResult[void]] {.async.} = return err("interface method not implemented") -method getOldestMessageTimestamp*(driver: QueueDriver): - Future[ArchiveDriverResult[Timestamp]] {.async.} = - return driver.first().map(proc(index: Index): Timestamp = index.receiverTime) +method getOldestMessageTimestamp*( + driver: QueueDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return driver.first().map( + proc(index: Index): Timestamp = + index.receiverTime + ) -method getNewestMessageTimestamp*(driver: QueueDriver): - Future[ArchiveDriverResult[Timestamp]] {.async.} = - return driver.last().map(proc(index: Index): Timestamp = index.receiverTime) +method getNewestMessageTimestamp*( + driver: QueueDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return driver.last().map( + proc(index: Index): Timestamp = + index.receiverTime + ) -method deleteMessagesOlderThanTimestamp*(driver: QueueDriver, - ts: Timestamp): - Future[ArchiveDriverResult[void]] {.async.} = +method deleteMessagesOlderThanTimestamp*( + driver: QueueDriver, ts: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = # TODO: Implement this message_store method return err("interface method not implemented") -method deleteOldestMessagesNotWithinLimit*(driver: QueueDriver, - limit: int): - Future[ArchiveDriverResult[void]] {.async.} = +method deleteOldestMessagesNotWithinLimit*( + driver: QueueDriver, limit: int +): Future[ArchiveDriverResult[void]] {.async.} = # TODO: Implement this message_store method return err("interface method not implemented") -method decreaseDatabaseSize*(driver: QueueDriver, - targetSizeInBytes: int64, - forceRemoval: bool = false): - Future[ArchiveDriverResult[void]] {.async.} = +method decreaseDatabaseSize*( + driver: QueueDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.async.} = return err("interface method not implemented") -method close*(driver: QueueDriver): - Future[ArchiveDriverResult[void]] {.async.} = +method close*(driver: QueueDriver): Future[ArchiveDriverResult[void]] {.async.} = return ok() diff --git a/waku/waku_archive/driver/sqlite_driver/cursor.nim b/waku/waku_archive/driver/sqlite_driver/cursor.nim index 9b4d00fd9..9729f0ff7 100644 --- a/waku/waku_archive/driver/sqlite_driver/cursor.nim +++ b/waku/waku_archive/driver/sqlite_driver/cursor.nim @@ -3,11 +3,9 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} - -import - ../../../waku_core, - ../../common +import ../../../waku_core, ../../common type DbCursor* = (Timestamp, seq[byte], PubsubTopic) -proc toDbCursor*(c: ArchiveCursor): DbCursor = (c.storeTime, @(c.digest.data), c.pubsubTopic) +proc toDbCursor*(c: ArchiveCursor): DbCursor = + (c.storeTime, @(c.digest.data), c.pubsubTopic) diff 
--git a/waku/waku_archive/driver/sqlite_driver/migrations.nim b/waku/waku_archive/driver/sqlite_driver/migrations.nim index c787f4ac9..89f2aceff 100644 --- a/waku/waku_archive/driver/sqlite_driver/migrations.nim +++ b/waku/waku_archive/driver/sqlite_driver/migrations.nim @@ -5,18 +5,16 @@ import stew/results, chronicles, sqlite3_abi # sqlite3_column_int64 -import - ../../../common/databases/db_sqlite, - ../../../common/databases/common - +import ../../../common/databases/db_sqlite, ../../../common/databases/common logScope: topics = "waku archive migration" - const SchemaVersion* = 8 # increase this when there is an update in the database schema -template projectRoot: string = currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".." +template projectRoot(): string = + currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".." + const MessageStoreMigrationPath: string = projectRoot / "migrations" / "message_store" proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] = @@ -39,14 +37,14 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] = let colName = cstring sqlite3_column_text(s, 0) pkColumns.add($colName) - let query = """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;""" + let query = + """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;""" let res = db.query(query, queryRowCallback) if res.isErr(): return err("failed to determine the current SchemaVersion: " & $res.error) if pkColumns == @["pubsubTopic", "id", "storedAt"]: return ok(true) - else: info "Not considered schema version 7" return ok(false) @@ -62,15 +60,16 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult ## NOTE: Down migration it is not currently supported debug "starting message store's sqlite database migration" - let userVersion = ? db.getUserVersion() - let isSchemaVersion7 = ? db.isSchemaVersion7() + let userVersion = ?db.getUserVersion() + let isSchemaVersion7 = ?db.isSchemaVersion7() if userVersion == 0'i64 and isSchemaVersion7: info "We found user_version 0 but the database schema reflects the user_version 7" ## Force the correct schema version - ? 
db.setUserVersion( 7 ) + ?db.setUserVersion(7) - let migrationRes = migrate(db, targetVersion, migrationsScriptsDir=MessageStoreMigrationPath) + let migrationRes = + migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath) if migrationRes.isErr(): return err("failed to execute migration scripts: " & migrationRes.error) diff --git a/waku/waku_archive/driver/sqlite_driver/queries.nim b/waku/waku_archive/driver/sqlite_driver/queries.nim index 17806a47c..ab1fa1e9f 100644 --- a/waku/waku_archive/driver/sqlite_driver/queries.nim +++ b/waku/waku_archive/driver/sqlite_driver/queries.nim @@ -3,10 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/[options, sequtils], - stew/[results, byteutils, arrayops], - sqlite3_abi +import std/[options, sequtils], stew/[results, byteutils, arrayops], sqlite3_abi import ../../../common/databases/db_sqlite, ../../../common/databases/common, @@ -19,35 +16,44 @@ type SqlQueryStr = string ### SQLite column helper methods -proc queryRowWakuMessageCallback(s: ptr sqlite3_stmt, contentTopicCol, payloadCol, versionCol, senderTimestampCol: cint): WakuMessage = +proc queryRowWakuMessageCallback( + s: ptr sqlite3_stmt, + contentTopicCol, payloadCol, versionCol, senderTimestampCol: cint, +): WakuMessage = let topic = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, contentTopicCol)) topicLength = sqlite3_column_bytes(s, contentTopicCol) - contentTopic = string.fromBytes(@(toOpenArray(topic, 0, topicLength-1))) + contentTopic = string.fromBytes(@(toOpenArray(topic, 0, topicLength - 1))) p = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, payloadCol)) length = sqlite3_column_bytes(s, payloadCol) - payload = @(toOpenArray(p, 0, length-1)) + payload = @(toOpenArray(p, 0, length - 1)) version = sqlite3_column_int64(s, versionCol) senderTimestamp = sqlite3_column_int64(s, senderTimestampCol) return WakuMessage( contentTopic: ContentTopic(contentTopic), - payload: payload , + payload: payload, version: uint32(version), - timestamp: Timestamp(senderTimestamp) + timestamp: Timestamp(senderTimestamp), ) -proc queryRowReceiverTimestampCallback(s: ptr sqlite3_stmt, storedAtCol: cint): Timestamp = +proc queryRowReceiverTimestampCallback( + s: ptr sqlite3_stmt, storedAtCol: cint +): Timestamp = let storedAt = sqlite3_column_int64(s, storedAtCol) return Timestamp(storedAt) -proc queryRowPubsubTopicCallback(s: ptr sqlite3_stmt, pubsubTopicCol: cint): PubsubTopic = +proc queryRowPubsubTopicCallback( + s: ptr sqlite3_stmt, pubsubTopicCol: cint +): PubsubTopic = let - pubsubTopicPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, pubsubTopicCol)) + pubsubTopicPointer = + cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, pubsubTopicCol)) pubsubTopicLength = sqlite3_column_bytes(s, pubsubTopicCol) - pubsubTopic = string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength-1))) + pubsubTopic = + string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength - 1))) return pubsubTopic @@ -55,15 +61,17 @@ proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] = let digestPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, digestCol)) digestLength = sqlite3_column_bytes(s, digestCol) - digest = @(toOpenArray(digestPointer, 0, digestLength-1)) + digest = @(toOpenArray(digestPointer, 0, digestLength - 1)) return digest -proc queryRowWakuMessageHashCallback(s: ptr sqlite3_stmt, hashCol: cint): WakuMessageHash = +proc queryRowWakuMessageHashCallback( + s: ptr 
sqlite3_stmt, hashCol: cint +): WakuMessageHash = let hashPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, hashCol)) hashLength = sqlite3_column_bytes(s, hashCol) - hash = fromBytes(toOpenArray(hashPointer, 0, hashLength-1)) + hash = fromBytes(toOpenArray(hashPointer, 0, hashLength - 1)) return hash @@ -72,21 +80,21 @@ proc queryRowWakuMessageHashCallback(s: ptr sqlite3_stmt, hashCol: cint): WakuMe ## Create table proc createTableQuery(table: string): SqlQueryStr = - "CREATE TABLE IF NOT EXISTS " & table & " (" & - " pubsubTopic BLOB NOT NULL," & - " contentTopic BLOB NOT NULL," & - " payload BLOB," & - " version INTEGER NOT NULL," & - " timestamp INTEGER NOT NULL," & - " id BLOB," & - " messageHash BLOB," & - " storedAt INTEGER NOT NULL," & - " CONSTRAINT messageIndex PRIMARY KEY (messageHash)" & - ") WITHOUT ROWID;" + "CREATE TABLE IF NOT EXISTS " & table & " (" & " pubsubTopic BLOB NOT NULL," & + " contentTopic BLOB NOT NULL," & " payload BLOB," & " version INTEGER NOT NULL," & + " timestamp INTEGER NOT NULL," & " id BLOB," & " messageHash BLOB," & + " storedAt INTEGER NOT NULL," & " CONSTRAINT messageIndex PRIMARY KEY (messageHash)" & + ") WITHOUT ROWID;" proc createTable*(db: SqliteDatabase): DatabaseResult[void] = let query = createTableQuery(DbTable) - discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard + , + ) return ok() ## Create indices @@ -94,31 +102,48 @@ proc createTable*(db: SqliteDatabase): DatabaseResult[void] = proc createOldestMessageTimestampIndexQuery(table: string): SqlQueryStr = "CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (storedAt);" -proc createOldestMessageTimestampIndex*(db: SqliteDatabase): - DatabaseResult[void] = +proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void] = let query = createOldestMessageTimestampIndexQuery(DbTable) - discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard + , + ) return ok() proc createHistoryQueryIndexQuery(table: string): SqlQueryStr = - "CREATE INDEX IF NOT EXISTS i_query ON " & table & " (contentTopic, pubsubTopic, storedAt, id);" + "CREATE INDEX IF NOT EXISTS i_query ON " & table & + " (contentTopic, pubsubTopic, storedAt, id);" proc createHistoryQueryIndex*(db: SqliteDatabase): DatabaseResult[void] = let query = createHistoryQueryIndexQuery(DbTable) - discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard + , + ) return ok() ## Insert message -type InsertMessageParams* = (seq[byte], seq[byte], Timestamp, seq[byte], seq[byte], seq[byte], int64, Timestamp) +type InsertMessageParams* = + (seq[byte], seq[byte], Timestamp, seq[byte], seq[byte], seq[byte], int64, Timestamp) proc insertMessageQuery(table: string): SqlQueryStr = return - "INSERT INTO " & table & "(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp)" & + "INSERT INTO " & table & + "(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp)" & " VALUES (?, ?, ?, ?, ?, ?, ?, ?);" -proc prepareInsertMessageStmt*(db: SqliteDatabase): SqliteStmt[InsertMessageParams, void] = +proc prepareInsertMessageStmt*( + db: SqliteDatabase +): SqliteStmt[InsertMessageParams, void] = let query = insertMessageQuery(DbTable) - return db.prepareStmt(query, InsertMessageParams, void).expect("this is a valid statement") + return + db.prepareStmt(query, 
InsertMessageParams, void).expect("this is a valid statement") ## Count table messages @@ -142,8 +167,9 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] = proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr = return "SELECT MIN(storedAt) FROM " & table -proc selectOldestReceiverTimestamp*(db: SqliteDatabase): - DatabaseResult[Timestamp] {.inline.}= +proc selectOldestReceiverTimestamp*( + db: SqliteDatabase +): DatabaseResult[Timestamp] {.inline.} = var timestamp: Timestamp proc queryRowCallback(s: ptr sqlite3_stmt) = timestamp = queryRowReceiverTimestampCallback(s, 0) @@ -160,8 +186,9 @@ proc selectOldestReceiverTimestamp*(db: SqliteDatabase): proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr = return "SELECT MAX(storedAt) FROM " & table -proc selectNewestReceiverTimestamp*(db: SqliteDatabase): - DatabaseResult[Timestamp] {.inline.}= +proc selectNewestReceiverTimestamp*( + db: SqliteDatabase +): DatabaseResult[Timestamp] {.inline.} = var timestamp: Timestamp proc queryRowCallback(s: ptr sqlite3_stmt) = timestamp = queryRowReceiverTimestampCallback(s, 0) @@ -178,51 +205,64 @@ proc selectNewestReceiverTimestamp*(db: SqliteDatabase): proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr = return "DELETE FROM " & table & " WHERE storedAt < " & $ts -proc deleteMessagesOlderThanTimestamp*(db: SqliteDatabase, ts: int64): - DatabaseResult[void] = +proc deleteMessagesOlderThanTimestamp*( + db: SqliteDatabase, ts: int64 +): DatabaseResult[void] = let query = deleteMessagesOlderThanTimestampQuery(DbTable, ts) - discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard + , + ) return ok() ## Delete oldest messages not within limit proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr = - return + return "DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" & " SELECT storedAt, id, pubsubTopic FROM " & table & - " ORDER BY storedAt DESC, id DESC" & - " LIMIT " & $limit & - ");" + " ORDER BY storedAt DESC, id DESC" & " LIMIT " & $limit & ");" -proc deleteOldestMessagesNotWithinLimit*(db: SqliteDatabase, limit: int): - DatabaseResult[void] = +proc deleteOldestMessagesNotWithinLimit*( + db: SqliteDatabase, limit: int +): DatabaseResult[void] = # NOTE: The word `limit` here refers the store capacity/maximum number-of-messages allowed limit - let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit=limit) - discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard) + let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit = limit) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard + , + ) return ok() ## Select all messages proc selectAllMessagesQuery(table: string): SqlQueryStr = - return + return "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash" & - " FROM " & table & - " ORDER BY storedAt ASC" + " FROM " & table & " ORDER BY storedAt ASC" -proc selectAllMessages*(db: SqliteDatabase): DatabaseResult[seq[(PubsubTopic, - WakuMessage, - seq[byte], - Timestamp, - WakuMessageHash)]] = +proc selectAllMessages*( + db: SqliteDatabase +): DatabaseResult[ + seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] +] = ## Retrieve all messages from the store. 
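
# For illustration, assuming module scope (the query builders are private):
# the capacity-based DELETE constructed by deleteOldestMessagesNotWithinLimitQuery
# above, for a hypothetical limit of 3, reformatted here for readability.
echo deleteOldestMessagesNotWithinLimitQuery("messages", limit = 3)
#   DELETE FROM messages WHERE (storedAt, id, pubsubTopic) NOT IN (
#     SELECT storedAt, id, pubsubTopic FROM messages
#     ORDER BY storedAt DESC, id DESC LIMIT 3);
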
var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] proc queryRowCallback(s: ptr sqlite3_stmt) = let - pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol=3) - wakuMessage = queryRowWakuMessageCallback(s, contentTopicCol=1, payloadCol=2, versionCol=4, senderTimestampCol=5) - digest = queryRowDigestCallback(s, digestCol=6) - storedAt = queryRowReceiverTimestampCallback(s, storedAtCol=0) - hash = queryRowWakuMessageHashCallback(s, hashCol=7) + pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3) + wakuMessage = queryRowWakuMessageCallback( + s, contentTopicCol = 1, payloadCol = 2, versionCol = 4, senderTimestampCol = 5 + ) + digest = queryRowDigestCallback(s, digestCol = 6) + storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0) + hash = queryRowWakuMessageHashCallback(s, hashCol = 7) rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash)) @@ -241,79 +281,80 @@ proc combineClauses(clauses: varargs[Option[string]]): Option[string] = return none(string) var where: string = whereSeq[0] - for clause in whereSeq[1..^1]: + for clause in whereSeq[1 ..^ 1]: where &= " AND " & clause return some(where) -proc whereClause(cursor: Option[DbCursor], - pubsubTopic: Option[PubsubTopic], - contentTopic: seq[ContentTopic], - startTime: Option[Timestamp], - endTime: Option[Timestamp], - hashes: seq[WakuMessageHash], - ascending: bool): Option[string] = +proc whereClause( + cursor: Option[DbCursor], + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + ascending: bool, +): Option[string] = + let cursorClause = + if cursor.isNone(): + none(string) + else: + let comp = if ascending: ">" else: "<" + some("(storedAt, id) " & comp & " (?, ?)") - let cursorClause = if cursor.isNone(): - none(string) - else: - let comp = if ascending: ">" else: "<" - some("(storedAt, id) " & comp & " (?, ?)") + let pubsubTopicClause = + if pubsubTopic.isNone(): + none(string) + else: + some("pubsubTopic = (?)") - let pubsubTopicClause = if pubsubTopic.isNone(): - none(string) - else: - some("pubsubTopic = (?)") + let contentTopicClause = + if contentTopic.len <= 0: + none(string) + else: + var where = "contentTopic IN (" + where &= "?" + for _ in 1 ..< contentTopic.len: + where &= ", ?" + where &= ")" + some(where) - let contentTopicClause = if contentTopic.len <= 0: - none(string) - else: - var where = "contentTopic IN (" - where &= "?" - for _ in 1..= (?)") - let startTimeClause = if startTime.isNone(): - none(string) - else: - some("storedAt >= (?)") + let endTimeClause = + if endTime.isNone(): + none(string) + else: + some("storedAt <= (?)") - let endTimeClause = if endTime.isNone(): - none(string) - else: - some("storedAt <= (?)") - - let hashesClause = if hashes.len <= 0: - none(string) - else: - var where = "messageHash IN (" - where &= "?" 
- for _ in 1..///` ## Autosharding adds 1 optional prefix `/ @@ -68,57 +70,58 @@ proc parse*(T: type NsContentTopic, topic: ContentTopic|string): ParsingResult[N if not topic.startsWith("/"): return err(ParsingError.invalidFormat("topic must start with slash")) - let parts = topic[1../// or /////" + return err(ParsingError.invalidFormat(errMsg)) # Content topic compatibility diff --git a/waku/waku_core/topics/parsing.nim b/waku/waku_core/topics/parsing.nim index 494167113..300037ea7 100644 --- a/waku/waku_core/topics/parsing.nim +++ b/waku/waku_core/topics/parsing.nim @@ -5,10 +5,9 @@ else: import stew/results - type ParsingErrorKind* {.pure.} = enum - InvalidFormat, + InvalidFormat MissingPart ParsingError* = object @@ -20,16 +19,14 @@ type type ParsingResult*[T] = Result[T, ParsingError] - proc invalidFormat*(T: type ParsingError, cause = "invalid format"): T = ParsingError(kind: ParsingErrorKind.InvalidFormat, cause: cause) proc missingPart*(T: type ParsingError, part = "unknown"): T = ParsingError(kind: ParsingErrorKind.MissingPart, part: part) - proc `$`*(err: ParsingError): string = - case err.kind: + case err.kind of ParsingErrorKind.InvalidFormat: return "invalid format: " & err.cause of ParsingErrorKind.MissingPart: diff --git a/waku/waku_core/topics/pubsub_topic.nim b/waku/waku_core/topics/pubsub_topic.nim index cd146ad7e..553b9c9ba 100644 --- a/waku/waku_core/topics/pubsub_topic.nim +++ b/waku/waku_core/topics/pubsub_topic.nim @@ -7,103 +7,103 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} - -import - std/strutils, - stew/[results, base10] -import - ./parsing +import std/strutils, stew/[results, base10] +import ./parsing export parsing - ## Pub-sub topic type PubsubTopic* = string const DefaultPubsubTopic* = PubsubTopic("/waku/2/default-waku/proto") - ## Namespaced pub-sub topic -type - NsPubsubTopicKind* {.pure.} = enum - StaticSharding, - NamedSharding +type NsPubsubTopicKind* {.pure.} = enum + StaticSharding + NamedSharding -type - NsPubsubTopic* = object - case kind*: NsPubsubTopicKind - of NsPubsubTopicKind.StaticSharding: - clusterId*: uint16 - shardId*: uint16 - of NsPubsubTopicKind.NamedSharding: - name*: string +type NsPubsubTopic* = object + case kind*: NsPubsubTopicKind + of NsPubsubTopicKind.StaticSharding: + clusterId*: uint16 + shardId*: uint16 + of NsPubsubTopicKind.NamedSharding: + name*: string proc staticSharding*(T: type NsPubsubTopic, clusterId, shardId: uint16): T = NsPubsubTopic( - kind: NsPubsubTopicKind.StaticSharding, - clusterId: clusterId, - shardId: shardId + kind: NsPubsubTopicKind.StaticSharding, clusterId: clusterId, shardId: shardId ) proc named*(T: type NsPubsubTopic, name: string): T = - NsPubsubTopic( - kind: NsPubsubTopicKind.NamedSharding, - name: name - ) - + NsPubsubTopic(kind: NsPubsubTopicKind.NamedSharding, name: name) # Serialization proc `$`*(topic: NsPubsubTopic): string = ## Returns a string representation of a namespaced topic ## in the format `/waku/2/ - case topic.kind: + case topic.kind of NsPubsubTopicKind.NamedSharding: "/waku/2/" & topic.name of NsPubsubTopicKind.StaticSharding: "/waku/2/rs/" & $topic.clusterId & "/" & $topic.shardId - # Deserialization const Waku2PubsubTopicPrefix = "/waku/2" StaticShardingPubsubTopicPrefix = Waku2PubsubTopicPrefix & "/rs" - -proc parseStaticSharding*(T: type NsPubsubTopic, topic: PubsubTopic|string): ParsingResult[NsPubsubTopic] = +proc parseStaticSharding*( + T: type NsPubsubTopic, topic: PubsubTopic | string +): ParsingResult[NsPubsubTopic] = if not 
topic.startsWith(StaticShardingPubsubTopicPrefix): - return err(ParsingError.invalidFormat("must start with " & StaticShardingPubsubTopicPrefix)) + return err( + ParsingError.invalidFormat("must start with " & StaticShardingPubsubTopicPrefix) + ) - let parts = topic[11..///` if topic.startsWith(StaticShardingPubsubTopicPrefix): @@ -111,7 +111,6 @@ proc parse*(T: type NsPubsubTopic, topic: PubsubTopic|string): ParsingResult[NsP else: NsPubsubTopic.parseNamedSharding(topic) - # Pubsub topic compatibility converter toPubsubTopic*(topic: NsPubsubTopic): PubsubTopic = @@ -119,20 +118,20 @@ converter toPubsubTopic*(topic: NsPubsubTopic): PubsubTopic = proc `==`*[T: NsPubsubTopic](x, y: T): bool = case y.kind - of NsPubsubTopicKind.StaticSharding: - if x.kind != NsPubsubTopicKind.StaticSharding: - return false + of NsPubsubTopicKind.StaticSharding: + if x.kind != NsPubsubTopicKind.StaticSharding: + return false - if x.clusterId != y.clusterId: - return false + if x.clusterId != y.clusterId: + return false - if x.shardId != y.shardId: - return false - of NsPubsubTopicKind.NamedSharding: - if x.kind != NsPubsubTopicKind.NamedSharding: - return false + if x.shardId != y.shardId: + return false + of NsPubsubTopicKind.NamedSharding: + if x.kind != NsPubsubTopicKind.NamedSharding: + return false - if x.name != y.name: - return false + if x.name != y.name: + return false true diff --git a/waku/waku_core/topics/sharding.nim b/waku/waku_core/topics/sharding.nim index ab8a32983..62f317f66 100644 --- a/waku/waku_core/topics/sharding.nim +++ b/waku/waku_core/topics/sharding.nim @@ -7,17 +7,9 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - nimcrypto, - std/options, - std/tables, - stew/endians2, - stew/results, - stew/byteutils +import nimcrypto, std/options, std/tables, stew/endians2, stew/results, stew/byteutils -import - ./content_topic, - ./pubsub_topic +import ./content_topic, ./pubsub_topic type Sharding* = object clusterId*: uint32 @@ -27,14 +19,13 @@ type Sharding* = object proc new*(T: type Sharding, clusterId: uint32, shardCount: uint32): T = return Sharding(clusterId: clusterId, shardCountGenZero: shardCount) - proc getGenZeroShard*(s: Sharding, topic: NsContentTopic, count: int): NsPubsubTopic = let bytes = toBytes(topic.application) & toBytes(topic.version) let hash = sha256.digest(bytes) # We only use the last 64 bits of the hash as having more shards is unlikely. - let hashValue = uint64.fromBytesBE(hash.data[24..31]) + let hashValue = uint64.fromBytesBE(hash.data[24 .. 
31])
 
   # This is equivalent to modulo shard count but faster
   let shard = hashValue and uint64((count - 1))
@@ -48,9 +39,11 @@ proc getShard*(s: Sharding, topic: NsContentTopic): Result[NsPubsubTopic, string
     ## Implicit generation # is 0 for all content topic
     return ok(s.getGenZeroShard(topic, int(s.shardCountGenZero)))
 
-  case topic.generation.get():
-  of 0: return ok(s.getGenZeroShard(topic, int(s.shardCountGenZero)))
-  else: return err("Generation > 0 are not supported yet")
+  case topic.generation.get()
+  of 0:
+    return ok(s.getGenZeroShard(topic, int(s.shardCountGenZero)))
+  else:
+    return err("Generation > 0 are not supported yet")
 
 proc getShard*(s: Sharding, topic: ContentTopic): Result[PubsubTopic, string] =
   let parsedTopic = NsContentTopic.parse(topic).valueOr:
@@ -60,7 +53,11 @@
 
   ok($shard)
 
-proc parseSharding*(s: Sharding, pubsubTopic: Option[PubsubTopic], contentTopics: ContentTopic|seq[ContentTopic]): Result[Table[NsPubsubTopic, seq[NsContentTopic]], string] =
+proc parseSharding*(
+    s: Sharding,
+    pubsubTopic: Option[PubsubTopic],
+    contentTopics: ContentTopic | seq[ContentTopic],
+): Result[Table[NsPubsubTopic, seq[NsContentTopic]], string] =
   var topics: seq[ContentTopic]
   when contentTopics is seq[ContentTopic]:
     topics = contentTopics
@@ -74,7 +71,8 @@
     let content =
       if parseRes.isErr():
         return err("Cannot parse content topic: " & $parseRes.error)
-      else: parseRes.get()
+      else:
+        parseRes.get()
 
     let pubsub =
       if pubsubTopic.isSome():
@@ -82,13 +80,15 @@
         if parseRes.isErr():
           return err("Cannot parse pubsub topic: " & $parseRes.error)
-        else: parseRes.get()
+        else:
+          parseRes.get()
       else:
         let shardsRes = s.getShard(content)
 
         if shardsRes.isErr():
           return err("Cannot autoshard content topic: " & $shardsRes.error)
-        else: shardsRes.get()
+        else:
+          shardsRes.get()
 
     if not topicMap.hasKey(pubsub):
       topicMap[pubsub] = @[]
diff --git a/waku/waku_discv5.nim b/waku/waku_discv5.nim
index 5daaef420..319ce824b 100644
--- a/waku/waku_discv5.nim
+++ b/waku/waku_discv5.nim
@@ -14,71 +14,71 @@ import
   eth/keys as eth_keys,
   eth/p2p/discoveryv5/node,
   eth/p2p/discoveryv5/protocol
-import
-  ./node/peer_manager/peer_manager,
-  ./waku_core,
-  ./waku_enr
+import ./node/peer_manager/peer_manager, ./waku_core, ./waku_enr
 
 export protocol, waku_enr
 
-
 declarePublicGauge waku_discv5_discovered, "number of nodes discovered"
 declarePublicGauge waku_discv5_errors, "number of waku discv5 errors", ["type"]
 
 logScope:
   topics = "waku discv5"
 
-
 ## Config
 
 type WakuDiscoveryV5Config* = object
-    discv5Config*: Option[DiscoveryConfig]
-    address*: IpAddress
-    port*: Port
-    privateKey*: eth_keys.PrivateKey
-    bootstrapRecords*: seq[waku_enr.Record]
-    autoupdateRecord*: bool
-
+  discv5Config*: Option[DiscoveryConfig]
+  address*: IpAddress
+  port*: Port
+  privateKey*: eth_keys.PrivateKey
+  bootstrapRecords*: seq[waku_enr.Record]
+  autoupdateRecord*: bool
 
 ## Protocol
 
-type WakuDiscv5Predicate* = proc(record: waku_enr.Record): bool {.closure, gcsafe, raises: [].}
+type WakuDiscv5Predicate* =
+  proc(record: waku_enr.Record): bool {.closure, gcsafe, raises: [].}
 
 type WakuDiscoveryV5* = ref object
-  conf: WakuDiscoveryV5Config
-  protocol*: protocol.Protocol
-  listening*: bool
-  predicate: Option[WakuDiscv5Predicate]
-  peerManager: Option[PeerManager]
-  topicSubscriptionQueue:
AsyncEventQueue[SubscriptionEvent] + conf: WakuDiscoveryV5Config + protocol*: protocol.Protocol + listening*: bool + predicate: Option[WakuDiscv5Predicate] + peerManager: Option[PeerManager] + topicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent] -proc shardingPredicate*(record: Record, bootnodes: seq[Record] = @[]): Option[WakuDiscv5Predicate] = +proc shardingPredicate*( + record: Record, bootnodes: seq[Record] = @[] +): Option[WakuDiscv5Predicate] = ## Filter peers based on relay sharding information let typedRecord = record.toTyped().valueOr: - debug "peer filtering failed", reason=error + debug "peer filtering failed", reason = error return none(WakuDiscv5Predicate) let nodeShard = typedRecord.relaySharding().valueOr: debug "no relay sharding information, peer filtering disabled" return none(WakuDiscv5Predicate) - + debug "peer filtering updated" let predicate = proc(record: waku_enr.Record): bool = - bootnodes.contains(record) or # Temp. Bootnode exception - (record.getCapabilities().len > 0 and #RFC 31 requirement - nodeShard.shardIds.anyIt(record.containsShard(nodeShard.clusterId, it))) #RFC 64 guideline + bootnodes.contains(record) or # Temp. Bootnode exception + ( + record.getCapabilities().len > 0 and #RFC 31 requirement + nodeShard.shardIds.anyIt(record.containsShard(nodeShard.clusterId, it)) + ) #RFC 64 guideline return some(predicate) proc new*( - T: type WakuDiscoveryV5, - rng: ref HmacDrbgContext, - conf: WakuDiscoveryV5Config, - record: Option[waku_enr.Record], - peerManager: Option[PeerManager] = none(PeerManager), - queue: AsyncEventQueue[SubscriptionEvent] = newAsyncEventQueue[SubscriptionEvent](30), - ): T = + T: type WakuDiscoveryV5, + rng: ref HmacDrbgContext, + conf: WakuDiscoveryV5Config, + record: Option[waku_enr.Record], + peerManager: Option[PeerManager] = none(PeerManager), + queue: AsyncEventQueue[SubscriptionEvent] = + newAsyncEventQueue[SubscriptionEvent](30), +): T = let protocol = newProtocol( rng = rng, config = conf.discv5Config.get(protocol.defaultDiscoveryConfig), @@ -94,8 +94,10 @@ proc new*( ) let shardPredOp = - if record.isSome(): shardingPredicate(record.get(), conf.bootstrapRecords) - else: none(WakuDiscv5Predicate) + if record.isSome(): + shardingPredicate(record.get(), conf.bootstrapRecords) + else: + none(WakuDiscv5Predicate) WakuDiscoveryV5( conf: conf, @@ -104,14 +106,15 @@ proc new*( predicate: shardPredOp, peerManager: peerManager, topicSubscriptionQueue: queue, - ) + ) -proc updateENRShards(wd: WakuDiscoveryV5, - newTopics: seq[PubsubTopic], add: bool): Result[void, string] = +proc updateENRShards( + wd: WakuDiscoveryV5, newTopics: seq[PubsubTopic], add: bool +): Result[void, string] = ## Add or remove shards from the Discv5 ENR let newShardOp = topicsToRelayShards(newTopics).valueOr: return err("ENR update failed: " & error) - + let newShard = newShardOp.valueOr: return ok() @@ -127,9 +130,10 @@ proc updateENRShards(wd: WakuDiscoveryV5, if currentShard.clusterId != newShard.clusterId: return err("ENR update failed: clusterId id mismatch") - RelayShards.init(currentShard.clusterId, currentShard.shardIds & newShard.shardIds).valueOr: + RelayShards.init( + currentShard.clusterId, currentShard.shardIds & newShard.shardIds + ).valueOr: return err("ENR update failed: " & error) - elif not add and currentShardsOp.isSome(): let currentShard = currentShardsOp.get() @@ -146,10 +150,11 @@ proc updateENRShards(wd: WakuDiscoveryV5, RelayShards.init(currentShard.clusterId, indices).valueOr: return err("ENR update failed: " & error) + elif add and 
currentShardsOp.isNone(): + newShard + else: + return ok() - elif add and currentShardsOp.isNone(): newShard - else: return ok() - let (field, value) = if resultShard.shardIds.len >= ShardingIndicesListMaxLength: (ShardingBitVectorEnrField, resultShard.toBitVector()) @@ -164,7 +169,9 @@ proc updateENRShards(wd: WakuDiscoveryV5, return ok() -proc findRandomPeers*(wd: WakuDiscoveryV5, overridePred = none(WakuDiscv5Predicate)): Future[seq[waku_enr.Record]] {.async.} = +proc findRandomPeers*( + wd: WakuDiscoveryV5, overridePred = none(WakuDiscv5Predicate) +): Future[seq[waku_enr.Record]] {.async.} = ## Find random peers to connect to using Discovery v5 let discoveredNodes = await wd.protocol.queryRandom() @@ -189,7 +196,8 @@ proc searchLoop(wd: WakuDiscoveryV5) {.async.} = while wd.listening: trace "running discv5 discovery loop" let discoveredRecords = await wd.findRandomPeers() - let discoveredPeers = discoveredRecords.mapIt(it.toRemotePeerInfo()).filterIt(it.isOk()).mapIt(it.value) + let discoveredPeers = + discoveredRecords.mapIt(it.toRemotePeerInfo()).filterIt(it.isOk()).mapIt(it.value) for peer in discoveredPeers: # Peers added are filtered by the peer manager @@ -203,7 +211,7 @@ proc searchLoop(wd: WakuDiscoveryV5) {.async.} = proc subscriptionsListener(wd: WakuDiscoveryV5) {.async.} = ## Listen for pubsub topics subscriptions changes - + let key = wd.topicSubscriptionQueue.register() while wd.listening: @@ -221,17 +229,18 @@ proc subscriptionsListener(wd: WakuDiscoveryV5) {.async.} = let subRes = wd.updateENRShards(subs, true) if subRes.isErr(): - debug "ENR shard addition failed", reason= $subRes.error - + debug "ENR shard addition failed", reason = $subRes.error + if unsubRes.isErr(): - debug "ENR shard removal failed", reason= $unsubRes.error + debug "ENR shard removal failed", reason = $unsubRes.error if subRes.isErr() and unsubRes.isErr(): continue debug "ENR updated successfully" - wd.predicate = shardingPredicate(wd.protocol.localNode.record, wd.protocol.bootstrapRecords) + wd.predicate = + shardingPredicate(wd.protocol.localNode.record, wd.protocol.bootstrapRecords) wd.topicSubscriptionQueue.unregister(key) @@ -262,7 +271,7 @@ proc start*(wd: WakuDiscoveryV5): Future[Result[void, string]] {.async.} = proc stop*(wd: WakuDiscoveryV5): Future[void] {.async.} = if not wd.listening: - return + return info "Stopping discovery v5 service" @@ -288,15 +297,12 @@ proc parseBootstrapAddress(address: string): Result[enr.Record, cstring] = return err("Invalid ENR bootstrap record") return ok(enrRec) - elif lowerCaseAddress.startsWith("enode:"): return err("ENode bootstrap addresses are not supported") - else: return err("Ignoring unrecognized bootstrap address type") -proc addBootstrapNode*(bootstrapAddr: string, - bootstrapEnrs: var seq[enr.Record]) = +proc addBootstrapNode*(bootstrapAddr: string, bootstrapEnrs: var seq[enr.Record]) = # Ignore empty lines or lines starting with # if bootstrapAddr.len == 0 or bootstrapAddr[0] == '#': return diff --git a/waku/waku_dnsdisc.nim b/waku/waku_dnsdisc.nim index 21f5c6d57..639289d1c 100644 --- a/waku/waku_dnsdisc.nim +++ b/waku/waku_dnsdisc.nim @@ -21,8 +21,7 @@ import libp2p/multiaddress, libp2p/peerid, dnsdisc/client -import - ./waku_core +import ./waku_core export client @@ -32,17 +31,16 @@ declarePublicGauge waku_dnsdisc_errors, "number of waku dnsdisc errors", ["type" logScope: topics = "waku dnsdisc" -type - WakuDnsDiscovery* = object - client*: Client - resolver*: Resolver +type WakuDnsDiscovery* = object + client*: Client + resolver*: 
Resolver ##################### # DNS Discovery API # ##################### proc emptyResolver*(domain: string): Future[string] {.async, gcsafe.} = - debug "Empty resolver called", domain=domain + debug "Empty resolver called", domain = domain return "" proc findPeers*(wdd: var WakuDnsDiscovery): Result[seq[RemotePeerInfo], cstring] = @@ -53,7 +51,8 @@ proc findPeers*(wdd: var WakuDnsDiscovery): Result[seq[RemotePeerInfo], cstring] # Synchronise client tree using configured resolver var tree: Tree try: - tree = wdd.client.getTree(wdd.resolver) # @TODO: this is currently a blocking operation to not violate memory safety + tree = wdd.client.getTree(wdd.resolver) + # @TODO: this is currently a blocking operation to not violate memory safety except Exception: error "Failed to synchronise client tree" waku_dnsdisc_errors.inc(labelValues = ["tree_sync_failure"]) @@ -62,7 +61,7 @@ proc findPeers*(wdd: var WakuDnsDiscovery): Result[seq[RemotePeerInfo], cstring] let discoveredEnr = wdd.client.getNodeRecords() if discoveredEnr.len > 0: - info "Successfully discovered ENR", count=discoveredEnr.len + info "Successfully discovered ENR", count = discoveredEnr.len else: trace "No ENR retrieved from client tree" @@ -75,24 +74,24 @@ proc findPeers*(wdd: var WakuDnsDiscovery): Result[seq[RemotePeerInfo], cstring] if res.isOk(): discoveredNodes.add(res.get()) else: - error "Failed to convert ENR to peer info", enr= $enr, err=res.error() + error "Failed to convert ENR to peer info", enr = $enr, err = res.error() waku_dnsdisc_errors.inc(labelValues = ["peer_info_failure"]) if discoveredNodes.len > 0: - info "Successfully discovered nodes", count=discoveredNodes.len + info "Successfully discovered nodes", count = discoveredNodes.len waku_dnsdisc_discovered.inc(discoveredNodes.len.int64) return ok(discoveredNodes) -proc init*(T: type WakuDnsDiscovery, - locationUrl: string, - resolver: Resolver): Result[T, cstring] = +proc init*( + T: type WakuDnsDiscovery, locationUrl: string, resolver: Resolver +): Result[T, cstring] = ## Initialise Waku peer discovery via DNS - debug "init WakuDnsDiscovery", locationUrl=locationUrl + debug "init WakuDnsDiscovery", locationUrl = locationUrl let - client = ? Client.init(locationUrl) + client = ?Client.init(locationUrl) wakuDnsDisc = WakuDnsDiscovery(client: client, resolver: resolver) debug "init success" diff --git a/waku/waku_enr.nim b/waku/waku_enr.nim index c3f220abb..74580ea9b 100644 --- a/waku/waku_enr.nim +++ b/waku/waku_enr.nim @@ -1,11 +1,3 @@ -import - ./common/enr, - ./waku_enr/capabilities, - ./waku_enr/multiaddr, - ./waku_enr/sharding +import ./common/enr, ./waku_enr/capabilities, ./waku_enr/multiaddr, ./waku_enr/sharding -export - enr, - capabilities, - multiaddr, - sharding +export enr, capabilities, multiaddr, sharding diff --git a/waku/waku_enr/capabilities.nim b/waku/waku_enr/capabilities.nim index 7702338ad..115285e05 100644 --- a/waku/waku_enr/capabilities.nim +++ b/waku/waku_enr/capabilities.nim @@ -9,12 +9,9 @@ import stew/shims/net, eth/keys, libp2p/crypto/crypto -import - ../common/enr - -const - CapabilitiesEnrField* = "waku2" +import ../common/enr +const CapabilitiesEnrField* = "waku2" type ## 8-bit flag field to indicate Waku node capabilities. 
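
# A short sketch of the bit layout encoded below, assuming this module's
# exports: Relay is bit 0 (the LSB), Store bit 1, Filter bit 2, Lightpush
# bit 3, so a relay+store node encodes as 0b0011. This bitfield is what
# gets stored under the "waku2" ENR key (CapabilitiesEnrField above).
let caps = CapabilitiesBitfield.init(
  lightpush = false, filter = false, store = true, relay = true
)
assert caps.uint8 == 0b0000_0011'u8
assert caps.supportsCapability(Capabilities.Relay)
assert not caps.supportsCapability(Capabilities.Filter)
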
@@ -24,20 +21,23 @@ type ## See: https://rfc.vac.dev/spec/31/#waku2-enr-key ## each enum numbers maps to a bit (where 0 is the LSB) - Capabilities*{.pure.} = enum - Relay = 0, - Store = 1, - Filter = 2, + Capabilities* {.pure.} = enum + Relay = 0 + Store = 1 + Filter = 2 Lightpush = 3 - func init*(T: type CapabilitiesBitfield, lightpush, filter, store, relay: bool): T = ## Creates an waku2 ENR flag bit field according to RFC 31 (https://rfc.vac.dev/spec/31/) var bitfield: uint8 - if relay: bitfield.setBit(0) - if store: bitfield.setBit(1) - if filter: bitfield.setBit(2) - if lightpush: bitfield.setBit(3) + if relay: + bitfield.setBit(0) + if store: + bitfield.setBit(1) + if filter: + bitfield.setBit(2) + if lightpush: + bitfield.setBit(3) CapabilitiesBitfield(bitfield) func init*(T: type CapabilitiesBitfield, caps: varargs[Capabilities]): T = @@ -54,8 +54,9 @@ proc supportsCapability*(bitfield: CapabilitiesBitfield, cap: Capabilities): boo testBit(bitfield.uint8, ord(cap)) func toCapabilities*(bitfield: CapabilitiesBitfield): seq[Capabilities] = - toSeq(Capabilities.low..Capabilities.high).filterIt(supportsCapability(bitfield, it)) - + toSeq(Capabilities.low .. Capabilities.high).filterIt( + supportsCapability(bitfield, it) + ) # ENR builder extension @@ -68,7 +69,6 @@ proc withWakuCapabilities*(builder: var EnrBuilder, caps: varargs[Capabilities]) proc withWakuCapabilities*(builder: var EnrBuilder, caps: openArray[Capabilities]) = withWakuCapabilities(builder, CapabilitiesBitfield.init(@caps)) - # ENR record accessors (e.g., Record, TypedRecord, etc.) func waku2*(record: TypedRecord): Option[CapabilitiesBitfield] = diff --git a/waku/waku_enr/multiaddr.nim b/waku/waku_enr/multiaddr.nim index 8c205cbe2..caf7a819e 100644 --- a/waku/waku_enr/multiaddr.nim +++ b/waku/waku_enr/multiaddr.nim @@ -10,35 +10,34 @@ import eth/keys, libp2p/[multiaddress, multicodec], libp2p/crypto/crypto -import - ../common/enr - -const - MultiaddrEnrField* = "multiaddrs" +import ../common/enr +const MultiaddrEnrField* = "multiaddrs" func encodeMultiaddrs*(multiaddrs: seq[MultiAddress]): seq[byte] = var buffer = newSeq[byte]() for multiaddr in multiaddrs: - let raw = multiaddr.data.buffer # binary encoded multiaddr - size = raw.len.uint16.toBytes(Endianness.bigEndian) # size as Big Endian unsigned 16-bit integer + size = raw.len.uint16.toBytes(Endianness.bigEndian) + # size as Big Endian unsigned 16-bit integer buffer.add(concat(@size, raw)) buffer -func readBytes(rawBytes: seq[byte], numBytes: int, pos: var int = 0): Result[seq[byte], cstring] = +func readBytes( + rawBytes: seq[byte], numBytes: int, pos: var int = 0 +): Result[seq[byte], cstring] = ## Attempts to read `numBytes` from a sequence, from ## position `pos`. Returns the requested slice or ## an error if `rawBytes` boundary is exceeded. ## ## If successful, `pos` is advanced by `numBytes` - if rawBytes[pos..^1].len() < numBytes: + if rawBytes[pos ..^ 1].len() < numBytes: return err("insufficient bytes") - let slicedSeq = rawBytes[pos.. 
MaxShardIndex): return err("invalid shard") @@ -49,7 +48,9 @@ func init*(T: type RelayShards, clusterId: uint16, shardIds: varargs[uint16]): R ok(RelayShards(clusterId: clusterId, shardIds: indicesSeq)) -func init*(T: type RelayShards, clusterId: uint16, shardIds: seq[uint16]): Result[T, string] = +func init*( + T: type RelayShards, clusterId: uint16, shardIds: seq[uint16] +): Result[T, string] = if shardIds.anyIt(it > MaxShardIndex): return err("invalid shard") @@ -78,7 +79,10 @@ func topicsToRelayShards*(topics: seq[string]): Result[Option[RelayShards], stri if parsedTopicsRes.anyIt(it.get().clusterId != parsedTopicsRes[0].get().clusterId): return err("use shards with the same cluster Id.") - let relayShard = ?RelayShards.init(parsedTopicsRes[0].get().clusterId, parsedTopicsRes.mapIt(it.get().shardId)) + let relayShard = + ?RelayShards.init( + parsedTopicsRes[0].get().clusterId, parsedTopicsRes.mapIt(it.get().shardId) + ) return ok(some(relayShard)) @@ -91,7 +95,7 @@ func contains*(rs: RelayShards, topic: NsPubsubTopic): bool = rs.contains(topic.clusterId, topic.shardId) -func contains*(rs: RelayShards, topic: PubsubTopic|string): bool = +func contains*(rs: RelayShards, topic: PubsubTopic | string): bool = let parseRes = NsPubsubTopic.parse(topic) if parseRes.isErr(): return false @@ -115,17 +119,21 @@ func toIndicesList*(rs: RelayShards): EnrResult[seq[byte]] = func fromIndicesList*(buf: seq[byte]): Result[RelayShards, string] = if buf.len < 3: - return err("insufficient data: expected at least 3 bytes, got " & $buf.len & " bytes") + return + err("insufficient data: expected at least 3 bytes, got " & $buf.len & " bytes") - let clusterId = uint16.fromBytesBE(buf[0..1]) + let clusterId = uint16.fromBytesBE(buf[0 .. 1]) let length = int(buf[2]) if buf.len != 3 + 2 * length: - return err("invalid data: `length` field is " & $length & " but " & $buf.len & " bytes were provided") + return err( + "invalid data: `length` field is " & $length & " but " & $buf.len & + " bytes were provided" + ) var shardIds: seq[uint16] - for i in 0.. 
0)
-
 ## Protocol
-type
-  WakuFilterLegacy* = ref object of LPProtocol
-    rng*: ref rand.HmacDrbgContext
-    peerManager*: PeerManager
-    subscriptions*: seq[Subscription]
-    failedPeers*: Table[string, chronos.Moment]
-    timeout*: chronos.Duration
+type WakuFilterLegacy* = ref object of LPProtocol
+  rng*: ref rand.HmacDrbgContext
+  peerManager*: PeerManager
+  subscriptions*: seq[Subscription]
+  failedPeers*: Table[string, chronos.Moment]
+  timeout*: chronos.Duration
 
 proc handleFilterRequest(wf: WakuFilterLegacy, peerId: PeerId, rpc: FilterRPC) =
   let
     requestId = rpc.requestId
     subscribe = rpc.request.get().subscribe
-    pubsubTopic = rpc.request.get().pubsubTopic
+    pubsubTopic = rpc.request.get().pubsubTopic
     contentTopics = rpc.request.get().contentFilters.mapIt(it.contentTopic)
 
   if subscribe:
-    info "added filter subscritpiton", peerId=peerId, pubsubTopic=pubsubTopic, contentTopics=contentTopics
+    info "added filter subscription",
+      peerId = peerId, pubsubTopic = pubsubTopic, contentTopics = contentTopics
     wf.subscriptions.addSubscription(peerId, requestId, pubsubTopic, contentTopics)
   else:
-    info "removed filter subscritpiton", peerId=peerId, contentTopics=contentTopics
+    info "removed filter subscription", peerId = peerId, contentTopics = contentTopics
     wf.subscriptions.removeSubscription(peerId, contentTopics)
 
   waku_legacy_filter_subscribers.set(wf.subscriptions.len.int64)
 
-
 proc initProtocolHandler(wf: WakuFilterLegacy) =
   proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
     let buffer = await conn.readLp(MaxRpcSize.int)
@@ -110,17 +108,19 @@ proc initProtocolHandler(wf: WakuFilterLegacy) =
   wf.handler = handler
   wf.codec = WakuLegacyFilterCodec
 
-proc new*(T: type WakuFilterLegacy,
-          peerManager: PeerManager,
-          rng: ref rand.HmacDrbgContext,
-          timeout: Duration = WakuLegacyFilterTimeout): T =
-  let wf = WakuFilterLegacy(rng: rng,
-                            peerManager: peerManager,
-                            timeout: timeout)
+proc new*(
+    T: type WakuFilterLegacy,
+    peerManager: PeerManager,
+    rng: ref rand.HmacDrbgContext,
+    timeout: Duration = WakuLegacyFilterTimeout,
+): T =
+  let wf = WakuFilterLegacy(rng: rng, peerManager: peerManager, timeout: timeout)
   wf.initProtocolHandler()
   return wf
 
-proc sendFilterRpc(wf: WakuFilterLegacy, rpc: FilterRPC, peer: PeerId|RemotePeerInfo): Future[WakuFilterResult[void]] {.async, gcsafe.}=
+proc sendFilterRpc(
+    wf: WakuFilterLegacy, rpc: FilterRPC, peer: PeerId | RemotePeerInfo
+): Future[WakuFilterResult[void]] {.async, gcsafe.} =
   let connOpt = await wf.peerManager.dialPeer(peer, WakuLegacyFilterCodec)
   if connOpt.isNone():
     return err(dialFailure)
@@ -129,14 +129,15 @@ proc sendFilterRpc(wf: WakuFilterLegacy, rpc: FilterRPC, peer: PeerId|RemotePeer
   await connection.writeLP(rpc.encode().buffer)
   return ok()
 
-
 ### Send message to subscriptors
 proc removePeerFromFailedPeersTable(wf: WakuFilterLegacy, subs: seq[Subscription]) =
   ## Clear the failed peer table if subscriber was able to connect
   for sub in subs:
     wf.failedPeers.del($sub)
 
-proc handleClientError(wf: WakuFilterLegacy, subs: seq[Subscription]) {.raises: [Defect, KeyError].} =
+proc handleClientError(
+    wf: WakuFilterLegacy, subs: seq[Subscription]
+) {.raises: [Defect, KeyError].} =
   ## If we have already failed to send message to this peer,
   ## check for elapsed time and if it's been too long, remove the peer.
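  # Note: the "too long" threshold used here is presumably the `timeout` field
  # configured on WakuFilterLegacy (WakuLegacyFilterTimeout by default); the
  # elapsed-time comparison itself falls outside this hunk.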
for sub in subs: @@ -154,10 +155,11 @@ proc handleClientError(wf: WakuFilterLegacy, subs: seq[Subscription]) {.raises: let index = wf.subscriptions.find(sub) wf.subscriptions.delete(index) - -proc handleMessage*(wf: WakuFilterLegacy, pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = - - trace "handling message", pubsubTopic, contentTopic=msg.contentTopic, subscriptions=wf.subscriptions.len +proc handleMessage*( + wf: WakuFilterLegacy, pubsubTopic: PubsubTopic, msg: WakuMessage +) {.async.} = + trace "handling message", + pubsubTopic, contentTopic = msg.contentTopic, subscriptions = wf.subscriptions.len if wf.subscriptions.len <= 0: return @@ -173,10 +175,8 @@ proc handleMessage*(wf: WakuFilterLegacy, pubsubTopic: PubsubTopic, msg: WakuMes if msg.contentTopic notin sub.contentTopics: continue - let rpc = FilterRPC( - requestId: sub.requestId, - push: some(MessagePush(messages: @[msg])) - ) + let rpc = + FilterRPC(requestId: sub.requestId, push: some(MessagePush(messages: @[msg]))) let res = await wf.sendFilterRpc(rpc, sub.peer) if res.isErr(): diff --git a/waku/waku_filter/protocol_metrics.nim b/waku/waku_filter/protocol_metrics.nim index deb9dd158..520a92064 100644 --- a/waku/waku_filter/protocol_metrics.nim +++ b/waku/waku_filter/protocol_metrics.nim @@ -5,13 +5,14 @@ else: import metrics - -declarePublicGauge waku_legacy_filter_subscribers, "number of light node filter subscribers" -declarePublicGauge waku_legacy_filter_errors, "number of filter protocol errors", ["type"] -declarePublicGauge waku_legacy_filter_messages, "number of filter messages received", ["type"] +declarePublicGauge waku_legacy_filter_subscribers, + "number of light node filter subscribers" +declarePublicGauge waku_legacy_filter_errors, + "number of filter protocol errors", ["type"] +declarePublicGauge waku_legacy_filter_messages, + "number of filter messages received", ["type"] declarePublicGauge waku_node_filters, "number of content filter subscriptions" - # Error types (metric label values) const dialFailure* = "dial_failure" @@ -19,4 +20,3 @@ const peerNotFoundFailure* = "peer_not_found_failure" emptyMessagePushFailure* = "empty_message_push_failure" emptyFilterRequestFailure* = "empty_filter_request_failure" - diff --git a/waku/waku_filter/rpc.nim b/waku/waku_filter/rpc.nim index 0cd3744e1..3014780f5 100644 --- a/waku/waku_filter/rpc.nim +++ b/waku/waku_filter/rpc.nim @@ -3,11 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options -import - ../waku_core - +import std/options +import ../waku_core type ContentFilter* = object diff --git a/waku/waku_filter/rpc_codec.nim b/waku/waku_filter/rpc_codec.nim index ea1dddfa7..9ade0ba04 100644 --- a/waku/waku_filter/rpc_codec.nim +++ b/waku/waku_filter/rpc_codec.nim @@ -3,19 +3,13 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options -import - ../common/protobuf, - ../waku_core, - ./rpc - +import std/options +import ../common/protobuf, ../waku_core, ./rpc # Multiply by 10 for safety. Currently we never push more than 1 message at a time # We add a 64kB safety buffer for protocol overhead. 
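For scale, the constant defined just below comes to roughly 1.5 MiB under the assumption that MaxWakuMessageSize is the 150 KiB default used elsewhere in the codebase (the actual value is imported from waku_core):

const MaxWakuMessageSize = 150 * 1024   # assumed default, in bytes
const MaxRpcSize = 10 * MaxWakuMessageSize + 64 * 1024
doAssert MaxRpcSize == 1_601_536        # about 1.53 MiB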
const MaxRpcSize* = 10 * MaxWakuMessageSize + 64 * 1024 - proc encode*(filter: ContentFilter): ProtoBuffer = var pb = initProtoBuffer() @@ -36,7 +30,6 @@ proc decode*(T: type ContentFilter, buffer: seq[byte]): ProtobufResult[T] = ok(rpc) - proc encode*(rpc: FilterRequest): ProtoBuffer = var pb = initProtoBuffer() @@ -76,7 +69,6 @@ proc decode*(T: type FilterRequest, buffer: seq[byte]): ProtobufResult[T] = ok(rpc) - proc encode*(push: MessagePush): ProtoBuffer = var pb = initProtoBuffer() @@ -101,7 +93,6 @@ proc decode*(T: type MessagePush, buffer: seq[byte]): ProtobufResult[T] = ok(rpc) - proc encode*(rpc: FilterRPC): ProtoBuffer = var pb = initProtoBuffer() diff --git a/waku/waku_filter_v2.nim b/waku/waku_filter_v2.nim index 7f0d7ef23..3f1d010ef 100644 --- a/waku/waku_filter_v2.nim +++ b/waku/waku_filter_v2.nim @@ -1,9 +1,4 @@ import - ./waku_filter_v2/common, - ./waku_filter_v2/protocol, - ./waku_filter_v2/subscriptions + ./waku_filter_v2/common, ./waku_filter_v2/protocol, ./waku_filter_v2/subscriptions -export - common, - protocol, - subscriptions +export common, protocol, subscriptions diff --git a/waku/waku_filter_v2/client.nim b/waku/waku_filter_v2/client.nim index fbcda3aa4..d53566336 100644 --- a/waku/waku_filter_v2/client.nim +++ b/waku/waku_filter_v2/client.nim @@ -5,39 +5,30 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} +import std/options, chronicles, chronos, libp2p/protocols/protocol, bearssl/rand import - std/options, - chronicles, - chronos, - libp2p/protocols/protocol, - bearssl/rand -import - ../node/peer_manager, - ../waku_core, - ./common, - ./protocol_metrics, - ./rpc_codec, - ./rpc + ../node/peer_manager, ../waku_core, ./common, ./protocol_metrics, ./rpc_codec, ./rpc logScope: topics = "waku filter client" -type - WakuFilterClient* = ref object of LPProtocol - rng: ref HmacDrbgContext - peerManager: PeerManager - pushHandlers: seq[FilterPushHandler] +type WakuFilterClient* = ref object of LPProtocol + rng: ref HmacDrbgContext + peerManager: PeerManager + pushHandlers: seq[FilterPushHandler] func generateRequestId(rng: ref HmacDrbgContext): string = var bytes: array[10, byte] hmacDrbgGenerate(rng[], bytes) return toHex(bytes) -proc sendSubscribeRequest(wfc: WakuFilterClient, servicePeer: RemotePeerInfo, - filterSubscribeRequest: FilterSubscribeRequest): - Future[FilterSubscribeResult] - {.async.} = - trace "Sending filter subscribe request", peerId=servicePeer.peerId, filterSubscribeRequest +proc sendSubscribeRequest( + wfc: WakuFilterClient, + servicePeer: RemotePeerInfo, + filterSubscribeRequest: FilterSubscribeRequest, +): Future[FilterSubscribeResult] {.async.} = + trace "Sending filter subscribe request", + peerId = servicePeer.peerId, filterSubscribeRequest let connOpt = await wfc.peerManager.dialPeer(servicePeer, WakuFilterSubscribeCodec) if connOpt.isNone(): @@ -67,58 +58,55 @@ proc sendSubscribeRequest(wfc: WakuFilterClient, servicePeer: RemotePeerInfo, if response.statusCode != 200: trace "Filter subscribe error response", servicePeer, response waku_filter_errors.inc(labelValues = [errorResponse]) - let cause = if response.statusDesc.isSome(): response.statusDesc.get() - else: "filter subscribe error" - return err(FilterSubscribeError.parse(response.statusCode, cause=cause)) + let cause = + if response.statusDesc.isSome(): + response.statusDesc.get() + else: + "filter subscribe error" + return err(FilterSubscribeError.parse(response.statusCode, cause = cause)) return ok() -proc ping*(wfc: WakuFilterClient, servicePeer: RemotePeerInfo): 
Future[FilterSubscribeResult] {.async.} = +proc ping*( + wfc: WakuFilterClient, servicePeer: RemotePeerInfo +): Future[FilterSubscribeResult] {.async.} = let requestId = generateRequestId(wfc.rng) let filterSubscribeRequest = FilterSubscribeRequest.ping(requestId) return await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) -proc subscribe*(wfc: WakuFilterClient, - servicePeer: RemotePeerInfo, - pubsubTopic: PubsubTopic, - contentTopics: seq[ContentTopic]): - Future[FilterSubscribeResult] - {.async.} = - +proc subscribe*( + wfc: WakuFilterClient, + servicePeer: RemotePeerInfo, + pubsubTopic: PubsubTopic, + contentTopics: seq[ContentTopic], +): Future[FilterSubscribeResult] {.async.} = let requestId = generateRequestId(wfc.rng) let filterSubscribeRequest = FilterSubscribeRequest.subscribe( - requestId = requestId, - pubsubTopic = pubsubTopic, - contentTopics = contentTopics + requestId = requestId, pubsubTopic = pubsubTopic, contentTopics = contentTopics ) return await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) -proc unsubscribe*(wfc: WakuFilterClient, - servicePeer: RemotePeerInfo, - pubsubTopic: PubsubTopic, - contentTopics: seq[ContentTopic]): - Future[FilterSubscribeResult] - {.async.} = - +proc unsubscribe*( + wfc: WakuFilterClient, + servicePeer: RemotePeerInfo, + pubsubTopic: PubsubTopic, + contentTopics: seq[ContentTopic], +): Future[FilterSubscribeResult] {.async.} = let requestId = generateRequestId(wfc.rng) let filterSubscribeRequest = FilterSubscribeRequest.unsubscribe( - requestId = requestId, - pubsubTopic = pubsubTopic, - contentTopics = contentTopics + requestId = requestId, pubsubTopic = pubsubTopic, contentTopics = contentTopics ) return await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) -proc unsubscribeAll*(wfc: WakuFilterClient, servicePeer: RemotePeerInfo): - Future[FilterSubscribeResult] - {.async.} = - +proc unsubscribeAll*( + wfc: WakuFilterClient, servicePeer: RemotePeerInfo +): Future[FilterSubscribeResult] {.async.} = let requestId = generateRequestId(wfc.rng) - let filterSubscribeRequest = FilterSubscribeRequest.unsubscribeAll( - requestId = requestId - ) + let filterSubscribeRequest = + FilterSubscribeRequest.unsubscribeAll(requestId = requestId) return await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) @@ -126,22 +114,20 @@ proc registerPushHandler*(wfc: WakuFilterClient, handler: FilterPushHandler) = wfc.pushHandlers.add(handler) proc initProtocolHandler(wfc: WakuFilterClient) = - proc handler(conn: Connection, proto: string) {.async.} = let buf = await conn.readLp(int(MaxPushSize)) let decodeRes = MessagePush.decode(buf) if decodeRes.isErr(): - error "Failed to decode message push", peerId=conn.peerId + error "Failed to decode message push", peerId = conn.peerId waku_filter_errors.inc(labelValues = [decodeRpcFailure]) return let messagePush = decodeRes.value #TODO: toAPI() split here - trace "Received message push", peerId=conn.peerId, messagePush + trace "Received message push", peerId = conn.peerId, messagePush for handler in wfc.pushHandlers: - asyncSpawn handler(messagePush.pubsubTopic, - messagePush.wakuMessage) + asyncSpawn handler(messagePush.pubsubTopic, messagePush.wakuMessage) # Protocol specifies no response for now return @@ -149,15 +135,9 @@ proc initProtocolHandler(wfc: WakuFilterClient) = wfc.handler = handler wfc.codec = WakuFilterPushCodec -proc new*(T: type WakuFilterClient, - peerManager: PeerManager, - rng: ref HmacDrbgContext - ): T = - - let wfc = WakuFilterClient( - 
rng: rng, - peerManager: peerManager, - pushHandlers: @[] - ) +proc new*( + T: type WakuFilterClient, peerManager: PeerManager, rng: ref HmacDrbgContext +): T = + let wfc = WakuFilterClient(rng: rng, peerManager: peerManager, pushHandlers: @[]) wfc.initProtocolHandler() wfc diff --git a/waku/waku_filter_v2/common.nim b/waku/waku_filter_v2/common.nim index 06689e414..ad5c7f21d 100644 --- a/waku/waku_filter_v2/common.nim +++ b/waku/waku_filter_v2/common.nim @@ -3,8 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - stew/results +import stew/results const WakuFilterSubscribeCodec* = "/vac/waku/filter-subscribe/2.0.0-beta1" @@ -31,33 +30,35 @@ type FilterSubscribeResult* = Result[void, FilterSubscribeError] # Convenience functions -proc peerDialFailure*(T: type FilterSubscribeError, address: string): FilterSubscribeError = +proc peerDialFailure*( + T: type FilterSubscribeError, address: string +): FilterSubscribeError = FilterSubscribeError( - kind: FilterSubscribeErrorKind.PEER_DIAL_FAILURE, - address: address) + kind: FilterSubscribeErrorKind.PEER_DIAL_FAILURE, address: address + ) -proc badResponse*(T: type FilterSubscribeError, cause = "bad response"): FilterSubscribeError = - FilterSubscribeError( - kind: FilterSubscribeErrorKind.BAD_RESPONSE, - cause: cause) +proc badResponse*( + T: type FilterSubscribeError, cause = "bad response" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.BAD_RESPONSE, cause: cause) -proc badRequest*(T: type FilterSubscribeError, cause = "bad request"): FilterSubscribeError = - FilterSubscribeError( - kind: FilterSubscribeErrorKind.BAD_REQUEST, - cause: cause) +proc badRequest*( + T: type FilterSubscribeError, cause = "bad request" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.BAD_REQUEST, cause: cause) -proc notFound*(T: type FilterSubscribeError, cause = "peer has no subscriptions"): FilterSubscribeError = - FilterSubscribeError( - kind: FilterSubscribeErrorKind.NOT_FOUND, - cause: cause) +proc notFound*( + T: type FilterSubscribeError, cause = "peer has no subscriptions" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.NOT_FOUND, cause: cause) -proc serviceUnavailable*(T: type FilterSubscribeError, cause = "service unavailable"): FilterSubscribeError = - FilterSubscribeError( - kind: FilterSubscribeErrorKind.SERVICE_UNAVAILABLE, - cause: cause) +proc serviceUnavailable*( + T: type FilterSubscribeError, cause = "service unavailable" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.SERVICE_UNAVAILABLE, cause: cause) proc parse*(T: type FilterSubscribeErrorKind, kind: uint32): T = - case kind: + case kind of 000, 200, 300, 400, 404, 503: FilterSubscribeErrorKind(kind) else: @@ -65,21 +66,16 @@ proc parse*(T: type FilterSubscribeErrorKind, kind: uint32): T = proc parse*(T: type FilterSubscribeError, kind: uint32, cause = "", address = ""): T = let kind = FilterSubscribeErrorKind.parse(kind) - case kind: + case kind of PEER_DIAL_FAILURE: - FilterSubscribeError( - kind: kind, - address: address) + FilterSubscribeError(kind: kind, address: address) of BAD_RESPONSE, BAD_REQUEST, NOT_FOUND, SERVICE_UNAVAILABLE: - FilterSubscribeError( - kind: kind, - cause: cause) + FilterSubscribeError(kind: kind, cause: cause) else: - FilterSubscribeError( - kind: kind) + FilterSubscribeError(kind: kind) proc `$`*(err: FilterSubscribeError): string = - case err.kind: + case err.kind of 
FilterSubscribeErrorKind.PEER_DIAL_FAILURE: "PEER_DIAL_FAILURE: " & err.address of FilterSubscribeErrorKind.BAD_RESPONSE: diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim index fd422e6fc..59418ec68 100644 --- a/waku/waku_filter_v2/protocol.nim +++ b/waku/waku_filter_v2/protocol.nim @@ -6,7 +6,7 @@ else: {.push raises: [].} import - std/[options,sequtils,sets,strutils,tables], + std/[options, sequtils, sets, strutils, tables], stew/byteutils, chronicles, chronos, @@ -24,61 +24,76 @@ import logScope: topics = "waku filter" -const - MaxContentTopicsPerRequest* = 100 +const MaxContentTopicsPerRequest* = 100 -type - WakuFilter* = ref object of LPProtocol - subscriptions*: FilterSubscriptions # a mapping of peer ids to a sequence of filter criteria - peerManager: PeerManager - maintenanceTask: TimerCallback +type WakuFilter* = ref object of LPProtocol + subscriptions*: FilterSubscriptions + # a mapping of peer ids to a sequence of filter criteria + peerManager: PeerManager + maintenanceTask: TimerCallback proc pingSubscriber(wf: WakuFilter, peerId: PeerID): FilterSubscribeResult = - trace "pinging subscriber", peerId=peerId + trace "pinging subscriber", peerId = peerId if not wf.subscriptions.isSubscribed(peerId): - debug "pinging peer has no subscriptions", peerId=peerId + debug "pinging peer has no subscriptions", peerId = peerId return err(FilterSubscribeError.notFound()) wf.subscriptions.refreshSubscription(peerId) ok() -proc subscribe(wf: WakuFilter, - peerId: PeerID, - pubsubTopic: Option[PubsubTopic], - contentTopics: seq[ContentTopic]): FilterSubscribeResult = - +proc subscribe( + wf: WakuFilter, + peerId: PeerID, + pubsubTopic: Option[PubsubTopic], + contentTopics: seq[ContentTopic], +): FilterSubscribeResult = # TODO: check if this condition is valid??? 
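  # The validation below rejects a request with no pubsub topic or an empty
  # content-topic list as BAD_REQUEST, and likewise one carrying more than
  # MaxContentTopicsPerRequest (100) content topics.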
if pubsubTopic.isNone() or contentTopics.len == 0: - return err(FilterSubscribeError.badRequest("pubsubTopic and contentTopics must be specified")) + return err( + FilterSubscribeError.badRequest("pubsubTopic and contentTopics must be specified") + ) if contentTopics.len > MaxContentTopicsPerRequest: - return err(FilterSubscribeError.badRequest("exceeds maximum content topics: " & - $MaxContentTopicsPerRequest)) + return err( + FilterSubscribeError.badRequest( + "exceeds maximum content topics: " & $MaxContentTopicsPerRequest + ) + ) let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it))) - trace "subscribing peer to filter criteria", peerId=peerId, filterCriteria=filterCriteria + trace "subscribing peer to filter criteria", + peerId = peerId, filterCriteria = filterCriteria wf.subscriptions.addSubscription(peerId, filterCriteria).isOkOr: return err(FilterSubscribeError.serviceUnavailable(error)) ok() -proc unsubscribe(wf: WakuFilter, - peerId: PeerID, - pubsubTopic: Option[PubsubTopic], - contentTopics: seq[ContentTopic]): FilterSubscribeResult = +proc unsubscribe( + wf: WakuFilter, + peerId: PeerID, + pubsubTopic: Option[PubsubTopic], + contentTopics: seq[ContentTopic], +): FilterSubscribeResult = if pubsubTopic.isNone() or contentTopics.len == 0: - return err(FilterSubscribeError.badRequest("pubsubTopic and contentTopics must be specified")) + return err( + FilterSubscribeError.badRequest("pubsubTopic and contentTopics must be specified") + ) if contentTopics.len > MaxContentTopicsPerRequest: - return err(FilterSubscribeError.badRequest("exceeds maximum content topics: " & $MaxContentTopicsPerRequest)) + return err( + FilterSubscribeError.badRequest( + "exceeds maximum content topics: " & $MaxContentTopicsPerRequest + ) + ) let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it))) - debug "unsubscribing peer from filter criteria", peerId=peerId, filterCriteria=filterCriteria + debug "unsubscribing peer from filter criteria", + peerId = peerId, filterCriteria = filterCriteria wf.subscriptions.removeSubscription(peerId, filterCriteria).isOkOr: return err(FilterSubscribeError.notFound()) @@ -87,19 +102,19 @@ proc unsubscribe(wf: WakuFilter, proc unsubscribeAll(wf: WakuFilter, peerId: PeerID): FilterSubscribeResult = if not wf.subscriptions.isSubscribed(peerId): - debug "unsubscribing peer has no subscriptions", peerId=peerId + debug "unsubscribing peer has no subscriptions", peerId = peerId return err(FilterSubscribeError.notFound()) - debug "removing peer subscription", peerId=peerId + debug "removing peer subscription", peerId = peerId wf.subscriptions.removePeer(peerId) wf.subscriptions.cleanUp() ok() -proc handleSubscribeRequest*(wf: WakuFilter, - peerId: PeerId, - request: FilterSubscribeRequest): FilterSubscribeResponse = - info "received filter subscribe request", peerId=peerId, request=request +proc handleSubscribeRequest*( + wf: WakuFilter, peerId: PeerId, request: FilterSubscribeRequest +): FilterSubscribeResponse = + info "received filter subscribe request", peerId = peerId, request = request waku_filter_requests.inc(labelValues = [$request.filterSubscribeType]) var subscribeResult: FilterSubscribeResult @@ -114,31 +129,34 @@ proc handleSubscribeRequest*(wf: WakuFilter, of FilterSubscribeType.SUBSCRIBE: subscribeResult = wf.subscribe(peerId, request.pubsubTopic, request.contentTopics) of FilterSubscribeType.UNSUBSCRIBE: - subscribeResult = wf.unsubscribe(peerId, request.pubsubTopic, request.contentTopics) + subscribeResult = + 
wf.unsubscribe(peerId, request.pubsubTopic, request.contentTopics) of FilterSubscribeType.UNSUBSCRIBE_ALL: subscribeResult = wf.unsubscribeAll(peerId) let requestDuration = Moment.now() - requestStartTime - requestDurationSec = requestDuration.milliseconds.float / 1000 # Duration in seconds with millisecond precision floating point + requestDurationSec = requestDuration.milliseconds.float / 1000 + # Duration in seconds with millisecond precision floating point waku_filter_request_duration_seconds.observe( - requestDurationSec, labelValues = [$request.filterSubscribeType]) + requestDurationSec, labelValues = [$request.filterSubscribeType] + ) if subscribeResult.isErr(): return FilterSubscribeResponse( requestId: request.requestId, statusCode: subscribeResult.error.kind.uint32, - statusDesc: some($subscribeResult.error) + statusDesc: some($subscribeResult.error), ) else: return FilterSubscribeResponse.ok(request.requestId) proc pushToPeer(wf: WakuFilter, peer: PeerId, buffer: seq[byte]) {.async.} = - trace "pushing message to subscribed peer", peer=peer + trace "pushing message to subscribed peer", peer = peer if not wf.peerManager.peerStore.hasPeer(peer, WakuFilterPushCodec): # Check that peer has not been removed from peer store - trace "no addresses for peer", peer=peer + trace "no addresses for peer", peer = peer return ## TODO: Check if dial is necessary always??? @@ -146,16 +164,19 @@ proc pushToPeer(wf: WakuFilter, peer: PeerId, buffer: seq[byte]) {.async.} = if conn.isNone(): ## We do not remove this peer, but allow the underlying peer manager ## to do so if it is deemed necessary - trace "no connection to peer", peer=peer + trace "no connection to peer", peer = peer return await conn.get().writeLp(buffer) -proc pushToPeers(wf: WakuFilter, peers: seq[PeerId], messagePush: MessagePush) {.async.} = - debug "pushing message to subscribed peers", pubsubTopic=messagePush.pubsubTopic, - contentTopic=messagePush.wakuMessage.contentTopic, - peers=peers, - hash=messagePush.pubsubTopic.computeMessageHash(messagePush.wakuMessage).to0xHex() +proc pushToPeers( + wf: WakuFilter, peers: seq[PeerId], messagePush: MessagePush +) {.async.} = + debug "pushing message to subscribed peers", + pubsubTopic = messagePush.pubsubTopic, + contentTopic = messagePush.wakuMessage.contentTopic, + peers = peers, + hash = messagePush.pubsubTopic.computeMessageHash(messagePush.wakuMessage).to0xHex() let bufferToPublish = messagePush.encode().buffer @@ -173,7 +194,8 @@ proc maintainSubscriptions*(wf: WakuFilter) = var peersToRemove: seq[PeerId] for peerId in wf.subscriptions.peersSubscribed.keys: if not wf.peerManager.peerStore.hasPeer(peerId, WakuFilterPushCodec): - debug "peer has been removed from peer store, removing subscription", peerId=peerId + debug "peer has been removed from peer store, removing subscription", + peerId = peerId peersToRemove.add(peerId) if peersToRemove.len > 0: @@ -185,48 +207,54 @@ proc maintainSubscriptions*(wf: WakuFilter) = waku_filter_subscriptions.set(wf.subscriptions.peersSubscribed.len.float64) const MessagePushTimeout = 20.seconds -proc handleMessage*(wf: WakuFilter, pubsubTopic: PubsubTopic, message: WakuMessage) {.async.} = - trace "handling message", pubsubTopic=pubsubTopic, message=message +proc handleMessage*( + wf: WakuFilter, pubsubTopic: PubsubTopic, message: WakuMessage +) {.async.} = + trace "handling message", pubsubTopic = pubsubTopic, message = message let handleMessageStartTime = Moment.now() block: ## Find subscribers and push message to them - let subscribedPeers = 
wf.subscriptions.findSubscribedPeers(pubsubTopic, message.contentTopic) + let subscribedPeers = + wf.subscriptions.findSubscribedPeers(pubsubTopic, message.contentTopic) if subscribedPeers.len == 0: - trace "no subscribed peers found", pubsubTopic=pubsubTopic, contentTopic=message.contentTopic + trace "no subscribed peers found", + pubsubTopic = pubsubTopic, contentTopic = message.contentTopic return - let messagePush = MessagePush( - pubsubTopic: pubsubTopic, - wakuMessage: message) + let messagePush = MessagePush(pubsubTopic: pubsubTopic, wakuMessage: message) - if not await wf.pushToPeers(subscribedPeers, messagePush).withTimeout(MessagePushTimeout): - debug "timed out pushing message to peers", pubsubTopic=pubsubTopic, - contentTopic=message.contentTopic, - hash=pubsubTopic.computeMessageHash(message).to0xHex() + if not await wf.pushToPeers(subscribedPeers, messagePush).withTimeout( + MessagePushTimeout + ): + debug "timed out pushing message to peers", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + hash = pubsubTopic.computeMessageHash(message).to0xHex() waku_filter_errors.inc(labelValues = [pushTimeoutFailure]) else: debug "pushed message succesfully to all subscribers", - pubsubTopic=pubsubTopic, - contentTopic=message.contentTopic, - hash=pubsubTopic.computeMessageHash(message).to0xHex() + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + hash = pubsubTopic.computeMessageHash(message).to0xHex() let handleMessageDuration = Moment.now() - handleMessageStartTime - handleMessageDurationSec = handleMessageDuration.milliseconds.float / 1000 # Duration in seconds with millisecond precision floating point + handleMessageDurationSec = handleMessageDuration.milliseconds.float / 1000 + # Duration in seconds with millisecond precision floating point waku_filter_handle_message_duration_seconds.observe(handleMessageDurationSec) proc initProtocolHandler(wf: WakuFilter) = - proc handler(conn: Connection, proto: string) {.async.} = - trace "filter subscribe request handler triggered", peerId=conn.peerId + trace "filter subscribe request handler triggered", peerId = conn.peerId let buf = await conn.readLp(int(MaxSubscribeSize)) let decodeRes = FilterSubscribeRequest.decode(buf) if decodeRes.isErr(): - error "Failed to decode filter subscribe request", peerId=conn.peerId, err=decodeRes.error + error "Failed to decode filter subscribe request", + peerId = conn.peerId, err = decodeRes.error waku_filter_errors.inc(labelValues = [decodeRpcFailure]) return @@ -234,7 +262,7 @@ proc initProtocolHandler(wf: WakuFilter) = let response = wf.handleSubscribeRequest(conn.peerId, request) - debug "sending filter subscribe response", peerId=conn.peerId, response=response + debug "sending filter subscribe response", peerId = conn.peerId, response = response await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here return @@ -242,18 +270,18 @@ proc initProtocolHandler(wf: WakuFilter) = wf.handler = handler wf.codec = WakuFilterSubscribeCodec -proc new*(T: type WakuFilter, - peerManager: PeerManager, - subscriptionTimeout: Duration = DefaultSubscriptionTimeToLiveSec, - maxFilterPeers: uint32 = MaxFilterPeers, - maxFilterCriteriaPerPeer: uint32 = MaxFilterCriteriaPerPeer): T = - +proc new*( + T: type WakuFilter, + peerManager: PeerManager, + subscriptionTimeout: Duration = DefaultSubscriptionTimeToLiveSec, + maxFilterPeers: uint32 = MaxFilterPeers, + maxFilterCriteriaPerPeer: uint32 = MaxFilterCriteriaPerPeer, +): T = let wf = WakuFilter( - subscriptions: 
FilterSubscriptions.init(subscriptionTimeout, - maxFilterPeers, - maxFilterCriteriaPerPeer - ), - peerManager: peerManager + subscriptions: FilterSubscriptions.init( + subscriptionTimeout, maxFilterPeers, maxFilterCriteriaPerPeer + ), + peerManager: peerManager, ) wf.initProtocolHandler() diff --git a/waku/waku_filter_v2/protocol_metrics.nim b/waku/waku_filter_v2/protocol_metrics.nim index 894407d53..bb30d51a1 100644 --- a/waku/waku_filter_v2/protocol_metrics.nim +++ b/waku/waku_filter_v2/protocol_metrics.nim @@ -8,10 +8,13 @@ import metrics export metrics declarePublicGauge waku_filter_errors, "number of filter protocol errors", ["type"] -declarePublicGauge waku_filter_requests, "number of filter subscribe requests received", ["type"] +declarePublicGauge waku_filter_requests, + "number of filter subscribe requests received", ["type"] declarePublicGauge waku_filter_subscriptions, "number of subscribed filter clients" -declarePublicHistogram waku_filter_request_duration_seconds, "duration of filter subscribe requests", ["type"] -declarePublicHistogram waku_filter_handle_message_duration_seconds, "duration to push message to filter subscribers" +declarePublicHistogram waku_filter_request_duration_seconds, + "duration of filter subscribe requests", ["type"] +declarePublicHistogram waku_filter_handle_message_duration_seconds, + "duration to push message to filter subscribers" # Error types (metric label values) const diff --git a/waku/waku_filter_v2/rpc.nim b/waku/waku_filter_v2/rpc.nim index 58295699b..1878bffa2 100644 --- a/waku/waku_filter_v2/rpc.nim +++ b/waku/waku_filter_v2/rpc.nim @@ -3,11 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - json_serialization, - std/options -import - ../waku_core +import json_serialization, std/options +import ../waku_core type FilterSubscribeType* {.pure.} = enum @@ -17,84 +14,82 @@ type UNSUBSCRIBE = uint32(2) UNSUBSCRIBE_ALL = uint32(3) - FilterSubscribeRequest* = object - # Request from client to service node + FilterSubscribeRequest* = object # Request from client to service node requestId*: string filterSubscribeType*: FilterSubscribeType pubsubTopic*: Option[PubsubTopic] contentTopics*: seq[ContentTopic] - FilterSubscribeResponse* = object - # Response from service node to client + FilterSubscribeResponse* = object # Response from service node to client requestId*: string statusCode*: uint32 statusDesc*: Option[string] - MessagePush* = object - # Message pushed from service node to client + MessagePush* = object # Message pushed from service node to client wakuMessage*: WakuMessage pubsubTopic*: string # Convenience functions proc ping*(T: type FilterSubscribeRequest, requestId: string): T = - FilterSubscribeRequest( - requestId: requestId, - filterSubscribeType: SUBSCRIBER_PING - ) + FilterSubscribeRequest(requestId: requestId, filterSubscribeType: SUBSCRIBER_PING) -proc subscribe*(T: type FilterSubscribeRequest, requestId: string, pubsubTopic: PubsubTopic, contentTopics: seq[ContentTopic]): T = +proc subscribe*( + T: type FilterSubscribeRequest, + requestId: string, + pubsubTopic: PubsubTopic, + contentTopics: seq[ContentTopic], +): T = FilterSubscribeRequest( requestId: requestId, filterSubscribeType: SUBSCRIBE, pubsubTopic: some(pubsubTopic), - contentTopics: contentTopics + contentTopics: contentTopics, ) -proc unsubscribe*(T: type FilterSubscribeRequest, requestId: string, pubsubTopic: PubsubTopic, contentTopics: seq[ContentTopic]): T = +proc unsubscribe*( + T: type FilterSubscribeRequest, + requestId: string, + 
pubsubTopic: PubsubTopic, + contentTopics: seq[ContentTopic], +): T = FilterSubscribeRequest( requestId: requestId, filterSubscribeType: UNSUBSCRIBE, pubsubTopic: some(pubsubTopic), - contentTopics: contentTopics + contentTopics: contentTopics, ) proc unsubscribeAll*(T: type FilterSubscribeRequest, requestId: string): T = - FilterSubscribeRequest( - requestId: requestId, - filterSubscribeType: UNSUBSCRIBE_ALL - ) + FilterSubscribeRequest(requestId: requestId, filterSubscribeType: UNSUBSCRIBE_ALL) proc ok*(T: type FilterSubscribeResponse, requestId: string, desc = "OK"): T = - FilterSubscribeResponse( - requestId: requestId, - statusCode: 200, - statusDesc: some(desc) - ) + FilterSubscribeResponse(requestId: requestId, statusCode: 200, statusDesc: some(desc)) proc `$`*(err: FilterSubscribeResponse): string = - "FilterSubscribeResponse of req:" & err.requestId & " [" & $err.statusCode & "] " & $err.statusDesc + "FilterSubscribeResponse of req:" & err.requestId & " [" & $err.statusCode & "] " & + $err.statusDesc proc `$`*(req: FilterSubscribeRequest): string = - "FilterSubscribeRequest of req:" & req.requestId & " [" & $req.filterSubscribeType & + "FilterSubscribeRequest of req:" & req.requestId & " [" & $req.filterSubscribeType & "] pubsubTopic:" & $req.pubsubTopic & " contentTopics:" & $req.contentTopics proc `$`*(t: FilterSubscribeType): string = - result = case t: + result = + case t of SUBSCRIBER_PING: "SUBSCRIBER_PING" of SUBSCRIBE: "SUBSCRIBE" of UNSUBSCRIBE: "UNSUBSCRIBE" of UNSUBSCRIBE_ALL: "UNSUBSCRIBE_ALL" -proc writeValue*(writer: var JsonWriter, value: FilterSubscribeRequest) {.inline, raises: [IOError].} = +proc writeValue*( + writer: var JsonWriter, value: FilterSubscribeRequest +) {.inline, raises: [IOError].} = writer.beginRecord() writer.writeField("requestId", value.requestId) writer.writeField("type", value.filterSubscribeType) if value.pubsubTopic.isSome: writer.writeField("pubsubTopic", value.pubsubTopic) - if value.contentTopics.len>0: + if value.contentTopics.len > 0: writer.writeField("contentTopics", value.contentTopics) writer.endRecord() - - - diff --git a/waku/waku_filter_v2/rpc_codec.nim b/waku/waku_filter_v2/rpc_codec.nim index e96263352..8890a99c3 100644 --- a/waku/waku_filter_v2/rpc_codec.nim +++ b/waku/waku_filter_v2/rpc_codec.nim @@ -3,17 +3,15 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options -import - ../common/protobuf, - ../waku_core, - ./rpc +import std/options +import ../common/protobuf, ../waku_core, ./rpc const - MaxSubscribeSize* = 10 * MaxWakuMessageSize + 64*1024 # We add a 64kB safety buffer for protocol overhead - MaxSubscribeResponseSize* = 64*1024 # Responses are small. 64kB safety buffer. - MaxPushSize* = 10 * MaxWakuMessageSize + 64*1024 # We add a 64kB safety buffer for protocol overhead + MaxSubscribeSize* = 10 * MaxWakuMessageSize + 64 * 1024 + # We add a 64kB safety buffer for protocol overhead + MaxSubscribeResponseSize* = 64 * 1024 # Responses are small. 64kB safety buffer. 
+ MaxPushSize* = 10 * MaxWakuMessageSize + 64 * 1024 + # We add a 64kB safety buffer for protocol overhead proc encode*(rpc: FilterSubscribeRequest): ProtoBuffer = var pb = initProtoBuffer() diff --git a/waku/waku_filter_v2/subscriptions.nim b/waku/waku_filter_v2/subscriptions.nim index ba568f331..37348025d 100644 --- a/waku/waku_filter_v2/subscriptions.nim +++ b/waku/waku_filter_v2/subscriptions.nim @@ -3,15 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/[sets,tables], - chronicles, - chronos, - libp2p/peerid, - stew/shims/sets -import - ../waku_core, - ../utils/tableutils +import std/[sets, tables], chronicles, chronos, libp2p/peerid, stew/shims/sets +import ../waku_core, ../utils/tableutils logScope: topics = "waku filter subscriptions" @@ -23,37 +16,35 @@ const type # a single filter criterion is fully defined by a pubsub topic and content topic - FilterCriterion* = tuple - pubsubTopic: PubsubTopic - contentTopic: ContentTopic + FilterCriterion* = tuple[pubsubTopic: PubsubTopic, contentTopic: ContentTopic] FilterCriteria* = HashSet[FilterCriterion] # a sequence of filter criteria SubscribedPeers* = HashSet[PeerID] # a sequence of peer ids - PeerData* = tuple - lastSeen: Moment - criteriaCount: uint + PeerData* = tuple[lastSeen: Moment, criteriaCount: uint] FilterSubscriptions* = object - peersSubscribed* : Table[PeerID, PeerData] - subscriptions : Table[FilterCriterion, SubscribedPeers] - subscriptionTimeout : Duration - maxPeers : uint - maxCriteriaPerPeer : uint + peersSubscribed*: Table[PeerID, PeerData] + subscriptions: Table[FilterCriterion, SubscribedPeers] + subscriptionTimeout: Duration + maxPeers: uint + maxCriteriaPerPeer: uint -proc init*(T: type FilterSubscriptions, - subscriptionTimeout: Duration = DefaultSubscriptionTimeToLiveSec, - maxFilterPeers: uint32 = MaxFilterPeers, - maxFilterCriteriaPerPeer: uint32 = MaxFilterCriteriaPerPeer): FilterSubscriptions = +proc init*( + T: type FilterSubscriptions, + subscriptionTimeout: Duration = DefaultSubscriptionTimeToLiveSec, + maxFilterPeers: uint32 = MaxFilterPeers, + maxFilterCriteriaPerPeer: uint32 = MaxFilterCriteriaPerPeer, +): FilterSubscriptions = ## Create a new filter subscription object return FilterSubscriptions( - peersSubscribed: initTable[PeerID, PeerData](), - subscriptions: initTable[FilterCriterion, SubscribedPeers](), - subscriptionTimeout: subscriptionTimeout, - maxPeers: maxFilterPeers, - maxCriteriaPerPeer: maxFilterCriteriaPerPeer - ) + peersSubscribed: initTable[PeerID, PeerData](), + subscriptions: initTable[FilterCriterion, SubscribedPeers](), + subscriptionTimeout: subscriptionTimeout, + maxPeers: maxFilterPeers, + maxCriteriaPerPeer: maxFilterCriteriaPerPeer, + ) proc isSubscribed*(s: var FilterSubscriptions, peerId: PeerID): bool = s.peersSubscribed.withValue(peerId, data): @@ -64,7 +55,9 @@ proc isSubscribed*(s: var FilterSubscriptions, peerId: PeerID): bool = proc subscribedPeerCount*(s: FilterSubscriptions): uint = return cast[uint](s.peersSubscribed.len) -proc getPeerSubscriptions*(s: var FilterSubscriptions, peerId: PeerID): seq[FilterCriterion] = +proc getPeerSubscriptions*( + s: var FilterSubscriptions, peerId: PeerID +): seq[FilterCriterion] = ## Get all pubsub-content topics a peer is subscribed to var subscribedContentTopics: seq[FilterCriterion] = @[] s.peersSubscribed.withValue(peerId, data): @@ -77,10 +70,12 @@ proc getPeerSubscriptions*(s: var FilterSubscriptions, peerId: PeerID): seq[Filt return subscribedContentTopics -proc findSubscribedPeers*(s: 
var FilterSubscriptions, pubsubTopic: PubsubTopic, contentTopic: ContentTopic): seq[PeerID] = - let filterCriterion : FilterCriterion = (pubsubTopic, contentTopic) +proc findSubscribedPeers*( + s: var FilterSubscriptions, pubsubTopic: PubsubTopic, contentTopic: ContentTopic +): seq[PeerID] = + let filterCriterion: FilterCriterion = (pubsubTopic, contentTopic) - var foundPeers : seq[PeerID] = @[] + var foundPeers: seq[PeerID] = @[] # only peers subscribed to criteria and with legit subscription is counted s.subscriptions.withValue(filterCriterion, peers): for peer in peers[]: @@ -104,7 +99,7 @@ proc cleanUp*(fs: var FilterSubscriptions) = var filtersToRemove: seq[FilterCriterion] = @[] for filterCriterion, subscribedPeers in fs.subscriptions.mpairs: - subscribedPeers.keepItIf(fs.isSubscribed(it)==true) + subscribedPeers.keepItIf(fs.isSubscribed(it) == true) fs.subscriptions.keepItIf(val.len > 0) @@ -112,7 +107,9 @@ proc refreshSubscription*(s: var FilterSubscriptions, peerId: PeerID) = s.peersSubscribed.withValue(peerId, data): data.lastSeen = Moment.now() -proc addSubscription*(s: var FilterSubscriptions, peerId: PeerID, filterCriteria: FilterCriteria): Result[void, string] = +proc addSubscription*( + s: var FilterSubscriptions, peerId: PeerID, filterCriteria: FilterCriteria +): Result[void, string] = ## Add a subscription for a given peer var peerData: ptr PeerData @@ -122,7 +119,6 @@ proc addSubscription*(s: var FilterSubscriptions, peerId: PeerID, filterCriteria data.lastSeen = Moment.now() peerData = data - do: ## not yet subscribed if cast[uint](s.peersSubscribed.len) >= s.maxPeers: @@ -139,10 +135,9 @@ proc addSubscription*(s: var FilterSubscriptions, peerId: PeerID, filterCriteria return ok() -proc removeSubscription*(s: var FilterSubscriptions, - peerId: PeerID, - filterCriteria: FilterCriteria): - Result[void, string] = +proc removeSubscription*( + s: var FilterSubscriptions, peerId: PeerID, filterCriteria: FilterCriteria +): Result[void, string] = ## Remove a subscription for a given peer s.peersSubscribed.withValue(peerId, peerData): @@ -161,8 +156,5 @@ proc removeSubscription*(s: var FilterSubscriptions, return err("Peer was not subscribed to criterion") return ok() - do: return err("Peer has no subscriptions") - - diff --git a/waku/waku_keystore.nim b/waku/waku_keystore.nim index dc7ae3b25..4793b383e 100644 --- a/waku/waku_keystore.nim +++ b/waku/waku_keystore.nim @@ -1,9 +1,7 @@ # The keyfile submodule (implementation adapted from nim-eth keyfile module https://github.com/status-im/nim-eth/blob/master/eth/keyfile) -import - ./waku_keystore/keyfile +import ./waku_keystore/keyfile -export - keyfile +export keyfile # The Waku Keystore implementation import @@ -12,8 +10,4 @@ import ./waku_keystore/protocol_types, ./waku_keystore/utils -export - keystore, - conversion_utils, - protocol_types, - utils \ No newline at end of file +export keystore, conversion_utils, protocol_types, utils diff --git a/waku/waku_keystore/conversion_utils.nim b/waku/waku_keystore/conversion_utils.nim index a77e691ee..160404651 100644 --- a/waku/waku_keystore/conversion_utils.nim +++ b/waku/waku_keystore/conversion_utils.nim @@ -3,10 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - json, - stew/[results, byteutils], - ./protocol_types +import json, stew/[results, byteutils], ./protocol_types # Encodes a KeystoreMembership credential to a byte sequence proc encode*(credential: KeystoreMembership): seq[byte] = @@ -24,8 +21,6 @@ proc decode*(encodedCredential: seq[byte]): 
KeystoreResult[KeystoreMembership] =
     let jsonObject = parseJson(string.fromBytes(encodedCredential))
     return ok(to(jsonObject, KeystoreMembership))
   except JsonParsingError:
-    return err(AppKeystoreError(kind: KeystoreJsonError,
-                                msg: getCurrentExceptionMsg()))
+    return err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg()))
   except Exception: #parseJson raises Exception
-    return err(AppKeystoreError(kind: KeystoreOsError,
-                                msg: getCurrentExceptionMsg()))
+    return err(AppKeystoreError(kind: KeystoreOsError, msg: getCurrentExceptionMsg()))
diff --git a/waku/waku_keystore/keyfile.nim b/waku/waku_keystore/keyfile.nim
index 70fe6802d..58a7ce32a 100644
--- a/waku/waku_keystore/keyfile.nim
+++ b/waku/waku_keystore/keyfile.nim
@@ -29,40 +29,56 @@ const
 
 type
   KeyFileError* = enum
-    KeyfileRandomError = "keyfile error: Random generator error"
-    KeyfileUuidError = "keyfile error: UUID generator error"
-    KeyfileBufferOverrun = "keyfile error: Supplied buffer is too small"
-    KeyfileIncorrectDKLen = "keyfile error: `dklen` parameter is 0 or more then MaxDKLen"
-    KeyfileMalformedError = "keyfile error: JSON has incorrect structure"
-    KeyfileNotImplemented = "keyfile error: Feature is not implemented"
-    KeyfileNotSupported = "keyfile error: Feature is not supported"
-    KeyfileEmptyMac = "keyfile error: `mac` parameter is zero length or not in hexadecimal form"
-    KeyfileEmptyCiphertext = "keyfile error: `ciphertext` parameter is zero length or not in hexadecimal format"
-    KeyfileEmptySalt = "keyfile error: `salt` parameter is zero length or not in hexadecimal format"
-    KeyfileEmptyIV = "keyfile error: `cipherparams.iv` parameter is zero length or not in hexadecimal format"
-    KeyfileIncorrectIV = "keyfile error: Size of IV vector is not equal to cipher block size"
-    KeyfilePrfNotSupported = "keyfile error: PRF algorithm for PBKDF2 is not supported"
-    KeyfileKdfNotSupported = "keyfile error: KDF algorithm is not supported"
-    KeyfileCipherNotSupported = "keyfile error: `cipher` parameter is not supported"
-    KeyfileIncorrectMac = "keyfile error: `mac` verification failed"
-    KeyfileScryptBadParam = "keyfile error: bad scrypt's parameters"
-    KeyfileOsError = "keyfile error: OS specific error"
-    KeyfileIoError = "keyfile error: IO specific error"
-    KeyfileJsonError = "keyfile error: JSON encoder/decoder error"
-    KeyfileDoesNotExist = "keyfile error: file does not exist"
+    KeyfileRandomError = "keyfile error: Random generator error"
+    KeyfileUuidError = "keyfile error: UUID generator error"
+    KeyfileBufferOverrun = "keyfile error: Supplied buffer is too small"
+    KeyfileIncorrectDKLen =
+      "keyfile error: `dklen` parameter is 0 or more than MaxDKLen"
+    KeyfileMalformedError = "keyfile error: JSON has incorrect structure"
+    KeyfileNotImplemented = "keyfile error: Feature is not implemented"
+    KeyfileNotSupported = "keyfile error: Feature is not supported"
+    KeyfileEmptyMac =
+      "keyfile error: `mac` parameter is zero length or not in hexadecimal form"
+    KeyfileEmptyCiphertext =
+      "keyfile error: `ciphertext` parameter is zero length or not in hexadecimal format"
+    KeyfileEmptySalt =
+      "keyfile error: `salt` parameter is zero length or not in hexadecimal format"
+    KeyfileEmptyIV =
+      "keyfile error: `cipherparams.iv` parameter is zero length or not in hexadecimal format"
+    KeyfileIncorrectIV =
+      "keyfile error: Size of IV vector is not equal to cipher block size"
+    KeyfilePrfNotSupported = "keyfile error: PRF algorithm for PBKDF2 is not supported"
+    KeyfileKdfNotSupported = "keyfile error: KDF 
algorithm is not supported" + KeyfileCipherNotSupported = "keyfile error: `cipher` parameter is not supported" + KeyfileIncorrectMac = "keyfile error: `mac` verification failed" + KeyfileScryptBadParam = "keyfile error: bad scrypt's parameters" + KeyfileOsError = "keyfile error: OS specific error" + KeyfileIoError = "keyfile error: IO specific error" + KeyfileJsonError = "keyfile error: JSON encoder/decoder error" + KeyfileDoesNotExist = "keyfile error: file does not exist" KdfKind* = enum - PBKDF2, ## PBKDF2 - SCRYPT ## SCRYPT + PBKDF2 ## PBKDF2 + SCRYPT ## SCRYPT HashKind* = enum - HashNoSupport, HashSHA2_224, HashSHA2_256, HashSHA2_384, HashSHA2_512, - HashKECCAK224, HashKECCAK256, HashKECCAK384, HashKECCAK512, - HashSHA3_224, HashSHA3_256, HashSHA3_384, HashSHA3_512 + HashNoSupport + HashSHA2_224 + HashSHA2_256 + HashSHA2_384 + HashSHA2_512 + HashKECCAK224 + HashKECCAK256 + HashKECCAK384 + HashKECCAK512 + HashSHA3_224 + HashSHA3_256 + HashSHA3_384 + HashSHA3_512 CryptKind* = enum - CipherNoSupport, ## Cipher not supported - AES128CTR ## AES-128-CTR + CipherNoSupport ## Cipher not supported + AES128CTR ## AES-128-CTR CipherParams = object iv: seq[byte] @@ -111,15 +127,14 @@ type const SupportedHashes = [ - "sha224", "sha256", "sha384", "sha512", - "keccak224", "keccak256", "keccak384", "keccak512", - "sha3_224", "sha3_256", "sha3_384", "sha3_512" + "sha224", "sha256", "sha384", "sha512", "keccak224", "keccak256", "keccak384", + "keccak512", "sha3_224", "sha3_256", "sha3_384", "sha3_512", ] SupportedHashesKinds = [ - HashSHA2_224, HashSHA2_256, HashSHA2_384, HashSHA2_512, - HashKECCAK224, HashKECCAK256, HashKECCAK384, HashKECCAK512, - HashSHA3_224, HashSHA3_256, HashSHA3_384, HashSHA3_512 + HashSHA2_224, HashSHA2_256, HashSHA2_384, HashSHA2_512, HashKECCAK224, + HashKECCAK256, HashKECCAK384, HashKECCAK512, HashSHA3_224, HashSHA3_256, + HashSHA3_384, HashSHA3_512, ] # When true, the keyfile json will contain "version" and "id" fields, respectively. Default to false. 
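A round-trip sketch for the createKeyFileJson/decodeKeyFileJson pair reflowed in the hunks below (standalone Nim; the import path follows the diff header, and the password is a placeholder):

import stew/results
import waku/waku_keystore/keyfile   # path per the diff header (assumption)

let secret = @[byte 1, 2, 3, 4]
# Defaults per createKeyFileJson: version 3, AES-128-CTR, PBKDF2.
let keyfileJson = createKeyFileJson(secret, "placeholder password").expect("keyfile")
doAssert decodeKeyFileJson(keyfileJson, "placeholder password").expect("secret") == secret
# A wrong password fails `mac` verification rather than returning garbage:
doAssert decodeKeyFileJson(keyfileJson, "wrong password").error == KeyfileIncorrectMac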
@@ -127,27 +142,30 @@ const IdInKeyfile: bool = false proc mapErrTo[T, E](r: Result[T, E], v: static KeyFileError): KfResult[T] = - r.mapErr(proc (e: E): KeyFileError = v) + r.mapErr( + proc(e: E): KeyFileError = + v + ) proc `$`(k: KdfKind): string = case k - of SCRYPT: - return "scrypt" - else: - return "pbkdf2" + of SCRYPT: + return "scrypt" + else: + return "pbkdf2" proc `$`(k: CryptKind): string = case k - of AES128CTR: - return "aes-128-ctr" - else: - return "aes-128-ctr" + of AES128CTR: + return "aes-128-ctr" + else: + return "aes-128-ctr" # Parses the prf name to HashKind proc getPrfHash(prf: string): HashKind = let p = prf.toLowerAscii() if p.startsWith("hmac-"): - var hash = p[5..^1] + var hash = p[5 ..^ 1] var res = SupportedHashes.find(hash) if res >= 0: return SupportedHashesKinds[res] @@ -162,11 +180,13 @@ proc getCipher(c: string): CryptKind = return CipherNoSupport # Key derivation routine for PBKDF2 -proc deriveKey(password: string, - salt: string, - kdfkind: KdfKind, - hashkind: HashKind, - workfactor: int): KfResult[DKey] = +proc deriveKey( + password: string, + salt: string, + kdfkind: KdfKind, + hashkind: HashKind, + workfactor: int, +): KfResult[DKey] = if kdfkind == PBKDF2: var output: DKey var c = if workfactor == 0: Pbkdf2WorkFactor else: workfactor @@ -237,17 +257,19 @@ proc deriveKey(password: string, err(KeyfileNotImplemented) # Scrypt wrapper -func scrypt[T, M](password: openArray[T], salt: openArray[M], - N, r, p: int, output: var openArray[byte]): int = +func scrypt[T, M]( + password: openArray[T], + salt: openArray[M], + N, r, p: int, + output: var openArray[byte], +): int = let (xyvLen, bLen) = scryptCalc(N, r, p) var xyv = newSeq[uint32](xyvLen) var b = newSeq[byte](bLen) scrypt(password, salt, N, r, p, xyv, b, output) # Key derivation routine for Scrypt -proc deriveKey(password: string, salt: string, - workFactor, r, p: int): KfResult[DKey] = - +proc deriveKey(password: string, salt: string, workFactor, r, p: int): KfResult[DKey] = let wf = if workFactor == 0: ScryptWorkFactor else: workFactor var output: DKey if scrypt(password, salt, wf, r, p, output) == 0: @@ -256,12 +278,14 @@ proc deriveKey(password: string, salt: string, return ok(output) # Encryption routine -proc encryptData(plaintext: openArray[byte], - cryptkind: CryptKind, - key: openArray[byte], - iv: openArray[byte]): KfResult[seq[byte]] = +proc encryptData( + plaintext: openArray[byte], + cryptkind: CryptKind, + key: openArray[byte], + iv: openArray[byte], +): KfResult[seq[byte]] = if cryptkind == AES128CTR: - var ciphertext = newSeqWith(plaintext.len, 0.byte) + var ciphertext = newSeqWith(plaintext.len, 0.byte) var ctx: CTR[aes128] ctx.init(toOpenArray(key, 0, 15), iv) ctx.encrypt(plaintext, ciphertext) @@ -271,14 +295,16 @@ proc encryptData(plaintext: openArray[byte], err(KeyfileNotImplemented) # Decryption routine -proc decryptData(ciphertext: openArray[byte], - cryptkind: CryptKind, - key: openArray[byte], - iv: openArray[byte]): KfResult[seq[byte]] = +proc decryptData( + ciphertext: openArray[byte], + cryptkind: CryptKind, + key: openArray[byte], + iv: openArray[byte], +): KfResult[seq[byte]] = if cryptkind == AES128CTR: if len(iv) != aes128.sizeBlock: return err(KeyfileIncorrectIV) - var plaintext = newSeqWith(ciphertext.len, 0.byte) + var plaintext = newSeqWith(ciphertext.len, 0.byte) var ctx: CTR[aes128] ctx.init(toOpenArray(key, 0, 15), iv) ctx.decrypt(ciphertext, plaintext) @@ -291,25 +317,10 @@ proc decryptData(ciphertext: openArray[byte], proc kdfParams(kdfkind: KdfKind, salt: 
string, workfactor: int): KfResult[JsonNode] = if kdfkind == SCRYPT: let wf = if workfactor == 0: ScryptWorkFactor else: workfactor - ok(%* - { - "dklen": DKLen, - "n": wf, - "r": ScryptR, - "p": ScryptP, - "salt": salt - } - ) + ok(%*{"dklen": DKLen, "n": wf, "r": ScryptR, "p": ScryptP, "salt": salt}) elif kdfkind == PBKDF2: let wf = if workfactor == 0: Pbkdf2WorkFactor else: workfactor - ok(%* - { - "dklen": DKLen, - "c": wf, - "prf": "hmac-sha256", - "salt": salt - } - ) + ok(%*{"dklen": DKLen, "c": wf, "prf": "hmac-sha256", "salt": salt}) else: err(KeyfileNotImplemented) @@ -346,12 +357,14 @@ proc compareMac(m1: openArray[byte], m2: openArray[byte]): bool = # Creates a keyfile for secret encrypted with password according to the other parameters # Returns keyfile in JSON according to Web3 Secure storage format (here, differently than standard, version and id are optional) -proc createKeyFileJson*(secret: openArray[byte], - password: string, - version: int = 3, - cryptkind: CryptKind = AES128CTR, - kdfkind: KdfKind = PBKDF2, - workfactor: int = 0): KfResult[JsonNode] = +proc createKeyFileJson*( + secret: openArray[byte], + password: string, + version: int = 3, + cryptkind: CryptKind = AES128CTR, + kdfkind: KdfKind = PBKDF2, + workfactor: int = 0, +): KfResult[JsonNode] = ## Create JSON object with keyfile structure. ## ## ``secret`` - secret data, which will be stored @@ -372,14 +385,17 @@ proc createKeyFileJson*(secret: openArray[byte], return err(KeyfileRandomError) copyMem(addr saltstr[0], addr salt[0], SaltSize) - let u = ? uuidGenerate().mapErrTo(KeyfileUuidError) + let u = ?uuidGenerate().mapErrTo(KeyfileUuidError) let - dkey = case kdfkind - of PBKDF2: ? deriveKey(password, saltstr, kdfkind, HashSHA2_256, workfactor) - of SCRYPT: ? deriveKey(password, saltstr, workfactor, ScryptR, ScryptP) + dkey = + case kdfkind + of PBKDF2: + ?deriveKey(password, saltstr, kdfkind, HashSHA2_256, workfactor) + of SCRYPT: + ?deriveKey(password, saltstr, workfactor, ScryptR, ScryptP) - ciphertext = ? encryptData(secret, cryptkind, dkey, iv) + ciphertext = ?encryptData(secret, cryptkind, dkey, iv) var ctx: keccak256 ctx.init() @@ -388,22 +404,20 @@ proc createKeyFileJson*(secret: openArray[byte], var mac = ctx.finish() ctx.clear() - let params = ? 
kdfParams(kdfkind, toHex(salt, true), workfactor) + let params = ?kdfParams(kdfkind, toHex(salt, true), workfactor) var obj = KeystoreEntry( crypto: CryptoNew( - cipher: $cryptkind, - cipherparams: CypherParams( - iv: toHex(iv, true) - ), - ciphertext: toHex(ciphertext, true), - kdf: $kdfkind, - kdfparams: params, - mac: toHex(mac.data, true) + cipher: $cryptkind, + cipherparams: CypherParams(iv: toHex(iv, true)), + ciphertext: toHex(ciphertext, true), + kdf: $kdfkind, + kdfparams: params, + mac: toHex(mac.data, true), ) ) - let json = %* obj + let json = %*obj if IdInKeyfile: json.add("id", %($u)) if VersionInKeyfile: @@ -423,9 +437,12 @@ proc decodeCrypto(n: JsonNode): KfResult[Crypto] = var c: Crypto case kdf.getStr() - of "pbkdf2": c.kind = PBKDF2 - of "scrypt": c.kind = SCRYPT - else: return err(KeyfileKdfNotSupported) + of "pbkdf2": + c.kind = PBKDF2 + of "scrypt": + c.kind = SCRYPT + else: + return err(KeyfileKdfNotSupported) var cipherparams = crypto.getOrDefault("cipherparams") if isNil(cipherparams): @@ -463,7 +480,7 @@ proc decodePbkdf2Params(params: JsonNode): KfResult[Pbkdf2Params] = return err(KeyfilePrfNotSupported) if p.dklen == 0 or p.dklen > MaxDKLen: return err(KeyfileIncorrectDKLen) - + return ok(p) # Parses JSON Scrypt parameters @@ -494,13 +511,13 @@ func decryptSecret(crypto: Crypto, dkey: DKey): KfResult[seq[byte]] = if not compareMac(mac.data, crypto.mac): return err(KeyfileIncorrectMac) - let plaintext = ? decryptData(crypto.cipher.text, crypto.cipher.kind, dkey, crypto.cipher.params.iv) - + let plaintext = + ?decryptData(crypto.cipher.text, crypto.cipher.kind, dkey, crypto.cipher.params.iv) + ok(plaintext) # Parse JSON keyfile and decrypts its content using password -proc decodeKeyFileJson*(j: JsonNode, - password: string): KfResult[seq[byte]] = +proc decodeKeyFileJson*(j: JsonNode, password: string): KfResult[seq[byte]] = ## Decode secret from keyfile json object ``j`` using ## password string ``password``. let res = decodeCrypto(j) @@ -515,21 +532,21 @@ proc decodeKeyFileJson*(j: JsonNode, return err(res.error) let params = res.get() - let dkey = ? deriveKey(password, params.salt, PBKDF2, params.prf, params.c) + let dkey = ?deriveKey(password, params.salt, PBKDF2, params.prf, params.c) return decryptSecret(crypto, dkey) - of SCRYPT: let res = decodeScryptParams(crypto.kdfParams) if res.isErr: return err(res.error) let params = res.get() - let dkey = ? deriveKey(password, params.salt, params.n, params.r, params.p) + let dkey = ?deriveKey(password, params.salt, params.n, params.r, params.p) return decryptSecret(crypto, dkey) # Loads the file at pathname, decrypts and returns all keyfiles encrypted under password -proc loadKeyFiles*(pathname: string, - password: string): KfResult[seq[KfResult[seq[byte]]]] = +proc loadKeyFiles*( + pathname: string, password: string +): KfResult[seq[KfResult[seq[byte]]]] = ## Load and decode data from file with pathname ## ``pathname``, using password string ``password``. 
## The index successful decryptions is returned @@ -543,7 +560,6 @@ proc loadKeyFiles*(pathname: string, # Note that lines strips the ending newline, if present try: for keyfile in lines(pathname): - # We skip empty lines if keyfile.len == 0: continue @@ -565,18 +581,15 @@ proc loadKeyFiles*(pathname: string, decodedKeyfile = decodeKeyFileJson(data, password) if decodedKeyfile.isOk(): successfullyDecodedKeyfiles.add decodedKeyfile - except IOError: return err(KeyfileIoError) return ok(successfullyDecodedKeyfiles) # Note that the keyfile is open in Append mode so that multiple credentials can be stored in same file -proc saveKeyFile*(pathname: string, - jobject: JsonNode): KfResult[void] = +proc saveKeyFile*(pathname: string, jobject: JsonNode): KfResult[void] = ## Save JSON object ``jobject`` to file with pathname ``pathname``. - var - f: File + var f: File if not f.open(pathname, fmAppend): return err(KeyfileOsError) try: @@ -590,4 +603,3 @@ proc saveKeyFile*(pathname: string, err(KeyfileOsError) finally: f.close() - diff --git a/waku/waku_keystore/keystore.nim b/waku/waku_keystore/keystore.nim index 18fad47c0..7187b2de2 100644 --- a/waku/waku_keystore/keystore.nim +++ b/waku/waku_keystore/keystore.nim @@ -3,36 +3,28 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - options, - json, - strutils, - sequtils, - std/[tables, os] +import options, json, strutils, sequtils, std/[tables, os] -import - ./keyfile, - ./conversion_utils, - ./protocol_types, - ./utils +import ./keyfile, ./conversion_utils, ./protocol_types, ./utils # This proc creates an empty keystore (i.e. with no credentials) -proc createAppKeystore*(path: string, - appInfo: AppInfo, - separator: string = "\n"): KeystoreResult[void] = - - let keystore = AppKeystore(application: appInfo.application, - appIdentifier: appInfo.appIdentifier, - version: appInfo.version, - credentials: initTable[string, KeystoreMembership]()) +proc createAppKeystore*( + path: string, appInfo: AppInfo, separator: string = "\n" +): KeystoreResult[void] = + let keystore = AppKeystore( + application: appInfo.application, + appIdentifier: appInfo.appIdentifier, + version: appInfo.version, + credentials: initTable[string, KeystoreMembership](), + ) var jsonKeystore: string jsonKeystore.toUgly(%keystore) var f: File if not f.open(path, fmWrite): - return err(AppKeystoreError(kind: KeystoreOsError, - msg: "Cannot open file for writing")) + return + err(AppKeystoreError(kind: KeystoreOsError, msg: "Cannot open file for writing")) try: # To avoid other users/attackers to be able to read keyfiles, we make the file readable/writable only by the running user @@ -42,17 +34,15 @@ proc createAppKeystore*(path: string, f.write(separator) ok() except CatchableError: - err(AppKeystoreError(kind: KeystoreOsError, - msg: getCurrentExceptionMsg())) + err(AppKeystoreError(kind: KeystoreOsError, msg: getCurrentExceptionMsg())) finally: f.close() # This proc load a keystore based on the application, appIdentifier and version filters. 
# If none is found, it automatically creates an empty keystore for the passed parameters -proc loadAppKeystore*(path: string, - appInfo: AppInfo, - separator: string = "\n"): KeystoreResult[JsonNode] = - +proc loadAppKeystore*( + path: string, appInfo: AppInfo, separator: string = "\n" +): KeystoreResult[JsonNode] = ## Load and decode JSON keystore from pathname var data: JsonNode var matchingAppKeystore: JsonNode @@ -64,17 +54,16 @@ proc loadAppKeystore*(path: string, return err(newKeystoreRes.error) try: - # We read all the file contents var f: File if not f.open(path, fmRead): - return err(AppKeystoreError(kind: KeystoreOsError, - msg: "Cannot open file for reading")) + return err( + AppKeystoreError(kind: KeystoreOsError, msg: "Cannot open file for reading") + ) let fileContents = readAll(f) # We iterate over each substring split by separator (which we expect to correspond to a single keystore json) for keystore in fileContents.split(separator): - # We skip if read line is empty if keystore.len == 0: continue @@ -89,41 +78,39 @@ proc loadAppKeystore*(path: string, # We check if parsed json contains the relevant keystore credentials fields and if these are set to the passed parameters # (note that "if" is lazy, so if one of the .contains() fails, the json fields contents will not be checked and no ResultDefect will be raised due to accessing unavailable fields) if data.hasKeys(["application", "appIdentifier", "credentials", "version"]) and - data["application"].getStr() == appInfo.application and - data["appIdentifier"].getStr() == appInfo.appIdentifier and - data["version"].getStr() == appInfo.version: + data["application"].getStr() == appInfo.application and + data["appIdentifier"].getStr() == appInfo.appIdentifier and + data["version"].getStr() == appInfo.version: # We return the first json keystore that matches the passed app parameters # We assume a unique kesytore with such parameters is present in the file matchingAppKeystore = data break # TODO: we might continue rather than return for some of these errors except JsonParsingError: - return err(AppKeystoreError(kind: KeystoreJsonError, - msg: getCurrentExceptionMsg())) + return + err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg())) except ValueError: - return err(AppKeystoreError(kind: KeystoreJsonError, - msg: getCurrentExceptionMsg())) + return + err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg())) except OSError: - return err(AppKeystoreError(kind: KeystoreOsError, - msg: getCurrentExceptionMsg())) + return + err(AppKeystoreError(kind: KeystoreOsError, msg: getCurrentExceptionMsg())) except Exception: #parseJson raises Exception - return err(AppKeystoreError(kind: KeystoreOsError, - msg: getCurrentExceptionMsg())) - + return + err(AppKeystoreError(kind: KeystoreOsError, msg: getCurrentExceptionMsg())) except IOError: - return err(AppKeystoreError(kind: KeystoreIoError, - msg: getCurrentExceptionMsg())) + return err(AppKeystoreError(kind: KeystoreIoError, msg: getCurrentExceptionMsg())) return ok(matchingAppKeystore) - # Adds a membership credential to the keystore matching the application, appIdentifier and version filters. 
-proc addMembershipCredentials*(path: string, - membership: KeystoreMembership, - password: string, - appInfo: AppInfo, - separator: string = "\n"): KeystoreResult[void] = - +proc addMembershipCredentials*( + path: string, + membership: KeystoreMembership, + password: string, + appInfo: AppInfo, + separator: string = "\n", +): KeystoreResult[void] = # We load the keystore corresponding to the desired parameters # This call ensures that JSON has all required fields let jsonKeystoreRes = loadAppKeystore(path, appInfo, separator) @@ -136,7 +123,6 @@ proc addMembershipCredentials*(path: string, try: if jsonKeystore.hasKey("credentials"): - # We get all credentials in keystore let keystoreCredentials = jsonKeystore["credentials"] let key = membership.hash() @@ -147,15 +133,14 @@ proc addMembershipCredentials*(path: string, let encodedMembershipCredential = membership.encode() let keyfileRes = createKeyFileJson(encodedMembershipCredential, password) if keyfileRes.isErr(): - return err(AppKeystoreError(kind: KeystoreCreateKeyfileError, - msg: $keyfileRes.error)) + return err( + AppKeystoreError(kind: KeystoreCreateKeyfileError, msg: $keyfileRes.error) + ) # We add it to the credentials field of the keystore jsonKeystore["credentials"][key] = keyfileRes.get() - except CatchableError: - return err(AppKeystoreError(kind: KeystoreJsonError, - msg: getCurrentExceptionMsg())) + return err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg())) # We save to disk the (updated) keystore. let saveRes = save(jsonKeystore, path, separator) @@ -166,11 +151,9 @@ proc addMembershipCredentials*(path: string, # Returns the membership credentials in the keystore matching the application, appIdentifier and version filters, further filtered by the input # identity credentials and membership contracts -proc getMembershipCredentials*(path: string, - password: string, - query: KeystoreMembership, - appInfo: AppInfo): KeystoreResult[KeystoreMembership] = - +proc getMembershipCredentials*( + path: string, password: string, query: KeystoreMembership, appInfo: AppInfo +): KeystoreResult[KeystoreMembership] = # We load the keystore corresponding to the desired parameters # This call ensures that JSON has all required fields let jsonKeystoreRes = loadAppKeystore(path, appInfo) @@ -182,37 +165,42 @@ proc getMembershipCredentials*(path: string, var jsonKeystore = jsonKeystoreRes.get() try: - if jsonKeystore.hasKey("credentials"): # We get all credentials in keystore var keystoreCredentials = jsonKeystore["credentials"] if keystoreCredentials.len == 0: # error - return err(AppKeystoreError(kind: KeystoreCredentialNotFoundError, - msg: "No credentials found in keystore")) + return err( + AppKeystoreError( + kind: KeystoreCredentialNotFoundError, + msg: "No credentials found in keystore", + ) + ) var keystoreCredential: JsonNode if keystoreCredentials.len == 1: - keystoreCredential = keystoreCredentials - .getFields() - .values() - .toSeq()[0] + keystoreCredential = keystoreCredentials.getFields().values().toSeq()[0] else: let key = query.hash() if not keystoreCredentials.hasKey(key): # error - return err(AppKeystoreError(kind: KeystoreCredentialNotFoundError, - msg: "Credential not found in keystore")) + return err( + AppKeystoreError( + kind: KeystoreCredentialNotFoundError, + msg: "Credential not found in keystore", + ) + ) keystoreCredential = keystoreCredentials[key] let decodedKeyfileRes = decodeKeyFileJson(keystoreCredential, password) if decodedKeyfileRes.isErr(): - return err(AppKeystoreError(kind: 
KeystoreReadKeyfileError, - msg: $decodedKeyfileRes.error)) + return err( + AppKeystoreError( + kind: KeystoreReadKeyfileError, msg: $decodedKeyfileRes.error + ) + ) # we parse the json decrypted keystoreCredential let decodedCredentialRes = decode(decodedKeyfileRes.get()) let keyfileMembershipCredential = decodedCredentialRes.get() return ok(keyfileMembershipCredential) - except CatchableError: - return err(AppKeystoreError(kind: KeystoreJsonError, - msg: getCurrentExceptionMsg())) + return err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg())) diff --git a/waku/waku_keystore/protocol_types.nim b/waku/waku_keystore/protocol_types.nim index 8abbd00ea..51d7faf7c 100644 --- a/waku/waku_keystore/protocol_types.nim +++ b/waku/waku_keystore/protocol_types.nim @@ -3,11 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/[sequtils, tables], - stew/[results, endians2], - nimcrypto, - stint +import std/[sequtils, tables], stew/[results, endians2], nimcrypto, stint # NOTE: 256-bytes long credentials are due to the use of BN254 in RLN. Other implementations/curves might have a different byte size const CredentialByteSize* = 256 @@ -49,25 +45,37 @@ proc toMembershipIndex*(v: UInt256): MembershipIndex = # Converts a sequence of tuples containing 4 string (i.e. identity trapdoor, nullifier, secret hash, commitment) to an IdentityCredential type RawMembershipCredentials* = (string, string, string, string) -proc toIdentityCredentials*(groupKeys: seq[RawMembershipCredentials]): Result[seq[ - IdentityCredential], string] = +proc toIdentityCredentials*( + groupKeys: seq[RawMembershipCredentials] +): Result[seq[IdentityCredential], string] = ## groupKeys is sequence of membership key tuples in the form of (identity key, identity commitment) all in the hexadecimal format ## the toIdentityCredentials proc populates a sequence of IdentityCredentials using the supplied groupKeys ## Returns an error if the conversion fails var groupIdCredentials = newSeq[IdentityCredential]() - for i in 0..groupKeys.len-1: + for i in 0 .. 
groupKeys.len - 1: try: let - idTrapdoor = IdentityTrapdoor(@(hexToUint[CredentialByteSize](groupKeys[i][0]).toBytesLE())) - idNullifier = IdentityNullifier(@(hexToUint[CredentialByteSize](groupKeys[i][1]).toBytesLE())) - idSecretHash = IdentitySecretHash(@(hexToUint[CredentialByteSize](groupKeys[i][2]).toBytesLE())) - idCommitment = IDCommitment(@(hexToUint[CredentialByteSize](groupKeys[i][3]).toBytesLE())) - groupIdCredentials.add(IdentityCredential(idTrapdoor: idTrapdoor, - idNullifier: idNullifier, - idSecretHash: idSecretHash, - idCommitment: idCommitment)) + idTrapdoor = IdentityTrapdoor( + @(hexToUint[CredentialByteSize](groupKeys[i][0]).toBytesLE()) + ) + idNullifier = IdentityNullifier( + @(hexToUint[CredentialByteSize](groupKeys[i][1]).toBytesLE()) + ) + idSecretHash = IdentitySecretHash( + @(hexToUint[CredentialByteSize](groupKeys[i][2]).toBytesLE()) + ) + idCommitment = + IDCommitment(@(hexToUint[CredentialByteSize](groupKeys[i][3]).toBytesLE())) + groupIdCredentials.add( + IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + ) except ValueError as err: return err("could not convert the group key to bytes: " & err.msg) return ok(groupIdCredentials) @@ -100,34 +108,47 @@ type KeystoreMembership* = ref object of RootObj when defined(rln_v2): proc `$`*(m: KeystoreMembership): string = - return "KeystoreMembership(chainId: " & m.membershipContract.chainId & ", contractAddress: " & m.membershipContract.address & ", treeIndex: " & $m.treeIndex & ", userMessageLimit: " & $m.userMessageLimit & ", identityCredential: " & $m.identityCredential & ")" + return + "KeystoreMembership(chainId: " & m.membershipContract.chainId & + ", contractAddress: " & m.membershipContract.address & ", treeIndex: " & + $m.treeIndex & ", userMessageLimit: " & $m.userMessageLimit & + ", identityCredential: " & $m.identityCredential & ")" + else: proc `$`*(m: KeystoreMembership): string = - return "KeystoreMembership(chainId: " & m.membershipContract.chainId & ", contractAddress: " & m.membershipContract.address & ", treeIndex: " & $m.treeIndex & ", identityCredential: " & $m.identityCredential & ")" + return + "KeystoreMembership(chainId: " & m.membershipContract.chainId & + ", contractAddress: " & m.membershipContract.address & ", treeIndex: " & + $m.treeIndex & ", identityCredential: " & $m.identityCredential & ")" when defined(rln_v2): proc `==`*(x, y: KeystoreMembership): bool = - return x.membershipContract.chainId == y.membershipContract.chainId and - x.membershipContract.address == y.membershipContract.address and - x.treeIndex == y.treeIndex and - x.userMessageLimit == y.userMessageLimit and - x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and - x.identityCredential.idNullifier == y.identityCredential.idNullifier and - x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and - x.identityCredential.idCommitment == y.identityCredential.idCommitment + return + x.membershipContract.chainId == y.membershipContract.chainId and + x.membershipContract.address == y.membershipContract.address and + x.treeIndex == y.treeIndex and x.userMessageLimit == y.userMessageLimit and + x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and + x.identityCredential.idNullifier == y.identityCredential.idNullifier and + x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and + x.identityCredential.idCommitment == y.identityCredential.idCommitment + else: proc `==`*(x, y: 
KeystoreMembership): bool = - return x.membershipContract.chainId == y.membershipContract.chainId and - x.membershipContract.address == y.membershipContract.address and - x.treeIndex == y.treeIndex and - x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and - x.identityCredential.idNullifier == y.identityCredential.idNullifier and - x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and - x.identityCredential.idCommitment == y.identityCredential.idCommitment + return + x.membershipContract.chainId == y.membershipContract.chainId and + x.membershipContract.address == y.membershipContract.address and + x.treeIndex == y.treeIndex and + x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and + x.identityCredential.idNullifier == y.identityCredential.idNullifier and + x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and + x.identityCredential.idCommitment == y.identityCredential.idCommitment proc hash*(m: KeystoreMembership): string = # hash together the chainId, address and treeIndex - return $sha256.digest(m.membershipContract.chainId & m.membershipContract.address & $m.treeIndex) + return + $sha256.digest( + m.membershipContract.chainId & m.membershipContract.address & $m.treeIndex + ) type MembershipTable* = Table[string, KeystoreMembership] @@ -144,24 +165,26 @@ type AppKeystore* = object type AppKeystoreErrorKind* = enum - KeystoreOsError = "keystore error: OS specific error" - KeystoreIoError = "keystore error: IO specific error" - KeystoreJsonKeyError = "keystore error: fields not present in JSON" - KeystoreJsonError = "keystore error: JSON encoder/decoder error" - KeystoreKeystoreDoesNotExist = "keystore error: file does not exist" - KeystoreCreateKeystoreError = "Error while creating application keystore" - KeystoreLoadKeystoreError = "Error while loading application keystore" - KeystoreCreateKeyfileError = "Error while creating keyfile for credentials" - KeystoreSaveKeyfileError = "Error while saving keyfile for credentials" - KeystoreReadKeyfileError = "Error while reading keyfile for credentials" - KeystoreCredentialAlreadyPresentError = "Error while adding credentials to keystore: credential already present" - KeystoreCredentialNotFoundError = "Error while searching credentials in keystore: credential not found" + KeystoreOsError = "keystore error: OS specific error" + KeystoreIoError = "keystore error: IO specific error" + KeystoreJsonKeyError = "keystore error: fields not present in JSON" + KeystoreJsonError = "keystore error: JSON encoder/decoder error" + KeystoreKeystoreDoesNotExist = "keystore error: file does not exist" + KeystoreCreateKeystoreError = "Error while creating application keystore" + KeystoreLoadKeystoreError = "Error while loading application keystore" + KeystoreCreateKeyfileError = "Error while creating keyfile for credentials" + KeystoreSaveKeyfileError = "Error while saving keyfile for credentials" + KeystoreReadKeyfileError = "Error while reading keyfile for credentials" + KeystoreCredentialAlreadyPresentError = + "Error while adding credentials to keystore: credential already present" + KeystoreCredentialNotFoundError = + "Error while searching credentials in keystore: credential not found" AppKeystoreError* = object kind*: AppKeystoreErrorKind msg*: string -proc `$`*(e: AppKeystoreError) : string = +proc `$`*(e: AppKeystoreError): string = return $e.kind & ": " & e.msg -type KeystoreResult*[T] = Result[T, AppKeystoreError] \ No newline at end of file +type KeystoreResult*[T] = 
Result[T, AppKeystoreError] diff --git a/waku/waku_keystore/utils.nim b/waku/waku_keystore/utils.nim index 736acbd8d..8bfa104ba 100644 --- a/waku/waku_keystore/utils.nim +++ b/waku/waku_keystore/utils.nim @@ -3,17 +3,18 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - json, - std/[os, sequtils] +import json, std/[os, sequtils] -import - ./keyfile, - ./protocol_types +import ./keyfile, ./protocol_types # Checks if a JsonNode has all keys contained in "keys" proc hasKeys*(data: JsonNode, keys: openArray[string]): bool = - return all(keys, proc (key: string): bool = return data.hasKey(key)) + return all( + keys, + proc(key: string): bool = + return data.hasKey(key) + , + ) # Safely saves a Keystore's JsonNode to disk. # If exists, the destination file is renamed with extension .bkp; the file is written at its destination and the .bkp file is removed if write is successful, otherwise is restored @@ -22,15 +23,18 @@ proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void if fileExists(path): try: moveFile(path, path & ".bkp") - except: # TODO: Fix "BareExcept" warning - return err(AppKeystoreError(kind: KeystoreOsError, - msg: "could not backup keystore: " & getCurrentExceptionMsg())) + except: # TODO: Fix "BareExcept" warning + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not backup keystore: " & getCurrentExceptionMsg(), + ) + ) # We save the updated json var f: File if not f.open(path, fmAppend): - return err(AppKeystoreError(kind: KeystoreOsError, - msg: getCurrentExceptionMsg())) + return err(AppKeystoreError(kind: KeystoreOsError, msg: getCurrentExceptionMsg())) try: # To avoid other users/attackers to be able to read keyfiles, we make the file readable/writable only by the running user setFilePermissions(path, {fpUserWrite, fpUserRead}) @@ -44,12 +48,20 @@ proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void f.close() removeFile(path) moveFile(path & ".bkp", path) - except: # TODO: Fix "BareExcept" warning + except: # TODO: Fix "BareExcept" warning # Unlucky, we just fail - return err(AppKeystoreError(kind: KeystoreOsError, - msg: "could not restore keystore backup: " & getCurrentExceptionMsg())) - return err(AppKeystoreError(kind: KeystoreOsError, - msg: "could not write keystore: " & getCurrentExceptionMsg())) + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not restore keystore backup: " & getCurrentExceptionMsg(), + ) + ) + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not write keystore: " & getCurrentExceptionMsg(), + ) + ) finally: f.close() @@ -58,7 +70,11 @@ proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void try: removeFile(path & ".bkp") except CatchableError: - return err(AppKeystoreError(kind: KeystoreOsError, - msg: "could not remove keystore backup: " & getCurrentExceptionMsg())) + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not remove keystore backup: " & getCurrentExceptionMsg(), + ) + ) return ok() diff --git a/waku/waku_lightpush.nim b/waku/waku_lightpush.nim index 3cd68a015..373478fd9 100644 --- a/waku/waku_lightpush.nim +++ b/waku/waku_lightpush.nim @@ -1,5 +1,3 @@ -import - ./waku_lightpush/protocol +import ./waku_lightpush/protocol -export - protocol \ No newline at end of file +export protocol diff --git a/waku/waku_lightpush/client.nim b/waku/waku_lightpush/client.nim index 5dd23a988..e46822a23 100644 --- a/waku/waku_lightpush/client.nim +++ 
b/waku/waku_lightpush/client.nim @@ -3,13 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options, - stew/results, - chronicles, - chronos, - metrics, - bearssl/rand +import std/options, stew/results, chronicles, chronos, metrics, bearssl/rand import ../node/peer_manager, ../utils/requests, @@ -19,23 +13,21 @@ import ./rpc, ./rpc_codec - logScope: topics = "waku lightpush client" - type WakuLightPushClient* = ref object - peerManager*: PeerManager - rng*: ref rand.HmacDrbgContext + peerManager*: PeerManager + rng*: ref rand.HmacDrbgContext - -proc new*(T: type WakuLightPushClient, - peerManager: PeerManager, - rng: ref rand.HmacDrbgContext): T = +proc new*( + T: type WakuLightPushClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext +): T = WakuLightPushClient(peerManager: peerManager, rng: rng) - -proc sendPushRequest(wl: WakuLightPushClient, req: PushRequest, peer: PeerId|RemotePeerInfo): Future[WakuLightPushResult[void]] {.async, gcsafe.} = +proc sendPushRequest( + wl: WakuLightPushClient, req: PushRequest, peer: PeerId | RemotePeerInfo +): Future[WakuLightPushResult[void]] {.async, gcsafe.} = let connOpt = await wl.peerManager.dialPeer(peer, WakuLightPushCodec) if connOpt.isNone(): waku_lightpush_errors.inc(labelValues = [dialFailure]) @@ -71,6 +63,11 @@ proc sendPushRequest(wl: WakuLightPushClient, req: PushRequest, peer: PeerId|Rem return ok() -proc publish*(wl: WakuLightPushClient, pubSubTopic: PubsubTopic, message: WakuMessage, peer: PeerId|RemotePeerInfo): Future[WakuLightPushResult[void]] {.async, gcsafe.} = +proc publish*( + wl: WakuLightPushClient, + pubSubTopic: PubsubTopic, + message: WakuMessage, + peer: PeerId | RemotePeerInfo, +): Future[WakuLightPushResult[void]] {.async, gcsafe.} = let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) - return await wl.sendPushRequest(pushRequest, peer) \ No newline at end of file + return await wl.sendPushRequest(pushRequest, peer) diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim index 2205b94aa..75f507b52 100644 --- a/waku/waku_lightpush/common.nim +++ b/waku/waku_lightpush/common.nim @@ -3,19 +3,13 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - stew/results, - chronos, - libp2p/peerid -import - ../waku_core +import stew/results, chronos, libp2p/peerid +import ../waku_core const WakuLightPushCodec* = "/vac/waku/lightpush/2.0.0-beta1" type WakuLightPushResult*[T] = Result[T, string] type PushMessageHandler* = proc( - peer: PeerId, - pubsubTopic: PubsubTopic, - message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult[void]] {.async.} diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim index d62dc0b79..ddac2d9ec 100644 --- a/waku/waku_lightpush/protocol.nim +++ b/waku/waku_lightpush/protocol.nim @@ -4,13 +4,7 @@ else: {.push raises: [].} import - std/options, - stew/results, - stew/byteutils, - chronicles, - chronos, - metrics, - bearssl/rand + std/options, stew/results, stew/byteutils, chronicles, chronos, metrics, bearssl/rand import ../node/peer_manager/peer_manager, ../waku_core, @@ -18,28 +12,30 @@ import ./rpc, ./rpc_codec, ./protocol_metrics - + logScope: topics = "waku lightpush" type WakuLightPush* = ref object of LPProtocol - rng*: ref rand.HmacDrbgContext - peerManager*: PeerManager - pushHandler*: PushMessageHandler + rng*: ref rand.HmacDrbgContext + peerManager*: 
PeerManager + pushHandler*: PushMessageHandler -proc handleRequest*(wl: WakuLightPush, peerId: PeerId, buffer: seq[byte]): Future[PushRPC] {.async.} = +proc handleRequest*( + wl: WakuLightPush, peerId: PeerId, buffer: seq[byte] +): Future[PushRPC] {.async.} = let reqDecodeRes = PushRPC.decode(buffer) var - isSuccess = false - pushResponseInfo = "" - requestId = "" + isSuccess = false + pushResponseInfo = "" + requestId = "" if reqDecodeRes.isErr(): pushResponseInfo = decodeRpcFailure & ": " & $reqDecodeRes.error elif reqDecodeRes.get().request.isNone(): pushResponseInfo = emptyRequestBodyFailure else: - let pushRpcRequest = reqDecodeRes.get(); + let pushRpcRequest = reqDecodeRes.get() requestId = pushRpcRequest.requestId @@ -49,15 +45,19 @@ proc handleRequest*(wl: WakuLightPush, peerId: PeerId, buffer: seq[byte]): Futur pubSubTopic = request.get().pubSubTopic message = request.get().message waku_lightpush_messages.inc(labelValues = ["PushRequest"]) - debug "push request", peerId=peerId, requestId=requestId, pubsubTopic=pubsubTopic, hash=pubsubTopic.computeMessageHash(message).to0xHex() - + debug "push request", + peerId = peerId, + requestId = requestId, + pubsubTopic = pubsubTopic, + hash = pubsubTopic.computeMessageHash(message).to0xHex() + let handleRes = await wl.pushHandler(peerId, pubsubTopic, message) isSuccess = handleRes.isOk() pushResponseInfo = (if isSuccess: "OK" else: handleRes.error) if not isSuccess: waku_lightpush_errors.inc(labelValues = [pushResponseInfo]) - error "failed to push message", error=pushResponseInfo + error "failed to push message", error = pushResponseInfo let response = PushResponse(isSuccess: isSuccess, info: some(pushResponseInfo)) let rpc = PushRPC(requestId: requestId, response: some(response)) return rpc @@ -67,13 +67,16 @@ proc initProtocolHandler(wl: WakuLightPush) = let buffer = await conn.readLp(MaxRpcSize.int) let rpc = await handleRequest(wl, conn.peerId, buffer) await conn.writeLp(rpc.encode().buffer) + wl.handler = handle wl.codec = WakuLightPushCodec -proc new*(T: type WakuLightPush, - peerManager: PeerManager, - rng: ref rand.HmacDrbgContext, - pushHandler: PushMessageHandler): T = +proc new*( + T: type WakuLightPush, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + pushHandler: PushMessageHandler, +): T = let wl = WakuLightPush(rng: rng, peerManager: peerManager, pushHandler: pushHandler) wl.initProtocolHandler() return wl diff --git a/waku/waku_lightpush/protocol_metrics.nim b/waku/waku_lightpush/protocol_metrics.nim index 98cb5fa67..2c62d319d 100644 --- a/waku/waku_lightpush/protocol_metrics.nim +++ b/waku/waku_lightpush/protocol_metrics.nim @@ -5,10 +5,10 @@ else: import metrics - -declarePublicGauge waku_lightpush_errors, "number of lightpush protocol errors", ["type"] -declarePublicGauge waku_lightpush_messages, "number of lightpush messages received", ["type"] - +declarePublicGauge waku_lightpush_errors, + "number of lightpush protocol errors", ["type"] +declarePublicGauge waku_lightpush_messages, + "number of lightpush messages received", ["type"] # Error types (metric label values) const diff --git a/waku/waku_lightpush/rpc.nim b/waku/waku_lightpush/rpc.nim index fc8e8b660..da1f123f6 100644 --- a/waku/waku_lightpush/rpc.nim +++ b/waku/waku_lightpush/rpc.nim @@ -3,10 +3,8 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options -import - ../waku_core +import std/options +import ../waku_core type PushRequest* = object diff --git a/waku/waku_lightpush/rpc_codec.nim 
b/waku/waku_lightpush/rpc_codec.nim index 432a671ea..3fa5b504d 100644 --- a/waku/waku_lightpush/rpc_codec.nim +++ b/waku/waku_lightpush/rpc_codec.nim @@ -3,17 +3,11 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} +import std/options +import ../common/protobuf, ../waku_core, ./rpc -import - std/options -import - ../common/protobuf, - ../waku_core, - ./rpc - - -const MaxRpcSize* = MaxWakuMessageSize + 64 * 1024 # We add a 64kB safety buffer for protocol overhead - +const MaxRpcSize* = MaxWakuMessageSize + 64 * 1024 + # We add a 64kB safety buffer for protocol overhead proc encode*(rpc: PushRequest): ProtoBuffer = var pb = initProtoBuffer() @@ -42,7 +36,6 @@ proc decode*(T: type PushRequest, buffer: seq[byte]): ProtobufResult[T] = ok(rpc) - proc encode*(rpc: PushResponse): ProtoBuffer = var pb = initProtoBuffer() @@ -70,7 +63,6 @@ proc decode*(T: type PushResponse, buffer: seq[byte]): ProtobufResult[T] = ok(rpc) - proc encode*(rpc: PushRPC): ProtoBuffer = var pb = initProtoBuffer() diff --git a/waku/waku_metadata.nim b/waku/waku_metadata.nim index efa9b4234..823dae4fa 100644 --- a/waku/waku_metadata.nim +++ b/waku/waku_metadata.nim @@ -3,8 +3,6 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - ./waku_metadata/protocol +import ./waku_metadata/protocol -export - protocol +export protocol diff --git a/waku/waku_metadata/protocol.nim b/waku/waku_metadata/protocol.nim index ff4eaa29b..0608be6d5 100644 --- a/waku/waku_metadata/protocol.nim +++ b/waku/waku_metadata/protocol.nim @@ -13,12 +13,7 @@ import libp2p/stream/connection, libp2p/crypto/crypto, eth/p2p/discoveryv5/enr -import - ../common/nimchronos, - ../common/enr, - ../waku_core, - ../waku_enr, - ./rpc +import ../common/nimchronos, ../common/enr, ../waku_core, ../waku_enr, ./rpc logScope: topics = "waku metadata" @@ -26,42 +21,49 @@ logScope: const WakuMetadataCodec* = "/vac/waku/metadata/1.0.0" const RpcResponseMaxBytes* = 1024 -type - WakuMetadata* = ref object of LPProtocol - clusterId*: uint32 - shards*: HashSet[uint32] - topicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent] +type WakuMetadata* = ref object of LPProtocol + clusterId*: uint32 + shards*: HashSet[uint32] + topicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent] -proc respond(m: WakuMetadata, conn: Connection): Future[Result[void, string]] {.async, gcsafe.} = - let response = WakuMetadataResponse( - clusterId: some(m.clusterId), - shards: toSeq(m.shards) - ) - - let res = catch: await conn.writeLP(response.encode().buffer) +proc respond( + m: WakuMetadata, conn: Connection +): Future[Result[void, string]] {.async, gcsafe.} = + let response = + WakuMetadataResponse(clusterId: some(m.clusterId), shards: toSeq(m.shards)) + + let res = catch: + await conn.writeLP(response.encode().buffer) if res.isErr(): return err(res.error.msg) return ok() -proc request*(m: WakuMetadata, conn: Connection): Future[Result[WakuMetadataResponse, string]] {.async, gcsafe.} = - let request = WakuMetadataRequest(clusterId: some(m.clusterId), shards: toSeq(m.shards)) +proc request*( + m: WakuMetadata, conn: Connection +): Future[Result[WakuMetadataResponse, string]] {.async, gcsafe.} = + let request = + WakuMetadataRequest(clusterId: some(m.clusterId), shards: toSeq(m.shards)) + + let writeRes = catch: + await conn.writeLP(request.encode().buffer) + let readRes = catch: + await conn.readLp(RpcResponseMaxBytes) - let writeRes = catch: await conn.writeLP(request.encode().buffer) - let readRes = catch: await conn.readLp(RpcResponseMaxBytes) - # close no 
watter what - let closeRes = catch: await conn.closeWithEof() + let closeRes = catch: + await conn.closeWithEof() if closeRes.isErr(): return err("close failed: " & closeRes.error.msg) if writeRes.isErr(): return err("write failed: " & writeRes.error.msg) - let buffer = + let buffer = if readRes.isErr(): return err("read failed: " & readRes.error.msg) - else: readRes.get() + else: + readRes.get() let response = WakuMetadataResponse.decode(buffer).valueOr: return err("decode failed: " & $error) @@ -70,20 +72,21 @@ proc request*(m: WakuMetadata, conn: Connection): Future[Result[WakuMetadataResp proc initProtocolHandler(m: WakuMetadata) = proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} = - let res = catch: await conn.readLp(RpcResponseMaxBytes) + let res = catch: + await conn.readLp(RpcResponseMaxBytes) let buffer = res.valueOr: - error "Connection reading error", error=error.msg + error "Connection reading error", error = error.msg return let response = WakuMetadataResponse.decode(buffer).valueOr: - error "Response decoding error", error=error + error "Response decoding error", error = error return debug "Received WakuMetadata request", - remoteClusterId=response.clusterId, - remoteShards=response.shards, - localClusterId=m.clusterId, - localShards=m.shards + remoteClusterId = response.clusterId, + remoteShards = response.shards, + localClusterId = m.clusterId, + localShards = m.shards discard await m.respond(conn) @@ -93,32 +96,34 @@ proc initProtocolHandler(m: WakuMetadata) = m.handler = handle m.codec = WakuMetadataCodec -proc new*(T: type WakuMetadata, - clusterId: uint32, - enr: Record, - queue: AsyncEventQueue[SubscriptionEvent], - ): T = +proc new*( + T: type WakuMetadata, + clusterId: uint32, + enr: Record, + queue: AsyncEventQueue[SubscriptionEvent], +): T = var (cluster, shards) = (clusterId, initHashSet[uint32]()) - let enrRes = enr.toTyped() + let enrRes = enr.toTyped() if enrRes.isOk(): - let shardingRes = enrRes.get().relaySharding() + let shardingRes = enrRes.get().relaySharding() if shardingRes.isSome(): let relayShard = shardingRes.get() cluster = uint32(relayShard.clusterId) shards = toHashSet(relayShard.shardIds.mapIt(uint32(it))) - - let wm = WakuMetadata(clusterId: cluster, shards: shards, topicSubscriptionQueue: queue) + + let wm = + WakuMetadata(clusterId: cluster, shards: shards, topicSubscriptionQueue: queue) wm.initProtocolHandler() - info "Created WakuMetadata protocol", clusterId=wm.clusterId, shards=wm.shards + info "Created WakuMetadata protocol", clusterId = wm.clusterId, shards = wm.shards return wm proc subscriptionsListener(wm: WakuMetadata) {.async.} = ## Listen for pubsub topics subscriptions changes - + let key = wm.topicSubscriptionQueue.register() while wm.started: @@ -134,14 +139,14 @@ proc subscriptionsListener(wm: WakuMetadata) {.async.} = if parsedTopic.clusterId != wm.clusterId: continue - case event.kind: - of PubsubSub: - wm.shards.incl(parsedTopic.shardId) - of PubsubUnsub: - wm.shards.excl(parsedTopic.shardId) - else: - continue - + case event.kind + of PubsubSub: + wm.shards.incl(parsedTopic.shardId) + of PubsubUnsub: + wm.shards.excl(parsedTopic.shardId) + else: + continue + wm.topicSubscriptionQueue.unregister(key) proc start*(wm: WakuMetadata) = @@ -150,4 +155,4 @@ proc start*(wm: WakuMetadata) = asyncSpawn wm.subscriptionsListener() proc stop*(wm: WakuMetadata) = - wm.started = false \ No newline at end of file + wm.started = false diff --git a/waku/waku_metadata/rpc.nim b/waku/waku_metadata/rpc.nim index 
fb85b774e..12ab873ac 100644 --- a/waku/waku_metadata/rpc.nim +++ b/waku/waku_metadata/rpc.nim @@ -3,21 +3,17 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/options +import std/options -import - ../common/protobuf +import ../common/protobuf -type - WakuMetadataRequest* = object - clusterId*: Option[uint32] - shards*: seq[uint32] +type WakuMetadataRequest* = object + clusterId*: Option[uint32] + shards*: seq[uint32] -type - WakuMetadataResponse* = object - clusterId*: Option[uint32] - shards*: seq[uint32] +type WakuMetadataResponse* = object + clusterId*: Option[uint32] + shards*: seq[uint32] proc encode*(rpc: WakuMetadataRequest): ProtoBuffer = var pb = initProtoBuffer() @@ -86,5 +82,4 @@ proc decode*(T: type WakuMetadataResponse, buffer: seq[byte]): ProtoResult[T] = for shard in shards: rpc.shards.add(shard.uint32) - ok(rpc) diff --git a/waku/waku_node.nim b/waku/waku_node.nim index 66cff4521..54536f91d 100644 --- a/waku/waku_node.nim +++ b/waku/waku_node.nim @@ -1,9 +1,3 @@ -import - ./node/config, - ./node/waku_switch as switch, - ./node/waku_node as node +import ./node/config, ./node/waku_switch as switch, ./node/waku_node as node -export - config, - switch, - node +export config, switch, node diff --git a/waku/waku_noise/noise.nim b/waku/waku_noise/noise.nim index abe9d5077..dc0434500 100644 --- a/waku/waku_noise/noise.nim +++ b/waku/waku_noise/noise.nim @@ -53,9 +53,9 @@ proc hasKey*(cs: CipherState): bool = # Encrypts a plaintext using key material in a Noise Cipher State # The CipherState is updated increasing the nonce (used as a counter in Noise) by one -proc encryptWithAd*(state: var CipherState, ad, plaintext: openArray[byte]): seq[byte] - {.raises: [Defect, NoiseNonceMaxError].} = - +proc encryptWithAd*( + state: var CipherState, ad, plaintext: openArray[byte] +): seq[byte] {.raises: [Defect, NoiseNonceMaxError].} = # We raise an error if encryption is called using a Cipher State with nonce greater than MaxNonce if state.n > NonceMax: raise newException(NoiseNonceMaxError, "Noise max nonce value reached") @@ -64,11 +64,10 @@ proc encryptWithAd*(state: var CipherState, ad, plaintext: openArray[byte]): seq # If an encryption key is set in the Cipher state, we proceed with encryption if state.hasKey: - # The output is the concatenation of the ciphertext and authorization tag # We define its length accordingly ciphertext = newSeqOfCap[byte](plaintext.len + sizeof(ChaChaPolyTag)) - + # Since ChaChaPoly encryption primitive overwrites the input with the output, # we copy the plaintext in the output ciphertext variable and we pass it to encryption ciphertext.add(plaintext) @@ -76,7 +75,7 @@ proc encryptWithAd*(state: var CipherState, ad, plaintext: openArray[byte]): seq # The nonce is read from the input CipherState # By Noise specification the nonce is 8 bytes long out of the 12 bytes supported by ChaChaPoly var nonce: ChaChaPolyNonce - nonce[4..<12] = toBytesLE(state.n) + nonce[4 ..< 12] = toBytesLE(state.n) # We perform encryption and we store the authorization tag var authorizationTag: ChaChaPolyTag @@ -91,11 +90,13 @@ proc encryptWithAd*(state: var CipherState, ad, plaintext: openArray[byte]): seq if state.n > NonceMax: raise newException(NoiseNonceMaxError, "Noise max nonce value reached") - trace "encryptWithAd", authorizationTag = byteutils.toHex(authorizationTag), ciphertext = ciphertext, nonce = state.n - 1 + trace "encryptWithAd", + authorizationTag = byteutils.toHex(authorizationTag), + ciphertext = ciphertext, + nonce = state.n - 1 # 
Otherwise we return the input plaintext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object else: - ciphertext = @plaintext debug "encryptWithAd called with no encryption key set. Returning plaintext." @@ -103,9 +104,9 @@ proc encryptWithAd*(state: var CipherState, ad, plaintext: openArray[byte]): seq # Decrypts a ciphertext using key material in a Noise Cipher State # The CipherState is updated increasing the nonce (used as a counter in Noise) by one -proc decryptWithAd*(state: var CipherState, ad, ciphertext: openArray[byte]): seq[byte] - {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = - +proc decryptWithAd*( + state: var CipherState, ad, ciphertext: openArray[byte] +): seq[byte] {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = # We raise an error if encryption is called using a Cipher State with nonce greater than MaxNonce if state.n > NonceMax: raise newException(NoiseNonceMaxError, "Noise max nonce value reached") @@ -114,40 +115,48 @@ proc decryptWithAd*(state: var CipherState, ad, ciphertext: openArray[byte]): se # If an encryption key is set in the Cipher state, we proceed with decryption if state.hasKey: - # We read the authorization appendend at the end of a ciphertext - let inputAuthorizationTag = ciphertext.toOpenArray(ciphertext.len - ChaChaPolyTag.len, ciphertext.high).intoChaChaPolyTag - + let inputAuthorizationTag = ciphertext.toOpenArray( + ciphertext.len - ChaChaPolyTag.len, ciphertext.high + ).intoChaChaPolyTag + var authorizationTag: ChaChaPolyTag nonce: ChaChaPolyNonce # The nonce is read from the input CipherState # By Noise specification the nonce is 8 bytes long out of the 12 bytes supported by ChaChaPoly - nonce[4..<12] = toBytesLE(state.n) + nonce[4 ..< 12] = toBytesLE(state.n) # Since ChaChaPoly decryption primitive overwrites the input with the output, # we copy the ciphertext (authorization tag excluded) in the output plaintext variable and we pass it to decryption - plaintext = ciphertext[0..(ciphertext.high - ChaChaPolyTag.len)] - + plaintext = ciphertext[0 .. (ciphertext.high - ChaChaPolyTag.len)] + ChaChaPoly.decrypt(state.k, nonce, authorizationTag, plaintext, ad) # We check if the input authorization tag matches the decryption authorization tag if inputAuthorizationTag != authorizationTag: - debug "decryptWithAd failed", plaintext = plaintext, ciphertext = ciphertext, inputAuthorizationTag = inputAuthorizationTag, authorizationTag = authorizationTag - raise newException(NoiseDecryptTagError, "decryptWithAd failed tag authentication.") - + debug "decryptWithAd failed", + plaintext = plaintext, + ciphertext = ciphertext, + inputAuthorizationTag = inputAuthorizationTag, + authorizationTag = authorizationTag + raise + newException(NoiseDecryptTagError, "decryptWithAd failed tag authentication.") + # We increase the Cipher state nonce inc state.n # If the nonce is greater than the maximum allowed nonce, we raise an exception if state.n > NonceMax: raise newException(NoiseNonceMaxError, "Noise max nonce value reached") - trace "decryptWithAd", inputAuthorizationTag = inputAuthorizationTag, authorizationTag = authorizationTag, nonce = state.n - + trace "decryptWithAd", + inputAuthorizationTag = inputAuthorizationTag, + authorizationTag = authorizationTag, + nonce = state.n + # Otherwise we return the input ciphertext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object else: - plaintext = @ciphertext debug "decryptWithAd called with no encryption key set. 
Returning ciphertext." @@ -168,7 +177,6 @@ proc randomCipherState*(rng: var HmacDrbgContext, nonce: uint64 = 0): CipherStat setNonce(randomCipherState, nonce) return randomCipherState - # Gets the key of a Cipher State proc getKey*(cs: CipherState): ChaChaPolyKey = return cs.k @@ -219,7 +227,9 @@ proc mixHash*(ss: var SymmetricState, data: openArray[byte]) = # mixKeyAndHash as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object # Combines MixKey and MixHash -proc mixKeyAndHash*(ss: var SymmetricState, inputKeyMaterial: openArray[byte]) {.used.} = +proc mixKeyAndHash*( + ss: var SymmetricState, inputKeyMaterial: openArray[byte] +) {.used.} = var tempKeys: array[3, ChaChaPolyKey] # Derives 3 keys using HKDF, the chaining key and the input key material sha256.hkdf(ss.ck, inputKeyMaterial, [], tempKeys) @@ -234,8 +244,9 @@ proc mixKeyAndHash*(ss: var SymmetricState, inputKeyMaterial: openArray[byte]) { # EncryptAndHash as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object # Combines encryptWithAd and mixHash # Note that by setting extraAd, it is possible to pass extra additional data that will be concatenated to the ad specified by Noise (can be used to authenticate messageNametag) -proc encryptAndHash*(ss: var SymmetricState, plaintext: openArray[byte], extraAd: openArray[byte] = []): seq[byte] - {.raises: [Defect, NoiseNonceMaxError].} = +proc encryptAndHash*( + ss: var SymmetricState, plaintext: openArray[byte], extraAd: openArray[byte] = [] +): seq[byte] {.raises: [Defect, NoiseNonceMaxError].} = # The output ciphertext var ciphertext: seq[byte] # The additional data @@ -248,8 +259,9 @@ proc encryptAndHash*(ss: var SymmetricState, plaintext: openArray[byte], extraAd # DecryptAndHash as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object # Combines decryptWithAd and mixHash -proc decryptAndHash*(ss: var SymmetricState, ciphertext: openArray[byte], extraAd: openArray[byte] = []): seq[byte] - {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = +proc decryptAndHash*( + ss: var SymmetricState, ciphertext: openArray[byte], extraAd: openArray[byte] = [] +): seq[byte] {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = # The output plaintext var plaintext: seq[byte] # The additional data @@ -286,7 +298,9 @@ proc getCipherState*(ss: SymmetricState): CipherState = ################################# # Initializes a Handshake State -proc init*(_: type[HandshakeState], hsPattern: HandshakePattern, psk: seq[byte] = @[]): HandshakeState = +proc init*( + _: type[HandshakeState], hsPattern: HandshakePattern, psk: seq[byte] = @[] +): HandshakeState = # The output Handshake State var hs: HandshakeState # By default the Handshake State initiator flag is set to false @@ -309,9 +323,8 @@ proc init*(_: type[HandshakeState], hsPattern: HandshakePattern, psk: seq[byte] # It takes a Cipher State (with key, nonce, and associated data) and encrypts a plaintext # The cipher state in not changed proc encrypt*( - state: ChaChaPolyCipherState, - plaintext: openArray[byte]): ChaChaPolyCiphertext - {.noinit, raises: [Defect, NoiseEmptyChaChaPolyInput].} = + state: ChaChaPolyCipherState, plaintext: openArray[byte] +): ChaChaPolyCiphertext {.noinit, raises: [Defect, NoiseEmptyChaChaPolyInput].} = # If plaintext is empty, we raise an error if plaintext == @[]: raise newException(NoiseEmptyChaChaPolyInput, "Tried to encrypt empty plaintext") @@ -329,9 +342,8 @@ proc encrypt*( # It takes a 
Cipher State (with key, nonce, and associated data) and decrypts a ciphertext # The cipher state is not changed proc decrypt*( - state: ChaChaPolyCipherState, - ciphertext: ChaChaPolyCiphertext): seq[byte] - {.raises: [Defect, NoiseEmptyChaChaPolyInput, NoiseDecryptTagError].} = + state: ChaChaPolyCipherState, ciphertext: ChaChaPolyCiphertext +): seq[byte] {.raises: [Defect, NoiseEmptyChaChaPolyInput, NoiseDecryptTagError].} = # If ciphertext is empty, we raise an error if ciphertext.data == @[]: raise newException(NoiseEmptyChaChaPolyInput, "Tried to decrypt empty ciphertext") @@ -352,4 +364,4 @@ proc decrypt*( if tagIn != tagOut: debug "decrypt failed", plaintext = shortLog(plaintext) raise newException(NoiseDecryptTagError, "decrypt tag authentication failed.") - return plaintext \ No newline at end of file + return plaintext diff --git a/waku/waku_noise/noise_handshake_processing.nim b/waku/waku_noise/noise_handshake_processing.nim index 080e96f40..a5b0dbc63 100644 --- a/waku/waku_noise/noise_handshake_processing.nim +++ b/waku/waku_noise/noise_handshake_processing.nim @@ -32,9 +32,10 @@ logScope: # Based on the message handshake direction and if the user is or not the initiator, returns a boolean tuple telling if the user # has to read or write the next handshake message -proc getReadingWritingState(hs: HandshakeState, direction: MessageDirection): (bool, bool) = - - var reading, writing : bool +proc getReadingWritingState( + hs: HandshakeState, direction: MessageDirection +): (bool, bool) = + var reading, writing: bool if hs.initiator and direction == D_r: # I'm Alice and direction is -> @@ -58,17 +59,18 @@ proc getReadingWritingState(hs: HandshakeState, direction: MessageDirection): (b # Checks if a pre-message is valid according to Noise specifications # http://www.noiseprotocol.org/noise.html#handshake-patterns proc isValid(msg: seq[PreMessagePattern]): bool = - var isValid: bool = true # Non-empty pre-messages can only have patterns "e", "s", "e,s" in each direction - let allowedPatterns: seq[PreMessagePattern] = @[ PreMessagePattern(direction: D_r, tokens: @[T_s]), - PreMessagePattern(direction: D_r, tokens: @[T_e]), - PreMessagePattern(direction: D_r, tokens: @[T_e, T_s]), - PreMessagePattern(direction: D_l, tokens: @[T_s]), - PreMessagePattern(direction: D_l, tokens: @[T_e]), - PreMessagePattern(direction: D_l, tokens: @[T_e, T_s]) - ] + let allowedPatterns: seq[PreMessagePattern] = + @[ + PreMessagePattern(direction: D_r, tokens: @[T_s]), + PreMessagePattern(direction: D_r, tokens: @[T_e]), + PreMessagePattern(direction: D_r, tokens: @[T_e, T_s]), + PreMessagePattern(direction: D_l, tokens: @[T_s]), + PreMessagePattern(direction: D_l, tokens: @[T_e]), + PreMessagePattern(direction: D_l, tokens: @[T_e, T_s]), + ] # We check if pre message patterns are allowed for pattern in msg: @@ -83,14 +85,16 @@ proc isValid(msg: seq[PreMessagePattern]): bool = ################################# # Processes pre-message patterns -proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq[NoisePublicKey] = @[]) - {.raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError].} = - +proc processPreMessagePatternTokens( + hs: var HandshakeState, inPreMessagePKs: seq[NoisePublicKey] = @[] +) {. 
+ raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError] +.} = var # I make a copy of the input pre-message public keys, so that I can easily delete processed ones without using iterators/counters preMessagePKs = inPreMessagePKs # Here we store currently processed pre message public key - currPK : NoisePublicKey + currPK: NoisePublicKey # We retrieve the pre-message patterns to process, if any # If none, there's nothing to do @@ -108,20 +112,20 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq tokens = messagePattern.tokens # We get if the user is reading or writing the current pre-message pattern - var (reading, writing) = getReadingWritingState(hs , direction) + var (reading, writing) = getReadingWritingState(hs, direction) # We process each message pattern token for token in tokens: - # We process the pattern token case token of T_e: - # We expect an ephemeral key, so we attempt to read it (next PK to process will always be at index 0 of preMessagePKs) if preMessagePKs.len > 0: currPK = preMessagePKs[0] else: - raise newException(NoiseHandshakeError, "Noise pre-message read e, expected a public key") + raise newException( + NoiseHandshakeError, "Noise pre-message read e, expected a public key" + ) # If user is reading the "e" token if reading: @@ -129,17 +133,17 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq # We check if current key is encrypted or not. We assume pre-message public keys are all unencrypted on users' end if currPK.flag == 0.uint8: - # Sets re and calls MixHash(re.public_key). hs.re = intoCurve25519Key(currPK.pk) hs.ss.mixHash(hs.re) - else: - raise newException(NoisePublicKeyError, "Noise read e, incorrect encryption flag for pre-message public key") + raise newException( + NoisePublicKeyError, + "Noise read e, incorrect encryption flag for pre-message public key", + ) # If user is writing the "e" token elif writing: - trace "noise pre-message write e" # When writing, the user is sending a public key, @@ -147,7 +151,10 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq if hs.e.publicKey == intoCurve25519Key(currPK.pk): hs.ss.mixHash(hs.e.publicKey) else: - raise newException(NoisePublicKeyError, "Noise pre-message e key doesn't correspond to locally set e key pair") + raise newException( + NoisePublicKeyError, + "Noise pre-message e key doesn't correspond to locally set e key pair", + ) # Noise specification: section 9.2 # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results @@ -158,14 +165,14 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq # We delete processed public key preMessagePKs.delete(0) - of T_s: - # We expect a static key, so we attempt to read it (next PK to process will always be at index of preMessagePKs) if preMessagePKs.len > 0: currPK = preMessagePKs[0] else: - raise newException(NoiseHandshakeError, "Noise pre-message read s, expected a public key") + raise newException( + NoiseHandshakeError, "Noise pre-message read s, expected a public key" + ) # If user is reading the "s" token if reading: @@ -173,17 +180,17 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq # We check if current key is encrypted or not. We assume pre-message public keys are all unencrypted on users' end if currPK.flag == 0.uint8: - # Sets re and calls MixHash(re.public_key). 
hs.rs = intoCurve25519Key(currPK.pk) hs.ss.mixHash(hs.rs) - else: - raise newException(NoisePublicKeyError, "Noise read s, incorrect encryption flag for pre-message public key") + raise newException( + NoisePublicKeyError, + "Noise read s, incorrect encryption flag for pre-message public key", + ) # If user is writing the "s" token elif writing: - trace "noise pre-message write s" # If writing, it means that the user is sending a public key, @@ -191,7 +198,10 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq if hs.s.publicKey == intoCurve25519Key(currPK.pk): hs.ss.mixHash(hs.s.publicKey) else: - raise newException(NoisePublicKeyError, "Noise pre-message s key doesn't correspond to locally set s key pair") + raise newException( + NoisePublicKeyError, + "Noise pre-message s key doesn't correspond to locally set s key pair", + ) # Noise specification: section 9.2 # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results @@ -202,16 +212,15 @@ proc processPreMessagePatternTokens(hs: var HandshakeState, inPreMessagePKs: seq # We delete processed public key preMessagePKs.delete(0) - else: - - raise newException(NoiseMalformedHandshake, "Invalid Token for pre-message pattern") + raise + newException(NoiseMalformedHandshake, "Invalid Token for pre-message pattern") # This procedure encrypts/decrypts the implicit payload attached at the end of every message pattern # An optional extraAd to pass extra additional data in encryption/decryption can be set (useful to authenticate messageNametag) -proc processMessagePatternPayload(hs: var HandshakeState, transportMessage: seq[byte], extraAd: openArray[byte] = []): seq[byte] - {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = - +proc processMessagePatternPayload( + hs: var HandshakeState, transportMessage: seq[byte], extraAd: openArray[byte] = [] +): seq[byte] {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = var payload: seq[byte] # We retrieve current message pattern (direction + tokens) to process @@ -231,9 +240,16 @@ proc processMessagePatternPayload(hs: var HandshakeState, transportMessage: seq[ return payload # We process an input handshake message according to current handshake state and we return the next handshake step's handshake message -proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var HandshakeState, inputHandshakeMessage: seq[NoisePublicKey] = @[]): Result[seq[NoisePublicKey], cstring] - {.raises: [Defect, NoiseHandshakeError, NoiseMalformedHandshake, NoisePublicKeyError, NoiseDecryptTagError, NoiseNonceMaxError].} = - +proc processMessagePatternTokens( + rng: var rand.HmacDrbgContext, + hs: var HandshakeState, + inputHandshakeMessage: seq[NoisePublicKey] = @[], +): Result[seq[NoisePublicKey], cstring] {. 
+ raises: [ + Defect, NoiseHandshakeError, NoiseMalformedHandshake, NoisePublicKeyError, + NoiseDecryptTagError, NoiseNonceMaxError, + ] +.} = # We retrieve current message pattern (direction + tokens) to process let messagePattern = hs.handshakePattern.messagePatterns[hs.msgPatternIdx] @@ -241,7 +257,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak tokens = messagePattern.tokens # We get if the user is reading or writing the input handshake message - var (reading, writing) = getReadingWritingState(hs , direction) + var (reading, writing) = getReadingWritingState(hs, direction) # I make a copy of the handshake message so that I can easily delete processed PKs without using iterators/counters # (Possibly) non-empty if reading @@ -256,10 +272,8 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # We process each message pattern token for token in tokens: - case token of T_e: - # If user is reading the "e" token if reading: trace "noise read e" @@ -273,7 +287,6 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # We check if current key is encrypted or not # Note: by specification, ephemeral keys should always be unencrypted. But we support encrypted ones. if currPK.flag == 0.uint8: - # Unencrypted Public Key # Sets re and calls MixHash(re.public_key). hs.re = intoCurve25519Key(currPK.pk) @@ -281,13 +294,14 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # The following is out of specification: we call decryptAndHash for encrypted ephemeral keys, similarly as happens for (encrypted) static keys elif currPK.flag == 1.uint8: - # Encrypted public key # Decrypts re, sets re and calls MixHash(re.public_key). hs.re = intoCurve25519Key(hs.ss.decryptAndHash(currPK.pk)) - else: - raise newException(NoisePublicKeyError, "Noise read e, incorrect encryption flag for public key") + raise newException( + NoisePublicKeyError, + "Noise read e, incorrect encryption flag for public key", + ) # Noise specification: section 9.2 # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results @@ -318,9 +332,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # We add the ephemeral public key to the Waku payload outHandshakeMessage.add toNoisePublicKey(getPublicKey(hs.e)) - of T_s: - # If user is reading the "s" token if reading: trace "noise read s" @@ -333,27 +345,25 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # We check if current key is encrypted or not if currPK.flag == 0.uint8: - # Unencrypted Public Key # Sets rs and calls MixHash(rs.public_key). hs.rs = intoCurve25519Key(currPK.pk) hs.ss.mixHash(hs.rs) - elif currPK.flag == 1.uint8: - # Encrypted public key # Decrypts rs, sets rs and calls MixHash(rs.public_key).
hs.rs = intoCurve25519Key(hs.ss.decryptAndHash(currPK.pk)) - else: - raise newException(NoisePublicKeyError, "Noise read s, incorrect encryption flag for public key") + raise newException( + NoisePublicKeyError, + "Noise read s, incorrect encryption flag for public key", + ) # We delete processed public key inHandshakeMessage.delete(0) # If user is writing the "s" token elif writing: - trace "noise write s" # If the local static key is not set (the handshake state was not properly initialized), we raise an error @@ -371,18 +381,14 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak outHandshakeMessage.add NoisePublicKey(flag: 1, pk: encS) else: outHandshakeMessage.add NoisePublicKey(flag: 0, pk: encS) - of T_psk: - # If user is reading the "psk" token trace "noise psk" # Calls MixKeyAndHash(psk) hs.ss.mixKeyAndHash(hs.psk) - of T_ee: - # If user is reading the "ee" token trace "noise dh ee" @@ -393,9 +399,7 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # Calls MixKey(DH(e, re)). hs.ss.mixKey(dh(hs.e.privateKey, hs.re)) - of T_es: - # If user is reading the "es" token trace "noise dh es" @@ -404,15 +408,17 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # If both present, we call MixKey(DH(e, rs)) if initiator, MixKey(DH(s, re)) if responder. if hs.initiator: if isDefault(hs.e) or isDefault(hs.rs): - raise newException(NoisePublicKeyError, "Local or remote ephemeral/static key not set") + raise newException( + NoisePublicKeyError, "Local or remote ephemeral/static key not set" + ) hs.ss.mixKey(dh(hs.e.privateKey, hs.rs)) else: if isDefault(hs.re) or isDefault(hs.s): - raise newException(NoisePublicKeyError, "Local or remote ephemeral/static key not set") + raise newException( + NoisePublicKeyError, "Local or remote ephemeral/static key not set" + ) hs.ss.mixKey(dh(hs.s.privateKey, hs.re)) - of T_se: - # If user is reading the "se" token trace "noise dh se" @@ -421,22 +427,25 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak # If both present, call MixKey(DH(s, re)) if initiator, MixKey(DH(e, rs)) if responder. if hs.initiator: if isDefault(hs.s) or isDefault(hs.re): - raise newException(NoiseMalformedHandshake, "Local or remote ephemeral/static key not set") + raise newException( + NoiseMalformedHandshake, "Local or remote ephemeral/static key not set" + ) hs.ss.mixKey(dh(hs.s.privateKey, hs.re)) else: if isDefault(hs.rs) or isDefault(hs.e): - raise newException(NoiseMalformedHandshake, "Local or remote ephemeral/static key not set") + raise newException( + NoiseMalformedHandshake, "Local or remote ephemeral/static key not set" + ) hs.ss.mixKey(dh(hs.e.privateKey, hs.rs)) - of T_ss: - # If user is reading the "ss" token trace "noise dh ss" # If local and/or remote static keys are not set, we raise an error if isDefault(hs.s) or isDefault(hs.rs): - raise newException(NoiseMalformedHandshake, "Local or remote static key not set") + raise + newException(NoiseMalformedHandshake, "Local or remote static key not set") # Calls MixKey(DH(s, rs)). 
hs.ss.mixKey(dh(hs.s.privateKey, hs.rs)) @@ -448,8 +457,17 @@ proc processMessagePatternTokens(rng: var rand.HmacDrbgContext, hs: var Handshak ################################# # Initializes a Handshake State -proc initialize*(hsPattern: HandshakePattern, ephemeralKey: KeyPair = default(KeyPair), staticKey: KeyPair = default(KeyPair), prologue: seq[byte] = @[], psk: seq[byte] = @[], preMessagePKs: seq[NoisePublicKey] = @[], initiator: bool = false): HandshakeState - {.raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError].} = +proc initialize*( + hsPattern: HandshakePattern, + ephemeralKey: KeyPair = default(KeyPair), + staticKey: KeyPair = default(KeyPair), + prologue: seq[byte] = @[], + psk: seq[byte] = @[], + preMessagePKs: seq[NoisePublicKey] = @[], + initiator: bool = false, +): HandshakeState {. + raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError] +.} = var hs = HandshakeState.init(hsPattern) hs.ss.mixHash(prologue) hs.e = ephemeralKey @@ -465,9 +483,18 @@ proc initialize*(hsPattern: HandshakePattern, ephemeralKey: KeyPair = default(Ke # Each user in a handshake alternates writing and reading of handshake messages. # If the user is writing the handshake message, the transport message (if not empty) and eventually a non-empty message nametag has to be passed to transportMessage and messageNametag and readPayloadV2 can be left to its default value # If the user is reading the handshake message, the read payload v2 has to be passed to readPayloadV2 and the transportMessage can be left to its default values. Decryption is skipped if the payloadv2 read doesn't have a message nametag equal to messageNametag (empty input nametags are converted to all-0 MessageNametagLength bytes arrays) -proc stepHandshake*(rng: var rand.HmacDrbgContext, hs: var HandshakeState, readPayloadV2: PayloadV2 = default(PayloadV2), transportMessage: seq[byte] = @[], messageNametag: openArray[byte] = []): Result[HandshakeStepResult, cstring] - {.raises: [Defect, NoiseHandshakeError, NoiseMessageNametagError, NoiseMalformedHandshake, NoisePublicKeyError, NoiseDecryptTagError, NoiseNonceMaxError].} = - +proc stepHandshake*( + rng: var rand.HmacDrbgContext, + hs: var HandshakeState, + readPayloadV2: PayloadV2 = default(PayloadV2), + transportMessage: seq[byte] = @[], + messageNametag: openArray[byte] = [], +): Result[HandshakeStepResult, cstring] {.
+ raises: [ + Defect, NoiseHandshakeError, NoiseMessageNametagError, NoiseMalformedHandshake, + NoisePublicKeyError, NoiseDecryptTagError, NoiseNonceMaxError, + ] +.} = var hsStepResult: HandshakeStepResult # If there are no more message patterns left for processing @@ -494,13 +521,18 @@ proc stepHandshake*(rng: var rand.HmacDrbgContext, hs: var HandshakeState, readP hsStepResult.payload2.messageNametag = toMessageNametag(messageNametag) hsStepResult.payload2.handshakeMessage = processMessagePatternTokens(rng, hs).get() # We write the payload by passing the messageNametag as extra additional data - hsStepResult.payload2.transportMessage = processMessagePatternPayload(hs, transportMessage, extraAd = hsStepResult.payload2.messageNametag) + hsStepResult.payload2.transportMessage = processMessagePatternPayload( + hs, transportMessage, extraAd = hsStepResult.payload2.messageNametag + ) # If we read an answer during this handshake step elif reading: # If the read message nametag doesn't match the expected input one we raise an error if readPayloadV2.messageNametag != toMessageNametag(messageNametag): - raise newException(NoiseMessageNametagError, "The message nametag of the read message doesn't match the expected one") + raise newException( + NoiseMessageNametagError, + "The message nametag of the read message doesn't match the expected one", + ) # We process the read public keys and (eventually decrypt) the read transport message let @@ -510,10 +542,13 @@ proc stepHandshake*(rng: var rand.HmacDrbgContext, hs: var HandshakeState, readP # Since we only read, nothing meaningful (i.e. public keys) is returned discard processMessagePatternTokens(rng, hs, readHandshakeMessage) # We retrieve and store the (decrypted) received transport message by passing the messageNametag as extra additional data - hsStepResult.transportMessage = processMessagePatternPayload(hs, readTransportMessage, extraAd = readPayloadV2.messageNametag) - + hsStepResult.transportMessage = processMessagePatternPayload( + hs, readTransportMessage, extraAd = readPayloadV2.messageNametag + ) else: - raise newException(NoiseHandshakeError, "Handshake Error: neither writing or reading user") + raise newException( + NoiseHandshakeError, "Handshake Error: neither writing or reading user" + ) # We increase the handshake state message pattern index to progress to next step hs.msgPatternIdx += 1 @@ -522,7 +557,6 @@ # Finalizes the handshake by calling Split and assigning the proper Cipher States to users proc finalizeHandshake*(hs: var HandshakeState): HandshakeResult = - var hsResult: HandshakeResult ## Noise specification, Section 5: @@ -573,9 +607,11 @@ proc finalizeHandshake*(hs: var HandshakeState): HandshakeResult = ## due to nonce exhaustion, then the application must delete the CipherState and terminate the session.
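# ---------------------------------------------------------------------------
# Editor's note (not part of the diff): a minimal driver sketch showing how
# initialize/stepHandshake/finalizeHandshake compose for the XX pattern.
# Assumptions: `newRng()` and `genKeyPair` are taken from the surrounding
# codebase (noise_utils.nim); the variable names are illustrative only.
var
  rng = newRng()
  aliceStatic = genKeyPair(rng[])
  bobStatic = genKeyPair(rng[])
  aliceHS = initialize(
    hsPattern = NoiseHandshakePatterns["XX"], staticKey = aliceStatic, initiator = true
  )
  bobHS = initialize(hsPattern = NoiseHandshakePatterns["XX"], staticKey = bobStatic)
# XX has three message patterns: the initiator writes, the responder reads and
# answers, then the initiator closes by transferring its static key.
let step1 = stepHandshake(rng[], aliceHS).get()
discard stepHandshake(rng[], bobHS, readPayloadV2 = step1.payload2).get()
let step2 = stepHandshake(rng[], bobHS).get()
discard stepHandshake(rng[], aliceHS, readPayloadV2 = step2.payload2).get()
let step3 = stepHandshake(rng[], aliceHS).get()
discard stepHandshake(rng[], bobHS, readPayloadV2 = step3.payload2).get()
# After the last message pattern, Split derives matching cipher states on both
# sides; writeMessage/readMessage below then use them for transport messages.
var
  aliceSession = finalizeHandshake(aliceHS)
  bobSession = finalizeHandshake(bobHS)
# --- end editor's note -----------------------------------------------------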
# Writes an encrypted message using the proper Cipher State -proc writeMessage*(hsr: var HandshakeResult, transportMessage: seq[byte], outboundMessageNametagBuffer: var MessageNametagBuffer): PayloadV2 - {.raises: [Defect, NoiseNonceMaxError].} = - +proc writeMessage*( + hsr: var HandshakeResult, + transportMessage: seq[byte], + outboundMessageNametagBuffer: var MessageNametagBuffer, +): PayloadV2 {.raises: [Defect, NoiseNonceMaxError].} = var payload2: PayloadV2 # We set the message nametag using the input buffer @@ -587,31 +623,44 @@ proc writeMessage*(hsr: var HandshakeResult, transportMessage: seq[byte], outbou # We pad the transport message let paddedTransportMessage = pkcs7_pad(transportMessage, NoisePaddingBlockSize) # Encryption is done with zero-length associated data as per specification - payload2.transportMessage = encryptWithAd(hsr.csOutbound, ad = @(payload2.messageNametag), plaintext = paddedTransportMessage) + payload2.transportMessage = encryptWithAd( + hsr.csOutbound, ad = @(payload2.messageNametag), plaintext = paddedTransportMessage + ) return payload2 # Reads an encrypted message using the proper Cipher State # Decryption is attempted only if the input PayloadV2 has a messageNametag equal to the one expected -proc readMessage*(hsr: var HandshakeResult, readPayload2: PayloadV2, inboundMessageNametagBuffer: var MessageNametagBuffer): Result[seq[byte], cstring] - {.raises: [Defect, NoiseDecryptTagError, NoiseMessageNametagError, NoiseNonceMaxError, NoiseSomeMessagesWereLost].} = - +proc readMessage*( + hsr: var HandshakeResult, + readPayload2: PayloadV2, + inboundMessageNametagBuffer: var MessageNametagBuffer, +): Result[seq[byte], cstring] {. + raises: [ + Defect, NoiseDecryptTagError, NoiseMessageNametagError, NoiseNonceMaxError, + NoiseSomeMessagesWereLost, + ] +.} = # The output decrypted message var message: seq[byte] # If the message nametag does not correspond to the nametag expected in the inbound message nametag buffer # an error is raised (to be handled externally, i.e. re-request lost messages, discard, etc.) - let nametagIsOk = checkNametag(readPayload2.messageNametag, inboundMessageNametagBuffer).isOk + let nametagIsOk = + checkNametag(readPayload2.messageNametag, inboundMessageNametagBuffer).isOk assert(nametagIsOk) # At this point the messageNametag matches the expected nametag. 
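# NOTE (editor): checkNametag itself raises on a mismatch (e.g.
# NoiseSomeMessagesWereLost, defined in noise_utils.nim further below) when the
# nametag is in the buffer but is not the next expected one, so the assert above
# only re-states the happy path; callers are expected to handle the raised
# errors externally, i.e. re-request lost messages or discard.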
# According to 35/WAKU2-NOISE RFC, no Handshake protocol information is sent when exchanging messages if readPayload2.protocolId == 0.uint8: - # On application level we decide to discard messages which fail decryption, without raising an error try: # Decryption is done with messageNametag as associated data - let paddedMessage = decryptWithAd(hsr.csInbound, ad = @(readPayload2.messageNametag), ciphertext = readPayload2.transportMessage) + let paddedMessage = decryptWithAd( + hsr.csInbound, + ad = @(readPayload2.messageNametag), + ciphertext = readPayload2.transportMessage, + ) # We unpad the decrypted message message = pkcs7_unpad(paddedMessage, NoisePaddingBlockSize) # The message successfully decrypted, we can delete the first element of the inbound Message Nametag Buffer diff --git a/waku/waku_noise/noise_types.nim b/waku/waku_noise/noise_types.nim index 3f7fd805f..33e8a2db0 100644 --- a/waku/waku_noise/noise_types.nim +++ b/waku/waku_noise/noise_types.nim @@ -59,7 +59,7 @@ type ################################# # Noise Public Keys ################################# - + # A Noise public key is a public key exchanged during Noise handshakes (no private part) # This follows https://rfc.vac.dev/spec/35/#public-keys-serialization # pk contains the X coordinate of the public key, if unencrypted (this implies flag = 0) @@ -204,7 +204,6 @@ type ################################# # Some useful error types ################################# - NoiseError* = object of LPError NoiseHandshakeError* = object of NoiseError NoiseEmptyChaChaPolyInput* = object of NoiseError @@ -224,56 +223,71 @@ const EmptyPreMessage*: seq[PreMessagePattern] = @[] # Supported Noise handshake patterns as defined in https://rfc.vac.dev/spec/35/#specification - NoiseHandshakePatterns* = { - "K1K1": HandshakePattern(name: "Noise_K1K1_25519_ChaChaPoly_SHA256", - preMessagePatterns: @[PreMessagePattern(direction: D_r, tokens: @[T_s]), - PreMessagePattern(direction: D_l, tokens: @[T_s])], - messagePatterns: @[ MessagePattern(direction: D_r, tokens: @[T_e]), - MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_es]), - MessagePattern(direction: D_r, tokens: @[T_se])] - ), - - "XK1": HandshakePattern(name: "Noise_XK1_25519_ChaChaPoly_SHA256", - preMessagePatterns: @[PreMessagePattern(direction: D_l, tokens: @[T_s])], - messagePatterns: @[ MessagePattern(direction: D_r, tokens: @[T_e]), - MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_es]), - MessagePattern(direction: D_r, tokens: @[T_s, T_se])] - ), - - "XX": HandshakePattern(name: "Noise_XX_25519_ChaChaPoly_SHA256", - preMessagePatterns: EmptyPreMessage, - messagePatterns: @[ MessagePattern(direction: D_r, tokens: @[T_e]), - MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_s, T_es]), - MessagePattern(direction: D_r, tokens: @[T_s, T_se])] - ), - - "XXpsk0": HandshakePattern(name: "Noise_XXpsk0_25519_ChaChaPoly_SHA256", - preMessagePatterns: EmptyPreMessage, - messagePatterns: @[ MessagePattern(direction: D_r, tokens: @[T_psk, T_e]), - MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_s, T_es]), - MessagePattern(direction: D_r, tokens: @[T_s, T_se])] - ), - - "WakuPairing": HandshakePattern(name: "Noise_WakuPairing_25519_ChaChaPoly_SHA256", - preMessagePatterns: @[PreMessagePattern(direction: D_l, tokens: @[T_e])], - messagePatterns: @[ MessagePattern(direction: D_r, tokens: @[T_e, T_ee]), - MessagePattern(direction: D_l, tokens: @[T_s, T_es]), - MessagePattern(direction: D_r, tokens: @[T_s, T_se, T_ss])] - ) - - }.toTable() - + NoiseHandshakePatterns* = { +
"K1K1": HandshakePattern( + name: "Noise_K1K1_25519_ChaChaPoly_SHA256", + preMessagePatterns: + @[ + PreMessagePattern(direction: D_r, tokens: @[T_s]), + PreMessagePattern(direction: D_l, tokens: @[T_s]), + ], + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_es]), + MessagePattern(direction: D_r, tokens: @[T_se]), + ], + ), + "XK1": HandshakePattern( + name: "Noise_XK1_25519_ChaChaPoly_SHA256", + preMessagePatterns: @[PreMessagePattern(direction: D_l, tokens: @[T_s])], + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se]), + ], + ), + "XX": HandshakePattern( + name: "Noise_XX_25519_ChaChaPoly_SHA256", + preMessagePatterns: EmptyPreMessage, + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_s, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se]), + ], + ), + "XXpsk0": HandshakePattern( + name: "Noise_XXpsk0_25519_ChaChaPoly_SHA256", + preMessagePatterns: EmptyPreMessage, + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_psk, T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_s, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se]), + ], + ), + "WakuPairing": HandshakePattern( + name: "Noise_WakuPairing_25519_ChaChaPoly_SHA256", + preMessagePatterns: @[PreMessagePattern(direction: D_l, tokens: @[T_e])], + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e, T_ee]), + MessagePattern(direction: D_l, tokens: @[T_s, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se, T_ss]), + ], + ), + }.toTable() # Supported Protocol ID for PayloadV2 objects # Protocol IDs are defined according to https://rfc.vac.dev/spec/35/#specification - PayloadV2ProtocolIDs* = { - - "": 0.uint8, - "Noise_K1K1_25519_ChaChaPoly_SHA256": 10.uint8, - "Noise_XK1_25519_ChaChaPoly_SHA256": 11.uint8, - "Noise_XX_25519_ChaChaPoly_SHA256": 12.uint8, - "Noise_XXpsk0_25519_ChaChaPoly_SHA256": 13.uint8, + PayloadV2ProtocolIDs* = { + "": 0.uint8, + "Noise_K1K1_25519_ChaChaPoly_SHA256": 10.uint8, + "Noise_XK1_25519_ChaChaPoly_SHA256": 11.uint8, + "Noise_XX_25519_ChaChaPoly_SHA256": 12.uint8, + "Noise_XXpsk0_25519_ChaChaPoly_SHA256": 13.uint8, "Noise_WakuPairing_25519_ChaChaPoly_SHA256": 14.uint8, - "ChaChaPoly": 30.uint8 - - }.toTable() \ No newline at end of file + "ChaChaPoly": 30.uint8, + }.toTable() diff --git a/waku/waku_noise/noise_utils.nim b/waku/waku_noise/noise_utils.nim index 4b70f9aa4..23201f965 100644 --- a/waku/waku_noise/noise_utils.nim +++ b/waku/waku_noise/noise_utils.nim @@ -37,8 +37,7 @@ proc randomSeqByte*(rng: var HmacDrbgContext, size: int): seq[byte] = # Pads a payload according to PKCS#7 as per RFC 5652 https://datatracker.ietf.org/doc/html/rfc5652#section-6.3 proc pkcs7_pad*(payload: seq[byte], paddingSize: int): seq[byte] = - - assert(paddingSize<256) + assert(paddingSize < 256) let k = paddingSize - (payload.len mod paddingSize) @@ -56,36 +55,44 @@ proc pkcs7_pad*(payload: seq[byte], paddingSize: int): seq[byte] = # Unpads a payload according to PKCS#7 as per RFC 5652 https://datatracker.ietf.org/doc/html/rfc5652#section-6.3 proc pkcs7_unpad*(payload: seq[byte], paddingSize: int): seq[byte] = let k = payload[payload.high] - let unpadded = payload[0..payload.high-k.int] + let unpadded = payload[0 .. 
payload.high - k.int] return unpadded proc seqToDigest256*(sequence: seq[byte]): MDigest[256] = var digest: MDigest[256] - for i in 0.. 0: - raise newException(NoiseSomeMessagesWereLost, "Message nametag is present in buffer but is not the next expected nametag. One or more messages were probably lost") + raise newException( + NoiseSomeMessagesWereLost, + "Message nametag is present in buffer but is not the next expected nametag. One or more messages were probably lost", + ) # index is 0, hence the read message tag is the next expected one return ok(true) @@ -293,7 +304,6 @@ proc pop*(mntb: var MessageNametagBuffer): MessageNametag = # Performs a Diffie-Hellman operation between two elliptic curve keys (one private, one public) proc dh*(private: EllipticCurveKey, public: EllipticCurveKey): EllipticCurveKey = - # The output result of the Diffie-Hellman operation var output: EllipticCurveKey @@ -366,8 +376,9 @@ proc serializeNoisePublicKey*(noisePublicKey: NoisePublicKey): seq[byte] = # Converts a serialized Noise public key to a NoisePublicKey object as in # https://rfc.vac.dev/spec/35/#public-keys-serialization -proc intoNoisePublicKey*(serializedNoisePublicKey: seq[byte]): NoisePublicKey - {.raises: [Defect, NoisePublicKeyError].} = +proc intoNoisePublicKey*( + serializedNoisePublicKey: seq[byte] +): NoisePublicKey {.raises: [Defect, NoisePublicKeyError].} = var noisePublicKey: NoisePublicKey # We retrieve the encryption flag noisePublicKey.flag = serializedNoisePublicKey[0] @@ -375,12 +386,13 @@ proc intoNoisePublicKey*(serializedNoisePublicKey: seq[byte]): NoisePublicKey if not (noisePublicKey.flag == 0 or noisePublicKey.flag == 1): raise newException(NoisePublicKeyError, "Invalid flag in serialized public key") # We set the remaining sequence to the pk value (this may be an encrypted or not encrypted X coordinate) - noisePublicKey.pk = serializedNoisePublicKey[1.. 
w.maxMessageSize: + let message = fmt"Message size exceeded maximum of {w.maxMessageSize} bytes" + debug "Invalid Waku Message", error = message + return err(message) - if messageSizeBytes > w.maxMessageSize: - let message = fmt"Message size exceeded maximum of {w.maxMessageSize} bytes" - debug "Invalid Waku Message", error=message - return err(message) + for (validator, message) in w.wakuValidators: + let validatorRes = await validator(pubsubTopic, msg) + if validatorRes != ValidationResult.Accept: + if message.len > 0: + return err(message) + else: + return err("Validator failed") + return ok() - for (validator, message) in w.wakuValidators: - let validatorRes = await validator(pubsubTopic, msg) - if validatorRes != ValidationResult.Accept: - if message.len > 0: - return err(message) - else: - return err("Validator failed") - return ok() - -proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler): TopicHandler = - debug "subscribe", pubsubTopic=pubsubTopic +proc subscribe*( + w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler +): TopicHandler = + debug "subscribe", pubsubTopic = pubsubTopic # We need to wrap the handler since gossipsub doesn't understand WakuMessage - let wrappedHandler = - proc(pubsubTopic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].} = - let decMsg = WakuMessage.decode(data) - if decMsg.isErr(): - # fine if triggerSelf enabled, since validators are bypassed - error "failed to decode WakuMessage, validator passed a wrong message", error = decMsg.error - let fut = newFuture[void]() - fut.complete() - return fut - else: - return handler(pubsubTopic, decMsg.get()) + let wrappedHandler = proc( + pubsubTopic: string, data: seq[byte] + ): Future[void] {.gcsafe, raises: [].} = + let decMsg = WakuMessage.decode(data) + if decMsg.isErr(): + # fine if triggerSelf enabled, since validators are bypassed + error "failed to decode WakuMessage, validator passed a wrong message", + error = decMsg.error + let fut = newFuture[void]() + fut.complete() + return fut + else: + return handler(pubsubTopic, decMsg.get()) # Add the ordered validator to the topic # This assumes that if `w.validatorInserted.hasKey(pubSubTopic) is true`, it contains the ordered validator.
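# ---------------------------------------------------------------------------
# Editor's note (not part of the diff): a hedged sketch of driving the relay
# API above from an async proc. `relay`, `message` and DefaultPubsubTopic are
# assumed from the wider codebase; only the subscribe/publish signatures come
# from this file.
proc chatHandler(pubsubTopic: PubsubTopic, msg: WakuMessage): Future[void] {.async.} =
  # subscribe() wraps this WakuRelayHandler so gossipsub can deliver raw bytes
  debug "message received", pubsubTopic, contentTopic = msg.contentTopic

# Keep the returned TopicHandler if this handler should later be unsubscribed
let topicHandler = relay.subscribe(DefaultPubsubTopic, chatHandler)
# publish() encodes the WakuMessage and returns the number of peers reached
let peersReached = await relay.publish(DefaultPubsubTopic, message)
# --- end editor's note -----------------------------------------------------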
@@ -269,7 +274,7 @@ proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandle proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = ## Unsubscribe all handlers on this pubsub topic - debug "unsubscribe all", pubsubTopic=pubsubTopic + debug "unsubscribe all", pubsubTopic = pubsubTopic procCall GossipSub(w).unsubscribeAll(pubsubTopic) w.validatorInserted.del(pubsubTopic) @@ -277,12 +282,14 @@ proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: TopicHandler) = ## Unsubscribe this handler on this pubsub topic - debug "unsubscribe", pubsubTopic=pubsubTopic + debug "unsubscribe", pubsubTopic = pubsubTopic procCall GossipSub(w).unsubscribe(pubsubTopic, handler) -proc publish*(w: WakuRelay, pubsubTopic: PubsubTopic, message: WakuMessage): Future[int] {.async.} = - trace "publish", pubsubTopic=pubsubTopic +proc publish*( + w: WakuRelay, pubsubTopic: PubsubTopic, message: WakuMessage +): Future[int] {.async.} = + trace "publish", pubsubTopic = pubsubTopic let data = message.encode().buffer return await procCall GossipSub(w).publish(pubsubTopic, data) diff --git a/waku/waku_rln_relay.nim b/waku/waku_rln_relay.nim index a7058f37c..ce79f98d8 100644 --- a/waku/waku_rln_relay.nim +++ b/waku/waku_rln_relay.nim @@ -4,8 +4,4 @@ import ./waku_rln_relay/rln_relay, ./waku_rln_relay/contract -export - group_manager, - conversion_utils, - rln_relay, - contract +export group_manager, conversion_utils, rln_relay, contract diff --git a/waku/waku_rln_relay/contract.nim b/waku/waku_rln_relay/contract.nim index 8bccfc682..2ff1d92b9 100644 --- a/waku/waku_rln_relay/contract.nim +++ b/waku/waku_rln_relay/contract.nim @@ -1,14 +1,17 @@ when not defined(rln_v2): - # PoseidonHasherCode holds the bytecode of Poseidon hasher solidity smart contract: - # https://github.com/kilic/rlnapp/blob/master/packages/contracts/contracts/crypto/PoseidonHasher.sol - # the solidity contract is compiled separately and the resultant bytecode is copied here - const PoseidonHasherCode* = 
"0x608060405234801561001057600080fd5b50613e58806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063b189fd4c1461003b578063e493ef8c1461006b575b600080fd5b61005560048036038101906100509190613dcb565b610089565b6040516100629190613e07565b60405180910390f35b61007361009b565b6040516100809190613e07565b60405180910390f35b6000610094826100bf565b9050919050565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181565b60007f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000017f09c46e9ec68e9bd4fe1faaba294cba38a71aa177534cdd1b6c7dc0dbd0abd7a77f0c0356530896eec42a97ed937f3135cfc5142b3ae405b8343c1d83ffa604cb81840182828309838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1e28a1d935698ad1142e51182bb54cf4a00ea5aabd6268bd317ea977cc154a30830192507f27af2d831a9d2748080965db30e298e40e5757c3e008db964cf9e2b12b91251f82019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1e6f11ce60fc8f513a6a3cfe16ae175a41291462f214cd0879aaf43545b74e03830192507f2a67384d3bbd5e438541819cb681f0be04462ed14c3613d8f719206268d142d382019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0b66fdf356093a611609f8e12fbfecf0b985e381f025188936408f5d5c9f45d0830192507f012ee3ec1e78d470830c61093c2ade370b26c83cc5cebeeddaa6852dbdb09e2182019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0252ba5f6760bfbdfd88f67f8175e3fd6cd1c431b099b6bb2d108e7b445bb1b9830192507f179474cceca5ff676c6bec3cef54296354391a8935ff71d6ef5aeaad7ca932f182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2c24261379a51bfa9228ff4a503fd4ed9c1f974a264969b37e1a2589bbed2b91830192507f1cc1d7b62692e63eac2f288bd0695b43c2f63f5001fc0fc553e66c0551801b0582019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f255059301aada98bb2ed55f8
52979e9600784dbf17fbacd05d9eff5fd9c91b56830192507f28437be3ac1cb2e479e1f5c0eccd32b3aea24234970a8193b11c29ce7e59efd982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f28216a442f2e1f711ca4fa6b53766eb118548da8fb4f78d4338762c37f5f2043830192507f2c1f47cd17fa5adf1f39f4e7056dd03feee1efce03094581131f2377323482c982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f07abad02b7a5ebc48632bcc9356ceb7dd9dafca276638a63646b8566a621afc9830192507f0230264601ffdf29275b33ffaab51dfe9429f90880a69cd137da0c4d15f96c3c82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1bc973054e51d905a0f168656497ca40a864414557ee289e717e5d66899aa0a9830192507f2e1c22f964435008206c3157e86341edd249aff5c2d8421f2a6b22288f0a67fc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1224f38df67c5378121c1d5f461bbc509e8ea1598e46c9f7a70452bc2bba86b8830192507f02e4e69d8ba59e519280b4bd9ed0068fd7bfe8cd9dfeda1969d2989186cde20e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1f1eccc34aaba0137f5df81fc04ff3ee4f19ee364e653f076d47e9735d98018e830192507f1672ad3d709a353974266c3039a9a7311424448032cd1819eacb8a4d4284f58282019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f283e3fdc2c6e420c56f44af5192b4ae9cda6961f284d24991d2ed602df8c8fc7830192507f1c2a3d120c550ecfd0db0957170fa013683751f8fdff59d6614fbd69ff394bcc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f216f84877aac6172f7897a7323456efe143a9a43773ea6f296cb6b8177653fbd830192507f2c0d272becf2a75764ba7e8e3e28d12bceaa47ea61ca59a411a1f51552f9478882019150838384099050838385838409099250837f2b9d4b4110c
9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f16e34299865c0e28484ee7a74c454e9f170a5480abe0508fcb4a6c3d89546f43830192507f175ceba599e96f5b375a232a6fb9cc71772047765802290f48cd939755488fc582019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0c7594440dc48c16fead9e1758b028066aa410bfbc354f54d8c5ffbb44a1ee32830192507f1a3c29bc39f21bb5c466db7d7eb6fd8f760e20013ccf912c92479882d919fd8d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0ccfdd906f3426e5c0986ea049b253400855d349074f5a6695c8eeabcd22e68f830192507f14f6bc81d9f186f62bdb475ce6c9411866a7a8a3fd065b3ce0e699b67dd9e79682019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0962b82789fb3d129702ca70b2f6c5aacc099810c9c495c888edeb7386b97052830192507f1a880af7074d18b3bf20c79de25127bc13284ab01ef02575afef0c8f6a31a86d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f10cba18419a6a332cd5e77f0211c154b20af2924fc20ff3f4c3012bb7ae9311b830192507f057e62a9a8f89b3ebdc76ba63a9eaca8fa27b7319cae3406756a2849f302f10d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f287c971de91dc0abd44adf5384b4988cb961303bbf65cff5afa0413b44280cee830192507f21df3388af1687bbb3bca9da0cca908f1e562bc46d4aba4e6f7f7960e306891d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1be5c887d25bce703e25cc974d0934cd789df8f70b498fd83eff8b560e1682b3830192507f268da36f76e568fb68117175cea2cd0dd2cb5d42fda5acea48d59c2706a0d5c182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724
e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0e17ab091f6eae50c609beaf5510ececc5d8bb74135ebd05bd06460cc26a5ed6830192507f04d727e728ffa0a67aee535ab074a43091ef62d8cf83d270040f5caa1f62af4082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0ddbd7bf9c29341581b549762bc022ed33702ac10f1bfd862b15417d7e39ca6e830192507f2790eb3351621752768162e82989c6c234f5b0d1d3af9b588a29c49c8789654b82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1e457c601a63b73e4471950193d8a570395f3d9ab8b2fd0984b764206142f9e9830192507f21ae64301dca9625638d6ab2bbe7135ffa90ecd0c43ff91fc4c686fc46e091b082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0379f63c8ce3468d4da293166f494928854be9e3432e09555858534eed8d350b830192507e2d56420359d0266a744a080809e054ca0e4921a46686ac8c9f58a324c3504982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f123158e5965b5d9b1d68b3cd32e10bbeda8d62459e21f4090fc2c5af963515a6830192507f0be29fc40847a941661d14bbf6cbe0420fbb2b6f52836d4e60c80eb49cad9ec182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1ac96991dec2bb0557716142015a453c36db9d859cad5f9a233802f24fdf4c1a830192507f1596443f763dbcc25f4964fc61d23b3e5e12c9fa97f18a9251ca3355bcb0627e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f12e0bcd3654bdfa76b2861d4ec3aeae0f1857d9f17e715aed6d049eae3ba3212830192507f0fc92b4f1bbea82b9ea73d4af9af2a50ceabac7f37154b1904e6c76c7cf964ba82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1f9c0b1610446442d6f2e592a8013f40b14f7c77222
36f4f9c7e965233872762830192507f0ebd74244ae72675f8cde06157a782f4050d914da38b4c058d159f643dbbf4d382019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2cb7f0ed39e16e9f69a9fafd4ab951c03b0671e97346ee397a839839dccfc6d1830192507f1a9d6e2ecff022cc5605443ee41bab20ce761d0514ce526690c72bca7352d9bf82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2a115439607f335a5ea83c3bc44a9331d0c13326a9a7ba3087da182d648ec72f830192507f23f9b6529b5d040d15b8fa7aee3e3410e738b56305cd44f29535c115c5a4c06082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f05872c16db0f72a2249ac6ba484bb9c3a3ce97c16d58b68b260eb939f0e6e8a7830192507f1300bdee08bb7824ca20fb80118075f40219b6151d55b5c52b624a7cdeddf6a782019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f19b9b63d2f108e17e63817863a8f6c288d7ad29916d98cb1072e4e7b7d52b376830192507f015bee1357e3c015b5bda237668522f613d1c88726b5ec4224a20128481b4f7f82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2953736e94bb6b9f1b9707a4f1615e4efe1e1ce4bab218cbea92c785b128ffd1830192507f0b069353ba091618862f806180c0385f851b98d372b45f544ce7266ed6608dfc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f304f74d461ccc13115e4e0bcfb93817e55aeb7eb9306b64e4f588ac97d81f429830192507f15bbf146ce9bca09e8a33f5e77dfe4f5aad2a164a4617a4cb8ee5415cde913fc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0ab4dfe0c2742cde44901031487964ed9b8f4b850405c10ca9ff23859572c8c6830192507f0e32db320a044e3197f45f7649a19675ef5eedfea546dea9251de39f9639779a82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0f
db20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0a1756aa1f378ca4b27635a78b6888e66797733a82774896a3078efa516da016830192507f044c4a33b10f693447fd17177f952ef895e61d328f85efa94254d6a2a25d93ef82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2ed3611b725b8a70be655b537f66f700fe0879d79a496891d37b07b5466c4b8b830192507f1f9ba4e8bab7ce42c8ecc3d722aa2e0eadfdeb9cfdd347b5d8339ea7120858aa82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1b233043052e8c288f7ee907a84e518aa38e82ac4502066db74056f865c5d3da830192507f2431e1cc164bb8d074031ab72bd55b4c902053bfc0f14db0ca2f97b02087595482019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f082f934c91f5aac330cd6953a0a7db45a13e322097583319a791f273965801fd830192507f2b9a0a223e7538b0a34be074315542a3c77245e2ae7cbe999ad6bb930c48997c82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0e1cd91edd2cfa2cceb85483b887a9be8164163e75a8a00eb0b589cc70214e7d830192507f2e1eac0f2bfdfd63c951f61477e3698999774f19854d00f588d324601cebe2f982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0cbfa95f37fb74060c76158e769d6d157345784d8efdb33c23d748115b500b83830192507f08f05b3be923ed44d65ad49d8a61e9a676d991e3a77513d9980c232dfa4a4f8482019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f22719e2a070bcd0852bf8e21984d0443e7284925dc0758a325a2dd510c047ef6830192507f041f596a9ee1cb2bc060f7fcc3a1ab4c7bdbf036119982c0f41f62b2f26830c082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a
121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f233fd35de1be520a87628eb06f6b1d4c021be1c2d0dc464a19fcdd0986b10f89830192507f0524b46d1aa87a5e4325e0a423ebc810d31e078aa1b4707eefcb453c61c9c26782019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2c34f424c81e5716ce47fcac894b85824227bb954b0f3199cc4486237c515211830192507f0b5f2a4b63387819207effc2b5541fb72dd2025b5457cc97f33010327de4915e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f22207856082ccc54c5b72fe439d2cfd6c17435d2f57af6ceaefac41fe05c659f830192507f24d57a8bf5da63fe4e24159b7f8950b5cdfb210194caf79f27854048ce2c817182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0afab181fdd5e0583b371d75bd693f98374ad7097bb01a8573919bb23b79396e830192507f2dba9b108f208772998a52efac7cbd5676c0057194c16c0bf16290d62b1128ee82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f26349b66edb8b16f56f881c788f53f83cbb83de0bd592b255aff13e6bce420b3830192507f25af7ce0e5e10357685e95f92339753ad81a56d28ecc193b235288a3e6f137db82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f25b4ce7bd2294390c094d6a55edd68b970eed7aae88b2bff1f7c0187fe35011f830192507f22c543f10f6c89ec387e53f1908a88e5de9cef28ebdf30b18cb9d54c1e02b63182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0236f93e7789c4724fc7908a9f191e1e425e906a919d7a34df668e74882f87a9830192507f29350b401166ca010e7d27e37d05da99652bdae114eb01659cb497af980c4b5282019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0eed787d65820d3f6bd31bbab547f75a65edb75d844ebb89ee1260916652
363f830192507f07cc1170f13b46f2036a753f520b3291fdcd0e99bd94297d1906f656f4de6fad82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f22b939233b1d7205f49bcf613a3d30b1908786d7f9f5d10c2059435689e8acea830192507f01451762a0aab81c8aad1dc8bc33e870740f083a5aa85438add650ace60ae5a682019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f23506bb5d8727d4461fabf1025d46d1fe32eaa61dec7da57e704fec0892fce89830192507f2e484c44e838aea0bac06ae3f71bdd092a3709531e1efea97f8bd6890735552282019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0f4bc7d07ebafd64379e78c50bd2e42baf4a594545cedc2545418da26835b54c830192507f1f4d3c8f6583e9e5fa76637862faaee851582388725df460e620996d50d8e74e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f093514e0c70711f82660d07be0e4a988fae02abc7b681d9153eb9bcb48fe7389830192507f1adab0c8e2b3bad346699a2b5f3bc03643ee83ece47228f24a58e0a347e153d882019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1672b1726057d99dd14709ebb474641a378c1b94b8072bac1a22dbef9e80dad2830192507f1dfd53d4576af2e38f44f53fdcab468cc5d8e2fae0acc4ee30d47b239b479c1482019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0c6888a10b75b0f3a70a36263a37e17fe6d77d640f6fc3debc7f207753205c60830192507f1addb933a65be77092b34a7e77d12fe8611a61e00ee6848b85091ecca9d1e50882019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507ed7540dcd268a845c10ae18d1de933cf638ff5425f0afff7935628e299d1791830192507f140c0e42687e9ead01b2827a5664ca9c26fedde4acd99db1d316939d20b82c0e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea730
5462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2f0c3a115d4317d191ba89b8d13d1806c20a0f9b24f8c5edc091e2ae56565984830192507f0c4ee778ff7c14553006ed220cf9c81008a0cff670b22b82d8c538a1dc958c6182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1704f2766d46f82c3693f00440ccc3609424ed26c0acc66227c3d7485de74c69830192507f2f2d19cc3ea5d78ea7a02c1b51d244abf0769c9f8544e40239b66fe9009c3cfa82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1ae03853b75fcaba5053f112e2a8e8dcdd7ee6cb9cfed9c7d6c766a806fc6629830192507f0971aabf795241df51d131d0fa61aa5f3556921b2d6f014e4e41a86ddaf056d582019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1408c316e6014e1a91d4cf6b6e0de73eda624f8380df1c875f5c29f7bfe2f646830192507f1667f3fe2edbe850248abe42b543093b6c89f1f773ef285341691f39822ef5bd82019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f13bf7c5d0d2c4376a48b0a03557cdf915b81718409e5c133424c69576500fe37830192507f07620a6dfb0b6cec3016adf3d3533c24024b95347856b79719bc0ba743a62c2c82019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1574c7ef0c43545f36a8ca08bdbdd8b075d2959e2f322b731675de3e1982b4d0830192507f269e4b5b7a2eb21afd567970a717ceec5bd4184571c254fdc06e03a7ff8378f08201915083838409905083838583840909925083828309905083828583840909915083847f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88409857f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad586090106925082945050505050919050565b600080fd5b6000819050919050565b613da881613d95565b8114613db357600080fd5b50565b600081359050613dc581613d9f565b92915050565b600060208284031215613de157613de0613d90565b5b6000613def84828501613db6565b91505092915050565b613e0181613d95565b82525050565b6000602082019050613e1c6000830184613df8565b9291505056fea2646970667358221220f20e2267360e9aeb09de75ad232eed0f5dc05f7547d686c790b156f441d402b164736f6c634300080f0033" + # 
PoseidonHasherCode holds the bytecode of Poseidon hasher solidity smart contract: + # https://github.com/kilic/rlnapp/blob/master/packages/contracts/contracts/crypto/PoseidonHasher.sol + # the solidity contract is compiled separately and the resultant bytecode is copied here + const PoseidonHasherCode* = + "0x608060405234801561001057600080fd5b50613e58806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063b189fd4c1461003b578063e493ef8c1461006b575b600080fd5b61005560048036038101906100509190613dcb565b610089565b6040516100629190613e07565b60405180910390f35b61007361009b565b6040516100809190613e07565b60405180910390f35b6000610094826100bf565b9050919050565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181565b60007f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000017f09c46e9ec68e9bd4fe1faaba294cba38a71aa177534cdd1b6c7dc0dbd0abd7a77f0c0356530896eec42a97ed937f3135cfc5142b3ae405b8343c1d83ffa604cb81840182828309838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1e28a1d935698ad1142e51182bb54cf4a00ea5aabd6268bd317ea977cc154a30830192507f27af2d831a9d2748080965db30e298e40e5757c3e008db964cf9e2b12b91251f82019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1e6f11ce60fc8f513a6a3cfe16ae175a41291462f214cd0879aaf43545b74e03830192507f2a67384d3bbd5e438541819cb681f0be04462ed14c3613d8f719206268d142d382019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0b66fdf356093a611609f8e12fbfecf0b985e381f025188936408f5d5c9f45d0830192507f012ee3ec1e78d470830c61093c2ade370b26c83cc5cebeeddaa6852dbdb09e2182019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0252ba5f6760bfbdfd88f67f8175e3fd6cd1c431b099b6bb2d108e7b445bb1b9830192507f179474cceca5ff676c6bec3cef54296354391a8935ff71d6ef5aeaad7ca932f182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2c24261379a51bfa9228ff4a503fd4ed9c1f974a264969b37e1a2589bbed2b91830192507f1cc1d7b62692e63eac2f288bd0695b43c2f63f5001fc0fc553e66c0551801b0582019150838384099050838385838409099250837f2b9d4b4110c9ae997782
e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f255059301aada98bb2ed55f852979e9600784dbf17fbacd05d9eff5fd9c91b56830192507f28437be3ac1cb2e479e1f5c0eccd32b3aea24234970a8193b11c29ce7e59efd982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f28216a442f2e1f711ca4fa6b53766eb118548da8fb4f78d4338762c37f5f2043830192507f2c1f47cd17fa5adf1f39f4e7056dd03feee1efce03094581131f2377323482c982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f07abad02b7a5ebc48632bcc9356ceb7dd9dafca276638a63646b8566a621afc9830192507f0230264601ffdf29275b33ffaab51dfe9429f90880a69cd137da0c4d15f96c3c82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1bc973054e51d905a0f168656497ca40a864414557ee289e717e5d66899aa0a9830192507f2e1c22f964435008206c3157e86341edd249aff5c2d8421f2a6b22288f0a67fc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1224f38df67c5378121c1d5f461bbc509e8ea1598e46c9f7a70452bc2bba86b8830192507f02e4e69d8ba59e519280b4bd9ed0068fd7bfe8cd9dfeda1969d2989186cde20e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1f1eccc34aaba0137f5df81fc04ff3ee4f19ee364e653f076d47e9735d98018e830192507f1672ad3d709a353974266c3039a9a7311424448032cd1819eacb8a4d4284f58282019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f283e3fdc2c6e420c56f44af5192b4ae9cda6961f284d24991d2ed602df8c8fc7830192507f1c2a3d120c550ecfd0db0957170fa013683751f8fdff59d6614fbd69ff394bcc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade85
7e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f216f84877aac6172f7897a7323456efe143a9a43773ea6f296cb6b8177653fbd830192507f2c0d272becf2a75764ba7e8e3e28d12bceaa47ea61ca59a411a1f51552f9478882019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f16e34299865c0e28484ee7a74c454e9f170a5480abe0508fcb4a6c3d89546f43830192507f175ceba599e96f5b375a232a6fb9cc71772047765802290f48cd939755488fc582019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0c7594440dc48c16fead9e1758b028066aa410bfbc354f54d8c5ffbb44a1ee32830192507f1a3c29bc39f21bb5c466db7d7eb6fd8f760e20013ccf912c92479882d919fd8d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0ccfdd906f3426e5c0986ea049b253400855d349074f5a6695c8eeabcd22e68f830192507f14f6bc81d9f186f62bdb475ce6c9411866a7a8a3fd065b3ce0e699b67dd9e79682019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0962b82789fb3d129702ca70b2f6c5aacc099810c9c495c888edeb7386b97052830192507f1a880af7074d18b3bf20c79de25127bc13284ab01ef02575afef0c8f6a31a86d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f10cba18419a6a332cd5e77f0211c154b20af2924fc20ff3f4c3012bb7ae9311b830192507f057e62a9a8f89b3ebdc76ba63a9eaca8fa27b7319cae3406756a2849f302f10d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f287c971de91dc0abd44adf5384b4988cb961303bbf65cff5afa0413b44280cee830192507f21df3388af1687bbb3bca9da0cca908f1e562bc46d4aba4e6f7f7960e306891d82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1be5c887d25bce703e25cc974d0934cd789df8f70b498fd83e
ff8b560e1682b3830192507f268da36f76e568fb68117175cea2cd0dd2cb5d42fda5acea48d59c2706a0d5c182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0e17ab091f6eae50c609beaf5510ececc5d8bb74135ebd05bd06460cc26a5ed6830192507f04d727e728ffa0a67aee535ab074a43091ef62d8cf83d270040f5caa1f62af4082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0ddbd7bf9c29341581b549762bc022ed33702ac10f1bfd862b15417d7e39ca6e830192507f2790eb3351621752768162e82989c6c234f5b0d1d3af9b588a29c49c8789654b82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1e457c601a63b73e4471950193d8a570395f3d9ab8b2fd0984b764206142f9e9830192507f21ae64301dca9625638d6ab2bbe7135ffa90ecd0c43ff91fc4c686fc46e091b082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0379f63c8ce3468d4da293166f494928854be9e3432e09555858534eed8d350b830192507e2d56420359d0266a744a080809e054ca0e4921a46686ac8c9f58a324c3504982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f123158e5965b5d9b1d68b3cd32e10bbeda8d62459e21f4090fc2c5af963515a6830192507f0be29fc40847a941661d14bbf6cbe0420fbb2b6f52836d4e60c80eb49cad9ec182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1ac96991dec2bb0557716142015a453c36db9d859cad5f9a233802f24fdf4c1a830192507f1596443f763dbcc25f4964fc61d23b3e5e12c9fa97f18a9251ca3355bcb0627e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f12e0bcd3654bdfa76b2861d4ec3aeae0f1857d9f17e715aed6d049eae3ba3212830192507f0fc92b4f1bbea82b9ea73d4af9af2a50ceabac7f37154b1904e6c76c7cf964ba82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02
bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1f9c0b1610446442d6f2e592a8013f40b14f7c7722236f4f9c7e965233872762830192507f0ebd74244ae72675f8cde06157a782f4050d914da38b4c058d159f643dbbf4d382019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2cb7f0ed39e16e9f69a9fafd4ab951c03b0671e97346ee397a839839dccfc6d1830192507f1a9d6e2ecff022cc5605443ee41bab20ce761d0514ce526690c72bca7352d9bf82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2a115439607f335a5ea83c3bc44a9331d0c13326a9a7ba3087da182d648ec72f830192507f23f9b6529b5d040d15b8fa7aee3e3410e738b56305cd44f29535c115c5a4c06082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f05872c16db0f72a2249ac6ba484bb9c3a3ce97c16d58b68b260eb939f0e6e8a7830192507f1300bdee08bb7824ca20fb80118075f40219b6151d55b5c52b624a7cdeddf6a782019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f19b9b63d2f108e17e63817863a8f6c288d7ad29916d98cb1072e4e7b7d52b376830192507f015bee1357e3c015b5bda237668522f613d1c88726b5ec4224a20128481b4f7f82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2953736e94bb6b9f1b9707a4f1615e4efe1e1ce4bab218cbea92c785b128ffd1830192507f0b069353ba091618862f806180c0385f851b98d372b45f544ce7266ed6608dfc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f304f74d461ccc13115e4e0bcfb93817e55aeb7eb9306b64e4f588ac97d81f429830192507f15bbf146ce9bca09e8a33f5e77dfe4f5aad2a164a4617a4cb8ee5415cde913fc82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147
943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0ab4dfe0c2742cde44901031487964ed9b8f4b850405c10ca9ff23859572c8c6830192507f0e32db320a044e3197f45f7649a19675ef5eedfea546dea9251de39f9639779a82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0a1756aa1f378ca4b27635a78b6888e66797733a82774896a3078efa516da016830192507f044c4a33b10f693447fd17177f952ef895e61d328f85efa94254d6a2a25d93ef82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2ed3611b725b8a70be655b537f66f700fe0879d79a496891d37b07b5466c4b8b830192507f1f9ba4e8bab7ce42c8ecc3d722aa2e0eadfdeb9cfdd347b5d8339ea7120858aa82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1b233043052e8c288f7ee907a84e518aa38e82ac4502066db74056f865c5d3da830192507f2431e1cc164bb8d074031ab72bd55b4c902053bfc0f14db0ca2f97b02087595482019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f082f934c91f5aac330cd6953a0a7db45a13e322097583319a791f273965801fd830192507f2b9a0a223e7538b0a34be074315542a3c77245e2ae7cbe999ad6bb930c48997c82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0e1cd91edd2cfa2cceb85483b887a9be8164163e75a8a00eb0b589cc70214e7d830192507f2e1eac0f2bfdfd63c951f61477e3698999774f19854d00f588d324601cebe2f982019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0cbfa95f37fb74060c76158e769d6d157345784d8efdb33c23d748115b500b83830192507f08f05b3be923ed44d65ad49d8a61e9a676d991e3a77513d9980c232dfa4a4f8482019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f22719e2a070bcd0852bf8e21984d0443e7284925dc0758a325a2dd510c047ef683019
2507f041f596a9ee1cb2bc060f7fcc3a1ab4c7bdbf036119982c0f41f62b2f26830c082019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f233fd35de1be520a87628eb06f6b1d4c021be1c2d0dc464a19fcdd0986b10f89830192507f0524b46d1aa87a5e4325e0a423ebc810d31e078aa1b4707eefcb453c61c9c26782019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2c34f424c81e5716ce47fcac894b85824227bb954b0f3199cc4486237c515211830192507f0b5f2a4b63387819207effc2b5541fb72dd2025b5457cc97f33010327de4915e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f22207856082ccc54c5b72fe439d2cfd6c17435d2f57af6ceaefac41fe05c659f830192507f24d57a8bf5da63fe4e24159b7f8950b5cdfb210194caf79f27854048ce2c817182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0afab181fdd5e0583b371d75bd693f98374ad7097bb01a8573919bb23b79396e830192507f2dba9b108f208772998a52efac7cbd5676c0057194c16c0bf16290d62b1128ee82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f26349b66edb8b16f56f881c788f53f83cbb83de0bd592b255aff13e6bce420b3830192507f25af7ce0e5e10357685e95f92339753ad81a56d28ecc193b235288a3e6f137db82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f25b4ce7bd2294390c094d6a55edd68b970eed7aae88b2bff1f7c0187fe35011f830192507f22c543f10f6c89ec387e53f1908a88e5de9cef28ebdf30b18cb9d54c1e02b63182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0236f93e7789c4724fc7908a9f191e1e425e906a919d7a34df668e74882f87a9830192507f29350b401166ca010e7d27e37d05da99652bdae114eb01659cb497af980c4b5282019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f
8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0eed787d65820d3f6bd31bbab547f75a65edb75d844ebb89ee1260916652363f830192507f07cc1170f13b46f2036a753f520b3291fdcd0e99bd94297d1906f656f4de6fad82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f22b939233b1d7205f49bcf613a3d30b1908786d7f9f5d10c2059435689e8acea830192507f01451762a0aab81c8aad1dc8bc33e870740f083a5aa85438add650ace60ae5a682019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f23506bb5d8727d4461fabf1025d46d1fe32eaa61dec7da57e704fec0892fce89830192507f2e484c44e838aea0bac06ae3f71bdd092a3709531e1efea97f8bd6890735552282019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0f4bc7d07ebafd64379e78c50bd2e42baf4a594545cedc2545418da26835b54c830192507f1f4d3c8f6583e9e5fa76637862faaee851582388725df460e620996d50d8e74e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f093514e0c70711f82660d07be0e4a988fae02abc7b681d9153eb9bcb48fe7389830192507f1adab0c8e2b3bad346699a2b5f3bc03643ee83ece47228f24a58e0a347e153d882019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1672b1726057d99dd14709ebb474641a378c1b94b8072bac1a22dbef9e80dad2830192507f1dfd53d4576af2e38f44f53fdcab468cc5d8e2fae0acc4ee30d47b239b479c1482019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f0c6888a10b75b0f3a70a36263a37e17fe6d77d640f6fc3debc7f207753205c60830192507f1addb933a65be77092b34a7e77d12fe8611a61e00ee6848b85091ecca9d1e50882019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0
cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507ed7540dcd268a845c10ae18d1de933cf638ff5425f0afff7935628e299d1791830192507f140c0e42687e9ead01b2827a5664ca9c26fedde4acd99db1d316939d20b82c0e82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f2f0c3a115d4317d191ba89b8d13d1806c20a0f9b24f8c5edc091e2ae56565984830192507f0c4ee778ff7c14553006ed220cf9c81008a0cff670b22b82d8c538a1dc958c6182019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1704f2766d46f82c3693f00440ccc3609424ed26c0acc66227c3d7485de74c69830192507f2f2d19cc3ea5d78ea7a02c1b51d244abf0769c9f8544e40239b66fe9009c3cfa82019150838384099050838385838409099250837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1ae03853b75fcaba5053f112e2a8e8dcdd7ee6cb9cfed9c7d6c766a806fc6629830192507f0971aabf795241df51d131d0fa61aa5f3556921b2d6f014e4e41a86ddaf056d582019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1408c316e6014e1a91d4cf6b6e0de73eda624f8380df1c875f5c29f7bfe2f646830192507f1667f3fe2edbe850248abe42b543093b6c89f1f773ef285341691f39822ef5bd82019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f13bf7c5d0d2c4376a48b0a03557cdf915b81718409e5c133424c69576500fe37830192507f07620a6dfb0b6cec3016adf3d3533c24024b95347856b79719bc0ba743a62c2c82019150838384099050838385838409099250838283099050838285838409099150837f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88309847f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad58509019050837f1274e649a32ed355a31a6ed69724e1adade857e86eb5c3a121bcd147943203c88309847f0cc57cdbb08507d62bf67a4493cc262fb6c09d557013fff1f573f431221f8ff985090191508092507f1574c7ef0c43545f36a8ca08bdbdd8b075d2959e2f322b731675de3e1982b4d0830192507f269e4b5b7a2eb21afd567970a717ceec5bd4184571c254fdc06e03a7ff8378f08201915083838409905083838583840909925083828309905083828583840909915083847f2b9d4b4110c9ae997782e1509b1d0fdb20a7c02bbd8bea7305462b9f8125b1e88409857f066f6f85d6f68a85ec10345351a23a3aaf07f38af8c952a7bceca70bd2af7ad586090106925082945050505050919050565b600080fd5b6000819050919050565b613da881613d95565b8114613db357600080fd5b50565b600081359050613dc58161
3d9f565b92915050565b600060208284031215613de157613de0613d90565b5b6000613def84828501613db6565b91505092915050565b613e0181613d95565b82525050565b6000602082019050613e1c6000830184613df8565b9291505056fea2646970667358221220f20e2267360e9aeb09de75ad232eed0f5dc05f7547d686c790b156f441d402b164736f6c634300080f0033" - # RegistryContractCode contains the bytecode of the membership solidity smart contract: - # https://github.com/waku-org/rln-contract/blob/fbafa7b0bb47c5233f50ba82992c572795e7e9a6/deployments/sepolia/WakuRlnRegistry.json - # the solidity contract is compiled separately and the resultant bytecode is copied here - const RegistryContractCode* = "0x60a06040526000600260006101000a81548161ffff021916908361ffff1602179055503480156200002f57600080fd5b5060405162002f2a38038062002f2a8339818101604052810190620000559190620001e6565b6200007562000069620000b060201b60201c565b620000b860201b60201c565b8073ffffffffffffffffffffffffffffffffffffffff1660808173ffffffffffffffffffffffffffffffffffffffff16815250505062000218565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000620001ae8262000181565b9050919050565b620001c081620001a1565b8114620001cc57600080fd5b50565b600081519050620001e081620001b5565b92915050565b600060208284031215620001ff57620001fe6200017c565b5b60006200020f84828501620001cf565b91505092915050565b608051612ce862000242600039600081816103e2015281816104120152610ad80152612ce86000f3fe60806040523480156200001157600080fd5b5060043610620000e25760003560e01c8063ab02492a1162000099578063ef653d5e116200006f578063ef653d5e14620001d1578063f184ef4c14620001f1578063f2fde38b1462000213578063f5542147146200023357620000e2565b8063ab02492a1462000183578063cf61637414620001a3578063d44fda1f14620001c557620000e2565b806326e0fc1f14620000e7578063331b6ab3146200010757806342f542e21462000129578063715018a614620001355780637a34289d14620001415780638da5cb5b1462000161575b600080fd5b620001056004803603810190620000ff919062001045565b62000269565b005b62000111620003e0565b60405162000120919062001117565b60405180910390f35b6200013362000404565b005b6200013f6200048b565b005b6200015f6004803603810190620001599190620011a2565b620004a3565b005b6200016b6200075f565b6040516200017a91906200121c565b60405180910390f35b620001a160048036038101906200019b919062001239565b62000788565b005b620001ad6200088b565b604051620001bc9190620012b4565b60405180910390f35b620001cf6200089f565b005b620001ef6004803603810190620001e9919062001302565b62000a09565b005b620001fb62000c9e565b6040516200020a9190620012b4565b60405180910390f35b6200023160048036038101906200022b919062001302565b62000cb2565b005b6200025160048036038101906200024b919062001334565b62000d3c565b6040516200026091906200121c565b60405180910390f35b600060149054906101000a900461ffff1661ffff168261ffff1610620002bb576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000600167ffffffffffffffff811115620002db57620002da62001366565b5b6040519080825280602002602001820160405280156200030a5781602001602082028036833780820191505090505b509050818160008151811062000325576200032462001395565b5b602002602001018181525050600160008461ffff1661ffff16815260200190815260200160002060009054906101000a900473fffffffffffffffffffffffffffffffffff
fffff1673ffffffffffffffffffffffffffffffffffffffff16637a34289d826040518263ffffffff1660e01b8152600401620003a7919062001492565b600060405180830381600087803b158015620003c257600080fd5b505af1158015620003d7573d6000803e3d6000fd5b50505050505050565b7f000000000000000000000000000000000000000000000000000000000000000081565b6200040e62000d6f565b60007f0000000000000000000000000000000000000000000000000000000000000000600060149054906101000a900461ffff16604051620004509062000fb3565b6200045d929190620014b6565b604051809103906000f0801580156200047a573d6000803e3d6000fd5b509050620004888162000df4565b50565b6200049562000d6f565b620004a1600062000ee7565b565b600060149054906101000a900461ffff1661ffff16600260009054906101000a900461ffff1661ffff161062000505576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5b6001156200075b5760016000600260009054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16637a34289d83836040518363ffffffff1660e01b8152600401620005969291906200155a565b600060405180830381600087803b158015620005b157600080fd5b505af1925050508015620005c3575060015b6200074f573d8060008114620005f6576040519150601f19603f3d011682016040523d82523d6000602084013e620005fb565b606091505b506040516024016040516020818303038152906040527f57f69531000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050805190602001208180519060200120146200069c57805181602001fd5b600060149054906101000a900461ffff1661ffff166001600260009054906101000a900461ffff16620006d09190620015af565b61ffff16106200070c576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600260008282829054906101000a900461ffff166200072e9190620015af565b92506101000a81548161ffff021916908361ffff1602179055505062000755565b6200075b565b62000506565b5050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b600060149054906101000a900461ffff1661ffff168361ffff1610620007da576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600160008461ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16637a34289d83836040518363ffffffff1660e01b8152600401620008529291906200155a565b600060405180830381600087803b1580156200086d57600080fd5b505af115801562000882573d6000803e3d6000fd5b50505050505050565b600260009054906101000a900461ffff1681565b620008a962000d6f565b600060149054906101000a900461ffff1661ffff16600260009054906101000a900461ffff1661ffff16106200090b576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff16600160006001600260009054906101000a900461ffff16620009469190620015af565b61ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1603620009cb576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600260008282829054906101000a900461ffff16620009ed9190620015af565b92506101000a81548161ffff021916908361ffff160217905550565b62000a1362000d6f565b600073ffffffffffffffffffffffffffffffffffffffff16600160008060149054906101000a900461ffff1661ffff1661ffff168152602001908152602001600020600090549061
01000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161462000ad157806040517f9cfabd1600000000000000000000000000000000000000000000000000000000815260040162000ac891906200121c565b60405180910390fd5b60008190507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1663331b6ab36040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000b59573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000b7f919062001633565b73ffffffffffffffffffffffffffffffffffffffff161462000bcc576040517eaec95400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600060149054906101000a900461ffff1661ffff168173ffffffffffffffffffffffffffffffffffffffff166328b070e06040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000c2d573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000c5391906200167c565b61ffff161462000c8f576040517fb893b72300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b62000c9a8262000df4565b5050565b600060149054906101000a900461ffff1681565b62000cbc62000d6f565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff160362000d2e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000d259062001735565b60405180910390fd5b62000d398162000ee7565b50565b60016020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b62000d7962000fab565b73ffffffffffffffffffffffffffffffffffffffff1662000d996200075f565b73ffffffffffffffffffffffffffffffffffffffff161462000df2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000de990620017a7565b60405180910390fd5b565b80600160008060149054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507fcf6a3b406170499209d0fcf152a1605c7c5a5c99c855e2bb803433fc960718eb600060149054906101000a900461ffff168260405162000ea0929190620017c9565b60405180910390a16001600060148282829054906101000a900461ffff1662000eca9190620015af565b92506101000a81548161ffff021916908361ffff16021790555050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600033905090565b6114bc80620017f783390190565b600080fd5b600080fd5b600061ffff82169050919050565b62000fe48162000fcb565b811462000ff057600080fd5b50565b600081359050620010048162000fd9565b92915050565b6000819050919050565b6200101f816200100a565b81146200102b57600080fd5b50565b6000813590506200103f8162001014565b92915050565b600080604083850312156200105f576200105e62000fc1565b5b60006200106f8582860162000ff3565b925050602062001082858286016200102e565b9150509250929050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b6000620010d7620010d1620010cb846200108c565b620010ac565b6200108c565b9050919050565b6000620010eb82620010b6565b9050919050565b6000620010ff82620010de565b9050919050565b6200111181620010f2565b82525050565b60006020820190506200112e600083018462001106565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f8401126
200115c576200115b62001134565b5b8235905067ffffffffffffffff8111156200117c576200117b62001139565b5b6020830191508360208202830111156200119b576200119a6200113e565b5b9250929050565b60008060208385031215620011bc57620011bb62000fc1565b5b600083013567ffffffffffffffff811115620011dd57620011dc62000fc6565b5b620011eb8582860162001143565b92509250509250929050565b600062001204826200108c565b9050919050565b6200121681620011f7565b82525050565b60006020820190506200123360008301846200120b565b92915050565b60008060006040848603121562001255576200125462000fc1565b5b6000620012658682870162000ff3565b935050602084013567ffffffffffffffff81111562001289576200128862000fc6565b5b620012978682870162001143565b92509250509250925092565b620012ae8162000fcb565b82525050565b6000602082019050620012cb6000830184620012a3565b92915050565b620012dc81620011f7565b8114620012e857600080fd5b50565b600081359050620012fc81620012d1565b92915050565b6000602082840312156200131b576200131a62000fc1565b5b60006200132b84828501620012eb565b91505092915050565b6000602082840312156200134d576200134c62000fc1565b5b60006200135d8482850162000ff3565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b620013fb816200100a565b82525050565b60006200140f8383620013f0565b60208301905092915050565b6000602082019050919050565b60006200143582620013c4565b620014418185620013cf565b93506200144e83620013e0565b8060005b838110156200148557815162001469888262001401565b975062001476836200141b565b92505060018101905062001452565b5085935050505092915050565b60006020820190508181036000830152620014ae818462001428565b905092915050565b6000604082019050620014cd60008301856200120b565b620014dc6020830184620012a3565b9392505050565b600080fd5b82818337600083830152505050565b6000620015058385620013cf565b93507f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8311156200153b576200153a620014e3565b5b6020830292506200154e838584620014e8565b82840190509392505050565b6000602082019050818103600083015262001577818486620014f7565b90509392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000620015bc8262000fcb565b9150620015c98362000fcb565b92508261ffff03821115620015e357620015e262001580565b5b828201905092915050565b6000620015fb82620011f7565b9050919050565b6200160d81620015ee565b81146200161957600080fd5b50565b6000815190506200162d8162001602565b92915050565b6000602082840312156200164c576200164b62000fc1565b5b60006200165c848285016200161c565b91505092915050565b600081519050620016768162000fd9565b92915050565b60006020828403121562001695576200169462000fc1565b5b6000620016a58482850162001665565b91505092915050565b600082825260208201905092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b60006200171d602683620016ae565b91506200172a82620016bf565b604082019050919050565b6000602082019050818103600083015262001750816200170e565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b60006200178f602083620016ae565b91506200179c8262001757565b602082019050919050565b60006020820190508181036000830152620017c28162001780565b9050919050565b6000604082019050620017e06000830185620012a3565b620017ef60208301846200120b565b939250505056fe61016060405260006001553480156200001757600080fd5b50604051620014bc380380620014bc83398181016040528101906200003d919062000290565b60006014836000620000
64620000586200011b60201b60201c565b6200012360201b60201c565b83608081815250508260a08181525050826001901b60c081815250508173ffffffffffffffffffffffffffffffffffffffff1660e08173ffffffffffffffffffffffffffffffffffffffff16815250508073ffffffffffffffffffffffffffffffffffffffff166101008173ffffffffffffffffffffffffffffffffffffffff16815250504363ffffffff166101208163ffffffff1681525050505050508061ffff166101408161ffff16815250505050620002d7565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006200021982620001ec565b9050919050565b6200022b816200020c565b81146200023757600080fd5b50565b6000815190506200024b8162000220565b92915050565b600061ffff82169050919050565b6200026a8162000251565b81146200027657600080fd5b50565b6000815190506200028a816200025f565b92915050565b60008060408385031215620002aa57620002a9620001e7565b5b6000620002ba858286016200023a565b9250506020620002cd8582860162000279565b9150509250929050565b60805160a05160c05160e05161010051610120516101405161117c620003406000396000610545015260006105e3015260006105690152600081816104ac015261058d0152600081816107560152610aac015260006106fc015260006107ac015261117c6000f3fe60806040526004361061011f5760003560e01c80638be9b119116100a0578063c5b208ff11610064578063c5b208ff146103c5578063d0383d6814610402578063f207564e1461042d578063f220b9ec14610449578063f2fde38b146104745761011f565b80638be9b119146102de5780638da5cb5b1461030757806398366e3514610332578063ae74552a1461035d578063bc499128146103885761011f565b80634add651e116100e75780634add651e146101f95780635daf08ca146102245780636bdcc8ab14610261578063715018a61461029e5780637a34289d146102b55761011f565b806322d9730c1461012457806328b070e0146101615780632b7ac3f31461018c578063331b6ab3146101b75780633ccfd60b146101e2575b600080fd5b34801561013057600080fd5b5061014b60048036038101906101469190610b48565b61049d565b6040516101589190610b90565b60405180910390f35b34801561016d57600080fd5b50610176610543565b6040516101839190610bc8565b60405180910390f35b34801561019857600080fd5b506101a1610567565b6040516101ae9190610c62565b60405180910390f35b3480156101c357600080fd5b506101cc61058b565b6040516101d99190610c9e565b60405180910390f35b3480156101ee57600080fd5b506101f76105af565b005b34801561020557600080fd5b5061020e6105e1565b60405161021b9190610cd8565b60405180910390f35b34801561023057600080fd5b5061024b60048036038101906102469190610b48565b610605565b6040516102589190610d02565b60405180910390f35b34801561026d57600080fd5b5061028860048036038101906102839190610b48565b61061d565b6040516102959190610b90565b60405180910390f35b3480156102aa57600080fd5b506102b361063d565b005b3480156102c157600080fd5b506102dc60048036038101906102d79190610d82565b610651565b005b3480156102ea57600080fd5b5061030560048036038101906103009190610e2f565b61069f565b005b34801561031357600080fd5b5061031c6106d1565b6040516103299190610ea4565b60405180910390f35b34801561033e57600080fd5b506103476106fa565b6040516103549190610d02565b60405180910390f35b34801561036957600080fd5b5061037261071e565b60405161037f9190610d02565b60405180910390f35b34801561039457600080fd5b506103af60048036038101906103aa9190610b48565b610724565b6040516103bc9190610d02565b60405180910390f35b3480156103d157600080fd5b506103ec60048036038101906103e79190610eeb565b61073c565b6040516103f99190610d02565
b60405180910390f35b34801561040e57600080fd5b50610417610754565b6040516104249190610d02565b60405180910390f35b61044760048036038101906104429190610b48565b610778565b005b34801561045557600080fd5b5061045e6107aa565b60405161046b9190610d02565b60405180910390f35b34801561048057600080fd5b5061049b60048036038101906104969190610eeb565b6107ce565b005b600080821415801561053c57507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663e493ef8c6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610515573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105399190610f2d565b82105b9050919050565b7f000000000000000000000000000000000000000000000000000000000000000081565b7f000000000000000000000000000000000000000000000000000000000000000081565b7f000000000000000000000000000000000000000000000000000000000000000081565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b60036020528060005260406000206000915090505481565b60046020528060005260406000206000915054906101000a900460ff1681565b610645610851565b61064f60006108cf565b565b610659610851565b600082829050905060005b818110156106995761068e84848381811061068257610681610f5a565b5b90506020020135610993565b806001019050610664565b50505050565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b7f000000000000000000000000000000000000000000000000000000000000000081565b60015481565b60026020528060005260406000206000915090505481565b60056020528060005260406000206000915090505481565b7f000000000000000000000000000000000000000000000000000000000000000081565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b6107d6610851565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610845576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161083c9061100c565b60405180910390fd5b61084e816108cf565b50565b610859610a0c565b73ffffffffffffffffffffffffffffffffffffffff166108776106d1565b73ffffffffffffffffffffffffffffffffffffffff16146108cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016108c490611078565b60405180910390fd5b565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b61099c81610a14565b600160036000838152602001908152602001600020819055507f5a92c2530f207992057b9c3e544108ffce3beda4a63719f316967c49bf6159d2816001546040516109e8929190611098565b60405180910390a16001806000828254610a0291906110f0565b9250508190555050565b600033905090565b610a1d8161049d565b610a5e57806040517f7f3e75af000000000000000000000000000000000000000000000000000000008152600401610a559190610d02565b60405180910390fd5b6000600360008381526020019081526020016000205414610aaa576040517e0a60f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060015410610b05576040517f57f69531000000000000000000000000000000
00000000000000000000000000815260040160405180910390fd5b50565b600080fd5b600080fd5b6000819050919050565b610b2581610b12565b8114610b3057600080fd5b50565b600081359050610b4281610b1c565b92915050565b600060208284031215610b5e57610b5d610b08565b5b6000610b6c84828501610b33565b91505092915050565b60008115159050919050565b610b8a81610b75565b82525050565b6000602082019050610ba56000830184610b81565b92915050565b600061ffff82169050919050565b610bc281610bab565b82525050565b6000602082019050610bdd6000830184610bb9565b92915050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b6000610c28610c23610c1e84610be3565b610c03565b610be3565b9050919050565b6000610c3a82610c0d565b9050919050565b6000610c4c82610c2f565b9050919050565b610c5c81610c41565b82525050565b6000602082019050610c776000830184610c53565b92915050565b6000610c8882610c2f565b9050919050565b610c9881610c7d565b82525050565b6000602082019050610cb36000830184610c8f565b92915050565b600063ffffffff82169050919050565b610cd281610cb9565b82525050565b6000602082019050610ced6000830184610cc9565b92915050565b610cfc81610b12565b82525050565b6000602082019050610d176000830184610cf3565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f840112610d4257610d41610d1d565b5b8235905067ffffffffffffffff811115610d5f57610d5e610d22565b5b602083019150836020820283011115610d7b57610d7a610d27565b5b9250929050565b60008060208385031215610d9957610d98610b08565b5b600083013567ffffffffffffffff811115610db757610db6610b0d565b5b610dc385828601610d2c565b92509250509250929050565b6000610dda82610be3565b9050919050565b610dea81610dcf565b8114610df557600080fd5b50565b600081359050610e0781610de1565b92915050565b600081905082602060080282011115610e2957610e28610d27565b5b92915050565b60008060006101408486031215610e4957610e48610b08565b5b6000610e5786828701610b33565b9350506020610e6886828701610df8565b9250506040610e7986828701610e0d565b9150509250925092565b6000610e8e82610be3565b9050919050565b610e9e81610e83565b82525050565b6000602082019050610eb96000830184610e95565b92915050565b610ec881610e83565b8114610ed357600080fd5b50565b600081359050610ee581610ebf565b92915050565b600060208284031215610f0157610f00610b08565b5b6000610f0f84828501610ed6565b91505092915050565b600081519050610f2781610b1c565b92915050565b600060208284031215610f4357610f42610b08565b5b6000610f5184828501610f18565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082825260208201905092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b6000610ff6602683610f89565b915061100182610f9a565b604082019050919050565b6000602082019050818103600083015261102581610fe9565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b6000611062602083610f89565b915061106d8261102c565b602082019050919050565b6000602082019050818103600083015261109181611055565b9050919050565b60006040820190506110ad6000830185610cf3565b6110ba6020830184610cf3565b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006110fb82610b12565b915061110683610b12565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0382111561113b5761113a6110c1565b5b82820190509291505056fea2646970667358221220507a65829a9a551e42c8757b2613a358880e56baec43e730dff9d9aa252c9a3564736f6c634300080f0033a26469706673582212208cc71be49f97f4fad7da0c74d0576129e139d0ef30e3ac4be6a3214675ab1d5264736f6c634300080f0033" + # RegistryContractCode contains the bytecode of the membership solidity smart 
contract: + # https://github.com/waku-org/rln-contract/blob/fbafa7b0bb47c5233f50ba82992c572795e7e9a6/deployments/sepolia/WakuRlnRegistry.json + # the solidity contract is compiled separately and the resultant bytecode is copied here + const RegistryContractCode* = + "0x60a06040526000600260006101000a81548161ffff021916908361ffff1602179055503480156200002f57600080fd5b5060405162002f2a38038062002f2a8339818101604052810190620000559190620001e6565b6200007562000069620000b060201b60201c565b620000b860201b60201c565b8073ffffffffffffffffffffffffffffffffffffffff1660808173ffffffffffffffffffffffffffffffffffffffff16815250505062000218565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000620001ae8262000181565b9050919050565b620001c081620001a1565b8114620001cc57600080fd5b50565b600081519050620001e081620001b5565b92915050565b600060208284031215620001ff57620001fe6200017c565b5b60006200020f84828501620001cf565b91505092915050565b608051612ce862000242600039600081816103e2015281816104120152610ad80152612ce86000f3fe60806040523480156200001157600080fd5b5060043610620000e25760003560e01c8063ab02492a1162000099578063ef653d5e116200006f578063ef653d5e14620001d1578063f184ef4c14620001f1578063f2fde38b1462000213578063f5542147146200023357620000e2565b8063ab02492a1462000183578063cf61637414620001a3578063d44fda1f14620001c557620000e2565b806326e0fc1f14620000e7578063331b6ab3146200010757806342f542e21462000129578063715018a614620001355780637a34289d14620001415780638da5cb5b1462000161575b600080fd5b620001056004803603810190620000ff919062001045565b62000269565b005b62000111620003e0565b60405162000120919062001117565b60405180910390f35b6200013362000404565b005b6200013f6200048b565b005b6200015f6004803603810190620001599190620011a2565b620004a3565b005b6200016b6200075f565b6040516200017a91906200121c565b60405180910390f35b620001a160048036038101906200019b919062001239565b62000788565b005b620001ad6200088b565b604051620001bc9190620012b4565b60405180910390f35b620001cf6200089f565b005b620001ef6004803603810190620001e9919062001302565b62000a09565b005b620001fb62000c9e565b6040516200020a9190620012b4565b60405180910390f35b6200023160048036038101906200022b919062001302565b62000cb2565b005b6200025160048036038101906200024b919062001334565b62000d3c565b6040516200026091906200121c565b60405180910390f35b600060149054906101000a900461ffff1661ffff168261ffff1610620002bb576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000600167ffffffffffffffff811115620002db57620002da62001366565b5b6040519080825280602002602001820160405280156200030a5781602001602082028036833780820191505090505b509050818160008151811062000325576200032462001395565b5b602002602001018181525050600160008461ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16637a34289d826040518263ffffffff1660e01b8152600401620003a7919062001492565b600060405180830381600087803b158015620003c257600080fd5b505af1158015620003d7573d6000803e3d6000fd5b50505050505050565b7f000000000000000000000000000000000000000000000000000000000000000081565b6200040e62000d6f565b60007f000000000000000000000000000000000000000000000
0000000000000000000600060149054906101000a900461ffff16604051620004509062000fb3565b6200045d929190620014b6565b604051809103906000f0801580156200047a573d6000803e3d6000fd5b509050620004888162000df4565b50565b6200049562000d6f565b620004a1600062000ee7565b565b600060149054906101000a900461ffff1661ffff16600260009054906101000a900461ffff1661ffff161062000505576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5b6001156200075b5760016000600260009054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16637a34289d83836040518363ffffffff1660e01b8152600401620005969291906200155a565b600060405180830381600087803b158015620005b157600080fd5b505af1925050508015620005c3575060015b6200074f573d8060008114620005f6576040519150601f19603f3d011682016040523d82523d6000602084013e620005fb565b606091505b506040516024016040516020818303038152906040527f57f69531000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050805190602001208180519060200120146200069c57805181602001fd5b600060149054906101000a900461ffff1661ffff166001600260009054906101000a900461ffff16620006d09190620015af565b61ffff16106200070c576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600260008282829054906101000a900461ffff166200072e9190620015af565b92506101000a81548161ffff021916908361ffff1602179055505062000755565b6200075b565b62000506565b5050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b600060149054906101000a900461ffff1661ffff168361ffff1610620007da576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600160008461ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16637a34289d83836040518363ffffffff1660e01b8152600401620008529291906200155a565b600060405180830381600087803b1580156200086d57600080fd5b505af115801562000882573d6000803e3d6000fd5b50505050505050565b600260009054906101000a900461ffff1681565b620008a962000d6f565b600060149054906101000a900461ffff1661ffff16600260009054906101000a900461ffff1661ffff16106200090b576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff16600160006001600260009054906101000a900461ffff16620009469190620015af565b61ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1603620009cb576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600260008282829054906101000a900461ffff16620009ed9190620015af565b92506101000a81548161ffff021916908361ffff160217905550565b62000a1362000d6f565b600073ffffffffffffffffffffffffffffffffffffffff16600160008060149054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161462000ad157806040517f9cfabd1600000000000000000000000000000000000000000000000000000000815260040162000ac891906200121c565b60405180910390fd5b60008190507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffff
ffffffffffffffffffff1663331b6ab36040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000b59573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000b7f919062001633565b73ffffffffffffffffffffffffffffffffffffffff161462000bcc576040517eaec95400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600060149054906101000a900461ffff1661ffff168173ffffffffffffffffffffffffffffffffffffffff166328b070e06040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000c2d573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000c5391906200167c565b61ffff161462000c8f576040517fb893b72300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b62000c9a8262000df4565b5050565b600060149054906101000a900461ffff1681565b62000cbc62000d6f565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff160362000d2e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000d259062001735565b60405180910390fd5b62000d398162000ee7565b50565b60016020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b62000d7962000fab565b73ffffffffffffffffffffffffffffffffffffffff1662000d996200075f565b73ffffffffffffffffffffffffffffffffffffffff161462000df2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000de990620017a7565b60405180910390fd5b565b80600160008060149054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507fcf6a3b406170499209d0fcf152a1605c7c5a5c99c855e2bb803433fc960718eb600060149054906101000a900461ffff168260405162000ea0929190620017c9565b60405180910390a16001600060148282829054906101000a900461ffff1662000eca9190620015af565b92506101000a81548161ffff021916908361ffff16021790555050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600033905090565b6114bc80620017f783390190565b600080fd5b600080fd5b600061ffff82169050919050565b62000fe48162000fcb565b811462000ff057600080fd5b50565b600081359050620010048162000fd9565b92915050565b6000819050919050565b6200101f816200100a565b81146200102b57600080fd5b50565b6000813590506200103f8162001014565b92915050565b600080604083850312156200105f576200105e62000fc1565b5b60006200106f8582860162000ff3565b925050602062001082858286016200102e565b9150509250929050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b6000620010d7620010d1620010cb846200108c565b620010ac565b6200108c565b9050919050565b6000620010eb82620010b6565b9050919050565b6000620010ff82620010de565b9050919050565b6200111181620010f2565b82525050565b60006020820190506200112e600083018462001106565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f8401126200115c576200115b62001134565b5b8235905067ffffffffffffffff8111156200117c576200117b62001139565b5b6020830191508360208202830111156200119b576200119a6200113e565b5b9250929050565b60008060208385031215620011bc57620011bb62000fc1565b5b600083013567ffffffffffffffff811115620011dd57620011dc62000fc6565b5b620011eb8582860162001143565b92509250509250929050565b600062001204826200108c565b9050919050565
b6200121681620011f7565b82525050565b60006020820190506200123360008301846200120b565b92915050565b60008060006040848603121562001255576200125462000fc1565b5b6000620012658682870162000ff3565b935050602084013567ffffffffffffffff81111562001289576200128862000fc6565b5b620012978682870162001143565b92509250509250925092565b620012ae8162000fcb565b82525050565b6000602082019050620012cb6000830184620012a3565b92915050565b620012dc81620011f7565b8114620012e857600080fd5b50565b600081359050620012fc81620012d1565b92915050565b6000602082840312156200131b576200131a62000fc1565b5b60006200132b84828501620012eb565b91505092915050565b6000602082840312156200134d576200134c62000fc1565b5b60006200135d8482850162000ff3565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b620013fb816200100a565b82525050565b60006200140f8383620013f0565b60208301905092915050565b6000602082019050919050565b60006200143582620013c4565b620014418185620013cf565b93506200144e83620013e0565b8060005b838110156200148557815162001469888262001401565b975062001476836200141b565b92505060018101905062001452565b5085935050505092915050565b60006020820190508181036000830152620014ae818462001428565b905092915050565b6000604082019050620014cd60008301856200120b565b620014dc6020830184620012a3565b9392505050565b600080fd5b82818337600083830152505050565b6000620015058385620013cf565b93507f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8311156200153b576200153a620014e3565b5b6020830292506200154e838584620014e8565b82840190509392505050565b6000602082019050818103600083015262001577818486620014f7565b90509392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000620015bc8262000fcb565b9150620015c98362000fcb565b92508261ffff03821115620015e357620015e262001580565b5b828201905092915050565b6000620015fb82620011f7565b9050919050565b6200160d81620015ee565b81146200161957600080fd5b50565b6000815190506200162d8162001602565b92915050565b6000602082840312156200164c576200164b62000fc1565b5b60006200165c848285016200161c565b91505092915050565b600081519050620016768162000fd9565b92915050565b60006020828403121562001695576200169462000fc1565b5b6000620016a58482850162001665565b91505092915050565b600082825260208201905092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b60006200171d602683620016ae565b91506200172a82620016bf565b604082019050919050565b6000602082019050818103600083015262001750816200170e565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b60006200178f602083620016ae565b91506200179c8262001757565b602082019050919050565b60006020820190508181036000830152620017c28162001780565b9050919050565b6000604082019050620017e06000830185620012a3565b620017ef60208301846200120b565b939250505056fe61016060405260006001553480156200001757600080fd5b50604051620014bc380380620014bc83398181016040528101906200003d919062000290565b6000601483600062000064620000586200011b60201b60201c565b6200012360201b60201c565b83608081815250508260a08181525050826001901b60c081815250508173ffffffffffffffffffffffffffffffffffffffff1660e08173ffffffffffffffffffffffffffffffffffffffff16815250508073ffffffffffffffffffffffffffffffffffffffff166101008173ffffffffffffffffffffffffffffffffffffffff16815250504363ffffffff166101208163ffffffff1681525050505050508061ff
ff166101408161ffff16815250505050620002d7565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006200021982620001ec565b9050919050565b6200022b816200020c565b81146200023757600080fd5b50565b6000815190506200024b8162000220565b92915050565b600061ffff82169050919050565b6200026a8162000251565b81146200027657600080fd5b50565b6000815190506200028a816200025f565b92915050565b60008060408385031215620002aa57620002a9620001e7565b5b6000620002ba858286016200023a565b9250506020620002cd8582860162000279565b9150509250929050565b60805160a05160c05160e05161010051610120516101405161117c620003406000396000610545015260006105e3015260006105690152600081816104ac015261058d0152600081816107560152610aac015260006106fc015260006107ac015261117c6000f3fe60806040526004361061011f5760003560e01c80638be9b119116100a0578063c5b208ff11610064578063c5b208ff146103c5578063d0383d6814610402578063f207564e1461042d578063f220b9ec14610449578063f2fde38b146104745761011f565b80638be9b119146102de5780638da5cb5b1461030757806398366e3514610332578063ae74552a1461035d578063bc499128146103885761011f565b80634add651e116100e75780634add651e146101f95780635daf08ca146102245780636bdcc8ab14610261578063715018a61461029e5780637a34289d146102b55761011f565b806322d9730c1461012457806328b070e0146101615780632b7ac3f31461018c578063331b6ab3146101b75780633ccfd60b146101e2575b600080fd5b34801561013057600080fd5b5061014b60048036038101906101469190610b48565b61049d565b6040516101589190610b90565b60405180910390f35b34801561016d57600080fd5b50610176610543565b6040516101839190610bc8565b60405180910390f35b34801561019857600080fd5b506101a1610567565b6040516101ae9190610c62565b60405180910390f35b3480156101c357600080fd5b506101cc61058b565b6040516101d99190610c9e565b60405180910390f35b3480156101ee57600080fd5b506101f76105af565b005b34801561020557600080fd5b5061020e6105e1565b60405161021b9190610cd8565b60405180910390f35b34801561023057600080fd5b5061024b60048036038101906102469190610b48565b610605565b6040516102589190610d02565b60405180910390f35b34801561026d57600080fd5b5061028860048036038101906102839190610b48565b61061d565b6040516102959190610b90565b60405180910390f35b3480156102aa57600080fd5b506102b361063d565b005b3480156102c157600080fd5b506102dc60048036038101906102d79190610d82565b610651565b005b3480156102ea57600080fd5b5061030560048036038101906103009190610e2f565b61069f565b005b34801561031357600080fd5b5061031c6106d1565b6040516103299190610ea4565b60405180910390f35b34801561033e57600080fd5b506103476106fa565b6040516103549190610d02565b60405180910390f35b34801561036957600080fd5b5061037261071e565b60405161037f9190610d02565b60405180910390f35b34801561039457600080fd5b506103af60048036038101906103aa9190610b48565b610724565b6040516103bc9190610d02565b60405180910390f35b3480156103d157600080fd5b506103ec60048036038101906103e79190610eeb565b61073c565b6040516103f99190610d02565b60405180910390f35b34801561040e57600080fd5b50610417610754565b6040516104249190610d02565b60405180910390f35b61044760048036038101906104429190610b48565b610778565b005b34801561045557600080fd5b5061045e6107aa565b60405161046b9190610d02565b60405180910390f35b34801561048057600080fd5b5061049b60048036038101906104969190610eeb565b6107ce565b005b600080821415801561053c57507f00000000000000000000000
0000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663e493ef8c6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610515573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105399190610f2d565b82105b9050919050565b7f000000000000000000000000000000000000000000000000000000000000000081565b7f000000000000000000000000000000000000000000000000000000000000000081565b7f000000000000000000000000000000000000000000000000000000000000000081565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b60036020528060005260406000206000915090505481565b60046020528060005260406000206000915054906101000a900460ff1681565b610645610851565b61064f60006108cf565b565b610659610851565b600082829050905060005b818110156106995761068e84848381811061068257610681610f5a565b5b90506020020135610993565b806001019050610664565b50505050565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b7f000000000000000000000000000000000000000000000000000000000000000081565b60015481565b60026020528060005260406000206000915090505481565b60056020528060005260406000206000915090505481565b7f000000000000000000000000000000000000000000000000000000000000000081565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b6107d6610851565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610845576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161083c9061100c565b60405180910390fd5b61084e816108cf565b50565b610859610a0c565b73ffffffffffffffffffffffffffffffffffffffff166108776106d1565b73ffffffffffffffffffffffffffffffffffffffff16146108cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016108c490611078565b60405180910390fd5b565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b61099c81610a14565b600160036000838152602001908152602001600020819055507f5a92c2530f207992057b9c3e544108ffce3beda4a63719f316967c49bf6159d2816001546040516109e8929190611098565b60405180910390a16001806000828254610a0291906110f0565b9250508190555050565b600033905090565b610a1d8161049d565b610a5e57806040517f7f3e75af000000000000000000000000000000000000000000000000000000008152600401610a559190610d02565b60405180910390fd5b6000600360008381526020019081526020016000205414610aaa576040517e0a60f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060015410610b05576040517f57f6953100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50565b600080fd5b600080fd5b6000819050919050565b610b2581610b12565b8114610b3057600080fd5b50565b600081359050610b4281610b1c565b92915050565b600060208284031215610b5e57610b5d610b08565b5b6000610b6c84828501610b33565b91505092915050565b60008115159050919050565b610b8a81610b75565b82525050565b6000602082019050610ba56000830184610b81565b929150
50565b600061ffff82169050919050565b610bc281610bab565b82525050565b6000602082019050610bdd6000830184610bb9565b92915050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b6000610c28610c23610c1e84610be3565b610c03565b610be3565b9050919050565b6000610c3a82610c0d565b9050919050565b6000610c4c82610c2f565b9050919050565b610c5c81610c41565b82525050565b6000602082019050610c776000830184610c53565b92915050565b6000610c8882610c2f565b9050919050565b610c9881610c7d565b82525050565b6000602082019050610cb36000830184610c8f565b92915050565b600063ffffffff82169050919050565b610cd281610cb9565b82525050565b6000602082019050610ced6000830184610cc9565b92915050565b610cfc81610b12565b82525050565b6000602082019050610d176000830184610cf3565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f840112610d4257610d41610d1d565b5b8235905067ffffffffffffffff811115610d5f57610d5e610d22565b5b602083019150836020820283011115610d7b57610d7a610d27565b5b9250929050565b60008060208385031215610d9957610d98610b08565b5b600083013567ffffffffffffffff811115610db757610db6610b0d565b5b610dc385828601610d2c565b92509250509250929050565b6000610dda82610be3565b9050919050565b610dea81610dcf565b8114610df557600080fd5b50565b600081359050610e0781610de1565b92915050565b600081905082602060080282011115610e2957610e28610d27565b5b92915050565b60008060006101408486031215610e4957610e48610b08565b5b6000610e5786828701610b33565b9350506020610e6886828701610df8565b9250506040610e7986828701610e0d565b9150509250925092565b6000610e8e82610be3565b9050919050565b610e9e81610e83565b82525050565b6000602082019050610eb96000830184610e95565b92915050565b610ec881610e83565b8114610ed357600080fd5b50565b600081359050610ee581610ebf565b92915050565b600060208284031215610f0157610f00610b08565b5b6000610f0f84828501610ed6565b91505092915050565b600081519050610f2781610b1c565b92915050565b600060208284031215610f4357610f42610b08565b5b6000610f5184828501610f18565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082825260208201905092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b6000610ff6602683610f89565b915061100182610f9a565b604082019050919050565b6000602082019050818103600083015261102581610fe9565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b6000611062602083610f89565b915061106d8261102c565b602082019050919050565b6000602082019050818103600083015261109181611055565b9050919050565b60006040820190506110ad6000830185610cf3565b6110ba6020830184610cf3565b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006110fb82610b12565b915061110683610b12565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0382111561113b5761113a6110c1565b5b82820190509291505056fea2646970667358221220507a65829a9a551e42c8757b2613a358880e56baec43e730dff9d9aa252c9a3564736f6c634300080f0033a26469706673582212208cc71be49f97f4fad7da0c74d0576129e139d0ef30e3ac4be6a3214675ab1d5264736f6c634300080f0033" else: - # This contract code is used in deployment, note: this is not the deployedBytecode, it includes constructor args. 
- # Ref: https://github.com/waku-org/waku-rln-contract/blob/886891b57ae54e439563023dd50161fec5ee29f1/deployments/sepolia/WakuRlnRegistry_Implementation.json - const RegistryContractCode* = "0x60a06040523073ffffffffffffffffffffffffffffffffffffffff1660809073ffffffffffffffffffffffffffffffffffffffff168152506000606760006101000a81548161ffff021916908361ffff16021790555034801561006157600080fd5b50608051614429610099600039600081816106870152818161071801528181610922015281816109b30152610a6c01526144296000f3fe6080604052600436106200010a5760003560e01c80638da5cb5b1162000097578063f184ef4c1162000061578063f184ef4c14620002f3578063f2fde38b1462000323578063f55421471462000351578063fc6ed4641462000395576200010a565b80638da5cb5b146200024b578063cf616374146200027b578063d44fda1f14620002ab578063ef653d5e14620002c5576200010a565b806352d1902d11620000d957806352d1902d14620001b95780635a244efd14620001e9578063715018a614620002175780638129fc1c1462000231576200010a565b80632de999bf146200010f5780633659cfe6146200013d57806339c0364b146200016b5780634f1ef2861462000199575b600080fd5b3480156200011c57600080fd5b506200013b60048036038101906200013591906200197b565b620003c3565b005b3480156200014a57600080fd5b5062000169600480360381019062000163919062001a6e565b62000685565b005b3480156200017857600080fd5b5062000197600480360381019062000191919062001b1a565b6200081d565b005b620001b76004803603810190620001b1919062001cd3565b62000920565b005b348015620001c657600080fd5b50620001d162000a68565b604051620001e0919062001d54565b60405180910390f35b348015620001f657600080fd5b506200021560048036038101906200020f919062001d71565b62000b24565b005b3480156200022457600080fd5b506200022f62000bc7565b005b3480156200023e57600080fd5b506200024962000bdf565b005b3480156200025857600080fd5b506200026362000d2b565b60405162000272919062001db4565b60405180910390f35b3480156200028857600080fd5b506200029362000d55565b604051620002a2919062001de2565b60405180910390f35b348015620002b857600080fd5b50620002c362000d69565b005b348015620002d257600080fd5b50620002f16004803603810190620002eb919062001a6e565b62000e13565b005b3480156200030057600080fd5b506200030b62000ef4565b6040516200031a919062001de2565b60405180910390f35b3480156200033057600080fd5b506200034f600480360381019062000349919062001a6e565b62000f08565b005b3480156200035e57600080fd5b506200037d600480360381019062000377919062001dff565b62000f92565b6040516200038c919062001db4565b60405180910390f35b348015620003a257600080fd5b50620003c16004803603810190620003bb919062001e31565b62000fc5565b005b606560009054906101000a900461ffff1661ffff16606760009054906101000a900461ffff1661ffff161062000425576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5b6001156200067f5760666000606760009054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16632de999bf858585856040518563ffffffff1660e01b8152600401620004ba949392919062001f57565b600060405180830381600087803b158015620004d557600080fd5b505af1925050508015620004e7575060015b62000673573d80600081146200051a576040519150601f19603f3d011682016040523d82523d6000602084013e6200051f565b606091505b506040516024016040516020818303038152906040527f57f69531000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505080519060200120818051906020012014620005c057805181602001fd5b606560009054906101000a900461ffff1661ffff166001606760009054906101000a900461ffff16620005f4919062001fc5565b61ffff1
61062000630576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001606760008282829054906101000a900461ffff1662000652919062001fc5565b92506101000a81548161ffff021916908361ffff1602179055505062000679565b6200067f565b62000426565b50505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff160362000716576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200070d9062002089565b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1662000757620010ce565b73ffffffffffffffffffffffffffffffffffffffff1614620007b0576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620007a79062002121565b60405180910390fd5b620007bb8162001127565b6200081a81600067ffffffffffffffff811115620007de57620007dd62001b8c565b5b6040519080825280601f01601f191660200182016040528015620008115781602001600182028036833780820191505090505b50600062001134565b50565b606560009054906101000a900461ffff1661ffff168361ffff16106200086f576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606660008461ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663d66d6c1083836040518363ffffffff1660e01b8152600401620008e792919062002154565b600060405180830381600087803b1580156200090257600080fd5b505af115801562000917573d6000803e3d6000fd5b50505050505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff1603620009b1576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620009a89062002089565b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16620009f2620010ce565b73ffffffffffffffffffffffffffffffffffffffff161462000a4b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000a429062002121565b60405180910390fd5b62000a568262001127565b62000a648282600162001134565b5050565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff161462000afb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000af290620021f7565b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b905090565b62000b2e620012b3565b6000810362000b69576040517fe671aff300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600081606560009054906101000a900461ffff1660405162000b8b90620018eb565b62000b9892919062002219565b604051809103906000f08015801562000bb5573d6000803e3d6000fd5b50905062000bc38162001338565b5050565b62000bd1620012b3565b62000bdd60006200142c565b565b60008060019054906101000a900460ff1615905080801562000c115750600160008054906101000a900460ff1660ff16105b8062000c42575062000c2330620014f2565b15801562000c415750600160008054906101000a900460ff1660ff16145b5b62000c84576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000c7b90620022bc565b60405180910390fd5b60016000806101000a81548160ff021916908360ff160217905550801562000cc2576001600060016101000a81548160ff0219169083151502179055505b62000ccc62001515565b801562000d285760008060016101000a81548160ff0219169083151502
179055507f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498600160405162000d1f919062002338565b60405180910390a15b50565b6000603360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b606760009054906101000a900461ffff1681565b62000d73620012b3565b606560009054906101000a900461ffff1661ffff16606760009054906101000a900461ffff1661ffff161062000dd5576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001606760008282829054906101000a900461ffff1662000df7919062001fc5565b92506101000a81548161ffff021916908361ffff160217905550565b62000e1d620012b3565b6000819050606560009054906101000a900461ffff1661ffff168173ffffffffffffffffffffffffffffffffffffffff166328b070e06040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000e83573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000ea991906200236c565b61ffff161462000ee5576040517fb893b72300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b62000ef08262001338565b5050565b606560009054906101000a900461ffff1681565b62000f12620012b3565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff160362000f84576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000f7b9062002414565b60405180910390fd5b62000f8f816200142c565b50565b60666020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b606560009054906101000a900461ffff1661ffff168561ffff161062001017576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606660008661ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16632de999bf858585856040518563ffffffff1660e01b815260040162001093949392919062001f57565b600060405180830381600087803b158015620010ae57600080fd5b505af1158015620010c3573d6000803e3d6000fd5b505050505050505050565b6000620010fe7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b62001573565b60000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b62001131620012b3565b50565b620011627f4910fdfa16fed3260ed0e7147f7cc6da11a60208b5b9406d12a635614ffd914360001b6200157d565b60000160009054906101000a900460ff16156200118a57620011848362001587565b620012ae565b8273ffffffffffffffffffffffffffffffffffffffff166352d1902d6040518163ffffffff1660e01b8152600401602060405180830381865afa925050508015620011f557506040513d601f19601f82011682018060405250810190620011f2919062002467565b60015b62001237576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200122e906200250f565b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b81146200129f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200129690620025a7565b60405180910390fd5b50620012ad83838362001647565b5b505050565b620012bd62001679565b73ffffffffffffffffffffffffffffffffffffffff16620012dd62000d2b565b73ffffffffffffffffffffffffffffffffffffffff161462001336576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200132d9062002619565b60405180910390fd5b565b8060666000606560009054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507fcf6a3b406170499209d0fcf152a1605c7c5a5c99c855e2bb803433fc960718eb606560009054906101000a900461ffff168260405162001
3e59291906200263b565b60405180910390a16001606560008282829054906101000a900461ffff166200140f919062001fc5565b92506101000a81548161ffff021916908361ffff16021790555050565b6000603360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905081603360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b6000808273ffffffffffffffffffffffffffffffffffffffff163b119050919050565b600060019054906101000a900460ff1662001567576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200155e90620026de565b60405180910390fd5b6200157162001681565b565b6000819050919050565b6000819050919050565b6200159281620016e9565b620015d4576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620015cb9062002776565b60405180910390fd5b80620016037f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b62001573565b60000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b62001652836200170c565b600082511180620016605750805b1562001674576200167283836200175d565b505b505050565b600033905090565b600060019054906101000a900460ff16620016d3576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620016ca90620026de565b60405180910390fd5b620016e7620016e162001679565b6200142c565b565b6000808273ffffffffffffffffffffffffffffffffffffffff163b119050919050565b620017178162001587565b8073ffffffffffffffffffffffffffffffffffffffff167fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b60405160405180910390a250565b6060620017858383604051806060016040528060278152602001620043cd602791396200178d565b905092915050565b60606000808573ffffffffffffffffffffffffffffffffffffffff1685604051620017b9919062002811565b600060405180830381855af49150503d8060008114620017f6576040519150601f19603f3d011682016040523d82523d6000602084013e620017fb565b606091505b50915091506200180e8683838762001819565b925050509392505050565b60608315620018835760008351036200187a576200183785620016e9565b62001879576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162001870906200287a565b60405180910390fd5b5b82905062001890565b6200188f838362001898565b5b949350505050565b600082511115620018ac5781518083602001fd5b806040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620018e29190620028e8565b60405180910390fd5b611ac0806200290d83390190565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f8401126200193557620019346200190d565b5b8235905067ffffffffffffffff81111562001955576200195462001912565b5b60208301915083602082028301111562001974576200197362001917565b5b9250929050565b6000806000806040858703121562001998576200199762001903565b5b600085013567ffffffffffffffff811115620019b957620019b862001908565b5b620019c7878288016200191c565b9450945050602085013567ffffffffffffffff811115620019ed57620019ec62001908565b5b620019fb878288016200191c565b925092505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600062001a368262001a09565b9050919050565b62001a488162001a29565b811462001a5457600080fd5b50565b60008135905062001a688162001a3d565b92915050565b60006020828403121562001a875762001a8662001903565b5b600062001a978482850162001a57565b91505092915050565b600061ffff82169050919050565b62001ab98162001aa0565b811462001ac557600080fd5b50565b60008135
905062001ad98162001aae565b92915050565b6000819050919050565b62001af48162001adf565b811462001b0057600080fd5b50565b60008135905062001b148162001ae9565b92915050565b60008060006060848603121562001b365762001b3562001903565b5b600062001b468682870162001ac8565b935050602062001b598682870162001b03565b925050604062001b6c8682870162001b03565b9150509250925092565b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b62001bc68262001b7b565b810181811067ffffffffffffffff8211171562001be85762001be762001b8c565b5b80604052505050565b600062001bfd620018f9565b905062001c0b828262001bbb565b919050565b600067ffffffffffffffff82111562001c2e5762001c2d62001b8c565b5b62001c398262001b7b565b9050602081019050919050565b82818337600083830152505050565b600062001c6c62001c668462001c10565b62001bf1565b90508281526020810184848401111562001c8b5762001c8a62001b76565b5b62001c9884828562001c46565b509392505050565b600082601f83011262001cb85762001cb76200190d565b5b813562001cca84826020860162001c55565b91505092915050565b6000806040838503121562001ced5762001cec62001903565b5b600062001cfd8582860162001a57565b925050602083013567ffffffffffffffff81111562001d215762001d2062001908565b5b62001d2f8582860162001ca0565b9150509250929050565b6000819050919050565b62001d4e8162001d39565b82525050565b600060208201905062001d6b600083018462001d43565b92915050565b60006020828403121562001d8a5762001d8962001903565b5b600062001d9a8482850162001b03565b91505092915050565b62001dae8162001a29565b82525050565b600060208201905062001dcb600083018462001da3565b92915050565b62001ddc8162001aa0565b82525050565b600060208201905062001df9600083018462001dd1565b92915050565b60006020828403121562001e185762001e1762001903565b5b600062001e288482850162001ac8565b91505092915050565b60008060008060006060868803121562001e505762001e4f62001903565b5b600062001e608882890162001ac8565b955050602086013567ffffffffffffffff81111562001e845762001e8362001908565b5b62001e92888289016200191c565b9450945050604086013567ffffffffffffffff81111562001eb85762001eb762001908565b5b62001ec6888289016200191c565b92509250509295509295909350565b600082825260208201905092915050565b600080fd5b82818337505050565b600062001f02838562001ed5565b93507f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83111562001f385762001f3762001ee6565b5b60208302925062001f4b83858462001eeb565b82840190509392505050565b6000604082019050818103600083015262001f7481868862001ef4565b9050818103602083015262001f8b81848662001ef4565b905095945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600062001fd28262001aa0565b915062001fdf8362001aa0565b9250828201905061ffff81111562001ffc5762001ffb62001f96565b5b92915050565b600082825260208201905092915050565b7f46756e6374696f6e206d7573742062652063616c6c6564207468726f7567682060008201527f64656c656761746563616c6c0000000000000000000000000000000000000000602082015250565b600062002071602c8362002002565b91506200207e8262002013565b604082019050919050565b60006020820190508181036000830152620020a48162002062565b9050919050565b7f46756e6374696f6e206d7573742062652063616c6c6564207468726f7567682060008201527f6163746976652070726f78790000000000000000000000000000000000000000602082015250565b600062002109602c8362002002565b91506200211682620020ab565b604082019050919050565b600060208201905081810360008301526200213c81620020fa565b9050919050565b6200214e8162001adf565b82525050565b60006040820190506200216b600083018562002143565b6200217a602083018462002143565b9392505050565b7f555550535570677261646561626c653a206d757374206e6f742062652063616c60008201527f6c6564207468726f7567682064656c6567617465636
16c6c0000000000000000602082015250565b6000620021df60388362002002565b9150620021ec8262002181565b604082019050919050565b600060208201905081810360008301526200221281620021d0565b9050919050565b600060408201905062002230600083018562002143565b6200223f602083018462001dd1565b9392505050565b7f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160008201527f647920696e697469616c697a6564000000000000000000000000000000000000602082015250565b6000620022a4602e8362002002565b9150620022b18262002246565b604082019050919050565b60006020820190508181036000830152620022d78162002295565b9050919050565b6000819050919050565b600060ff82169050919050565b6000819050919050565b6000620023206200231a6200231484620022de565b620022f5565b620022e8565b9050919050565b6200233281620022ff565b82525050565b60006020820190506200234f600083018462002327565b92915050565b600081519050620023668162001aae565b92915050565b60006020828403121562002385576200238462001903565b5b6000620023958482850162002355565b91505092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b6000620023fc60268362002002565b915062002409826200239e565b604082019050919050565b600060208201905081810360008301526200242f81620023ed565b9050919050565b620024418162001d39565b81146200244d57600080fd5b50565b600081519050620024618162002436565b92915050565b60006020828403121562002480576200247f62001903565b5b6000620024908482850162002450565b91505092915050565b7f45524331393637557067726164653a206e657720696d706c656d656e7461746960008201527f6f6e206973206e6f742055555053000000000000000000000000000000000000602082015250565b6000620024f7602e8362002002565b9150620025048262002499565b604082019050919050565b600060208201905081810360008301526200252a81620024e8565b9050919050565b7f45524331393637557067726164653a20756e737570706f727465642070726f7860008201527f6961626c65555549440000000000000000000000000000000000000000000000602082015250565b60006200258f60298362002002565b91506200259c8262002531565b604082019050919050565b60006020820190508181036000830152620025c28162002580565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b60006200260160208362002002565b91506200260e82620025c9565b602082019050919050565b600060208201905081810360008301526200263481620025f2565b9050919050565b600060408201905062002652600083018562001dd1565b62002661602083018462001da3565b9392505050565b7f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960008201527f6e697469616c697a696e67000000000000000000000000000000000000000000602082015250565b6000620026c6602b8362002002565b9150620026d38262002668565b604082019050919050565b60006020820190508181036000830152620026f981620026b7565b9050919050565b7f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60008201527f6f74206120636f6e747261637400000000000000000000000000000000000000602082015250565b60006200275e602d8362002002565b91506200276b8262002700565b604082019050919050565b6000602082019050818103600083015262002791816200274f565b9050919050565b600081519050919050565b600081905092915050565b60005b83811015620027ce578082015181840152602081019050620027b1565b60008484015250505050565b6000620027e78262002798565b620027f38185620027a3565b935062002805818560208601620027ae565b80840191505092915050565b60006200281f8284620027da565b915081905092915050565b7f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000600082015250565b600062002862601d8362002002565b91506200286f826200282a565b602082019050919050565b60006020820190508181036000830152620028958162002853565b9050919050565b600081519050919050565b6000620028b48262
00289c565b620028c0818562002002565b9350620028d2818560208601620027ae565b620028dd8162001b7b565b840191505092915050565b60006020820190508181036000830152620029048184620028a7565b90509291505056fe61016060405260006001553480156200001757600080fd5b5060405162001ac038038062001ac083398181016040528101906200003d91906200023a565b600060148360006200006462000058620000ef60201b60201c565b620000f760201b60201c565b8360a0818152505081608081815250508260c08181525050826001901b60e081815250508073ffffffffffffffffffffffffffffffffffffffff166101008173ffffffffffffffffffffffffffffffffffffffff16815250504363ffffffff166101208163ffffffff1681525050505050508061ffff166101408161ffff1681525050505062000281565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600080fd5b6000819050919050565b620001d581620001c0565b8114620001e157600080fd5b50565b600081519050620001f581620001ca565b92915050565b600061ffff82169050919050565b6200021481620001fb565b81146200022057600080fd5b50565b600081519050620002348162000209565b92915050565b60008060408385031215620002545762000253620001bb565b5b60006200026485828601620001e4565b9250506020620002778582860162000223565b9150509250929050565b60805160a05160c05160e0516101005161012051610140516117cf620002f16000396000610661015260006107c401526000610685015260008181610a4a0152610f24015260006109f001526000818161075f0152610aa00152600081816106050152610dd201526117cf6000f3fe6080604052600436106101665760003560e01c80638be9b119116100d1578063bc4991281161008a578063d66d6c1011610064578063d66d6c1014610568578063e493ef8c14610584578063f220b9ec146105af578063f2fde38b146105da57610166565b8063bc499128146104c3578063c5b208ff14610500578063d0383d681461053d57610166565b80638be9b1191461039f5780638da5cb5b146103c85780639056a9bf146103f3578063933ebfdd1461043057806398366e351461046d578063ae74552a1461049857610166565b80633ccfd60b116101235780633ccfd60b1461028f5780634add651e146102a65780635daf08ca146102d15780636bdcc8ab1461030e578063715018a61461034b5780637671ac051461036257610166565b806309aeb04c1461016b57806322d9730c1461019657806328b070e0146101d35780632b7ac3f3146101fe5780632de999bf14610229578063378de45b14610252575b600080fd5b34801561017757600080fd5b50610180610603565b60405161018d9190610f9c565b60405180910390f35b3480156101a257600080fd5b506101bd60048036038101906101b89190610fed565b610627565b6040516101ca9190611035565b60405180910390f35b3480156101df57600080fd5b506101e861065f565b6040516101f5919061106d565b60405180910390f35b34801561020a57600080fd5b50610213610683565b6040516102209190611107565b60405180910390f35b34801561023557600080fd5b50610250600480360381019061024b9190611187565b6106a7565b005b34801561025e57600080fd5b5061027960048036038101906102749190610fed565b61075b565b6040516102869190610f9c565b60405180910390f35b34801561029b57600080fd5b506102a4610790565b005b3480156102b257600080fd5b506102bb6107c2565b6040516102c89190611227565b60405180910390f35b3480156102dd57600080fd5b506102f860048036038101906102f39190610fed565b6107e6565b6040516103059190610f9c565b60405180910390f35b34801561031a57600080fd5b5061033560048036038101906103309190610fed565b6107fe565b6040516103429190611035565b60405180910390f35b34801561035757600080fd5b5061036061081e565b005b34801561036e57600080fd5b5061038960048036038101906103849190610fed565b610832565b6040516103969190610f9c565b60405180910390f35b3480156
103ab57600080fd5b506103c660048036038101906103c191906112a2565b61084a565b005b3480156103d457600080fd5b506103dd61085a565b6040516103ea9190611317565b60405180910390f35b3480156103ff57600080fd5b5061041a60048036038101906104159190610fed565b610883565b6040516104279190610f9c565b60405180910390f35b34801561043c57600080fd5b5061045760048036038101906104529190611332565b61089b565b6040516104649190611430565b60405180910390f35b34801561047957600080fd5b506104826109ee565b60405161048f9190610f9c565b60405180910390f35b3480156104a457600080fd5b506104ad610a12565b6040516104ba9190610f9c565b60405180910390f35b3480156104cf57600080fd5b506104ea60048036038101906104e59190610fed565b610a18565b6040516104f79190610f9c565b60405180910390f35b34801561050c57600080fd5b506105276004803603810190610522919061147e565b610a30565b6040516105349190610f9c565b60405180910390f35b34801561054957600080fd5b50610552610a48565b60405161055f9190610f9c565b60405180910390f35b610582600480360381019061057d9190611332565b610a6c565b005b34801561059057600080fd5b50610599610a7a565b6040516105a69190610f9c565b60405180910390f35b3480156105bb57600080fd5b506105c4610a9e565b6040516105d19190610f9c565b60405180910390f35b3480156105e657600080fd5b5061060160048036038101906105fc919061147e565b610ac2565b005b7f000000000000000000000000000000000000000000000000000000000000000081565b600080821415801561065857507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182105b9050919050565b7f000000000000000000000000000000000000000000000000000000000000000081565b7f000000000000000000000000000000000000000000000000000000000000000081565b6106af610b45565b838383836106bf84848484610bc3565b6106f5576040517fb750624800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600088889050905060005b8181101561074f576107448a8a8381811061071e5761071d6114ab565b5b90506020020135898984818110610738576107376114ab565b5b90506020020135610bf5565b806001019050610700565b50505050505050505050565b60007f0000000000000000000000000000000000000000000000000000000000000000826107899190611509565b9050919050565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b60036020528060005260406000206000915090505481565b60066020528060005260406000206000915054906101000a900460ff1681565b610826610b45565b6108306000610cd1565b565b60056020528060005260406000206000915090505481565b610855838383610d95565b505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60046020528060005260406000206000915090505481565b60608183106108e35782826040517f9ffcd53d0000000000000000000000000000000000000000000000000000000081526004016108da92919061154b565b60405180910390fd5b60015482111561092c5782826040517f9ffcd53d00000000000000000000000000000000000000000000000000000000815260040161092392919061154b565b60405180910390fd5b6000838361093a9190611574565b67ffffffffffffffff811115610953576109526115a8565b5b6040519080825280602002602001820160405280156109815781602001602082028036833780820191505090505b50905060008490505b838110156109e35760056000828152602001908152602001600020548286836109b39190611574565b815181106109c4576109c36114ab565b5b60200260200101818152505080806109db906115d7565b91505061098a565b508091505092915050565b7f000000000000000000000000000000000000000000000000000000000000000081565b60015481565b60026020528060005260406000206000915090505481565b60076020528060005260406000206000915090505481565b7f000000000000000000000000000000000000000000000000000000000000000081565b610a768282610bf5565b5050565b7f30644e72e131a029b85045b68181585d2833e84879b9709143
e1f593f000000181565b7f000000000000000000000000000000000000000000000000000000000000000081565b610aca610b45565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610b39576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b30906116a2565b60405180910390fd5b610b4281610cd1565b50565b610b4d610dc7565b73ffffffffffffffffffffffffffffffffffffffff16610b6b61085a565b73ffffffffffffffffffffffffffffffffffffffff1614610bc1576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bb89061170e565b60405180910390fd5b565b6000808585905090506000848490509050808214610be657600092505050610bed565b6001925050505b949350505050565b610bff8282610dcf565b6001546003600084815260200190815260200160002081905550816005600060015481526020019081526020016000208190555060016006600084815260200190815260200160002060006101000a81548160ff0219169083151502179055508060046000848152602001908152602001600020819055507fff42916a89d1f5125f7f47168ee59c2b3fc9246ad1b229082ee85b69d001b5d78282600154604051610cac9392919061172e565b60405180910390a16001806000828254610cc69190611765565b925050819055505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600033905090565b807f0000000000000000000000000000000000000000000000000000000000000000811115610e3557806040517f13a5e2ee000000000000000000000000000000000000000000000000000000008152600401610e2c9190610f9c565b60405180910390fd5b60008103610e7a57806040517f13a5e2ee000000000000000000000000000000000000000000000000000000008152600401610e719190610f9c565b60405180910390fd5b82610e8481610627565b610ec557806040517f7f3e75af000000000000000000000000000000000000000000000000000000008152600401610ebc9190610f9c565b60405180910390fd5b600115156006600086815260200190815260200160002060009054906101000a900460ff16151503610f22576040517e0a60f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060015410610f7d576040517f57f6953100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50505050565b6000819050919050565b610f9681610f83565b82525050565b6000602082019050610fb16000830184610f8d565b92915050565b600080fd5b600080fd5b610fca81610f83565b8114610fd557600080fd5b50565b600081359050610fe781610fc1565b92915050565b60006020828403121561100357611002610fb7565b5b600061101184828501610fd8565b91505092915050565b60008115159050919050565b61102f8161101a565b82525050565b600060208201905061104a6000830184611026565b92915050565b600061ffff82169050919050565b61106781611050565b82525050565b6000602082019050611082600083018461105e565b92915050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b60006110cd6110c86110c384611088565b6110a8565b611088565b9050919050565b60006110df826110b2565b9050919050565b60006110f1826110d4565b9050919050565b611101816110e6565b82525050565b600060208201905061111c60008301846110f8565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f84011261114757611146611122565b5b8235905067ffffffffffffffff81111561116457611163611127565b5b6020830191508360208202830111156111805761117f61112c565b5b92509290505
65b600080600080604085870312156111a1576111a0610fb7565b5b600085013567ffffffffffffffff8111156111bf576111be610fbc565b5b6111cb87828801611131565b9450945050602085013567ffffffffffffffff8111156111ee576111ed610fbc565b5b6111fa87828801611131565b925092505092959194509250565b600063ffffffff82169050919050565b61122181611208565b82525050565b600060208201905061123c6000830184611218565b92915050565b600061124d82611088565b9050919050565b61125d81611242565b811461126857600080fd5b50565b60008135905061127a81611254565b92915050565b60008190508260206008028201111561129c5761129b61112c565b5b92915050565b600080600061014084860312156112bc576112bb610fb7565b5b60006112ca86828701610fd8565b93505060206112db8682870161126b565b92505060406112ec86828701611280565b9150509250925092565b600061130182611088565b9050919050565b611311816112f6565b82525050565b600060208201905061132c6000830184611308565b92915050565b6000806040838503121561134957611348610fb7565b5b600061135785828601610fd8565b925050602061136885828601610fd8565b9150509250929050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b6113a781610f83565b82525050565b60006113b9838361139e565b60208301905092915050565b6000602082019050919050565b60006113dd82611372565b6113e7818561137d565b93506113f28361138e565b8060005b8381101561142357815161140a88826113ad565b9750611415836113c5565b9250506001810190506113f6565b5085935050505092915050565b6000602082019050818103600083015261144a81846113d2565b905092915050565b61145b816112f6565b811461146657600080fd5b50565b60008135905061147881611452565b92915050565b60006020828403121561149457611493610fb7565b5b60006114a284828501611469565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600061151482610f83565b915061151f83610f83565b925082820261152d81610f83565b91508282048414831517611544576115436114da565b5b5092915050565b60006040820190506115606000830185610f8d565b61156d6020830184610f8d565b9392505050565b600061157f82610f83565b915061158a83610f83565b92508282039050818111156115a2576115a16114da565b5b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60006115e282610f83565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611614576116136114da565b5b600182019050919050565b600082825260208201905092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b600061168c60268361161f565b915061169782611630565b604082019050919050565b600060208201905081810360008301526116bb8161167f565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b60006116f860208361161f565b9150611703826116c2565b602082019050919050565b60006020820190508181036000830152611727816116eb565b9050919050565b60006060820190506117436000830186610f8d565b6117506020830185610f8d565b61175d6040830184610f8d565b949350505050565b600061177082610f83565b915061177b83610f83565b9250828201905080821115611793576117926114da565b5b9291505056fea2646970667358221220f3d63817472861b92ed47c1c0a059033d26762a83df789a2f02dba13d9cc3df464736f6c63430008130033416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a26469706673582212206950adc7c418dd947ac3e184fb9f4d3a3206a03ca637b1c5a6fa23e8ae800c7464736f6c63430008130033" \ No newline at end of file + # This contract code is used in deployment, note: this is not the deployedBytecode, it includes constructor args. 
+ # Ref: https://github.com/waku-org/waku-rln-contract/blob/886891b57ae54e439563023dd50161fec5ee29f1/deployments/sepolia/WakuRlnRegistry_Implementation.json + const RegistryContractCode* = + "0x60a06040523073ffffffffffffffffffffffffffffffffffffffff1660809073ffffffffffffffffffffffffffffffffffffffff168152506000606760006101000a81548161ffff021916908361ffff16021790555034801561006157600080fd5b50608051614429610099600039600081816106870152818161071801528181610922015281816109b30152610a6c01526144296000f3fe6080604052600436106200010a5760003560e01c80638da5cb5b1162000097578063f184ef4c1162000061578063f184ef4c14620002f3578063f2fde38b1462000323578063f55421471462000351578063fc6ed4641462000395576200010a565b80638da5cb5b146200024b578063cf616374146200027b578063d44fda1f14620002ab578063ef653d5e14620002c5576200010a565b806352d1902d11620000d957806352d1902d14620001b95780635a244efd14620001e9578063715018a614620002175780638129fc1c1462000231576200010a565b80632de999bf146200010f5780633659cfe6146200013d57806339c0364b146200016b5780634f1ef2861462000199575b600080fd5b3480156200011c57600080fd5b506200013b60048036038101906200013591906200197b565b620003c3565b005b3480156200014a57600080fd5b5062000169600480360381019062000163919062001a6e565b62000685565b005b3480156200017857600080fd5b5062000197600480360381019062000191919062001b1a565b6200081d565b005b620001b76004803603810190620001b1919062001cd3565b62000920565b005b348015620001c657600080fd5b50620001d162000a68565b604051620001e0919062001d54565b60405180910390f35b348015620001f657600080fd5b506200021560048036038101906200020f919062001d71565b62000b24565b005b3480156200022457600080fd5b506200022f62000bc7565b005b3480156200023e57600080fd5b506200024962000bdf565b005b3480156200025857600080fd5b506200026362000d2b565b60405162000272919062001db4565b60405180910390f35b3480156200028857600080fd5b506200029362000d55565b604051620002a2919062001de2565b60405180910390f35b348015620002b857600080fd5b50620002c362000d69565b005b348015620002d257600080fd5b50620002f16004803603810190620002eb919062001a6e565b62000e13565b005b3480156200030057600080fd5b506200030b62000ef4565b6040516200031a919062001de2565b60405180910390f35b3480156200033057600080fd5b506200034f600480360381019062000349919062001a6e565b62000f08565b005b3480156200035e57600080fd5b506200037d600480360381019062000377919062001dff565b62000f92565b6040516200038c919062001db4565b60405180910390f35b348015620003a257600080fd5b50620003c16004803603810190620003bb919062001e31565b62000fc5565b005b606560009054906101000a900461ffff1661ffff16606760009054906101000a900461ffff1661ffff161062000425576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5b6001156200067f5760666000606760009054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16632de999bf858585856040518563ffffffff1660e01b8152600401620004ba949392919062001f57565b600060405180830381600087803b158015620004d557600080fd5b505af1925050508015620004e7575060015b62000673573d80600081146200051a576040519150601f19603f3d011682016040523d82523d6000602084013e6200051f565b606091505b506040516024016040516020818303038152906040527f57f69531000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505080519060200120818051906020012014620005c057805181602001fd5b606560009054906101000a900461ffff1661ffff166001606760009054906101000a900461ffff16620005f4919062001fc5565b61fff
f161062000630576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001606760008282829054906101000a900461ffff1662000652919062001fc5565b92506101000a81548161ffff021916908361ffff1602179055505062000679565b6200067f565b62000426565b50505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff160362000716576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200070d9062002089565b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1662000757620010ce565b73ffffffffffffffffffffffffffffffffffffffff1614620007b0576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620007a79062002121565b60405180910390fd5b620007bb8162001127565b6200081a81600067ffffffffffffffff811115620007de57620007dd62001b8c565b5b6040519080825280601f01601f191660200182016040528015620008115781602001600182028036833780820191505090505b50600062001134565b50565b606560009054906101000a900461ffff1661ffff168361ffff16106200086f576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606660008461ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663d66d6c1083836040518363ffffffff1660e01b8152600401620008e792919062002154565b600060405180830381600087803b1580156200090257600080fd5b505af115801562000917573d6000803e3d6000fd5b50505050505050565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff1603620009b1576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620009a89062002089565b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16620009f2620010ce565b73ffffffffffffffffffffffffffffffffffffffff161462000a4b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000a429062002121565b60405180910390fd5b62000a568262001127565b62000a648282600162001134565b5050565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff161462000afb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000af290620021f7565b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b905090565b62000b2e620012b3565b6000810362000b69576040517fe671aff300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600081606560009054906101000a900461ffff1660405162000b8b90620018eb565b62000b9892919062002219565b604051809103906000f08015801562000bb5573d6000803e3d6000fd5b50905062000bc38162001338565b5050565b62000bd1620012b3565b62000bdd60006200142c565b565b60008060019054906101000a900460ff1615905080801562000c115750600160008054906101000a900460ff1660ff16105b8062000c42575062000c2330620014f2565b15801562000c415750600160008054906101000a900460ff1660ff16145b5b62000c84576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000c7b90620022bc565b60405180910390fd5b60016000806101000a81548160ff021916908360ff160217905550801562000cc2576001600060016101000a81548160ff0219169083151502179055505b62000ccc62001515565b801562000d285760008060016101000a81548160ff02191690831515
02179055507f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498600160405162000d1f919062002338565b60405180910390a15b50565b6000603360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b606760009054906101000a900461ffff1681565b62000d73620012b3565b606560009054906101000a900461ffff1661ffff16606760009054906101000a900461ffff1661ffff161062000dd5576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001606760008282829054906101000a900461ffff1662000df7919062001fc5565b92506101000a81548161ffff021916908361ffff160217905550565b62000e1d620012b3565b6000819050606560009054906101000a900461ffff1661ffff168173ffffffffffffffffffffffffffffffffffffffff166328b070e06040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000e83573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000ea991906200236c565b61ffff161462000ee5576040517fb893b72300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b62000ef08262001338565b5050565b606560009054906101000a900461ffff1681565b62000f12620012b3565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff160362000f84576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000f7b9062002414565b60405180910390fd5b62000f8f816200142c565b50565b60666020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b606560009054906101000a900461ffff1661ffff168561ffff161062001017576040517fd23276a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606660008661ffff1661ffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16632de999bf858585856040518563ffffffff1660e01b815260040162001093949392919062001f57565b600060405180830381600087803b158015620010ae57600080fd5b505af1158015620010c3573d6000803e3d6000fd5b505050505050505050565b6000620010fe7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b62001573565b60000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b62001131620012b3565b50565b620011627f4910fdfa16fed3260ed0e7147f7cc6da11a60208b5b9406d12a635614ffd914360001b6200157d565b60000160009054906101000a900460ff16156200118a57620011848362001587565b620012ae565b8273ffffffffffffffffffffffffffffffffffffffff166352d1902d6040518163ffffffff1660e01b8152600401602060405180830381865afa925050508015620011f557506040513d601f19601f82011682018060405250810190620011f2919062002467565b60015b62001237576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200122e906200250f565b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b81146200129f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200129690620025a7565b60405180910390fd5b50620012ad83838362001647565b5b505050565b620012bd62001679565b73ffffffffffffffffffffffffffffffffffffffff16620012dd62000d2b565b73ffffffffffffffffffffffffffffffffffffffff161462001336576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200132d9062002619565b60405180910390fd5b565b8060666000606560009054906101000a900461ffff1661ffff1661ffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507fcf6a3b406170499209d0fcf152a1605c7c5a5c99c855e2bb803433fc960718eb606560009054906101000a900461ffff1682604051620
013e59291906200263b565b60405180910390a16001606560008282829054906101000a900461ffff166200140f919062001fc5565b92506101000a81548161ffff021916908361ffff16021790555050565b6000603360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905081603360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b6000808273ffffffffffffffffffffffffffffffffffffffff163b119050919050565b600060019054906101000a900460ff1662001567576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200155e90620026de565b60405180910390fd5b6200157162001681565b565b6000819050919050565b6000819050919050565b6200159281620016e9565b620015d4576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620015cb9062002776565b60405180910390fd5b80620016037f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b62001573565b60000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b62001652836200170c565b600082511180620016605750805b1562001674576200167283836200175d565b505b505050565b600033905090565b600060019054906101000a900460ff16620016d3576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620016ca90620026de565b60405180910390fd5b620016e7620016e162001679565b6200142c565b565b6000808273ffffffffffffffffffffffffffffffffffffffff163b119050919050565b620017178162001587565b8073ffffffffffffffffffffffffffffffffffffffff167fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b60405160405180910390a250565b6060620017858383604051806060016040528060278152602001620043cd602791396200178d565b905092915050565b60606000808573ffffffffffffffffffffffffffffffffffffffff1685604051620017b9919062002811565b600060405180830381855af49150503d8060008114620017f6576040519150601f19603f3d011682016040523d82523d6000602084013e620017fb565b606091505b50915091506200180e8683838762001819565b925050509392505050565b60608315620018835760008351036200187a576200183785620016e9565b62001879576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162001870906200287a565b60405180910390fd5b5b82905062001890565b6200188f838362001898565b5b949350505050565b600082511115620018ac5781518083602001fd5b806040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401620018e29190620028e8565b60405180910390fd5b611ac0806200290d83390190565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f8401126200193557620019346200190d565b5b8235905067ffffffffffffffff81111562001955576200195462001912565b5b60208301915083602082028301111562001974576200197362001917565b5b9250929050565b6000806000806040858703121562001998576200199762001903565b5b600085013567ffffffffffffffff811115620019b957620019b862001908565b5b620019c7878288016200191c565b9450945050602085013567ffffffffffffffff811115620019ed57620019ec62001908565b5b620019fb878288016200191c565b925092505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600062001a368262001a09565b9050919050565b62001a488162001a29565b811462001a5457600080fd5b50565b60008135905062001a688162001a3d565b92915050565b60006020828403121562001a875762001a8662001903565b5b600062001a978482850162001a57565b91505092915050565b600061ffff82169050919050565b62001ab98162001aa0565b811462001ac557600080fd5b50565b600081
35905062001ad98162001aae565b92915050565b6000819050919050565b62001af48162001adf565b811462001b0057600080fd5b50565b60008135905062001b148162001ae9565b92915050565b60008060006060848603121562001b365762001b3562001903565b5b600062001b468682870162001ac8565b935050602062001b598682870162001b03565b925050604062001b6c8682870162001b03565b9150509250925092565b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b62001bc68262001b7b565b810181811067ffffffffffffffff8211171562001be85762001be762001b8c565b5b80604052505050565b600062001bfd620018f9565b905062001c0b828262001bbb565b919050565b600067ffffffffffffffff82111562001c2e5762001c2d62001b8c565b5b62001c398262001b7b565b9050602081019050919050565b82818337600083830152505050565b600062001c6c62001c668462001c10565b62001bf1565b90508281526020810184848401111562001c8b5762001c8a62001b76565b5b62001c9884828562001c46565b509392505050565b600082601f83011262001cb85762001cb76200190d565b5b813562001cca84826020860162001c55565b91505092915050565b6000806040838503121562001ced5762001cec62001903565b5b600062001cfd8582860162001a57565b925050602083013567ffffffffffffffff81111562001d215762001d2062001908565b5b62001d2f8582860162001ca0565b9150509250929050565b6000819050919050565b62001d4e8162001d39565b82525050565b600060208201905062001d6b600083018462001d43565b92915050565b60006020828403121562001d8a5762001d8962001903565b5b600062001d9a8482850162001b03565b91505092915050565b62001dae8162001a29565b82525050565b600060208201905062001dcb600083018462001da3565b92915050565b62001ddc8162001aa0565b82525050565b600060208201905062001df9600083018462001dd1565b92915050565b60006020828403121562001e185762001e1762001903565b5b600062001e288482850162001ac8565b91505092915050565b60008060008060006060868803121562001e505762001e4f62001903565b5b600062001e608882890162001ac8565b955050602086013567ffffffffffffffff81111562001e845762001e8362001908565b5b62001e92888289016200191c565b9450945050604086013567ffffffffffffffff81111562001eb85762001eb762001908565b5b62001ec6888289016200191c565b92509250509295509295909350565b600082825260208201905092915050565b600080fd5b82818337505050565b600062001f02838562001ed5565b93507f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83111562001f385762001f3762001ee6565b5b60208302925062001f4b83858462001eeb565b82840190509392505050565b6000604082019050818103600083015262001f7481868862001ef4565b9050818103602083015262001f8b81848662001ef4565b905095945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600062001fd28262001aa0565b915062001fdf8362001aa0565b9250828201905061ffff81111562001ffc5762001ffb62001f96565b5b92915050565b600082825260208201905092915050565b7f46756e6374696f6e206d7573742062652063616c6c6564207468726f7567682060008201527f64656c656761746563616c6c0000000000000000000000000000000000000000602082015250565b600062002071602c8362002002565b91506200207e8262002013565b604082019050919050565b60006020820190508181036000830152620020a48162002062565b9050919050565b7f46756e6374696f6e206d7573742062652063616c6c6564207468726f7567682060008201527f6163746976652070726f78790000000000000000000000000000000000000000602082015250565b600062002109602c8362002002565b91506200211682620020ab565b604082019050919050565b600060208201905081810360008301526200213c81620020fa565b9050919050565b6200214e8162001adf565b82525050565b60006040820190506200216b600083018562002143565b6200217a602083018462002143565b9392505050565b7f555550535570677261646561626c653a206d757374206e6f742062652063616c60008201527f6c6564207468726f7567682064656c65676174656
3616c6c0000000000000000602082015250565b6000620021df60388362002002565b9150620021ec8262002181565b604082019050919050565b600060208201905081810360008301526200221281620021d0565b9050919050565b600060408201905062002230600083018562002143565b6200223f602083018462001dd1565b9392505050565b7f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160008201527f647920696e697469616c697a6564000000000000000000000000000000000000602082015250565b6000620022a4602e8362002002565b9150620022b18262002246565b604082019050919050565b60006020820190508181036000830152620022d78162002295565b9050919050565b6000819050919050565b600060ff82169050919050565b6000819050919050565b6000620023206200231a6200231484620022de565b620022f5565b620022e8565b9050919050565b6200233281620022ff565b82525050565b60006020820190506200234f600083018462002327565b92915050565b600081519050620023668162001aae565b92915050565b60006020828403121562002385576200238462001903565b5b6000620023958482850162002355565b91505092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b6000620023fc60268362002002565b915062002409826200239e565b604082019050919050565b600060208201905081810360008301526200242f81620023ed565b9050919050565b620024418162001d39565b81146200244d57600080fd5b50565b600081519050620024618162002436565b92915050565b60006020828403121562002480576200247f62001903565b5b6000620024908482850162002450565b91505092915050565b7f45524331393637557067726164653a206e657720696d706c656d656e7461746960008201527f6f6e206973206e6f742055555053000000000000000000000000000000000000602082015250565b6000620024f7602e8362002002565b9150620025048262002499565b604082019050919050565b600060208201905081810360008301526200252a81620024e8565b9050919050565b7f45524331393637557067726164653a20756e737570706f727465642070726f7860008201527f6961626c65555549440000000000000000000000000000000000000000000000602082015250565b60006200258f60298362002002565b91506200259c8262002531565b604082019050919050565b60006020820190508181036000830152620025c28162002580565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b60006200260160208362002002565b91506200260e82620025c9565b602082019050919050565b600060208201905081810360008301526200263481620025f2565b9050919050565b600060408201905062002652600083018562001dd1565b62002661602083018462001da3565b9392505050565b7f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960008201527f6e697469616c697a696e67000000000000000000000000000000000000000000602082015250565b6000620026c6602b8362002002565b9150620026d38262002668565b604082019050919050565b60006020820190508181036000830152620026f981620026b7565b9050919050565b7f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60008201527f6f74206120636f6e747261637400000000000000000000000000000000000000602082015250565b60006200275e602d8362002002565b91506200276b8262002700565b604082019050919050565b6000602082019050818103600083015262002791816200274f565b9050919050565b600081519050919050565b600081905092915050565b60005b83811015620027ce578082015181840152602081019050620027b1565b60008484015250505050565b6000620027e78262002798565b620027f38185620027a3565b935062002805818560208601620027ae565b80840191505092915050565b60006200281f8284620027da565b915081905092915050565b7f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000600082015250565b600062002862601d8362002002565b91506200286f826200282a565b602082019050919050565b60006020820190508181036000830152620028958162002853565b9050919050565b600081519050919050565b6000620028b482
6200289c565b620028c0818562002002565b9350620028d2818560208601620027ae565b620028dd8162001b7b565b840191505092915050565b60006020820190508181036000830152620029048184620028a7565b90509291505056fe61016060405260006001553480156200001757600080fd5b5060405162001ac038038062001ac083398181016040528101906200003d91906200023a565b600060148360006200006462000058620000ef60201b60201c565b620000f760201b60201c565b8360a0818152505081608081815250508260c08181525050826001901b60e081815250508073ffffffffffffffffffffffffffffffffffffffff166101008173ffffffffffffffffffffffffffffffffffffffff16815250504363ffffffff166101208163ffffffff1681525050505050508061ffff166101408161ffff1681525050505062000281565b600033905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600080fd5b6000819050919050565b620001d581620001c0565b8114620001e157600080fd5b50565b600081519050620001f581620001ca565b92915050565b600061ffff82169050919050565b6200021481620001fb565b81146200022057600080fd5b50565b600081519050620002348162000209565b92915050565b60008060408385031215620002545762000253620001bb565b5b60006200026485828601620001e4565b9250506020620002778582860162000223565b9150509250929050565b60805160a05160c05160e0516101005161012051610140516117cf620002f16000396000610661015260006107c401526000610685015260008181610a4a0152610f24015260006109f001526000818161075f0152610aa00152600081816106050152610dd201526117cf6000f3fe6080604052600436106101665760003560e01c80638be9b119116100d1578063bc4991281161008a578063d66d6c1011610064578063d66d6c1014610568578063e493ef8c14610584578063f220b9ec146105af578063f2fde38b146105da57610166565b8063bc499128146104c3578063c5b208ff14610500578063d0383d681461053d57610166565b80638be9b1191461039f5780638da5cb5b146103c85780639056a9bf146103f3578063933ebfdd1461043057806398366e351461046d578063ae74552a1461049857610166565b80633ccfd60b116101235780633ccfd60b1461028f5780634add651e146102a65780635daf08ca146102d15780636bdcc8ab1461030e578063715018a61461034b5780637671ac051461036257610166565b806309aeb04c1461016b57806322d9730c1461019657806328b070e0146101d35780632b7ac3f3146101fe5780632de999bf14610229578063378de45b14610252575b600080fd5b34801561017757600080fd5b50610180610603565b60405161018d9190610f9c565b60405180910390f35b3480156101a257600080fd5b506101bd60048036038101906101b89190610fed565b610627565b6040516101ca9190611035565b60405180910390f35b3480156101df57600080fd5b506101e861065f565b6040516101f5919061106d565b60405180910390f35b34801561020a57600080fd5b50610213610683565b6040516102209190611107565b60405180910390f35b34801561023557600080fd5b50610250600480360381019061024b9190611187565b6106a7565b005b34801561025e57600080fd5b5061027960048036038101906102749190610fed565b61075b565b6040516102869190610f9c565b60405180910390f35b34801561029b57600080fd5b506102a4610790565b005b3480156102b257600080fd5b506102bb6107c2565b6040516102c89190611227565b60405180910390f35b3480156102dd57600080fd5b506102f860048036038101906102f39190610fed565b6107e6565b6040516103059190610f9c565b60405180910390f35b34801561031a57600080fd5b5061033560048036038101906103309190610fed565b6107fe565b6040516103429190611035565b60405180910390f35b34801561035757600080fd5b5061036061081e565b005b34801561036e57600080fd5b5061038960048036038101906103849190610fed565b610832565b6040516103969190610f9c565b60405180910390f35b34801
56103ab57600080fd5b506103c660048036038101906103c191906112a2565b61084a565b005b3480156103d457600080fd5b506103dd61085a565b6040516103ea9190611317565b60405180910390f35b3480156103ff57600080fd5b5061041a60048036038101906104159190610fed565b610883565b6040516104279190610f9c565b60405180910390f35b34801561043c57600080fd5b5061045760048036038101906104529190611332565b61089b565b6040516104649190611430565b60405180910390f35b34801561047957600080fd5b506104826109ee565b60405161048f9190610f9c565b60405180910390f35b3480156104a457600080fd5b506104ad610a12565b6040516104ba9190610f9c565b60405180910390f35b3480156104cf57600080fd5b506104ea60048036038101906104e59190610fed565b610a18565b6040516104f79190610f9c565b60405180910390f35b34801561050c57600080fd5b506105276004803603810190610522919061147e565b610a30565b6040516105349190610f9c565b60405180910390f35b34801561054957600080fd5b50610552610a48565b60405161055f9190610f9c565b60405180910390f35b610582600480360381019061057d9190611332565b610a6c565b005b34801561059057600080fd5b50610599610a7a565b6040516105a69190610f9c565b60405180910390f35b3480156105bb57600080fd5b506105c4610a9e565b6040516105d19190610f9c565b60405180910390f35b3480156105e657600080fd5b5061060160048036038101906105fc919061147e565b610ac2565b005b7f000000000000000000000000000000000000000000000000000000000000000081565b600080821415801561065857507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182105b9050919050565b7f000000000000000000000000000000000000000000000000000000000000000081565b7f000000000000000000000000000000000000000000000000000000000000000081565b6106af610b45565b838383836106bf84848484610bc3565b6106f5576040517fb750624800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600088889050905060005b8181101561074f576107448a8a8381811061071e5761071d6114ab565b5b90506020020135898984818110610738576107376114ab565b5b90506020020135610bf5565b806001019050610700565b50505050505050505050565b60007f0000000000000000000000000000000000000000000000000000000000000000826107899190611509565b9050919050565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b60036020528060005260406000206000915090505481565b60066020528060005260406000206000915054906101000a900460ff1681565b610826610b45565b6108306000610cd1565b565b60056020528060005260406000206000915090505481565b610855838383610d95565b505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60046020528060005260406000206000915090505481565b60608183106108e35782826040517f9ffcd53d0000000000000000000000000000000000000000000000000000000081526004016108da92919061154b565b60405180910390fd5b60015482111561092c5782826040517f9ffcd53d00000000000000000000000000000000000000000000000000000000815260040161092392919061154b565b60405180910390fd5b6000838361093a9190611574565b67ffffffffffffffff811115610953576109526115a8565b5b6040519080825280602002602001820160405280156109815781602001602082028036833780820191505090505b50905060008490505b838110156109e35760056000828152602001908152602001600020548286836109b39190611574565b815181106109c4576109c36114ab565b5b60200260200101818152505080806109db906115d7565b91505061098a565b508091505092915050565b7f000000000000000000000000000000000000000000000000000000000000000081565b60015481565b60026020528060005260406000206000915090505481565b60076020528060005260406000206000915090505481565b7f000000000000000000000000000000000000000000000000000000000000000081565b610a768282610bf5565b5050565b7f30644e72e131a029b85045b68181585d2833e84879b97091
43e1f593f000000181565b7f000000000000000000000000000000000000000000000000000000000000000081565b610aca610b45565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610b39576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b30906116a2565b60405180910390fd5b610b4281610cd1565b50565b610b4d610dc7565b73ffffffffffffffffffffffffffffffffffffffff16610b6b61085a565b73ffffffffffffffffffffffffffffffffffffffff1614610bc1576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bb89061170e565b60405180910390fd5b565b6000808585905090506000848490509050808214610be657600092505050610bed565b6001925050505b949350505050565b610bff8282610dcf565b6001546003600084815260200190815260200160002081905550816005600060015481526020019081526020016000208190555060016006600084815260200190815260200160002060006101000a81548160ff0219169083151502179055508060046000848152602001908152602001600020819055507fff42916a89d1f5125f7f47168ee59c2b3fc9246ad1b229082ee85b69d001b5d78282600154604051610cac9392919061172e565b60405180910390a16001806000828254610cc69190611765565b925050819055505050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050816000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b6040517fd623472500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600033905090565b807f0000000000000000000000000000000000000000000000000000000000000000811115610e3557806040517f13a5e2ee000000000000000000000000000000000000000000000000000000008152600401610e2c9190610f9c565b60405180910390fd5b60008103610e7a57806040517f13a5e2ee000000000000000000000000000000000000000000000000000000008152600401610e719190610f9c565b60405180910390fd5b82610e8481610627565b610ec557806040517f7f3e75af000000000000000000000000000000000000000000000000000000008152600401610ebc9190610f9c565b60405180910390fd5b600115156006600086815260200190815260200160002060009054906101000a900460ff16151503610f22576040517e0a60f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060015410610f7d576040517f57f6953100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50505050565b6000819050919050565b610f9681610f83565b82525050565b6000602082019050610fb16000830184610f8d565b92915050565b600080fd5b600080fd5b610fca81610f83565b8114610fd557600080fd5b50565b600081359050610fe781610fc1565b92915050565b60006020828403121561100357611002610fb7565b5b600061101184828501610fd8565b91505092915050565b60008115159050919050565b61102f8161101a565b82525050565b600060208201905061104a6000830184611026565b92915050565b600061ffff82169050919050565b61106781611050565b82525050565b6000602082019050611082600083018461105e565b92915050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b60006110cd6110c86110c384611088565b6110a8565b611088565b9050919050565b60006110df826110b2565b9050919050565b60006110f1826110d4565b9050919050565b611101816110e6565b82525050565b600060208201905061111c60008301846110f8565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f84011261114757611146611122565b5b8235905067ffffffffffffffff81111561116457611163611127565b5b6020830191508360208202830111156111805761117f61112c565b5b925092905
0565b600080600080604085870312156111a1576111a0610fb7565b5b600085013567ffffffffffffffff8111156111bf576111be610fbc565b5b6111cb87828801611131565b9450945050602085013567ffffffffffffffff8111156111ee576111ed610fbc565b5b6111fa87828801611131565b925092505092959194509250565b600063ffffffff82169050919050565b61122181611208565b82525050565b600060208201905061123c6000830184611218565b92915050565b600061124d82611088565b9050919050565b61125d81611242565b811461126857600080fd5b50565b60008135905061127a81611254565b92915050565b60008190508260206008028201111561129c5761129b61112c565b5b92915050565b600080600061014084860312156112bc576112bb610fb7565b5b60006112ca86828701610fd8565b93505060206112db8682870161126b565b92505060406112ec86828701611280565b9150509250925092565b600061130182611088565b9050919050565b611311816112f6565b82525050565b600060208201905061132c6000830184611308565b92915050565b6000806040838503121561134957611348610fb7565b5b600061135785828601610fd8565b925050602061136885828601610fd8565b9150509250929050565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b6113a781610f83565b82525050565b60006113b9838361139e565b60208301905092915050565b6000602082019050919050565b60006113dd82611372565b6113e7818561137d565b93506113f28361138e565b8060005b8381101561142357815161140a88826113ad565b9750611415836113c5565b9250506001810190506113f6565b5085935050505092915050565b6000602082019050818103600083015261144a81846113d2565b905092915050565b61145b816112f6565b811461146657600080fd5b50565b60008135905061147881611452565b92915050565b60006020828403121561149457611493610fb7565b5b60006114a284828501611469565b91505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600061151482610f83565b915061151f83610f83565b925082820261152d81610f83565b91508282048414831517611544576115436114da565b5b5092915050565b60006040820190506115606000830185610f8d565b61156d6020830184610f8d565b9392505050565b600061157f82610f83565b915061158a83610f83565b92508282039050818111156115a2576115a16114da565b5b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60006115e282610f83565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611614576116136114da565b5b600182019050919050565b600082825260208201905092915050565b7f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160008201527f6464726573730000000000000000000000000000000000000000000000000000602082015250565b600061168c60268361161f565b915061169782611630565b604082019050919050565b600060208201905081810360008301526116bb8161167f565b9050919050565b7f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572600082015250565b60006116f860208361161f565b9150611703826116c2565b602082019050919050565b60006020820190508181036000830152611727816116eb565b9050919050565b60006060820190506117436000830186610f8d565b6117506020830185610f8d565b61175d6040830184610f8d565b949350505050565b600061177082610f83565b915061177b83610f83565b9250828201905080821115611793576117926114da565b5b9291505056fea2646970667358221220f3d63817472861b92ed47c1c0a059033d26762a83df789a2f02dba13d9cc3df464736f6c63430008130033416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a26469706673582212206950adc7c418dd947ac3e184fb9f4d3a3206a03ca637b1c5a6fa23e8ae800c7464736f6c63430008130033" diff --git a/waku/waku_rln_relay/conversion_utils.nim b/waku/waku_rln_relay/conversion_utils.nim index f1bd7b6cd..59070c6a5 100644 --- 
a/waku/waku_rln_relay/conversion_utils.nim
+++ b/waku/waku_rln_relay/conversion_utils.nim
@@ -9,30 +9,19 @@ import
   chronicles,
   stew/[arrayops, results, endians2],
   stint
-import
-  ./constants,
-  ./protocol_types
-import
-  ../waku_keystore
+import ./constants, ./protocol_types
+import ../waku_keystore
 
-export
-  web3,
-  chronicles,
-  stint,
-  constants,
-  endians2
+export web3, chronicles, stint, constants, endians2
 
 logScope:
-  topics = "waku rln_relay conversion_utils"
+  topics = "waku rln_relay conversion_utils"
 
-proc inHex*(value: IdentityTrapdoor or
-            IdentityNullifier or
-            IdentitySecretHash or
-            IDCommitment or
-            MerkleNode or
-            Nullifier or
-            Epoch or
-            RlnIdentifier): string =
+proc inHex*(
+    value:
+      IdentityTrapdoor or IdentityNullifier or IdentitySecretHash or IDCommitment or
+      MerkleNode or Nullifier or Epoch or RlnIdentifier
+): string =
   var valueHex = "" #UInt256.fromBytesLE(value)
   for b in value.reversed():
     valueHex = valueHex & b.toHex()
@@ -60,21 +49,22 @@ proc encodeLengthPrefix*(input: openArray[byte]): seq[byte] =
 proc serialize*(v: uint64): array[32, byte] =
   ## a private proc to convert uint64 to a byte seq
   ## this conversion is used in the proofGen proc
-
+  ## converts `v` to a byte seq in little-endian order
   let bytes = toBytes(v, Endianness.littleEndian)
   var output: array[32, byte]
   discard output.copyFrom(bytes)
   return output
-
 when defined(rln_v2):
-  proc serialize*(idSecretHash: IdentitySecretHash,
-                  memIndex: MembershipIndex,
-                  userMessageLimit: UserMessageLimit,
-                  messageId: MessageId,
-                  externalNullifier: ExternalNullifier,
-                  msg: openArray[byte]): seq[byte] =
+  proc serialize*(
+      idSecretHash: IdentitySecretHash,
+      memIndex: MembershipIndex,
+      userMessageLimit: UserMessageLimit,
+      messageId: MessageId,
+      externalNullifier: ExternalNullifier,
+      msg: openArray[byte],
+  ): seq[byte] =
     ## a private proc to convert RateLimitProof and the data to a byte seq
     ## this conversion is used in the proofGen proc
     ## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
@@ -83,13 +73,23 @@ when defined(rln_v2):
     let userMessageLimitBytes = userMessageLimit.serialize()
     let messageIdBytes = messageId.serialize()
     let lenPrefMsg = encodeLengthPrefix(msg)
-    let output = concat(@idSecretHash, @memIndexBytes, @userMessageLimitBytes, @messageIdBytes, @externalNullifier, lenPrefMsg)
+    let output = concat(
+      @idSecretHash,
+      @memIndexBytes,
+      @userMessageLimitBytes,
+      @messageIdBytes,
+      @externalNullifier,
+      lenPrefMsg,
+    )
     return output
+
 else:
-  proc serialize*(idSecretHash: IdentitySecretHash,
-                  memIndex: MembershipIndex,
-                  epoch: Epoch,
-                  msg: openArray[byte]): seq[byte] =
+  proc serialize*(
+      idSecretHash: IdentitySecretHash,
+      memIndex: MembershipIndex,
+      epoch: Epoch,
+      msg: openArray[byte],
+  ): seq[byte] =
    ## a private proc to convert RateLimitProof and the data to a byte seq
    ## this conversion is used in the proofGen proc
    ## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
@@ -99,29 +99,32 @@ else:
     let output = concat(@idSecretHash, @memIndexBytes, @epoch, lenPrefMsg)
     return output
-
 proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
   ## a private proc to convert RateLimitProof and data to a byte seq
   ## this conversion is used in the proof verification proc
   ## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal ]
   let lenPrefMsg = encodeLengthPrefix(@data)
   when defined(rln_v2):
-    var proofBytes = concat(@(proof.proof),
-                            @(proof.merkleRoot),
-                            @(proof.externalNullifier),
-                            @(proof.shareX),
-                            @(proof.shareY),
-                            @(proof.nullifier),
-                            lenPrefMsg)
+    var proofBytes = concat(
+      @(proof.proof),
+      @(proof.merkleRoot),
+      @(proof.externalNullifier),
+      @(proof.shareX),
+      @(proof.shareY),
+      @(proof.nullifier),
+      lenPrefMsg,
+    )
   else:
-    var proofBytes = concat(@(proof.proof),
-                            @(proof.merkleRoot),
-                            @(proof.epoch),
-                            @(proof.shareX),
-                            @(proof.shareY),
-                            @(proof.nullifier),
-                            @(proof.rlnIdentifier),
-                            lenPrefMsg)
+    var proofBytes = concat(
+      @(proof.proof),
+      @(proof.merkleRoot),
+      @(proof.epoch),
+      @(proof.shareX),
+      @(proof.shareY),
+      @(proof.nullifier),
+      @(proof.rlnIdentifier),
+      lenPrefMsg,
+    )
   return proofBytes
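The byte layout documented in the `serialize*(proof, data)` comment above fixes every field width, so the size of a serialized proof/signal pair can be computed up front. A minimal standalone sketch (not part of the diff; `expectedSerializedLen` and the example signal length are illustrative):

# v1 layout: [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> |
#              nullifier<32> | rln_identifier<32> | signal_len<8> | signal ]
const
  ProofLen = 128   # zk proof bytes
  FieldLen = 32    # root, epoch, share_x, share_y, nullifier, rln_identifier
  LenPrefixLen = 8 # little-endian length prefix written by encodeLengthPrefix

proc expectedSerializedLen(signalLen: int): int =
  ## expected length of the serialize(proof, data) output for a `signalLen`-byte signal
  ProofLen + 6 * FieldLen + LenPrefixLen + signalLen

assert expectedSerializedLen(10) == 338 # 128 + 6 * 32 + 8 + 10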
diff --git a/waku/waku_rln_relay/group_manager.nim b/waku/waku_rln_relay/group_manager.nim
index 1e1a17b48..db825db5f 100644
--- a/waku/waku_rln_relay/group_manager.nim
+++ b/waku/waku_rln_relay/group_manager.nim
@@ -1,6 +1,3 @@
-import
-  group_manager/[static, on_chain]
+import group_manager/[static, on_chain]
 
-export
-  static,
-  on_chain
\ No newline at end of file
+export static, on_chain
diff --git a/waku/waku_rln_relay/group_manager/group_manager_base.nim b/waku/waku_rln_relay/group_manager/group_manager_base.nim
index a8f7ca55c..99dff9635 100644
--- a/waku/waku_rln_relay/group_manager/group_manager_base.nim
+++ b/waku/waku_rln_relay/group_manager/group_manager_base.nim
@@ -4,19 +4,9 @@ import
   ../protocol_metrics,
   ../constants,
   ../rln
-import
-  options,
-  chronos,
-  stew/results,
-  std/[deques, sequtils]
+import options, chronos, stew/results, std/[deques, sequtils]
 
-export
-  options,
-  chronos,
-  results,
-  protocol_types,
-  protocol_metrics,
-  deques
+export options, chronos, results, protocol_types, protocol_metrics, deques
 
 # This module contains the GroupManager interface
 # The GroupManager is responsible for managing the group state
@@ -30,104 +20,154 @@ type Membership* = object
   else:
     idCommitment*: IDCommitment
 
-type OnRegisterCallback* = proc (registrations: seq[Membership]): Future[void] {.gcsafe.}
-type OnWithdrawCallback* = proc (withdrawals: seq[Membership]): Future[void] {.gcsafe.}
+type OnRegisterCallback* = proc(registrations: seq[Membership]): Future[void] {.gcsafe.}
+type OnWithdrawCallback* = proc(withdrawals: seq[Membership]): Future[void] {.gcsafe.}
 
 type GroupManagerResult*[T] = Result[T, string]
 
-type
-  GroupManager* = ref object of RootObj
-    idCredentials*: Option[IdentityCredential]
-    membershipIndex*: Option[MembershipIndex]
-    registerCb*: Option[OnRegisterCallback]
-    withdrawCb*: Option[OnWithdrawCallback]
-    rlnInstance*: ptr RLN
-    initialized*: bool
-    latestIndex*: MembershipIndex
-    validRoots*: Deque[MerkleNode]
-    onFatalErrorAction*: OnFatalErrorHandler
-    when defined(rln_v2):
-      userMessageLimit*: Option[UserMessageLimit]
+type GroupManager* = ref object of RootObj
+  idCredentials*: Option[IdentityCredential]
+  membershipIndex*: Option[MembershipIndex]
+  registerCb*: Option[OnRegisterCallback]
+  withdrawCb*: Option[OnWithdrawCallback]
+  rlnInstance*: ptr RLN
+  initialized*: bool
+  latestIndex*: MembershipIndex
+  validRoots*: Deque[MerkleNode]
+  onFatalErrorAction*: OnFatalErrorHandler
+  when defined(rln_v2):
+    userMessageLimit*: Option[UserMessageLimit]
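`GroupManager` above is a plain `ref object of RootObj` whose `base` methods raise until overridden, so each concrete manager (the static and on-chain ones in this tree) subclasses it and overrides every method. A hypothetical minimal subtype, only to illustrate the dispatch pattern (assumes this module and chronos are imported; `EchoGroupManager` is not a real type in the tree):

type EchoGroupManager = ref object of GroupManager

method init(g: EchoGroupManager): Future[void] {.async.} =
  # a real manager would load credentials and sync the tree here
  g.initialized = true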
" & $g.type & " is not implemented yet") +method init*(g: GroupManager): Future[void] {.base, async.} = + raise + newException(CatchableError, "init proc for " & $g.type & " is not implemented yet") # This proc is used to start the group sync process # It should be used to sync the group state with the rest of the group members -method startGroupSync*(g: GroupManager): Future[void] {.base, async: (raises: [Exception]).} = - raise newException(CatchableError, "startGroupSync proc for " & $g.type & " is not implemented yet") +method startGroupSync*( + g: GroupManager +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "startGroupSync proc for " & $g.type & " is not implemented yet" + ) # This proc is used to register a new identity commitment into the merkle tree # The user may or may not have the identity secret to this commitment # It should be used when detecting new members in the group, and syncing the group state when defined(rln_v2): - method register*(g: GroupManager, - rateCommitment: RateCommitment): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "register proc for " & $g.type & " is not implemented yet") + method register*( + g: GroupManager, rateCommitment: RateCommitment + ): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "register proc for " & $g.type & " is not implemented yet" + ) + else: - method register*(g: GroupManager, idCommitment: IDCommitment): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "register proc for " & $g.type & " is not implemented yet") + method register*( + g: GroupManager, idCommitment: IDCommitment + ): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "register proc for " & $g.type & " is not implemented yet" + ) # This proc is used to register a new identity commitment into the merkle tree # The user should have the identity secret to this commitment # It should be used when the user wants to join the group when defined(rln_v2): - method register*(g: GroupManager, - credentials: IdentityCredential, - userMessageLimit: UserMessageLimit): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "register proc for " & $g.type & " is not implemented yet") + method register*( + g: GroupManager, + credentials: IdentityCredential, + userMessageLimit: UserMessageLimit, + ): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "register proc for " & $g.type & " is not implemented yet" + ) + else: - method register*(g: GroupManager, credentials: IdentityCredential): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "register proc for " & $g.type & " is not implemented yet") + method register*( + g: GroupManager, credentials: IdentityCredential + ): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "register proc for " & $g.type & " is not implemented yet" + ) # This proc is used to register a batch of new identity commitments into the merkle tree # The user may or may not have the identity secret to these commitments # It should be used when detecting a batch of new members in the group, and syncing the group state when defined(rln_v2): - method registerBatch*(g: GroupManager, rateCommitments: seq[RateCommitment]): Future[void] {.base,async: (raises: [Exception]).} = - raise 
newException(CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet") + method registerBatch*( + g: GroupManager, rateCommitments: seq[RateCommitment] + ): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet" + ) + else: - method registerBatch*(g: GroupManager, idCommitments: seq[IDCommitment]): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet") + method registerBatch*( + g: GroupManager, idCommitments: seq[IDCommitment] + ): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet" + ) # This proc is used to set a callback that will be called when a new identity commitment is registered # The callback may be called multiple times, and should be used to for any post processing -method onRegister*(g: GroupManager, cb: OnRegisterCallback) {.base,gcsafe.} = +method onRegister*(g: GroupManager, cb: OnRegisterCallback) {.base, gcsafe.} = g.registerCb = some(cb) # This proc is used to withdraw/remove an identity commitment from the merkle tree # The user should have the identity secret hash to this commitment, by either deriving it, or owning it -method withdraw*(g: GroupManager, identitySecretHash: IdentitySecretHash): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "withdraw proc for " & $g.type & " is not implemented yet") +method withdraw*( + g: GroupManager, identitySecretHash: IdentitySecretHash +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "withdraw proc for " & $g.type & " is not implemented yet" + ) # This proc is used to withdraw/remove a batch of identity commitments from the merkle tree # The user should have the identity secret hash to these commitments, by either deriving them, or owning them -method withdrawBatch*(g: GroupManager, identitySecretHashes: seq[IdentitySecretHash]): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "withdrawBatch proc for " & $g.type & " is not implemented yet") +method withdrawBatch*( + g: GroupManager, identitySecretHashes: seq[IdentitySecretHash] +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "withdrawBatch proc for " & $g.type & " is not implemented yet" + ) # This proc is used to insert and remove a set of commitments from the merkle tree when defined(rln_v2): - method atomicBatch*(g: GroupManager, - rateCommitments: seq[RateCommitment], - toRemoveIndices: seq[MembershipIndex]): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet") -else: - method atomicBatch*(g: GroupManager, - idCommitments: seq[IDCommitment], - toRemoveIndices: seq[MembershipIndex]): Future[void] {.base,async: (raises: [Exception]).} = - raise newException(CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet") + method atomicBatch*( + g: GroupManager, + rateCommitments: seq[RateCommitment], + toRemoveIndices: seq[MembershipIndex], + ): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet" + ) -method stop*(g: GroupManager): Future[void] 
 
-proc slideRootQueue*(rootQueue: var Deque[MerkleNode], root: MerkleNode): seq[MerkleNode] =
+proc slideRootQueue*(
+    rootQueue: var Deque[MerkleNode], root: MerkleNode
+): seq[MerkleNode] =
   ## updates the root queue with the latest root and pops the oldest one when the capacity of `AcceptableRootWindowSize` is reached
   let overflowCount = rootQueue.len - AcceptableRootWindowSize + 1
   var overflowedRoots = newSeq[MerkleNode]()
@@ -140,12 +180,16 @@ proc slideRootQueue*(rootQueue: var Deque[MerkleNode], root: MerkleNode): seq[Me
   rootQueue.addLast(root)
   return overFlowedRoots
 
-method indexOfRoot*(g: GroupManager, root: MerkleNode): int {.base,gcsafe,raises:[].} =
+method indexOfRoot*(
+    g: GroupManager, root: MerkleNode
+): int {.base, gcsafe, raises: [].} =
   ## returns the index of the root in the merkle tree.
   ## returns -1 if the root is not found
   return g.validRoots.find(root)
 
-method validateRoot*(g: GroupManager, root: MerkleNode): bool {.base,gcsafe,raises:[].} =
+method validateRoot*(
+    g: GroupManager, root: MerkleNode
+): bool {.base, gcsafe, raises: [].} =
   ## validates the root against the valid roots queue
   # Check if the root is in the valid roots queue
   if g.indexOfRoot(root) >= 0:
@@ -165,21 +209,24 @@ template slideRootQueue*(g: GroupManager): untyped =
     discard rootBuffer.slideRootQueue(root)
   rootBuffer
 
-method verifyProof*(g: GroupManager,
-                    input: openArray[byte],
-                    proof: RateLimitProof): GroupManagerResult[bool] {.base,gcsafe,raises:[].} =
+method verifyProof*(
+    g: GroupManager, input: openArray[byte], proof: RateLimitProof
+): GroupManagerResult[bool] {.base, gcsafe, raises: [].} =
   ## verifies the proof against the input and the current merkle root
-  let proofVerifyRes = g.rlnInstance.proofVerify(input, proof, g.validRoots.items().toSeq())
+  let proofVerifyRes =
+    g.rlnInstance.proofVerify(input, proof, g.validRoots.items().toSeq())
   if proofVerifyRes.isErr():
     return err("proof verification failed: " & $proofVerifyRes.error())
   return ok(proofVerifyRes.value())
 
 when defined(rln_v2):
-  method generateProof*(g: GroupManager,
-                        data: openArray[byte],
-                        epoch: Epoch,
-                        messageId: MessageId,
-                        rlnIdentifier = DefaultRlnIdentifier): GroupManagerResult[RateLimitProof] {.base,gcsafe,raises:[].} =
+  method generateProof*(
+      g: GroupManager,
+      data: openArray[byte],
+      epoch: Epoch,
+      messageId: MessageId,
+      rlnIdentifier = DefaultRlnIdentifier,
+  ): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
     ## generates a proof for the given data and epoch
     ## the proof is generated using the current merkle root
     if g.idCredentials.isNone():
@@ -189,19 +236,22 @@ when defined(rln_v2):
     if g.userMessageLimit.isNone():
       return err("user message limit is not set")
     waku_rln_proof_generation_duration_seconds.nanosecondTime:
-      let proof = proofGen(rlnInstance = g.rlnInstance,
-                           data = data,
-                           membership = g.idCredentials.get(),
-                           index = g.membershipIndex.get(),
-                           epoch = epoch,
-                           userMessageLimit = g.userMessageLimit.get(),
-                           messageId = messageId).valueOr:
+      let proof = proofGen(
+        rlnInstance = g.rlnInstance,
+        data = data,
+        membership = g.idCredentials.get(),
+        index = g.membershipIndex.get(),
+        epoch = epoch,
+        userMessageLimit = g.userMessageLimit.get(),
+        messageId = messageId,
+      ).valueOr:
        return err("proof generation failed: " & $error)
     return ok(proof)
+
 else:
-  method generateProof*(g: GroupManager,
-                        data: openArray[byte],
-                        epoch: Epoch): GroupManagerResult[RateLimitProof] {.base,gcsafe,raises:[].} =
+  method generateProof*(
+      g: GroupManager, data: openArray[byte], epoch: Epoch
+  ): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
     ## generates a proof for the given data and epoch
     ## the proof is generated using the current merkle root
     if g.idCredentials.isNone():
@@ -209,13 +259,17 @@ else:
     if g.membershipIndex.isNone():
       return err("membership index is not set")
     waku_rln_proof_generation_duration_seconds.nanosecondTime:
-      let proof = proofGen(rlnInstance = g.rlnInstance,
-                           data = data,
-                           memKeys = g.idCredentials.get(),
-                           memIndex = g.membershipIndex.get(),
-                           epoch = epoch).valueOr:
+      let proof = proofGen(
+        rlnInstance = g.rlnInstance,
+        data = data,
+        memKeys = g.idCredentials.get(),
+        memIndex = g.membershipIndex.get(),
+        epoch = epoch,
+      ).valueOr:
         return err("proof generation failed: " & $error)
     return ok(proof)
 
-method isReady*(g: GroupManager): Future[bool] {.base,async.} =
-  raise newException(CatchableError, "isReady proc for " & $g.type & " is not implemented yet")
+method isReady*(g: GroupManager): Future[bool] {.base, async.} =
+  raise newException(
+    CatchableError, "isReady proc for " & $g.type & " is not implemented yet"
+  )
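To make the sliding root window above concrete, a self-contained sketch of `slideRootQueue` on a fresh queue, assuming `MerkleNode = array[32, byte]` as in protocol_types and the exported `AcceptableRootWindowSize` constant; the roots are fabricated:

var window = initDeque[MerkleNode]()
for i in 0 ..< AcceptableRootWindowSize + 3:
  var root: MerkleNode
  root[0] = byte(i) # stand-in for a real Merkle root
  discard window.slideRootQueue(root)
assert window.len == AcceptableRootWindowSize # capacity is enforced
assert window.peekFirst()[0] == 3 # the three oldest roots were evicted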
diff --git a/waku/waku_rln_relay/group_manager/on_chain.nim b/waku/waku_rln_relay/group_manager/on_chain.nim
index e6a900f97..68651e764 100644
--- a/waku/waku_rln_relay/group_manager/on_chain.nim
+++ b/waku/waku_rln_relay/group_manager/on_chain.nim
@@ -1,3 +1,3 @@
 import on_chain/group_manager
-export group_manager
\ No newline at end of file
+export group_manager
diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
index 07325d1d0..6237b287b 100644
--- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
+++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
@@ -38,7 +38,10 @@ when defined(rln_v2):
     # this map contains the address of a given storage slot
     proc storages(index: Uint16): Address {.pure.}
     # this serves as an entrypoint into the rln storage contract
-    proc register(storageIndex: Uint16, idCommitment: Uint256, userMessageLimit: Uint256)
+    proc register(
+      storageIndex: Uint16, idCommitment: Uint256, userMessageLimit: Uint256
+    )
+
     # this creates a new storage on the rln registry
     proc newStorage(maxMessageLimit: Uint256)
     # Initializes the implementation contract (only used in unit tests)
@@ -47,7 +50,10 @@ when defined(rln_v2):
   # membership contract interface
   contract(RlnStorage):
     # this event is raised when a new member is registered
-    proc MemberRegistered(idCommitment: Uint256, userMessageLimit: Uint256, index: Uint256) {.event.}
+    proc MemberRegistered(
+      idCommitment: Uint256, userMessageLimit: Uint256, index: Uint256
+    ) {.event.}
+
     # this constant contains the membership deposit of the contract
     proc MEMBERSHIP_DEPOSIT(): Uint256 {.pure.}
     # this map denotes existence of a given user
@@ -80,7 +86,6 @@ else:
     # this constant describes the block number this contract was deployed on
     proc deployedBlockNumber(): Uint256 {.view.}
 
-
 type
   RegistryContractWithSender = Sender[WakuRlnRegistry]
   RlnContractWithSender = Sender[RlnStorage]
@@ -116,41 +121,46 @@ template initializedGuard(g: OnchainGroupManager): untyped =
   if not g.initialized:
     raise newException(CatchableError, "OnchainGroupManager is not initialized")
 
-template retryWrapper(g: OnchainGroupManager, res: auto, errStr: string, body: untyped): auto =
+template retryWrapper(
+    g: OnchainGroupManager, res: auto, errStr: string, body: untyped
+): auto =
   retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction):
     body
-
 proc setMetadata*(g: OnchainGroupManager): RlnRelayResult[void] =
   try:
-    let metadataSetRes = g.rlnInstance.setMetadata(RlnMetadata(
-      lastProcessedBlock: g.latestProcessedBlock,
-      chainId: uint64(g.chainId.get()),
-      contractAddress: g.ethContractAddress,
-      validRoots: g.validRoots.toSeq()))
+    let metadataSetRes = g.rlnInstance.setMetadata(
+      RlnMetadata(
+        lastProcessedBlock: g.latestProcessedBlock,
+        chainId: uint64(g.chainId.get()),
+        contractAddress: g.ethContractAddress,
+        validRoots: g.validRoots.toSeq(),
+      )
+    )
     if metadataSetRes.isErr():
      return err("failed to persist rln metadata: " & metadataSetRes.error)
   except CatchableError:
     return err("failed to persist rln metadata: " & getCurrentExceptionMsg())
   return ok()
-
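For reference, the `RlnMetadata` value persisted by `setMetadata` above can be constructed in isolation; every field below is an illustrative placeholder (11155111 being the Sepolia chain id of the deployment referenced earlier):

let metadata = RlnMetadata(
  lastProcessedBlock: 5_000_000'u64,
  chainId: 11155111'u64,
  contractAddress: "0x0000000000000000000000000000000000000000", # placeholder
  validRoots: newSeq[MerkleNode](),
)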
Uint256, userMessageLimit: Uint256, index: Uint256 + ) {.event.} + # this constant contains the membership deposit of the contract proc MEMBERSHIP_DEPOSIT(): Uint256 {.pure.} # this map denotes existence of a given user @@ -80,7 +86,6 @@ else: # this constant describes the block number this contract was deployed on proc deployedBlockNumber(): Uint256 {.view.} - type RegistryContractWithSender = Sender[WakuRlnRegistry] RlnContractWithSender = Sender[RlnStorage] @@ -116,41 +121,46 @@ template initializedGuard(g: OnchainGroupManager): untyped = if not g.initialized: raise newException(CatchableError, "OnchainGroupManager is not initialized") -template retryWrapper(g: OnchainGroupManager, res: auto, errStr: string, body: untyped): auto = +template retryWrapper( + g: OnchainGroupManager, res: auto, errStr: string, body: untyped +): auto = retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction): body - proc setMetadata*(g: OnchainGroupManager): RlnRelayResult[void] = try: - let metadataSetRes = g.rlnInstance.setMetadata(RlnMetadata( - lastProcessedBlock: g.latestProcessedBlock, - chainId: uint64(g.chainId.get()), - contractAddress: g.ethContractAddress, - validRoots: g.validRoots.toSeq())) + let metadataSetRes = g.rlnInstance.setMetadata( + RlnMetadata( + lastProcessedBlock: g.latestProcessedBlock, + chainId: uint64(g.chainId.get()), + contractAddress: g.ethContractAddress, + validRoots: g.validRoots.toSeq(), + ) + ) if metadataSetRes.isErr(): return err("failed to persist rln metadata: " & metadataSetRes.error) except CatchableError: return err("failed to persist rln metadata: " & getCurrentExceptionMsg()) return ok() - when defined(rln_v2): - method atomicBatch*(g: OnchainGroupManager, - start: MembershipIndex, - rateCommitments = newSeq[RateCommitment](), - toRemoveIndices = newSeq[MembershipIndex]()): - Future[void] {.async: (raises: [Exception]).} = + method atomicBatch*( + g: OnchainGroupManager, + start: MembershipIndex, + rateCommitments = newSeq[RateCommitment](), + toRemoveIndices = newSeq[MembershipIndex](), + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) # convert the rateCommitment struct to a leaf value let leaves = rateCommitments.toLeaves().valueOr: - raise newException(ValueError, "failed to convert rateCommitments to leaves: " & $error) + raise newException( + ValueError, "failed to convert rateCommitments to leaves: " & $error + ) waku_rln_membership_insertion_duration_seconds.nanosecondTime: - let operationSuccess = g.rlnInstance.atomicWrite(some(start), - leaves, - toRemoveIndices) + let operationSuccess = + g.rlnInstance.atomicWrite(some(start), leaves, toRemoveIndices) if not operationSuccess: raise newException(CatchableError, "atomic batch operation failed") # TODO: when slashing is enabled, we need to track slashed members @@ -168,17 +178,20 @@ when defined(rln_v2): g.validRootBuffer = g.slideRootQueue() let setMetadataRes = g.setMetadata() if setMetadataRes.isErr(): - error "failed to persist rln metadata", error=setMetadataRes.error + error "failed to persist rln metadata", error = setMetadataRes.error + else: - method atomicBatch*(g: OnchainGroupManager, - start: MembershipIndex, - idCommitments = newSeq[IDCommitment](), - toRemoveIndices = newSeq[MembershipIndex]()): - Future[void] {.async: (raises: [Exception]).} = + method atomicBatch*( + g: OnchainGroupManager, + start: MembershipIndex, + idCommitments = newSeq[IDCommitment](), + toRemoveIndices = newSeq[MembershipIndex](), + ): Future[void] {.async: (raises: [Exception]).} = 
initializedGuard(g) waku_rln_membership_insertion_duration_seconds.nanosecondTime: - let operationSuccess = g.rlnInstance.atomicWrite(some(start), idCommitments, toRemoveIndices) + let operationSuccess = + g.rlnInstance.atomicWrite(some(start), idCommitments, toRemoveIndices) if not operationSuccess: raise newException(ValueError, "atomic batch operation failed") # TODO: when slashing is enabled, we need to track slashed members @@ -196,42 +209,48 @@ else: g.validRootBuffer = g.slideRootQueue() let setMetadataRes = g.setMetadata() if setMetadataRes.isErr(): - error "failed to persist rln metadata", error=setMetadataRes.error + error "failed to persist rln metadata", error = setMetadataRes.error when defined(rln_v2): - method register*(g: OnchainGroupManager, - rateCommitment: RateCommitment): Future[void] {.async: (raises: [Exception]).} = + method register*( + g: OnchainGroupManager, rateCommitment: RateCommitment + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) await g.registerBatch(@[rateCommitment]) + else: - method register*(g: OnchainGroupManager, - idCommitment: IDCommitment): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) - - await g.registerBatch(@[idCommitment]) + method register*( + g: OnchainGroupManager, idCommitment: IDCommitment + ): Future[void] {.async: (raises: [Exception]).} = + initializedGuard(g) + await g.registerBatch(@[idCommitment]) when defined(rln_v2): - method registerBatch*(g: OnchainGroupManager, - rateCommitments: seq[RateCommitment]): Future[void] {.async: (raises: [Exception]).} = + method registerBatch*( + g: OnchainGroupManager, rateCommitments: seq[RateCommitment] + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) await g.atomicBatch(g.latestIndex, rateCommitments) g.latestIndex += MembershipIndex(rateCommitments.len) + else: - method registerBatch*(g: OnchainGroupManager, - idCommitments: seq[IDCommitment]): Future[void] {.async: (raises: [Exception]).} = + method registerBatch*( + g: OnchainGroupManager, idCommitments: seq[IDCommitment] + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) await g.atomicBatch(g.latestIndex, idCommitments) g.latestIndex += MembershipIndex(idCommitments.len) - when defined(rln_v2): - method register*(g: OnchainGroupManager, - identityCredential: IdentityCredential, - userMessageLimit: UserMessageLimit): Future[void] {.async: (raises: [Exception]).} = + method register*( + g: OnchainGroupManager, + identityCredential: IdentityCredential, + userMessageLimit: UserMessageLimit, + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) let ethRpc = g.ethRpc.get() @@ -245,9 +264,14 @@ when defined(rln_v2): var txHash: TxHash let storageIndex = g.usingStorageIndex.get() - debug "registering the member", idCommitment = idCommitment, storageIndex = storageIndex, userMessageLimit = userMessageLimit + debug "registering the member", + idCommitment = idCommitment, + storageIndex = storageIndex, + userMessageLimit = userMessageLimit g.retryWrapper(txHash, "Failed to register the member"): - await registryContract.register(storageIndex, idCommitment, u256(userMessageLimit)).send(gasPrice = gasPrice) + await registryContract + .register(storageIndex, idCommitment, u256(userMessageLimit)) + .send(gasPrice = gasPrice) # wait for the transaction to be mined var tsReceipt: ReceiptObject @@ -259,84 +283,94 @@ when defined(rln_v2): # TODO: make this robust. 
search within the event list for the event let firstTopic = tsReceipt.logs[0].topics[0] # the hash of the signature of MemberRegistered(uint256,uint256,uint256) event is equal to the following hex value - if firstTopic != cast[FixedBytes[32]](keccak256.digest("MemberRegistered(uint256,uint256,uint256)").data): + if firstTopic != + cast[FixedBytes[32]](keccak256.digest( + "MemberRegistered(uint256,uint256,uint256)" + ).data): raise newException(ValueError, "unexpected event signature") # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field # data = idCommitment encoded as 256 bits || userMessageLimit encoded as 256 bits || index encoded as 256 bits let arguments = tsReceipt.logs[0].data - debug "tx log data", arguments=arguments + debug "tx log data", arguments = arguments let argumentsBytes = arguments # In TX log data, uints are encoded in big endian - membershipIndex = UInt256.fromBytesBE(argumentsBytes[64..^1]) + membershipIndex = UInt256.fromBytesBE(argumentsBytes[64 ..^ 1]) g.userMessageLimit = some(userMessageLimit) g.membershipIndex = some(membershipIndex.toMembershipIndex()) # don't handle member insertion into the tree here, it will be handled by the event listener return + else: - method register*(g: OnchainGroupManager, - credentials: IdentityCredential): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) + method register*( + g: OnchainGroupManager, credentials: IdentityCredential + ): Future[void] {.async: (raises: [Exception]).} = + initializedGuard(g) - let ethRpc = g.ethRpc.get() - let registryContract = g.registryContract.get() - let membershipFee = g.membershipFee.get() + let ethRpc = g.ethRpc.get() + let registryContract = g.registryContract.get() + let membershipFee = g.membershipFee.get() - var gasPrice: int - g.retryWrapper(gasPrice, "Failed to get gas price"): - int(await ethRpc.provider.eth_gasPrice()) * 2 - let idCommitment = credentials.idCommitment.toUInt256() + var gasPrice: int + g.retryWrapper(gasPrice, "Failed to get gas price"): + int(await ethRpc.provider.eth_gasPrice()) * 2 + let idCommitment = credentials.idCommitment.toUInt256() - var txHash: TxHash - let storageIndex = g.usingStorageIndex.get() - debug "registering the member", idCommitment = idCommitment, storageIndex = storageIndex - g.retryWrapper(txHash, "Failed to register the member"): - await registryContract.register(storageIndex, idCommitment).send(gasPrice = gasPrice) + var txHash: TxHash + let storageIndex = g.usingStorageIndex.get() + debug "registering the member", + idCommitment = idCommitment, storageIndex = storageIndex + g.retryWrapper(txHash, "Failed to register the member"): + await registryContract.register(storageIndex, idCommitment).send( + gasPrice = gasPrice + ) - # wait for the transaction to be mined - var tsReceipt: ReceiptObject - g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"): - await ethRpc.getMinedTransactionReceipt(txHash) - debug "registration transaction mined", txHash = txHash - g.registrationTxHash = some(txHash) - # the receipt topic holds the hash of signature of the raised events - # TODO: make this robust.
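# A short sketch of the receipt check above: the first log topic must equal
# the keccak256 hash of the canonical event signature. nimcrypto is assumed
# here, since the keccak256.digest call above comes from it.
import nimcrypto

when isMainModule:
  let v1Topic = keccak256.digest("MemberRegistered(uint256,uint256)")
  let v2Topic = keccak256.digest("MemberRegistered(uint256,uint256,uint256)")
  # distinct signatures hash to distinct topics, so a v1 check rejects a v2
  # registration event and vice versa
  doAssert v1Topic != v2Topic
  echo "v2 topic: 0x", $v2Topic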
search within the event list for the event - let firstTopic = tsReceipt.logs[0].topics[0] - # the hash of the signature of MemberRegistered(uint256,uint256) event is equal to the following hex value - if firstTopic != cast[FixedBytes[32]](keccak256.digest("MemberRegistered(uint256,uint256)").data): - raise newException(ValueError, "unexpected event signature") + # wait for the transaction to be mined + var tsReceipt: ReceiptObject + g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"): + await ethRpc.getMinedTransactionReceipt(txHash) + debug "registration transaction mined", txHash = txHash + g.registrationTxHash = some(txHash) + # the receipt topic holds the hash of signature of the raised events + # TODO: make this robust. search within the event list for the event + let firstTopic = tsReceipt.logs[0].topics[0] + # the hash of the signature of MemberRegistered(uint256,uint256) event is equal to the following hex value + if firstTopic != + cast[FixedBytes[32]](keccak256.digest("MemberRegistered(uint256,uint256)").data): + raise newException(ValueError, "unexpected event signature") - # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field - # data = pk encoded as 256 bits || index encoded as 256 bits - let arguments = tsReceipt.logs[0].data - debug "tx log data", arguments=arguments - let - argumentsBytes = arguments - # In TX log data, uints are encoded in big endian - eventIndex = UInt256.fromBytesBE(argumentsBytes[32..^1]) + # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field + # data = pk encoded as 256 bits || index encoded as 256 bits + let arguments = tsReceipt.logs[0].data + debug "tx log data", arguments = arguments + let + argumentsBytes = arguments + # In TX log data, uints are encoded in big endian + eventIndex = UInt256.fromBytesBE(argumentsBytes[32 ..^ 1]) - g.membershipIndex = some(eventIndex.toMembershipIndex()) + g.membershipIndex = some(eventIndex.toMembershipIndex()) - # don't handle member insertion into the tree here, it will be handled by the event listener - return + # don't handle member insertion into the tree here, it will be handled by the event listener + return -method withdraw*(g: OnchainGroupManager, idCommitment: IDCommitment): - Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) +method withdraw*( + g: OnchainGroupManager, idCommitment: IDCommitment +): Future[void] {.async: (raises: [Exception]).} = + initializedGuard(g) # TODO: after slashing is enabled on the contract - # TODO: after slashing is enabled on the contract - -method withdrawBatch*(g: OnchainGroupManager, idCommitments: seq[IDCommitment]): - Future[void] {.async: (raises: [Exception]).} = +method withdrawBatch*( + g: OnchainGroupManager, idCommitments: seq[IDCommitment] +): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) # TODO: after slashing is enabled on the contract, use atomicBatch internally -proc parseEvent(event: type MemberRegistered, - log: JsonNode): GroupManagerResult[Membership] = +proc parseEvent( + event: type MemberRegistered, log: JsonNode +): GroupManagerResult[Membership] = ## parses the `data` parameter of the `MemberRegistered` event `log` ## returns an error if it cannot parse the `data` parameter var idComm: UInt256 @@ -348,7 +382,10 @@ proc parseEvent(event: type MemberRegistered, try: data = strip0xPrefix(log["data"].getStr()) except CatchableError: - return err("failed to parse the data field of the MemberRegistered event: " & 
getCurrentExceptionMsg()) + return err( + "failed to parse the data field of the MemberRegistered event: " & + getCurrentExceptionMsg() + ) var offset = 0 try: # Parse the idComm @@ -359,38 +396,55 @@ proc parseEvent(event: type MemberRegistered, # Parse the index offset += decode(data, offset, index) when defined(rln_v2): - return ok(Membership(rateCommitment: RateCommitment(idCommitment: idComm.toIDCommitment(), - userMessageLimit: userMessageLimit.toUserMessageLimit()), - index: index.toMembershipIndex())) + return ok( + Membership( + rateCommitment: RateCommitment( + idCommitment: idComm.toIDCommitment(), + userMessageLimit: userMessageLimit.toUserMessageLimit(), + ), + index: index.toMembershipIndex(), + ) + ) else: - return ok(Membership(idCommitment: idComm.toIDCommitment(), index: index.toMembershipIndex())) + return ok( + Membership( + idCommitment: idComm.toIDCommitment(), index: index.toMembershipIndex() + ) + ) except CatchableError: return err("failed to parse the data field of the MemberRegistered event") type BlockTable* = OrderedTable[BlockNumber, seq[(Membership, bool)]] -proc backfillRootQueue*(g: OnchainGroupManager, len: uint): - Future[void] {.async: (raises: [Exception]).} = +proc backfillRootQueue*( + g: OnchainGroupManager, len: uint +): Future[void] {.async: (raises: [Exception]).} = if len > 0: # backfill the tree's acceptable roots - for i in 0..len-1: + for i in 0 .. len - 1: # remove the last root g.validRoots.popLast() - for i in 0..len-1: + for i in 0 .. len - 1: # add the backfilled root g.validRoots.addLast(g.validRootBuffer.popLast()) -proc insert(blockTable: var BlockTable, blockNumber: BlockNumber, member: Membership, removed: bool) = +proc insert( + blockTable: var BlockTable, + blockNumber: BlockNumber, + member: Membership, + removed: bool, +) = let memberTuple = (member, removed) if blockTable.hasKeyOrPut(blockNumber, @[memberTuple]): try: blockTable[blockNumber].add(memberTuple) except KeyError: # qed - error "could not insert member into block table", blockNumber=blockNumber, member=member + error "could not insert member into block table", + blockNumber = blockNumber, member = member -proc getRawEvents(g: OnchainGroupManager, - fromBlock: BlockNumber, - toBlock: BlockNumber): Future[JsonNode] {.async: (raises: [Exception]).} = +proc getRawEvents( + g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber +): Future[JsonNode] {.async: (raises: [Exception]).} = initializedGuard(g) let ethRpc = g.ethRpc.get() @@ -398,14 +452,16 @@ proc getRawEvents(g: OnchainGroupManager, var events: JsonNode g.retryWrapper(events, "Failed to get the events"): - await rlnContract.getJsonLogs(MemberRegistered, - fromBlock = some(fromBlock.blockId()), - toBlock = some(toBlock.blockId())) + await rlnContract.getJsonLogs( + MemberRegistered, + fromBlock = some(fromBlock.blockId()), + toBlock = some(toBlock.blockId()), + ) return events -proc getBlockTable(g: OnchainGroupManager, - fromBlock: BlockNumber, - toBlock: BlockNumber): Future[BlockTable] {.async: (raises: [Exception]).} = +proc getBlockTable( + g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber +): Future[BlockTable] {.async: (raises: [Exception]).} = initializedGuard(g) var blockTable = default(BlockTable) @@ -421,15 +477,16 @@ proc getBlockTable(g: OnchainGroupManager, let removed = event["removed"].getBool() let parsedEventRes = parseEvent(MemberRegistered, event) if parsedEventRes.isErr(): - error "failed to parse the MemberRegistered event", error=parsedEventRes.error() + 
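# A standalone sketch of the event-data layout parseEvent decodes above in the
# rln_v2 case: three 32-byte big-endian words, idCommitment, userMessageLimit
# and index, in that order. The real code uses the web3 ABI decoder; word32
# below is a hypothetical helper that just slices the blob.
proc word32(data: openArray[byte], n: int): array[32, byte] =
  for i in 0 ..< 32:
    result[i] = data[n * 32 + i]

when isMainModule:
  var data: array[96, byte]
  data[31] = 0x2a # idCommitment = 42
  data[63] = 0x64 # userMessageLimit = 100
  data[95] = 0x07 # index = 7
  doAssert word32(data, 2)[31] == 0x07 # the membership index is the third word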
error "failed to parse the MemberRegistered event", error = parsedEventRes.error() raise newException(ValueError, "failed to parse the MemberRegistered event") let parsedEvent = parsedEventRes.get() blockTable.insert(blockNumber, parsedEvent, removed) return blockTable -proc handleEvents(g: OnchainGroupManager, - blockTable: BlockTable): Future[void] {.async: (raises: [Exception]).} = +proc handleEvents( + g: OnchainGroupManager, blockTable: BlockTable +): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) for blockNumber, members in blockTable.pairs(): @@ -438,26 +495,31 @@ proc handleEvents(g: OnchainGroupManager, let removalIndices = members.filterIt(it[1]).mapIt(it[0].index) when defined(rln_v2): let rateCommitments = members.mapIt(it[0].rateCommitment) - await g.atomicBatch(start = startIndex, - rateCommitments = rateCommitments, - toRemoveIndices = removalIndices) + await g.atomicBatch( + start = startIndex, + rateCommitments = rateCommitments, + toRemoveIndices = removalIndices, + ) g.latestIndex = startIndex + MembershipIndex(rateCommitments.len) - trace "new members added to the Merkle tree", commitments=rateCommitments + trace "new members added to the Merkle tree", commitments = rateCommitments else: let idCommitments = members.mapIt(it[0].idCommitment) - await g.atomicBatch(start = startIndex, - idCommitments = idCommitments, - toRemoveIndices = removalIndices) + await g.atomicBatch( + start = startIndex, + idCommitments = idCommitments, + toRemoveIndices = removalIndices, + ) g.latestIndex = startIndex + MembershipIndex(idCommitments.len) - trace "new members added to the Merkle tree", commitments=idCommitments + trace "new members added to the Merkle tree", commitments = idCommitments except CatchableError: - error "failed to insert members into the tree", error=getCurrentExceptionMsg() + error "failed to insert members into the tree", error = getCurrentExceptionMsg() raise newException(ValueError, "failed to insert members into the tree") return -proc handleRemovedEvents(g: OnchainGroupManager, blockTable: BlockTable): - Future[void] {.async: (raises: [Exception]).} = +proc handleRemovedEvents( + g: OnchainGroupManager, blockTable: BlockTable +): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) # count number of blocks that have been removed @@ -468,26 +530,26 @@ proc handleRemovedEvents(g: OnchainGroupManager, blockTable: BlockTable): await g.backfillRootQueue(numRemovedBlocks) -proc getAndHandleEvents(g: OnchainGroupManager, - fromBlock: BlockNumber, - toBlock: BlockNumber): Future[bool] {.async: (raises: [Exception]).} = +proc getAndHandleEvents( + g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber +): Future[bool] {.async: (raises: [Exception]).} = initializedGuard(g) let blockTable = await g.getBlockTable(fromBlock, toBlock) try: await g.handleEvents(blockTable) await g.handleRemovedEvents(blockTable) except CatchableError: - error "failed to handle events", error=getCurrentExceptionMsg() + error "failed to handle events", error = getCurrentExceptionMsg() raise newException(ValueError, "failed to handle events") g.latestProcessedBlock = toBlock let metadataSetRes = g.setMetadata() if metadataSetRes.isErr(): # this is not a fatal error, hence we don't raise an exception - warn "failed to persist rln metadata", error=metadataSetRes.error() + warn "failed to persist rln metadata", error = metadataSetRes.error() else: trace "rln metadata persisted", blockNumber = g.latestProcessedBlock - + return true proc 
runInInterval(g: OnchainGroupManager, cb: proc, interval: Duration) = @@ -506,7 +568,6 @@ proc runInInterval(g: OnchainGroupManager, cb: proc, interval: Duration) = # we make use of the error handling provided by # OnFatalErrorHandler asyncSpawn runIntervalLoop() - proc getNewBlockCallback(g: OnchainGroupManager): proc = let ethRpc = g.ethRpc.get() @@ -514,7 +575,7 @@ proc getNewBlockCallback(g: OnchainGroupManager): proc = var latestBlock: BlockNumber g.retryWrapper(latestBlock, "Failed to get the latest block number"): cast[BlockNumber](await ethRpc.provider.eth_blockNumber()) - + if latestBlock <= g.latestProcessedBlock: return # get logs from the last block @@ -524,18 +585,21 @@ proc getNewBlockCallback(g: OnchainGroupManager): proc = g.retryWrapper(handleBlockRes, "Failed to handle new block"): await g.getAndHandleEvents(fromBlock, latestBlock) return true + return wrappedCb -proc startListeningToEvents(g: OnchainGroupManager): - Future[void] {.async: (raises: [Exception]).} = +proc startListeningToEvents( + g: OnchainGroupManager +): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) let ethRpc = g.ethRpc.get() let newBlockCallback = g.getNewBlockCallback() g.runInInterval(newBlockCallback, DefaultBlockPollRate) -proc startOnchainSync(g: OnchainGroupManager): - Future[void] {.async: (raises: [Exception]).} = +proc startOnchainSync( + g: OnchainGroupManager +): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) let ethRpc = g.ethRpc.get() @@ -543,12 +607,14 @@ proc startOnchainSync(g: OnchainGroupManager): # static block chunk size let blockChunkSize = 2_000 - var fromBlock = if g.latestProcessedBlock > g.rlnContractDeployedBlockNumber: - info "syncing from last processed block", blockNumber = g.latestProcessedBlock - g.latestProcessedBlock + 1 - else: - info "syncing from rln contract deployed block", blockNumber = g.rlnContractDeployedBlockNumber - g.rlnContractDeployedBlockNumber + var fromBlock = + if g.latestProcessedBlock > g.rlnContractDeployedBlockNumber: + info "syncing from last processed block", blockNumber = g.latestProcessedBlock + g.latestProcessedBlock + 1 + else: + info "syncing from rln contract deployed block", + blockNumber = g.rlnContractDeployedBlockNumber + g.rlnContractDeployedBlockNumber try: # we always want to sync from last processed block => latest @@ -563,27 +629,35 @@ proc startOnchainSync(g: OnchainGroupManager): let toBlock = min(fromBlock + BlockNumber(blockChunkSize), currentLatestBlock) debug "fetching events", fromBlock = fromBlock, toBlock = toBlock var handleBlockRes: bool - g.retryWrapper(handleBlockRes, "Failed to handle old blocks"): + g.retryWrapper(handleBlockRes, "Failed to handle old blocks"): await g.getAndHandleEvents(fromBlock, toBlock) fromBlock = toBlock + 1 - except CatchableError: - raise newException(ValueError, "failed to get the history/reconcile missed blocks: " & getCurrentExceptionMsg()) + raise newException( + ValueError, + "failed to get the history/reconcile missed blocks: " & getCurrentExceptionMsg(), + ) # listen to blockheaders and contract events try: await g.startListeningToEvents() except CatchableError: - raise newException(ValueError, "failed to start listening to events: " & getCurrentExceptionMsg()) + raise newException( + ValueError, "failed to start listening to events: " & getCurrentExceptionMsg() + ) -method startGroupSync*(g: OnchainGroupManager): - Future[void] {.async: (raises: [Exception]).} = +method startGroupSync*( + g: OnchainGroupManager +): Future[void] {.async: 
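# A self-contained sketch of the chunked catch-up scan in startOnchainSync
# above: the block range is walked in fixed-size steps so that a single
# eth_getLogs query never spans more than blockChunkSize blocks.
type DemoBlockNumber = uint64

iterator blockChunks(fromBlock, latest, chunk: DemoBlockNumber): (DemoBlockNumber, DemoBlockNumber) =
  var lo = fromBlock
  while lo <= latest:
    let hi = min(lo + chunk, latest) # mirrors min(fromBlock + chunkSize, currentLatestBlock)
    yield (lo, hi)
    lo = hi + 1

when isMainModule:
  var spans: seq[(DemoBlockNumber, DemoBlockNumber)]
  for span in blockChunks(0, 4_500, 2_000):
    spans.add(span)
  doAssert spans == @[(0'u64, 2000'u64), (2001'u64, 4001'u64), (4002'u64, 4500'u64)]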
(raises: [Exception]).} = initializedGuard(g) # Get archive history try: await startOnchainSync(g) except CatchableError: - raise newException(CatchableError, "failed to start onchain sync service: " & getCurrentExceptionMsg()) + raise newException( + CatchableError, + "failed to start onchain sync service: " & getCurrentExceptionMsg(), + ) return method onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} = @@ -609,7 +683,8 @@ method init*(g: OnchainGroupManager): Future[void] {.async.} = if pkParseRes.isErr(): raise newException(ValueError, "could not parse the private key") ethRpc.privateKey = some(pkParseRes.get()) - ethRpc.defaultAccount = ethRpc.privateKey.get().toPublicKey().toCanonicalAddress().Address + ethRpc.defaultAccount = + ethRpc.privateKey.get().toPublicKey().toCanonicalAddress().Address let registryAddress = web3.fromHex(web3.Address, g.ethContractAddress) let registryContract = ethRpc.contractSender(WakuRlnRegistry, registryAddress) @@ -631,38 +706,45 @@ method init*(g: OnchainGroupManager): Future[void] {.async.} = if g.keystorePath.isSome() and g.keystorePassword.isSome(): if not existsFile(g.keystorePath.get()): - error "File provided as keystore path does not exist", path=g.keystorePath.get() + error "File provided as keystore path does not exist", path = g.keystorePath.get() raise newException(CatchableError, "missing keystore") - + var keystoreQuery = KeystoreMembership( - membershipContract: MembershipContract( - chainId: $g.chainId.get(), - address: g.ethContractAddress - ) + membershipContract: + MembershipContract(chainId: $g.chainId.get(), address: g.ethContractAddress) ) if g.membershipIndex.isSome(): keystoreQuery.treeIndex = MembershipIndex(g.membershipIndex.get()) waku_rln_membership_credentials_import_duration_seconds.nanosecondTime: - let keystoreCredRes = getMembershipCredentials(path = g.keystorePath.get(), - password = g.keystorePassword.get(), - query = keystoreQuery, - appInfo = RLNAppInfo) + let keystoreCredRes = getMembershipCredentials( + path = g.keystorePath.get(), + password = g.keystorePassword.get(), + query = keystoreQuery, + appInfo = RLNAppInfo, + ) if keystoreCredRes.isErr(): - raise newException(CatchableError, "could not parse the keystore: " & $keystoreCredRes.error) + raise newException( + CatchableError, "could not parse the keystore: " & $keystoreCredRes.error + ) let keystoreCred = keystoreCredRes.get() g.membershipIndex = some(keystoreCred.treeIndex) when defined(rln_v2): g.userMessageLimit = some(keystoreCred.userMessageLimit) # now we check on the contract if the commitment actually has a membership try: - let membershipExists = await rlnContract.memberExists(keystoreCred - .identityCredential - .idCommitment.toUInt256()).call() + let membershipExists = await rlnContract + .memberExists(keystoreCred.identityCredential.idCommitment.toUInt256()) + .call() if membershipExists == 0: - raise newException(CatchableError, "the provided commitment does not have a membership") + raise newException( + CatchableError, "the provided commitment does not have a membership" + ) except CatchableError: - raise newException(CatchableError, "could not check if the commitment exists on the contract: " & - getCurrentExceptionMsg()) + raise newException( + CatchableError, + "could not check if the commitment exists on the contract: " & + getCurrentExceptionMsg(), + ) g.idCredentials = some(keystoreCred.identityCredential) @@ -692,10 +774,11 @@ method init*(g: OnchainGroupManager): Future[void] {.async.} = 
g.rlnContractDeployedBlockNumber = cast[BlockNumber](deployedBlockNumber) g.latestProcessedBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber) - proc onDisconnect() {.async.} = + proc onDisconnect() {.async.} = error "Ethereum client disconnected" let fromBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber) - info "reconnecting with the Ethereum client, and restarting group sync", fromBlock = fromBlock + info "reconnecting with the Ethereum client, and restarting group sync", + fromBlock = fromBlock var newEthRpc: Web3 g.retryWrapper(newEthRpc, "Failed to reconnect with the Ethereum client"): await newWeb3(g.ethClientUrl) @@ -705,19 +788,19 @@ method init*(g: OnchainGroupManager): Future[void] {.async.} = try: await g.startOnchainSync() except CatchableError, Exception: - g.onFatalErrorAction("failed to restart group sync" & ": " & getCurrentExceptionMsg()) - + g.onFatalErrorAction( + "failed to restart group sync" & ": " & getCurrentExceptionMsg() + ) ethRpc.ondisconnect = proc() = asyncSpawn onDisconnect() - waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet())) g.initialized = true -method stop*(g: OnchainGroupManager): Future[void] {.async,gcsafe.} = +method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} = g.blockFetchingActive = false - + if g.ethRpc.isSome(): g.ethRpc.get().ondisconnect = nil await g.ethRpc.get().close() @@ -727,7 +810,7 @@ method stop*(g: OnchainGroupManager): Future[void] {.async,gcsafe.} = g.initialized = false -proc isSyncing*(g: OnchainGroupManager): Future[bool] {.async,gcsafe.} = +proc isSyncing*(g: OnchainGroupManager): Future[bool] {.async, gcsafe.} = let ethRpc = g.ethRpc.get() var syncing: JsonNode @@ -735,8 +818,7 @@ proc isSyncing*(g: OnchainGroupManager): Future[bool] {.async,gcsafe.} = await ethRpc.provider.eth_syncing() return syncing.getBool() -method isReady*(g: OnchainGroupManager): - Future[bool] {.async.} = +method isReady*(g: OnchainGroupManager): Future[bool] {.async.} = initializedGuard(g) if g.ethRpc.isNone(): @@ -744,10 +826,7 @@ method isReady*(g: OnchainGroupManager): var currentBlock: BlockNumber g.retryWrapper(currentBlock, "Failed to get the current block number"): - cast[BlockNumber](await g.ethRpc - .get() - .provider - .eth_blockNumber()) + cast[BlockNumber](await g.ethRpc.get().provider.eth_blockNumber()) if g.latestProcessedBlock < currentBlock: return false diff --git a/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim b/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim index 5a1c5ec4f..eaa239c80 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim @@ -1,7 +1,5 @@ -import - ../../../common/error_handling -import - chronos +import ../../../common/error_handling +import chronos type RetryStrategy* = object shouldRetry*: bool @@ -9,18 +7,15 @@ type RetryStrategy* = object retryCount*: uint proc new*(T: type RetryStrategy): RetryStrategy = - return RetryStrategy( - shouldRetry: true, - retryDelay: 1000.millis, - retryCount: 3 - ) + return RetryStrategy(shouldRetry: true, retryDelay: 1000.millis, retryCount: 3) - -template retryWrapper*(res: auto, - retryStrategy: RetryStrategy, - errStr: string, - errCallback: OnFatalErrorHandler = nil, - body: untyped): auto = +template retryWrapper*( + res: auto, + retryStrategy: RetryStrategy, + errStr: string, + errCallback: OnFatalErrorHandler = nil, + body: untyped, +): auto = var retryCount = 
retryStrategy.retryCount var shouldRetry = retryStrategy.shouldRetry var exceptionMessage = "" @@ -35,7 +30,9 @@ template retryWrapper*(res: auto, await sleepAsync(retryStrategy.retryDelay) if shouldRetry: if errCallback == nil: - raise newException(CatchableError, errStr & " errCallback == nil: " & exceptionMessage) + raise newException( + CatchableError, errStr & " errCallback == nil: " & exceptionMessage + ) else: errCallback(errStr & ": " & exceptionMessage) return diff --git a/waku/waku_rln_relay/group_manager/static.nim b/waku/waku_rln_relay/group_manager/static.nim index 64a0f8ec3..0d385c298 100644 --- a/waku/waku_rln_relay/group_manager/static.nim +++ b/waku/waku_rln_relay/group_manager/static.nim @@ -1,3 +1,3 @@ import static/group_manager -export group_manager \ No newline at end of file +export group_manager diff --git a/waku/waku_rln_relay/group_manager/static/group_manager.nim b/waku/waku_rln_relay/group_manager/static/group_manager.nim index 82782addf..48d38dd72 100644 --- a/waku/waku_rln_relay/group_manager/static/group_manager.nim +++ b/waku/waku_rln_relay/group_manager/static/group_manager.nim @@ -1,44 +1,51 @@ -import - ../group_manager_base, - ../../constants, - ../../rln, - std/sequtils +import ../group_manager_base, ../../constants, ../../rln, std/sequtils -export - group_manager_base +export group_manager_base -type - StaticGroupManager* = ref object of GroupManager - groupKeys*: seq[IdentityCredential] - groupSize*: uint +type StaticGroupManager* = ref object of GroupManager + groupKeys*: seq[IdentityCredential] + groupSize*: uint template initializedGuard*(g: StaticGroupManager): untyped = if not g.initialized: - raise newException(ValueError, "StaticGroupManager is not initialized") + raise newException(ValueError, "StaticGroupManager is not initialized") method init*(g: StaticGroupManager): Future[void] {.async.} = let groupSize = g.groupSize groupKeys = g.groupKeys - membershipIndex = if g.membershipIndex.isSome(): g.membershipIndex.get() - else: raise newException(ValueError, "Membership index is not set") + membershipIndex = + if g.membershipIndex.isSome(): + g.membershipIndex.get() + else: + raise newException(ValueError, "Membership index is not set") - if membershipIndex < MembershipIndex(0) or membershipIndex >= MembershipIndex(groupSize): - raise newException(ValueError, "Invalid membership index. Must be within 0 and " & $(groupSize - 1) & "but was " & $membershipIndex) + if membershipIndex < MembershipIndex(0) or + membershipIndex >= MembershipIndex(groupSize): + raise newException( + ValueError, + "Invalid membership index. 
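# A condensed, synchronous sketch of the retry policy retryWrapper implements
# above; names here are hypothetical, and the real template is async, sleeps
# retryDelay between attempts, and can hand the final error to an
# OnFatalErrorHandler instead of raising.
proc withRetries(retries: int, errStr: string, body: proc(): int): int =
  var lastError = ""
  for _ in 1 .. retries:
    try:
      return body()
    except CatchableError:
      lastError = getCurrentExceptionMsg()
  raise newException(CatchableError, errStr & ": " & lastError)

var calls = 0
proc flaky(): int =
  inc calls
  if calls < 3:
    raise newException(CatchableError, "transient failure")
  42

when isMainModule:
  doAssert withRetries(3, "Failed to fetch", flaky) == 42
  doAssert calls == 3 # two failures were absorbed by the retry loop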
Must be within 0 and " & $(groupSize - 1) & " but was " & + $membershipIndex, + ) when defined(rln_v2): g.userMessageLimit = some(DefaultUserMessageLimit) g.idCredentials = some(groupKeys[membershipIndex]) # Seed the received commitments into the merkle tree when defined(rln_v2): - let rateCommitments = groupKeys.mapIt(RateCommitment(idCommitment: it.idCommitment, - userMessageLimit: g.userMessageLimit.get())) + let rateCommitments = groupKeys.mapIt( + RateCommitment( + idCommitment: it.idCommitment, userMessageLimit: g.userMessageLimit.get() + ) + ) let leaves = rateCommitments.toLeaves().valueOr: - raise newException(ValueError, "Failed to convert rate commitments to leaves: " & $error) + raise newException( + ValueError, "Failed to convert rate commitments to leaves: " & $error + ) let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, leaves) else: let idCommitments = groupKeys.mapIt(it.idCommitment) - let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, idCommitments) + let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, idCommitments) if not membersInserted: raise newException(ValueError, "Failed to insert members into the merkle tree") @@ -50,27 +57,32 @@ method init*(g: StaticGroupManager): Future[void] {.async.} = return -method startGroupSync*(g: StaticGroupManager): Future[void] {.async: (raises: [Exception]).} = +method startGroupSync*( + g: StaticGroupManager +): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) # No-op when defined(rln_v2): - method register*(g: StaticGroupManager, - rateCommitment: RateCommitment): Future[void] {.async: (raises: [Exception]).} = + method register*( + g: StaticGroupManager, rateCommitment: RateCommitment + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) await g.registerBatch(@[rateCommitment]) else: - method register*(g: StaticGroupManager, idCommitment: IDCommitment): - Future[void] {.async: (raises: [Exception]).} = + method register*( + g: StaticGroupManager, idCommitment: IDCommitment + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) await g.registerBatch(@[idCommitment]) when defined(rln_v2): - method registerBatch*(g: StaticGroupManager, - rateCommitments: seq[RateCommitment]): Future[void] {.async: (raises: [Exception]).} = + method registerBatch*( + g: StaticGroupManager, rateCommitments: seq[RateCommitment] + ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) let leavesRes = rateCommitments.toLeaves() @@ -84,8 +96,13 @@ when defined(rln_v2): if g.registerCb.isSome(): var memberSeq = newSeq[Membership]() - for i in 0..= n.epoch: retNonce = 0 + if now - n.lastNonceTime >= n.epoch: + retNonce = 0 n.nextNonce = retNonce + 1 n.lastNonceTime = now if retNonce >= n.nonceLimit: - return err(NonceManagerError(kind: NonceLimitReached, - error: "Nonce limit reached. Please wait for the next epoch.
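# A condensed sketch of the epoch/nonce policy visible above; field names
# mirror this module's NonceManager, but time is simplified to float seconds
# and the Result type is replaced by a plain exception.
import std/times

type DemoNonceManager = object
  epoch: float       # epoch length in seconds
  nonceLimit: uint64 # maximum messages per epoch
  nextNonce: uint64
  lastNonceTime: float

proc getNonce(n: var DemoNonceManager): uint64 =
  let now = epochTime()
  var retNonce = n.nextNonce
  if now - n.lastNonceTime >= n.epoch:
    retNonce = 0 # a new epoch started: restart the per-epoch counter
  n.nextNonce = retNonce + 1
  n.lastNonceTime = now
  if retNonce >= n.nonceLimit:
    raise newException(ValueError, "nonce limit reached; wait for the next epoch")
  return retNonce

when isMainModule:
  var n = DemoNonceManager(epoch: 10.0, nonceLimit: 2)
  doAssert n.getNonce() == 0
  doAssert n.getNonce() == 1
  doAssertRaises(ValueError):
    discard n.getNonce() # a third message within the epoch is rejected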
requested nonce: " & + $retNonce & " & nonceLimit: " & $n.nonceLimit, + ) + ) + return ok(retNonce) diff --git a/waku/waku_rln_relay/protocol_metrics.nim b/waku/waku_rln_relay/protocol_metrics.nim index d075eff38..d53caad2a 100644 --- a/waku/waku_rln_relay/protocol_metrics.nim +++ b/waku/waku_rln_relay/protocol_metrics.nim @@ -11,8 +11,7 @@ import ./constants, ../utils/collector -export - metrics +export metrics logScope: topics = "waku rln_relay" @@ -22,31 +21,57 @@ func generateBucketsForHistogram*(length: int): seq[float64] = let numberOfBuckets = 5 let stepSize = length / numberOfBuckets var buckets: seq[float64] - for i in 1..numberOfBuckets: + for i in 1 .. numberOfBuckets: buckets.add(stepSize * i.toFloat()) return buckets -declarePublicCounter(waku_rln_messages_total, "number of messages published on the rln content topic") +declarePublicCounter( + waku_rln_messages_total, "number of messages published on the rln content topic" +) declarePublicCounter(waku_rln_spam_messages_total, "number of spam messages detected") -declarePublicCounter(waku_rln_invalid_messages_total, "number of invalid messages detected", ["type"]) +declarePublicCounter( + waku_rln_invalid_messages_total, "number of invalid messages detected", ["type"] +) # This metric will be useful in detecting the index of the root in the acceptable window of roots -declarePublicHistogram(identifier = waku_rln_valid_messages_total, +declarePublicHistogram( + identifier = waku_rln_valid_messages_total, help = "number of valid messages with their roots tracked", - buckets = generateBucketsForHistogram(AcceptableRootWindowSize)) -declarePublicCounter(waku_rln_errors_total, "number of errors detected while operating the rln relay", ["type"]) -declarePublicCounter(waku_rln_proof_verification_total, "number of times the rln proofs are verified") + buckets = generateBucketsForHistogram(AcceptableRootWindowSize), +) +declarePublicCounter( + waku_rln_errors_total, + "number of errors detected while operating the rln relay", + ["type"], +) +declarePublicCounter( + waku_rln_proof_verification_total, "number of times the rln proofs are verified" +) # this is a gauge so that we can set it based on the events we receive -declarePublicGauge(waku_rln_number_registered_memberships, "number of registered and active rln memberships") +declarePublicGauge( + waku_rln_number_registered_memberships, + "number of registered and active rln memberships", +) # Timing metrics -declarePublicGauge(waku_rln_proof_verification_duration_seconds, "time taken to verify a proof") -declarePublicGauge(waku_rln_proof_generation_duration_seconds, "time taken to generate a proof") -declarePublicGauge(waku_rln_instance_creation_duration_seconds, "time taken to create an rln instance") -declarePublicGauge(waku_rln_membership_insertion_duration_seconds, "time taken to insert a new member into the local merkle tree") -declarePublicGauge(waku_rln_membership_credentials_import_duration_seconds, "time taken to import membership credentials") +declarePublicGauge( + waku_rln_proof_verification_duration_seconds, "time taken to verify a proof" +) +declarePublicGauge( + waku_rln_proof_generation_duration_seconds, "time taken to generate a proof" +) +declarePublicGauge( + waku_rln_instance_creation_duration_seconds, "time taken to create an rln instance" +) +declarePublicGauge( + waku_rln_membership_insertion_duration_seconds, + "time taken to insert a new member into the local merkle tree", +) +declarePublicGauge( + waku_rln_membership_credentials_import_duration_seconds, + 
"time taken to import membership credentials", +) -type - RLNMetricsLogger = proc() {.gcsafe, raises: [Defect].} +type RLNMetricsLogger = proc() {.gcsafe, raises: [Defect].} proc getRlnMetricsLogger*(): RLNMetricsLogger = var logMetrics: RLNMetricsLogger @@ -60,20 +85,17 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger = logMetrics = proc() = {.gcsafe.}: - - let freshErrorCount = parseAndAccumulate(waku_rln_errors_total, - cumulativeErrors) - let freshMsgCount = parseAndAccumulate(waku_rln_messages_total, - cumulativeMessages) - let freshSpamCount = parseAndAccumulate(waku_rln_spam_messages_total, - cumulativeSpamMessages) - let freshInvalidMsgCount = parseAndAccumulate(waku_rln_invalid_messages_total, - cumulativeInvalidMessages) - let freshValidMsgCount = parseAndAccumulate(waku_rln_valid_messages_total, - cumulativeValidMessages) - let freshProofCount = parseAndAccumulate(waku_rln_proof_verification_total, - cumulativeProofs) - + let freshErrorCount = parseAndAccumulate(waku_rln_errors_total, cumulativeErrors) + let freshMsgCount = + parseAndAccumulate(waku_rln_messages_total, cumulativeMessages) + let freshSpamCount = + parseAndAccumulate(waku_rln_spam_messages_total, cumulativeSpamMessages) + let freshInvalidMsgCount = + parseAndAccumulate(waku_rln_invalid_messages_total, cumulativeInvalidMessages) + let freshValidMsgCount = + parseAndAccumulate(waku_rln_valid_messages_total, cumulativeValidMessages) + let freshProofCount = + parseAndAccumulate(waku_rln_proof_verification_total, cumulativeProofs) info "Total messages", count = freshMsgCount info "Total spam messages", count = freshSpamCount @@ -82,4 +104,3 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger = info "Total errors", count = freshErrorCount info "Total proofs verified", count = freshProofCount return logMetrics - diff --git a/waku/waku_rln_relay/protocol_types.nim b/waku/waku_rln_relay/protocol_types.nim index 4f583954f..158f550b9 100644 --- a/waku/waku_rln_relay/protocol_types.nim +++ b/waku/waku_rln_relay/protocol_types.nim @@ -3,20 +3,10 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import - std/[options, tables, deques], - stew/arrayops, - chronos, - web3, - eth/keys -import - ../waku_core, - ../waku_keystore, - ../common/protobuf +import std/[options, tables, deques], stew/arrayops, chronos, web3, eth/keys +import ../waku_core, ../waku_keystore, ../common/protobuf -export - waku_keystore, - waku_core +export waku_keystore, waku_core type RlnRelayResult*[T] = Result[T, string] @@ -25,7 +15,8 @@ type RLN* {.incompleteStruct.} = object type RLNResult* = RlnRelayResult[ptr RLN] type - MerkleNode* = array[32, byte] # Each node of the Merkle tee is a Poseidon hash which is a 32 byte value + MerkleNode* = array[32, byte] + # Each node of the Merkle tee is a Poseidon hash which is a 32 byte value Nullifier* = array[32, byte] Epoch* = array[32, byte] RlnIdentifier* = array[32, byte] @@ -39,7 +30,7 @@ when defined(rln_v2): type RateCommitment* = object idCommitment*: IDCommitment userMessageLimit*: UserMessageLimit - + # Custom data types defined for waku rln relay ------------------------- type RateLimitProof* = object ## RateLimitProof holds the public inputs to rln circuit as @@ -72,9 +63,10 @@ type ProofMetadata* = object type MessageValidationResult* {.pure.} = enum - Valid, - Invalid, + Valid + Invalid Spam + MerkleNodeResult* = RlnRelayResult[MerkleNode] RateLimitProofResult* = RlnRelayResult[RateLimitProof] @@ -85,36 +77,35 @@ proc init*(T: type RateLimitProof, buffer: seq[byte]): ProtoResult[T] = let 
pb = initProtoBuffer(buffer) var proof: seq[byte] - discard ? pb.getField(1, proof) + discard ?pb.getField(1, proof) discard nsp.proof.copyFrom(proof) var merkleRoot: seq[byte] - discard ? pb.getField(2, merkleRoot) + discard ?pb.getField(2, merkleRoot) discard nsp.merkleRoot.copyFrom(merkleRoot) var epoch: seq[byte] - discard ? pb.getField(3, epoch) + discard ?pb.getField(3, epoch) discard nsp.epoch.copyFrom(epoch) var shareX: seq[byte] - discard ? pb.getField(4, shareX) + discard ?pb.getField(4, shareX) discard nsp.shareX.copyFrom(shareX) var shareY: seq[byte] - discard ? pb.getField(5, shareY) + discard ?pb.getField(5, shareY) discard nsp.shareY.copyFrom(shareY) var nullifier: seq[byte] - discard ? pb.getField(6, nullifier) + discard ?pb.getField(6, nullifier) discard nsp.nullifier.copyFrom(nullifier) var rlnIdentifier: seq[byte] - discard ? pb.getField(7, rlnIdentifier) + discard ?pb.getField(7, rlnIdentifier) discard nsp.rlnIdentifier.copyFrom(rlnIdentifier) return ok(nsp) - proc encode*(nsp: RateLimitProof): ProtoBuffer = var output = initProtoBuffer() @@ -130,5 +121,7 @@ proc encode*(nsp: RateLimitProof): ProtoBuffer = return output type - SpamHandler* = proc(wakuMessage: WakuMessage): void {.gcsafe, closure, raises: [Defect].} - RegistrationHandler* = proc(txHash: string): void {.gcsafe, closure, raises: [Defect].} + SpamHandler* = + proc(wakuMessage: WakuMessage): void {.gcsafe, closure, raises: [Defect].} + RegistrationHandler* = + proc(txHash: string): void {.gcsafe, closure, raises: [Defect].} diff --git a/waku/waku_rln_relay/rln.nim b/waku/waku_rln_relay/rln.nim index f2105e772..59f6b1df2 100644 --- a/waku/waku_rln_relay/rln.nim +++ b/waku/waku_rln_relay/rln.nim @@ -1,7 +1,3 @@ -import - rln/rln_interface, - rln/wrappers +import rln/rln_interface, rln/wrappers -export - rln_interface, - wrappers +export rln_interface, wrappers diff --git a/waku/waku_rln_relay/rln/rln_interface.nim b/waku/waku_rln_relay/rln/rln_interface.nim index f45880e2e..54af6a3b6 100644 --- a/waku/waku_rln_relay/rln/rln_interface.nim +++ b/waku/waku_rln_relay/rln/rln_interface.nim @@ -1,14 +1,11 @@ ## Nim wrappers for the functions defined in librln -import - ../protocol_types - +import ../protocol_types when (NimMajor, NimMinor) < (1, 4): {.push raises: [Defect].} else: {.push raises: [].} - ## Buffer struct is taken from # https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs type Buffer* = object @@ -23,13 +20,15 @@ proc toBuffer*(x: openArray[byte]): Buffer = let output = Buffer(`ptr`: cast[ptr uint8](baseAddr), len: uint(temp.len)) return output - ###################################################################### ## RLN Zerokit module APIs ###################################################################### #------------------------------ Merkle Tree operations ----------------------------------------- -proc update_next_member*(ctx: ptr RLN, input_buffer: ptr Buffer): bool {.importc: "set_next_leaf".} +proc update_next_member*( + ctx: ptr RLN, input_buffer: ptr Buffer +): bool {.importc: "set_next_leaf".} + ## adds an element in the merkle tree to the next available position ## input_buffer points to the id commitment byte seq ## the return bool value indicates the success or failure of the operation @@ -44,17 +43,26 @@ proc get_root*(ctx: ptr RLN, output_buffer: ptr Buffer): bool {.importc: "get_ro ## the output_buffer holds the Merkle tree root of size 32 bytes ## the return bool value indicates the success or failure of the operation -proc 
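# A round-trip sketch of the protobuf encoding used by init/encode above,
# assuming nim-libp2p's minprotobuf (pulled into this codebase via
# ../common/protobuf): write a bytes field, re-parse the buffer, read it back.
import libp2p/protobuf/minprotobuf

when isMainModule:
  var output = initProtoBuffer()
  output.write(3, @[1'u8, 2, 3]) # field 3 carries the epoch bytes above
  output.finish()

  var parsed = initProtoBuffer(output.buffer)
  var epoch: seq[byte]
  discard parsed.getField(3, epoch)
  doAssert epoch == @[1'u8, 2, 3]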
get_merkle_proof*(ctx: ptr RLN, index: uint, output_buffer: ptr Buffer): bool {.importc: "get_proof".} +proc get_merkle_proof*( + ctx: ptr RLN, index: uint, output_buffer: ptr Buffer +): bool {.importc: "get_proof".} + ## populates the passed pointer output_buffer with the merkle proof for the leaf at position index in the tree stored by ctx ## the output_buffer holds a serialized Merkle proof (vector of 32 bytes nodes) ## the return bool value indicates the success or failure of the operation -proc set_leaf*(ctx: ptr RLN, index: uint, input_buffer: ptr Buffer): bool {.importc: "set_leaf".} +proc set_leaf*( + ctx: ptr RLN, index: uint, input_buffer: ptr Buffer +): bool {.importc: "set_leaf".} + ## sets the leaf at position index in the tree stored by ctx to the value passed by input_buffer ## the input_buffer holds a serialized leaf of 32 bytes ## the return bool value indicates the success or failure of the operation -proc get_leaf*(ctx: ptr RLN, index: uint, output_buffer: ptr Buffer): bool {.importc: "get_leaf".} +proc get_leaf*( + ctx: ptr RLN, index: uint, output_buffer: ptr Buffer +): bool {.importc: "get_leaf".} + ## gets the leaf at position index in the tree stored by ctx ## the output_buffer holds a serialized leaf of 32 bytes ## the return bool value indicates the success or failure of the operation @@ -63,14 +71,20 @@ proc leaves_set*(ctx: ptr RLN): uint {.importc: "leaves_set".} ## gets the number of leaves set in the tree stored by ctx ## the return uint value indicates the number of leaves set in the tree -proc init_tree_with_leaves*(ctx: ptr RLN, input_buffer: ptr Buffer): bool {.importc: "init_tree_with_leaves".} +proc init_tree_with_leaves*( + ctx: ptr RLN, input_buffer: ptr Buffer +): bool {.importc: "init_tree_with_leaves".} + ## sets multiple leaves in the tree stored by ctx to the value passed by input_buffer ## the input_buffer holds a serialized vector of leaves (32 bytes each) ## the input_buffer size is prefixed by a 8 bytes integer indicating the number of leaves ## leaves are set one after each other starting from index 0 ## the return bool value indicates the success or failure of the operation -proc atomic_write*(ctx: ptr RLN, index: uint, leaves_buffer: ptr Buffer, indices_buffer: ptr Buffer): bool {.importc: "atomic_operation".} +proc atomic_write*( + ctx: ptr RLN, index: uint, leaves_buffer: ptr Buffer, indices_buffer: ptr Buffer +): bool {.importc: "atomic_operation".} + ## sets multiple leaves, and zeroes out indices in the tree stored by ctx to the value passed by input_buffer ## the leaves_buffer holds a serialized vector of leaves (32 bytes each) ## the leaves_buffer size is prefixed by a 8 bytes integer indicating the number of leaves @@ -86,13 +100,19 @@ proc reset_tree*(ctx: ptr RLN, tree_height: uint): bool {.importc: "set_tree".} #---------------------------------------------------------------------------------------------- #-------------------------------- zkSNARKs operations ----------------------------------------- -proc key_gen*(ctx: ptr RLN, output_buffer: ptr Buffer): bool {.importc: "extended_key_gen".} +proc key_gen*( + ctx: ptr RLN, output_buffer: ptr Buffer +): bool {.importc: "extended_key_gen".} + ## generates identity trapdoor, identity nullifier, identity secret hash and id commitment tuple serialized inside output_buffer as | identity_trapdoor<32> | identity_nullifier<32> | identity_secret_hash<32> | id_commitment<32> | ## identity secret hash is the poseidon hash of [identity_trapdoor, identity_nullifier] ## id commitment is the 
poseidon hash of the identity secret hash ## the return bool value indicates the success or failure of the operation -proc seeded_key_gen*(ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer): bool {.importc: "seeded_extended_key_gen".} +proc seeded_key_gen*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "seeded_extended_key_gen".} + ## generates identity trapdoor, identity nullifier, identity secret hash and id commitment tuple serialized inside output_buffer as | identity_trapdoor<32> | identity_nullifier<32> | identity_secret_hash<32> | id_commitment<32> | using ChaCha20 ## seeded with an arbitrary long seed serialized in input_buffer ## The input seed provided by the user is hashed using Keccak256 before being passed to ChaCha20 as seed. @@ -100,9 +120,10 @@ proc seeded_key_gen*(ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr ## id commitment is the poseidon hash of the identity secret hash ## the return bool value indicates the success or failure of the operation -proc generate_proof*(ctx: ptr RLN, - input_buffer: ptr Buffer, - output_buffer: ptr Buffer): bool {.importc: "generate_rln_proof".} +proc generate_proof*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "generate_rln_proof".} + ## rln-v2 ## input_buffer has to be serialized as [ identity_secret<32> | identity_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal ] ## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] @@ -112,9 +133,10 @@ proc generate_proof*(ctx: ptr RLN, ## integers wrapped in <> indicate value sizes in bytes ## the return bool value indicates the success or failure of the operation -proc verify*(ctx: ptr RLN, - proof_buffer: ptr Buffer, - proof_is_valid_ptr: ptr bool): bool {.importc: "verify_rln_proof".} +proc verify*( + ctx: ptr RLN, proof_buffer: ptr Buffer, proof_is_valid_ptr: ptr bool +): bool {.importc: "verify_rln_proof".} + ## rln-v2 ## proof_buffer has to be serialized as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> | signal_len<8> | signal ] ## rln-v1 @@ -122,10 +144,13 @@ proc verify*(ctx: ptr RLN, ## the return bool value indicates the success or failure of the call to the verify function ## the verification of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure -proc verify_with_roots*(ctx: ptr RLN, - proof_buffer: ptr Buffer, - roots_buffer: ptr Buffer, - proof_is_valid_ptr: ptr bool): bool {.importc: "verify_with_roots".} +proc verify_with_roots*( + ctx: ptr RLN, + proof_buffer: ptr Buffer, + roots_buffer: ptr Buffer, + proof_is_valid_ptr: ptr bool, +): bool {.importc: "verify_with_roots".} + ## rln-v2 ## proof_buffer has to be serialized as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> | signal_len<8> | signal ] ## rln-v1 @@ -134,9 +159,10 @@ proc verify_with_roots*(ctx: ptr RLN, ## the return bool value indicates the success or failure of the call to the verify function ## the verification of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure -proc zk_prove*(ctx: ptr RLN, - input_buffer: ptr Buffer, - output_buffer: ptr Buffer): bool {.importc: "prove".} +proc zk_prove*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr 
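# A quick arithmetic check of the rln-v2 generate_proof input layout listed
# above: five fixed-size fields plus the 8-byte signal length prefix and the
# raw signal bytes.
func proofInputLen(signalLen: int): int =
  # identity_secret<32> + identity_index<8> + user_message_limit<32> +
  # message_id<32> + external_nullifier<32> + signal_len<8> + signal
  32 + 8 + 32 + 32 + 32 + 8 + signalLen

when isMainModule:
  doAssert proofInputLen(0) == 144
  doAssert proofInputLen(64) == 208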
Buffer,
+): bool {.importc: "prove".}
+
 ## Computes the zkSNARK proof and stores it in output_buffer for input values stored in input_buffer
 ## rln-v2
 ## input_buffer is serialized as input_data as [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements> | identity_path_index> | x<32> | external_nullifier<32> ]
@@ -148,9 +174,10 @@ proc zk_prove*(ctx: ptr RLN,
 ## epoch is the input epoch (equivalently, the nullifier)
 ## the return bool value indicates the success or failure of the operation
 
-proc zk_verify*(ctx: ptr RLN,
-               proof_buffer: ptr Buffer,
-               proof_is_valid_ptr: ptr bool): bool {.importc: "verify".}
+proc zk_verify*(
+    ctx: ptr RLN, proof_buffer: ptr Buffer, proof_is_valid_ptr: ptr bool
+): bool {.importc: "verify".}
+
 ## Verifies the zkSNARK proof passed in proof_buffer
 ## input_buffer is serialized as input_data as [ proof<128> ]
 ## the verification of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure
@@ -159,14 +186,24 @@
 
 #----------------------------------------------------------------------------------------------
 #-------------------------------- Common procedures -------------------------------------------
 
-proc new_circuit*(tree_height: uint, input_buffer: ptr Buffer, ctx: ptr (ptr RLN)): bool {.importc: "new".}
+proc new_circuit*(
+    tree_height: uint, input_buffer: ptr Buffer, ctx: ptr (ptr RLN)
+): bool {.importc: "new".}
+
 ## creates an instance of the rln object as defined by the zerokit RLN lib
 ## tree_height represents the depth of the Merkle tree
 ## input_buffer contains a serialization of the path where the circuit resources can be found (.r1cs, .wasm, .zkey and optionally the verification_key.json)
 ## ctx holds the final created rln object
 ## the return bool value indicates the success or failure of the operation
 
-proc new_circuit_from_data*(tree_height: uint, circom_buffer: ptr Buffer, zkey_buffer: ptr Buffer, vk_buffer: ptr Buffer, ctx: ptr (ptr RLN)): bool {.importc: "new_with_params".}
+proc new_circuit_from_data*(
+    tree_height: uint,
+    circom_buffer: ptr Buffer,
+    zkey_buffer: ptr Buffer,
+    vk_buffer: ptr Buffer,
+    ctx: ptr (ptr RLN),
+): bool {.importc: "new_with_params".}
+
 ## creates an instance of the rln object as defined by the zerokit RLN lib by passing the required inputs as byte arrays
 ## tree_height represents the depth of the Merkle tree
 ## circom_buffer contains the bytes read from the Circom .wasm circuit
@@ -177,16 +214,20 @@ proc new_circuit_from_data*(tree_height: uint, circom_buffer: ptr Buffer, zkey_b
 
 #-------------------------------- Hashing utils -------------------------------------------
 
-proc sha256*(input_buffer: ptr Buffer,
-             output_buffer: ptr Buffer): bool {.importc: "hash".}
+proc sha256*(
+    input_buffer: ptr Buffer, output_buffer: ptr Buffer
+): bool {.importc: "hash".}
+
 ## it hashes (sha256) the plain text supplied in input_buffer and then maps it to a field element
 ## this proc is used to map arbitrary signals to a field element for the sake of proof generation
 ## input_buffer holds the hash input as a byte seq
 ## the hash output is generated and populated inside output_buffer
 ## the output_buffer contains the 32-byte hash output
 
-proc poseidon*(input_buffer: ptr Buffer,
-               output_buffer: ptr Buffer): bool {.importc: "poseidon_hash".}
+proc poseidon*(
+    input_buffer: ptr Buffer, output_buffer: ptr Buffer
+): bool {.importc: "poseidon_hash".}
+
 ## it hashes (poseidon) the plain text supplied in input_buffer
 ## this proc is used to compute the identity secret hash and the external nullifier
 ## input_buffer holds the hash input as a byte seq
@@ -195,13 +236,19 @@ proc poseidon*(input_buffer: ptr Buffer,
 
 #-------------------------------- Persistent Metadata utils -------------------------------------------
 
-proc set_metadata*(ctx: ptr RLN, input_buffer: ptr Buffer): bool {.importc: "set_metadata".}
+proc set_metadata*(
+    ctx: ptr RLN, input_buffer: ptr Buffer
+): bool {.importc: "set_metadata".}
+
 ## sets the metadata stored by ctx to the value passed by input_buffer
 ## the input_buffer holds a serialized representation of the metadata (format to be defined)
 ## input_buffer holds the metadata as a byte seq
 ## the return bool value indicates the success or failure of the operation
 
-proc get_metadata*(ctx: ptr RLN, output_buffer: ptr Buffer): bool {.importc: "get_metadata".}
+proc get_metadata*(
+    ctx: ptr RLN, output_buffer: ptr Buffer
+): bool {.importc: "get_metadata".}
+
 ## gets the metadata stored by ctx and populates the passed pointer output_buffer with it
 ## the output_buffer holds the metadata as a byte seq
 ## the return bool value indicates the success or failure of the operation
@@ -209,4 +256,4 @@ proc get_metadata*(ctx: ptr RLN, output_buffer: ptr Buffer): bool {.importc: "ge
 proc flush*(ctx: ptr RLN): bool {.importc: "flush".}
 ## flushes the write buffer to the database
 ## the return bool value indicates the success or failure of the operation
-## This allows more robust and graceful handling of the database connection
\ No newline at end of file
+## This allows more robust and graceful handling of the database connection
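For orientation before the wrapper code in the next file: calling these importc procs from Nim means marshalling byte sequences through the Buffer pointer/length pair. A minimal sketch, assuming the Buffer object layout used by waku_rln_relay's protocol_types (a raw `ptr` plus a `len` field) and an illustrative import path; the wrappers below are the canonical callers:

  # sketch only: Buffer layout and import path are assumptions
  import waku/waku_rln_relay/rln/rln_interface

  proc sha256Example(data: var seq[byte]): array[32, byte] =
    # wrap the input bytes in the FFI Buffer
    var inputBuffer = Buffer(`ptr`: cast[ptr uint8](addr data[0]), len: uint(data.len))
    var outputBuffer: Buffer # populated by the library on success
    # "hash" maps arbitrary bytes to a 32-byte field element
    if not sha256(addr inputBuffer, addr outputBuffer):
      raise newException(CatchableError, "sha256 FFI call failed")
    cast[ptr array[32, byte]](outputBuffer.`ptr`)[]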
diff --git a/waku/waku_rln_relay/rln/wrappers.nim b/waku/waku_rln_relay/rln/wrappers.nim
index ae83db052..3f7c87ed4 100644
--- a/waku/waku_rln_relay/rln/wrappers.nim
+++ b/waku/waku_rln_relay/rln/wrappers.nim
@@ -1,5 +1,4 @@
-import
-  std/json
+import std/json
 import
   chronicles,
   options,
@@ -8,14 +7,8 @@ import
   std/[sequtils, strformat, strutils, tables],
   nimcrypto/utils
 
-import
-  ./rln_interface,
-  ../conversion_utils,
-  ../protocol_types,
-  ../protocol_metrics
-import
-  ../../waku_core,
-  ../../waku_keystore
+import ./rln_interface, ../conversion_utils, ../protocol_types, ../protocol_metrics
+import ../../waku_core, ../../waku_keystore
 
 logScope:
   topics = "waku rln_relay ffi"
@@ -31,13 +24,13 @@ proc membershipKeyGen*(ctxPtr: ptr RLN): RlnRelayResult[IdentityCredential] =
     done = key_gen(ctxPtr, keysBufferPtr)
 
   # check whether the keys are generated successfully
-  if(done == false):
+  if (done == false):
     return err("error in key generation")
 
-  if (keysBuffer.len != 4*32):
+  if (keysBuffer.len != 4 * 32):
     return err("keysBuffer is of invalid length")
 
-  var generatedKeys = cast[ptr array[4*32, byte]](keysBufferPtr.`ptr`)[]
+  var generatedKeys = cast[ptr array[4 * 32, byte]](keysBufferPtr.`ptr`)[]
   # the public and secret keys together are 64 bytes
 
   # TODO define a separate proc to decode the generated keys to the secret and public components
@@ -46,13 +39,21 @@ proc membershipKeyGen*(ctxPtr: ptr RLN): RlnRelayResult[IdentityCredential] =
     idNullifier: array[32, byte]
     idSecretHash: array[32, byte]
     idCommitment: array[32, byte]
-  for (i, x) in idTrapdoor.mpairs: x = generatedKeys[i+0*32]
-  for (i, x) in idNullifier.mpairs: x = generatedKeys[i+1*32]
-  for (i, x) in idSecretHash.mpairs: x = generatedKeys[i+2*32]
-  for (i, x) in idCommitment.mpairs: x = generatedKeys[i+3*32]
+  for (i, x) in idTrapdoor.mpairs:
+    x = generatedKeys[i + 0 * 32]
+  for (i, x) in idNullifier.mpairs:
+    x = generatedKeys[i + 1 * 32]
+  for (i, x) in idSecretHash.mpairs:
+    x = generatedKeys[i + 2 * 32]
+  for (i, x) in idCommitment.mpairs:
+    x = generatedKeys[i + 3 * 32]
 
-  var
-    identityCredential = IdentityCredential(idTrapdoor: @idTrapdoor, idNullifier: @idNullifier, idSecretHash: @idSecretHash, idCommitment: @idCommitment)
+  var identityCredential = IdentityCredential(
+    idTrapdoor: @idTrapdoor,
+    idNullifier: @idNullifier,
+    idSecretHash: @idSecretHash,
+    idCommitment: @idCommitment,
+  )
 
   return ok(identityCredential)
@@ -70,22 +71,25 @@ type RlnConfig = ref object of RootObj
 
 proc `%`(c: RlnConfig): JsonNode =
   ## wrapper around the generic JObject constructor.
   ## We don't need to have a separate proc for the tree_config field
-  let tree_config = %{ "cache_capacity": %c.tree_config.cache_capacity,
-                       "mode": %c.tree_config.mode,
-                       "compression": %c.tree_config.compression,
-                       "flush_every_ms": %c.tree_config.flush_every_ms,
-                       "path": %c.tree_config.path }
-  return %[("resources_folder", %c.resources_folder),
-           ("tree_config", %tree_config)]
+  let tree_config =
+    %{
+      "cache_capacity": %c.tree_config.cache_capacity,
+      "mode": %c.tree_config.mode,
+      "compression": %c.tree_config.compression,
+      "flush_every_ms": %c.tree_config.flush_every_ms,
+      "path": %c.tree_config.path,
+    }
+  return %[("resources_folder", %c.resources_folder), ("tree_config", %tree_config)]
 
-proc createRLNInstanceLocal(d = MerkleTreeDepth,
-                            tree_path = DefaultRlnTreePath): RLNResult =
+proc createRLNInstanceLocal(
+    d = MerkleTreeDepth, tree_path = DefaultRlnTreePath
+): RLNResult =
   ## generates an instance of RLN
   ## An RLN instance supports both the zkSNARK logic and the Merkle tree data structure and operations
   ## d indicates the depth of the Merkle tree
   ## tree_path indicates the path of the Merkle tree
   ## Returns an error if the instance creation fails
-
+
   let rln_config = RlnConfig(
     resources_folder: "tree_height_" & $d & "/",
     tree_config: RlnTreeConfig(
@@ -93,8 +97,8 @@ proc createRLNInstanceLocal(d = MerkleTreeDepth,
       mode: "high_throughput",
       compression: false,
       flush_every_ms: 500,
-      path: if tree_path != "": tree_path else: DefaultRlnTreePath
-    )
+      path: if tree_path != "": tree_path else: DefaultRlnTreePath,
+    ),
   )
 
   var serialized_rln_config = $(%rln_config)
@@ -102,7 +106,8 @@
   var
     rlnInstance: ptr RLN
     merkleDepth: csize_t = uint(d)
-    configBuffer = serialized_rln_config.toOpenArrayByte(0, serialized_rln_config.high).toBuffer()
+    configBuffer =
+      serialized_rln_config.toOpenArrayByte(0, serialized_rln_config.high).toBuffer()
 
   # create an instance of RLN
   let res = new_circuit(merkleDepth, addr configBuffer, addr rlnInstance)
@@ -112,8 +117,9 @@
     return err("error in parameters generation")
   return ok(rlnInstance)
 
-proc createRLNInstance*(d = MerkleTreeDepth,
-                        tree_path = DefaultRlnTreePath): RLNResult =
+proc createRLNInstance*(
+    d = MerkleTreeDepth, tree_path = DefaultRlnTreePath
+): RLNResult =
   ## Wraps the rln instance creation for metrics
   ## Returns an error if the instance creation fails
   var res: RLNResult
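A minimal usage sketch for createRLNInstance as defined above; both parameters default (to MerkleTreeDepth and DefaultRlnTreePath), and the tree path value and logging here are illustrative:

  # sketch only: error handling and path are illustrative
  let rlnRes = createRLNInstance(tree_path = "rln_tree.db")
  if rlnRes.isErr():
    error "failed to create RLN instance", err = rlnRes.error
  else:
    let rlnInstance: ptr RLN = rlnRes.get()
    # rlnInstance can now be handed to the proof, hashing and metadata FFI procs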
@@ -129,15 +135,13 @@ proc sha256*(data: openArray[byte]): RlnRelayResult[MerkleNode] =
     outputBuffer: Buffer # will hold the hash output
 
   trace "sha256 hash input buffer length", bufflen = hashInputBuffer.len
 
-  let
-    hashSuccess = sha256(addr hashInputBuffer, addr outputBuffer)
+  let hashSuccess = sha256(addr hashInputBuffer, addr outputBuffer)
 
   # check whether the hash call is done successfully
   if not hashSuccess:
     return err("error in sha256 hash")
 
-  let
-    output = cast[ptr MerkleNode](outputBuffer.`ptr`)[]
+  let output = cast[ptr MerkleNode](outputBuffer.`ptr`)[]
 
   return ok(output)
@@ -148,15 +152,13 @@ proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] =
     hashInputBuffer = inputBytes.toBuffer()
     outputBuffer: Buffer # will hold the hash output
 
-  let
-    hashSuccess = poseidon(addr hashInputBuffer, addr outputBuffer)
+  let hashSuccess = poseidon(addr hashInputBuffer, addr outputBuffer)
 
   # check whether the hash call is done successfully
   if not hashSuccess:
     return err("error in poseidon hash")
 
-  let
-    output = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
+  let output = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
 
   return ok(output)
@@ -165,13 +167,17 @@ when defined(rln_v2):
     let idCommitment = rateCommitment.idCommitment
     var userMessageLimit: array[32, byte]
     try:
-      discard userMessageLimit.copyFrom(toBytes(rateCommitment.userMessageLimit, Endianness.littleEndian))
+      discard userMessageLimit.copyFrom(
+        toBytes(rateCommitment.userMessageLimit, Endianness.littleEndian)
+      )
     except CatchableError:
-      return err("could not convert the user message limit to bytes: " & getCurrentExceptionMsg())
+      return err(
+        "could not convert the user message limit to bytes: " & getCurrentExceptionMsg()
+      )
     let leaf = poseidon(@[@idCommitment, @userMessageLimit]).valueOr:
       return err("could not convert the rate commitment to a leaf")
     var retLeaf = newSeq[byte](leaf.len)
-    for i in 0..
|merkle_node[len]|
-
+  var roots = newSeq[MerkleNode]()
-  let len = uint64.fromBytes(merkleNodeByteSeq[0..7], Endianness.littleEndian)
+  let len = uint64.fromBytes(merkleNodeByteSeq[0 .. 7], Endianness.littleEndian)
   trace "length of valid roots", len
-  for i in 0'u64..
 rlnPeer.rlnMaxEpochGap: # message's epoch is too old or too ahead
     # accept messages whose epoch is within +-MaxEpochGap from the current epoch
-    warn "invalid message: epoch gap exceeds a threshold", gap = gap,
-      payloadLen = msg.payload.len, msgEpoch = fromEpoch(proof.epoch)
-    waku_rln_invalid_messages_total.inc(labelValues=["invalid_epoch"])
+    warn "invalid message: epoch gap exceeds a threshold",
+      gap = gap, payloadLen = msg.payload.len, msgEpoch = fromEpoch(proof.epoch)
+    waku_rln_invalid_messages_total.inc(labelValues = ["invalid_epoch"])
     return MessageValidationResult.Invalid
 
   let rootValidationRes = rlnPeer.groupManager.validateRoot(proof.merkleRoot)
   if not rootValidationRes:
-    warn "invalid message: provided root does not belong to acceptable window of roots", provided=proof.merkleRoot.inHex(), validRoots=rlnPeer.groupManager.validRoots.mapIt(it.inHex())
-    waku_rln_invalid_messages_total.inc(labelValues=["invalid_root"])
+    warn "invalid message: provided root does not belong to acceptable window of roots",
+      provided = proof.merkleRoot.inHex(),
+      validRoots = rlnPeer.groupManager.validRoots.mapIt(it.inHex())
+    waku_rln_invalid_messages_total.inc(labelValues = ["invalid_root"])
     return MessageValidationResult.Invalid
 
   # verify the proof
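The epoch check above compares the message's epoch against the node's own clock. A standalone sketch of the arithmetic; the 10-second epoch unit is an assumption here (nwaku defines its own constant), and calcEpoch/fromEpoch in the real code perform the conversions:

  # sketch only: EpochUnitSeconds value is an assumption
  const EpochUnitSeconds = 10.0

  proc withinEpochWindow(localTime, senderTime: float64, maxGap: uint64): bool =
    let
      localEpoch = uint64(localTime / EpochUnitSeconds)
      msgEpoch = uint64(senderTime / EpochUnitSeconds)
      gap = max(localEpoch, msgEpoch) - min(localEpoch, msgEpoch)
    # accept only messages whose epoch is within +-maxGap of the local epoch
    gap <= maxGap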
@@ -226,23 +240,23 @@ proc validateMessage*(rlnPeer: WakuRLNRelay,
   let proofVerificationRes = rlnPeer.groupManager.verifyProof(input, proof)
 
   if proofVerificationRes.isErr():
-    waku_rln_errors_total.inc(labelValues=["proof_verification"])
+    waku_rln_errors_total.inc(labelValues = ["proof_verification"])
     warn "invalid message: proof verification failed", payloadLen = msg.payload.len
     return MessageValidationResult.Invalid
   if not proofVerificationRes.value():
     # invalid proof
     warn "invalid message: invalid proof", payloadLen = msg.payload.len
-    waku_rln_invalid_messages_total.inc(labelValues=["invalid_proof"])
+    waku_rln_invalid_messages_total.inc(labelValues = ["invalid_proof"])
     return MessageValidationResult.Invalid
 
   # check if double messaging has happened
   let proofMetadataRes = proof.extractMetadata()
   if proofMetadataRes.isErr():
-    waku_rln_errors_total.inc(labelValues=["proof_metadata_extraction"])
+    waku_rln_errors_total.inc(labelValues = ["proof_metadata_extraction"])
     return MessageValidationResult.Invalid
   let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadataRes.get())
   if hasDup.isErr():
-    waku_rln_errors_total.inc(labelValues=["duplicate_check"])
+    waku_rln_errors_total.inc(labelValues = ["duplicate_check"])
   elif hasDup.value == true:
     trace "invalid message: message is spam", payloadLen = msg.payload.len
     waku_rln_spam_messages_total.inc()
@@ -254,9 +268,8 @@ ...
   return MessageValidationResult.Valid
 
 proc validateMessageAndUpdateLog*(
-    rlnPeer: WakuRLNRelay,
-    msg: WakuMessage,
-    timeOption = none(float64)): MessageValidationResult =
+    rlnPeer: WakuRLNRelay, msg: WakuMessage, timeOption = none(float64)
+): MessageValidationResult =
   ## validates the message and updates the log to prevent double messaging
   ## in future messages
@@ -285,9 +298,9 @@ proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] =
     output = concat(wakumessage.payload, contentTopicBytes)
   return output
 
-proc appendRLNProof*(rlnPeer: WakuRLNRelay,
-                     msg: var WakuMessage,
-                     senderEpochTime: float64): RlnRelayResult[void] =
+proc appendRLNProof*(
+    rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64
+): RlnRelayResult[void] =
   ## returns true if it can create and append a `RateLimitProof` to the supplied `msg`
   ## returns false otherwise
   ## `senderEpochTime` indicates the number of seconds passed since Unix epoch. The fractional part holds sub-seconds.
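validateMessageAndUpdateLog and appendRLNProof above are the two halves of the relay flow: receivers validate and log, senders attach proofs. A sender-side sketch, assuming an initialised WakuRLNRelay in `rlnPeer` and illustrative payload/topic values:

  # sketch only: rlnPeer, imports (std/times, stew/byteutils) assumed
  var msg = WakuMessage(
    payload: "hello".toBytes(), contentTopic: "/toy/1/example/proto"
  )
  let senderEpochTime = getTime().toUnixFloat() # seconds since Unix epoch
  if rlnPeer.appendRLNProof(msg, senderEpochTime).isErr():
    error "failed to append RLN proof"
  # on success, msg now carries a RateLimitProof for the sender's epoch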
@@ -316,27 +329,30 @@ proc clearNullifierLog(rlnPeer: WakuRlnRelay) =
     return
 
   trace "clearing epochs from the nullifier log", count = rlnPeer.rlnMaxEpochGap
-  let epochsToClear = rlnPeer.nullifierLog.keys().toSeq()[0..
 0: message.timestamp
-      else: getNanosecondTime(getTime().toUnixFloat())
+    receivedTime =
+      if message.timestamp > 0:
+        message.timestamp
+      else:
+        getNanosecondTime(getTime().toUnixFloat())
 
     store.put(pubsubTopic, message, digest, messageHash, receivedTime)
 
-  proc resume*(w: WakuStoreClient,
-               peerList = none(seq[RemotePeerInfo]),
-               pageSize = DefaultPageSize,
-               pubsubTopic = DefaultPubsubTopic): Future[WakuStoreResult[uint64]] {.async, gcsafe.} =
+  proc resume*(
+      w: WakuStoreClient,
+      peerList = none(seq[RemotePeerInfo]),
+      pageSize = DefaultPageSize,
+      pubsubTopic = DefaultPubsubTopic,
+  ): Future[WakuStoreResult[uint64]] {.async, gcsafe.} =
     ## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online
     ## messages are stored in the store node's messages field and in the message db
     ## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message
@@ -188,7 +193,8 @@ when defined(waku_exp_store_resume):
       lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0))
       now = getNanosecondTime(getTime().toUnixFloat())
 
-    debug "resuming with offline time window", lastSeenTime=lastSeenTime, currentTime=now
+    debug "resuming with offline time window",
+      lastSeenTime = lastSeenTime, currentTime = now
 
     let queryEndTime = now + StoreResumeTimeWindowOffset
@@ -199,14 +205,13 @@
       startTime: some(queryStartTime),
       endTime: some(queryEndTime),
       pageSize: uint64(pageSize),
-      direction: default()
+      direction: default(),
     )
 
     var res: WakuStoreResult[seq[WakuMessage]]
     if peerList.isSome():
       debug "trying the candidate list to fetch the history"
       res = await w.queryLoop(req, peerList.get())
-
     else:
       debug "no candidate list is provided, selecting a random peer"
       # if no peerList is set then query from one of the peers stored in the peer manager
@@ -223,7 +228,6 @@
       debug "failed to resume the history"
       return err("failed to resume the history")
 
-
     # Save the retrieved messages in the store
     var added: uint = 0
     for msg in res.get():
diff --git a/waku/waku_store/common.nim b/waku/waku_store/common.nim
index 6302e6478..19f4661c9 100644
--- a/waku/waku_store/common.nim
+++ b/waku/waku_store/common.nim
@@ -3,15 +3,8 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}
 
-import
-  std/[options,sequtils],
-  stew/results,
-  stew/byteutils,
-  nimcrypto/sha2
-import
-  ../waku_core,
-  ../common/paging
-
+import std/[options, sequtils], stew/results, stew/byteutils, nimcrypto/sha2
+import ../waku_core, ../common/paging
 
 const
   WakuStoreCodec* = "/vac/waku/store/2.0.0-beta4"
@@ -20,10 +13,8 @@ const
 
   MaxPageSize*: uint64 = 100
 
-
 type WakuStoreResult*[T] = Result[T, string]
 
-
 ## Waku message digest
 
 type MessageDigest* = MDigest[256]
@@ -31,7 +22,8 @@ type MessageDigest* = MDigest[256]
 proc computeDigest*(msg: WakuMessage): MessageDigest =
   var ctx: sha256
   ctx.init()
-  defer: ctx.clear()
+  defer:
+    ctx.clear()
 
   ctx.update(msg.contentTopic.toBytes())
   ctx.update(msg.payload)
 
   # Computes the hash
   return ctx.finish()
 
-
 ## Public API types
 
 type
@@ -80,16 +71,15 @@ type
 
   HistoryResult* = Result[HistoryResponse, HistoryError]
 
-
 proc parse*(T: type HistoryErrorKind, kind: uint32): T =
-  case kind:
+  case kind
   of 000, 200, 300, 400, 503:
     HistoryErrorKind(kind)
   else:
     HistoryErrorKind.UNKNOWN
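Since computeDigest above is plain SHA-256 over the content topic bytes followed by the payload bytes, it can be reproduced by hand. A worked example, assuming nimcrypto/sha2 and stew/byteutils are imported and with illustrative message values:

  # sketch only: message values are illustrative
  let msg = WakuMessage(
    contentTopic: "/toy/1/example/proto", payload: "hi".toBytes()
  )
  let digest = computeDigest(msg) # MessageDigest, i.e. MDigest[256]
  # the same digest, computed by hand in the same field order:
  var ctx: sha256
  ctx.init()
  ctx.update(msg.contentTopic.toBytes())
  ctx.update(msg.payload)
  assert ctx.finish() == digest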
 proc `$`*(err: HistoryError): string =
-  case err.kind:
+  case err.kind
   of HistoryErrorKind.PEER_DIAL_FAILURE:
     "PEER_DIAL_FAILURE: " & err.address
   of HistoryErrorKind.BAD_RESPONSE:
@@ -103,15 +93,14 @@ proc `$`*(err: HistoryError): string =
 
 proc checkHistCursor*(self: HistoryCursor): Result[void, HistoryError] =
   if self.pubsubTopic.len == 0:
-    return err(HistoryError(kind: BAD_REQUEST,
-                            cause: "empty pubsubTopic"))
+    return err(HistoryError(kind: BAD_REQUEST, cause: "empty pubsubTopic"))
   if self.senderTime == 0:
-    return err(HistoryError(kind: BAD_REQUEST,
-                            cause: "invalid senderTime"))
+    return err(HistoryError(kind: BAD_REQUEST, cause: "invalid senderTime"))
   if self.storeTime == 0:
-    return err(HistoryError(kind: BAD_REQUEST,
-                            cause: "invalid storeTime"))
-  if self.digest.data.all(proc (x: byte): bool = x == 0):
-    return err(HistoryError(kind: BAD_REQUEST,
-                            cause: "empty digest"))
+    return err(HistoryError(kind: BAD_REQUEST, cause: "invalid storeTime"))
+  if self.digest.data.all(
+    proc(x: byte): bool =
+      x == 0
+  ):
+    return err(HistoryError(kind: BAD_REQUEST, cause: "empty digest"))
   return ok()
diff --git a/waku/waku_store/protocol.nim b/waku/waku_store/protocol.nim
index 4e085e3c8..be88b0ac2 100644
--- a/waku/waku_store/protocol.nim
+++ b/waku/waku_store/protocol.nim
@@ -18,49 +18,39 @@ import
   libp2p/stream/connection,
   metrics
 import
-  ../waku_core,
-  ../node/peer_manager,
-  ./common,
-  ./rpc,
-  ./rpc_codec,
-  ./protocol_metrics
-
+  ../waku_core, ../node/peer_manager, ./common, ./rpc, ./rpc_codec, ./protocol_metrics
 
 logScope:
   topics = "waku store"
 
+const MaxMessageTimestampVariance* = getNanoSecondTime(20)
+  # 20 seconds maximum allowable sender timestamp "drift"
 
-const
-  MaxMessageTimestampVariance* = getNanoSecondTime(20) # 20 seconds maximum allowable sender timestamp "drift"
+type HistoryQueryHandler* =
+  proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.}
 
-
-type HistoryQueryHandler* = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.}
-
-type
-  WakuStore* = ref object of LPProtocol
-    peerManager: PeerManager
-    rng: ref rand.HmacDrbgContext
-    queryHandler*: HistoryQueryHandler
+type WakuStore* = ref object of LPProtocol
+  peerManager: PeerManager
+  rng: ref rand.HmacDrbgContext
+  queryHandler*: HistoryQueryHandler
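A HistoryQueryHandler, as typed above, is just an async closure from HistoryQuery to HistoryResult; the node injects an archive-backed implementation. A stub sketch, with imports and surrounding context assumed:

  # sketch only: a real handler would query the message archive here
  let queryHandler: HistoryQueryHandler =
    proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} =
      # return an empty page for any query
      return ok(HistoryResponse(messages: @[], cursor: none(HistoryCursor)))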
 
 ## Protocol
 
 proc initProtocolHandler(ws: WakuStore) =
-
   proc handler(conn: Connection, proto: string) {.async.} =
     let buf = await conn.readLp(MaxRpcSize.int)
 
     let decodeRes = HistoryRPC.decode(buf)
     if decodeRes.isErr():
-      error "failed to decode rpc", peerId= $conn.peerId
+      error "failed to decode rpc", peerId = $conn.peerId
       waku_store_errors.inc(labelValues = [decodeRpcFailure])
       # TODO: Return (BAD_REQUEST, cause: "decode rpc failed")
       return
 
-
     let reqRpc = decodeRes.value
     if reqRpc.query.isNone():
-      error "empty query rpc", peerId= $conn.peerId, requestId=reqRpc.requestId
+      error "empty query rpc", peerId = $conn.peerId, requestId = reqRpc.requestId
       waku_store_errors.inc(labelValues = [emptyRpcQueryFailure])
       # TODO: Return (BAD_REQUEST, cause: "empty query")
       return
@@ -69,14 +59,16 @@ proc initProtocolHandler(ws: WakuStore) =
       requestId = reqRpc.requestId
       request = reqRpc.query.get().toAPI()
 
-    info "received history query", peerId=conn.peerId, requestId=requestId, query=request
+    info "received history query",
+      peerId = conn.peerId, requestId = requestId, query = request
     waku_store_queries.inc()
 
     var responseRes: HistoryResult
     try:
       responseRes = await ws.queryHandler(request)
     except Exception:
-      error "history query failed", peerId= $conn.peerId, requestId=requestId, error=getCurrentExceptionMsg()
+      error "history query failed",
+        peerId = $conn.peerId, requestId = requestId, error = getCurrentExceptionMsg()
 
       let error = HistoryError(kind: HistoryErrorKind.UNKNOWN).toRPC()
       let response = HistoryResponseRPC(error: error)
@@ -85,17 +77,18 @@ ...
       return
 
     if responseRes.isErr():
-      error "history query failed", peerId= $conn.peerId, requestId=requestId, error=responseRes.error
+      error "history query failed",
+        peerId = $conn.peerId, requestId = requestId, error = responseRes.error
 
       let response = responseRes.toRPC()
       let rpc = HistoryRPC(requestId: requestId, response: some(response))
       await conn.writeLp(rpc.encode().buffer)
       return
-
     let response = responseRes.toRPC()
-    info "sending history response", peerId=conn.peerId, requestId=requestId, messages=response.messages.len
+    info "sending history response",
+      peerId = conn.peerId, requestId = requestId, messages = response.messages.len
 
     let rpc = HistoryRPC(requestId: requestId, response: some(response))
     await conn.writeLp(rpc.encode().buffer)
@@ -103,20 +96,16 @@
   ws.handler = handler
   ws.codec = WakuStoreCodec
 
-
-proc new*(T: type WakuStore,
-          peerManager: PeerManager,
-          rng: ref rand.HmacDrbgContext,
-          queryHandler: HistoryQueryHandler): T =
-
+proc new*(
+    T: type WakuStore,
+    peerManager: PeerManager,
+    rng: ref rand.HmacDrbgContext,
+    queryHandler: HistoryQueryHandler,
+): T =
   # Raise a defect if history query handler is nil
   if queryHandler.isNil():
     raise newException(NilAccessDefect, "history query handler is nil")
 
-  let ws = WakuStore(
-    rng: rng,
-    peerManager: peerManager,
-    queryHandler: queryHandler
-  )
+  let ws = WakuStore(rng: rng, peerManager: peerManager, queryHandler: queryHandler)
   ws.initProtocolHandler()
   ws
diff --git a/waku/waku_store/protocol_metrics.nim b/waku/waku_store/protocol_metrics.nim
index 4f36955e3..1566841f9 100644
--- a/waku/waku_store/protocol_metrics.nim
+++ b/waku/waku_store/protocol_metrics.nim
@@ -5,11 +5,9 @@ else:
 
 import metrics
 
-
 declarePublicGauge waku_store_errors, "number of store protocol errors", ["type"]
 declarePublicGauge waku_store_queries, "number of store queries received"
 
-
 # Error types (metric label values)
 const
   dialFailure* = "dial_failure"
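Putting the pieces together, constructing and mounting the protocol looks roughly like this; peerManager, rng and queryHandler are assumed to already exist, and the mount call is illustrative:

  # sketch only: surrounding node setup assumed
  let wakuStore = WakuStore.new(peerManager, rng, queryHandler)
  node.switch.mount(wakuStore) # serves WakuStoreCodec on the node's switch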
diff --git a/waku/waku_store/rpc.nim b/waku/waku_store/rpc.nim
index dadd9140d..22e0d55ec 100644
--- a/waku/waku_store/rpc.nim
+++ b/waku/waku_store/rpc.nim
@@ -3,14 +3,8 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}
 
-import
-  std/[options, sequtils],
-  stew/results
-import
-  ../waku_core,
-  ../common/paging,
-  ./common
-
+import std/[options, sequtils], stew/results
+import ../waku_core, ../common/paging, ./common
 
 ## Wire protocol
 
@@ -25,11 +19,15 @@ type PagingIndexRPC* = object
 
 proc `==`*(x, y: PagingIndexRPC): bool =
   ## receiverTime plays no role in index equality
-  (x.senderTime == y.senderTime) and
-  (x.digest == y.digest) and
-  (x.pubsubTopic == y.pubsubTopic)
+  (x.senderTime == y.senderTime) and (x.digest == y.digest) and
+    (x.pubsubTopic == y.pubsubTopic)
 
-proc compute*(T: type PagingIndexRPC, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic): T =
+proc compute*(
+    T: type PagingIndexRPC,
+    msg: WakuMessage,
+    receivedTime: Timestamp,
+    pubsubTopic: PubsubTopic,
+): T =
   ## Takes a WakuMessage with received timestamp and returns its Index.
   let
     digest = computeDigest(msg)
@@ -39,17 +37,14 @@ proc compute*(T: type PagingIndexRPC, msg: WakuMessage, receivedTime: Timestamp,
     pubsubTopic: pubsubTopic,
     senderTime: senderTime,
     receiverTime: receivedTime,
-    digest: digest
+    digest: digest,
   )
 
-
-type
-  PagingInfoRPC* = object
-    ## This type holds the information needed for the pagination
-    pageSize*: Option[uint64]
-    cursor*: Option[PagingIndexRPC]
-    direction*: Option[PagingDirection]
-
+type PagingInfoRPC* = object
+  ## This type holds the information needed for the pagination
+  pageSize*: Option[uint64]
+  cursor*: Option[PagingIndexRPC]
+  direction*: Option[PagingDirection]
 
 type
   HistoryContentFilterRPC* = object
@@ -79,24 +74,22 @@ type
     query*: Option[HistoryQueryRPC]
     response*: Option[HistoryResponseRPC]
 
-
 proc parse*(T: type HistoryResponseErrorRPC, kind: uint32): T =
-  case kind:
+  case kind
   of 0, 1, 503:
     HistoryResponseErrorRPC(kind)
   else:
    # TODO: Improve error variants/move to status codes
    HistoryResponseErrorRPC.INVALID_CURSOR
 
-
 ## Wire protocol type mappings
 
-proc toRPC*(cursor: HistoryCursor): PagingIndexRPC {.gcsafe.}=
+proc toRPC*(cursor: HistoryCursor): PagingIndexRPC {.gcsafe.} =
   PagingIndexRPC(
     pubsubTopic: cursor.pubsubTopic,
     senderTime: cursor.senderTime,
     receiverTime: cursor.storeTime,
-    digest: cursor.digest
+    digest: cursor.digest,
   )
 
 proc toAPI*(rpc: PagingIndexRPC): HistoryCursor =
@@ -104,58 +97,61 @@
     pubsubTopic: rpc.pubsubTopic,
     senderTime: rpc.senderTime,
     storeTime: rpc.receiverTime,
-    digest: rpc.digest
+    digest: rpc.digest,
   )
 
-
 proc toRPC*(query: HistoryQuery): HistoryQueryRPC =
   var rpc = HistoryQueryRPC()
 
-  rpc.contentFilters = query.contentTopics.mapIt(HistoryContentFilterRPC(contentTopic: it))
+  rpc.contentFilters =
+    query.contentTopics.mapIt(HistoryContentFilterRPC(contentTopic: it))
   rpc.pubsubTopic = query.pubsubTopic
 
   rpc.pagingInfo = block:
-    if query.cursor.isNone() and
-       query.pageSize == default(type query.pageSize) and
-       query.direction == HistoryQueryDirectionDefaultValue:
-      none(PagingInfoRPC)
-    else:
-      let
-        pageSize = some(query.pageSize)
-        cursor = query.cursor.map(toRPC)
-        direction = some(query.direction)
+    if query.cursor.isNone() and query.pageSize == default(type query.pageSize) and
+        query.direction == HistoryQueryDirectionDefaultValue:
+      none(PagingInfoRPC)
+    else:
+      let
+        pageSize = some(query.pageSize)
+        cursor = query.cursor.map(toRPC)
+        direction = some(query.direction)
 
-      some(PagingInfoRPC(
-        pageSize: pageSize,
-        cursor: cursor,
-        direction: direction
-      ))
+      some(PagingInfoRPC(pageSize: pageSize, cursor: cursor, direction: direction))
 
   rpc.startTime = query.startTime
   rpc.endTime = query.endTime
 
   rpc
-
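The `==` defined above deliberately ignores receiverTime, so the same message indexed by two stores at different arrival times compares equal. A small sketch with illustrative values, where `digest` is assumed to come from computeDigest on the message:

  # sketch only: field values are illustrative
  let
    a = PagingIndexRPC(
      pubsubTopic: "/waku/2/default-waku/proto",
      senderTime: Timestamp(1000),
      receiverTime: Timestamp(1010),
      digest: digest,
    )
    b = PagingIndexRPC(
      pubsubTopic: "/waku/2/default-waku/proto",
      senderTime: Timestamp(1000),
      receiverTime: Timestamp(2000),
      digest: digest,
    )
  assert a == b # receiverTime is ignored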
 proc toAPI*(rpc: HistoryQueryRPC): HistoryQuery =
   let
     pubsubTopic = rpc.pubsubTopic
     contentTopics = rpc.contentFilters.mapIt(it.contentTopic)
-    cursor = if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().cursor.isNone(): none(HistoryCursor)
-             else: rpc.pagingInfo.get().cursor.map(toAPI)
+    cursor =
+      if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().cursor.isNone():
+        none(HistoryCursor)
+      else:
+        rpc.pagingInfo.get().cursor.map(toAPI)
     startTime = rpc.startTime
     endTime = rpc.endTime
-    pageSize = if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().pageSize.isNone(): 0'u64
-               else: rpc.pagingInfo.get().pageSize.get()
+    pageSize =
+      if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().pageSize.isNone():
+        0'u64
+      else:
+        rpc.pagingInfo.get().pageSize.get()
-    direction = if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().direction.isNone(): HistoryQueryDirectionDefaultValue
-                else: rpc.pagingInfo.get().direction.get()
+    direction =
+      if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().direction.isNone():
+        HistoryQueryDirectionDefaultValue
+      else:
+        rpc.pagingInfo.get().direction.get()
 
   HistoryQuery(
     pubsubTopic: pubsubTopic,
@@ -164,13 +160,12 @@ proc toAPI*(rpc: HistoryQueryRPC): HistoryQuery =
     startTime: startTime,
     endTime: endTime,
     pageSize: pageSize,
-    direction: direction
+    direction: direction,
   )
 
-
 proc toRPC*(err: HistoryError): HistoryResponseErrorRPC =
   # TODO: Better error mappings/move to error codes
-  case err.kind:
+  case err.kind
   of HistoryErrorKind.BAD_REQUEST:
    # TODO: Respond also with the reason
    HistoryResponseErrorRPC.INVALID_CURSOR
@@ -181,7 +176,7 @@
 proc toAPI*(err: HistoryResponseErrorRPC): HistoryError =
   # TODO: Better error mappings/move to error codes
-  case err:
+  case err
   of HistoryResponseErrorRPC.INVALID_CURSOR:
     HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: "invalid cursor")
   of HistoryResponseErrorRPC.SERVICE_UNAVAILABLE:
@@ -189,7 +184,6 @@ ...
   else:
     HistoryError(kind: HistoryErrorKind.UNKNOWN)
 
-
 proc toRPC*(res: HistoryResult): HistoryResponseRPC =
   if res.isErr():
     let error = res.error.toRPC()
@@ -209,11 +203,7 @@
 
     error = HistoryResponseErrorRPC.NONE
 
-  HistoryResponseRPC(
-    messages: messages,
-    pagingInfo: pagingInfo,
-    error: error
-  )
+  HistoryResponseRPC(messages: messages, pagingInfo: pagingInfo, error: error)
 
 proc toAPI*(rpc: HistoryResponseRPC): HistoryResult =
   if rpc.error != HistoryResponseErrorRPC.NONE:
@@ -222,10 +212,10 @@
   let
     messages = rpc.messages
-    cursor = if rpc.pagingInfo.isNone(): none(HistoryCursor)
-             else: rpc.pagingInfo.get().cursor.map(toAPI)
+    cursor =
+      if rpc.pagingInfo.isNone():
+        none(HistoryCursor)
+      else:
+        rpc.pagingInfo.get().cursor.map(toAPI)
 
-  ok(HistoryResponse(
-    messages: messages,
-    cursor: cursor
-  ))
+  ok(HistoryResponse(messages: messages, cursor: cursor))
diff --git a/waku/waku_store/rpc_codec.nim b/waku/waku_store/rpc_codec.nim
index 5d44b3ba9..9d0e281a4 100644
--- a/waku/waku_store/rpc_codec.nim
+++ b/waku/waku_store/rpc_codec.nim
@@ -3,18 +3,11 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}
 
-import
-  std/options,
-  nimcrypto/hash
-import
-  ../common/[protobuf, paging],
-  ../waku_core,
-  ./common,
-  ./rpc
-
-
-const MaxRpcSize* = MaxPageSize * MaxWakuMessageSize + 64*1024 # We add a 64kB safety buffer for protocol overhead
+import std/options, nimcrypto/hash
+import ../common/[protobuf, paging], ../waku_core, ./common, ./rpc
 
+const MaxRpcSize* = MaxPageSize * MaxWakuMessageSize + 64 * 1024
+  # We add a 64kB safety buffer for protocol overhead
 
 ## Pagination
 
@@ -66,7 +59,6 @@ proc decode*(T: type PagingIndexRPC, buffer: seq[byte]): ProtobufResult[T] =
 
   ok(rpc)
 
-
 proc encode*(rpc: PagingInfoRPC): ProtoBuffer =
   ## Encodes a PagingInfo object into a ProtoBuffer
   ## returns the resultant ProtoBuffer
@@ -74,7 +66,13 @@ proc encode*(rpc: PagingInfoRPC): ProtoBuffer =
 
   pb.write3(1, rpc.pageSize)
   pb.write3(2, rpc.cursor.map(encode))
-  pb.write3(3, rpc.direction.map(proc(d: PagingDirection): uint32 = uint32(ord(d))))
+  pb.write3(
+    3,
+    rpc.direction.map(
+      proc(d: PagingDirection): uint32 =
+        uint32(ord(d))
+    ),
+  )
   pb.finish3()
 
   pb
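A round-trip sketch for the paging codec above; PagingDirection.FORWARD is assumed to come from ../common/paging, and decode is the proc whose hunk follows:

  # sketch only: enum value and decode signature are assumptions
  let info = PagingInfoRPC(
    pageSize: some(25'u64),
    cursor: none(PagingIndexRPC),
    direction: some(PagingDirection.FORWARD),
  )
  let decoded = PagingInfoRPC.decode(info.encode().buffer)
  assert decoded.isOk() and decoded.get().pageSize == some(25'u64)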
@@ -105,7 +103,6 @@ proc decode*(T: type PagingInfoRPC, buffer: seq[byte]): ProtobufResult[T] =
 
   ok(rpc)
 
-
 ## Wire protocol
 
 proc encode*(rpc: HistoryContentFilterRPC): ProtoBuffer =
@@ -124,7 +121,6 @@
     return err(ProtobufError.missingRequiredField("content_topic"))
 
   ok(HistoryContentFilterRPC(contentTopic: contentTopic))
 
-
 proc encode*(rpc: HistoryQueryRPC): ProtoBuffer =
   var pb = initProtoBuffer()
   pb.write3(2, rpc.pubsubTopic)
@@ -133,8 +129,20 @@ proc encode*(rpc: HistoryQueryRPC): ProtoBuffer =
     pb.write3(3, filter.encode())
 
   pb.write3(4, rpc.pagingInfo.map(encode))
-  pb.write3(5, rpc.startTime.map(proc (time: int64): zint64 = zint64(time)))
-  pb.write3(6, rpc.endTime.map(proc (time: int64): zint64 = zint64(time)))
+  pb.write3(
+    5,
+    rpc.startTime.map(
+      proc(time: int64): zint64 =
+        zint64(time)
+    ),
+  )
+  pb.write3(
+    6,
+    rpc.endTime.map(
+      proc(time: int64): zint64 =
+        zint64(time)
+    ),
+  )
   pb.finish3()
 
   pb
@@ -178,7 +186,6 @@ ...
 
   ok(rpc)
 
-
 proc encode*(response: HistoryResponseRPC): ProtoBuffer =
   var pb = initProtoBuffer()
@@ -218,7 +225,6 @@ ...
 
   ok(rpc)
 
-
 proc encode*(rpc: HistoryRPC): ProtoBuffer =
   var pb = initProtoBuffer()
diff --git a/waku/waku_store/self_req_handler.nim b/waku/waku_store/self_req_handler.nim
index 9504109cc..183de1223 100644
--- a/waku/waku_store/self_req_handler.nim
+++ b/waku/waku_store/self_req_handler.nim
@@ -1,4 +1,3 @@
-
 ##
 ## This file is intended to handle the requests that come directly
 ## from the 'self' node. It is expected to handle the store requests that
@@ -14,16 +13,12 @@
 ## stored by that local store node.
 ##
 
-import
-  stew/results,
-  chronos,
-  chronicles
-import
-  ./protocol,
-  ./common
+import stew/results, chronos, chronicles
+import ./protocol, ./common
 
-proc handleSelfStoreRequest*(self: WakuStore, histQuery: HistoryQuery):
-    Future[WakuStoreResult[HistoryResponse]] {.async.} =
+proc handleSelfStoreRequest*(
+    self: WakuStore, histQuery: HistoryQuery
+): Future[WakuStoreResult[HistoryResponse]] {.async.} =
   ## Handles the store requests made by the node to itself.
   ## Normally used in REST-store requests
 
@@ -32,7 +27,5 @@ proc handleSelfStoreRequest*(self: WakuStore, histQuery: HistoryQuery):
       return err("error in handleSelfStoreRequest: " & $error)
 
     return WakuStoreResult[HistoryResponse].ok(resp)
-
   except Exception:
     return err("exception in handleSelfStoreRequest: " & getCurrentExceptionMsg())
-
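A usage sketch for the handler above: serving a locally originated (e.g. REST) history query without a network round-trip. The wakuStore instance and the query values are illustrative:

  # sketch only: wakuStore and query contents are illustrative
  let histQuery = HistoryQuery(
    contentTopics: @["/toy/1/example/proto"], pageSize: 20
  )
  let res = await wakuStore.handleSelfStoreRequest(histQuery)
  if res.isOk():
    echo "retrieved ", res.get().messages.len, " messages"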