diff --git a/waku/v2/node/waku_node.nim b/waku/v2/node/waku_node.nim index 042361fd9..cfb61b015 100644 --- a/waku/v2/node/waku_node.nim +++ b/waku/v2/node/waku_node.nim @@ -86,7 +86,8 @@ type wakuFilter*: WakuFilter wakuFilterClient*: WakuFilterClient wakuSwap*: WakuSwap - wakuRlnRelay*: WakuRLNRelay + when defined(rln): + wakuRlnRelay*: WakuRLNRelay wakuLightPush*: WakuLightPush wakuLightpushClient*: WakuLightPushClient wakuPeerExchange*: WakuPeerExchange diff --git a/waku/v2/protocol/waku_message.nim b/waku/v2/protocol/waku_message.nim index 6b0e7b9e3..e6e4c94c5 100644 --- a/waku/v2/protocol/waku_message.nim +++ b/waku/v2/protocol/waku_message.nim @@ -40,7 +40,8 @@ type WakuMessage* = object # the proof field indicates that the message is not a spam # this field will be used in the rln-relay protocol # XXX Experimental, this is part of https://rfc.vac.dev/spec/17/ spec and not yet part of WakuMessage spec - proof*: RateLimitProof + when defined(rln): + proof*: RateLimitProof # The ephemeral field indicates if the message should # be stored. 
bools and uints are # equivalent in serialization of the protobuf @@ -56,7 +57,8 @@ proc encode*(message: WakuMessage): ProtoBuffer = buf.write3(2, message.contentTopic) buf.write3(3, message.version) buf.write3(10, zint64(message.timestamp)) - buf.write3(21, message.proof.encode()) + when defined(rln): + buf.write3(21, message.proof.encode()) buf.write3(31, uint64(message.ephemeral)) buf.finish3() @@ -75,9 +77,10 @@ proc decode*(T: type WakuMessage, buffer: seq[byte]): ProtoResult[T] = msg.timestamp = Timestamp(timestamp) # XXX Experimental, this is part of https://rfc.vac.dev/spec/17/ spec - var proofBytes: seq[byte] - discard ?pb.getField(21, proofBytes) - msg.proof = ?RateLimitProof.init(proofBytes) + when defined(rln): + var proofBytes: seq[byte] + discard ?pb.getField(21, proofBytes) + msg.proof = ?RateLimitProof.init(proofBytes) var ephemeral: uint if ?pb.getField(31, ephemeral): diff --git a/waku/v2/protocol/waku_rln_relay/waku_rln_relay_constants.nim b/waku/v2/protocol/waku_rln_relay/waku_rln_relay_constants.nim index a2c92bc18..3f0c3fad4 100644 --- a/waku/v2/protocol/waku_rln_relay/waku_rln_relay_constants.nim +++ b/waku/v2/protocol/waku_rln_relay/waku_rln_relay_constants.nim @@ -1,246 +1,248 @@ import stint -# Acceptable roots for merkle root validation of incoming messages -const AcceptableRootWindowSize* = 5 +when defined(rln): -# RLN membership key and index files path -const - RlnCredentialsFilename* = "rlnCredentials.txt" - -# inputs of the membership contract constructor -# TODO may be able to make these constants private and put them inside the waku_rln_relay_utils -const - MembershipFee* = 1000000000000000.u256 - # the current implementation of the rln lib supports a circuit for Merkle tree with depth 20 - MerkleTreeDepth* = 20 - EthClient* = "ws://127.0.0.1:8540" + # Acceptable roots for merkle root validation of incoming messages + const AcceptableRootWindowSize* = 5 -const - # the size of poseidon hash output in bits - HashBitSize* = 256 - # 
the size of poseidon hash output as the number hex digits - HashHexSize* = int(HashBitSize/4) + # RLN membership key and index files path + const + RlnCredentialsFilename* = "rlnCredentials.txt" + + # inputs of the membership contract constructor + # TODO may be able to make these constants private and put them inside the waku_rln_relay_utils + const + MembershipFee* = 1000000000000000.u256 + # the current implementation of the rln lib supports a circuit for Merkle tree with depth 20 + MerkleTreeDepth* = 20 + EthClient* = "ws://127.0.0.1:8540" -const - # The relative folder where the circuit, proving and verification key for RLN can be found - # Note that resources has to be compiled with respect to the above MerkleTreeDepth - RlnResourceFolder* = "vendor/zerokit/rln/resources/tree_height_" & $MerkleTreeDepth & "/" - -# temporary variables to test waku-rln-relay performance in the static group mode -const - StaticGroupSize* = 100 - # StaticGroupKeys is a static list of 100 membership keys in the form of (identity key, identity commitment) - # keys are created locally, using createMembershipList proc from waku_rln_relay_utils module, and the results are hardcoded in here - # this list is temporary and is created to test the performance of waku-rln-relay for the static groups - # in the later versions, this static hardcoded group will be replaced with a dynamic one - -const - StaticGroupKeys* = @[("2c0198101a2828a0ac6c9e58fc131e1bd83326a4f748ef592588eeb8c3112dc1", - "1ca742a54641e2de14c5cb87ad707fd869693f682cedb901e47b8138918aecb3"), ( - "095177fd334629dbc29c5fc9e32addecfcb6a42f9673268810fa9f70d1a8191a", - "0e30810a60f53fcbd60b312d9d65ccefe4e9d0b0d2220d7350fdf881469e59eb"), ( - "131c6d4dbfb3feafc7fdd69aa62cd05d7b6be0cd7b7777ce9513ad742be71763", - "110ca13b7fa2ed72ccc4659a4fdaea3a2e28443617a29bbe977e92c83b531a15"), ( - "2f45043d0efabffadcc0db24d238a8bc4a30ce7394ee2a70b4f9c91ff675d3d5", - "23a76dd96b527d12f7d5cda1000cba8e4926b072a795d3a2bc13c479956038f5"), ( - 
"0ca272f013725eb1cfed07780a0ea6122e67d0c29717b26eb12c71dfd2367bf4", - "082663318217ad470c42f66cc66a888b296f92353d35151c31d8c611ce2cd1d4"), ( - "0d08566358b2af4b38c0ce7d92840d1bc71930a7591470ad8cabf457de60833a", - "28c6909a697e160d69ffda94e0f9acc9cca6d61c05b8bd78b864620313b7a808"), ( - "22b64925e839c488ef606d99d463f7af963b5634bfbc81a1e8c93597b65e1b66", - "2c1e7bf4af57bdadebb36ee840db5cfe88bb722d05ba24eb788b6dbcc2faffc1"), ( - "1c102745fffa271dbc1e6ba9d1e4d4cf03f0537e1c0937d7126453eec61afe79", - "2e57378468e7d3612379047dc0580a48eb75afacd827324b4c16cb18ed331bae"), ( - "2ef1ca7b105b932c78ae2b03b152a07dcf2ceb9459a718d417447c870559f388", - "05b7e60a719029aad98713ed5a0bcba3982e16a0befafe0d4763c8e2fef83c0e"), ( - "1aceda8f46cd198b7abbb79e45f6508af8999ca31332133d042dfc4a19201b77", - "2f3d259b8aabe16f645d4a6e66ba890d55e7f0cf594935ea852a0e2624868f40"), ( - "202de1595bc8e35e502a46124a067453d1e9c1b9f406acbac37c93be4bc1401e", - "1f5666cf374886ca7558658a7e9b5af5004f2bef9b65c5ff19394cd57082eac5"), ( - "162cf23e68b513465c1c392879ad7a3ce51fb98687777873c145cf32b0e2da23", - "1f3f115f1ecf136d740fa0927d2525beaa2fc965b7cbab4ce6f96d9fcc206e9f"), ( - "1c45d7e1421ae4df9a32bd2ea6f2bff70a4ed56762ef1358ee73bd8ee13e21f3", - "2417003e9c27c4fbc14c77e877074eceeaae6ace98f2b881c1c4eca2583c9c83"), ( - "0b0b3d09d843d911b283299bbcd8965e8e4534c66ef9752be1eb01dff69669f9", - "115b7c70b9a1f468295afdca9dba3b67ccc3123e5be74d5a8da73a029fa2e68c"), ( - "1b1b4f1fbc8804762e3e7ccb53c50921a5642431169cb49cb220eed33e40d5c9", - "2b7821ead551ba2a9a55450da9b21d6aaa4c382c50d9fcc9115b48a89f06a496"), ( - "067b456b971b40477a97b66672fbf259f5dc46669218272ed517d36716f0e43d", - "0f4cbafe9afbee252db6dbaf23a5975e24bf2d360b907f5c88bdd3e3be6bff6e"), ( - "18ee822f7abe8ac80aba210116b438d661a97dba2c0602456582a767e33e4589", - "02f812ffd5b9859fd202ad21c303b6352bda53af97dd944e9ae4161508191f55"), ( - "14c41f2ee56279e7bca332679c4ed5c865b83b4e7aa1be0415d4e5ef12a5c532", - "122f3326f332d5ff387bf8e3f4a2ac647636a2dbc69be9590664107f87ab8d3b"), ( 
- "29b1fe377ff7295120756108959ee7d6c8dc314c078086fd52743e81d3134ca7", - "1e47ec0b7957e911a9c3591a9df514413e437eb8549a61a2d13f9aa9f38559c5"), ( - "17d5b6d345b4ee67418a29ee18a6bed4976c1321ba0b299f2e25dca30a421c9b", - "233ef113d1b1c6659b1c964eba8a8b2336e9f71db3daa251f8a69968aeeff066"), ( - "14e40d11000392066c5624737ad96faed0e126cccec8eb4f8f7b5eed141c82d4", - "1d8bb23790d6105666913eb9bdb4ad06584e12bf428e46f034b546b9cbe05396"), ( - "0498600cbfb3fff26656fb8ecf7f5fc1212003d5391e6998330bae8b3453c426", - "2944f71c79f223b47d0ded89fea53ecd362d64314c39aa632046fd7c9bc366a3"), ( - "19137c8e6c8249e4897b260d387aea6afb82d0f36f9eef5018ea786dc6a8504a", - "2dbd8b02cbb6d5a8fc2a91ae5175019af9c9c003bcdcbb5d56590ba48ec353e3"), ( - "176f27c080beeb31b4d4726846185b41b0ed480b589645d18609759afe8847d0", - "1030d697176631bba1ac4179f6a1640a4d7b99b68484a4808f750fd58bcb4e23"), ( - "0383a55e14226eff828d4a3f6df6e3374d6a9e6538d9a36ba2161b5e88e4a3c5", - "25e4b8ccb1fe4f9cf90e60dbd9714902e3c76e95e39ebaadac0dc36b3b632129"), ( - "0c57de13726bee0b6fc5f04c45d4bc3fdc1f884eaf8ece07ac85b87d23875848", - "22f4ceb33f1a84d0d263aebc1d0850347bc126e9981dbcc4593aaa53e3861106"), ( - "06d159ec59bc4b0e9ec25e63fe3e29368059999e3c304b565789c776f89f0fca", - "144789e2e129d2282ecf5dd3b23286ab475a8eec7e066182f28ef71be14ce048"), ( - "242b10778fa6e753bb08f186b2e07c1c87c263057e09d9ce88333033f465799d", - "2feb99f0a004f1ee48046d72969eaa73ec5945fed1b8e41704d38fdee2b6afb6"), ( - "1359ff7f691d03c867ed1e10e7bffec09ffda9d9254565d5da8b3d472ef91edb", - "016f4071d37b38572bb3b4f07b86d47b1813bcb64caaf0fcf4c49388bbb8d534"), ( - "14263ca40720663eaa4e78cd9bc6336aaed42f83c30a20b4b62c5a5a9b053ed4", - "121d1ac2a73279e3b1ed99c044a5b240cd4345c6e944233b8850f85f91830656"), ( - "017e31163b233f56fa999c1433dbe6bf9f4e6c248d83f6a6f496866ea407cfa1", - "27e00d68571fb1b20f32ad2090125d39a781f3a80133d267a520c09e3e000439"), ( - "2668502cb436958df8b6036340c9cbfcd6f6cb4304a46d50621fa612869c8c17", - "0577e480c19501a7e1b03acc9e456c79af9b7c2405a005fa288340196290812c"), 
( - "1cbe3144b594879735c43002b480bd83e83c4fe6cd430fc447c11ef862934bf8", - "0ff3b7fdb22a138cd90beec386f5cac06c3167dd2f25169199004472394f52ca"), ( - "221edbd101eb5300d45f30c5102b9cf61a4668de7b96c2428e726fbb2eccfba2", - "1ee78ea001663058670233fa4bd3f5eca125d2ee27166a3c270cc9ff828ef44e"), ( - "09f4c1a0a0acb6fd0bdbad6fa15fec36a053b04924f71c50158d386393dea791", - "0af261879fe9f9dc428d2d3ed1e2951e0b8ea45d2dd9452ae9657197ebc25bfa"), ( - "1c797777a6175c021f776a177617ff78c691075e61c5ab78a575696abf98256e", - "14acf51f86ed0f2cc202599a7b2c7d836036e80374a27ba4d1a015577ac0af22"), ( - "11b022742928eea62ef72d64b6156c7bc1a3df355e57d5c1edf29887b7d788e7", - "15aa67cf3c4bfcc267b5267c33b026cd8d7b19e2a6b3f8abef610da276419aba"), ( - "00323a544e19221877fa41066cc0f400f73b4d788e05192f9ba4c33559c113bd", - "1afab12d34efab0b919c5fcf9aa4250a13e48bf97c112f0451c8ab687cc93515"), ( - "0ff6ec8f949bc2c540fc661911215818212f37f37d1b041517bee13f01d01877", - "099ac3a4ae91b8aaaedb701d53c284931bdd4e1acf248cc435a2121b44d876e4"), ( - "03200d6a35588a4cae7342c9cb2160a596072f17e51b0071eec3eeef44df7a85", - "21785df67a48d487e7267175ceb1398d871fcf204d3507389c08fd818eae8e35"), ( - "22f6ffa9d3607ae5eb10610dc9198d1e836b5b67fdebe0e875f59ee41c49d28d", - "285af8a70a34a6d34e6d715d28b18869c74fa8c0a97cd5035c39b4ad9c9bef88"), ( - "131373e532a4c0e6730d023c8cc00a300ded84f0ff38ae8ef26b869ffd21a495", - "2617021b27ccb848203efb158e222e727929239acebc1bcfcfdab441ca2267d8"), ( - "2deaa385e5ac88d90c9ac88f4dd1390b570da7e3368820baddee878c1c920985", - "0775d7a6e69b2fc8d887ac4d8ba5f45a5d70ff1593d44ec7d68f125241a27019"), ( - "13a55e79d5843dd86599461865d3e41895ae7414778b5d6d7b3122f8a21b59f0", - "0023467c1f7de5335e1f25ae8a854dcd9afae22f8a8ebda13725827ad0b5781d"), ( - "0b6b34fa4f045993221dddc35b1a7d39b900bc74d6da3e75d85dbf3f6a9002a3", - "204d8b8dbc6fe2300c2a7998c6334aa772e8e31f9aa0521f074cebfca09076de"), ( - "07bf2bd503c4c08950b74961eae6b22c1dfa9866e1d9566561665ffd3c5f426b", - 
"0049bade3018d28f8c40a47c71f1415811dc6528f0bf6982abd09f4df6d45432"), ( - "2a2243a0405c0d40af17245e02e1c1f5aea007143fd17fe2fd93a2b49b3f0a57", - "112aab67ed0bacf225f87ff6591fc8d219a73b4dbfe36b21892a5795d481d4f3"), ( - "22a1fd33e4b83d1cbac4a92160ac8b41ef86774bd7885f69a8fa79fe6c33a4d2", - "2de16b061df3b291a664ef1e1eb285538d18b80aea7014ebf2b3e50d22888861"), ( - "284e390e5be08da632b0155b6ee23941b3275fd23682590c7efd1dcd78b69d5e", - "0a282a3b2e4462ea48c82933c2ed03d046058d24c39d02b7d012126adc79234f"), ( - "216ef8f66b9d3cab3ad9f95e041677ced8d59cfa987cc898d0db31ec17bc2d04", - "08cbcb81beaa2113b5710e5441032a7d7144121372ef9d2d903b8e76c44108d3"), ( - "1f5fe5d1a7eba65373f8c23b02b330e24677fd8fecc8142f4af2df2de6a9cdc5", - "18818a95dc1a2dceeaf5484b2324b6f2977ba0a8120d4d19b9a1bac7482a25d6"), ( - "02f4a10357e1b8fcd3c8d6fa3cbe498a4451cc0d3c29075addc853c7e0b851b7", - "23f50656c54f84cbce760335ca09d50f27ab160f7fcea39ceb49d9d5aef67a29"), ( - "1ae88ec49df2fa49d2f1d3170a15b2c5a80fc950ab0b50d28a6caa20aca6722c", - "08f18be90156d957e2328367d7fd661d6c5bcb167193961c7f9b7d7bb82855c0"), ( - "152b2f54041f1a615225bc96723dae89fd0d17352b0d37b37f419e49332f3a84", - "087ea7a796503b43cd723344004d4a6c869f9c779ef5c96b4f385c1b6a16f585"), ( - "0ce422373db69ea5f19ad22480b6b5052c8e2d66e3bde5cf9a3a43998e699f32", - "3034885d053bdcb7074ff7bd67f5b6d3dfc27be07986ae27ae65f99f95670b91"), ( - "0ea472ec17bba7b8d9e7c522d9d5b0b8404ae1da0f6998c7e1acbc5433f3002e", - "1eb8eb2cf399ceed4b9c4894d1cba611d78f2aea9ec3bb04e4f4502c6e72504b"), ( - "20769b227bb7d3cd8f08a7c3a58e557d87a645601dda0a89b322cdc4a29dc073", - "13b601dea1b88605d1479331596128fcff755118259802a2ef28e7aa3a7900f5"), ( - "207c117d42d91e6b6d4c0451aa96f38d7d86a0dda678440f4a1b79ffe9efc825", - "064790db9a228621e867ba894c0e7f3a0b178c0eae72892dc743ee62d5c557eb"), ( - "2ed78374a4b336bcb8fd8603610059ff51dac1cc7c6dd3549c072101e0443f86", - "2c1d57b17d1f0fd9cf00fa9409ecb47624c8685bd632f447a3c15ab0b1d90fc3"), ( - "073fee805a6121d1cac9f68ab6b5759313a82100e9b59e70a84589f1e311736a", 
- "1c61cc79a8a9c061385ea2f7899171f5d870b0f5de27b6acd5387d7caa8f3d1a"), ( - "2fe95c45a1c3ed3f81a9713762c8f15edd783207a2343f866466474be47cd297", - "137d8a12344f774e17c852313526c982927a08050b8add27ad8fe8feea29c49f"), ( - "10c3634849afe5a64bb5325770dc2ab1045bead339fa995397cda85e5bb48aa8", - "1cc853ceab6fe0a2e0396f5e58ff96ffd7280907c44c77d2327e34018bcf2924"), ( - "26853b411ff5829880b6305740a15c245b4246e84ffc6d61722966f466a4eb51", - "1d3a27beef286cb47d716de1b6b93b1c090e759a9538395a38bb804cdb2d11f9"), ( - "2b4ea98233ee2eac4df809f2620bbc6b183b956561e1eb25c8b4a84a28ffe945", - "1c82f94e2e952156c1d759d3f93fc97bf8f8a444183e578cb62001e1bbaf32d9"), ( - "1c047970753dda8c20b8cc4a40721847d26dff8634b72d02e8d28e3b407c80d1", - "195e38c09efa924609232323395867ccd8f913dd7a4fe4322a043f6eeb51ed7a"), ( - "01c2cd45a421cb629b5d4752380f296f98ba923a6ee9b28c3f427f236138c9b9", - "160875e6eceeb3e3abfbaea572d16e7a4c1df16a906f18805d1db031bbe25ae2"), ( - "1ac75381eb5d3bb34dbf46b3e8deaf88822bf59880cd206f2b88a7cb0cd78a55", - "2198af5533f0829ee6a02cd9f7f9d50de57e15f08d86fc8d18eb9d6f7b3751c6"), ( - "254c281c6a72e0fadfe88280dcf1353b1880db14634863cab2fc18fc38ffe4b6", - "1ba0e9722e2f2911610966004407991a8c7f41c5c4bb78f15b9809aa45b6ed2e"), ( - "2786cdfc7446c95c3f3a83d851d0c50cd3ee96bb91bec1bb9a3308907dab64c2", - "2bfe013073d942cfb1793c0aa8d14a5e09f7d5ac7c8f6dd5119ef730c232d78f"), ( - "1a809572677c00af2d52023bc2aaa3d43d8b6f16f7576a357f82471081b2298b", - "13ff785c5a8ec7ab13291759c37499d32ab95edededf2e7c08eb3e5d0dd0b4e4"), ( - "2ef26ceeca603bacaae271072909a22cea69fce5f299865c66f5ae63b45dbe30", - "0ed774428015317b7d299500c5e4d01e49039bcfebe5db3ab1034c664d975e3c"), ( - "2d5d5b1d825b27f2345976f3d43e47017c1efc41e8e62f93b0e12c1037b316db", - "2343a52d8a37d986fdd52c31d5742a84f51088c002f4dd4e4b039960a504cbdb"), ( - "172e2144b6ded94be4b9642c7c6f9afed81aecb5485aeef5b3d054f53aa2756f", - "2406c9401b7cbd9494f429616ffc85a489bfbdb6f914bd84640b28528dbfce68"), ( - 
"285af6cd43a017bec5394152e13a2ca4df2131b32c28cc9df9b45c24aa0bd0e3", - "0f79493ffde795d083b097734356107f41f33137b6bc7118f2768c924cf31057"), ( - "00117742394adf01ad59a127f0a0bec58f8d4e593c589266ed15041422f382f3", - "0fb47c14a31f6be53718284a8f35f3a7be7445acb7685893b007a22da26dfb4b"), ( - "041d9eed43c71e705a9a4ab0f3d20d27e27cc86f494db2579c20166fde4fe733", - "1f37f81316470689a67a8f6fc22a8000870affa7f69a53c0538240bae147e8e1"), ( - "2c43ad22585ba0e1c836782ce619ae0579ac376efe2c433c3d29257fc65d4b3e", - "0303d6adea59b2538c8c625761396228fbd552e36d93af02d177c1219ba778c9"), ( - "2931a506675b34076d279296b1fb36d8d2161568210754bc0ada09206b956814", - "2a650ed20b09a37b6c99f4ed99e352069979306db538c9e85d8e4f24990df7ee"), ( - "0199bb1cb7798516759aecedc974db39f8ecc9befbf97265b224c17b66b241c6", - "18ba142aaa522112b47c2ebab08a483e01e75b7931024b1accca5e7ba557b983"), ( - "2fc6e58b52fb6bdcde370f5849c990c542dc112c42100323dd199a3a90c3c55b", - "1be44b8c0fbc328fe46ed9ad8d62d85c6058df7a5a1eadd3d88d018dfdca2d5e"), ( - "0e5928b37cd899cd186c0b5ed2d2b9034b82fa542420903c25aab9d835e16697", - "0db6cc4f93d7c62a819ff05218e7f97deed2899ff87e9316048afa3d5f88f8d3"), ( - "1c4ad5b10dc591d28b111f17bdf06c4d52fca3f598a16918f0c22adb1046b6fe", - "0fdbbf0beb5a7e9fe36eb5950a8a42886c985c35a753193dfb88b2289bcd273e"), ( - "254cc2e093b44b9817d6e63b200e901719ee2c71cf08122845ae17c83df11b57", - "0ce7d93826ead805dccb2db908df05af4b8eb253ffba9f7e12f5b65c790e2f13"), ( - "082d33c58b7ef35450b6e0f04228af71cb62c24b65facdca8a786448939a64fb", - "1eca9298aee68fec2c3485fe7b46b154b2d28dac651d4692143da495e1a6a567"), ( - "135441db7e663906d2a3fe179541475c568dfe5100a7bb0d711bb8fca34d6dbd", - "2832f9e2f973d3c679905fc1c173b71384c54d0c86be08ca053052a1e516fc07"), ( - "1d979375e1e725d394b017452863a2375a349072add1a1f4e8a62783e29997e4", - "16d4c4d52996d207d2f5111e6be2a333f575cd63a5d71df24e413ec239441a64"), ( - "241e5996499e43372a4e86c30a1f141d6157eb175b939da7b08a19522a52bac0", - "129341569c322516d77c426ebcfef6d7f40e5338d15c31a5aa3bcd056008fe78"), ( 
- "2269de57ca36264ec72dde1c8ebacee471960cc2fad7c0ee3a9118ba40e41e87", - "2aae3a1043157c1bf2c79e6111cdc46c7e1a2bc960596eb023b7b8c6227c2e9d"), ( - "120cb3d07e7560d2146397bdf872004fa29eefc5a5113595be164ecd72125aab", - "1e21ba2aec2eaaae2595695d5b638a27433bd883b8e17f6f8fe1fd1b3421fceb"), ( - "1bc45135b0cffa828ee41ce1c65b64d9f3f65390684401e903c9964099919e2c", - "0083742467cf09853010726529661bac06864c91da35300f630e8cc78c61cbf9"), ( - "233135b00df8453eaebcd34d338014d01efbabebb3ddfe6219dd7f5db7a341ba", - "07fd4cd97e761f7391eb9718baddef0acc9a6dba3dedaef40612754a2dcdee39"), ( - "02fe17ec7b1ccc29368b8f654442db237d8cd8221edaff653220ca0beffef1cc", - "0cf8adbfd9140830b15670269f6c9e7a39b6172e2b0e059998403ee609323c09"), ( - "11221c9f08070d40f1368e0818d2f31c93312b98954bdd1f0f820d68be592f7c", - "16de82ddd15ed3809e392ae92a0d67dbde6abeede876c7cdf8e6fa106721eb67"), ( - "12c0d0c606aa1cc11896de752de3b071793b0d40bc1c76224bf709a73f3e0694", - "184d0476ead3701a7ffb62d0ae2b43e3ebc41cb27d4d558c09e9092f50fceec2"), ( - "260904e50145e0dab40c86488a48f1a857915058f3b263b2d7d4325e71788abd", - "14c9bf7f4c25a6a69a83652a57853462149761ec82954678345c8811abc2daf9"), ( - "2a1ceaa3461f3e3e6539c1d5f58823753c2bab300fc7b995be94f510f4d842c7", - "05a4f98932e76eb1a8b4714c6b3bf079aa022750d4d8a6c9761b34b0578ba239"), ( - "0d344bf806ab8566bea729eea160fee2e0f685416b577ca8bee0b4346be74cb7", - "0b55b69e6183e03e3ff83e9397e6c4fd7a530a17d77687e98da9346f436fbefa"), ( - "2fbc760addd5f584378b071acb9f8ecb4aa9d40bd640d352db3f56a0d0b3f389", - "2cd8a28452aead0fe6860fc152a8d15568d2a7fd41bb3d907a93ee8bdce2e554"), ( - "0d0b7216902409e7a4c2588c59b002bdd3f739954deb6869fea6424c26212b6a", - "1d2a5e216d4e4cfced68bf71e68721fdd9297bd0c3c790432156f029d63820a0"), ( - "0ab175fa60fe331351385be1eca5174511d284f674b6ba4cf58acb47c018f5a7", - "03aeb24c83f1a5d343d9cfb6c124f6cea32113a4d02c063bbeaa937ff1a492e0")] + const + # the size of poseidon hash output in bits + HashBitSize* = 256 + # the size of poseidon hash output as the number hex digits + 
HashHexSize* = int(HashBitSize/4) - # StaticGroupMerkleRoot is the root of the Merkle tree constructed from the StaticGroupKeys above - # only identity commitments are used for the Merkle tree construction - # the root is created locally, using createMembershipList proc from waku_rln_relay_utils module, and the result is hardcoded in here - StaticGroupMerkleRoot* = "2b5c4a3a12d98026e2f55a5cbfc74e8a5a05a8f5403409bf218bbc92ace25b80" + const + # The relative folder where the circuit, proving and verification key for RLN can be found + # Note that resources has to be compiled with respect to the above MerkleTreeDepth + RlnResourceFolder* = "vendor/zerokit/rln/resources/tree_height_" & $MerkleTreeDepth & "/" + + # temporary variables to test waku-rln-relay performance in the static group mode + const + StaticGroupSize* = 100 + # StaticGroupKeys is a static list of 100 membership keys in the form of (identity key, identity commitment) + # keys are created locally, using createMembershipList proc from waku_rln_relay_utils module, and the results are hardcoded in here + # this list is temporary and is created to test the performance of waku-rln-relay for the static groups + # in the later versions, this static hardcoded group will be replaced with a dynamic one + + const + StaticGroupKeys* = @[("2c0198101a2828a0ac6c9e58fc131e1bd83326a4f748ef592588eeb8c3112dc1", + "1ca742a54641e2de14c5cb87ad707fd869693f682cedb901e47b8138918aecb3"), ( + "095177fd334629dbc29c5fc9e32addecfcb6a42f9673268810fa9f70d1a8191a", + "0e30810a60f53fcbd60b312d9d65ccefe4e9d0b0d2220d7350fdf881469e59eb"), ( + "131c6d4dbfb3feafc7fdd69aa62cd05d7b6be0cd7b7777ce9513ad742be71763", + "110ca13b7fa2ed72ccc4659a4fdaea3a2e28443617a29bbe977e92c83b531a15"), ( + "2f45043d0efabffadcc0db24d238a8bc4a30ce7394ee2a70b4f9c91ff675d3d5", + "23a76dd96b527d12f7d5cda1000cba8e4926b072a795d3a2bc13c479956038f5"), ( + "0ca272f013725eb1cfed07780a0ea6122e67d0c29717b26eb12c71dfd2367bf4", + 
"082663318217ad470c42f66cc66a888b296f92353d35151c31d8c611ce2cd1d4"), ( + "0d08566358b2af4b38c0ce7d92840d1bc71930a7591470ad8cabf457de60833a", + "28c6909a697e160d69ffda94e0f9acc9cca6d61c05b8bd78b864620313b7a808"), ( + "22b64925e839c488ef606d99d463f7af963b5634bfbc81a1e8c93597b65e1b66", + "2c1e7bf4af57bdadebb36ee840db5cfe88bb722d05ba24eb788b6dbcc2faffc1"), ( + "1c102745fffa271dbc1e6ba9d1e4d4cf03f0537e1c0937d7126453eec61afe79", + "2e57378468e7d3612379047dc0580a48eb75afacd827324b4c16cb18ed331bae"), ( + "2ef1ca7b105b932c78ae2b03b152a07dcf2ceb9459a718d417447c870559f388", + "05b7e60a719029aad98713ed5a0bcba3982e16a0befafe0d4763c8e2fef83c0e"), ( + "1aceda8f46cd198b7abbb79e45f6508af8999ca31332133d042dfc4a19201b77", + "2f3d259b8aabe16f645d4a6e66ba890d55e7f0cf594935ea852a0e2624868f40"), ( + "202de1595bc8e35e502a46124a067453d1e9c1b9f406acbac37c93be4bc1401e", + "1f5666cf374886ca7558658a7e9b5af5004f2bef9b65c5ff19394cd57082eac5"), ( + "162cf23e68b513465c1c392879ad7a3ce51fb98687777873c145cf32b0e2da23", + "1f3f115f1ecf136d740fa0927d2525beaa2fc965b7cbab4ce6f96d9fcc206e9f"), ( + "1c45d7e1421ae4df9a32bd2ea6f2bff70a4ed56762ef1358ee73bd8ee13e21f3", + "2417003e9c27c4fbc14c77e877074eceeaae6ace98f2b881c1c4eca2583c9c83"), ( + "0b0b3d09d843d911b283299bbcd8965e8e4534c66ef9752be1eb01dff69669f9", + "115b7c70b9a1f468295afdca9dba3b67ccc3123e5be74d5a8da73a029fa2e68c"), ( + "1b1b4f1fbc8804762e3e7ccb53c50921a5642431169cb49cb220eed33e40d5c9", + "2b7821ead551ba2a9a55450da9b21d6aaa4c382c50d9fcc9115b48a89f06a496"), ( + "067b456b971b40477a97b66672fbf259f5dc46669218272ed517d36716f0e43d", + "0f4cbafe9afbee252db6dbaf23a5975e24bf2d360b907f5c88bdd3e3be6bff6e"), ( + "18ee822f7abe8ac80aba210116b438d661a97dba2c0602456582a767e33e4589", + "02f812ffd5b9859fd202ad21c303b6352bda53af97dd944e9ae4161508191f55"), ( + "14c41f2ee56279e7bca332679c4ed5c865b83b4e7aa1be0415d4e5ef12a5c532", + "122f3326f332d5ff387bf8e3f4a2ac647636a2dbc69be9590664107f87ab8d3b"), ( + "29b1fe377ff7295120756108959ee7d6c8dc314c078086fd52743e81d3134ca7", 
+ "1e47ec0b7957e911a9c3591a9df514413e437eb8549a61a2d13f9aa9f38559c5"), ( + "17d5b6d345b4ee67418a29ee18a6bed4976c1321ba0b299f2e25dca30a421c9b", + "233ef113d1b1c6659b1c964eba8a8b2336e9f71db3daa251f8a69968aeeff066"), ( + "14e40d11000392066c5624737ad96faed0e126cccec8eb4f8f7b5eed141c82d4", + "1d8bb23790d6105666913eb9bdb4ad06584e12bf428e46f034b546b9cbe05396"), ( + "0498600cbfb3fff26656fb8ecf7f5fc1212003d5391e6998330bae8b3453c426", + "2944f71c79f223b47d0ded89fea53ecd362d64314c39aa632046fd7c9bc366a3"), ( + "19137c8e6c8249e4897b260d387aea6afb82d0f36f9eef5018ea786dc6a8504a", + "2dbd8b02cbb6d5a8fc2a91ae5175019af9c9c003bcdcbb5d56590ba48ec353e3"), ( + "176f27c080beeb31b4d4726846185b41b0ed480b589645d18609759afe8847d0", + "1030d697176631bba1ac4179f6a1640a4d7b99b68484a4808f750fd58bcb4e23"), ( + "0383a55e14226eff828d4a3f6df6e3374d6a9e6538d9a36ba2161b5e88e4a3c5", + "25e4b8ccb1fe4f9cf90e60dbd9714902e3c76e95e39ebaadac0dc36b3b632129"), ( + "0c57de13726bee0b6fc5f04c45d4bc3fdc1f884eaf8ece07ac85b87d23875848", + "22f4ceb33f1a84d0d263aebc1d0850347bc126e9981dbcc4593aaa53e3861106"), ( + "06d159ec59bc4b0e9ec25e63fe3e29368059999e3c304b565789c776f89f0fca", + "144789e2e129d2282ecf5dd3b23286ab475a8eec7e066182f28ef71be14ce048"), ( + "242b10778fa6e753bb08f186b2e07c1c87c263057e09d9ce88333033f465799d", + "2feb99f0a004f1ee48046d72969eaa73ec5945fed1b8e41704d38fdee2b6afb6"), ( + "1359ff7f691d03c867ed1e10e7bffec09ffda9d9254565d5da8b3d472ef91edb", + "016f4071d37b38572bb3b4f07b86d47b1813bcb64caaf0fcf4c49388bbb8d534"), ( + "14263ca40720663eaa4e78cd9bc6336aaed42f83c30a20b4b62c5a5a9b053ed4", + "121d1ac2a73279e3b1ed99c044a5b240cd4345c6e944233b8850f85f91830656"), ( + "017e31163b233f56fa999c1433dbe6bf9f4e6c248d83f6a6f496866ea407cfa1", + "27e00d68571fb1b20f32ad2090125d39a781f3a80133d267a520c09e3e000439"), ( + "2668502cb436958df8b6036340c9cbfcd6f6cb4304a46d50621fa612869c8c17", + "0577e480c19501a7e1b03acc9e456c79af9b7c2405a005fa288340196290812c"), ( + 
"1cbe3144b594879735c43002b480bd83e83c4fe6cd430fc447c11ef862934bf8", + "0ff3b7fdb22a138cd90beec386f5cac06c3167dd2f25169199004472394f52ca"), ( + "221edbd101eb5300d45f30c5102b9cf61a4668de7b96c2428e726fbb2eccfba2", + "1ee78ea001663058670233fa4bd3f5eca125d2ee27166a3c270cc9ff828ef44e"), ( + "09f4c1a0a0acb6fd0bdbad6fa15fec36a053b04924f71c50158d386393dea791", + "0af261879fe9f9dc428d2d3ed1e2951e0b8ea45d2dd9452ae9657197ebc25bfa"), ( + "1c797777a6175c021f776a177617ff78c691075e61c5ab78a575696abf98256e", + "14acf51f86ed0f2cc202599a7b2c7d836036e80374a27ba4d1a015577ac0af22"), ( + "11b022742928eea62ef72d64b6156c7bc1a3df355e57d5c1edf29887b7d788e7", + "15aa67cf3c4bfcc267b5267c33b026cd8d7b19e2a6b3f8abef610da276419aba"), ( + "00323a544e19221877fa41066cc0f400f73b4d788e05192f9ba4c33559c113bd", + "1afab12d34efab0b919c5fcf9aa4250a13e48bf97c112f0451c8ab687cc93515"), ( + "0ff6ec8f949bc2c540fc661911215818212f37f37d1b041517bee13f01d01877", + "099ac3a4ae91b8aaaedb701d53c284931bdd4e1acf248cc435a2121b44d876e4"), ( + "03200d6a35588a4cae7342c9cb2160a596072f17e51b0071eec3eeef44df7a85", + "21785df67a48d487e7267175ceb1398d871fcf204d3507389c08fd818eae8e35"), ( + "22f6ffa9d3607ae5eb10610dc9198d1e836b5b67fdebe0e875f59ee41c49d28d", + "285af8a70a34a6d34e6d715d28b18869c74fa8c0a97cd5035c39b4ad9c9bef88"), ( + "131373e532a4c0e6730d023c8cc00a300ded84f0ff38ae8ef26b869ffd21a495", + "2617021b27ccb848203efb158e222e727929239acebc1bcfcfdab441ca2267d8"), ( + "2deaa385e5ac88d90c9ac88f4dd1390b570da7e3368820baddee878c1c920985", + "0775d7a6e69b2fc8d887ac4d8ba5f45a5d70ff1593d44ec7d68f125241a27019"), ( + "13a55e79d5843dd86599461865d3e41895ae7414778b5d6d7b3122f8a21b59f0", + "0023467c1f7de5335e1f25ae8a854dcd9afae22f8a8ebda13725827ad0b5781d"), ( + "0b6b34fa4f045993221dddc35b1a7d39b900bc74d6da3e75d85dbf3f6a9002a3", + "204d8b8dbc6fe2300c2a7998c6334aa772e8e31f9aa0521f074cebfca09076de"), ( + "07bf2bd503c4c08950b74961eae6b22c1dfa9866e1d9566561665ffd3c5f426b", + "0049bade3018d28f8c40a47c71f1415811dc6528f0bf6982abd09f4df6d45432"), ( 
+ "2a2243a0405c0d40af17245e02e1c1f5aea007143fd17fe2fd93a2b49b3f0a57", + "112aab67ed0bacf225f87ff6591fc8d219a73b4dbfe36b21892a5795d481d4f3"), ( + "22a1fd33e4b83d1cbac4a92160ac8b41ef86774bd7885f69a8fa79fe6c33a4d2", + "2de16b061df3b291a664ef1e1eb285538d18b80aea7014ebf2b3e50d22888861"), ( + "284e390e5be08da632b0155b6ee23941b3275fd23682590c7efd1dcd78b69d5e", + "0a282a3b2e4462ea48c82933c2ed03d046058d24c39d02b7d012126adc79234f"), ( + "216ef8f66b9d3cab3ad9f95e041677ced8d59cfa987cc898d0db31ec17bc2d04", + "08cbcb81beaa2113b5710e5441032a7d7144121372ef9d2d903b8e76c44108d3"), ( + "1f5fe5d1a7eba65373f8c23b02b330e24677fd8fecc8142f4af2df2de6a9cdc5", + "18818a95dc1a2dceeaf5484b2324b6f2977ba0a8120d4d19b9a1bac7482a25d6"), ( + "02f4a10357e1b8fcd3c8d6fa3cbe498a4451cc0d3c29075addc853c7e0b851b7", + "23f50656c54f84cbce760335ca09d50f27ab160f7fcea39ceb49d9d5aef67a29"), ( + "1ae88ec49df2fa49d2f1d3170a15b2c5a80fc950ab0b50d28a6caa20aca6722c", + "08f18be90156d957e2328367d7fd661d6c5bcb167193961c7f9b7d7bb82855c0"), ( + "152b2f54041f1a615225bc96723dae89fd0d17352b0d37b37f419e49332f3a84", + "087ea7a796503b43cd723344004d4a6c869f9c779ef5c96b4f385c1b6a16f585"), ( + "0ce422373db69ea5f19ad22480b6b5052c8e2d66e3bde5cf9a3a43998e699f32", + "3034885d053bdcb7074ff7bd67f5b6d3dfc27be07986ae27ae65f99f95670b91"), ( + "0ea472ec17bba7b8d9e7c522d9d5b0b8404ae1da0f6998c7e1acbc5433f3002e", + "1eb8eb2cf399ceed4b9c4894d1cba611d78f2aea9ec3bb04e4f4502c6e72504b"), ( + "20769b227bb7d3cd8f08a7c3a58e557d87a645601dda0a89b322cdc4a29dc073", + "13b601dea1b88605d1479331596128fcff755118259802a2ef28e7aa3a7900f5"), ( + "207c117d42d91e6b6d4c0451aa96f38d7d86a0dda678440f4a1b79ffe9efc825", + "064790db9a228621e867ba894c0e7f3a0b178c0eae72892dc743ee62d5c557eb"), ( + "2ed78374a4b336bcb8fd8603610059ff51dac1cc7c6dd3549c072101e0443f86", + "2c1d57b17d1f0fd9cf00fa9409ecb47624c8685bd632f447a3c15ab0b1d90fc3"), ( + "073fee805a6121d1cac9f68ab6b5759313a82100e9b59e70a84589f1e311736a", + "1c61cc79a8a9c061385ea2f7899171f5d870b0f5de27b6acd5387d7caa8f3d1a"), 
( + "2fe95c45a1c3ed3f81a9713762c8f15edd783207a2343f866466474be47cd297", + "137d8a12344f774e17c852313526c982927a08050b8add27ad8fe8feea29c49f"), ( + "10c3634849afe5a64bb5325770dc2ab1045bead339fa995397cda85e5bb48aa8", + "1cc853ceab6fe0a2e0396f5e58ff96ffd7280907c44c77d2327e34018bcf2924"), ( + "26853b411ff5829880b6305740a15c245b4246e84ffc6d61722966f466a4eb51", + "1d3a27beef286cb47d716de1b6b93b1c090e759a9538395a38bb804cdb2d11f9"), ( + "2b4ea98233ee2eac4df809f2620bbc6b183b956561e1eb25c8b4a84a28ffe945", + "1c82f94e2e952156c1d759d3f93fc97bf8f8a444183e578cb62001e1bbaf32d9"), ( + "1c047970753dda8c20b8cc4a40721847d26dff8634b72d02e8d28e3b407c80d1", + "195e38c09efa924609232323395867ccd8f913dd7a4fe4322a043f6eeb51ed7a"), ( + "01c2cd45a421cb629b5d4752380f296f98ba923a6ee9b28c3f427f236138c9b9", + "160875e6eceeb3e3abfbaea572d16e7a4c1df16a906f18805d1db031bbe25ae2"), ( + "1ac75381eb5d3bb34dbf46b3e8deaf88822bf59880cd206f2b88a7cb0cd78a55", + "2198af5533f0829ee6a02cd9f7f9d50de57e15f08d86fc8d18eb9d6f7b3751c6"), ( + "254c281c6a72e0fadfe88280dcf1353b1880db14634863cab2fc18fc38ffe4b6", + "1ba0e9722e2f2911610966004407991a8c7f41c5c4bb78f15b9809aa45b6ed2e"), ( + "2786cdfc7446c95c3f3a83d851d0c50cd3ee96bb91bec1bb9a3308907dab64c2", + "2bfe013073d942cfb1793c0aa8d14a5e09f7d5ac7c8f6dd5119ef730c232d78f"), ( + "1a809572677c00af2d52023bc2aaa3d43d8b6f16f7576a357f82471081b2298b", + "13ff785c5a8ec7ab13291759c37499d32ab95edededf2e7c08eb3e5d0dd0b4e4"), ( + "2ef26ceeca603bacaae271072909a22cea69fce5f299865c66f5ae63b45dbe30", + "0ed774428015317b7d299500c5e4d01e49039bcfebe5db3ab1034c664d975e3c"), ( + "2d5d5b1d825b27f2345976f3d43e47017c1efc41e8e62f93b0e12c1037b316db", + "2343a52d8a37d986fdd52c31d5742a84f51088c002f4dd4e4b039960a504cbdb"), ( + "172e2144b6ded94be4b9642c7c6f9afed81aecb5485aeef5b3d054f53aa2756f", + "2406c9401b7cbd9494f429616ffc85a489bfbdb6f914bd84640b28528dbfce68"), ( + "285af6cd43a017bec5394152e13a2ca4df2131b32c28cc9df9b45c24aa0bd0e3", + 
"0f79493ffde795d083b097734356107f41f33137b6bc7118f2768c924cf31057"), ( + "00117742394adf01ad59a127f0a0bec58f8d4e593c589266ed15041422f382f3", + "0fb47c14a31f6be53718284a8f35f3a7be7445acb7685893b007a22da26dfb4b"), ( + "041d9eed43c71e705a9a4ab0f3d20d27e27cc86f494db2579c20166fde4fe733", + "1f37f81316470689a67a8f6fc22a8000870affa7f69a53c0538240bae147e8e1"), ( + "2c43ad22585ba0e1c836782ce619ae0579ac376efe2c433c3d29257fc65d4b3e", + "0303d6adea59b2538c8c625761396228fbd552e36d93af02d177c1219ba778c9"), ( + "2931a506675b34076d279296b1fb36d8d2161568210754bc0ada09206b956814", + "2a650ed20b09a37b6c99f4ed99e352069979306db538c9e85d8e4f24990df7ee"), ( + "0199bb1cb7798516759aecedc974db39f8ecc9befbf97265b224c17b66b241c6", + "18ba142aaa522112b47c2ebab08a483e01e75b7931024b1accca5e7ba557b983"), ( + "2fc6e58b52fb6bdcde370f5849c990c542dc112c42100323dd199a3a90c3c55b", + "1be44b8c0fbc328fe46ed9ad8d62d85c6058df7a5a1eadd3d88d018dfdca2d5e"), ( + "0e5928b37cd899cd186c0b5ed2d2b9034b82fa542420903c25aab9d835e16697", + "0db6cc4f93d7c62a819ff05218e7f97deed2899ff87e9316048afa3d5f88f8d3"), ( + "1c4ad5b10dc591d28b111f17bdf06c4d52fca3f598a16918f0c22adb1046b6fe", + "0fdbbf0beb5a7e9fe36eb5950a8a42886c985c35a753193dfb88b2289bcd273e"), ( + "254cc2e093b44b9817d6e63b200e901719ee2c71cf08122845ae17c83df11b57", + "0ce7d93826ead805dccb2db908df05af4b8eb253ffba9f7e12f5b65c790e2f13"), ( + "082d33c58b7ef35450b6e0f04228af71cb62c24b65facdca8a786448939a64fb", + "1eca9298aee68fec2c3485fe7b46b154b2d28dac651d4692143da495e1a6a567"), ( + "135441db7e663906d2a3fe179541475c568dfe5100a7bb0d711bb8fca34d6dbd", + "2832f9e2f973d3c679905fc1c173b71384c54d0c86be08ca053052a1e516fc07"), ( + "1d979375e1e725d394b017452863a2375a349072add1a1f4e8a62783e29997e4", + "16d4c4d52996d207d2f5111e6be2a333f575cd63a5d71df24e413ec239441a64"), ( + "241e5996499e43372a4e86c30a1f141d6157eb175b939da7b08a19522a52bac0", + "129341569c322516d77c426ebcfef6d7f40e5338d15c31a5aa3bcd056008fe78"), ( + "2269de57ca36264ec72dde1c8ebacee471960cc2fad7c0ee3a9118ba40e41e87", 
+ "2aae3a1043157c1bf2c79e6111cdc46c7e1a2bc960596eb023b7b8c6227c2e9d"), ( + "120cb3d07e7560d2146397bdf872004fa29eefc5a5113595be164ecd72125aab", + "1e21ba2aec2eaaae2595695d5b638a27433bd883b8e17f6f8fe1fd1b3421fceb"), ( + "1bc45135b0cffa828ee41ce1c65b64d9f3f65390684401e903c9964099919e2c", + "0083742467cf09853010726529661bac06864c91da35300f630e8cc78c61cbf9"), ( + "233135b00df8453eaebcd34d338014d01efbabebb3ddfe6219dd7f5db7a341ba", + "07fd4cd97e761f7391eb9718baddef0acc9a6dba3dedaef40612754a2dcdee39"), ( + "02fe17ec7b1ccc29368b8f654442db237d8cd8221edaff653220ca0beffef1cc", + "0cf8adbfd9140830b15670269f6c9e7a39b6172e2b0e059998403ee609323c09"), ( + "11221c9f08070d40f1368e0818d2f31c93312b98954bdd1f0f820d68be592f7c", + "16de82ddd15ed3809e392ae92a0d67dbde6abeede876c7cdf8e6fa106721eb67"), ( + "12c0d0c606aa1cc11896de752de3b071793b0d40bc1c76224bf709a73f3e0694", + "184d0476ead3701a7ffb62d0ae2b43e3ebc41cb27d4d558c09e9092f50fceec2"), ( + "260904e50145e0dab40c86488a48f1a857915058f3b263b2d7d4325e71788abd", + "14c9bf7f4c25a6a69a83652a57853462149761ec82954678345c8811abc2daf9"), ( + "2a1ceaa3461f3e3e6539c1d5f58823753c2bab300fc7b995be94f510f4d842c7", + "05a4f98932e76eb1a8b4714c6b3bf079aa022750d4d8a6c9761b34b0578ba239"), ( + "0d344bf806ab8566bea729eea160fee2e0f685416b577ca8bee0b4346be74cb7", + "0b55b69e6183e03e3ff83e9397e6c4fd7a530a17d77687e98da9346f436fbefa"), ( + "2fbc760addd5f584378b071acb9f8ecb4aa9d40bd640d352db3f56a0d0b3f389", + "2cd8a28452aead0fe6860fc152a8d15568d2a7fd41bb3d907a93ee8bdce2e554"), ( + "0d0b7216902409e7a4c2588c59b002bdd3f739954deb6869fea6424c26212b6a", + "1d2a5e216d4e4cfced68bf71e68721fdd9297bd0c3c790432156f029d63820a0"), ( + "0ab175fa60fe331351385be1eca5174511d284f674b6ba4cf58acb47c018f5a7", + "03aeb24c83f1a5d343d9cfb6c124f6cea32113a4d02c063bbeaa937ff1a492e0")] + + # StaticGroupMerkleRoot is the root of the Merkle tree constructed from the StaticGroupKeys above + # only identity commitments are used for the Merkle tree construction + # the root is created locally, using 
createMembershipList proc from waku_rln_relay_utils module, and the result is hardcoded in here + StaticGroupMerkleRoot* = "2b5c4a3a12d98026e2f55a5cbfc74e8a5a05a8f5403409bf218bbc92ace25b80" const EpochUnitSeconds* = float64(10) # the rln-relay epoch length in seconds const MaxClockGapSeconds* = 20.0 # the maximum clock difference between peers in seconds diff --git a/waku/v2/protocol/waku_rln_relay/waku_rln_relay_types.nim b/waku/v2/protocol/waku_rln_relay/waku_rln_relay_types.nim index c56744199..c12af8fa8 100644 --- a/waku/v2/protocol/waku_rln_relay/waku_rln_relay_types.nim +++ b/waku/v2/protocol/waku_rln_relay/waku_rln_relay_types.nim @@ -16,141 +16,142 @@ import type RlnRelayResult*[T] = Result[T, string] -## RLN is a Nim wrapper for the data types used in zerokit RLN -type RLN* {.incompleteStruct.} = object -type RLNResult* = RlnRelayResult[ptr RLN] +when defined(rln): + ## RLN is a Nim wrapper for the data types used in zerokit RLN + type RLN* {.incompleteStruct.} = object + type RLNResult* = RlnRelayResult[ptr RLN] -type - # identity key as defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership - IDKey* = array[32, byte] - # hash of identity key as defined ed in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership - IDCommitment* = array[32, byte] - MerkleNode* = array[32, byte] # Each node of the Merkle tee is a Poseidon hash which is a 32 byte value - Nullifier* = array[32, byte] - Epoch* = array[32, byte] - RlnIdentifier* = array[32, byte] - ZKSNARK* = array[128, byte] + type + # identity key as defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + IDKey* = array[32, byte] + # hash of identity key as defined ed in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + IDCommitment* = array[32, byte] + MerkleNode* = array[32, byte] # Each node of the Merkle tee is a Poseidon hash which is a 32 byte value + Nullifier* = array[32, byte] + Epoch* = array[32, byte] + RlnIdentifier* = array[32, byte] + ZKSNARK* = array[128, 
byte] -# Custom data types defined for waku rln relay ------------------------- -type MembershipKeyPair* = object - ## user's identity key (a secret key) which is selected randomly - ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership - idKey*: IDKey - # hash of user's identity key generated by - # Poseidon hash function implemented in rln lib - # more details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership - idCommitment*: IDCommitment + # Custom data types defined for waku rln relay ------------------------- + type MembershipKeyPair* = object + ## user's identity key (a secret key) which is selected randomly + ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + idKey*: IDKey + # hash of user's identity key generated by + # Poseidon hash function implemented in rln lib + # more details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + idCommitment*: IDCommitment -type RateLimitProof* = object - ## RateLimitProof holds the public inputs to rln circuit as - ## defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Public-Inputs - ## the `proof` field carries the actual zkSNARK proof - proof*: ZKSNARK - ## the root of Merkle tree used for the generation of the `proof` - merkleRoot*: MerkleNode - ## the epoch used for the generation of the `proof` - epoch*: Epoch - ## shareX and shareY are shares of user's identity key - ## these shares are created using Shamir secret sharing scheme - ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Linear-Equation-amp-SSS - shareX*: MerkleNode - shareY*: MerkleNode - ## nullifier enables linking two messages published during the same epoch - ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Nullifiers - nullifier*: Nullifier - ## Application specific RLN Identifier - rlnIdentifier*: RlnIdentifier + type RateLimitProof* = object + ## RateLimitProof holds the public inputs to rln circuit as + ## defined in 
https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Public-Inputs + ## the `proof` field carries the actual zkSNARK proof + proof*: ZKSNARK + ## the root of Merkle tree used for the generation of the `proof` + merkleRoot*: MerkleNode + ## the epoch used for the generation of the `proof` + epoch*: Epoch + ## shareX and shareY are shares of user's identity key + ## these shares are created using Shamir secret sharing scheme + ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Linear-Equation-amp-SSS + shareX*: MerkleNode + shareY*: MerkleNode + ## nullifier enables linking two messages published during the same epoch + ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Nullifiers + nullifier*: Nullifier + ## Application specific RLN Identifier + rlnIdentifier*: RlnIdentifier -type MembershipIndex* = uint + type MembershipIndex* = uint -type RlnMembershipCredentials* = object - membershipKeyPair*: MembershipKeyPair - rlnIndex*: MembershipIndex + type RlnMembershipCredentials* = object + membershipKeyPair*: MembershipKeyPair + rlnIndex*: MembershipIndex -type ProofMetadata* = object - nullifier*: Nullifier - shareX*: MerkleNode - shareY*: MerkleNode + type ProofMetadata* = object + nullifier*: Nullifier + shareX*: MerkleNode + shareY*: MerkleNode -type WakuRLNRelay* = ref object - membershipKeyPair*: MembershipKeyPair - # membershipIndex denotes the index of a leaf in the Merkle tree - # that contains the pk of the current peer - # this index is used to retrieve the peer's authentication path - membershipIndex*: MembershipIndex - membershipContractAddress*: Address - ethClientAddress*: string - ethAccountAddress*: Option[Address] - # this field is required for signing transactions - # TODO may need to erase this ethAccountPrivateKey when is not used - # TODO may need to make ethAccountPrivateKey mandatory - ethAccountPrivateKey*: Option[PrivateKey] - rlnInstance*: ptr RLN - pubsubTopic*: string # the pubsub topic for which rln relay is mounted - # 
contentTopic should be of type waku_message.ContentTopic, however, due to recursive module dependency, the underlying type of ContentTopic is used instead - # TODO a long-term solution is to place types with recursive dependency inside one file - contentTopic*: string - # the log of nullifiers and Shamir shares of the past messages grouped per epoch - nullifierLog*: Table[Epoch, seq[ProofMetadata]] - lastEpoch*: Epoch # the epoch of the last published rln message - validMerkleRoots*: Deque[MerkleNode] # An array of valid merkle roots, which are updated in a FIFO fashion - lastSeenMembershipIndex*: MembershipIndex # the last seen membership index - lastProcessedBlock*: BlockNumber # the last processed block number + type WakuRLNRelay* = ref object + membershipKeyPair*: MembershipKeyPair + # membershipIndex denotes the index of a leaf in the Merkle tree + # that contains the pk of the current peer + # this index is used to retrieve the peer's authentication path + membershipIndex*: MembershipIndex + membershipContractAddress*: Address + ethClientAddress*: string + ethAccountAddress*: Option[Address] + # this field is required for signing transactions + # TODO may need to erase this ethAccountPrivateKey when is not used + # TODO may need to make ethAccountPrivateKey mandatory + ethAccountPrivateKey*: Option[PrivateKey] + rlnInstance*: ptr RLN + pubsubTopic*: string # the pubsub topic for which rln relay is mounted + # contentTopic should be of type waku_message.ContentTopic, however, due to recursive module dependency, the underlying type of ContentTopic is used instead + # TODO a long-term solution is to place types with recursive dependency inside one file + contentTopic*: string + # the log of nullifiers and Shamir shares of the past messages grouped per epoch + nullifierLog*: Table[Epoch, seq[ProofMetadata]] + lastEpoch*: Epoch # the epoch of the last published rln message + validMerkleRoots*: Deque[MerkleNode] # An array of valid merkle roots, which are updated 
in a FIFO fashion + lastSeenMembershipIndex*: MembershipIndex # the last seen membership index + lastProcessedBlock*: BlockNumber # the last processed block number -type - MessageValidationResult* {.pure.} = enum - Valid, - Invalid, - Spam - MerkleNodeResult* = RlnRelayResult[MerkleNode] - RateLimitProofResult* = RlnRelayResult[RateLimitProof] - -# Protobufs enc and init -proc init*(T: type RateLimitProof, buffer: seq[byte]): ProtoResult[T] = - var nsp: RateLimitProof - let pb = initProtoBuffer(buffer) + type + MessageValidationResult* {.pure.} = enum + Valid, + Invalid, + Spam + MerkleNodeResult* = RlnRelayResult[MerkleNode] + RateLimitProofResult* = RlnRelayResult[RateLimitProof] + + # Protobufs enc and init + proc init*(T: type RateLimitProof, buffer: seq[byte]): ProtoResult[T] = + var nsp: RateLimitProof + let pb = initProtoBuffer(buffer) - var proof: seq[byte] - discard ? pb.getField(1, proof) - discard nsp.proof.copyFrom(proof) + var proof: seq[byte] + discard ? pb.getField(1, proof) + discard nsp.proof.copyFrom(proof) - var merkleRoot: seq[byte] - discard ? pb.getField(2, merkleRoot) - discard nsp.merkleRoot.copyFrom(merkleRoot) + var merkleRoot: seq[byte] + discard ? pb.getField(2, merkleRoot) + discard nsp.merkleRoot.copyFrom(merkleRoot) - var epoch: seq[byte] - discard ? pb.getField(3, epoch) - discard nsp.epoch.copyFrom(epoch) + var epoch: seq[byte] + discard ? pb.getField(3, epoch) + discard nsp.epoch.copyFrom(epoch) - var shareX: seq[byte] - discard ? pb.getField(4, shareX) - discard nsp.shareX.copyFrom(shareX) + var shareX: seq[byte] + discard ? pb.getField(4, shareX) + discard nsp.shareX.copyFrom(shareX) - var shareY: seq[byte] - discard ? pb.getField(5, shareY) - discard nsp.shareY.copyFrom(shareY) + var shareY: seq[byte] + discard ? pb.getField(5, shareY) + discard nsp.shareY.copyFrom(shareY) - var nullifier: seq[byte] - discard ? pb.getField(6, nullifier) - discard nsp.nullifier.copyFrom(nullifier) - - var rlnIdentifier: seq[byte] - discard ? 
pb.getField(7, rlnIdentifier) - discard nsp.rlnIdentifier.copyFrom(rlnIdentifier) + var nullifier: seq[byte] + discard ? pb.getField(6, nullifier) + discard nsp.nullifier.copyFrom(nullifier) + + var rlnIdentifier: seq[byte] + discard ? pb.getField(7, rlnIdentifier) + discard nsp.rlnIdentifier.copyFrom(rlnIdentifier) - return ok(nsp) + return ok(nsp) -proc encode*(nsp: RateLimitProof): ProtoBuffer = - var output = initProtoBuffer() + proc encode*(nsp: RateLimitProof): ProtoBuffer = + var output = initProtoBuffer() - output.write3(1, nsp.proof) - output.write3(2, nsp.merkleRoot) - output.write3(3, nsp.epoch) - output.write3(4, nsp.shareX) - output.write3(5, nsp.shareY) - output.write3(6, nsp.nullifier) - output.write3(7, nsp.rlnIdentifier) + output.write3(1, nsp.proof) + output.write3(2, nsp.merkleRoot) + output.write3(3, nsp.epoch) + output.write3(4, nsp.shareX) + output.write3(5, nsp.shareY) + output.write3(6, nsp.nullifier) + output.write3(7, nsp.rlnIdentifier) - output.finish3() + output.finish3() - return output \ No newline at end of file + return output \ No newline at end of file diff --git a/waku/v2/protocol/waku_rln_relay/waku_rln_relay_utils.nim b/waku/v2/protocol/waku_rln_relay/waku_rln_relay_utils.nim index 5903d8132..d2d27c46b 100644 --- a/waku/v2/protocol/waku_rln_relay/waku_rln_relay_utils.nim +++ b/waku/v2/protocol/waku_rln_relay/waku_rln_relay_utils.nim @@ -63,1273 +63,1274 @@ proc toBuffer*(x: openArray[byte]): Buffer = let output = Buffer(`ptr`: cast[ptr uint8](baseAddr), len: uint(temp.len)) return output -proc createRLNInstanceLocal(d: int = MerkleTreeDepth): RLNResult = - ## generates an instance of RLN - ## An RLN instance supports both zkSNARKs logics and Merkle tree data structure and operations - ## d indicates the depth of Merkle tree - ## Returns an error if the instance creation fails - var - rlnInstance: ptr RLN - merkleDepth: csize_t = uint(d) - resourcesPathBuffer = RlnResourceFolder.toOpenArrayByte(0, 
RlnResourceFolder.high).toBuffer() +when defined(rln): + proc createRLNInstanceLocal(d: int = MerkleTreeDepth): RLNResult = + ## generates an instance of RLN + ## An RLN instance supports both zkSNARKs logics and Merkle tree data structure and operations + ## d indicates the depth of Merkle tree + ## Returns an error if the instance creation fails + var + rlnInstance: ptr RLN + merkleDepth: csize_t = uint(d) + resourcesPathBuffer = RlnResourceFolder.toOpenArrayByte(0, RlnResourceFolder.high).toBuffer() - # create an instance of RLN - let res = new_circuit(merkleDepth, addr resourcesPathBuffer, addr rlnInstance) - # check whether the circuit parameters are generated successfully - if (res == false): - debug "error in parameters generation" - return err("error in parameters generation") - return ok(rlnInstance) + # create an instance of RLN + let res = new_circuit(merkleDepth, addr resourcesPathBuffer, addr rlnInstance) + # check whether the circuit parameters are generated successfully + if (res == false): + debug "error in parameters generation" + return err("error in parameters generation") + return ok(rlnInstance) -proc membershipKeyGen*(ctxPtr: ptr RLN): RlnRelayResult[MembershipKeyPair] = - ## generates a MembershipKeyPair that can be used for the registration into the rln membership contract - ## Returns an error if the key generation fails + proc membershipKeyGen*(ctxPtr: ptr RLN): RlnRelayResult[MembershipKeyPair] = + ## generates a MembershipKeyPair that can be used for the registration into the rln membership contract + ## Returns an error if the key generation fails - # keysBufferPtr will hold the generated key pairs i.e., secret and public keys - var - keysBuffer: Buffer - keysBufferPtr = addr(keysBuffer) - done = key_gen(ctxPtr, keysBufferPtr) + # keysBufferPtr will hold the generated key pairs i.e., secret and public keys + var + keysBuffer: Buffer + keysBufferPtr = addr(keysBuffer) + done = key_gen(ctxPtr, keysBufferPtr) - # check whether the keys are 
generated successfully - if(done == false): - return err("error in key generation") + # check whether the keys are generated successfully + if(done == false): + return err("error in key generation") - var generatedKeys = cast[ptr array[64, byte]](keysBufferPtr.`ptr`)[] - # the public and secret keys together are 64 bytes - if (generatedKeys.len != 64): - return err("generated keys are of invalid length") + var generatedKeys = cast[ptr array[64, byte]](keysBufferPtr.`ptr`)[] + # the public and secret keys together are 64 bytes + if (generatedKeys.len != 64): + return err("generated keys are of invalid length") - # TODO define a separate proc to decode the generated keys to the secret and public components - var - secret: array[32, byte] - public: array[32, byte] - for (i, x) in secret.mpairs: x = generatedKeys[i] - for (i, x) in public.mpairs: x = generatedKeys[i+32] + # TODO define a separate proc to decode the generated keys to the secret and public components + var + secret: array[32, byte] + public: array[32, byte] + for (i, x) in secret.mpairs: x = generatedKeys[i] + for (i, x) in public.mpairs: x = generatedKeys[i+32] - var - keypair = MembershipKeyPair(idKey: secret, idCommitment: public) + var + keypair = MembershipKeyPair(idKey: secret, idCommitment: public) - return ok(keypair) + return ok(keypair) -proc createRLNInstance*(d: int = MerkleTreeDepth): RLNResult = - ## Wraps the rln instance creation for metrics - ## Returns an error if the instance creation fails - var res: RLNResult - waku_rln_instance_creation_duration_seconds.nanosecondTime: - res = createRLNInstanceLocal(d) - return res + proc createRLNInstance*(d: int = MerkleTreeDepth): RLNResult = + ## Wraps the rln instance creation for metrics + ## Returns an error if the instance creation fails + var res: RLNResult + waku_rln_instance_creation_duration_seconds.nanosecondTime: + res = createRLNInstanceLocal(d) + return res -proc toUInt256*(idCommitment: IDCommitment): UInt256 = - let pk = 
UInt256.fromBytesLE(idCommitment) - return pk + proc toUInt256*(idCommitment: IDCommitment): UInt256 = + let pk = UInt256.fromBytesLE(idCommitment) + return pk -proc toIDCommitment*(idCommitmentUint: UInt256): IDCommitment = - let pk = IDCommitment(idCommitmentUint.toBytesLE()) - return pk + proc toIDCommitment*(idCommitmentUint: UInt256): IDCommitment = + let pk = IDCommitment(idCommitmentUint.toBytesLE()) + return pk -proc inHex*(value: IDKey or IDCommitment or MerkleNode or Nullifier or Epoch or RlnIdentifier): string = - var valueHex = (UInt256.fromBytesLE(value)).toHex - # We pad leading zeroes - while valueHex.len < value.len * 2: - valueHex = "0" & valueHex - return valueHex + proc inHex*(value: IDKey or IDCommitment or MerkleNode or Nullifier or Epoch or RlnIdentifier): string = + var valueHex = (UInt256.fromBytesLE(value)).toHex + # We pad leading zeroes + while valueHex.len < value.len * 2: + valueHex = "0" & valueHex + return valueHex -proc toMembershipIndex(v: UInt256): MembershipIndex = - let membershipIndex: MembershipIndex = cast[MembershipIndex](v) - return membershipIndex + proc toMembershipIndex(v: UInt256): MembershipIndex = + let membershipIndex: MembershipIndex = cast[MembershipIndex](v) + return membershipIndex -proc register*(idComm: IDCommitment, ethAccountAddress: Option[Address], ethAccountPrivKey: keys.PrivateKey, ethClientAddress: string, membershipContractAddress: Address, registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)): Future[Result[MembershipIndex, string]] {.async.} = - # TODO may need to also get eth Account Private Key as PrivateKey - ## registers the idComm into the membership contract whose address is in rlnPeer.membershipContractAddress - - var web3: Web3 - try: # check if the Ethereum client is reachable - web3 = await newWeb3(ethClientAddress) - except: - return err("could not connect to the Ethereum client") - - if ethAccountAddress.isSome(): - web3.defaultAccount = ethAccountAddress.get() - # 
set the account private key - web3.privateKey = some(ethAccountPrivKey) - # set the gas price twice the suggested price in order for the fast mining - let gasPrice = int(await web3.provider.eth_gasPrice()) * 2 - - # when the private key is set in a web3 instance, the send proc (sender.register(pk).send(MembershipFee)) - # does the signing using the provided key - # web3.privateKey = some(ethAccountPrivateKey) - var sender = web3.contractSender(MembershipContract, membershipContractAddress) # creates a Sender object with a web3 field and contract address of type Address - - debug "registering an id commitment", idComm=idComm.inHex() - let pk = idComm.toUInt256() - - var txHash: TxHash - try: # send the registration transaction and check if any error occurs - txHash = await sender.register(pk).send(value = MembershipFee, gasPrice = gasPrice) - except ValueError as e: - return err("registration transaction failed: " & e.msg) - - let tsReceipt = await web3.getMinedTransactionReceipt(txHash) - - # the receipt topic holds the hash of signature of the raised events - let firstTopic = tsReceipt.logs[0].topics[0] - # the hash of the signature of MemberRegistered(uint256,uint256) event is equal to the following hex value - if firstTopic[0..65] != "0x5a92c2530f207992057b9c3e544108ffce3beda4a63719f316967c49bf6159d2": - return err("invalid event signature hash") - - # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field - # data = pk encoded as 256 bits || index encoded as 256 bits - let arguments = tsReceipt.logs[0].data - debug "tx log data", arguments=arguments - let - argumentsBytes = arguments.hexToSeqByte() - # In TX log data, uints are encoded in big endian - eventIdCommUint = UInt256.fromBytesBE(argumentsBytes[0..31]) - eventIndex = UInt256.fromBytesBE(argumentsBytes[32..^1]) - eventIdComm = eventIdCommUint.toIDCommitment() - debug "the identity commitment key extracted from tx log", eventIdComm=eventIdComm.inHex() - debug "the 
index of registered identity commitment key", eventIndex=eventIndex - - if eventIdComm != idComm: - return err("invalid id commitment key") - - await web3.close() - - if registrationHandler.isSome(): - let handler = registrationHandler.get - handler(toHex(txHash)) - return ok(toMembershipIndex(eventIndex)) - -proc register*(rlnPeer: WakuRLNRelay, registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)): Future[RlnRelayResult[bool]] {.async.} = - ## registers the public key of the rlnPeer which is rlnPeer.membershipKeyPair.publicKey - ## into the membership contract whose address is in rlnPeer.membershipContractAddress - let pk = rlnPeer.membershipKeyPair.idCommitment - let regResult = await register(idComm = pk, ethAccountAddress = rlnPeer.ethAccountAddress, ethAccountPrivKey = rlnPeer.ethAccountPrivateKey.get(), ethClientAddress = rlnPeer.ethClientAddress, membershipContractAddress = rlnPeer.membershipContractAddress, registrationHandler = registrationHandler) - if regResult.isErr: - return err(regResult.error()) - return ok(true) - -proc appendLength*(input: openArray[byte]): seq[byte] = - ## returns length prefixed version of the input - ## with the following format [len<8>|input] - ## len: 8-byte value that represents the number of bytes in the `input` - ## len is serialized in little-endian - ## input: the supplied `input` - let - # the length should be serialized in little-endian - len = toBytes(uint64(input.len), Endianness.littleEndian) - output = concat(@len, @input) - return output - -proc hash*(rlnInstance: ptr RLN, data: openArray[byte]): MerkleNode = - ## a thin layer on top of the Nim wrapper of the Poseidon hasher - debug "hash input", hashhex = data.toHex() - var lenPrefData = appendLength(data) - var - hashInputBuffer = lenPrefData.toBuffer() - outputBuffer: Buffer # will holds the hash output - - debug "hash input buffer length", bufflen = hashInputBuffer.len - let - hashSuccess = hash(rlnInstance, addr hashInputBuffer, addr 
outputBuffer) - output = cast[ptr MerkleNode](outputBuffer.`ptr`)[] - - return output - -proc serialize(idKey: IDKey, memIndex: MembershipIndex, epoch: Epoch, - msg: openArray[byte]): seq[byte] = - ## a private proc to convert RateLimitProof and the data to a byte seq - ## this conversion is used in the proofGen proc - ## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146 - ## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal ] - let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian) - let lenPrefMsg = appendLength(msg) - let output = concat(@idKey, @memIndexBytes, @epoch, lenPrefMsg) - return output - -proc proofGen*(rlnInstance: ptr RLN, data: openArray[byte], - memKeys: MembershipKeyPair, memIndex: MembershipIndex, - epoch: Epoch): RateLimitProofResult = - - # serialize inputs - let serializedInputs = serialize(idKey = memKeys.idKey, - memIndex = memIndex, - epoch = epoch, - msg = data) - var inputBuffer = toBuffer(serializedInputs) - - debug "input buffer ", inputBuffer - - # generate the proof - var proof: Buffer - let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof) - # check whether the generate_proof call is done successfully - if not proofIsSuccessful: - return err("could not generate the proof") - - var proofValue = cast[ptr array[320, byte]] (proof.`ptr`) - let proofBytes: array[320, byte] = proofValue[] - debug "proof content", proofHex = proofValue[].toHex - - ## parse the proof as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ] - - let - proofOffset = 128 - rootOffset = proofOffset + 32 - epochOffset = rootOffset + 32 - shareXOffset = epochOffset + 32 - shareYOffset = shareXOffset + 32 - nullifierOffset = shareYOffset + 32 - rlnIdentifierOffset = nullifierOffset + 32 - - var - zkproof: ZKSNARK - proofRoot, shareX, shareY: MerkleNode - epoch: Epoch - 
nullifier: Nullifier - rlnIdentifier: RlnIdentifier - - discard zkproof.copyFrom(proofBytes[0..proofOffset-1]) - discard proofRoot.copyFrom(proofBytes[proofOffset..rootOffset-1]) - discard epoch.copyFrom(proofBytes[rootOffset..epochOffset-1]) - discard shareX.copyFrom(proofBytes[epochOffset..shareXOffset-1]) - discard shareY.copyFrom(proofBytes[shareXOffset..shareYOffset-1]) - discard nullifier.copyFrom(proofBytes[shareYOffset..nullifierOffset-1]) - discard rlnIdentifier.copyFrom(proofBytes[nullifierOffset..rlnIdentifierOffset-1]) - - let output = RateLimitProof(proof: zkproof, - merkleRoot: proofRoot, - epoch: epoch, - shareX: shareX, - shareY: shareY, - nullifier: nullifier, - rlnIdentifier: rlnIdentifier) - - return ok(output) - -proc serialize(proof: RateLimitProof, data: openArray[byte]): seq[byte] = - ## a private proc to convert RateLimitProof and data to a byte seq - ## this conversion is used in the proof verification proc - ## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal ] - let lenPrefMsg = appendLength(@data) - var proofBytes = concat(@(proof.proof), - @(proof.merkleRoot), - @(proof.epoch), - @(proof.shareX), - @(proof.shareY), - @(proof.nullifier), - @(proof.rlnIdentifier), - lenPrefMsg) - - return proofBytes - -# Serializes a sequence of MerkleNodes -proc serialize(roots: seq[MerkleNode]): seq[byte] = - var rootsBytes: seq[byte] = @[] - for root in roots: - rootsBytes = concat(rootsBytes, @root) - return rootsBytes - -# validRoots should contain a sequence of roots in the acceptable windows. -# As default, it is set to an empty sequence of roots. 
This implies that the validity check for the proof's root is skipped -proc proofVerify*(rlnInstance: ptr RLN, - data: openArray[byte], - proof: RateLimitProof, - validRoots: seq[MerkleNode] = @[]): RlnRelayResult[bool] = - ## verifies the proof, returns an error if the proof verification fails - ## returns true if the proof is valid - var - proofBytes = serialize(proof, data) - proofBuffer = proofBytes.toBuffer() - validProof: bool - rootsBytes = serialize(validRoots) - rootsBuffer = rootsBytes.toBuffer() - - trace "serialized proof", proof = proofBytes.toHex() - - let verifyIsSuccessful = verify_with_roots(rlnInstance, addr proofBuffer, addr rootsBuffer, addr validProof) - if not verifyIsSuccessful: - # something went wrong in verification call - warn "could not verify validity of the proof", proof=proof - return err("could not verify the proof") - - if not validProof: - return ok(false) - else: - return ok(true) - -proc insertMember*(rlnInstance: ptr RLN, idComm: IDCommitment): bool = - ## inserts a member to the tree - ## returns true if the member is inserted successfully - ## returns false if the member could not be inserted - var pkBuffer = toBuffer(idComm) - let pkBufferPtr = addr pkBuffer - - # add the member to the tree - let memberAdded = update_next_member(rlnInstance, pkBufferPtr) - return memberAdded - -proc serializeIdCommitments*(idComms: seq[IDCommitment]): seq[byte] = - ## serializes a seq of IDCommitments to a byte seq - ## the serialization is based on https://github.com/status-im/nwaku/blob/37bd29fbc37ce5cf636734e7dd410b1ed27b88c8/waku/v2/protocol/waku_rln_relay/rln.nim#L142 - ## the order of serialization is |id_commitment_len<8>|id_commitment| - var idCommsBytes = newSeq[byte]() - - # serialize the idComms, with its length prefixed - let len = toBytes(uint64(idComms.len), Endianness.littleEndian) - idCommsBytes.add(len) - - for idComm in idComms: - idCommsBytes = concat(idCommsBytes, @idComm) - - return idCommsBytes - -proc 
insertMembers*(rlnInstance: ptr RLN, - index: MembershipIndex, - idComms: seq[IDCommitment]): bool = - ## Insert multiple members i.e., identity commitments - ## returns true if the insertion is successful - ## returns false if any of the insertions fails - ## Note: This proc is atomic, i.e., if any of the insertions fails, all the previous insertions are rolled back - - # serialize the idComms - let idCommsBytes = serializeIdCommitments(idComms) + proc register*(idComm: IDCommitment, ethAccountAddress: Option[Address], ethAccountPrivKey: keys.PrivateKey, ethClientAddress: string, membershipContractAddress: Address, registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)): Future[Result[MembershipIndex, string]] {.async.} = + # TODO may need to also get eth Account Private Key as PrivateKey + ## registers the idComm into the membership contract whose address is in rlnPeer.membershipContractAddress - var idCommsBuffer = idCommsBytes.toBuffer() - let idCommsBufferPtr = addr idCommsBuffer - # add the member to the tree - let membersAdded = set_leaves_from(rlnInstance, index, idCommsBufferPtr) - return membersAdded - -proc removeMember*(rlnInstance: ptr RLN, index: MembershipIndex): bool = - let deletion_success = delete_member(rlnInstance, index) - return deletion_success - -proc getMerkleRoot*(rlnInstance: ptr RLN): MerkleNodeResult = - # read the Merkle Tree root after insertion - var - root {.noinit.}: Buffer = Buffer() - rootPtr = addr(root) - getRootSuccessful = getRoot(rlnInstance, rootPtr) - if not getRootSuccessful: - return err("could not get the root") - if not root.len == 32: - return err("wrong output size") - - var rootValue = cast[ptr MerkleNode] (root.`ptr`)[] - return ok(rootValue) - -proc updateValidRootQueue*(wakuRlnRelay: WakuRLNRelay, root: MerkleNode): void = - ## updates the valid Merkle root queue with the latest root and pops the oldest one when the capacity of `AcceptableRootWindowSize` is reached - let overflowCount = 
wakuRlnRelay.validMerkleRoots.len() - AcceptableRootWindowSize - if overflowCount >= 0: - # Delete the oldest `overflowCount` elements in the deque (index 0..`overflowCount`) - for i in 0..overflowCount: - wakuRlnRelay.validMerkleRoots.popFirst() - # Push the next root into the queue - wakuRlnRelay.validMerkleRoots.addLast(root) - -proc insertMembers*(wakuRlnRelay: WakuRLNRelay, - index: MembershipIndex, - idComms: seq[IDCommitment]): RlnRelayResult[void] = - ## inserts a sequence of id commitments into the local merkle tree, and adds the changed root to the - ## queue of valid roots - ## Returns an error if the insertion fails - waku_rln_membership_insertion_duration_seconds.nanosecondTime: - let actionSucceeded = wakuRlnRelay.rlnInstance.insertMembers(index, idComms) - if not actionSucceeded: - return err("could not insert id commitments into the merkle tree") - - let rootAfterUpdate = ?wakuRlnRelay.rlnInstance.getMerkleRoot() - wakuRlnRelay.updateValidRootQueue(rootAfterUpdate) - return ok() - -proc removeMember*(wakuRlnRelay: WakuRLNRelay, index: MembershipIndex): RlnRelayResult[void] = - ## removes a commitment from the local merkle tree at `index`, and adds the changed root to the - ## queue of valid roots - ## Returns an error if the removal fails - - let actionSucceeded = wakuRlnRelay.rlnInstance.removeMember(index) - if not actionSucceeded: - return err("could not remove id commitment from the merkle tree") - - let rootAfterUpdate = ?wakuRlnRelay.rlnInstance.getMerkleRoot() - wakuRlnRelay.updateValidRootQueue(rootAfterUpdate) - return ok() - -proc validateRoot*(wakuRlnRelay: WakuRLNRelay, root: MerkleNode): bool = - ## Validate against the window of roots stored in wakuRlnRelay.validMerkleRoots - return root in wakuRlnRelay.validMerkleRoots - -proc toMembershipKeyPairs*(groupKeys: seq[(string, string)]): RlnRelayResult[seq[ - MembershipKeyPair]] = - ## groupKeys is sequence of membership key tuples in the form of (identity key, identity commitment) all in 
the hexadecimal format - ## the toMembershipKeyPairs proc populates a sequence of MembershipKeyPairs using the supplied groupKeys - ## Returns an error if the conversion fails - - var groupKeyPairs = newSeq[MembershipKeyPair]() - - for i in 0..groupKeys.len-1: - try: - let - idKey = hexToUint[IDKey.len*8](groupKeys[i][0]).toBytesLE() - idCommitment = hexToUint[IDCommitment.len*8](groupKeys[i][1]).toBytesLE() - groupKeyPairs.add(MembershipKeyPair(idKey: idKey, - idCommitment: idCommitment)) - except ValueError as err: - warn "could not convert the group key to bytes", err = err.msg - return err("could not convert the group key to bytes: " & err.msg) - return ok(groupKeyPairs) - -proc calcMerkleRoot*(list: seq[IDCommitment]): RlnRelayResult[string] = - ## returns the root of the Merkle tree that is computed from the supplied list - ## the root is in hexadecimal format - ## Returns an error if the computation fails - - let rlnInstance = createRLNInstance() - if rlnInstance.isErr(): - return err("could not create rln instance: " & rlnInstance.error()) - let rln = rlnInstance.get() - - # create a Merkle tree - let membersAdded = rln.insertMembers(0, list) - if not membersAdded: - return err("could not insert members into the tree") - let root = rln.getMerkleRoot().value().inHex() - return ok(root) - -proc createMembershipList*(n: int): RlnRelayResult[( - seq[(string, string)], string - )] = - ## createMembershipList produces a sequence of membership key pairs in the form of (identity key, id commitment keys) in the hexadecimal format - ## this proc also returns the root of a Merkle tree constructed out of the identity commitment keys of the generated list - ## the output of this proc is used to initialize a static group keys (to test waku-rln-relay in the off-chain mode) - ## Returns an error if it cannot create the membership list - - # initialize a Merkle tree - let rlnInstance = createRLNInstance() - if rlnInstance.isErr(): - return err("could not create rln 
instance: " & rlnInstance.error()) - let rln = rlnInstance.get() - - var output = newSeq[(string, string)]() - var idCommitments = newSeq[IDCommitment]() - for i in 0..n-1: - - # generate a key pair - let keypairRes = rln.membershipKeyGen() - if keypairRes.isErr(): - return err("could not generate a key pair: " & keypairRes.error()) - let keypair = keypairRes.get() - let keyTuple = (keypair.idKey.inHex(), keypair.idCommitment.inHex()) - output.add(keyTuple) - - idCommitments.add(keypair.idCommitment) - - # Insert members into tree - let membersAdded = rln.insertMembers(0, idCommitments) - if not membersAdded: - return err("could not insert members into the tree") - - let root = rln.getMerkleRoot().value().inHex() - return ok((output, root)) - -proc rlnRelayStaticSetUp*(rlnRelayMembershipIndex: MembershipIndex): RlnRelayResult[(Option[seq[ - IDCommitment]], Option[MembershipKeyPair], Option[ - MembershipIndex])] = - ## rlnRelayStaticSetUp is a proc that is used to initialize the static group keys and the static membership index - ## this proc is used to test waku-rln-relay in the off-chain mode - ## it returns the static group keys, the static membership key pair, and the static membership index - ## Returns an error if it cannot initialize the static group keys and the static membership index - let - # static group - groupKeys = StaticGroupKeys - groupSize = StaticGroupSize - - debug "rln-relay membership index", rlnRelayMembershipIndex - - # validate the user-supplied membership index - if rlnRelayMembershipIndex < MembershipIndex(0) or rlnRelayMembershipIndex >= - MembershipIndex(groupSize): - error "wrong membership index" - return ok((none(seq[IDCommitment]), none(MembershipKeyPair), none(MembershipIndex))) - - # prepare the outputs from the static group keys - let - # create a sequence of MembershipKeyPairs from the group keys (group keys are in string format) - groupKeyPairsRes = groupKeys.toMembershipKeyPairs() - - if groupKeyPairsRes.isErr(): - return 
err("could not convert the group keys to MembershipKeyPairs: " & - groupKeyPairsRes.error()) - - let - groupKeyPairs = groupKeyPairsRes.get() - # extract id commitment keys - groupIDCommitments = groupKeyPairs.mapIt(it.idCommitment) - groupOpt = some(groupIDCommitments) - # user selected membership key pair - memKeyPairOpt = some(groupKeyPairs[rlnRelayMembershipIndex]) - memIndexOpt = some(rlnRelayMembershipIndex) - - return ok((groupOpt, memKeyPairOpt, memIndexOpt)) - -proc hasDuplicate*(rlnPeer: WakuRLNRelay, msg: WakuMessage): RlnRelayResult[bool] = - ## returns true if there is another message in the `nullifierLog` of the `rlnPeer` with the same - ## epoch and nullifier as `msg`'s epoch and nullifier but different Shamir secret shares - ## otherwise, returns false - ## Returns an error if it cannot check for duplicates - - # extract the proof metadata of the supplied `msg` - let proofMD = ProofMetadata(nullifier: msg.proof.nullifier, - shareX: msg.proof.shareX, shareY: msg.proof.shareY) - - # check if the epoch exists - if not rlnPeer.nullifierLog.hasKey(msg.proof.epoch): - return ok(false) - try: - if rlnPeer.nullifierLog[msg.proof.epoch].contains(proofMD): - # there is an identical record, ignore rhe mag - return ok(false) - - # check for a message with the same nullifier but different secret shares - let matched = rlnPeer.nullifierLog[msg.proof.epoch].filterIt(( - it.nullifier == proofMD.nullifier) and ((it.shareX != proofMD.shareX) or - (it.shareY != proofMD.shareY))) - - if matched.len != 0: - # there is a duplicate - return ok(true) - - # there is no duplicate - return ok(false) - - except KeyError as e: - return err("the epoch was not found") - -proc updateLog*(rlnPeer: WakuRLNRelay, msg: WakuMessage): RlnRelayResult[bool] = - ## extracts the `ProofMetadata` of the supplied messages `msg` and - ## saves it in the `nullifierLog` of the `rlnPeer` - ## Returns an error if it cannot update the log - - let proofMD = ProofMetadata(nullifier: 
msg.proof.nullifier, - shareX: msg.proof.shareX, shareY: msg.proof.shareY) - debug "proof metadata", proofMD = proofMD - - # check if the epoch exists - if not rlnPeer.nullifierLog.hasKey(msg.proof.epoch): - rlnPeer.nullifierLog[msg.proof.epoch] = @[proofMD] - return ok(true) - - try: - # check if an identical record exists - if rlnPeer.nullifierLog[msg.proof.epoch].contains(proofMD): - return ok(true) - # add proofMD to the log - rlnPeer.nullifierLog[msg.proof.epoch].add(proofMD) - return ok(true) - except KeyError as e: - return err("the epoch was not found") - -proc toEpoch*(t: uint64): Epoch = - ## converts `t` to `Epoch` in little-endian order - let bytes = toBytes(t, Endianness.littleEndian) - debug "bytes", bytes = bytes - var epoch: Epoch - discard epoch.copyFrom(bytes) - return epoch - -proc fromEpoch*(epoch: Epoch): uint64 = - ## decodes bytes of `epoch` (in little-endian) to uint64 - let t = fromBytesLE(uint64, array[32, byte](epoch)) - return t - -proc calcEpoch*(t: float64): Epoch = - ## gets time `t` as `flaot64` with subseconds resolution in the fractional part - ## and returns its corresponding rln `Epoch` value - let e = uint64(t/EpochUnitSeconds) - return toEpoch(e) - -proc getCurrentEpoch*(): Epoch = - ## gets the current rln Epoch time - return calcEpoch(epochTime()) - -proc absDiff*(e1, e2: Epoch): uint64 = - ## returns the absolute difference between the two rln `Epoch`s `e1` and `e2` - ## i.e., e1 - e2 - - # convert epochs to their corresponding unsigned numerical values - let - epoch1 = fromEpoch(e1) - epoch2 = fromEpoch(e2) - - # Manually perform an `abs` calculation - if epoch1 > epoch2: - return epoch1 - epoch2 - else: - return epoch2 - epoch1 - -proc validateMessage*(rlnPeer: WakuRLNRelay, msg: WakuMessage, - timeOption: Option[float64] = none(float64)): MessageValidationResult = - ## validate the supplied `msg` based on the waku-rln-relay routing protocol i.e., - ## the `msg`'s epoch is within MaxEpochGap of the current epoch - ## the 
`msg` has valid rate limit proof - ## the `msg` does not violate the rate limit - ## `timeOption` indicates Unix epoch time (fractional part holds sub-seconds) - ## if `timeOption` is supplied, then the current epoch is calculated based on that - - # track message count for metrics - waku_rln_messages_total.inc() - - # checks if the `msg`'s epoch is far from the current epoch - # it corresponds to the validation of rln external nullifier - var epoch: Epoch - if timeOption.isSome(): - epoch = calcEpoch(timeOption.get()) - else: - # get current rln epoch - epoch = getCurrentEpoch() - - debug "current epoch", currentEpoch = fromEpoch(epoch) - let - msgEpoch = msg.proof.epoch - # calculate the gaps - gap = absDiff(epoch, msgEpoch) - - debug "message epoch", msgEpoch = fromEpoch(msgEpoch) - - # validate the epoch - if gap > MaxEpochGap: - # message's epoch is too old or too ahead - # accept messages whose epoch is within +-MaxEpochGap from the current epoch - warn "invalid message: epoch gap exceeds a threshold", gap = gap, - payload = string.fromBytes(msg.payload) - waku_rln_invalid_messages_total.inc(labelValues=["invalid_epoch"]) - return MessageValidationResult.Invalid - - ## TODO: FIXME after resolving this issue https://github.com/status-im/nwaku/issues/1247 - if not rlnPeer.validateRoot(msg.proof.merkleRoot): - debug "invalid message: provided root does not belong to acceptable window of roots", provided=msg.proof.merkleRoot, validRoots=rlnPeer.validMerkleRoots.mapIt(it.inHex()) - waku_rln_invalid_messages_total.inc(labelValues=["invalid_root"]) - # return MessageValidationResult.Invalid - - # verify the proof - let - contentTopicBytes = msg.contentTopic.toBytes - input = concat(msg.payload, contentTopicBytes) - - waku_rln_proof_verification_total.inc() - waku_rln_proof_verification_duration_seconds.nanosecondTime: - let proofVerificationRes = rlnPeer.rlnInstance.proofVerify(input, msg.proof) - - if proofVerificationRes.isErr(): - 
waku_rln_errors_total.inc(labelValues=["proof_verification"]) - warn "invalid message: proof verification failed", payload = string.fromBytes(msg.payload) - return MessageValidationResult.Invalid - if not proofVerificationRes.value(): - # invalid proof - debug "invalid message: invalid proof", payload = string.fromBytes(msg.payload) - waku_rln_invalid_messages_total.inc(labelValues=["invalid_proof"]) - return MessageValidationResult.Invalid - - # check if double messaging has happened - let hasDup = rlnPeer.hasDuplicate(msg) - if hasDup.isErr(): - waku_rln_errors_total.inc(labelValues=["duplicate_check"]) - elif hasDup.value == true: - debug "invalid message: message is spam", payload = string.fromBytes(msg.payload) - waku_rln_spam_messages_total.inc() - return MessageValidationResult.Spam - - # insert the message to the log - # the result of `updateLog` is discarded because message insertion is guaranteed by the implementation i.e., - # it will never error out - discard rlnPeer.updateLog(msg) - debug "message is valid", payload = string.fromBytes(msg.payload) - let rootIndex = rlnPeer.validMerkleRoots.find(msg.proof.merkleRoot) - waku_rln_valid_messages_total.observe(rootIndex.toFloat()) - return MessageValidationResult.Valid - -proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] = - ## it is a utility proc that prepares the `data` parameter of the proof generation procedure i.e., `proofGen` that resides in the current module - ## it extracts the `contentTopic` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence - let - contentTopicBytes = wakumessage.contentTopic.toBytes - output = concat(wakumessage.payload, contentTopicBytes) - return output - -proc appendRLNProof*(rlnPeer: WakuRLNRelay, msg: var WakuMessage, - senderEpochTime: float64): bool = - ## returns true if it can create and append a `RateLimitProof` to the supplied `msg` - ## returns false otherwise - ## `senderEpochTime` indicates the number of seconds passed 
since Unix epoch. The fractional part holds sub-seconds. - ## The `epoch` field of `RateLimitProof` is derived from the provided `senderEpochTime` (using `calcEpoch()`) - - let input = msg.toRLNSignal() - - var proof: RateLimitProofResult = proofGen(rlnInstance = rlnPeer.rlnInstance, data = input, - memKeys = rlnPeer.membershipKeyPair, - memIndex = rlnPeer.membershipIndex, - epoch = calcEpoch(senderEpochTime)) - - if proof.isErr: - return false - - msg.proof = proof.value - return true - -proc addAll*(wakuRlnRelay: WakuRLNRelay, list: seq[IDCommitment]): RlnRelayResult[void] = - # add members to the Merkle tree of the `rlnInstance` - ## Returns an error if it cannot add any member to the Merkle tree - let membersAdded = wakuRlnRelay.insertMembers(0, list) - if not membersAdded.isOk(): - return err("failed to add members to the Merkle tree") - return ok() - -proc generateGroupUpdateHandler(rlnPeer: WakuRLNRelay): GroupUpdateHandler = - ## assuming all the members arrive in order - ## TODO: check the index and the pubkey depending on - ## the group update operation - var handler: GroupUpdateHandler - handler = proc(blockNumber: BlockNumber, members: seq[MembershipTuple]): RlnRelayResult[void] = - let startingIndex = members[0].index - debug "starting index", startingIndex = startingIndex, members = members.mapIt(it.idComm.inHex()) - let isSuccessful = rlnPeer.insertMembers(startingIndex, members.mapIt(it.idComm)) - if isSuccessful.isErr(): - return err("failed to add new members to the Merkle tree") - else: - debug "new members added to the Merkle tree", pubkeys=members.mapIt(it.idComm.inHex()) , startingIndex=startingIndex - debug "acceptable window", validRoots=rlnPeer.validMerkleRoots.mapIt(it.inHex()) - let lastIndex = members[0].index + members.len.uint - 1 - let indexGap = startingIndex - rlnPeer.lastSeenMembershipIndex - if not (toSeq(startingIndex..lastIndex) == members.mapIt(it.index)): - return err("the indexes of the new members are not in order") - if 
indexGap != 1.uint: - warn "membership index gap, may have lost connection", lastIndex, currIndex=rlnPeer.lastSeenMembershipIndex, indexGap = indexGap - rlnPeer.lastSeenMembershipIndex = lastIndex - rlnPeer.lastProcessedBlock = blockNumber - debug "last processed block", blockNumber = blockNumber - return ok() - return handler - -proc parse*(event: type MemberRegistered, - log: JsonNode): RlnRelayResult[MembershipTuple] = - ## parses the `data` parameter of the `MemberRegistered` event `log` - ## returns an error if it cannot parse the `data` parameter - var pubkey: UInt256 - var index: UInt256 - var data: string - # Remove the 0x prefix - try: - data = strip0xPrefix(log["data"].getStr()) - except CatchableError: - return err("failed to parse the data field of the MemberRegistered event: " & getCurrentExceptionMsg()) - var offset = 0 - try: - # Parse the pubkey - offset += decode(data, offset, pubkey) - # Parse the index - offset += decode(data, offset, index) - return ok((index: index.toMembershipIndex(), - idComm: pubkey.toIDCommitment())) - except: - return err("failed to parse the data field of the MemberRegistered event") - -type BlockTable = OrderedTable[BlockNumber, seq[MembershipTuple]] -proc getHistoricalEvents*(ethClientUri: string, - contractAddress: Address, - fromBlock: string = "0x0", - toBlock: string = "latest"): Future[RlnRelayResult[BlockTable]] {.async, gcsafe.} = - ## `ethClientUri` is the URI of the Ethereum client - ## `contractAddress` is the address of the contract - ## `fromBlock` is the block number from which the events are fetched - ## `toBlock` is the block number to which the events are fetched - ## returns a table that maps block numbers to the list of members registered in that block - ## returns an error if it cannot retrieve the historical events - let web3 = await newWeb3(ethClientUri) - let contract = web3.contractSender(MembershipContract, contractAddress) - # Get the historical events, and insert memberships into the tree - let 
historicalEvents = await contract.getJsonLogs(MemberRegistered, - fromBlock=some(fromBlock.blockId()), - toBlock=some(toBlock.blockId())) - # Create a table that maps block numbers to the list of members registered in that block - var blockTable = OrderedTable[BlockNumber, seq[MembershipTuple]]() - for log in historicalEvents: - # batch according to log.blockNumber - let blockNumber = parseHexInt(log["blockNumber"].getStr()).uint - let parsedEventRes = parse(MemberRegistered, log) - - if parsedEventRes.isErr(): - error "failed to parse the MemberRegistered event", error=parsedEventRes.error() - return err("failed to parse the MemberRegistered event") - let parsedEvent = parsedEventRes.get() - # Add the parsed event to the table - if blockTable.hasKey(blockNumber): - blockTable[blockNumber].add(parsedEvent) - else: - blockTable[blockNumber] = @[parsedEvent] - return ok(blockTable) - -proc subscribeToGroupEvents*(ethClientUri: string, - ethAccountAddress: Option[Address] = none(Address), - contractAddress: Address, - blockNumber: string = "0x0", - handler: GroupUpdateHandler) {.async, gcsafe.} = - ## connects to the eth client whose URI is supplied as `ethClientUri` - ## subscribes to the `MemberRegistered` event emitted from the `MembershipContract` which is available on the supplied `contractAddress` - ## it collects all the events starting from the given `blockNumber` - ## for every received block, it calls the `handler` - let web3 = await newWeb3(ethClientUri) - let contract = web3.contractSender(MembershipContract, contractAddress) - - let blockTableRes = await getHistoricalEvents(ethClientUri, - contractAddress, - fromBlock=blockNumber) - if blockTableRes.isErr(): - error "failed to retrieve historical events", error=blockTableRes.error - return - let blockTable = blockTableRes.get() - # Update MT by batch - for blockNumber, members in blockTable.pairs(): - debug "updating the Merkle tree", blockNumber=blockNumber, members=members - let res = 
handler(blockNumber, members) - if res.isErr(): - error "failed to update the Merkle tree", error=res.error - - # We don't need the block table after this point - discard blockTable - - var latestBlock: BlockNumber - let handleLog = proc(blockHeader: BlockHeader) {.async, gcsafe.} = - try: - let membershipRegistrationLogs = await contract.getJsonLogs(MemberRegistered, - blockHash = some(blockheader.hash)) - if membershipRegistrationLogs.len == 0: - return - var members: seq[MembershipTuple] - for log in membershipRegistrationLogs: - let parsedEventRes = parse(MemberRegistered, log) - if parsedEventRes.isErr(): - fatal "failed to parse the MemberRegistered event", error=parsedEventRes.error() - return - let parsedEvent = parsedEventRes.get() - members.add(parsedEvent) - let res = handler(blockHeader.number.uint, members) - if res.isErr(): - error "failed to update the Merkle tree", error=res.error - except CatchableError: - warn "failed to get logs", error=getCurrentExceptionMsg() - return - let newHeadCallback = proc (blockheader: BlockHeader) {.gcsafe.} = - latestBlock = blockheader.number.uint - debug "block received", blockNumber = latestBlock - # get logs from the last block - try: - asyncSpawn handleLog(blockHeader) - except CatchableError: - warn "failed to handle log: ", error=getCurrentExceptionMsg() - - let newHeadErrorHandler = proc (err: CatchableError) {.gcsafe.} = - error "Error from subscription: ", err=err.msg - discard await web3.subscribeForBlockHeaders(newHeadCallback, newHeadErrorHandler) - - web3.onDisconnect = proc() = - debug "connection to ethereum node dropped", lastBlock = latestBlock - -proc handleGroupUpdates*(rlnPeer: WakuRLNRelay) {.async, gcsafe.} = - ## generates the groupUpdateHandler which is called when a new member is registered, - ## and has the WakuRLNRelay instance as a closure - let handler = generateGroupUpdateHandler(rlnPeer) - await subscribeToGroupEvents(ethClientUri = rlnPeer.ethClientAddress, - ethAccountAddress = 
rlnPeer.ethAccountAddress, - contractAddress = rlnPeer.membershipContractAddress, - handler = handler) - -proc addRLNRelayValidator*(node: WakuNode, pubsubTopic: PubsubTopic, contentTopic: ContentTopic, spamHandler: Option[SpamHandler] = none(SpamHandler)) = - ## this procedure is a thin wrapper for the pubsub addValidator method - ## it sets a validator for the waku messages published on the supplied pubsubTopic and contentTopic - ## if contentTopic is empty, then validation takes place for All the messages published on the given pubsubTopic - ## the message validation logic is according to https://rfc.vac.dev/spec/17/ - proc validator(topic: string, message: messages.Message): Future[pubsub.ValidationResult] {.async.} = - trace "rln-relay topic validator is called" - let msg = WakuMessage.decode(message.data) - if msg.isOk(): - let - wakumessage = msg.value() - payload = string.fromBytes(wakumessage.payload) - - # check the contentTopic - if (wakumessage.contentTopic != "") and (contentTopic != "") and (wakumessage.contentTopic != contentTopic): - trace "content topic did not match:", contentTopic=wakumessage.contentTopic, payload=payload - return pubsub.ValidationResult.Accept - - # validate the message - let - validationRes = node.wakuRlnRelay.validateMessage(wakumessage) - proof = toHex(wakumessage.proof.proof) - epoch = fromEpoch(wakumessage.proof.epoch) - root = inHex(wakumessage.proof.merkleRoot) - shareX = inHex(wakumessage.proof.shareX) - shareY = inHex(wakumessage.proof.shareY) - nullifier = inHex(wakumessage.proof.nullifier) - case validationRes: - of Valid: - debug "message validity is verified, relaying:", contentTopic=wakumessage.contentTopic, epoch=epoch, timestamp=wakumessage.timestamp, payload=payload - trace "message validity is verified, relaying:", proof=proof, root=root, shareX=shareX, shareY=shareY, nullifier=nullifier - return pubsub.ValidationResult.Accept - of Invalid: - debug "message validity could not be verified, discarding:", 
contentTopic=wakumessage.contentTopic, epoch=epoch, timestamp=wakumessage.timestamp, payload=payload - trace "message validity could not be verified, discarding:", proof=proof, root=root, shareX=shareX, shareY=shareY, nullifier=nullifier - return pubsub.ValidationResult.Reject - of Spam: - debug "A spam message is found! yay! discarding:", contentTopic=wakumessage.contentTopic, epoch=epoch, timestamp=wakumessage.timestamp, payload=payload - trace "A spam message is found! yay! discarding:", proof=proof, root=root, shareX=shareX, shareY=shareY, nullifier=nullifier - if spamHandler.isSome: - let handler = spamHandler.get - handler(wakumessage) - return pubsub.ValidationResult.Reject - # set a validator for the supplied pubsubTopic - let pb = PubSub(node.wakuRelay) - pb.addValidator(pubsubTopic, validator) - -proc mountRlnRelayStatic*(node: WakuNode, - group: seq[IDCommitment], - memKeyPair: MembershipKeyPair, - memIndex: MembershipIndex, - pubsubTopic: PubsubTopic, - contentTopic: ContentTopic, - spamHandler: Option[SpamHandler] = none(SpamHandler)): RlnRelayResult[void] = - # Returns RlnRelayResult[void] to indicate the success of the call - - debug "mounting rln-relay in off-chain/static mode" - # check whether inputs are provided - # relay protocol is the prerequisite of rln-relay - if node.wakuRelay.isNil(): - return err("WakuRelay protocol is not mounted") - # check whether the pubsub topic is supported at the relay level - if pubsubTopic notin node.wakuRelay.defaultPubsubTopics: - return err("The relay protocol does not support the configured pubsub topic") - - debug "rln-relay input validation passed" - - # check the peer's index and the inclusion of user's identity commitment in the group - if not memKeyPair.idCommitment == group[int(memIndex)]: - return err("The peer's index is not consistent with the group") - - # create an RLN instance - let rlnInstance = createRLNInstance() - if rlnInstance.isErr(): - return err("RLN instance creation failed") - let rln = 
rlnInstance.get() - - # create the WakuRLNRelay - let rlnPeer = WakuRLNRelay(membershipKeyPair: memKeyPair, - membershipIndex: memIndex, - rlnInstance: rln, - pubsubTopic: pubsubTopic, - contentTopic: contentTopic) - - # add members to the Merkle tree - let membersAdded = rlnPeer.insertMembers(0, group) - if membersAdded.isErr(): - return err("member addition to the Merkle tree failed: " & membersAdded.error) - - # adds a topic validator for the supplied pubsub topic at the relay protocol - # messages published on this pubsub topic will be relayed upon a successful validation, otherwise they will be dropped - # the topic validator checks for the correct non-spamming proof of the message - node.addRLNRelayValidator(pubsubTopic, contentTopic, spamHandler) - debug "rln relay topic validator is mounted successfully", pubsubTopic=pubsubTopic, contentTopic=contentTopic - - node.wakuRlnRelay = rlnPeer - return ok() - -proc mountRlnRelayDynamic*(node: WakuNode, - ethClientAddr: string = "", - ethAccountAddress: Option[web3.Address] = none(web3.Address), - ethAccountPrivKeyOpt: Option[keys.PrivateKey], - memContractAddr: web3.Address, - memKeyPair: Option[MembershipKeyPair] = none(MembershipKeyPair), - memIndex: Option[MembershipIndex] = none(MembershipIndex), - pubsubTopic: PubsubTopic, - contentTopic: ContentTopic, - spamHandler: Option[SpamHandler] = none(SpamHandler), - registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)) : Future[RlnRelayResult[void]] {.async.} = - debug "mounting rln-relay in on-chain/dynamic mode" - # TODO return a bool value to indicate the success of the call - # relay protocol is the prerequisite of rln-relay - if node.wakuRelay.isNil: - return err("WakuRelay protocol is not mounted.") - # check whether the pubsub topic is supported at the relay level - if pubsubTopic notin node.wakuRelay.defaultPubsubTopics: - return err("WakuRelay protocol does not support the configured pubsub topic.") - debug "rln-relay input 
validation passed" - - # create an RLN instance - let rlnInstance = createRLNInstance() - - if rlnInstance.isErr(): - return err("RLN instance creation failed.") - let rln = rlnInstance.get() - - # prepare rln membership key pair - var - keyPair: MembershipKeyPair - rlnIndex: MembershipIndex - if memKeyPair.isNone: # no rln credentials provided - if ethAccountPrivKeyOpt.isSome: # if an ethereum private key is supplied, then create rln credentials and register to the membership contract - trace "no rln-relay key is provided, generating one" - let keyPairRes = rln.membershipKeyGen() - if keyPairRes.isErr(): - error "failed to generate rln-relay key pair" - return err("failed to generate rln-relay key pair: " & keyPairRes.error()) - keyPair = keyPairRes.value() - # register the rln-relay peer to the membership contract - waku_rln_registration_duration_seconds.nanosecondTime: - let regIndexRes = await register(idComm = keyPair.idCommitment, - ethAccountAddress = ethAccountAddress, - ethAccountPrivKey = ethAccountPrivKeyOpt.get(), - ethClientAddress = ethClientAddr, - membershipContractAddress = memContractAddr, - registrationHandler = registrationHandler) - # check whether registration is done - if regIndexRes.isErr(): - debug "membership registration failed", err=regIndexRes.error() - return err("membership registration failed: " & regIndexRes.error()) - rlnIndex = regIndexRes.value - debug "peer is successfully registered into the membership contract" - else: # if no eth private key is available, skip registration - debug "running waku-rln-relay in relay-only mode" - else: - debug "Peer is already registered to the membership contract" - keyPair = memKeyPair.get() - rlnIndex = memIndex.get() - - # create the WakuRLNRelay - var rlnPeer = WakuRLNRelay(membershipKeyPair: keyPair, - membershipIndex: rlnIndex, - membershipContractAddress: memContractAddr, - ethClientAddress: ethClientAddr, - ethAccountAddress: ethAccountAddress, - ethAccountPrivateKey: 
ethAccountPrivKeyOpt, - rlnInstance: rln, - pubsubTopic: pubsubTopic, - contentTopic: contentTopic) - - asyncSpawn rlnPeer.handleGroupUpdates() - debug "dynamic group management is started" - # adds a topic validator for the supplied pubsub topic at the relay protocol - # messages published on this pubsub topic will be relayed upon a successful validation, otherwise they will be dropped - # the topic validator checks for the correct non-spamming proof of the message - addRLNRelayValidator(node, pubsubTopic, contentTopic, spamHandler) - debug "rln relay topic validator is mounted successfully", pubsubTopic=pubsubTopic, contentTopic=contentTopic - - node.wakuRlnRelay = rlnPeer - return ok() - -proc writeRlnCredentials*(path: string, - credentials: RlnMembershipCredentials, - password: string): RlnRelayResult[void] = - # Returns RlnRelayResult[void], which indicates the success of the call - info "Storing RLN credentials" - var jsonString: string - jsonString.toUgly(%credentials) - let keyfile = createKeyFileJson(toBytes(jsonString), password) - if keyfile.isErr(): - return err("Error while creating keyfile for RLN credentials") - if saveKeyFile(path, keyfile.get()).isErr(): - return err("Error while saving keyfile for RLN credentials") - return ok() - -# Attempts decryptions of all keyfiles with the provided password. -# If one or more credentials are successfully decrypted, the max(min(index,number_decrypted),0)-th is returned. -proc readRlnCredentials*(path: string, - password: string, - index: int = 0): RlnRelayResult[Option[RlnMembershipCredentials]] = - # Returns RlnRelayResult[Option[RlnMembershipCredentials]], which indicates the success of the call - info "Reading RLN credentials" - # With regards to printing the keys, it is purely for debugging purposes so that the user becomes explicitly aware of the current keys in use when nwaku is started. - # Note that this is only until the RLN contract being used is the one deployed on Goerli testnet. 
- # These prints need to omitted once RLN contract is deployed on Ethereum mainnet and using valuable funds for staking. - waku_rln_membership_credentials_import_duration_seconds.nanosecondTime: - - try: - var decodedKeyfiles = loadKeyFiles(path, password) - - if decodedKeyfiles.isOk(): - var decodedRlnCredentials = decodedKeyfiles.get() - debug "Successfully decrypted keyfiles for the provided password", numberKeyfilesDecrypted=decodedRlnCredentials.len - # We should return the index-th decrypted credential, but we ensure to not overflow - let credentialIndex = max(min(index, decodedRlnCredentials.len - 1), 0) - debug "Picking credential with (adjusted) index", inputIndex=index, adjustedIndex=credentialIndex - let jsonObject = parseJson(string.fromBytes(decodedRlnCredentials[credentialIndex].get())) - let deserializedRlnCredentials = to(jsonObject, RlnMembershipCredentials) - debug "Deserialized RLN credentials", rlnCredentials=deserializedRlnCredentials - return ok(some(deserializedRlnCredentials)) - else: - debug "Unable to decrypt RLN credentials with provided password. ", error=decodedKeyfiles.error - return ok(none(RlnMembershipCredentials)) + var web3: Web3 + try: # check if the Ethereum client is reachable + web3 = await newWeb3(ethClientAddress) except: - return err("Error while loading keyfile for RLN credentials at " & path) + return err("could not connect to the Ethereum client") -proc mount(node: WakuNode, - conf: WakuRlnConfig, - spamHandler: Option[SpamHandler] = none(SpamHandler), - registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler) - ): Future[RlnRelayResult[void]] {.async.} = - # Returns RlnRelayResult[void], which indicates the success of the call - if not conf.rlnRelayDynamic: - info " setting up waku-rln-relay in off-chain mode... 
" - # set up rln relay inputs - let staticSetupRes = rlnRelayStaticSetUp(MembershipIndex(conf.rlnRelayMembershipIndex)) - if staticSetupRes.isErr(): - return err("rln relay static setup failed: " & staticSetupRes.error()) - let (groupOpt, memKeyPairOpt, memIndexOpt) = staticSetupRes.get() - if memIndexOpt.isNone: - error "failed to mount WakuRLNRelay" - return err("failed to mount WakuRLNRelay") - else: - # mount rlnrelay in off-chain mode with a static group of users - let mountRes = node.mountRlnRelayStatic(group = groupOpt.get(), - memKeyPair = memKeyPairOpt.get(), - memIndex= memIndexOpt.get(), - pubsubTopic = conf.rlnRelayPubsubTopic, - contentTopic = conf.rlnRelayContentTopic, - spamHandler = spamHandler) - - if mountRes.isErr(): - return err("Failed to mount WakuRLNRelay: " & mountRes.error()) - - info "membership id key", idkey=memKeyPairOpt.get().idKey.inHex() - info "membership id commitment key", idCommitmentkey=memKeyPairOpt.get().idCommitment.inHex() - - # check the correct construction of the tree by comparing the calculated root against the expected root - # no error should happen as it is already captured in the unit tests - # TODO have added this check to account for unseen corner cases, will remove it later - let - rootRes = node.wakuRlnRelay.rlnInstance.getMerkleRoot() - expectedRoot = StaticGroupMerkleRoot - - if rootRes.isErr(): - return err(rootRes.error()) - - let root = rootRes.value() - - if root.inHex() != expectedRoot: - error "root mismatch: something went wrong not in Merkle tree construction" - debug "the calculated root", root - info "WakuRLNRelay is mounted successfully", pubsubtopic=conf.rlnRelayPubsubTopic, contentTopic=conf.rlnRelayContentTopic - return ok() - else: # mount the rln relay protocol in the on-chain/dynamic mode - debug "setting up waku-rln-relay in on-chain mode... 
" + if ethAccountAddress.isSome(): + web3.defaultAccount = ethAccountAddress.get() + # set the account private key + web3.privateKey = some(ethAccountPrivKey) + # set the gas price twice the suggested price in order for the fast mining + let gasPrice = int(await web3.provider.eth_gasPrice()) * 2 - debug "on-chain setup parameters", contractAddress=conf.rlnRelayEthContractAddress - # read related inputs to run rln-relay in on-chain mode and do type conversion when needed + # when the private key is set in a web3 instance, the send proc (sender.register(pk).send(MembershipFee)) + # does the signing using the provided key + # web3.privateKey = some(ethAccountPrivateKey) + var sender = web3.contractSender(MembershipContract, membershipContractAddress) # creates a Sender object with a web3 field and contract address of type Address + + debug "registering an id commitment", idComm=idComm.inHex() + let pk = idComm.toUInt256() + + var txHash: TxHash + try: # send the registration transaction and check if any error occurs + txHash = await sender.register(pk).send(value = MembershipFee, gasPrice = gasPrice) + except ValueError as e: + return err("registration transaction failed: " & e.msg) + + let tsReceipt = await web3.getMinedTransactionReceipt(txHash) + + # the receipt topic holds the hash of signature of the raised events + let firstTopic = tsReceipt.logs[0].topics[0] + # the hash of the signature of MemberRegistered(uint256,uint256) event is equal to the following hex value + if firstTopic[0..65] != "0x5a92c2530f207992057b9c3e544108ffce3beda4a63719f316967c49bf6159d2": + return err("invalid event signature hash") + + # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field + # data = pk encoded as 256 bits || index encoded as 256 bits + let arguments = tsReceipt.logs[0].data + debug "tx log data", arguments=arguments let - ethClientAddr = conf.rlnRelayEthClientAddress + argumentsBytes = arguments.hexToSeqByte() + # In TX log data, 
uints are encoded in big endian + eventIdCommUint = UInt256.fromBytesBE(argumentsBytes[0..31]) + eventIndex = UInt256.fromBytesBE(argumentsBytes[32..^1]) + eventIdComm = eventIdCommUint.toIDCommitment() + debug "the identity commitment key extracted from tx log", eventIdComm=eventIdComm.inHex() + debug "the index of registered identity commitment key", eventIndex=eventIndex - var ethMemContractAddress: web3.Address - try: - ethMemContractAddress = web3.fromHex(web3.Address, conf.rlnRelayEthContractAddress) - except ValueError as err: - return err("invalid eth contract address: " & err.msg) - var ethAccountPrivKeyOpt = none(keys.PrivateKey) - var ethAccountAddressOpt = none(Address) - var credentials = none(RlnMembershipCredentials) - var res: RlnRelayResult[void] + if eventIdComm != idComm: + return err("invalid id commitment key") - if conf.rlnRelayEthAccountPrivateKey != "": - ethAccountPrivKeyOpt = some(keys.PrivateKey(SkSecretKey.fromHex(conf.rlnRelayEthAccountPrivateKey).value)) + await web3.close() + + if registrationHandler.isSome(): + let handler = registrationHandler.get + handler(toHex(txHash)) + return ok(toMembershipIndex(eventIndex)) + + proc register*(rlnPeer: WakuRLNRelay, registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)): Future[RlnRelayResult[bool]] {.async.} = + ## registers the public key of the rlnPeer which is rlnPeer.membershipKeyPair.publicKey + ## into the membership contract whose address is in rlnPeer.membershipContractAddress + let pk = rlnPeer.membershipKeyPair.idCommitment + let regResult = await register(idComm = pk, ethAccountAddress = rlnPeer.ethAccountAddress, ethAccountPrivKey = rlnPeer.ethAccountPrivateKey.get(), ethClientAddress = rlnPeer.ethClientAddress, membershipContractAddress = rlnPeer.membershipContractAddress, registrationHandler = registrationHandler) + if regResult.isErr: + return err(regResult.error()) + return ok(true) + + proc appendLength*(input: openArray[byte]): seq[byte] = + ## returns 
length prefixed version of the input + ## with the following format [len<8>|input] + ## len: 8-byte value that represents the number of bytes in the `input` + ## len is serialized in little-endian + ## input: the supplied `input` + let + # the length should be serialized in little-endian + len = toBytes(uint64(input.len), Endianness.littleEndian) + output = concat(@len, @input) + return output + + proc hash*(rlnInstance: ptr RLN, data: openArray[byte]): MerkleNode = + ## a thin layer on top of the Nim wrapper of the Poseidon hasher + debug "hash input", hashhex = data.toHex() + var lenPrefData = appendLength(data) + var + hashInputBuffer = lenPrefData.toBuffer() + outputBuffer: Buffer # will hold the hash output + + debug "hash input buffer length", bufflen = hashInputBuffer.len + let + hashSuccess = hash(rlnInstance, addr hashInputBuffer, addr outputBuffer) + output = cast[ptr MerkleNode](outputBuffer.`ptr`)[] + + return output + + proc serialize(idKey: IDKey, memIndex: MembershipIndex, epoch: Epoch, + msg: openArray[byte]): seq[byte] = + ## a private proc to convert RateLimitProof and the data to a byte seq + ## this conversion is used in the proofGen proc + ## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146 + ## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal ] + let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian) + let lenPrefMsg = appendLength(msg) + let output = concat(@idKey, @memIndexBytes, @epoch, lenPrefMsg) + return output + + proc proofGen*(rlnInstance: ptr RLN, data: openArray[byte], + memKeys: MembershipKeyPair, memIndex: MembershipIndex, + epoch: Epoch): RateLimitProofResult = + + # serialize inputs + let serializedInputs = serialize(idKey = memKeys.idKey, + memIndex = memIndex, + epoch = epoch, + msg = data) + var inputBuffer = toBuffer(serializedInputs) + + debug "input buffer ", inputBuffer + + # generate the proof + var
proof: Buffer + let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof) + # check whether the generate_proof call is done successfully + if not proofIsSuccessful: + return err("could not generate the proof") + + var proofValue = cast[ptr array[320, byte]] (proof.`ptr`) + let proofBytes: array[320, byte] = proofValue[] + debug "proof content", proofHex = proofValue[].toHex + + ## parse the proof as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ] + + let + proofOffset = 128 + rootOffset = proofOffset + 32 + epochOffset = rootOffset + 32 + shareXOffset = epochOffset + 32 + shareYOffset = shareXOffset + 32 + nullifierOffset = shareYOffset + 32 + rlnIdentifierOffset = nullifierOffset + 32 + + var + zkproof: ZKSNARK + proofRoot, shareX, shareY: MerkleNode + epoch: Epoch + nullifier: Nullifier + rlnIdentifier: RlnIdentifier + + discard zkproof.copyFrom(proofBytes[0..proofOffset-1]) + discard proofRoot.copyFrom(proofBytes[proofOffset..rootOffset-1]) + discard epoch.copyFrom(proofBytes[rootOffset..epochOffset-1]) + discard shareX.copyFrom(proofBytes[epochOffset..shareXOffset-1]) + discard shareY.copyFrom(proofBytes[shareXOffset..shareYOffset-1]) + discard nullifier.copyFrom(proofBytes[shareYOffset..nullifierOffset-1]) + discard rlnIdentifier.copyFrom(proofBytes[nullifierOffset..rlnIdentifierOffset-1]) - if conf.rlnRelayEthAccountAddress != "": - var ethAccountAddress: web3.Address - try: - ethAccountAddress = web3.fromHex(web3.Address, conf.rlnRelayEthAccountAddress) - except ValueError as err: - return err("invalid eth account address: " & err.msg) - ethAccountAddressOpt = some(ethAccountAddress) - - # if the rlnRelayCredPath config option is non-empty, then rln-relay credentials should be persisted - # if the path does not contain any credential file, then a new set is generated and pesisted in the same path - # if there is a credential file, then no new credentials are generated, instead the 
content of the file is read and used to mount rln-relay - if conf.rlnRelayCredPath != "": - - let rlnRelayCredPath = joinPath(conf.rlnRelayCredPath, RlnCredentialsFilename) - debug "rln-relay credential path", rlnRelayCredPath - - # check if there is an rln-relay credential file in the supplied path - if fileExists(rlnRelayCredPath): - - info "A RLN credential file exists in provided path", path=rlnRelayCredPath - - # retrieve rln-relay credential - let readCredentialsRes = readRlnCredentials(rlnRelayCredPath, conf.rlnRelayCredentialsPassword) - - if readCredentialsRes.isErr(): - return err("RLN credentials cannot be read: " & readCredentialsRes.error()) + let output = RateLimitProof(proof: zkproof, + merkleRoot: proofRoot, + epoch: epoch, + shareX: shareX, + shareY: shareY, + nullifier: nullifier, + rlnIdentifier: rlnIdentifier) - credentials = readCredentialsRes.get() + return ok(output) - else: # there is no credential file available in the supplied path - # mount the rln-relay protocol leaving rln-relay credentials arguments unassigned - # this infroms mountRlnRelayDynamic proc that new credentials should be generated and registered to the membership contract - info "no rln credential is provided" - - if credentials.isSome(): - # mount rln-relay in on-chain mode, with credentials that were read or generated - res = await node.mountRlnRelayDynamic(memContractAddr = ethMemContractAddress, - ethClientAddr = ethClientAddr, - ethAccountAddress = ethAccountAddressOpt, - ethAccountPrivKeyOpt = ethAccountPrivKeyOpt, - pubsubTopic = conf.rlnRelayPubsubTopic, - contentTopic = conf.rlnRelayContentTopic, - spamHandler = spamHandler, - registrationHandler = registrationHandler, - memKeyPair = some(credentials.get().membershipKeyPair), - memIndex = some(credentials.get().rlnIndex)) - else: - # mount rln-relay in on-chain mode, with the provided private key - res = await node.mountRlnRelayDynamic(memContractAddr = ethMemContractAddress, - ethClientAddr = ethClientAddr, - 
ethAccountAddress = ethAccountAddressOpt, - ethAccountPrivKeyOpt = ethAccountPrivKeyOpt, - pubsubTopic = conf.rlnRelayPubsubTopic, - contentTopic = conf.rlnRelayContentTopic, - spamHandler = spamHandler, - registrationHandler = registrationHandler) - - # TODO should be replaced with key-store with proper encryption - # persist rln credential - credentials = some(RlnMembershipCredentials(rlnIndex: node.wakuRlnRelay.membershipIndex, - membershipKeyPair: node.wakuRlnRelay.membershipKeyPair)) - if writeRlnCredentials(rlnRelayCredPath, credentials.get(), conf.rlnRelayCredentialsPassword).isErr(): - return err("error in storing rln credentials") + proc serialize(proof: RateLimitProof, data: openArray[byte]): seq[byte] = + ## a private proc to convert RateLimitProof and data to a byte seq + ## this conversion is used in the proof verification proc + ## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal ] + let lenPrefMsg = appendLength(@data) + var proofBytes = concat(@(proof.proof), + @(proof.merkleRoot), + @(proof.epoch), + @(proof.shareX), + @(proof.shareY), + @(proof.nullifier), + @(proof.rlnIdentifier), + lenPrefMsg) + return proofBytes + + # Serializes a sequence of MerkleNodes + proc serialize(roots: seq[MerkleNode]): seq[byte] = + var rootsBytes: seq[byte] = @[] + for root in roots: + rootsBytes = concat(rootsBytes, @root) + return rootsBytes + + # validRoots should contain a sequence of roots in the acceptable windows. + # As default, it is set to an empty sequence of roots. 
This implies that the validity check for the proof's root is skipped + proc proofVerify*(rlnInstance: ptr RLN, + data: openArray[byte], + proof: RateLimitProof, + validRoots: seq[MerkleNode] = @[]): RlnRelayResult[bool] = + ## verifies the proof, returns an error if the proof verification fails + ## returns true if the proof is valid + var + proofBytes = serialize(proof, data) + proofBuffer = proofBytes.toBuffer() + validProof: bool + rootsBytes = serialize(validRoots) + rootsBuffer = rootsBytes.toBuffer() + + trace "serialized proof", proof = proofBytes.toHex() + + let verifyIsSuccessful = verify_with_roots(rlnInstance, addr proofBuffer, addr rootsBuffer, addr validProof) + if not verifyIsSuccessful: + # something went wrong in verification call + warn "could not verify validity of the proof", proof=proof + return err("could not verify the proof") + + if not validProof: + return ok(false) else: - # do not persist or use a persisted rln-relay credential - # a new credential will be generated during the mount process but will not be persisted - info "no need to persist or use a persisted rln-relay credential" - res = await node.mountRlnRelayDynamic(memContractAddr = ethMemContractAddress, ethClientAddr = ethClientAddr, - ethAccountAddress = ethAccountAddressOpt, ethAccountPrivKeyOpt = ethAccountPrivKeyOpt, pubsubTopic = conf.rlnRelayPubsubTopic, - contentTopic = conf.rlnRelayContentTopic, spamHandler = spamHandler, registrationHandler = registrationHandler) + return ok(true) + + proc insertMember*(rlnInstance: ptr RLN, idComm: IDCommitment): bool = + ## inserts a member to the tree + ## returns true if the member is inserted successfully + ## returns false if the member could not be inserted + var pkBuffer = toBuffer(idComm) + let pkBufferPtr = addr pkBuffer + + # add the member to the tree + let memberAdded = update_next_member(rlnInstance, pkBufferPtr) + return memberAdded + + proc serializeIdCommitments*(idComms: seq[IDCommitment]): seq[byte] = + ## serializes a 
seq of IDCommitments to a byte seq + ## the serialization is based on https://github.com/status-im/nwaku/blob/37bd29fbc37ce5cf636734e7dd410b1ed27b88c8/waku/v2/protocol/waku_rln_relay/rln.nim#L142 + ## the order of serialization is |id_commitment_len<8>|id_commitment| + var idCommsBytes = newSeq[byte]() + + # serialize the idComms, with its length prefixed + let len = toBytes(uint64(idComms.len), Endianness.littleEndian) + idCommsBytes.add(len) + + for idComm in idComms: + idCommsBytes = concat(idCommsBytes, @idComm) + + return idCommsBytes + + proc insertMembers*(rlnInstance: ptr RLN, + index: MembershipIndex, + idComms: seq[IDCommitment]): bool = + ## Insert multiple members i.e., identity commitments + ## returns true if the insertion is successful + ## returns false if any of the insertions fails + ## Note: This proc is atomic, i.e., if any of the insertions fails, all the previous insertions are rolled back + + # serialize the idComms + let idCommsBytes = serializeIdCommitments(idComms) - if res.isErr(): - return err("dynamic rln-relay could not be mounted: " & res.error()) + var idCommsBuffer = idCommsBytes.toBuffer() + let idCommsBufferPtr = addr idCommsBuffer + # add the member to the tree + let membersAdded = set_leaves_from(rlnInstance, index, idCommsBufferPtr) + return membersAdded + + proc removeMember*(rlnInstance: ptr RLN, index: MembershipIndex): bool = + let deletion_success = delete_member(rlnInstance, index) + return deletion_success + + proc getMerkleRoot*(rlnInstance: ptr RLN): MerkleNodeResult = + # read the Merkle Tree root after insertion + var + root {.noinit.}: Buffer = Buffer() + rootPtr = addr(root) + getRootSuccessful = getRoot(rlnInstance, rootPtr) + if not getRootSuccessful: + return err("could not get the root") + if not root.len == 32: + return err("wrong output size") + + var rootValue = cast[ptr MerkleNode] (root.`ptr`)[] + return ok(rootValue) + + proc updateValidRootQueue*(wakuRlnRelay: WakuRLNRelay, root: MerkleNode): void = + ## 
updates the valid Merkle root queue with the latest root and pops the oldest one when the capacity of `AcceptableRootWindowSize` is reached + let overflowCount = wakuRlnRelay.validMerkleRoots.len() - AcceptableRootWindowSize + if overflowCount >= 0: + # Delete the oldest `overflowCount` elements in the deque (index 0..`overflowCount`) + for i in 0..overflowCount: + wakuRlnRelay.validMerkleRoots.popFirst() + # Push the next root into the queue + wakuRlnRelay.validMerkleRoots.addLast(root) + + proc insertMembers*(wakuRlnRelay: WakuRLNRelay, + index: MembershipIndex, + idComms: seq[IDCommitment]): RlnRelayResult[void] = + ## inserts a sequence of id commitments into the local merkle tree, and adds the changed root to the + ## queue of valid roots + ## Returns an error if the insertion fails + waku_rln_membership_insertion_duration_seconds.nanosecondTime: + let actionSucceeded = wakuRlnRelay.rlnInstance.insertMembers(index, idComms) + if not actionSucceeded: + return err("could not insert id commitments into the merkle tree") + + let rootAfterUpdate = ?wakuRlnRelay.rlnInstance.getMerkleRoot() + wakuRlnRelay.updateValidRootQueue(rootAfterUpdate) return ok() -proc mountRlnRelay*(node: WakuNode, - conf: WakuRlnConfig, - spamHandler: Option[SpamHandler] = none(SpamHandler), - registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler) - ): Future[RlnRelayResult[void]] {.async.} = - ## Mounts the rln-relay protocol on the node. - ## The rln-relay protocol can be mounted in two modes: on-chain and off-chain. - ## Returns an error if the rln-relay protocol could not be mounted. 
- waku_rln_relay_mounting_duration_seconds.nanosecondTime: - let res = await mount( - node, - conf, - spamHandler, - registrationHandler - ) - return res + proc removeMember*(wakuRlnRelay: WakuRLNRelay, index: MembershipIndex): RlnRelayResult[void] = + ## removes a commitment from the local merkle tree at `index`, and adds the changed root to the + ## queue of valid roots + ## Returns an error if the removal fails + + let actionSucceeded = wakuRlnRelay.rlnInstance.removeMember(index) + if not actionSucceeded: + return err("could not remove id commitment from the merkle tree") + + let rootAfterUpdate = ?wakuRlnRelay.rlnInstance.getMerkleRoot() + wakuRlnRelay.updateValidRootQueue(rootAfterUpdate) + return ok() + + proc validateRoot*(wakuRlnRelay: WakuRLNRelay, root: MerkleNode): bool = + ## Validate against the window of roots stored in wakuRlnRelay.validMerkleRoots + return root in wakuRlnRelay.validMerkleRoots + + proc toMembershipKeyPairs*(groupKeys: seq[(string, string)]): RlnRelayResult[seq[ + MembershipKeyPair]] = + ## groupKeys is sequence of membership key tuples in the form of (identity key, identity commitment) all in the hexadecimal format + ## the toMembershipKeyPairs proc populates a sequence of MembershipKeyPairs using the supplied groupKeys + ## Returns an error if the conversion fails + + var groupKeyPairs = newSeq[MembershipKeyPair]() + + for i in 0..groupKeys.len-1: + try: + let + idKey = hexToUint[IDKey.len*8](groupKeys[i][0]).toBytesLE() + idCommitment = hexToUint[IDCommitment.len*8](groupKeys[i][1]).toBytesLE() + groupKeyPairs.add(MembershipKeyPair(idKey: idKey, + idCommitment: idCommitment)) + except ValueError as err: + warn "could not convert the group key to bytes", err = err.msg + return err("could not convert the group key to bytes: " & err.msg) + return ok(groupKeyPairs) + + proc calcMerkleRoot*(list: seq[IDCommitment]): RlnRelayResult[string] = + ## returns the root of the Merkle tree that is computed from the supplied list + ## the root 
is in hexadecimal format + ## Returns an error if the computation fails + + let rlnInstance = createRLNInstance() + if rlnInstance.isErr(): + return err("could not create rln instance: " & rlnInstance.error()) + let rln = rlnInstance.get() + + # create a Merkle tree + let membersAdded = rln.insertMembers(0, list) + if not membersAdded: + return err("could not insert members into the tree") + let root = rln.getMerkleRoot().value().inHex() + return ok(root) + + proc createMembershipList*(n: int): RlnRelayResult[( + seq[(string, string)], string + )] = + ## createMembershipList produces a sequence of membership key pairs in the form of (identity key, id commitment keys) in the hexadecimal format + ## this proc also returns the root of a Merkle tree constructed out of the identity commitment keys of the generated list + ## the output of this proc is used to initialize a static group keys (to test waku-rln-relay in the off-chain mode) + ## Returns an error if it cannot create the membership list + + # initialize a Merkle tree + let rlnInstance = createRLNInstance() + if rlnInstance.isErr(): + return err("could not create rln instance: " & rlnInstance.error()) + let rln = rlnInstance.get() + + var output = newSeq[(string, string)]() + var idCommitments = newSeq[IDCommitment]() + for i in 0..n-1: + + # generate a key pair + let keypairRes = rln.membershipKeyGen() + if keypairRes.isErr(): + return err("could not generate a key pair: " & keypairRes.error()) + let keypair = keypairRes.get() + let keyTuple = (keypair.idKey.inHex(), keypair.idCommitment.inHex()) + output.add(keyTuple) + + idCommitments.add(keypair.idCommitment) + + # Insert members into tree + let membersAdded = rln.insertMembers(0, idCommitments) + if not membersAdded: + return err("could not insert members into the tree") + + let root = rln.getMerkleRoot().value().inHex() + return ok((output, root)) + + proc rlnRelayStaticSetUp*(rlnRelayMembershipIndex: MembershipIndex): RlnRelayResult[(Option[seq[ + 
IDCommitment]], Option[MembershipKeyPair], Option[ + MembershipIndex])] = + ## rlnRelayStaticSetUp is a proc that is used to initialize the static group keys and the static membership index + ## this proc is used to test waku-rln-relay in the off-chain mode + ## it returns the static group keys, the static membership key pair, and the static membership index + ## Returns an error if it cannot initialize the static group keys and the static membership index + let + # static group + groupKeys = StaticGroupKeys + groupSize = StaticGroupSize + + debug "rln-relay membership index", rlnRelayMembershipIndex + + # validate the user-supplied membership index + if rlnRelayMembershipIndex < MembershipIndex(0) or rlnRelayMembershipIndex >= + MembershipIndex(groupSize): + error "wrong membership index" + return ok((none(seq[IDCommitment]), none(MembershipKeyPair), none(MembershipIndex))) + + # prepare the outputs from the static group keys + let + # create a sequence of MembershipKeyPairs from the group keys (group keys are in string format) + groupKeyPairsRes = groupKeys.toMembershipKeyPairs() + + if groupKeyPairsRes.isErr(): + return err("could not convert the group keys to MembershipKeyPairs: " & + groupKeyPairsRes.error()) + + let + groupKeyPairs = groupKeyPairsRes.get() + # extract id commitment keys + groupIDCommitments = groupKeyPairs.mapIt(it.idCommitment) + groupOpt = some(groupIDCommitments) + # user selected membership key pair + memKeyPairOpt = some(groupKeyPairs[rlnRelayMembershipIndex]) + memIndexOpt = some(rlnRelayMembershipIndex) + + return ok((groupOpt, memKeyPairOpt, memIndexOpt)) + + proc hasDuplicate*(rlnPeer: WakuRLNRelay, msg: WakuMessage): RlnRelayResult[bool] = + ## returns true if there is another message in the `nullifierLog` of the `rlnPeer` with the same + ## epoch and nullifier as `msg`'s epoch and nullifier but different Shamir secret shares + ## otherwise, returns false + ## Returns an error if it cannot check for duplicates + + # extract the 
proof metadata of the supplied `msg` + let proofMD = ProofMetadata(nullifier: msg.proof.nullifier, + shareX: msg.proof.shareX, shareY: msg.proof.shareY) + + # check if the epoch exists + if not rlnPeer.nullifierLog.hasKey(msg.proof.epoch): + return ok(false) + try: + if rlnPeer.nullifierLog[msg.proof.epoch].contains(proofMD): + # there is an identical record, ignore the message + return ok(false) + + # check for a message with the same nullifier but different secret shares + let matched = rlnPeer.nullifierLog[msg.proof.epoch].filterIt(( + it.nullifier == proofMD.nullifier) and ((it.shareX != proofMD.shareX) or + (it.shareY != proofMD.shareY))) + + if matched.len != 0: + # there is a duplicate + return ok(true) + + # there is no duplicate + return ok(false) + + except KeyError as e: + return err("the epoch was not found") + + proc updateLog*(rlnPeer: WakuRLNRelay, msg: WakuMessage): RlnRelayResult[bool] = + ## extracts the `ProofMetadata` of the supplied message `msg` and + ## saves it in the `nullifierLog` of the `rlnPeer` + ## Returns an error if it cannot update the log + + let proofMD = ProofMetadata(nullifier: msg.proof.nullifier, + shareX: msg.proof.shareX, shareY: msg.proof.shareY) + debug "proof metadata", proofMD = proofMD + + # check if the epoch exists + if not rlnPeer.nullifierLog.hasKey(msg.proof.epoch): + rlnPeer.nullifierLog[msg.proof.epoch] = @[proofMD] + return ok(true) + + try: + # check if an identical record exists + if rlnPeer.nullifierLog[msg.proof.epoch].contains(proofMD): + return ok(true) + # add proofMD to the log + rlnPeer.nullifierLog[msg.proof.epoch].add(proofMD) + return ok(true) + except KeyError as e: + return err("the epoch was not found") + + proc toEpoch*(t: uint64): Epoch = + ## converts `t` to `Epoch` in little-endian order + let bytes = toBytes(t, Endianness.littleEndian) + debug "bytes", bytes = bytes + var epoch: Epoch + discard epoch.copyFrom(bytes) + return epoch + + proc fromEpoch*(epoch: Epoch): uint64 = + ## decodes bytes of
`epoch` (in little-endian) to uint64 + let t = fromBytesLE(uint64, array[32, byte](epoch)) + return t + + proc calcEpoch*(t: float64): Epoch = + ## gets time `t` as `float64` with subseconds resolution in the fractional part + ## and returns its corresponding rln `Epoch` value + let e = uint64(t/EpochUnitSeconds) + return toEpoch(e) + + proc getCurrentEpoch*(): Epoch = + ## gets the current rln Epoch time + return calcEpoch(epochTime()) + + proc absDiff*(e1, e2: Epoch): uint64 = + ## returns the absolute difference between the two rln `Epoch`s `e1` and `e2` + ## i.e., e1 - e2 + + # convert epochs to their corresponding unsigned numerical values + let + epoch1 = fromEpoch(e1) + epoch2 = fromEpoch(e2) + + # Manually perform an `abs` calculation + if epoch1 > epoch2: + return epoch1 - epoch2 + else: + return epoch2 - epoch1 + + proc validateMessage*(rlnPeer: WakuRLNRelay, msg: WakuMessage, + timeOption: Option[float64] = none(float64)): MessageValidationResult = + ## validate the supplied `msg` based on the waku-rln-relay routing protocol i.e., + ## the `msg`'s epoch is within MaxEpochGap of the current epoch + ## the `msg` has valid rate limit proof + ## the `msg` does not violate the rate limit + ## `timeOption` indicates Unix epoch time (fractional part holds sub-seconds) + ## if `timeOption` is supplied, then the current epoch is calculated based on that + + # track message count for metrics + waku_rln_messages_total.inc() + + # checks if the `msg`'s epoch is far from the current epoch + # it corresponds to the validation of rln external nullifier + var epoch: Epoch + if timeOption.isSome(): + epoch = calcEpoch(timeOption.get()) + else: + # get current rln epoch + epoch = getCurrentEpoch() + + debug "current epoch", currentEpoch = fromEpoch(epoch) + let + msgEpoch = msg.proof.epoch + # calculate the gaps + gap = absDiff(epoch, msgEpoch) + + debug "message epoch", msgEpoch = fromEpoch(msgEpoch) + + # validate the epoch + if gap > MaxEpochGap: + # message's epoch is
too old or too ahead + # accept messages whose epoch is within +-MaxEpochGap from the current epoch + warn "invalid message: epoch gap exceeds a threshold", gap = gap, + payload = string.fromBytes(msg.payload) + waku_rln_invalid_messages_total.inc(labelValues=["invalid_epoch"]) + return MessageValidationResult.Invalid + + ## TODO: FIXME after resolving this issue https://github.com/status-im/nwaku/issues/1247 + if not rlnPeer.validateRoot(msg.proof.merkleRoot): + debug "invalid message: provided root does not belong to acceptable window of roots", provided=msg.proof.merkleRoot, validRoots=rlnPeer.validMerkleRoots.mapIt(it.inHex()) + waku_rln_invalid_messages_total.inc(labelValues=["invalid_root"]) + # return MessageValidationResult.Invalid + + # verify the proof + let + contentTopicBytes = msg.contentTopic.toBytes + input = concat(msg.payload, contentTopicBytes) + + waku_rln_proof_verification_total.inc() + waku_rln_proof_verification_duration_seconds.nanosecondTime: + let proofVerificationRes = rlnPeer.rlnInstance.proofVerify(input, msg.proof) + + if proofVerificationRes.isErr(): + waku_rln_errors_total.inc(labelValues=["proof_verification"]) + warn "invalid message: proof verification failed", payload = string.fromBytes(msg.payload) + return MessageValidationResult.Invalid + if not proofVerificationRes.value(): + # invalid proof + debug "invalid message: invalid proof", payload = string.fromBytes(msg.payload) + waku_rln_invalid_messages_total.inc(labelValues=["invalid_proof"]) + return MessageValidationResult.Invalid + + # check if double messaging has happened + let hasDup = rlnPeer.hasDuplicate(msg) + if hasDup.isErr(): + waku_rln_errors_total.inc(labelValues=["duplicate_check"]) + elif hasDup.value == true: + debug "invalid message: message is spam", payload = string.fromBytes(msg.payload) + waku_rln_spam_messages_total.inc() + return MessageValidationResult.Spam + + # insert the message to the log + # the result of `updateLog` is discarded because message 
insertion is guaranteed by the implementation i.e., + # it will never error out + discard rlnPeer.updateLog(msg) + debug "message is valid", payload = string.fromBytes(msg.payload) + let rootIndex = rlnPeer.validMerkleRoots.find(msg.proof.merkleRoot) + waku_rln_valid_messages_total.observe(rootIndex.toFloat()) + return MessageValidationResult.Valid + + proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] = + ## it is a utility proc that prepares the `data` parameter of the proof generation procedure i.e., `proofGen` that resides in the current module + ## it extracts the `contentTopic` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence + let + contentTopicBytes = wakumessage.contentTopic.toBytes + output = concat(wakumessage.payload, contentTopicBytes) + return output + + proc appendRLNProof*(rlnPeer: WakuRLNRelay, msg: var WakuMessage, + senderEpochTime: float64): bool = + ## returns true if it can create and append a `RateLimitProof` to the supplied `msg` + ## returns false otherwise + ## `senderEpochTime` indicates the number of seconds passed since Unix epoch. The fractional part holds sub-seconds. 
+ ## The `epoch` field of `RateLimitProof` is derived from the provided `senderEpochTime` (using `calcEpoch()`) + + let input = msg.toRLNSignal() + + var proof: RateLimitProofResult = proofGen(rlnInstance = rlnPeer.rlnInstance, data = input, + memKeys = rlnPeer.membershipKeyPair, + memIndex = rlnPeer.membershipIndex, + epoch = calcEpoch(senderEpochTime)) + + if proof.isErr: + return false + + msg.proof = proof.value + return true + + proc addAll*(wakuRlnRelay: WakuRLNRelay, list: seq[IDCommitment]): RlnRelayResult[void] = + # add members to the Merkle tree of the `rlnInstance` + ## Returns an error if it cannot add any member to the Merkle tree + let membersAdded = wakuRlnRelay.insertMembers(0, list) + if not membersAdded.isOk(): + return err("failed to add members to the Merkle tree") + return ok() + + proc generateGroupUpdateHandler(rlnPeer: WakuRLNRelay): GroupUpdateHandler = + ## assuming all the members arrive in order + ## TODO: check the index and the pubkey depending on + ## the group update operation + var handler: GroupUpdateHandler + handler = proc(blockNumber: BlockNumber, members: seq[MembershipTuple]): RlnRelayResult[void] = + let startingIndex = members[0].index + debug "starting index", startingIndex = startingIndex, members = members.mapIt(it.idComm.inHex()) + let isSuccessful = rlnPeer.insertMembers(startingIndex, members.mapIt(it.idComm)) + if isSuccessful.isErr(): + return err("failed to add new members to the Merkle tree") + else: + debug "new members added to the Merkle tree", pubkeys=members.mapIt(it.idComm.inHex()) , startingIndex=startingIndex + debug "acceptable window", validRoots=rlnPeer.validMerkleRoots.mapIt(it.inHex()) + let lastIndex = members[0].index + members.len.uint - 1 + let indexGap = startingIndex - rlnPeer.lastSeenMembershipIndex + if not (toSeq(startingIndex..lastIndex) == members.mapIt(it.index)): + return err("the indexes of the new members are not in order") + if indexGap != 1.uint: + warn "membership index gap, may have 
lost connection", lastIndex, currIndex=rlnPeer.lastSeenMembershipIndex, indexGap = indexGap + rlnPeer.lastSeenMembershipIndex = lastIndex + rlnPeer.lastProcessedBlock = blockNumber + debug "last processed block", blockNumber = blockNumber + return ok() + return handler + + proc parse*(event: type MemberRegistered, + log: JsonNode): RlnRelayResult[MembershipTuple] = + ## parses the `data` parameter of the `MemberRegistered` event `log` + ## returns an error if it cannot parse the `data` parameter + var pubkey: UInt256 + var index: UInt256 + var data: string + # Remove the 0x prefix + try: + data = strip0xPrefix(log["data"].getStr()) + except CatchableError: + return err("failed to parse the data field of the MemberRegistered event: " & getCurrentExceptionMsg()) + var offset = 0 + try: + # Parse the pubkey + offset += decode(data, offset, pubkey) + # Parse the index + offset += decode(data, offset, index) + return ok((index: index.toMembershipIndex(), + idComm: pubkey.toIDCommitment())) + except: + return err("failed to parse the data field of the MemberRegistered event") + + type BlockTable = OrderedTable[BlockNumber, seq[MembershipTuple]] + proc getHistoricalEvents*(ethClientUri: string, + contractAddress: Address, + fromBlock: string = "0x0", + toBlock: string = "latest"): Future[RlnRelayResult[BlockTable]] {.async, gcsafe.} = + ## `ethClientUri` is the URI of the Ethereum client + ## `contractAddress` is the address of the contract + ## `fromBlock` is the block number from which the events are fetched + ## `toBlock` is the block number to which the events are fetched + ## returns a table that maps block numbers to the list of members registered in that block + ## returns an error if it cannot retrieve the historical events + let web3 = await newWeb3(ethClientUri) + let contract = web3.contractSender(MembershipContract, contractAddress) + # Get the historical events, and insert memberships into the tree + let historicalEvents = await 
contract.getJsonLogs(MemberRegistered, + fromBlock=some(fromBlock.blockId()), + toBlock=some(toBlock.blockId())) + # Create a table that maps block numbers to the list of members registered in that block + var blockTable = OrderedTable[BlockNumber, seq[MembershipTuple]]() + for log in historicalEvents: + # batch according to log.blockNumber + let blockNumber = parseHexInt(log["blockNumber"].getStr()).uint + let parsedEventRes = parse(MemberRegistered, log) + + if parsedEventRes.isErr(): + error "failed to parse the MemberRegistered event", error=parsedEventRes.error() + return err("failed to parse the MemberRegistered event") + let parsedEvent = parsedEventRes.get() + # Add the parsed event to the table + if blockTable.hasKey(blockNumber): + blockTable[blockNumber].add(parsedEvent) + else: + blockTable[blockNumber] = @[parsedEvent] + return ok(blockTable) + + proc subscribeToGroupEvents*(ethClientUri: string, + ethAccountAddress: Option[Address] = none(Address), + contractAddress: Address, + blockNumber: string = "0x0", + handler: GroupUpdateHandler) {.async, gcsafe.} = + ## connects to the eth client whose URI is supplied as `ethClientUri` + ## subscribes to the `MemberRegistered` event emitted from the `MembershipContract` which is available on the supplied `contractAddress` + ## it collects all the events starting from the given `blockNumber` + ## for every received block, it calls the `handler` + let web3 = await newWeb3(ethClientUri) + let contract = web3.contractSender(MembershipContract, contractAddress) + + let blockTableRes = await getHistoricalEvents(ethClientUri, + contractAddress, + fromBlock=blockNumber) + if blockTableRes.isErr(): + error "failed to retrieve historical events", error=blockTableRes.error + return + let blockTable = blockTableRes.get() + # Update MT by batch + for blockNumber, members in blockTable.pairs(): + debug "updating the Merkle tree", blockNumber=blockNumber, members=members + let res = handler(blockNumber, members) + if 
res.isErr(): + error "failed to update the Merkle tree", error=res.error + + # We don't need the block table after this point + discard blockTable + + var latestBlock: BlockNumber + let handleLog = proc(blockHeader: BlockHeader) {.async, gcsafe.} = + try: + let membershipRegistrationLogs = await contract.getJsonLogs(MemberRegistered, + blockHash = some(blockheader.hash)) + if membershipRegistrationLogs.len == 0: + return + var members: seq[MembershipTuple] + for log in membershipRegistrationLogs: + let parsedEventRes = parse(MemberRegistered, log) + if parsedEventRes.isErr(): + fatal "failed to parse the MemberRegistered event", error=parsedEventRes.error() + return + let parsedEvent = parsedEventRes.get() + members.add(parsedEvent) + let res = handler(blockHeader.number.uint, members) + if res.isErr(): + error "failed to update the Merkle tree", error=res.error + except CatchableError: + warn "failed to get logs", error=getCurrentExceptionMsg() + return + let newHeadCallback = proc (blockheader: BlockHeader) {.gcsafe.} = + latestBlock = blockheader.number.uint + debug "block received", blockNumber = latestBlock + # get logs from the last block + try: + asyncSpawn handleLog(blockHeader) + except CatchableError: + warn "failed to handle log: ", error=getCurrentExceptionMsg() + + let newHeadErrorHandler = proc (err: CatchableError) {.gcsafe.} = + error "Error from subscription: ", err=err.msg + discard await web3.subscribeForBlockHeaders(newHeadCallback, newHeadErrorHandler) + + web3.onDisconnect = proc() = + debug "connection to ethereum node dropped", lastBlock = latestBlock + + proc handleGroupUpdates*(rlnPeer: WakuRLNRelay) {.async, gcsafe.} = + ## generates the groupUpdateHandler which is called when a new member is registered, + ## and has the WakuRLNRelay instance as a closure + let handler = generateGroupUpdateHandler(rlnPeer) + await subscribeToGroupEvents(ethClientUri = rlnPeer.ethClientAddress, + ethAccountAddress = rlnPeer.ethAccountAddress, + 
contractAddress = rlnPeer.membershipContractAddress, + handler = handler) + + proc addRLNRelayValidator*(node: WakuNode, pubsubTopic: PubsubTopic, contentTopic: ContentTopic, spamHandler: Option[SpamHandler] = none(SpamHandler)) = + ## this procedure is a thin wrapper for the pubsub addValidator method + ## it sets a validator for the waku messages published on the supplied pubsubTopic and contentTopic + ## if contentTopic is empty, then validation takes place for All the messages published on the given pubsubTopic + ## the message validation logic is according to https://rfc.vac.dev/spec/17/ + proc validator(topic: string, message: messages.Message): Future[pubsub.ValidationResult] {.async.} = + trace "rln-relay topic validator is called" + let msg = WakuMessage.decode(message.data) + if msg.isOk(): + let + wakumessage = msg.value() + payload = string.fromBytes(wakumessage.payload) + + # check the contentTopic + if (wakumessage.contentTopic != "") and (contentTopic != "") and (wakumessage.contentTopic != contentTopic): + trace "content topic did not match:", contentTopic=wakumessage.contentTopic, payload=payload + return pubsub.ValidationResult.Accept + + # validate the message + let + validationRes = node.wakuRlnRelay.validateMessage(wakumessage) + proof = toHex(wakumessage.proof.proof) + epoch = fromEpoch(wakumessage.proof.epoch) + root = inHex(wakumessage.proof.merkleRoot) + shareX = inHex(wakumessage.proof.shareX) + shareY = inHex(wakumessage.proof.shareY) + nullifier = inHex(wakumessage.proof.nullifier) + case validationRes: + of Valid: + debug "message validity is verified, relaying:", contentTopic=wakumessage.contentTopic, epoch=epoch, timestamp=wakumessage.timestamp, payload=payload + trace "message validity is verified, relaying:", proof=proof, root=root, shareX=shareX, shareY=shareY, nullifier=nullifier + return pubsub.ValidationResult.Accept + of Invalid: + debug "message validity could not be verified, discarding:", 
contentTopic=wakumessage.contentTopic, epoch=epoch, timestamp=wakumessage.timestamp, payload=payload + trace "message validity could not be verified, discarding:", proof=proof, root=root, shareX=shareX, shareY=shareY, nullifier=nullifier + return pubsub.ValidationResult.Reject + of Spam: + debug "A spam message is found! yay! discarding:", contentTopic=wakumessage.contentTopic, epoch=epoch, timestamp=wakumessage.timestamp, payload=payload + trace "A spam message is found! yay! discarding:", proof=proof, root=root, shareX=shareX, shareY=shareY, nullifier=nullifier + if spamHandler.isSome: + let handler = spamHandler.get + handler(wakumessage) + return pubsub.ValidationResult.Reject + # set a validator for the supplied pubsubTopic + let pb = PubSub(node.wakuRelay) + pb.addValidator(pubsubTopic, validator) + + proc mountRlnRelayStatic*(node: WakuNode, + group: seq[IDCommitment], + memKeyPair: MembershipKeyPair, + memIndex: MembershipIndex, + pubsubTopic: PubsubTopic, + contentTopic: ContentTopic, + spamHandler: Option[SpamHandler] = none(SpamHandler)): RlnRelayResult[void] = + # Returns RlnRelayResult[void] to indicate the success of the call + + debug "mounting rln-relay in off-chain/static mode" + # check whether inputs are provided + # relay protocol is the prerequisite of rln-relay + if node.wakuRelay.isNil(): + return err("WakuRelay protocol is not mounted") + # check whether the pubsub topic is supported at the relay level + if pubsubTopic notin node.wakuRelay.defaultPubsubTopics: + return err("The relay protocol does not support the configured pubsub topic") + + debug "rln-relay input validation passed" + + # check the peer's index and the inclusion of user's identity commitment in the group + if not memKeyPair.idCommitment == group[int(memIndex)]: + return err("The peer's index is not consistent with the group") + + # create an RLN instance + let rlnInstance = createRLNInstance() + if rlnInstance.isErr(): + return err("RLN instance creation failed") + let rln 
= rlnInstance.get() + + # create the WakuRLNRelay + let rlnPeer = WakuRLNRelay(membershipKeyPair: memKeyPair, + membershipIndex: memIndex, + rlnInstance: rln, + pubsubTopic: pubsubTopic, + contentTopic: contentTopic) + + # add members to the Merkle tree + let membersAdded = rlnPeer.insertMembers(0, group) + if membersAdded.isErr(): + return err("member addition to the Merkle tree failed: " & membersAdded.error) + + # adds a topic validator for the supplied pubsub topic at the relay protocol + # messages published on this pubsub topic will be relayed upon a successful validation, otherwise they will be dropped + # the topic validator checks for the correct non-spamming proof of the message + node.addRLNRelayValidator(pubsubTopic, contentTopic, spamHandler) + debug "rln relay topic validator is mounted successfully", pubsubTopic=pubsubTopic, contentTopic=contentTopic + + node.wakuRlnRelay = rlnPeer + return ok() + + proc mountRlnRelayDynamic*(node: WakuNode, + ethClientAddr: string = "", + ethAccountAddress: Option[web3.Address] = none(web3.Address), + ethAccountPrivKeyOpt: Option[keys.PrivateKey], + memContractAddr: web3.Address, + memKeyPair: Option[MembershipKeyPair] = none(MembershipKeyPair), + memIndex: Option[MembershipIndex] = none(MembershipIndex), + pubsubTopic: PubsubTopic, + contentTopic: ContentTopic, + spamHandler: Option[SpamHandler] = none(SpamHandler), + registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)) : Future[RlnRelayResult[void]] {.async.} = + debug "mounting rln-relay in on-chain/dynamic mode" + # TODO return a bool value to indicate the success of the call + # relay protocol is the prerequisite of rln-relay + if node.wakuRelay.isNil: + return err("WakuRelay protocol is not mounted.") + # check whether the pubsub topic is supported at the relay level + if pubsubTopic notin node.wakuRelay.defaultPubsubTopics: + return err("WakuRelay protocol does not support the configured pubsub topic.") + debug "rln-relay input 
validation passed" + + # create an RLN instance + let rlnInstance = createRLNInstance() + + if rlnInstance.isErr(): + return err("RLN instance creation failed.") + let rln = rlnInstance.get() + + # prepare rln membership key pair + var + keyPair: MembershipKeyPair + rlnIndex: MembershipIndex + if memKeyPair.isNone: # no rln credentials provided + if ethAccountPrivKeyOpt.isSome: # if an ethereum private key is supplied, then create rln credentials and register to the membership contract + trace "no rln-relay key is provided, generating one" + let keyPairRes = rln.membershipKeyGen() + if keyPairRes.isErr(): + error "failed to generate rln-relay key pair" + return err("failed to generate rln-relay key pair: " & keyPairRes.error()) + keyPair = keyPairRes.value() + # register the rln-relay peer to the membership contract + waku_rln_registration_duration_seconds.nanosecondTime: + let regIndexRes = await register(idComm = keyPair.idCommitment, + ethAccountAddress = ethAccountAddress, + ethAccountPrivKey = ethAccountPrivKeyOpt.get(), + ethClientAddress = ethClientAddr, + membershipContractAddress = memContractAddr, + registrationHandler = registrationHandler) + # check whether registration is done + if regIndexRes.isErr(): + debug "membership registration failed", err=regIndexRes.error() + return err("membership registration failed: " & regIndexRes.error()) + rlnIndex = regIndexRes.value + debug "peer is successfully registered into the membership contract" + else: # if no eth private key is available, skip registration + debug "running waku-rln-relay in relay-only mode" + else: + debug "Peer is already registered to the membership contract" + keyPair = memKeyPair.get() + rlnIndex = memIndex.get() + + # create the WakuRLNRelay + var rlnPeer = WakuRLNRelay(membershipKeyPair: keyPair, + membershipIndex: rlnIndex, + membershipContractAddress: memContractAddr, + ethClientAddress: ethClientAddr, + ethAccountAddress: ethAccountAddress, + ethAccountPrivateKey: 
ethAccountPrivKeyOpt, + rlnInstance: rln, + pubsubTopic: pubsubTopic, + contentTopic: contentTopic) + + asyncSpawn rlnPeer.handleGroupUpdates() + debug "dynamic group management is started" + # adds a topic validator for the supplied pubsub topic at the relay protocol + # messages published on this pubsub topic will be relayed upon a successful validation, otherwise they will be dropped + # the topic validator checks for the correct non-spamming proof of the message + addRLNRelayValidator(node, pubsubTopic, contentTopic, spamHandler) + debug "rln relay topic validator is mounted successfully", pubsubTopic=pubsubTopic, contentTopic=contentTopic + + node.wakuRlnRelay = rlnPeer + return ok() + + proc writeRlnCredentials*(path: string, + credentials: RlnMembershipCredentials, + password: string): RlnRelayResult[void] = + # Returns RlnRelayResult[void], which indicates the success of the call + info "Storing RLN credentials" + var jsonString: string + jsonString.toUgly(%credentials) + let keyfile = createKeyFileJson(toBytes(jsonString), password) + if keyfile.isErr(): + return err("Error while creating keyfile for RLN credentials") + if saveKeyFile(path, keyfile.get()).isErr(): + return err("Error while saving keyfile for RLN credentials") + return ok() + + # Attempts decryptions of all keyfiles with the provided password. + # If one or more credentials are successfully decrypted, the max(min(index,number_decrypted),0)-th is returned. + proc readRlnCredentials*(path: string, + password: string, + index: int = 0): RlnRelayResult[Option[RlnMembershipCredentials]] = + # Returns RlnRelayResult[Option[RlnMembershipCredentials]], which indicates the success of the call + info "Reading RLN credentials" + # With regards to printing the keys, it is purely for debugging purposes so that the user becomes explicitly aware of the current keys in use when nwaku is started. + # Note that this is only until the RLN contract being used is the one deployed on Goerli testnet. 
+ # These prints need to omitted once RLN contract is deployed on Ethereum mainnet and using valuable funds for staking. + waku_rln_membership_credentials_import_duration_seconds.nanosecondTime: + + try: + var decodedKeyfiles = loadKeyFiles(path, password) + + if decodedKeyfiles.isOk(): + var decodedRlnCredentials = decodedKeyfiles.get() + debug "Successfully decrypted keyfiles for the provided password", numberKeyfilesDecrypted=decodedRlnCredentials.len + # We should return the index-th decrypted credential, but we ensure to not overflow + let credentialIndex = max(min(index, decodedRlnCredentials.len - 1), 0) + debug "Picking credential with (adjusted) index", inputIndex=index, adjustedIndex=credentialIndex + let jsonObject = parseJson(string.fromBytes(decodedRlnCredentials[credentialIndex].get())) + let deserializedRlnCredentials = to(jsonObject, RlnMembershipCredentials) + debug "Deserialized RLN credentials", rlnCredentials=deserializedRlnCredentials + return ok(some(deserializedRlnCredentials)) + else: + debug "Unable to decrypt RLN credentials with provided password. ", error=decodedKeyfiles.error + return ok(none(RlnMembershipCredentials)) + except: + return err("Error while loading keyfile for RLN credentials at " & path) + + proc mount(node: WakuNode, + conf: WakuRlnConfig, + spamHandler: Option[SpamHandler] = none(SpamHandler), + registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler) + ): Future[RlnRelayResult[void]] {.async.} = + # Returns RlnRelayResult[void], which indicates the success of the call + if not conf.rlnRelayDynamic: + info " setting up waku-rln-relay in off-chain mode... 
" + # set up rln relay inputs + let staticSetupRes = rlnRelayStaticSetUp(MembershipIndex(conf.rlnRelayMembershipIndex)) + if staticSetupRes.isErr(): + return err("rln relay static setup failed: " & staticSetupRes.error()) + let (groupOpt, memKeyPairOpt, memIndexOpt) = staticSetupRes.get() + if memIndexOpt.isNone: + error "failed to mount WakuRLNRelay" + return err("failed to mount WakuRLNRelay") + else: + # mount rlnrelay in off-chain mode with a static group of users + let mountRes = node.mountRlnRelayStatic(group = groupOpt.get(), + memKeyPair = memKeyPairOpt.get(), + memIndex= memIndexOpt.get(), + pubsubTopic = conf.rlnRelayPubsubTopic, + contentTopic = conf.rlnRelayContentTopic, + spamHandler = spamHandler) + + if mountRes.isErr(): + return err("Failed to mount WakuRLNRelay: " & mountRes.error()) + + info "membership id key", idkey=memKeyPairOpt.get().idKey.inHex() + info "membership id commitment key", idCommitmentkey=memKeyPairOpt.get().idCommitment.inHex() + + # check the correct construction of the tree by comparing the calculated root against the expected root + # no error should happen as it is already captured in the unit tests + # TODO have added this check to account for unseen corner cases, will remove it later + let + rootRes = node.wakuRlnRelay.rlnInstance.getMerkleRoot() + expectedRoot = StaticGroupMerkleRoot + + if rootRes.isErr(): + return err(rootRes.error()) + + let root = rootRes.value() + + if root.inHex() != expectedRoot: + error "root mismatch: something went wrong not in Merkle tree construction" + debug "the calculated root", root + info "WakuRLNRelay is mounted successfully", pubsubtopic=conf.rlnRelayPubsubTopic, contentTopic=conf.rlnRelayContentTopic + return ok() + else: # mount the rln relay protocol in the on-chain/dynamic mode + debug "setting up waku-rln-relay in on-chain mode... 
" + + debug "on-chain setup parameters", contractAddress=conf.rlnRelayEthContractAddress + # read related inputs to run rln-relay in on-chain mode and do type conversion when needed + let + ethClientAddr = conf.rlnRelayEthClientAddress + + var ethMemContractAddress: web3.Address + try: + ethMemContractAddress = web3.fromHex(web3.Address, conf.rlnRelayEthContractAddress) + except ValueError as err: + return err("invalid eth contract address: " & err.msg) + var ethAccountPrivKeyOpt = none(keys.PrivateKey) + var ethAccountAddressOpt = none(Address) + var credentials = none(RlnMembershipCredentials) + var res: RlnRelayResult[void] + + if conf.rlnRelayEthAccountPrivateKey != "": + ethAccountPrivKeyOpt = some(keys.PrivateKey(SkSecretKey.fromHex(conf.rlnRelayEthAccountPrivateKey).value)) + + if conf.rlnRelayEthAccountAddress != "": + var ethAccountAddress: web3.Address + try: + ethAccountAddress = web3.fromHex(web3.Address, conf.rlnRelayEthAccountAddress) + except ValueError as err: + return err("invalid eth account address: " & err.msg) + ethAccountAddressOpt = some(ethAccountAddress) + + # if the rlnRelayCredPath config option is non-empty, then rln-relay credentials should be persisted + # if the path does not contain any credential file, then a new set is generated and pesisted in the same path + # if there is a credential file, then no new credentials are generated, instead the content of the file is read and used to mount rln-relay + if conf.rlnRelayCredPath != "": + + let rlnRelayCredPath = joinPath(conf.rlnRelayCredPath, RlnCredentialsFilename) + debug "rln-relay credential path", rlnRelayCredPath + + # check if there is an rln-relay credential file in the supplied path + if fileExists(rlnRelayCredPath): + + info "A RLN credential file exists in provided path", path=rlnRelayCredPath + + # retrieve rln-relay credential + let readCredentialsRes = readRlnCredentials(rlnRelayCredPath, conf.rlnRelayCredentialsPassword) + + if readCredentialsRes.isErr(): + return 
err("RLN credentials cannot be read: " & readCredentialsRes.error()) + + credentials = readCredentialsRes.get() + + else: # there is no credential file available in the supplied path + # mount the rln-relay protocol leaving rln-relay credentials arguments unassigned + # this infroms mountRlnRelayDynamic proc that new credentials should be generated and registered to the membership contract + info "no rln credential is provided" + + if credentials.isSome(): + # mount rln-relay in on-chain mode, with credentials that were read or generated + res = await node.mountRlnRelayDynamic(memContractAddr = ethMemContractAddress, + ethClientAddr = ethClientAddr, + ethAccountAddress = ethAccountAddressOpt, + ethAccountPrivKeyOpt = ethAccountPrivKeyOpt, + pubsubTopic = conf.rlnRelayPubsubTopic, + contentTopic = conf.rlnRelayContentTopic, + spamHandler = spamHandler, + registrationHandler = registrationHandler, + memKeyPair = some(credentials.get().membershipKeyPair), + memIndex = some(credentials.get().rlnIndex)) + else: + # mount rln-relay in on-chain mode, with the provided private key + res = await node.mountRlnRelayDynamic(memContractAddr = ethMemContractAddress, + ethClientAddr = ethClientAddr, + ethAccountAddress = ethAccountAddressOpt, + ethAccountPrivKeyOpt = ethAccountPrivKeyOpt, + pubsubTopic = conf.rlnRelayPubsubTopic, + contentTopic = conf.rlnRelayContentTopic, + spamHandler = spamHandler, + registrationHandler = registrationHandler) + + # TODO should be replaced with key-store with proper encryption + # persist rln credential + credentials = some(RlnMembershipCredentials(rlnIndex: node.wakuRlnRelay.membershipIndex, + membershipKeyPair: node.wakuRlnRelay.membershipKeyPair)) + if writeRlnCredentials(rlnRelayCredPath, credentials.get(), conf.rlnRelayCredentialsPassword).isErr(): + return err("error in storing rln credentials") + + else: + # do not persist or use a persisted rln-relay credential + # a new credential will be generated during the mount process but will not 
be persisted + info "no need to persist or use a persisted rln-relay credential" + res = await node.mountRlnRelayDynamic(memContractAddr = ethMemContractAddress, ethClientAddr = ethClientAddr, + ethAccountAddress = ethAccountAddressOpt, ethAccountPrivKeyOpt = ethAccountPrivKeyOpt, pubsubTopic = conf.rlnRelayPubsubTopic, + contentTopic = conf.rlnRelayContentTopic, spamHandler = spamHandler, registrationHandler = registrationHandler) + + if res.isErr(): + return err("dynamic rln-relay could not be mounted: " & res.error()) + return ok() + + proc mountRlnRelay*(node: WakuNode, + conf: WakuRlnConfig, + spamHandler: Option[SpamHandler] = none(SpamHandler), + registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler) + ): Future[RlnRelayResult[void]] {.async.} = + ## Mounts the rln-relay protocol on the node. + ## The rln-relay protocol can be mounted in two modes: on-chain and off-chain. + ## Returns an error if the rln-relay protocol could not be mounted. + waku_rln_relay_mounting_duration_seconds.nanosecondTime: + let res = await mount( + node, + conf, + spamHandler, + registrationHandler + ) + return res