mirror of https://github.com/logos-messaging/logos-messaging-nim.git
synced 2026-01-07 16:33:08 +00:00

fix: refact rln-relay and post sync test (#3434)

This commit is contained in:
parent 413c6371d8
commit 753891217e

@@ -1,42 +1,59 @@
 import
-  math,
-  std/sequtils,
-  results,
-  options,
+  std/[strutils, times, sequtils, osproc], math, results, options, testutils/unittests
+import
   waku/[
     waku_rln_relay/protocol_types,
     waku_rln_relay/rln,
     waku_rln_relay,
     waku_rln_relay/conversion_utils,
-    waku_rln_relay/group_manager/static/group_manager,
-  ]
-import std/[times, os]
+    waku_rln_relay/group_manager/on_chain/group_manager,
+  ],
+  tests/waku_rln_relay/utils_onchain
 
 proc main(): Future[string] {.async, gcsafe.} =
-  let rlnIns = createRLNInstance(20).get()
-  let credentials = toSeq(0 .. 1000).mapIt(membershipKeyGen(rlnIns).get())
-
-  let manager = StaticGroupManager(
-    rlnInstance: rlnIns,
-    groupSize: 1000,
-    membershipIndex: some(MembershipIndex(900)),
-    groupKeys: credentials,
-  )
-
-  await manager.init()
+  # Spin up a local Ethereum JSON-RPC (Anvil) and deploy the RLN contract
+  let anvilProc = runAnvil()
+  defer:
+    stopAnvil(anvilProc)
+
+  # Set up an On-chain group manager (includes contract deployment)
+  let manager = await setupOnchainGroupManager()
+  (await manager.init()).isOkOr:
+    raiseAssert $error
+
+  # Register a new member so that we can later generate proofs
+  let idCredentials = generateCredentials(manager.rlnInstance)
+
+  try:
+    await manager.register(idCredentials, UserMessageLimit(100))
+  except Exception, CatchableError:
+    assert false, "exception raised: " & getCurrentExceptionMsg()
+
+  let rootUpdated = await manager.updateRoots()
+
+  if rootUpdated:
+    let proofResult = await manager.fetchMerkleProofElements()
+    if proofResult.isErr():
+      error "Failed to fetch Merkle proof", error = proofResult.error
+    manager.merkleProofCache = proofResult.get()
+
+  let epoch = default(Epoch)
+  debug "epoch in bytes", epochHex = epoch.inHex()
   let data: seq[byte] = newSeq[byte](1024)
 
   var proofGenTimes: seq[times.Duration] = @[]
   var proofVerTimes: seq[times.Duration] = @[]
-  for i in 0 .. 50:
+  for i in 1 .. 100:
     var time = getTime()
-    let proof = manager.generateProof(data, default(Epoch)).get()
+    let proof = manager.generateProof(data, epoch, MessageId(i.uint8)).valueOr:
+      raiseAssert $error
     proofGenTimes.add(getTime() - time)
 
     time = getTime()
-    let res = manager.verifyProof(data, proof).get()
+    let ok = manager.verifyProof(data, proof).valueOr:
+      raiseAssert $error
     proofVerTimes.add(getTime() - time)
 
   echo "Proof generation times: ", sum(proofGenTimes) div len(proofGenTimes)
@@ -44,6 +61,6 @@ proc main(): Future[string] {.async, gcsafe.} =
 
 when isMainModule:
   try:
-    waitFor(main())
+    discard waitFor main()
   except CatchableError as e:
     raise e
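
The register, updateRoots, fetchMerkleProofElements and merkleProofCache sequence above is the pattern this commit repeats in every on-chain test. Below is a minimal sketch of that refresh step as a standalone helper, assuming only the OnchainGroupManager API visible in this diff (updateRoots, fetchMerkleProofElements, merkleProofCache) plus chronos and chronicles; the helper name refreshMerkleProofCache is hypothetical, not part of the commit.

# Hypothetical helper; mirrors the refresh logic used in the benchmark above.
proc refreshMerkleProofCache(manager: OnchainGroupManager): Future[bool] {.async.} =
  ## Returns true when the cached Merkle proof elements were (re)fetched.
  let rootUpdated = await manager.updateRoots()
  if rootUpdated or manager.merkleProofCache.len == 0:
    let proofResult = await manager.fetchMerkleProofElements()
    if proofResult.isErr():
      error "Failed to fetch Merkle proof", error = proofResult.error
      return false
    manager.merkleProofCache = proofResult.get()
    return true
  return false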

@@ -1,7 +1,7 @@
 {.used.}
 
 import
-  std/[options, tempfiles, net],
+  std/[options, tempfiles, net, osproc],
   testutils/unittests,
   chronos,
   std/strformat,
@@ -17,8 +17,9 @@ import
     waku_lightpush_legacy/protocol_metrics,
     waku_rln_relay,
   ],
-  ../testlib/[wakucore, wakunode, testasync, futures],
-  ../resources/payloads
+  ../testlib/[wakucore, wakunode, testasync, futures, testutils],
+  ../resources/payloads,
+  ../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain]
 
 suite "Waku Legacy Lightpush - End To End":
   var
@@ -110,6 +111,8 @@ suite "RLN Proofs as a Lightpush Service":
 
     server {.threadvar.}: WakuNode
     client {.threadvar.}: WakuNode
+    anvilProc {.threadvar.}: Process
+    manager {.threadvar.}: OnchainGroupManager
 
     serverRemotePeerInfo {.threadvar.}: RemotePeerInfo
     pubsubTopic {.threadvar.}: PubsubTopic
@@ -131,13 +134,14 @@ suite "RLN Proofs as a Lightpush Service":
     server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
     client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
 
+    anvilProc = runAnvil()
+    manager = waitFor setupOnchainGroupManager()
+
     # mount rln-relay
-    let wakuRlnConfig = WakuRlnConfig(
-      dynamic: false,
-      credIndex: some(1.uint),
-      userMessageLimit: 1,
-      epochSizeSec: 1,
-      treePath: genTempPath("rln_tree", "wakunode"),
+    let wakuRlnConfig = getWakuRlnConfig(
+      manager = manager,
+      treePath = genTempPath("rln_tree", "wakunode_1"),
+      index = MembershipIndex(1),
     )
 
     await allFutures(server.start(), client.start())
@@ -149,6 +153,24 @@ suite "RLN Proofs as a Lightpush Service":
     await server.mountLegacyLightPush()
     client.mountLegacyLightPushClient()
 
+    let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager)
+    let idCredentials1 = generateCredentials(manager1.rlnInstance)
+
+    try:
+      waitFor manager1.register(idCredentials1, UserMessageLimit(20))
+    except Exception, CatchableError:
+      assert false,
+        "exception raised when calling register: " & getCurrentExceptionMsg()
+
+    let rootUpdated1 = waitFor manager1.updateRoots()
+    debug "Updated root for node1", rootUpdated1
+
+    if rootUpdated1:
+      let proofResult = waitFor manager1.fetchMerkleProofElements()
+      if proofResult.isErr():
+        error "Failed to fetch Merkle proof", error = proofResult.error
+      manager1.merkleProofCache = proofResult.get()
+
     serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
     pubsubTopic = DefaultPubsubTopic
     contentTopic = DefaultContentTopic
@@ -156,6 +178,7 @@ suite "RLN Proofs as a Lightpush Service":
 
   asyncTeardown:
     await server.stop()
+    stopAnvil(anvilProc)
 
 suite "Lightpush attaching RLN proofs":
   asyncTest "Message is published when RLN enabled":
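
Both lightpush suites (this one and the non-legacy variant below) now share the same per-suite Anvil lifecycle. The following is a condensed sketch of that skeleton, assuming the helpers this diff pulls in from utils_onchain (runAnvil, stopAnvil, setupOnchainGroupManager) and std/osproc's Process type; the import paths are illustrative and follow the tests' directory layout.

import std/osproc
import testutils/unittests, chronos
import waku/waku_rln_relay
import ../waku_rln_relay/utils_onchain # runAnvil, stopAnvil, setupOnchainGroupManager

suite "Example: per-suite Anvil lifecycle":
  var
    anvilProc {.threadvar.}: Process
    manager {.threadvar.}: OnchainGroupManager

  setup:
    anvilProc = runAnvil()                       # start a local Ethereum JSON-RPC node
    manager = waitFor setupOnchainGroupManager() # deploy the RLN contract and build the manager

  teardown:
    stopAnvil(anvilProc)                         # always tear the node down, even on failure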

@@ -1,7 +1,7 @@
 {.used.}
 
 import
-  std/[options, tempfiles],
+  std/[options, tempfiles, osproc],
   testutils/unittests,
   chronos,
   std/strformat,
@@ -10,7 +10,8 @@ import
 import
   waku/[waku_core, node/peer_manager, node/waku_node, waku_lightpush, waku_rln_relay],
   ../testlib/[wakucore, wakunode, testasync, futures],
-  ../resources/payloads
+  ../resources/payloads,
+  ../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain]
 
 const PublishedToOnePeer = 1
 
@@ -104,6 +105,8 @@ suite "RLN Proofs as a Lightpush Service":
 
     server {.threadvar.}: WakuNode
     client {.threadvar.}: WakuNode
+    anvilProc {.threadvar.}: Process
+    manager {.threadvar.}: OnchainGroupManager
 
     serverRemotePeerInfo {.threadvar.}: RemotePeerInfo
     pubsubTopic {.threadvar.}: PubsubTopic
@@ -125,13 +128,14 @@ suite "RLN Proofs as a Lightpush Service":
     server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
     client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
 
+    anvilProc = runAnvil()
+    manager = waitFor setupOnchainGroupManager()
+
     # mount rln-relay
-    let wakuRlnConfig = WakuRlnConfig(
-      dynamic: false,
-      credIndex: some(1.uint),
-      userMessageLimit: 1,
-      epochSizeSec: 1,
-      treePath: genTempPath("rln_tree", "wakunode"),
+    let wakuRlnConfig = getWakuRlnConfig(
+      manager = manager,
+      treePath = genTempPath("rln_tree", "wakunode_1"),
+      index = MembershipIndex(1),
     )
 
     await allFutures(server.start(), client.start())
@@ -143,6 +147,24 @@ suite "RLN Proofs as a Lightpush Service":
     await server.mountLightPush()
     client.mountLightPushClient()
 
+    let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager)
+    let idCredentials1 = generateCredentials(manager1.rlnInstance)
+
+    try:
+      waitFor manager1.register(idCredentials1, UserMessageLimit(20))
+    except Exception, CatchableError:
+      assert false,
+        "exception raised when calling register: " & getCurrentExceptionMsg()
+
+    let rootUpdated1 = waitFor manager1.updateRoots()
+    debug "Updated root for node1", rootUpdated1
+
+    if rootUpdated1:
+      let proofResult = waitFor manager1.fetchMerkleProofElements()
+      if proofResult.isErr():
+        error "Failed to fetch Merkle proof", error = proofResult.error
+      manager1.merkleProofCache = proofResult.get()
+
     serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
     pubsubTopic = DefaultPubsubTopic
     contentTopic = DefaultContentTopic
@@ -150,6 +172,7 @@ suite "RLN Proofs as a Lightpush Service":
 
   asyncTeardown:
     await server.stop()
+    stopAnvil(anvilProc)
 
 suite "Lightpush attaching RLN proofs":
   asyncTest "Message is published when RLN enabled":

@@ -15,7 +15,8 @@ import
   waku/waku_rln_relay/rln/wrappers,
   ./waku_rln_relay_utils,
   ../../testlib/[simple_mock, assertions],
-  ../../waku_keystore/utils
+  ../../waku_keystore/utils,
+  ../../testlib/testutils
 
 from std/times import epochTime
 
@@ -98,7 +99,7 @@ suite "RlnConfig":
   suite "createRLNInstance":
     test "ok":
      # When we create the RLN instance
-      let rlnRes: RLNResult = createRLNInstance(15, "my.db")
+      let rlnRes: RLNResult = createRLNInstance(15)
 
      # Then it succeeds
      check:
@@ -124,7 +125,7 @@ suite "RlnConfig":
        newCircuitMock
 
      # When we create the RLN instance
-      let rlnRes: RLNResult = createRLNInstance(15, "my.db")
+      let rlnRes: RLNResult = createRLNInstance(15)
 
      # Then it fails
      check:
@@ -133,42 +134,3 @@ suite "RlnConfig":
      # Cleanup
      mock(new_circuit):
        backup
-
-  suite "proofGen":
-    test "Valid zk proof":
-      # this test vector is from zerokit
-      let rlnInstanceRes = createRLNInstanceWrapper()
-      assertResultOk(rlnInstanceRes)
-      let rlnInstance = rlnInstanceRes.value
-
-      let identityCredential = defaultIdentityCredential()
-      assert rlnInstance.insertMember(identityCredential.idCommitment)
-
-      let merkleRootRes = rlnInstance.getMerkleRoot()
-      assertResultOk(merkleRootRes)
-      let merkleRoot = merkleRootRes.value
-
-      let proofGenRes = rlnInstance.proofGen(
-        data = @[],
-        memKeys = identityCredential,
-        memIndex = MembershipIndex(0),
-        epoch = uint64(epochTime() / 1.float64).toEpoch(),
-      )
-      assertResultOk(proofGenRes)
-
-      let
-        rateLimitProof = proofGenRes.value
-        proofVerifyRes = rlnInstance.proofVerify(
-          data = @[], proof = rateLimitProof, validRoots = @[merkleRoot]
-        )
-
-      assertResultOk(proofVerifyRes)
-      assert proofVerifyRes.value, "proof verification failed"
-
-      # Assert the proof fields adhere to the specified types and lengths
-      check:
-        typeEq(rateLimitProof.proof, array[256, byte])
-        typeEq(rateLimitProof.merkleRoot, array[32, byte])
-        typeEq(rateLimitProof.shareX, array[32, byte])
-        typeEq(rateLimitProof.shareY, array[32, byte])
-        typeEq(rateLimitProof.nullifier, array[32, byte])
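
The only functional change in the two createRLNInstance hunks above is that the tests no longer pass a database path. A minimal sketch of the updated call with explicit Result handling, assuming the RLNResult type and the createRLNInstance(depth) form used by these tests (whether a tree-path overload still exists elsewhere is not shown in this diff):

# Sketch: create an RLN instance with a Merkle tree of depth 15 and unwrap the Result.
let rlnRes: RLNResult = createRLNInstance(15)
doAssert rlnRes.isOk(), "could not create RLN instance: " & $rlnRes.error
let rln = rlnRes.get()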

@@ -1,23 +1,59 @@
 import std/tempfiles
 
-import waku/waku_rln_relay, waku/waku_rln_relay/[rln, protocol_types]
+import
+  waku/waku_rln_relay,
+  waku/waku_rln_relay/[
+    group_manager, rln, conversion_utils, constants, protocol_types, protocol_metrics,
+    nonce_manager,
+  ]
 
 proc createRLNInstanceWrapper*(): RLNResult =
   return createRlnInstance(tree_path = genTempPath("rln_tree", "waku_rln_relay"))
 
 proc unsafeAppendRLNProof*(
-    rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64
+    rlnPeer: WakuRLNRelay, msg: var WakuMessage, epoch: Epoch, messageId: MessageId
 ): RlnRelayResult[void] =
-  ## this proc derived from appendRLNProof, does not perform nonce check to
-  ## facilitate bad message id generation for testing
+  ## Test helper derived from `appendRLNProof`.
+  ## - Skips nonce validation to intentionally allow generating "bad" message IDs for tests.
+  ## - Forces a real-time on-chain Merkle root refresh via `updateRoots()` and fetches Merkle
+  ##   proof elements, updating `merkleProofCache` (bypasses `trackRootsChanges`).
+  ## WARNING: For testing only
 
-  let input = msg.toRLNSignal()
-  let epoch = rlnPeer.calcEpoch(senderEpochTime)
+  let manager = cast[OnchainGroupManager](rlnPeer.groupManager)
+  let rootUpdated = waitFor manager.updateRoots()
 
-  # we do not fetch a nonce from the nonce manager,
-  # instead we use 0 as the nonce
-  let proof = rlnPeer.groupManager.generateProof(input, epoch, 0).valueOr:
+  # Fetch Merkle proof either when a new root was detected *or* when the cache is empty.
+  if rootUpdated or manager.merkleProofCache.len == 0:
+    let proofResult = waitFor manager.fetchMerkleProofElements()
+    if proofResult.isErr():
+      error "Failed to fetch Merkle proof", error = proofResult.error
+    manager.merkleProofCache = proofResult.get()
+
+  let proof = manager.generateProof(msg.toRLNSignal(), epoch, messageId).valueOr:
     return err("could not generate rln-v2 proof: " & $error)
 
   msg.proof = proof.encode().buffer
   return ok()
+
+proc getWakuRlnConfig*(
+    manager: OnchainGroupManager,
+    userMessageLimit: uint64 = 1,
+    epochSizeSec: uint64 = 1,
+    treePath: string = genTempPath("rln_tree", "waku_rln_relay"),
+    index: MembershipIndex = MembershipIndex(0),
+): WakuRlnConfig =
+  let wakuRlnConfig = WakuRlnConfig(
+    dynamic: true,
+    ethClientUrls: @[EthClient],
+    ethContractAddress: manager.ethContractAddress,
+    chainId: manager.chainId,
+    credIndex: some(index),
+    userMessageLimit: userMessageLimit,
+    epochSizeSec: epochSizeSec,
+    treePath: treePath,
+    ethPrivateKey: some(manager.ethPrivateKey.get()),
+    onFatalErrorAction: proc(errStr: string) =
+      warn "non-fatal onchain test error", errStr
+    ,
+  )
+  return wakuRlnConfig
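
A short usage sketch tying the two new helpers together, assuming a manager obtained from setupOnchainGroupManager() and a WakuRLNRelay built from the resulting config, as the tests later in this commit do; the payload, tree path and index values are illustrative only.

# Build a dynamic (on-chain) RLN config against the locally deployed contract.
let wakuRlnConfig = getWakuRlnConfig(
  manager = manager,
  treePath = genTempPath("rln_tree", "example"),
  index = MembershipIndex(1),
)

let wakuRlnRelay = (waitFor WakuRlnRelay.new(wakuRlnConfig)).valueOr:
  raiseAssert $error

# Attach a proof for the current epoch using an explicit message id.
var msg = WakuMessage(payload: "hello".toBytes(), timestamp: now())
let epoch = wakuRlnRelay.getCurrentEpoch()
wakuRlnRelay.unsafeAppendRLNProof(msg, epoch, MessageId(1)).isOkOr:
  raiseAssert "could not attach RLN proof: " & $error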

@@ -2,8 +2,6 @@
 
 import
   ./test_rln_group_manager_onchain,
-  ./test_rln_group_manager_static,
   ./test_waku_rln_relay,
   ./test_wakunode_rln_relay,
-  ./test_rln_nonce_manager,
-  ./test_rln_serde
+  ./test_rln_nonce_manager

@@ -3,7 +3,7 @@
 {.push raises: [].}
 
 import
-  std/[options, sequtils, deques, random, locks],
+  std/[options, sequtils, deques, random, locks, osproc],
   results,
   stew/byteutils,
   testutils/unittests,
@@ -28,24 +28,16 @@ import
   ../testlib/wakucore,
   ./utils_onchain
 
-var testLock: Lock
-initLock(testLock)
-
 suite "Onchain group manager":
+  var anvilProc {.threadVar.}: Process
+  var manager {.threadVar.}: OnchainGroupManager
+
   setup:
-    # Acquire lock to ensure tests run sequentially
-    acquire(testLock)
-
-    let runAnvil {.used.} = runAnvil()
-
-    var manager {.threadvar.}: OnchainGroupManager
+    anvilProc = runAnvil()
     manager = waitFor setupOnchainGroupManager()
 
   teardown:
-    waitFor manager.stop()
-    stopAnvil(runAnvil)
-    # Release lock after test completes
-    release(testLock)
+    stopAnvil(anvilProc)
 
   test "should initialize successfully":
     (waitFor manager.init()).isOkOr:
@@ -119,11 +111,6 @@ suite "Onchain group manager":
     (waitFor manager.init()).isErrOr:
       raiseAssert "Expected error when keystore file doesn't exist"
 
-  test "trackRootChanges: start tracking roots":
-    (waitFor manager.init()).isOkOr:
-      raiseAssert $error
-    discard manager.trackRootChanges()
-
   test "trackRootChanges: should guard against uninitialized state":
     try:
       discard manager.trackRootChanges()
@@ -161,10 +148,9 @@ suite "Onchain group manager":
 
   test "trackRootChanges: should fetch history correctly":
     # TODO: We can't use `trackRootChanges()` directly in this test because its current implementation
-    # relies on a busy loop rather than event-based monitoring. As a result, some root changes
-    # may be missed, leading to inconsistent test results (i.e., it may randomly return true or false).
-    # To ensure reliability, we use the `updateRoots()` function to validate the `validRoots` window
-    # after each registration.
+    # relies on a busy loop rather than event-based monitoring. but that busy loop fetch root every 5 seconds
+    # so we can't use it in this test.
     const credentialCount = 6
     let credentials = generateCredentials(manager.rlnInstance, credentialCount)
     (waitFor manager.init()).isOkOr:

@@ -1,228 +0,0 @@
[228 lines deleted: the entire "Static group manager" test module is removed. It contained the {.used.} and {.push raises: [].} pragmas, imports of testutils/unittests, results, options, waku/[waku_rln_relay/protocol_types, waku_rln_relay/rln, waku_rln_relay/conversion_utils, waku_rln_relay/group_manager/static/group_manager], chronos, libp2p/crypto/crypto, eth/keys, dnsdisc/builder and std/tempfiles, two generateCredentials helpers, and the suite "Static group manager" with the tests "should initialize successfully", "startGroupSync: should start group sync", "startGroupSync: should guard against uninitialized state", "register: should guard against uninitialized state", "register: should register successfully", "register: callback is called", "withdraw: should guard against uninitialized state", "withdraw: should withdraw successfully" and "withdraw: callback is called". The removed bodies are not reproduced here.]

@@ -1,68 +0,0 @@
[68 lines deleted: the entire "RLN Relay v2: serde" test module is removed. It contained the defaultRateCommitment*() helper and the tests "toLeaf: converts a rateCommitment to a valid leaf" (expected leaf 09beac7784abfadc9958b3176b352389d0b969ccc7f8bccf3e968ed632e26eca) and "proofGen: generates a valid zk proof", both using zerokit test vectors. The removed bodies are not reproduced here.]

@@ -1,7 +1,7 @@
 {.used.}
 
 import
-  std/[options, os, sequtils, tempfiles],
+  std/[options, os, sequtils, tempfiles, strutils, osproc],
   stew/byteutils,
   testutils/unittests,
   chronos,
@@ -17,11 +17,22 @@ import
     waku_keystore,
   ],
   ./rln/waku_rln_relay_utils,
+  ./utils_onchain,
   ../testlib/[wakucore, futures, wakunode, testutils]
 
 from std/times import epochTime
 
 suite "Waku rln relay":
+  var anvilProc {.threadVar.}: Process
+  var manager {.threadVar.}: OnchainGroupManager
+
+  setup:
+    anvilProc = runAnvil()
+    manager = waitFor setupOnchainGroupManager()
+
+  teardown:
+    stopAnvil(anvilProc)
+
   test "key_gen Nim Wrappers":
     let merkleDepth: csize_t = 20
 
@@ -68,173 +79,6 @@
     debug "the generated identity credential: ", idCredential
 
[removed: the low-level RLN wrapper tests "getRoot Nim binding", "getMerkleRoot utils", "update_next_member Nim Wrapper", "getMember Nim wrapper", "delete_member Nim wrapper", "insertMembers rln utils", "insertMember rln utils" and "removeMember rln utils". The removed bodies are not reproduced here.]
 
   test "setMetadata rln utils":
     # create an RLN instance which also includes an empty Merkle tree
     let rlnInstance = createRLNInstanceWrapper()
@@ -290,135 +134,6 @@
     check:
       metadata.isNone()
 
[removed: the tests "Merkle tree consistency check between deletion and insertion" and "Merkle tree consistency check between deletion and insertion using rln utils", which verified that the empty-tree root equals the root after one insertion followed by a deletion. The removed bodies are not reproduced here.]
 
   test "hash Nim Wrappers":
     # create an RLN instance
     let rlnInstance = createRLNInstanceWrapper()
@@ -488,69 +203,6 @@
       "28a15a991fe3d2a014485c7fa905074bfb55c0909112f865ded2be0a26a932c3" ==
         hashRes.get().inHex()
 
[removed: the tests "create a list of membership keys and construct a Merkle tree based on the list" and "check correctness of toIdentityCredentials" (which compared the computed root against StaticGroupMerkleRoot). The removed bodies are not reproduced here.]
 
   test "RateLimitProof Protobuf encode/init test":
     var
       proof: ZKSNARK
@@ -691,49 +343,51 @@
   asyncTest "validateMessageAndUpdateLog: against epoch gap":
     let index = MembershipIndex(5)
 
-    let wakuRlnConfig = WakuRlnConfig(
-      dynamic: false,
-      credIndex: some(index),
-      userMessageLimit: 1,
-      epochSizeSec: 1,
-      treePath: genTempPath("rln_tree", "waku_rln_relay_2"),
-    )
+    let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index)
 
     let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
       raiseAssert $error
 
-    let time_1 = epochTime()
+    let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager)
+    let idCredentials = generateCredentials(manager.rlnInstance)
+
+    try:
+      waitFor manager.register(idCredentials, UserMessageLimit(20))
+    except Exception, CatchableError:
+      assert false,
+        "exception raised when calling register: " & getCurrentExceptionMsg()
+
+    let epoch1 = wakuRlnRelay.getCurrentEpoch()
+
+    # Create messages from the same peer and append RLN proof to them (except wm4)
     var
-      # create some messages from the same peer and append rln proof to them, except wm4
       wm1 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now())
-      # another message in the same epoch as wm1, it will break the messaging rate limit
+      # Another message in the same epoch as wm1, expected to break the rate limit
       wm2 = WakuMessage(payload: "Spam message".toBytes(), timestamp: now())
 
     await sleepAsync(1.seconds)
-    let time_2 = epochTime()
+    let epoch2 = wakuRlnRelay.getCurrentEpoch()
 
     var
-      # wm3 points to the next epoch bcz of the sleep
+      # wm3 points to the next epoch due to the sleep
       wm3 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now())
       wm4 = WakuMessage(payload: "Invalid message".toBytes(), timestamp: now())
 
-    wakuRlnRelay.unsafeAppendRLNProof(wm1, time_1).isOkOr:
+    # Append RLN proofs
+    wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch1, MessageId(1)).isOkOr:
       raiseAssert $error
-    wakuRlnRelay.unsafeAppendRLNProof(wm2, time_1).isOkOr:
+    wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch1, MessageId(1)).isOkOr:
+      raiseAssert $error
+    wakuRlnRelay.unsafeAppendRLNProof(wm3, epoch2, MessageId(3)).isOkOr:
       raiseAssert $error
 
-    wakuRlnRelay.unsafeAppendRLNProof(wm3, time_2).isOkOr:
-      raiseAssert $error
-
-    # validate messages
+    # Validate messages
     let
       msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1)
-      # wm2 is published within the same Epoch as wm1 and should be found as spam
+      # wm2 is within the same epoch as wm1 → should be spam
       msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2)
-      # a valid message should be validated successfully
+      # wm3 is in the next epoch → should be valid
      msgValidate3 = wakuRlnRelay.validateMessageAndUpdateLog(wm3)
-      # wm4 has no rln proof and should not be validated
+      # wm4 has no RLN proof → should be invalid
       msgValidate4 = wakuRlnRelay.validateMessageAndUpdateLog(wm4)
 
     check:
@@ -745,30 +399,41 @@
   asyncTest "validateMessageAndUpdateLog: against timestamp gap":
     let index = MembershipIndex(5)
 
-    let wakuRlnConfig = WakuRlnConfig(
-      dynamic: false,
-      credIndex: some(index),
-      userMessageLimit: 10,
-      epochSizeSec: 10,
-      treePath: genTempPath("rln_tree", "waku_rln_relay_2"),
-    )
+    let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index)
 
     let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
       raiseAssert $error
 
-    # usually it's 20 seconds but we set it to 2 for testing purposes which make the test faster
+    let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager)
+    let idCredentials = generateCredentials(manager.rlnInstance)
+
+    try:
+      waitFor manager.register(idCredentials, UserMessageLimit(20))
+    except Exception, CatchableError:
+      assert false,
+        "exception raised when calling register: " & getCurrentExceptionMsg()
+
+    # usually it's 20 seconds but we set it to 1 for testing purposes which make the test faster
     wakuRlnRelay.rlnMaxTimestampGap = 1
 
-    var time = epochTime()
+    var epoch = wakuRlnRelay.getCurrentEpoch()
 
     var
-      wm1 = WakuMessage(payload: "timestamp message".toBytes(), timestamp: now())
-      wm2 = WakuMessage(payload: "timestamp message".toBytes(), timestamp: now())
+      wm1 = WakuMessage(
+        payload: "timestamp message".toBytes(),
+        contentTopic: DefaultPubsubTopic,
+        timestamp: now(),
+      )
+      wm2 = WakuMessage(
+        payload: "timestamp message".toBytes(),
+        contentTopic: DefaultPubsubTopic,
+        timestamp: now(),
+      )
 
-    wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr:
+    wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch, MessageId(1)).isOkOr:
       raiseAssert $error
 
-    wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr:
+    wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch, MessageId(2)).isOkOr:
       raiseAssert $error
 
     # validate the first message because it's timestamp is the same as the generated timestamp
@@ -777,40 +442,43 @@
     # wait for 2 seconds to make the timestamp different from generated timestamp
     await sleepAsync(2.seconds)
 
-    # invalidate the second message because it's timestamp is different from the generated timestamp
     let msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2)
 
     check:
       msgValidate1 == MessageValidationResult.Valid
       msgValidate2 == MessageValidationResult.Invalid
 
-  asyncTest "validateMessageAndUpdateLog: multiple senders with same external nullifier":
+  asyncTest "multiple senders with same external nullifier":
     let index1 = MembershipIndex(5)
-    let index2 = MembershipIndex(6)
-
-    let rlnConf1 = WakuRlnConfig(
-      dynamic: false,
-      credIndex: some(index1),
-      userMessageLimit: 1,
-      epochSizeSec: 1,
-      treePath: genTempPath("rln_tree", "waku_rln_relay_3"),
-    )
+    let rlnConf1 = getWakuRlnConfig(manager = manager, index = index1)
 
     let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr:
       raiseAssert "failed to create waku rln relay: " & $error
 
-    let rlnConf2 = WakuRlnConfig(
-      dynamic: false,
-      credIndex: some(index2),
-      userMessageLimit: 1,
-      epochSizeSec: 1,
-      treePath: genTempPath("rln_tree", "waku_rln_relay_4"),
-    )
+    let manager1 = cast[OnchainGroupManager](wakuRlnRelay1.groupManager)
+    let idCredentials1 = generateCredentials(manager1.rlnInstance)
+
+    try:
+      waitFor manager1.register(idCredentials1, UserMessageLimit(20))
+    except Exception, CatchableError:
+      assert false,
+        "exception raised when calling register: " & getCurrentExceptionMsg()
+
+    let index2 = MembershipIndex(6)
+    let rlnConf2 = getWakuRlnConfig(manager = manager, index = index2)
     let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr:
       raiseAssert "failed to create waku rln relay: " & $error
 
+    let manager2 = cast[OnchainGroupManager](wakuRlnRelay2.groupManager)
+    let idCredentials2 = generateCredentials(manager2.rlnInstance)
+
+    try:
+      waitFor manager2.register(idCredentials2, UserMessageLimit(20))
+    except Exception, CatchableError:
+      assert false,
+        "exception raised when calling register: " & getCurrentExceptionMsg()
+
     # get the current epoch time
-    let time = epochTime()
+    let epoch = wakuRlnRelay1.getCurrentEpoch()
 
     # create messages from different peers and append rln proofs to them
     var
@@ -820,16 +488,13 @@ suite "Waku rln relay":
wm2 =
  WakuMessage(payload: "Valid message from sender 2".toBytes(), timestamp: now())

- wakuRlnRelay1.appendRLNProof(wm1, time).isOkOr:
+ wakuRlnRelay1.unsafeAppendRLNProof(wm1, epoch, MessageId(1)).isOkOr:
  raiseAssert $error
- wakuRlnRelay2.appendRLNProof(wm2, time).isOkOr:
+ wakuRlnRelay2.unsafeAppendRLNProof(wm2, epoch, MessageId(1)).isOkOr:
  raiseAssert $error

- # validate messages
- # validateMessage proc checks the validity of the message fields and adds it to the log (if valid)
let
  msgValidate1 = wakuRlnRelay1.validateMessageAndUpdateLog(wm1)
- # since this message is from a different sender, it should be validated successfully
  msgValidate2 = wakuRlnRelay1.validateMessageAndUpdateLog(wm2)

check:
@@ -942,12 +607,8 @@ suite "Waku rln relay":
let index = MembershipIndex(0)

proc runTestForEpochSizeSec(rlnEpochSizeSec: uint) {.async.} =
- let wakuRlnConfig = WakuRlnConfig(
-   dynamic: false,
-   credIndex: some(index),
-   userMessageLimit: 1,
-   epochSizeSec: rlnEpochSizeSec,
-   treePath: genTempPath("rln_tree", "waku_rln_relay_4"),
+ let wakuRlnConfig = getWakuRlnConfig(
+   manager = manager, index = index, epochSizeSec = rlnEpochSizeSec.uint64
  )

let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
@@ -1,7 +1,7 @@
{.used.}

import
- std/[sequtils, tempfiles],
+ std/[options, os, sequtils, tempfiles, strutils, osproc],
stew/byteutils,
testutils/unittests,
chronicles,
@@ -11,26 +11,11 @@ import
import
waku/[waku_core, waku_node, waku_rln_relay],
../testlib/[wakucore, futures, wakunode, testutils],
+ ./utils_onchain,
./rln/waku_rln_relay_utils

from std/times import epochTime

- proc buildWakuRlnConfig(
-   credIndex: uint,
-   epochSizeSec: uint64,
-   treeFilename: string,
-   userMessageLimit: uint64 = 1,
- ): WakuRlnConfig =
-   let treePath = genTempPath("rln_tree", treeFilename)
-   # Off-chain
-   return WakuRlnConfig(
-     dynamic: false,
-     credIndex: some(credIndex.uint),
-     userMessageLimit: userMessageLimit,
-     epochSizeSec: epochSizeSec,
-     treePath: treePath,
-   )

proc waitForNullifierLog(node: WakuNode, expectedLen: int): Future[bool] {.async.} =
## Helper function
for i in 0 .. 100: # Try for up to 50 seconds (100 * 500ms)
@@ -41,6 +26,16 @@ proc waitForNullifierLog(node: WakuNode, expectedLen: int): Future[bool] {.async

procSuite "WakuNode - RLN relay":
# NOTE: we set the rlnRelayUserMessageLimit to 1 to make the tests easier to reason about
+ var anvilProc {.threadVar.}: Process
+ var manager {.threadVar.}: OnchainGroupManager
+
+ setup:
+   anvilProc = runAnvil()
+   manager = waitFor setupOnchainGroupManager()
+
+ teardown:
+   stopAnvil(anvilProc)
+
asyncTest "testing rln-relay with valid proof":
let
  # publisher node
@@ -61,50 +56,62 @@ procSuite "WakuNode - RLN relay":
assert false, "Failed to mount relay"

# mount rlnrelay in off-chain mode
- let wakuRlnConfig1 = WakuRlnConfig(
-   dynamic: false,
-   credIndex: some(1.uint),
-   userMessageLimit: 1,
-   epochSizeSec: 1,
-   treePath: genTempPath("rln_tree", "wakunode"),
+ let wakuRlnConfig1 = getWakuRlnConfig(
+   manager = manager,
+   treePath = genTempPath("rln_tree", "wakunode_1"),
+   index = MembershipIndex(1),
)

await node1.mountRlnRelay(wakuRlnConfig1)

await node1.start()

+ # Registration is mandatory before sending messages with rln-relay
+ let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
+ let idCredentials1 = generateCredentials(manager1.rlnInstance)
+
+ try:
+   waitFor manager1.register(idCredentials1, UserMessageLimit(20))
+ except Exception, CatchableError:
+   assert false,
+     "exception raised when calling register: " & getCurrentExceptionMsg()
+
+ let rootUpdated1 = waitFor manager1.updateRoots()
+ debug "Updated root for node1", rootUpdated1

# node 2
(await node2.mountRelay()).isOkOr:
  assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
- let wakuRlnConfig2 = WakuRlnConfig(
-   dynamic: false,
-   credIndex: some(2.uint),
-   userMessageLimit: 1,
-   epochSizeSec: 1,
-   treePath: genTempPath("rln_tree", "wakunode_2"),
+ let wakuRlnConfig2 = getWakuRlnConfig(
+   manager = manager,
+   treePath = genTempPath("rln_tree", "wakunode_2"),
+   index = MembershipIndex(2),
)

await node2.mountRlnRelay(wakuRlnConfig2)

await node2.start()

+ let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
+ let rootUpdated2 = waitFor manager2.updateRoots()
+ debug "Updated root for node2", rootUpdated2

# node 3
(await node3.mountRelay()).isOkOr:
  assert false, "Failed to mount relay"

- let wakuRlnConfig3 = WakuRlnConfig(
-   dynamic: false,
-   credIndex: some(3.uint),
-   userMessageLimit: 1,
-   epochSizeSec: 1,
-   treePath: genTempPath("rln_tree", "wakunode_3"),
+ let wakuRlnConfig3 = getWakuRlnConfig(
+   manager = manager,
+   treePath = genTempPath("rln_tree", "wakunode_3"),
+   index = MembershipIndex(3),
)

await node3.mountRlnRelay(wakuRlnConfig3)

await node3.start()

+ let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
+ let rootUpdated3 = waitFor manager3.updateRoots()
+ debug "Updated root for node3", rootUpdated3

# connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -138,9 +145,13 @@ procSuite "WakuNode - RLN relay":
# prepare the epoch
var message =
  WakuMessage(payload: @payload, contentTopic: contentTopic, timestamp: now())
- doAssert(node1.wakuRlnRelay.unsafeAppendRLNProof(message, epochTime()).isOk())
+ doAssert(
+   node1.wakuRlnRelay
+   .unsafeAppendRLNProof(message, node1.wakuRlnRelay.getCurrentEpoch(), MessageId(0))
+   .isOk()
+ )

- debug "Nodes participating in the test",
+ debug " Nodes participating in the test",
  node1 = shortLog(node1.switch.peerInfo.peerId),
  node2 = shortLog(node2.switch.peerInfo.peerId),
  node3 = shortLog(node3.switch.peerInfo.peerId)
@@ -149,9 +160,7 @@ procSuite "WakuNode - RLN relay":
## verifies the rate limit proof of the message and relays the message to node3
## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
discard await node1.publish(some(DefaultPubsubTopic), message)
- await sleepAsync(2000.millis)
-
- assert (await completionFut.withTimeout(10.seconds)), "completionFut timed out"
+ assert (await completionFut.withTimeout(15.seconds)), "completionFut timed out"

await node1.stop()
await node2.stop()
@@ -177,18 +186,25 @@ procSuite "WakuNode - RLN relay":

# mount rlnrelay in off-chain mode
for index, node in nodes:
- let wakuRlnConfig = WakuRlnConfig(
-   dynamic: false,
-   credIndex: some(index.uint + 1),
-   userMessageLimit: 1,
-   epochSizeSec: 1,
-   treePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
+ let wakuRlnConfig = getWakuRlnConfig(
+   manager = manager,
+   treePath = genTempPath("rln_tree", "wakunode_" & $(index + 1)),
+   index = MembershipIndex(index + 1),
  )

  await node.mountRlnRelay(wakuRlnConfig)
+ await node.start()
+ let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
+ let idCredentials = generateCredentials(manager.rlnInstance)

- # start them
- await allFutures(nodes.mapIt(it.start()))
+ try:
+   waitFor manager.register(idCredentials, UserMessageLimit(20))
+ except Exception, CatchableError:
+   assert false,
+     "exception raised when calling register: " & getCurrentExceptionMsg()
+
+ let rootUpdated = waitFor manager.updateRoots()
+ debug "Updated root for node", node = index + 1, rootUpdated = rootUpdated

# connect them together
await nodes[0].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()])
@@ -228,32 +244,34 @@ procSuite "WakuNode - RLN relay":
var messages1: seq[WakuMessage] = @[]
var messages2: seq[WakuMessage] = @[]

- var epochTime = epochTime()

for i in 0 ..< 3:
  var message = WakuMessage(
    payload: ("Payload_" & $i).toBytes(),
    timestamp: now(),
    contentTopic: contentTopics[0],
  )
- nodes[0].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr:
+ nodes[0].wakuRlnRelay.unsafeAppendRLNProof(
+   message, nodes[0].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8)
+ ).isOkOr:
    raiseAssert $error
  messages1.add(message)

- epochTime = epochTime()

for i in 0 ..< 3:
  var message = WakuMessage(
    payload: ("Payload_" & $i).toBytes(),
    timestamp: now(),
    contentTopic: contentTopics[1],
  )
- nodes[1].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr:
+ nodes[1].wakuRlnRelay.unsafeAppendRLNProof(
+   message, nodes[1].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8)
+ ).isOkOr:
    raiseAssert $error
  messages2.add(message)

# publish 3 messages from node[0] (last 2 are spam, window is 10 secs)
# publish 3 messages from node[1] (last 2 are spam, window is 10 secs)
for msg in messages1:
  discard await nodes[0].publish(some($shards[0]), msg)
for msg in messages2:
@@ -265,8 +283,8 @@ procSuite "WakuNode - RLN relay":
# check that node[2] got messages from both topics
# and that rln was applied (just 1 msg is rx, rest are spam)
check:
- rxMessagesTopic1 == 1
- rxMessagesTopic2 == 1
+ rxMessagesTopic1 == 3
+ rxMessagesTopic2 == 3

await allFutures(nodes.mapIt(it.stop()))
@ -290,49 +308,61 @@ procSuite "WakuNode - RLN relay":
|
|||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
|
|
||||||
# mount rlnrelay in off-chain mode
|
# mount rlnrelay in off-chain mode
|
||||||
let wakuRlnConfig1 = WakuRlnConfig(
|
let wakuRlnConfig1 = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "wakunode_1"),
|
||||||
userMessageLimit: 1,
|
index = MembershipIndex(1),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_4"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node1.mountRlnRelay(wakuRlnConfig1)
|
await node1.mountRlnRelay(wakuRlnConfig1)
|
||||||
|
|
||||||
await node1.start()
|
await node1.start()
|
||||||
|
|
||||||
|
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials1 = generateCredentials(manager1.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated1 = waitFor manager1.updateRoots()
|
||||||
|
debug "Updated root for node1", rootUpdated1
|
||||||
|
|
||||||
# node 2
|
# node 2
|
||||||
(await node2.mountRelay()).isOkOr:
|
(await node2.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
# mount rlnrelay in off-chain mode
|
# mount rlnrelay in off-chain mode
|
||||||
let wakuRlnConfig2 = WakuRlnConfig(
|
let wakuRlnConfig2 = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(2.uint),
|
treePath = genTempPath("rln_tree", "wakunode_2"),
|
||||||
userMessageLimit: 1,
|
index = MembershipIndex(2),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_5"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
await node2.mountRlnRelay(wakuRlnConfig2)
|
||||||
|
|
||||||
await node2.start()
|
await node2.start()
|
||||||
|
|
||||||
|
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
|
||||||
|
let rootUpdated2 = waitFor manager2.updateRoots()
|
||||||
|
debug "Updated root for node2", rootUpdated2
|
||||||
|
|
||||||
# node 3
|
# node 3
|
||||||
(await node3.mountRelay()).isOkOr:
|
(await node3.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
|
|
||||||
let wakuRlnConfig3 = WakuRlnConfig(
|
let wakuRlnConfig3 = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(3.uint),
|
treePath = genTempPath("rln_tree", "wakunode_3"),
|
||||||
userMessageLimit: 1,
|
index = MembershipIndex(3),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_6"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node3.mountRlnRelay(wakuRlnConfig3)
|
await node3.mountRlnRelay(wakuRlnConfig3)
|
||||||
await node3.start()
|
await node3.start()
|
||||||
|
|
||||||
|
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
|
||||||
|
let rootUpdated3 = waitFor manager3.updateRoots()
|
||||||
|
debug "Updated root for node3", rootUpdated3
|
||||||
|
|
||||||
# connect them together
|
# connect them together
|
||||||
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
||||||
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
||||||
@@ -362,37 +392,19 @@ procSuite "WakuNode - RLN relay":
await sleepAsync(2000.millis)

# prepare the message payload
- let payload = "Hello".toBytes()
+ let payload = "valid".toBytes()

# prepare the epoch
let epoch = node1.wakuRlnRelay.getCurrentEpoch()

- # prepare the proof
- let
-   contentTopicBytes = contentTopic.toBytes
-   input = concat(payload, contentTopicBytes)
-   extraBytes: seq[byte] = @[byte(1), 2, 3]
-
- let nonceManager = node1.wakuRlnRelay.nonceManager
- let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(
-   concat(input, extraBytes), epoch, MessageId(0)
- )
-
- assert rateLimitProofRes.isOk(), $rateLimitProofRes.error
- # check the proof is generated correctly outside when block to avoid duplication
- let rateLimitProof = rateLimitProofRes.get().encode().buffer
-
- let message = WakuMessage(
-   payload: @payload,
-   contentTopic: contentTopic,
-   proof: rateLimitProof,
-   timestamp: now(),
- )
-
- ## node1 publishes a message with an invalid rln proof, the message is then relayed to node2 which in turn
- ## attempts to verify the rate limit proof and fails hence does not relay the message to node3, thus the relayHandler of node3
- ## never gets called
- ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
+ var message =
+   WakuMessage(payload: @payload, contentTopic: DefaultPubsubTopic, timestamp: now())
+
+ node1.wakuRlnRelay.unsafeAppendRLNProof(message, epoch, MessageId(0)).isOkOr:
+   assert false, "Failed to append rln proof: " & $error
+
+ # message.payload = "Invalid".toBytes()
+ message.proof[0] = message.proof[0] xor 0x01
discard await node1.publish(some(DefaultPubsubTopic), message)
await sleepAsync(2000.millis)
@ -424,86 +436,109 @@ procSuite "WakuNode - RLN relay":
|
|||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
|
|
||||||
# mount rlnrelay in off-chain mode
|
# mount rlnrelay in off-chain mode
|
||||||
let wakuRlnConfig1 = WakuRlnConfig(
|
let wakuRlnConfig1 = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "wakunode_1"),
|
||||||
userMessageLimit: 1,
|
index = MembershipIndex(1),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_7"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node1.mountRlnRelay(wakuRlnConfig1)
|
await node1.mountRlnRelay(wakuRlnConfig1)
|
||||||
|
|
||||||
await node1.start()
|
await node1.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials1 = generateCredentials(manager1.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated1 = waitFor manager1.updateRoots()
|
||||||
|
debug "Updated root for node1", rootUpdated1
|
||||||
|
|
||||||
# node 2
|
# node 2
|
||||||
(await node2.mountRelay()).isOkOr:
|
(await node2.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
|
|
||||||
# mount rlnrelay in off-chain mode
|
# mount rlnrelay in off-chain mode
|
||||||
let wakuRlnConfig2 = WakuRlnConfig(
|
let wakuRlnConfig2 = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(2.uint),
|
treePath = genTempPath("rln_tree", "wakunode_2"),
|
||||||
userMessageLimit: 1,
|
index = MembershipIndex(2),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_8"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
await node2.mountRlnRelay(wakuRlnConfig2)
|
||||||
await node2.start()
|
await node2.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
|
||||||
|
let rootUpdated2 = waitFor manager2.updateRoots()
|
||||||
|
debug "Updated root for node2", rootUpdated2
|
||||||
|
|
||||||
# node 3
|
# node 3
|
||||||
(await node3.mountRelay()).isOkOr:
|
(await node3.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
|
|
||||||
# mount rlnrelay in off-chain mode
|
# mount rlnrelay in off-chain mode
|
||||||
let wakuRlnConfig3 = WakuRlnConfig(
|
let wakuRlnConfig3 = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(3.uint),
|
treePath = genTempPath("rln_tree", "wakunode_3"),
|
||||||
userMessageLimit: 1,
|
index = MembershipIndex(3),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_9"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node3.mountRlnRelay(wakuRlnConfig3)
|
await node3.mountRlnRelay(wakuRlnConfig3)
|
||||||
|
|
||||||
await node3.start()
|
await node3.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
|
||||||
|
let rootUpdated3 = waitFor manager3.updateRoots()
|
||||||
|
debug "Updated root for node3", rootUpdated3
|
||||||
|
|
||||||
# connect the nodes together node1 <-> node2 <-> node3
|
# connect the nodes together node1 <-> node2 <-> node3
|
||||||
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
||||||
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
||||||
|
|
||||||
# get the current epoch time
|
# get the current epoch time
|
||||||
let time_1 = epochTime()
|
let epoch_1 = node1.wakuRlnRelay.getCurrentEpoch()
|
||||||
|
|
||||||
# create some messages with rate limit proofs
|
# create some messages with rate limit proofs
|
||||||
var
|
var
|
||||||
wm1 = WakuMessage(
|
wm1 = WakuMessage(
|
||||||
payload: "message 1".toBytes(), timestamp: now(), contentTopic: contentTopic
|
payload: "message 1".toBytes(),
|
||||||
|
timestamp: now(),
|
||||||
|
contentTopic: DefaultPubsubTopic,
|
||||||
)
|
)
|
||||||
# another message in the same epoch as wm1, it will break the messaging rate limit
|
# another message in the same epoch as wm1, it will break the messaging rate limit
|
||||||
wm2 = WakuMessage(
|
wm2 = WakuMessage(
|
||||||
payload: "message 2".toBytes(), timestamp: now(), contentTopic: contentTopic
|
payload: "message 2".toBytes(),
|
||||||
|
timestamp: now(),
|
||||||
|
contentTopic: DefaultPubsubTopic,
|
||||||
)
|
)
|
||||||
# wm3 points to the next epoch
|
# wm3 points to the next epoch
|
||||||
|
|
||||||
await sleepAsync(1000.millis)
|
await sleepAsync(1000.millis)
|
||||||
let time_2 = epochTime()
|
let epoch_2 = node1.wakuRlnRelay.getCurrentEpoch()
|
||||||
|
|
||||||
var
|
var
|
||||||
wm3 = WakuMessage(
|
wm3 = WakuMessage(
|
||||||
payload: "message 3".toBytes(), timestamp: now(), contentTopic: contentTopic
|
payload: "message 3".toBytes(),
|
||||||
|
timestamp: now(),
|
||||||
|
contentTopic: DefaultPubsubTopic,
|
||||||
)
|
)
|
||||||
wm4 = WakuMessage(
|
wm4 = WakuMessage(
|
||||||
payload: "message 4".toBytes(), timestamp: now(), contentTopic: contentTopic
|
payload: "message 4".toBytes(),
|
||||||
|
timestamp: now(),
|
||||||
|
contentTopic: DefaultPubsubTopic,
|
||||||
)
|
)
|
||||||
|
|
||||||
node3.wakuRlnRelay.unsafeAppendRLNProof(wm1, time_1).isOkOr:
|
node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch_1, MessageId(0)).isOkOr:
|
||||||
raiseAssert $error
|
raiseAssert $error
|
||||||
node3.wakuRlnRelay.unsafeAppendRLNProof(wm2, time_1).isOkOr:
|
node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch_1, MessageId(0)).isOkOr:
|
||||||
raiseAssert $error
|
raiseAssert $error
|
||||||
|
|
||||||
node3.wakuRlnRelay.unsafeAppendRLNProof(wm3, time_2).isOkOr:
|
node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, epoch_2, MessageId(2)).isOkOr:
|
||||||
raiseAssert $error
|
raiseAssert $error
|
||||||
|
|
||||||
# relay handler for node3
|
# relay handler for node3
|
||||||
@ -581,15 +616,42 @@ procSuite "WakuNode - RLN relay":
|
|||||||
# Given both nodes mount relay and rlnrelay
|
# Given both nodes mount relay and rlnrelay
|
||||||
(await node1.mountRelay()).isOkOr:
|
(await node1.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
|
let wakuRlnConfig1 = getWakuRlnConfig(
|
||||||
(await node1.mountRlnRelay(wakuRlnConfig1)).isOkOr:
|
manager = manager,
|
||||||
assert false, "Failed to mount rlnrelay"
|
treePath = genTempPath("rln_tree", "wakunode_1"),
|
||||||
|
index = MembershipIndex(1),
|
||||||
|
)
|
||||||
|
await node1.mountRlnRelay(wakuRlnConfig1)
|
||||||
|
await node1.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials1 = generateCredentials(manager1.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated1 = waitFor manager1.updateRoots()
|
||||||
|
debug "Updated root for node1", rootUpdated1
|
||||||
|
|
||||||
# Mount rlnrelay in node2 in off-chain mode
|
# Mount rlnrelay in node2 in off-chain mode
|
||||||
(await node2.mountRelay()).isOkOr:
|
(await node2.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
|
let wakuRlnConfig2 = getWakuRlnConfig(
|
||||||
|
manager = manager,
|
||||||
|
treePath = genTempPath("rln_tree", "wakunode_2"),
|
||||||
|
index = MembershipIndex(2),
|
||||||
|
)
|
||||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
await node2.mountRlnRelay(wakuRlnConfig2)
|
||||||
|
await node2.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
|
||||||
|
let rootUpdated2 = waitFor manager2.updateRoots()
|
||||||
|
debug "Updated root for node2", rootUpdated2
|
||||||
|
|
||||||
# Given the two nodes are started and connected
|
# Given the two nodes are started and connected
|
||||||
waitFor allFutures(node1.start(), node2.start())
|
waitFor allFutures(node1.start(), node2.start())
|
||||||
@ -597,12 +659,18 @@ procSuite "WakuNode - RLN relay":
|
|||||||
|
|
||||||
# Given some messages
|
# Given some messages
|
||||||
var
|
var
|
||||||
wm1 = WakuMessage(payload: "message 1".toBytes(), contentTopic: contentTopic)
|
wm1 =
|
||||||
wm2 = WakuMessage(payload: "message 2".toBytes(), contentTopic: contentTopic)
|
WakuMessage(payload: "message 1".toBytes(), contentTopic: DefaultPubsubTopic)
|
||||||
wm3 = WakuMessage(payload: "message 3".toBytes(), contentTopic: contentTopic)
|
wm2 =
|
||||||
wm4 = WakuMessage(payload: "message 4".toBytes(), contentTopic: contentTopic)
|
WakuMessage(payload: "message 2".toBytes(), contentTopic: DefaultPubsubTopic)
|
||||||
wm5 = WakuMessage(payload: "message 5".toBytes(), contentTopic: contentTopic)
|
wm3 =
|
||||||
wm6 = WakuMessage(payload: "message 6".toBytes(), contentTopic: contentTopic)
|
WakuMessage(payload: "message 3".toBytes(), contentTopic: DefaultPubsubTopic)
|
||||||
|
wm4 =
|
||||||
|
WakuMessage(payload: "message 4".toBytes(), contentTopic: DefaultPubsubTopic)
|
||||||
|
wm5 =
|
||||||
|
WakuMessage(payload: "message 5".toBytes(), contentTopic: DefaultPubsubTopic)
|
||||||
|
wm6 =
|
||||||
|
WakuMessage(payload: "message 6".toBytes(), contentTopic: DefaultPubsubTopic)
|
||||||
|
|
||||||
# And node2 mounts a relay handler that completes the respective future when a message is received
|
# And node2 mounts a relay handler that completes the respective future when a message is received
|
||||||
var
|
var
|
||||||
@@ -635,14 +703,26 @@ procSuite "WakuNode - RLN relay":

# Given all messages have an rln proof and are published by the node 1
let publishSleepDuration: Duration = 5000.millis
- let startTime = epochTime()
+ let epoch_1 = node1.wakuRlnRelay.calcEpoch(epochTime().float64)
+ let epoch_2 = node1.wakuRlnRelay.calcEpoch(
+   epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 1
+ )
+ let epoch_3 = node1.wakuRlnRelay.calcEpoch(
+   epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 2
+ )
+ let epoch_4 = node1.wakuRlnRelay.calcEpoch(
+   epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 3
+ )
+ let epoch_5 = node1.wakuRlnRelay.calcEpoch(
+   epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 4
+ )

# Epoch 1
- node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, startTime).isOkOr:
+ node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch_1, MessageId(0)).isOkOr:
  raiseAssert $error

# Message wm2 is published in the same epoch as wm1, so it'll be considered spam
- node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, startTime).isOkOr:
+ node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch_1, MessageId(0)).isOkOr:
  raiseAssert $error

discard await node1.publish(some(DefaultPubsubTopic), wm1)
@@ -654,7 +734,7 @@ procSuite "WakuNode - RLN relay":

# Epoch 2

- node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, startTime + float(1 * epochSizeSec)).isOkOr:
+ node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, epoch_2, MessageId(0)).isOkOr:
  raiseAssert $error

discard await node1.publish(some(DefaultPubsubTopic), wm3)
@@ -666,7 +746,7 @@ procSuite "WakuNode - RLN relay":
await node2.waitForNullifierLog(2)

# Epoch 3
- node1.wakuRlnRelay.unsafeAppendRLNProof(wm4, startTime + float(2 * epochSizeSec)).isOkOr:
+ node1.wakuRlnRelay.unsafeAppendRLNProof(wm4, epoch_3, MessageId(0)).isOkOr:
  raiseAssert $error

discard await node1.publish(some(DefaultPubsubTopic), wm4)
@@ -676,7 +756,7 @@ procSuite "WakuNode - RLN relay":
await node2.waitForNullifierLog(3)

# Epoch 4
- node1.wakuRlnRelay.unsafeAppendRLNProof(wm5, startTime + float(3 * epochSizeSec)).isOkOr:
+ node1.wakuRlnRelay.unsafeAppendRLNProof(wm5, epoch_4, MessageId(0)).isOkOr:
  raiseAssert $error

discard await node1.publish(some(DefaultPubsubTopic), wm5)
@@ -686,7 +766,7 @@ procSuite "WakuNode - RLN relay":
await node2.waitForNullifierLog(4)

# Epoch 5
- node1.wakuRlnRelay.unsafeAppendRLNProof(wm6, startTime + float(4 * epochSizeSec)).isOkOr:
+ node1.wakuRlnRelay.unsafeAppendRLNProof(wm6, epoch_5, MessageId(0)).isOkOr:
  raiseAssert $error

discard await node1.publish(some(DefaultPubsubTopic), wm6)
@ -707,78 +787,3 @@ procSuite "WakuNode - RLN relay":
|
|||||||
|
|
||||||
# Cleanup
|
# Cleanup
|
||||||
waitFor allFutures(node1.stop(), node2.stop())
|
waitFor allFutures(node1.stop(), node2.stop())
|
||||||
|
|
||||||
asyncTest "Spam Detection and Slashing (currently gossipsub score decrease)":
|
|
||||||
# Given two nodes
|
|
||||||
let
|
|
||||||
contentTopic = ContentTopic("/waku/2/default-content/proto")
|
|
||||||
shardSeq = @[DefaultRelayShard]
|
|
||||||
nodeKey1 = generateSecp256k1Key()
|
|
||||||
node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
|
|
||||||
nodeKey2 = generateSecp256k1Key()
|
|
||||||
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
|
|
||||||
epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4
|
|
||||||
|
|
||||||
# Given both nodes mount relay and rlnrelay
|
|
||||||
# Mount rlnrelay in node1 in off-chain mode
|
|
||||||
(await node1.mountRelay()).isOkOr:
|
|
||||||
assert false, "Failed to mount relay"
|
|
||||||
let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
|
|
||||||
await node1.mountRlnRelay(wakuRlnConfig1)
|
|
||||||
|
|
||||||
# Mount rlnrelay in node2 in off-chain mode
|
|
||||||
(await node2.mountRelay()).isOkOr:
|
|
||||||
assert false, "Failed to mount relay"
|
|
||||||
let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
|
|
||||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
|
||||||
|
|
||||||
proc simpleHandler(
|
|
||||||
topic: PubsubTopic, msg: WakuMessage
|
|
||||||
): Future[void] {.async, gcsafe.} =
|
|
||||||
await sleepAsync(0.milliseconds)
|
|
||||||
|
|
||||||
node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
|
|
||||||
assert false, "Failed to subscribe to pubsub topic in node2: " & $error
|
|
||||||
node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
|
|
||||||
assert false, "Failed to subscribe to pubsub topic in node1: " & $error
|
|
||||||
|
|
||||||
# Given the two nodes are started and connected
|
|
||||||
waitFor allFutures(node1.start(), node2.start())
|
|
||||||
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
|
||||||
|
|
||||||
# Given some messages with rln proofs
|
|
||||||
let time = epochTime()
|
|
||||||
var
|
|
||||||
msg1 = WakuMessage(
|
|
||||||
payload: "message 1".toBytes(), timestamp: now(), contentTopic: contentTopic
|
|
||||||
)
|
|
||||||
msg2 = WakuMessage(
|
|
||||||
payload: "message 2".toBytes(), timestamp: now(), contentTopic: contentTopic
|
|
||||||
)
|
|
||||||
|
|
||||||
node1.wakuRlnRelay.unsafeAppendRLNProof(msg1, time).isOkOr:
|
|
||||||
raiseAssert $error
|
|
||||||
# Message wm2 is published in the same epoch as wm1, so it'll be considered spam
|
|
||||||
node1.wakuRlnRelay.unsafeAppendRLNProof(msg2, time).isOkOr:
|
|
||||||
raiseAssert $error
|
|
||||||
|
|
||||||
# When publishing the first message (valid)
|
|
||||||
discard await node1.publish(some(DefaultPubsubTopic), msg1)
|
|
||||||
await sleepAsync(FUTURE_TIMEOUT_SCORING) # Wait for scoring
|
|
||||||
|
|
||||||
# Then the score of node2 should increase
|
|
||||||
check:
|
|
||||||
node1.wakuRelay.peerStats[node2.switch.peerInfo.peerId].score == 0.1
|
|
||||||
node2.wakuRelay.peerStats[node1.switch.peerInfo.peerId].score == 1.1
|
|
||||||
|
|
||||||
# When publishing the second message (spam)
|
|
||||||
discard await node1.publish(some(DefaultPubsubTopic), msg2)
|
|
||||||
await sleepAsync(FUTURE_TIMEOUT_SCORING)
|
|
||||||
|
|
||||||
# Then the score of node2 should decrease
|
|
||||||
check:
|
|
||||||
node1.wakuRelay.peerStats[node2.switch.peerInfo.peerId].score == 0.1
|
|
||||||
node2.wakuRelay.peerStats[node1.switch.peerInfo.peerId].score == -99.4
|
|
||||||
|
|
||||||
await node1.stop()
|
|
||||||
await node2.stop()
|
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
{.used.}
|
{.used.}
|
||||||
|
|
||||||
import
|
import
|
||||||
std/tempfiles,
|
std/[tempfiles, osproc],
|
||||||
testutils/unittests,
|
testutils/unittests,
|
||||||
presto,
|
presto,
|
||||||
presto/client as presto_client,
|
presto/client as presto_client,
|
||||||
@ -23,7 +23,8 @@ import
|
|||||||
],
|
],
|
||||||
../testlib/common,
|
../testlib/common,
|
||||||
../testlib/wakucore,
|
../testlib/wakucore,
|
||||||
../testlib/wakunode
|
../testlib/wakunode,
|
||||||
|
../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain]
|
||||||
|
|
||||||
proc testWakuNode(): WakuNode =
|
proc testWakuNode(): WakuNode =
|
||||||
let
|
let
|
||||||
@ -36,6 +37,16 @@ proc testWakuNode(): WakuNode =
|
|||||||
|
|
||||||
suite "Waku v2 REST API - health":
|
suite "Waku v2 REST API - health":
|
||||||
# TODO: better test for health
|
# TODO: better test for health
|
||||||
|
var anvilProc {.threadVar.}: Process
|
||||||
|
var manager {.threadVar.}: OnchainGroupManager
|
||||||
|
|
||||||
|
setup:
|
||||||
|
anvilProc = runAnvil()
|
||||||
|
manager = waitFor setupOnchainGroupManager()
|
||||||
|
|
||||||
|
teardown:
|
||||||
|
stopAnvil(anvilProc)
|
||||||
|
|
||||||
asyncTest "Get node health info - GET /health":
|
asyncTest "Get node health info - GET /health":
|
||||||
# Given
|
# Given
|
||||||
let node = testWakuNode()
|
let node = testWakuNode()
|
||||||
@ -67,11 +78,10 @@ suite "Waku v2 REST API - health":
|
|||||||
|
|
||||||
# now kick in rln (currently the only check for health)
|
# now kick in rln (currently the only check for health)
|
||||||
await node.mountRlnRelay(
|
await node.mountRlnRelay(
|
||||||
WakuRlnConfig(
|
getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "wakunode"),
|
||||||
epochSizeSec: 1,
|
index = MembershipIndex(1),
|
||||||
treePath: genTempPath("rln_tree", "wakunode"),
|
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
{.used.}
|
{.used.}
|
||||||
|
|
||||||
import
|
import
|
||||||
std/[sequtils, strformat, tempfiles],
|
std/[sequtils, strformat, tempfiles, osproc],
|
||||||
stew/byteutils,
|
stew/byteutils,
|
||||||
testutils/unittests,
|
testutils/unittests,
|
||||||
presto,
|
presto,
|
||||||
@ -24,7 +24,8 @@ import
|
|||||||
],
|
],
|
||||||
../testlib/wakucore,
|
../testlib/wakucore,
|
||||||
../testlib/wakunode,
|
../testlib/wakunode,
|
||||||
../resources/payloads
|
../resources/payloads,
|
||||||
|
../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain]
|
||||||
|
|
||||||
proc testWakuNode(): WakuNode =
|
proc testWakuNode(): WakuNode =
|
||||||
let
|
let
|
||||||
@ -36,6 +37,16 @@ proc testWakuNode(): WakuNode =
|
|||||||
newTestWakuNode(privkey, bindIp, port, some(extIp), some(port))
|
newTestWakuNode(privkey, bindIp, port, some(extIp), some(port))
|
||||||
|
|
||||||
suite "Waku v2 Rest API - Relay":
|
suite "Waku v2 Rest API - Relay":
|
||||||
|
var anvilProc {.threadVar.}: Process
|
||||||
|
var manager {.threadVar.}: OnchainGroupManager
|
||||||
|
|
||||||
|
setup:
|
||||||
|
anvilProc = runAnvil()
|
||||||
|
manager = waitFor setupOnchainGroupManager()
|
||||||
|
|
||||||
|
teardown:
|
||||||
|
stopAnvil(anvilProc)
|
||||||
|
|
||||||
asyncTest "Subscribe a node to an array of pubsub topics - POST /relay/v1/subscriptions":
|
asyncTest "Subscribe a node to an array of pubsub topics - POST /relay/v1/subscriptions":
|
||||||
# Given
|
# Given
|
||||||
let node = testWakuNode()
|
let node = testWakuNode()
|
||||||
@ -246,18 +257,33 @@ suite "Waku v2 Rest API - Relay":
|
|||||||
## "Relay API: publish and subscribe/unsubscribe":
|
## "Relay API: publish and subscribe/unsubscribe":
|
||||||
# Given
|
# Given
|
||||||
let node = testWakuNode()
|
let node = testWakuNode()
|
||||||
await node.start()
|
|
||||||
(await node.mountRelay()).isOkOr:
|
(await node.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
let wakuRlnConfig = WakuRlnConfig(
|
let wakuRlnConfig = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "waku_rest_relay_1"),
|
||||||
userMessageLimit: 20,
|
index = MembershipIndex(1),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_1"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node.mountRlnRelay(wakuRlnConfig)
|
await node.mountRlnRelay(wakuRlnConfig)
|
||||||
|
await node.start()
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials = generateCredentials(manager.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager.register(idCredentials, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated = waitFor manager.updateRoots()
|
||||||
|
debug "Updated root for node", rootUpdated
|
||||||
|
|
||||||
|
let proofRes = waitFor manager.fetchMerkleProofElements()
|
||||||
|
if proofRes.isErr():
|
||||||
|
assert false, "failed to fetch merkle proof: " & proofRes.error
|
||||||
|
manager.merkleProofCache = proofRes.get()
|
||||||
|
|
||||||
# RPC server setup
|
# RPC server setup
|
||||||
var restPort = Port(0)
|
var restPort = Port(0)
|
||||||
@ -484,20 +510,35 @@ suite "Waku v2 Rest API - Relay":
|
|||||||
## "Relay API: publish and subscribe/unsubscribe":
|
## "Relay API: publish and subscribe/unsubscribe":
|
||||||
# Given
|
# Given
|
||||||
let node = testWakuNode()
|
let node = testWakuNode()
|
||||||
await node.start()
|
|
||||||
(await node.mountRelay()).isOkOr:
|
(await node.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
require node.mountAutoSharding(1, 8).isOk
|
require node.mountAutoSharding(1, 8).isOk
|
||||||
|
|
||||||
let wakuRlnConfig = WakuRlnConfig(
|
let wakuRlnConfig = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "waku_rest_relay_1"),
|
||||||
userMessageLimit: 20,
|
index = MembershipIndex(1),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_1"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node.mountRlnRelay(wakuRlnConfig)
|
await node.mountRlnRelay(wakuRlnConfig)
|
||||||
|
await node.start()
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials = generateCredentials(manager.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager.register(idCredentials, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated = waitFor manager.updateRoots()
|
||||||
|
debug "Updated root for node", rootUpdated
|
||||||
|
|
||||||
|
let proofRes = waitFor manager.fetchMerkleProofElements()
|
||||||
|
if proofRes.isErr():
|
||||||
|
assert false, "failed to fetch merkle proof: " & proofRes.error
|
||||||
|
manager.merkleProofCache = proofRes.get()
|
||||||
|
|
||||||
# RPC server setup
|
# RPC server setup
|
||||||
var restPort = Port(0)
|
var restPort = Port(0)
|
||||||
@ -545,20 +586,35 @@ suite "Waku v2 Rest API - Relay":
|
|||||||
## "Relay API: publish and subscribe/unsubscribe":
|
## "Relay API: publish and subscribe/unsubscribe":
|
||||||
# Given
|
# Given
|
||||||
let node = testWakuNode()
|
let node = testWakuNode()
|
||||||
await node.start()
|
|
||||||
(await node.mountRelay()).isOkOr:
|
(await node.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
require node.mountAutoSharding(1, 8).isOk
|
require node.mountAutoSharding(1, 8).isOk
|
||||||
|
|
||||||
let wakuRlnConfig = WakuRlnConfig(
|
let wakuRlnConfig = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "waku_rest_relay_1"),
|
||||||
userMessageLimit: 20,
|
index = MembershipIndex(1),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_1"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node.mountRlnRelay(wakuRlnConfig)
|
await node.mountRlnRelay(wakuRlnConfig)
|
||||||
|
await node.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials = generateCredentials(manager.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager.register(idCredentials, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated = waitFor manager.updateRoots()
|
||||||
|
debug "Updated root for node", rootUpdated
|
||||||
|
|
||||||
|
let proofRes = waitFor manager.fetchMerkleProofElements()
|
||||||
|
if proofRes.isErr():
|
||||||
|
assert false, "failed to fetch merkle proof: " & proofRes.error
|
||||||
|
manager.merkleProofCache = proofRes.get()
|
||||||
|
|
||||||
# RPC server setup
|
# RPC server setup
|
||||||
var restPort = Port(0)
|
var restPort = Port(0)
|
||||||
@ -598,18 +654,33 @@ suite "Waku v2 Rest API - Relay":
|
|||||||
asyncTest "Post a message larger than maximum size - POST /relay/v1/messages/{topic}":
|
asyncTest "Post a message larger than maximum size - POST /relay/v1/messages/{topic}":
|
||||||
# Given
|
# Given
|
||||||
let node = testWakuNode()
|
let node = testWakuNode()
|
||||||
await node.start()
|
|
||||||
(await node.mountRelay()).isOkOr:
|
(await node.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
let wakuRlnConfig = WakuRlnConfig(
|
let wakuRlnConfig = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "waku_rest_relay_1"),
|
||||||
userMessageLimit: 20,
|
index = MembershipIndex(1),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_1"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node.mountRlnRelay(wakuRlnConfig)
|
await node.mountRlnRelay(wakuRlnConfig)
|
||||||
|
await node.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials = generateCredentials(manager.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager.register(idCredentials, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated = waitFor manager.updateRoots()
|
||||||
|
debug "Updated root for node", rootUpdated
|
||||||
|
|
||||||
|
let proofRes = waitFor manager.fetchMerkleProofElements()
|
||||||
|
if proofRes.isErr():
|
||||||
|
assert false, "failed to fetch merkle proof: " & proofRes.error
|
||||||
|
manager.merkleProofCache = proofRes.get()
|
||||||
|
|
||||||
# RPC server setup
|
# RPC server setup
|
||||||
var restPort = Port(0)
|
var restPort = Port(0)
|
||||||
@ -660,20 +731,35 @@ suite "Waku v2 Rest API - Relay":
|
|||||||
asyncTest "Post a message larger than maximum size - POST /relay/v1/auto/messages/{topic}":
|
asyncTest "Post a message larger than maximum size - POST /relay/v1/auto/messages/{topic}":
|
||||||
# Given
|
# Given
|
||||||
let node = testWakuNode()
|
let node = testWakuNode()
|
||||||
await node.start()
|
|
||||||
(await node.mountRelay()).isOkOr:
|
(await node.mountRelay()).isOkOr:
|
||||||
assert false, "Failed to mount relay"
|
assert false, "Failed to mount relay"
|
||||||
require node.mountAutoSharding(1, 8).isOk
|
require node.mountAutoSharding(1, 8).isOk
|
||||||
|
|
||||||
let wakuRlnConfig = WakuRlnConfig(
|
let wakuRlnConfig = getWakuRlnConfig(
|
||||||
dynamic: false,
|
manager = manager,
|
||||||
credIndex: some(1.uint),
|
treePath = genTempPath("rln_tree", "waku_rest_relay_1"),
|
||||||
userMessageLimit: 20,
|
index = MembershipIndex(1),
|
||||||
epochSizeSec: 1,
|
|
||||||
treePath: genTempPath("rln_tree", "wakunode_1"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
await node.mountRlnRelay(wakuRlnConfig)
|
await node.mountRlnRelay(wakuRlnConfig)
|
||||||
|
await node.start()
|
||||||
|
|
||||||
|
# Registration is mandatory before sending messages with rln-relay
|
||||||
|
let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
|
||||||
|
let idCredentials = generateCredentials(manager.rlnInstance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
waitFor manager.register(idCredentials, UserMessageLimit(20))
|
||||||
|
except Exception, CatchableError:
|
||||||
|
assert false,
|
||||||
|
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
let rootUpdated = waitFor manager.updateRoots()
|
||||||
|
debug "Updated root for node", rootUpdated
|
||||||
|
|
||||||
|
let proofRes = waitFor manager.fetchMerkleProofElements()
|
||||||
|
if proofRes.isErr():
|
||||||
|
assert false, "failed to fetch merkle proof: " & proofRes.error
|
||||||
|
manager.merkleProofCache = proofRes.get()
|
||||||
|
|
||||||
# RPC server setup
|
# RPC server setup
|
||||||
var restPort = Port(0)
|
var restPort = Port(0)
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ proc doInspectRlnDb*(conf: InspectRlnDbConf) =
trace "configuration", conf = $conf

# 2. initialize rlnInstance
- let rlnInstance = createRLNInstance(d = 20, tree_path = conf.treePath).valueOr:
+ let rlnInstance = createRLNInstance(d = 20).valueOr:
  error "failure while creating RLN instance", error
  quit(1)
@ -38,29 +38,4 @@ proc doInspectRlnDb*(conf: InspectRlnDbConf) =
contractAddress = metadata.contractAddress,
validRoots = metadata.validRoots.mapIt(it.inHex())

var index: uint = 0
var hits: uint = 0
var zeroLeafIndices: seq[uint] = @[]
var assumeEmptyAfter: uint = 10
while true:
let leaf = rlnInstance.getMember(index).valueOr:
error "failure while getting RLN leaf", error
quit(1)

if leaf.inHex() == "0000000000000000000000000000000000000000000000000000000000000000":
zeroLeafIndices.add(index)
hits = hits + 1
else:
hits = 0

if hits > assumeEmptyAfter:
info "reached end of RLN tree", index = index - assumeEmptyAfter
# remove zeroLeafIndices that are not at the end of the tree
zeroLeafIndices = zeroLeafIndices.filterIt(it < index - assumeEmptyAfter)
break

index = index + 1

info "zero leaf indices", zeroLeafIndices

quit(0)
@ -31,9 +31,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
trace "configuration", conf = $conf

# 2. initialize rlnInstance
let rlnInstanceRes = createRLNInstance(
let rlnInstanceRes = createRLNInstance(d = 20)
d = 20, tree_path = genTempPath("rln_tree", "rln_keystore_generator")
)
if rlnInstanceRes.isErr():
error "failure while creating RLN instance", error = rlnInstanceRes.error
quit(1)
@ -116,7 +116,7 @@ task wakunode2, "Build Waku v2 cli node":

task benchmarks, "Some benchmarks":
let name = "benchmarks"
buildBinary name, "apps/benchmarks/"
buildBinary name, "apps/benchmarks/", "-p:../.."

task wakucanary, "Build waku-canary tool":
let name = "wakucanary"
File diff suppressed because it is too large
@ -1,3 +1,3 @@
import group_manager/[static, on_chain]
import group_manager/[on_chain]

export static, on_chain
export on_chain
@ -119,58 +119,23 @@ method stop*(g: GroupManager): Future[void] {.base, async.} =
method onWithdraw*(g: GroupManager, cb: OnWithdrawCallback) {.base, gcsafe.} =
g.withdrawCb = some(cb)

proc slideRootQueue*(
rootQueue: var Deque[MerkleNode], root: MerkleNode
): seq[MerkleNode] =
## updates the root queue with the latest root and pops the oldest one when the capacity of `AcceptableRootWindowSize` is reached
let overflowCount = rootQueue.len - AcceptableRootWindowSize + 1
var overflowedRoots = newSeq[MerkleNode]()
if overflowCount > 0:
# Delete the oldest `overflowCount` roots in the deque (index 0..`overflowCount`)
# insert into overflowedRoots seq and return
for i in 0 ..< overflowCount:
overFlowedRoots.add(rootQueue.popFirst())
# Push the next root into the queue
rootQueue.addLast(root)
return overFlowedRoots
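A note on the window arithmetic in the removed `slideRootQueue` helper above: once the deque already holds `AcceptableRootWindowSize` roots, every new root evicts the oldest ones. The sketch below illustrates that behaviour with a plain `Deque[int]` and a stand-in window size of 3; it is illustrative only and does not use the module's `MerkleNode` type or constants.

# --- illustrative sketch (not part of the diff) ---
import std/deques

const WindowSize = 3 # stand-in for AcceptableRootWindowSize

proc slide(q: var Deque[int], root: int): seq[int] =
  # pop the oldest roots once the window is full, then push the new one
  let overflowCount = q.len - WindowSize + 1
  if overflowCount > 0:
    for _ in 0 ..< overflowCount:
      result.add(q.popFirst())
  q.addLast(root)

var roots = [1, 2, 3, 4].toDeque
echo slide(roots, 5) # -> @[1, 2]; the deque now holds 3, 4, 5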
method indexOfRoot*(
g: GroupManager, root: MerkleNode
): int {.base, gcsafe, raises: [].} =
## returns the index of the root in the merkle tree.
## returns the index of the root in the merkle tree and returns -1 if the root is not found
## returns -1 if the root is not found
return g.validRoots.find(root)

method validateRoot*(
g: GroupManager, root: MerkleNode
): bool {.base, gcsafe, raises: [].} =
## validates the root against the valid roots queue
if g.indexOfRoot(root) >= 0:
return g.indexOfRoot(root) >= 0
return true
return false

template slideRootQueue*(g: GroupManager): untyped =
let rootRes = g.rlnInstance.getMerkleRoot()
if rootRes.isErr():
raise newException(ValueError, "failed to get merkle root")
let rootAfterUpdate = rootRes.get()

var rootBuffer: Deque[MerkleNode]
let overflowedRoots = slideRootQueue(g.validRoots, rootAfterUpdate)
if overflowedRoots.len > 0:
for root in overflowedRoots:
discard rootBuffer.slideRootQueue(root)
rootBuffer

method verifyProof*(
g: GroupManager, input: openArray[byte], proof: RateLimitProof
g: GroupManager, input: seq[byte], proof: RateLimitProof
): GroupManagerResult[bool] {.base, gcsafe, raises: [].} =
## verifies the proof against the input and the current merkle root
## Dummy implementation for verifyProof
let proofVerifyRes =
return err("verifyProof is not implemented")
g.rlnInstance.proofVerify(input, proof, g.validRoots.items().toSeq())
if proofVerifyRes.isErr():
return err("proof verification failed: " & $proofVerifyRes.error())
return ok(proofVerifyRes.value())

method generateProof*(
g: GroupManager,
@ -179,29 +144,8 @@ method generateProof*(
messageId: MessageId,
rlnIdentifier = DefaultRlnIdentifier,
): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
var lastProcessedEpoch {.global.}: Epoch
## Dummy implementation for generateProof
## generates a proof for the given data and epoch
return err("generateProof is not implemented")
## the proof is generated using the current merkle root
if g.idCredentials.isNone():
return err("identity credentials are not set")
if g.membershipIndex.isNone():
return err("membership index is not set")
if g.userMessageLimit.isNone():
return err("user message limit is not set")

waku_rln_proof_generation_duration_seconds.nanosecondTime:
let proof = proofGen(
rlnInstance = g.rlnInstance,
data = data,
membership = g.idCredentials.get(),
index = g.membershipIndex.get(),
epoch = epoch,
userMessageLimit = g.userMessageLimit.get(),
messageId = messageId,
).valueOr:
return err("proof generation failed: " & $error)

return ok(proof)

method isReady*(g: GroupManager): Future[bool] {.base, async.} =
raise newException(
@ -173,11 +173,6 @@ template retryWrapper(
retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction):
body

method validateRoot*(g: OnchainGroupManager, root: MerkleNode): bool =
if g.validRoots.find(root) >= 0:
return true
return false

proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} =
let rootRes = await g.fetchMerkleRoot()
if rootRes.isErr():
@ -200,21 +195,17 @@ proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} =
proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError]).} =
try:
initializedGuard(g)
let ethRpc = g.ethRpc.get()
let wakuRlnContract = g.wakuRlnContract.get()

const rpcDelay = 5.seconds

while true:
await sleepAsync(rpcDelay)
let rootUpdated = await g.updateRoots()

if rootUpdated:
if g.membershipIndex.isNone():
let proofResult = await g.fetchMerkleProofElements()
error "membershipIndex is not set; skipping proof update"
if proofResult.isErr():
error "Failed to fetch Merkle proof", error = proofResult.error
else:
let proofResult = await g.fetchMerkleProofElements()
if proofResult.isErr():
error "Failed to fetch Merkle proof", error = proofResult.error
g.merkleProofCache = proofResult.get()

let nextFreeIndex = await g.fetchNextFreeIndex()
@ -226,8 +217,6 @@ proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError

let memberCount = cast[int64](nextFreeIndex.get())
waku_rln_number_registered_memberships.set(float64(memberCount))

await sleepAsync(rpcDelay)
except CatchableError:
error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg()
@ -457,7 +446,7 @@ method generateProof*(
nullifier: nullifier,
)

debug "Proof generated successfully"
debug "Proof generated successfully", proof = output

waku_rln_remaining_proofs_per_epoch.dec()
waku_rln_total_generated_proofs.inc()
@ -493,7 +482,7 @@ method verifyProof*(
if not ffiOk:
return err("could not verify the proof")
else:
trace "Proof verified successfully !"
debug "Proof verified successfully"

return ok(validProof)
@ -1,3 +0,0 @@
import static/group_manager

export group_manager
@ -1,144 +0,0 @@
import ../group_manager_base, ../../rln, std/sequtils, ../../constants

export group_manager_base

type StaticGroupManager* = ref object of GroupManager
groupKeys*: seq[IdentityCredential]
groupSize*: uint

template initializedGuard*(g: StaticGroupManager): untyped =
if not g.initialized:
raise newException(ValueError, "StaticGroupManager is not initialized")

proc resultifiedInitGuard(g: StaticGroupManager): GroupManagerResult[void] =
try:
initializedGuard(g)
return ok()
except CatchableError:
return err("StaticGroupManager is not initialized")

method init*(g: StaticGroupManager): Future[GroupManagerResult[void]] {.async.} =
let
groupSize = g.groupSize
groupKeys = g.groupKeys
membershipIndex =
if g.membershipIndex.isSome():
g.membershipIndex.get()
else:
return err("membershipIndex is not set")

if membershipIndex < MembershipIndex(0) or
membershipIndex >= MembershipIndex(groupSize):
return err(
"Invalid membership index. Must be within 0 and " & $(groupSize - 1) & "but was " &
$membershipIndex
)
g.userMessageLimit = some(DefaultUserMessageLimit)

g.idCredentials = some(groupKeys[membershipIndex])
# Seed the received commitments into the merkle tree
let rateCommitments = groupKeys.mapIt(
RateCommitment(
idCommitment: it.idCommitment, userMessageLimit: g.userMessageLimit.get()
)
)
let leaves = rateCommitments.toLeaves().valueOr:
return err("Failed to convert rate commitments to leaves: " & $error)
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, leaves)

discard g.slideRootQueue()

g.latestIndex += MembershipIndex(groupKeys.len - 1)

g.initialized = true

return ok()

method startGroupSync*(
g: StaticGroupManager
): Future[GroupManagerResult[void]] {.async.} =
?g.resultifiedInitGuard()
# No-op
return ok()

method register*(
g: StaticGroupManager, rateCommitment: RateCommitment
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)

let leaf = rateCommitment.toLeaf().get()

await g.registerBatch(@[leaf])

method registerBatch*(
g: StaticGroupManager, rateCommitments: seq[RawRateCommitment]
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)

let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, rateCommitments)
if not membersInserted:
raise newException(ValueError, "Failed to insert members into the merkle tree")

if g.registerCb.isSome():
var memberSeq = newSeq[Membership]()
for i in 0 ..< rateCommitments.len:
memberSeq.add(
Membership(
rateCommitment: rateCommitments[i],
index: g.latestIndex + MembershipIndex(i) + 1,
)
)
await g.registerCb.get()(memberSeq)

discard g.slideRootQueue()

g.latestIndex += MembershipIndex(rateCommitments.len)
return

method withdraw*(
g: StaticGroupManager, idSecretHash: IdentitySecretHash
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)

let groupKeys = g.groupKeys

for i in 0 ..< groupKeys.len:
if groupKeys[i].idSecretHash == idSecretHash:
let idCommitment = groupKeys[i].idCommitment
let index = MembershipIndex(i)
let rateCommitment = RateCommitment(
idCommitment: idCommitment, userMessageLimit: g.userMessageLimit.get()
).toLeaf().valueOr:
raise newException(ValueError, "Failed to parse rateCommitment")
let memberRemoved = g.rlnInstance.removeMember(index)
if not memberRemoved:
raise newException(ValueError, "Failed to remove member from the merkle tree")

if g.withdrawCb.isSome():
let withdrawCb = g.withdrawCb.get()
await withdrawCb(@[Membership(rateCommitment: rateCommitment, index: index)])

return

method withdrawBatch*(
g: StaticGroupManager, idSecretHashes: seq[IdentitySecretHash]
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)

# call withdraw on each idSecretHash
for idSecretHash in idSecretHashes:
await g.withdraw(idSecretHash)

method onRegister*(g: StaticGroupManager, cb: OnRegisterCallback) {.gcsafe.} =
g.registerCb = some(cb)

method onWithdraw*(g: StaticGroupManager, cb: OnWithdrawCallback) {.gcsafe.} =
g.withdrawCb = some(cb)

method stop*(g: StaticGroupManager): Future[void] {.async.} =
initializedGuard(g)
# No-op

method isReady*(g: StaticGroupManager): Future[bool] {.async.} =
initializedGuard(g)
return true
@ -21,81 +21,6 @@ proc toBuffer*(x: openArray[byte]): Buffer =
## RLN Zerokit module APIs
######################################################################

#------------------------------ Merkle Tree operations -----------------------------------------
proc update_next_member*(
ctx: ptr RLN, input_buffer: ptr Buffer
): bool {.importc: "set_next_leaf".}

## adds an element in the merkle tree to the next available position
## input_buffer points to the id commitment byte seq
## the return bool value indicates the success or failure of the operation

proc delete_member*(ctx: ptr RLN, index: uint): bool {.importc: "delete_leaf".}
## index is the position of the id commitment key to be deleted from the tree
## the deleted id commitment key is replaced with a zero leaf
## the return bool value indicates the success or failure of the operation

proc get_root*(ctx: ptr RLN, output_buffer: ptr Buffer): bool {.importc: "get_root".}
## get_root populates the passed pointer output_buffer with the current tree root
## the output_buffer holds the Merkle tree root of size 32 bytes
## the return bool value indicates the success or failure of the operation

proc get_merkle_proof*(
ctx: ptr RLN, index: uint, output_buffer: ptr Buffer
): bool {.importc: "get_proof".}

## populates the passed pointer output_buffer with the merkle proof for the leaf at position index in the tree stored by ctx
## the output_buffer holds a serialized Merkle proof (vector of 32 bytes nodes)
## the return bool value indicates the success or failure of the operation

proc set_leaf*(
ctx: ptr RLN, index: uint, input_buffer: ptr Buffer
): bool {.importc: "set_leaf".}

## sets the leaf at position index in the tree stored by ctx to the value passed by input_buffer
## the input_buffer holds a serialized leaf of 32 bytes
## the return bool value indicates the success or failure of the operation

proc get_leaf*(
ctx: ptr RLN, index: uint, output_buffer: ptr Buffer
): bool {.importc: "get_leaf".}

## gets the leaf at position index in the tree stored by ctx
## the output_buffer holds a serialized leaf of 32 bytes
## the return bool value indicates the success or failure of the operation

proc leaves_set*(ctx: ptr RLN): uint {.importc: "leaves_set".}
## gets the number of leaves set in the tree stored by ctx
## the return uint value indicates the number of leaves set in the tree

proc init_tree_with_leaves*(
ctx: ptr RLN, input_buffer: ptr Buffer
): bool {.importc: "init_tree_with_leaves".}

## sets multiple leaves in the tree stored by ctx to the value passed by input_buffer
## the input_buffer holds a serialized vector of leaves (32 bytes each)
## the input_buffer size is prefixed by a 8 bytes integer indicating the number of leaves
## leaves are set one after each other starting from index 0
## the return bool value indicates the success or failure of the operation

proc atomic_write*(
ctx: ptr RLN, index: uint, leaves_buffer: ptr Buffer, indices_buffer: ptr Buffer
): bool {.importc: "atomic_operation".}

## sets multiple leaves, and zeroes out indices in the tree stored by ctx to the value passed by input_buffer
## the leaves_buffer holds a serialized vector of leaves (32 bytes each)
## the leaves_buffer size is prefixed by a 8 bytes integer indicating the number of leaves
## the indices_bufffer holds a serialized vector of indices (8 bytes each)
## the indices_buffer size is prefixed by a 8 bytes integer indicating the number of indices
## leaves are set one after each other starting from index `index`
## the return bool value indicates the success or failure of the operation

proc reset_tree*(ctx: ptr RLN, tree_height: uint): bool {.importc: "set_tree".}
## resets the tree stored by ctx to the empty tree (all leaves set to 0) of height tree_height
## the return bool value indicates the success or failure of the operation

#----------------------------------------------------------------------------------------------

#-------------------------------- zkSNARKs operations -----------------------------------------
proc key_gen*(
ctx: ptr RLN, output_buffer: ptr Buffer
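For orientation, the leaves and indices buffers described in the removed FFI comments above are length-prefixed: an 8-byte count followed by fixed-size elements (32-byte leaves, 8-byte indices). The sketch below shows one plausible encoding of such a leaves buffer; the byte order of the count is an assumption here, since the module's `serialize` helpers are not part of this hunk.

# --- illustrative sketch (not part of the diff) ---
# 8-byte count prefix (little-endian assumed) followed by 32-byte leaves.
proc encodeLeaves(leaves: seq[array[32, byte]]): seq[byte] =
  result = newSeq[byte](8)
  let n = uint64(leaves.len)
  for i in 0 ..< 8:
    result[i] = byte((n shr (8 * i)) and 0xff'u64)
  for leaf in leaves:
    result.add(leaf)

var leaf: array[32, byte]
echo encodeLeaves(@[leaf]).len # 8 + 32 = 40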
@ -98,7 +98,7 @@ proc createRLNInstanceLocal(
mode: "high_throughput",
compression: false,
flush_every_ms: 500,
path: if tree_path != "": tree_path else: DefaultRlnTreePath,
path: tree_path,
),
)
@ -201,214 +201,6 @@ proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
)
)

proc proofGen*(
rlnInstance: ptr RLN,
data: openArray[byte],
membership: IdentityCredential,
userMessageLimit: UserMessageLimit,
messageId: MessageId,
index: MembershipIndex,
epoch: Epoch,
rlnIdentifier = DefaultRlnIdentifier,
): RateLimitProofResult =
# obtain the external nullifier
let externalNullifierRes = poseidon(@[@(epoch), @(rlnIdentifier)])

if externalNullifierRes.isErr():
return err("could not construct the external nullifier")

# serialize inputs
let serializedInputs = serialize(
idSecretHash = membership.idSecretHash,
memIndex = index,
userMessageLimit = userMessageLimit,
messageId = messageId,
externalNullifier = externalNullifierRes.get(),
msg = data,
)
var inputBuffer = toBuffer(serializedInputs)

debug "input buffer ", inputBuffer = repr(inputBuffer)

# generate the proof
var proof: Buffer
let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
# check whether the generate_proof call is done successfully
if not proofIsSuccessful:
return err("could not generate the proof")

var proofValue = cast[ptr array[320, byte]](proof.`ptr`)
let proofBytes: array[320, byte] = proofValue[]
debug "proof content", proofHex = proofValue[].toHex

## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]

let
proofOffset = 128
rootOffset = proofOffset + 32
externalNullifierOffset = rootOffset + 32
shareXOffset = externalNullifierOffset + 32
shareYOffset = shareXOffset + 32
nullifierOffset = shareYOffset + 32

var
zkproof: ZKSNARK
proofRoot, shareX, shareY: MerkleNode
externalNullifier: ExternalNullifier
nullifier: Nullifier

discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1])
discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1])
discard
externalNullifier.copyFrom(proofBytes[rootOffset .. externalNullifierOffset - 1])
discard shareX.copyFrom(proofBytes[externalNullifierOffset .. shareXOffset - 1])
discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1])
discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1])

let output = RateLimitProof(
proof: zkproof,
merkleRoot: proofRoot,
externalNullifier: externalNullifier,
epoch: epoch,
rlnIdentifier: rlnIdentifier,
shareX: shareX,
shareY: shareY,
nullifier: nullifier,
)
return ok(output)
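The offsets used by the removed parser above follow directly from the layout comment `[ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]`: the parsed fields occupy the first 288 bytes of the 320-byte buffer returned by `generate_proof`. A quick, self-contained check of that arithmetic:

# --- illustrative sketch (not part of the diff) ---
const
  proofOffset = 128
  rootOffset = proofOffset + 32               # 160
  externalNullifierOffset = rootOffset + 32   # 192
  shareXOffset = externalNullifierOffset + 32 # 224
  shareYOffset = shareXOffset + 32            # 256
  nullifierOffset = shareYOffset + 32         # 288

static:
  doAssert nullifierOffset == 288 # parsed prefix of the 320-byte proof buffer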
# validRoots should contain a sequence of roots in the acceptable windows.
# As default, it is set to an empty sequence of roots. This implies that the validity check for the proof's root is skipped
proc proofVerify*(
rlnInstance: ptr RLN,
data: openArray[byte],
proof: RateLimitProof,
validRoots: seq[MerkleNode] = @[],
): RlnRelayResult[bool] =
## verifies the proof, returns an error if the proof verification fails
## returns true if the proof is valid
var normalizedProof = proof
# when we do this, we ensure that we compute the proof for the derived value
# of the externalNullifier. The proof verification will fail if a malicious peer
# attaches invalid epoch+rlnidentifier pair
normalizedProof.externalNullifier = poseidon(
@[@(proof.epoch), @(proof.rlnIdentifier)]
).valueOr:
return err("could not construct the external nullifier")
var
proofBytes = serialize(normalizedProof, data)
proofBuffer = proofBytes.toBuffer()
validProof: bool
rootsBytes = serialize(validRoots)
rootsBuffer = rootsBytes.toBuffer()

trace "serialized proof", proof = byteutils.toHex(proofBytes)

let verifyIsSuccessful =
verify_with_roots(rlnInstance, addr proofBuffer, addr rootsBuffer, addr validProof)
if not verifyIsSuccessful:
# something went wrong in verification call
warn "could not verify validity of the proof", proof = proof
return err("could not verify the proof")

if not validProof:
return ok(false)
else:
return ok(true)

proc insertMember*(rlnInstance: ptr RLN, idComm: IDCommitment): bool =
## inserts a member to the tree
## returns true if the member is inserted successfully
## returns false if the member could not be inserted
var pkBuffer = toBuffer(idComm)
let pkBufferPtr = addr pkBuffer

# add the member to the tree
let memberAdded = update_next_member(rlnInstance, pkBufferPtr)
return memberAdded

proc getMember*(
rlnInstance: ptr RLN, index: MembershipIndex
): RlnRelayResult[IDCommitment] =
## returns the member at the given index
## returns an error if the index is out of bounds
## returns the member if the index is valid
var
idCommitment {.noinit.}: Buffer = Buffer()
idCommitmentPtr = addr(idCommitment)
memberRetrieved = get_leaf(rlnInstance, index, idCommitmentPtr)

if not memberRetrieved:
return err("could not get the member")

if not idCommitment.len == 32:
return err("wrong output size")

let idCommitmentValue = (cast[ptr array[32, byte]](idCommitment.`ptr`))[]

return ok(@idCommitmentValue)

proc atomicWrite*(
rlnInstance: ptr RLN,
index = none(MembershipIndex),
idComms = newSeq[IDCommitment](),
toRemoveIndices = newSeq[MembershipIndex](),
): bool =
## Insert multiple members i.e., identity commitments, and remove multiple members
## returns true if the operation is successful
## returns false if the operation fails

let startIndex =
if index.isNone():
MembershipIndex(0)
else:
index.get()

# serialize the idComms
let idCommsBytes = serialize(idComms)
var idCommsBuffer = idCommsBytes.toBuffer()
let idCommsBufferPtr = addr idCommsBuffer

# serialize the toRemoveIndices
let indicesBytes = serialize(toRemoveIndices)
var indicesBuffer = indicesBytes.toBuffer()
let indicesBufferPtr = addr indicesBuffer

let operationSuccess =
atomic_write(rlnInstance, startIndex, idCommsBufferPtr, indicesBufferPtr)
return operationSuccess

proc insertMembers*(
rlnInstance: ptr RLN, index: MembershipIndex, idComms: seq[IDCommitment]
): bool =
## Insert multiple members i.e., identity commitments
## returns true if the insertion is successful
## returns false if any of the insertions fails
## Note: This proc is atomic, i.e., if any of the insertions fails, all the previous insertions are rolled back

return atomicWrite(rlnInstance, some(index), idComms)

proc removeMember*(rlnInstance: ptr RLN, index: MembershipIndex): bool =
let deletionSuccess = delete_member(rlnInstance, index)
return deletionSuccess

proc removeMembers*(rlnInstance: ptr RLN, indices: seq[MembershipIndex]): bool =
return atomicWrite(rlnInstance, idComms = @[], toRemoveIndices = indices)

proc getMerkleRoot*(rlnInstance: ptr RLN): MerkleNodeResult =
# read the Merkle Tree root after insertion
var
root {.noinit.}: Buffer = Buffer()
rootPtr = addr(root)
getRootSuccessful = getRoot(rlnInstance, rootPtr)
if not getRootSuccessful:
return err("could not get the root")
if not root.len == 32:
return err("wrong output size")

var rootValue = cast[ptr MerkleNode](root.`ptr`)[]
return ok(rootValue)

type RlnMetadata* = object
lastProcessedBlock*: uint64
chainId*: UInt256
@ -49,45 +49,11 @@ type RlnRelayConf* = object of RootObj
treePath*: string
epochSizeSec*: uint64
userMessageLimit*: uint64
ethPrivateKey*: Option[string]

type WakuRlnConfig* = object of RlnRelayConf
onFatalErrorAction*: OnFatalErrorHandler

proc createMembershipList*(
rln: ptr RLN, n: int
): RlnRelayResult[(seq[RawMembershipCredentials], string)] =
## createMembershipList produces a sequence of identity credentials in the form of (identity trapdoor, identity nullifier, identity secret hash, id commitment) in the hexadecimal format
## this proc also returns the root of a Merkle tree constructed out of the identity commitment keys of the generated list
## the output of this proc is used to initialize a static group keys (to test waku-rln-relay in the off-chain mode)
## Returns an error if it cannot create the membership list

var output = newSeq[RawMembershipCredentials]()
var idCommitments = newSeq[IDCommitment]()

for i in 0 .. n - 1:
# generate an identity credential
let idCredentialRes = rln.membershipKeyGen()
if idCredentialRes.isErr():
return
err("could not generate an identity credential: " & idCredentialRes.error())
let idCredential = idCredentialRes.get()
let idTuple = (
idCredential.idTrapdoor.inHex(),
idCredential.idNullifier.inHex(),
idCredential.idSecretHash.inHex(),
idCredential.idCommitment.inHex(),
)
output.add(idTuple)
idCommitments.add(idCredential.idCommitment)

# Insert members into tree
let membersAdded = rln.insertMembers(0, idCommitments)
if not membersAdded:
return err("could not insert members into the tree")

let root = rln.getMerkleRoot().value().inHex()
return ok((output, root))

type WakuRLNRelay* = ref object of RootObj
# the log of nullifiers and Shamir shares of the past messages grouped per epoch
nullifierLog*: OrderedTable[Epoch, Table[Nullifier, ProofMetadata]]
@ -176,7 +142,6 @@ proc updateLog*(
err("the epoch was not found: " & getCurrentExceptionMsg()) # should never happen

proc getCurrentEpoch*(rlnPeer: WakuRLNRelay): Epoch =
## gets the current rln Epoch time
return rlnPeer.calcEpoch(epochTime())

proc absDiff*(e1, e2: Epoch): uint64 =
@ -194,6 +159,16 @@ proc absDiff*(e1, e2: Epoch): uint64 =
else:
return epoch2 - epoch1

proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] =
## it is a utility proc that prepares the `data` parameter of the proof generation procedure i.e., `proofGen` that resides in the current module
## it extracts the `contentTopic`, `timestamp` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence

let
contentTopicBytes = toBytes(wakumessage.contentTopic)
timestampBytes = toBytes(wakumessage.timestamp.uint64)
output = concat(wakumessage.payload, contentTopicBytes, @(timestampBytes))
return output
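As a rough illustration of the signal layout produced by the `toRLNSignal` proc added above (payload, then content-topic bytes, then the 8-byte timestamp), here is a sketch using plain byte sequences instead of a real `WakuMessage`; the sample values are made up and the timestamp encoding is left as zeroes.

# --- illustrative sketch (not part of the diff) ---
import std/sequtils

let
  payload = @[byte 1, 2, 3]
  contentTopicBytes = "/waku/2/default-content/proto".mapIt(byte(it))
  timestampBytes = newSeq[byte](8) # stands in for toBytes(timestamp.uint64)
  signal = concat(payload, contentTopicBytes, timestampBytes)

echo signal.len # 3 + 29 + 8 = 40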
proc validateMessage*(
rlnPeer: WakuRLNRelay, msg: WakuMessage
): MessageValidationResult =
@ -251,7 +226,8 @@ proc validateMessage*(

waku_rln_proof_verification_total.inc()
waku_rln_proof_verification_duration_seconds.nanosecondTime:
let proofVerificationRes = rlnPeer.groupManager.verifyProof(input, proof)
let proofVerificationRes =
rlnPeer.groupManager.verifyProof(msg.toRLNSignal(), proof)

if proofVerificationRes.isErr():
waku_rln_errors_total.inc(labelValues = ["proof_verification"])
@ -309,16 +285,6 @@ proc validateMessageAndUpdateLog*(

return isValidMessage

proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] =
## it is a utility proc that prepares the `data` parameter of the proof generation procedure i.e., `proofGen` that resides in the current module
## it extracts the `contentTopic`, `timestamp` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence

let
contentTopicBytes = toBytes(wakumessage.contentTopic)
timestampBytes = toBytes(wakumessage.timestamp.uint64)
output = concat(wakumessage.payload, contentTopicBytes, @(timestampBytes))
return output

proc appendRLNProof*(
rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64
): RlnRelayResult[void] =
@ -445,38 +411,25 @@ proc mount(
let rlnInstance = createRLNInstance(tree_path = conf.treePath).valueOr:
return err("could not create RLN instance: " & $error)

if not conf.dynamic:
let (rlnRelayCredPath, rlnRelayCredPassword) =
# static setup
if conf.creds.isSome:
let parsedGroupKeys = StaticGroupKeys.toIdentityCredentials().valueOr:
(some(conf.creds.get().path), some(conf.creds.get().password))
return err("could not parse static group keys: " & $error)
else:
(none(string), none(string))

groupManager = StaticGroupManager(
groupManager = OnchainGroupManager(
groupSize: StaticGroupSize,
userMessageLimit: some(conf.userMessageLimit),
groupKeys: parsedGroupKeys,
ethClientUrls: conf.ethClientUrls,
membershipIndex: conf.credIndex,
ethContractAddress: $conf.ethContractAddress,
rlnInstance: rlnInstance,
chainId: conf.chainId,
onFatalErrorAction: conf.onFatalErrorAction,
rlnInstance: rlnInstance,
)
registrationHandler: registrationHandler,
# we don't persist credentials in static mode since they exist in ./constants.nim
keystorePath: rlnRelayCredPath,
else:
keystorePassword: rlnRelayCredPassword,
let (rlnRelayCredPath, rlnRelayCredPassword) =
ethPrivateKey: conf.ethPrivateKey,
if conf.creds.isSome:
membershipIndex: conf.credIndex,
(some(conf.creds.get().path), some(conf.creds.get().password))
onFatalErrorAction: conf.onFatalErrorAction,
else:
)
(none(string), none(string))

groupManager = OnchainGroupManager(
userMessageLimit: some(conf.userMessageLimit),
ethClientUrls: conf.ethClientUrls,
ethContractAddress: $conf.ethContractAddress,
chainId: conf.chainId,
rlnInstance: rlnInstance,
registrationHandler: registrationHandler,
keystorePath: rlnRelayCredPath,
keystorePassword: rlnRelayCredPassword,
membershipIndex: conf.credIndex,
onFatalErrorAction: conf.onFatalErrorAction,
)

# Initialize the groupManager
(await groupManager.init()).isOkOr: