Mirror of https://github.com/logos-messaging/logos-messaging-nim.git
Synced 2026-01-04 06:53:12 +00:00

test(rln): Implement rln tests (#2639)

* Implement tests.
* Clean up the code.

This commit is contained in:
parent 1ce87c49a8
commit 1ab665ce2c

@@ -1,26 +1,107 @@
 {.used.}

 import
-  std/[sequtils, tempfiles],
-  stew/byteutils,
+  std/[tempfiles, strutils, options],
   stew/shims/net as stewNet,
+  stew/results,
   testutils/unittests,
   chronos,
   libp2p/switch,
-  libp2p/protocols/pubsub/pubsub
+  libp2p/protocols/pubsub/pubsub,
+  eth/keys

 from std/times import epochTime

 import
-  waku/[node/waku_node, node/peer_manager, waku_core, waku_node, waku_rln_relay],
+  ../../../waku/[
+    node/waku_node,
+    node/peer_manager,
+    waku_core,
+    waku_node,
+    common/error_handling,
+    waku_rln_relay,
+    waku_rln_relay/rln,
+    waku_rln_relay/protocol_types,
+    waku_keystore/keystore,
+  ],
   ../waku_store/store_utils,
   ../waku_archive/archive_utils,
-  ../waku_relay/utils,
-  ../waku_rln_relay/test_rln_group_manager_onchain,
-  ../testlib/[wakucore, wakunode, testasync, futures],
-  ../resources/payloads
+  ../testlib/[wakucore, wakunode, testasync, futures, common, assertions],
+  ../resources/payloads,
+  ../waku_rln_relay/[utils_static, utils_onchain]

-suite "Waku RlnRelay - End to End":
+from ../../waku/waku_noise/noise_utils import randomSeqByte
+
+proc buildRandomIdentityCredentials(): IdentityCredential =
+  # We generate a random identity credential (inter-value constrains are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
+  let
+    idTrapdoor = randomSeqByte(rng[], 32)
+    idNullifier = randomSeqByte(rng[], 32)
+    idSecretHash = randomSeqByte(rng[], 32)
+    idCommitment = randomSeqByte(rng[], 32)
+
+  IdentityCredential(
+    idTrapdoor: idTrapdoor,
+    idNullifier: idNullifier,
+    idSecretHash: idSecretHash,
+    idCommitment: idCommitment,
+  )
+
+proc addMembershipCredentialsToKeystore(
+    credentials: IdentityCredential,
+    keystorePath: string,
+    appInfo: AppInfo,
+    rlnRelayEthContractAddress: string,
+    password: string,
+    membershipIndex: uint,
+): KeystoreResult[void] =
+  let
+    contract = MembershipContract(chainId: "0x539", address: rlnRelayEthContractAddress)
+    # contract = MembershipContract(chainId: "1337", address: rlnRelayEthContractAddress)
+    index = MembershipIndex(membershipIndex)
+    membershipCredential = KeystoreMembership(
+      membershipContract: contract, treeIndex: index, identityCredential: credentials
+    )
+
+  addMembershipCredentials(
+    path = keystorePath,
+    membership = membershipCredential,
+    password = password,
+    appInfo = appInfo,
+  )
+
+proc fatalErrorVoidHandler(errMsg: string) {.gcsafe, raises: [].} =
+  discard
+
+proc getWakuRlnConfigOnChain*(
+    keystorePath: string,
+    appInfo: AppInfo,
+    rlnRelayEthContractAddress: string,
+    password: string,
+    credIndex: uint,
+    fatalErrorHandler: Option[OnFatalErrorHandler] = none(OnFatalErrorHandler),
+    ethClientAddress: Option[string] = none(string),
+): WakuRlnConfig =
+  return WakuRlnConfig(
+    rlnRelayDynamic: true,
+    rlnRelayCredIndex: some(credIndex),
+    rlnRelayEthContractAddress: rlnRelayEthContractAddress,
+    rlnRelayEthClientAddress: ethClientAddress.get(EthClient),
+    rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $credIndex),
+    rlnEpochSizeSec: 1,
+    onFatalErrorAction: fatalErrorHandler.get(fatalErrorVoidHandler),
+    # If these are used, initialisation fails with "failed to mount WakuRlnRelay: could not initialize the group manager: the commitment does not have a membership"
+    rlnRelayCredPath: keystorePath,
+    rlnRelayCredPassword: password,
+  )
+
+proc setupRelayWithOnChainRln*(
+    node: WakuNode, pubsubTopics: seq[string], wakuRlnConfig: WakuRlnConfig
+) {.async.} =
+  await node.mountRelay(pubsubTopics)
+  await node.mountRlnRelay(wakuRlnConfig)
+
+suite "Waku RlnRelay - End to End - Static":
   var
     pubsubTopic {.threadvar.}: PubsubTopic
     contentTopic {.threadvar.}: ContentTopic
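
For orientation, a minimal sketch (not part of the diff) of how a test can drive the two new on-chain helpers above, assuming an Anvil instance and a deployed RLN contract address are available as in the OnChain suite added later in this commit; `server` and `contractAddress` are illustrative names:

    let
      keystorePath = genTempPath("rln_keystore", "sketch")
      wakuRlnConfig = getWakuRlnConfigOnChain(
        keystorePath, RlnAppInfo, contractAddress, "1234", 0
      )
    # Mounts Relay first, then RLN Relay configured against the contract
    await server.setupRelayWithOnChainRln(@[DefaultPubsubTopic], wakuRlnConfig)
    check server.wakuRlnRelay != nil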

@@ -61,7 +142,7 @@ suite "Waku RlnRelay - End to End":

     # When RlnRelay is mounted
     let catchRes = catch:
-      await server.setupRln(1)
+      await server.setupStaticRln(1)

     # Then Relay and RLN are not mounted,and the process fails
     check:

@@ -72,8 +153,8 @@ suite "Waku RlnRelay - End to End":

   asyncTest "Pubsub topics subscribed before mounting RlnRelay are added to it":
     # Given the node enables Relay and Rln while subscribing to a pubsub topic
-    await server.setupRelayWithRln(1.uint, @[pubsubTopic])
-    await client.setupRelayWithRln(2.uint, @[pubsubTopic])
+    await server.setupRelayWithStaticRln(1.uint, @[pubsubTopic])
+    await client.setupRelayWithStaticRln(2.uint, @[pubsubTopic])
     check:
       server.wakuRelay != nil
       server.wakuRlnRelay != nil

@@ -107,8 +188,8 @@ suite "Waku RlnRelay - End to End":

   asyncTest "Pubsub topics subscribed after mounting RlnRelay are added to it":
     # Given the node enables Relay and Rln without subscribing to a pubsub topic
-    await server.setupRelayWithRln(1.uint, @[])
-    await client.setupRelayWithRln(2.uint, @[])
+    await server.setupRelayWithStaticRln(1.uint, @[])
+    await client.setupRelayWithStaticRln(2.uint, @[])

     # And the nodes are connected
     await client.connectToNodes(@[serverRemotePeerInfo])

@@ -167,8 +248,8 @@ suite "Waku RlnRelay - End to End":
   suite "Analysis of Bandwith Limitations":
     asyncTest "Valid Payload Sizes":
       # Given the node enables Relay and Rln while subscribing to a pubsub topic
-      await server.setupRelayWithRln(1.uint, @[pubsubTopic])
-      await client.setupRelayWithRln(2.uint, @[pubsubTopic])
+      await server.setupRelayWithStaticRln(1.uint, @[pubsubTopic])
+      await client.setupRelayWithStaticRln(2.uint, @[pubsubTopic])

       # And the nodes are connected
       await client.connectToNodes(@[serverRemotePeerInfo])

@@ -261,8 +342,8 @@ suite "Waku RlnRelay - End to End":

   asyncTest "Invalid Payload Sizes":
     # Given the node enables Relay and Rln while subscribing to a pubsub topic
-    await server.setupRelayWithRln(1.uint, @[pubsubTopic])
-    await client.setupRelayWithRln(2.uint, @[pubsubTopic])
+    await server.setupRelayWithStaticRln(1.uint, @[pubsubTopic])
+    await client.setupRelayWithStaticRln(2.uint, @[pubsubTopic])

     # And the nodes are connected
     await client.connectToNodes(@[serverRemotePeerInfo])

@@ -302,3 +383,375 @@ suite "Waku RlnRelay - End to End":

       # Then the message is not relayed
       check not await completionFut.withTimeout(FUTURE_TIMEOUT_LONG)
+
+suite "Waku RlnRelay - End to End - OnChain":
+  let runAnvil {.used.} = runAnvil()
+
+  var
+    pubsubTopic {.threadvar.}: PubsubTopic
+    contentTopic {.threadvar.}: ContentTopic
+
+  var
+    server {.threadvar.}: WakuNode
+    client {.threadvar.}: WakuNode
+
+  var
+    serverRemotePeerInfo {.threadvar.}: RemotePeerInfo
+    clientPeerId {.threadvar.}: PeerId
+
+  asyncSetup:
+    pubsubTopic = DefaultPubsubTopic
+    contentTopic = DefaultContentTopic
+
+    let
+      serverKey = generateSecp256k1Key()
+      clientKey = generateSecp256k1Key()
+
+    server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+    client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+
+    await allFutures(server.start(), client.start())
+
+    serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
+    clientPeerId = client.switch.peerInfo.toRemotePeerInfo().peerId
+
+  asyncTeardown:
+    await allFutures(client.stop(), server.stop())
+
+  suite "Smart Contract Availability and Interaction":
+    asyncTest "Invalid format contract":
+      let
+        # One character missing
+        invalidContractAddress = "0x000000000000000000000000000000000000000"
+        keystorePath =
+          genTempPath("rln_keystore", "test_wakunode_relay_rln-no_valid_contract")
+        appInfo = RlnAppInfo
+        password = "1234"
+        wakuRlnConfig1 = getWakuRlnConfigOnChain(
+          keystorePath, appInfo, invalidContractAddress, password, 0
+        )
+        wakuRlnConfig2 = getWakuRlnConfigOnChain(
+          keystorePath, appInfo, invalidContractAddress, password, 1
+        )
+        idCredential = buildRandomIdentityCredentials()
+        persistRes = addMembershipCredentialsToKeystore(
+          idCredential, keystorePath, appInfo, invalidContractAddress, password, 1
+        )
+      assertResultOk(persistRes)
+
+      # Given the node enables Relay and Rln while subscribing to a pubsub topic
+      try:
+        await server.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1)
+        assert false, "Relay should fail mounting when using an invalid contract"
+      except CatchableError:
+        assert true
+
+      try:
+        await client.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2)
+        assert false, "Relay should fail mounting when using an invalid contract"
+      except CatchableError:
+        assert true
+
+    asyncTest "Unregistered contract":
+      # This is a very slow test due to the retries RLN does. Might take upwards of 1m-2m to finish.
+      let
+        invalidContractAddress = "0x0000000000000000000000000000000000000000"
+        keystorePath =
+          genTempPath("rln_keystore", "test_wakunode_relay_rln-no_valid_contract")
+        appInfo = RlnAppInfo
+        password = "1234"
+
+      # Connect to the eth client
+      discard await newWeb3(EthClient)
+
+      var serverErrorFuture = Future[string].new()
+      proc serverFatalErrorHandler(errMsg: string) {.gcsafe, closure, raises: [].} =
+        serverErrorFuture.complete(errMsg)
+
+      var clientErrorFuture = Future[string].new()
+      proc clientFatalErrorHandler(errMsg: string) {.gcsafe, closure, raises: [].} =
+        clientErrorFuture.complete(errMsg)
+
+      let
+        wakuRlnConfig1 = getWakuRlnConfigOnChain(
+          keystorePath,
+          appInfo,
+          invalidContractAddress,
+          password,
+          0,
+          some(serverFatalErrorHandler),
+        )
+        wakuRlnConfig2 = getWakuRlnConfigOnChain(
+          keystorePath,
+          appInfo,
+          invalidContractAddress,
+          password,
+          1,
+          some(clientFatalErrorHandler),
+        )
+
+      # Given the node enable Relay and Rln while subscribing to a pubsub topic.
+      # The withTimeout call is a workaround for the test not to terminate with an exception.
+      # However, it doesn't reduce the retries against the blockchain that the mounting rln process attempts (until it accepts failure).
+      # Note: These retries might be an unintended library issue.
+      discard await server
+      .setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1)
+      .withTimeout(FUTURE_TIMEOUT)
+      discard await client
+      .setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2)
+      .withTimeout(FUTURE_TIMEOUT)
+
+      check:
+        (await serverErrorFuture.waitForResult()).get() ==
+          "Failed to get the storage index: No response from the Web3 provider"
+        (await clientErrorFuture.waitForResult()).get() ==
+          "Failed to get the storage index: No response from the Web3 provider"
+
+    asyncTest "Valid contract":
+      #[
+        # Notes
+        ## Issues
+        ### TreeIndex
+        For some reason the calls to `getWakuRlnConfigOnChain` need to be made with `treeIndex` = 0 and 1, in that order.
+        But the registration needs to be made with 1 and 2.
+        #### Solutions
+        Requires investigation
+        ### Monkeypatching
+        Instead of running the idCredentials monkeypatch, passing the correct membershipIndex and keystorePath and keystorePassword should work.
+        #### Solutions
+        A) Using the register callback to fetch the correct membership
+        B) Using two different keystores, one for each rlnconfig. If there's only one key, it will fetch it regardless of membershipIndex.
+        ##### A
+        - Register is not calling callback even though register is happening, this should happen.
+        - This command should be working, but it doesn't on the current HEAD of the branch, it does work on master, which suggest there's something wrong with the branch.
+          - nim c -r --out:build/onchain -d:chronicles_log_level=NOTICE --verbosity:0 --hints:off -d:git_version="v0.27.0-rc.0-3-gaa9c30" -d:release --passL:librln_v0.3.7.a --passL:-lm tests/waku_rln_relay/test_rln_group_manager_onchain.nim && onchain_group_test
+        - All modified files are tests/*, which is a bit weird. Might be interesting re-creating the branch slowly, and checking out why this is happening.
+        ##### B
+        Untested
+      ]#
+      let
+        onChainGroupManager = await setup()
+        contractAddress = onChainGroupManager.ethContractAddress
+        keystorePath =
+          genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract")
+        appInfo = RlnAppInfo
+        password = "1234"
+        rlnInstance = onChainGroupManager.rlnInstance
+      assertResultOk(createAppKeystore(keystorePath, appInfo))
+
+      # Generate configs before registering the credentials. Otherwise the file gets cleared up.
+      let
+        wakuRlnConfig1 =
+          getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 0)
+        wakuRlnConfig2 =
+          getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 1)
+
+      # Generate credentials
+      let
+        idCredential1 = rlnInstance.membershipKeyGen().get()
+        idCredential2 = rlnInstance.membershipKeyGen().get()
+
+      discard await onChainGroupManager.init()
+      try:
+        # Register credentials in the chain
+        waitFor onChainGroupManager.register(idCredential1)
+        waitFor onChainGroupManager.register(idCredential2)
+      except Exception:
+        assert false, "Failed to register credentials: " & getCurrentExceptionMsg()
+
+      # Add credentials to keystore
+      let
+        persistRes1 = addMembershipCredentialsToKeystore(
+          idCredential1, keystorePath, appInfo, contractAddress, password, 0
+        )
+        persistRes2 = addMembershipCredentialsToKeystore(
+          idCredential2, keystorePath, appInfo, contractAddress, password, 1
+        )
+
+      assertResultOk(persistRes1)
+      assertResultOk(persistRes2)
+
+      await onChainGroupManager.stop()
+
+      # Given the node enables Relay and Rln while subscribing to a pubsub topic
+      await server.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1)
+      await client.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2)
+
+      try:
+        (await server.wakuRlnRelay.groupManager.startGroupSync()).isOkOr:
+          raiseAssert $error
+        (await client.wakuRlnRelay.groupManager.startGroupSync()).isOkOr:
+          raiseAssert $error
+
+        # Test Hack: Monkeypatch the idCredentials into the groupManager
+        server.wakuRlnRelay.groupManager.idCredentials = some(idCredential1)
+        client.wakuRlnRelay.groupManager.idCredentials = some(idCredential2)
+      except Exception, CatchableError:
+        assert false, "exception raised: " & getCurrentExceptionMsg()
+
+      # And the nodes are connected
+      let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
+      await client.connectToNodes(@[serverRemotePeerInfo])
+
+      # And the node registers the completion handler
+      var completionFuture = subscribeCompletionHandler(server, pubsubTopic)
+
+      # When the client sends a valid RLN message
+      let isCompleted =
+        await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)
+
+      # Then the valid RLN message is relayed
+      check isCompleted
+      assertResultOk(await completionFuture.waitForResult())
+
+    asyncTest "Not enough gas":
+      let
+        onChainGroupManager = await setup(ethAmount = 0.u256)
+        contractAddress = onChainGroupManager.ethContractAddress
+        keystorePath =
+          genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract")
+        appInfo = RlnAppInfo
+        password = "1234"
+        rlnInstance = onChainGroupManager.rlnInstance
+      assertResultOk(createAppKeystore(keystorePath, appInfo))
+
+      # Generate credentials
+      let idCredential = rlnInstance.membershipKeyGen().get()
+
+      discard await onChainGroupManager.init()
+      var errorFuture = Future[string].new()
+      onChainGroupManager.onFatalErrorAction = proc(
+          errMsg: string
+      ) {.gcsafe, closure.} =
+        errorFuture.complete(errMsg)
+      try:
+        # Register credentials in the chain
+        waitFor onChainGroupManager.register(idCredential)
+        assert false, "Should have failed to register credentials given there is 0 gas"
+      except Exception:
+        assert true
+
+      check (await errorFuture.waitForResult()).get() ==
+        "Failed to register the member: {\"code\":-32003,\"message\":\"Insufficient funds for gas * price + value\"}"
+      await onChainGroupManager.stop()
+
+  suite "RLN Relay Configuration and Parameters":
+    asyncTest "RLN Relay Credential Path":
+      let
+        onChainGroupManager = await setup()
+        contractAddress = onChainGroupManager.ethContractAddress
+        keystorePath =
+          genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract")
+        appInfo = RlnAppInfo
+        password = "1234"
+        rlnInstance = onChainGroupManager.rlnInstance
+      assertResultOk(createAppKeystore(keystorePath, appInfo))
+
+      # Generate configs before registering the credentials. Otherwise the file gets cleared up.
+      let
+        wakuRlnConfig1 =
+          getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 0)
+        wakuRlnConfig2 =
+          getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 1)
+
+      # Given the node enables Relay and Rln while subscribing to a pubsub topic
+      await server.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1)
+      await client.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2)
+
+      try:
+        (await server.wakuRlnRelay.groupManager.startGroupSync()).isOkOr:
+          raiseAssert $error
+        (await client.wakuRlnRelay.groupManager.startGroupSync()).isOkOr:
+          raiseAssert $error
+
+        # Test Hack: Monkeypatch the idCredentials into the groupManager
+        echo server.wakuRlnRelay.groupManager.idCredentials
+        echo client.wakuRlnRelay.groupManager.idCredentials
+      except Exception, CatchableError:
+        assert false, "exception raised: " & getCurrentExceptionMsg()
+
+      # And the nodes are connected
+      let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
+      await client.connectToNodes(@[serverRemotePeerInfo])
+
+      # And the node registers the completion handler
+      var completionFuture = subscribeCompletionHandler(server, pubsubTopic)
+
+      # When the client attempts to send a message
+      try:
+        let isCompleted =
+          await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)
+        assert false, "Should have failed to send a message"
+      except AssertionDefect as e:
+        # Then the message is not relayed
+        assert e.msg.endsWith("identity credentials are not set")
+
+  suite "RLN Relay Resilience, Security and Compatibility":
+    asyncTest "Key Management and Integrity":
+      let
+        onChainGroupManager = await setup()
+        contractAddress = onChainGroupManager.ethContractAddress
+        keystorePath =
+          genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract")
+        appInfo = RlnAppInfo
+        password = "1234"
+        rlnInstance = onChainGroupManager.rlnInstance
+      assertResultOk(createAppKeystore(keystorePath, appInfo))
+
+      # Generate configs before registering the credentials. Otherwise the file gets cleared up.
+      let
+        wakuRlnConfig1 =
+          getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 0)
+        wakuRlnConfig2 =
+          getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 1)
+
+      # Generate credentials
+      let
+        idCredential1 = rlnInstance.membershipKeyGen().get()
+        idCredential2 = rlnInstance.membershipKeyGen().get()
+
+      discard await onChainGroupManager.init()
+      try:
+        # Register credentials in the chain
+        waitFor onChainGroupManager.register(idCredential1)
+        waitFor onChainGroupManager.register(idCredential2)
+      except Exception:
+        assert false, "Failed to register credentials: " & getCurrentExceptionMsg()
+
+      # Add credentials to keystore
+      let
+        persistRes1 = addMembershipCredentialsToKeystore(
+          idCredential1, keystorePath, appInfo, contractAddress, password, 0
+        )
+        persistRes2 = addMembershipCredentialsToKeystore(
+          idCredential2, keystorePath, appInfo, contractAddress, password, 1
+        )
+
+      assertResultOk(persistRes1)
+      assertResultOk(persistRes2)
+
+      # await onChainGroupManager.stop()
+
+      let
+        registryContract = onChainGroupManager.registryContract.get()
+        storageIndex = (await registryContract.usingStorageIndex().call())
+        rlnContractAddress = await registryContract.storages(storageIndex).call()
+        contract = onChainGroupManager.ethRpc.get().contractSender(
+          RlnStorage, rlnContractAddress
+        )
+        contract2 = onChainGroupManager.rlnContract.get()
+
+      echo "###"
+      echo await (contract.memberExists(idCredential1.idCommitment.toUInt256()).call())
+      echo await (contract.memberExists(idCredential2.idCommitment.toUInt256()).call())
+      echo await (contract2.memberExists(idCredential1.idCommitment.toUInt256()).call())
+      echo await (contract2.memberExists(idCredential2.idCommitment.toUInt256()).call())
+      echo "###"
+
+  ################################
+  ## Terminating/removing Anvil
+  ################################
+
+  # We stop Anvil daemon
+  stopAnvil(runAnvil)
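
A note on the fatal-error pattern used by the "Unregistered contract" test above: the handlers funnel the OnFatalErrorHandler callback into a Future[string], so the test can await the exact error message instead of racing the mount retries. A self-contained sketch under those assumptions (names are illustrative, not part of the diff):

    var fatalErrorFuture = Future[string].new()
    proc captureFatalError(errMsg: string) {.gcsafe, closure, raises: [].} =
      # Complete only once; any later fatal errors are ignored
      if not fatalErrorFuture.finished():
        fatalErrorFuture.complete(errMsg)

    let wakuRlnConfig = getWakuRlnConfigOnChain(
      keystorePath, RlnAppInfo, contractAddress, password, 0, some(captureFatalError)
    )
    # Later: check (await fatalErrorFuture.waitForResult()).get() == expectedMsg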

@@ -20,6 +20,7 @@ import
   waku/[
     waku_core/topics/pubsub_topic,
     waku_core/topics/sharding,
+    waku_store_legacy/common,
     node/waku_node,
     common/paging,
     waku_core,

@@ -2,3 +2,13 @@ import chronos

 template assertResultOk*[T, E](result: Result[T, E]) =
   assert result.isOk(), $result.error()
+
+template assertResultOk*(result: Result[void, string]) =
+  assert result.isOk(), $result.error()
+
+template typeEq*(t: typedesc, u: typedesc): bool =
+  # <a is b> is also true if a is subtype of b
+  t is u and u is t # Only true if actually equal types
+
+template typeEq*(t: auto, u: typedesc): bool =
+  typeEq(type(t), u)
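
The typeEq templates check strict type equality: Nim's `is` alone also accepts subtypes, so the template requires `t is u and u is t`. A small usage sketch (values are illustrative):

    var root: array[32, byte]
    doAssert typeEq(root, array[32, byte])            # value against typedesc
    doAssert typeEq(array[32, byte], array[32, byte]) # typedesc against typedesc
    doAssert not typeEq(root, array[64, byte])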

@@ -7,6 +7,7 @@ const
   FUTURE_TIMEOUT_MEDIUM* = 5.seconds
   FUTURE_TIMEOUT_LONG* = 10.seconds
   FUTURE_TIMEOUT_SHORT* = 100.milliseconds
+  FUTURE_TIMEOUT_SCORING* = 13.seconds # Scoring is 12s, so we need to wait more

 proc newPushHandlerFuture*(): Future[(string, WakuMessage)] =
   newFuture[(string, WakuMessage)]()
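
FUTURE_TIMEOUT_SCORING is consumed by the gossipsub-scoring test added later in this commit: scoring runs on a 12 s interval, so assertions on peer scores must wait at least that long. A sketch of the wait-then-assert pattern it enables (node names as in that test):

    discard await node1.publish(some(DefaultPubsubTopic), msg1)
    await sleepAsync(FUTURE_TIMEOUT_SCORING) # scoring heartbeat is 12s
    check node2.wakuRelay.peerStats[node1.switch.peerInfo.peerId].score > 0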

@@ -146,7 +146,7 @@ suite "Waku Filter - DOS protection":
       some(FilterSubscribeErrorKind.TOO_MANY_REQUESTS)

     # ensure period of time has passed and clients can again use the service
-    await sleepAsync(600.milliseconds)
+    await sleepAsync(700.milliseconds)
     check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) ==
       none(FilterSubscribeErrorKind)
     check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) ==

tests/waku_keystore/utils.nim (new file, 29 lines)
@@ -0,0 +1,29 @@
+{.used.}
+{.push raises: [].}
+
+import stint
+
+import
+  waku/[waku_keystore/protocol_types, waku_rln_relay, waku_rln_relay/protocol_types]
+
+func fromStrToBytesLe*(v: string): seq[byte] =
+  try:
+    return @(hexToUint[256](v).toBytesLE())
+  except ValueError:
+    # this should never happen
+    return @[]
+
+func defaultIdentityCredential*(): IdentityCredential =
+  # zero out the values we don't need
+  return IdentityCredential(
+    idTrapdoor: default(IdentityTrapdoor),
+    idNullifier: default(IdentityNullifier),
+    idSecretHash: fromStrToBytesLe(
+      "7984f7c054ad7793d9f31a1e9f29eaa8d05966511e546bced89961eb8874ab9"
+    ),
+    idCommitment: fromStrToBytesLe(
+      "51c31de3bff7e52dc7b2eb34fc96813bacf38bde92d27fe326ce5d8296322a7"
+    ),
+  )
+
+{.pop.}
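
fromStrToBytesLe parses the hex string as a 256-bit unsigned integer and re-serialises it little-endian, the byte order the zerokit/RLN side expects for field elements. A quick sketch using the helper above (assumes stint is imported as in the new file):

    let secret = fromStrToBytesLe(
      "7984f7c054ad7793d9f31a1e9f29eaa8d05966511e546bced89961eb8874ab9"
    )
    doAssert secret.len == 32 # 256 bits, little-endian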

@@ -15,7 +15,10 @@ import
   waku/waku_rln_relay/rln,
   waku/waku_rln_relay/rln/wrappers,
   ./waku_rln_relay_utils,
-  ../../testlib/[simple_mock]
+  ../../testlib/[simple_mock, assertions],
+  ../../waku_keystore/utils
+
+from std/times import epochTime

 const Empty32Array = default(array[32, byte])

@@ -131,3 +134,42 @@ suite "RlnConfig":
     # Cleanup
     mock(new_circuit):
       backup
+
+suite "proofGen":
+  test "Valid zk proof":
+    # this test vector is from zerokit
+    let rlnInstanceRes = createRLNInstanceWrapper()
+    assertResultOk(rlnInstanceRes)
+    let rlnInstance = rlnInstanceRes.value
+
+    let identityCredential = defaultIdentityCredential()
+    assert rlnInstance.insertMember(identityCredential.idCommitment)
+
+    let merkleRootRes = rlnInstance.getMerkleRoot()
+    assertResultOk(merkleRootRes)
+    let merkleRoot = merkleRootRes.value
+
+    let proofGenRes = rlnInstance.proofGen(
+      data = @[],
+      memKeys = identityCredential,
+      memIndex = MembershipIndex(0),
+      epoch = uint64(epochTime() / 1.float64).toEpoch(),
+    )
+    assertResultOk(proofGenRes)
+
+    let
+      rateLimitProof = proofGenRes.value
+      proofVerifyRes = rlnInstance.proofVerify(
+        data = @[], proof = rateLimitProof, validRoots = @[merkleRoot]
+      )
+
+    assertResultOk(proofVerifyRes)
+    assert proofVerifyRes.value, "proof verification failed"
+
+    # Assert the proof fields adhere to the specified types and lengths
+    check:
+      typeEq(rateLimitProof.proof, array[256, byte])
+      typeEq(rateLimitProof.merkleRoot, array[32, byte])
+      typeEq(rateLimitProof.shareX, array[32, byte])
+      typeEq(rateLimitProof.shareY, array[32, byte])
+      typeEq(rateLimitProof.nullifier, array[32, byte])

@@ -10,9 +10,9 @@ import
   chronicles,
   stint,
   web3,
-  json,
   libp2p/crypto/crypto,
   eth/keys

 import
   waku/[
     waku_node,

@@ -26,202 +26,9 @@ import
     waku_rln_relay/group_manager/on_chain/group_manager,
   ],
   ../testlib/[wakucore, wakunode, common],
+  ./utils_onchain,
   ./utils

-const CHAIN_ID = 1337
-
-proc generateCredentials(rlnInstance: ptr RLN): IdentityCredential =
-  let credRes = membershipKeyGen(rlnInstance)
-  return credRes.get()
-
-proc getRateCommitment(
-    idCredential: IdentityCredential, userMessageLimit: UserMessageLimit
-): RlnRelayResult[RawRateCommitment] =
-  return RateCommitment(
-    idCommitment: idCredential.idCommitment, userMessageLimit: userMessageLimit
-  ).toLeaf()
-
-proc generateCredentials(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] =
-  var credentials: seq[IdentityCredential]
-  for i in 0 ..< n:
-    credentials.add(generateCredentials(rlnInstance))
-  return credentials
-
-# a util function used for testing purposes
-# it deploys membership contract on Anvil (or any Eth client available on EthClient address)
-# must be edited if used for a different contract than membership contract
-# <the difference between this and rln-v1 is that there is no need to deploy the poseidon hasher contract>
-proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
-  let web3 = await newWeb3(ethClientAddress)
-  debug "web3 connected to", ethClientAddress
-
-  # fetch the list of registered accounts
-  let accounts = await web3.provider.eth_accounts()
-  web3.defaultAccount = accounts[1]
-  let add = web3.defaultAccount
-  debug "contract deployer account address ", add
-
-  let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
-  debug "Initial account balance: ", balance
-
-  # deploy poseidon hasher bytecode
-  let poseidonT3Receipt = await web3.deployContract(PoseidonT3)
-  let poseidonT3Address = poseidonT3Receipt.contractAddress.get()
-  let poseidonAddressStripped = strip0xPrefix($poseidonT3Address)
-
-  # deploy lazy imt bytecode
-  let lazyImtReceipt = await web3.deployContract(
-    LazyIMT.replace("__$PoseidonT3$__", poseidonAddressStripped)
-  )
-  let lazyImtAddress = lazyImtReceipt.contractAddress.get()
-  let lazyImtAddressStripped = strip0xPrefix($lazyImtAddress)
-
-  # deploy waku rlnv2 contract
-  let wakuRlnContractReceipt = await web3.deployContract(
-    WakuRlnV2Contract.replace("__$PoseidonT3$__", poseidonAddressStripped).replace(
-      "__$LazyIMT$__", lazyImtAddressStripped
-    )
-  )
-  let wakuRlnContractAddress = wakuRlnContractReceipt.contractAddress.get()
-  let wakuRlnAddressStripped = strip0xPrefix($wakuRlnContractAddress)
-
-  debug "Address of the deployed rlnv2 contract: ", wakuRlnContractAddress
-
-  # need to send concat: impl & init_bytes
-  let contractInput = encode(wakuRlnContractAddress).data & Erc1967ProxyContractInput
-  debug "contractInput", contractInput
-  let proxyReceipt =
-    await web3.deployContract(Erc1967Proxy, contractInput = contractInput)
-
-  debug "proxy receipt", proxyReceipt
-  let proxyAddress = proxyReceipt.contractAddress.get()
-
-  let newBalance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
-  debug "Account balance after the contract deployment: ", newBalance
-
-  await web3.close()
-  debug "disconnected from ", ethClientAddress
-
-  return proxyAddress
-
-proc createEthAccount(): Future[(keys.PrivateKey, Address)] {.async.} =
-  let web3 = await newWeb3(EthClient)
-  let accounts = await web3.provider.eth_accounts()
-  let gasPrice = int(await web3.provider.eth_gasPrice())
-  web3.defaultAccount = accounts[0]
-
-  let pk = keys.PrivateKey.random(rng[])
-  let acc = Address(toCanonicalAddress(pk.toPublicKey()))
-
-  var tx: EthSend
-  tx.source = accounts[0]
-  tx.value = some(ethToWei(1000.u256))
-  tx.to = some(acc)
-  tx.gasPrice = some(gasPrice)
-
-  # Send 1000 eth to acc
-  discard await web3.send(tx)
-  let balance = await web3.provider.eth_getBalance(acc, "latest")
-  assert balance == ethToWei(1000.u256),
-    fmt"Balance is {balance} but expected {ethToWei(1000.u256)}"
-
-  return (pk, acc)
-
-proc getAnvilPath(): string =
-  var anvilPath = ""
-  if existsEnv("XDG_CONFIG_HOME"):
-    anvilPath = joinPath(anvilPath, os.getEnv("XDG_CONFIG_HOME", ""))
-  else:
-    anvilPath = joinPath(anvilPath, os.getEnv("HOME", ""))
-  anvilPath = joinPath(anvilPath, ".foundry/bin/anvil")
-  return $anvilPath
-
-# Runs Anvil daemon
-proc runAnvil(): Process =
-  # Passed options are
-  # --port  Port to listen on.
-  # --gas-limit  Sets the block gas limit in WEI.
-  # --balance  The default account balance, specified in ether.
-  # --chain-id  Chain ID of the network.
-  # See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details
-  try:
-    let anvilPath = getAnvilPath()
-    debug "Anvil path", anvilPath
-    let runAnvil = startProcess(
-      anvilPath,
-      args = [
-        "--port",
-        "8540",
-        "--gas-limit",
-        "300000000000000",
-        "--balance",
-        "1000000000",
-        "--chain-id",
-        $CHAIN_ID,
-      ],
-      options = {poUsePath},
-    )
-    let anvilPID = runAnvil.processID
-
-    # We read stdout from Anvil to see when daemon is ready
-    var anvilStartLog: string
-    var cmdline: string
-    while true:
-      try:
-        if runAnvil.outputstream.readLine(cmdline):
-          anvilStartLog.add(cmdline)
-          if cmdline.contains("Listening on 127.0.0.1:8540"):
-            break
-      except Exception, CatchableError:
-        break
-    debug "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog
-    return runAnvil
-  except: # TODO: Fix "BareExcept" warning
-    error "Anvil daemon run failed", err = getCurrentExceptionMsg()
-
-# Stops Anvil daemon
-proc stopAnvil(runAnvil: Process) {.used.} =
-  let anvilPID = runAnvil.processID
-  # We wait the daemon to exit
-  try:
-    # We terminate Anvil daemon by sending a SIGTERM signal to the runAnvil PID to trigger RPC server termination and clean-up
-    kill(runAnvil)
-    debug "Sent SIGTERM to Anvil", anvilPID = anvilPID
-  except:
-    error "Anvil daemon termination failed: ", err = getCurrentExceptionMsg()
-
-proc setup(): Future[OnchainGroupManager] {.async.} =
-  let rlnInstanceRes =
-    createRlnInstance(tree_path = genTempPath("rln_tree", "group_manager_onchain"))
-  check:
-    rlnInstanceRes.isOk()
-
-  let rlnInstance = rlnInstanceRes.get()
-
-  let contractAddress = await uploadRLNContract(EthClient)
-  # connect to the eth client
-  let web3 = await newWeb3(EthClient)
-
-  let accounts = await web3.provider.eth_accounts()
-  web3.defaultAccount = accounts[0]
-
-  var pk = none(string)
-  let (privateKey, _) = await createEthAccount()
-  pk = some($privateKey)
-
-  let manager = OnchainGroupManager(
-    ethClientUrl: EthClient,
-    ethContractAddress: $contractAddress,
-    chainId: CHAIN_ID,
-    ethPrivateKey: pk,
-    rlnInstance: rlnInstance,
-    onFatalErrorAction: proc(errStr: string) =
-      raiseAssert errStr
-    ,
-  )
-
-  return manager
-
 suite "Onchain group manager":
   # We run Anvil
   let runAnvil {.used.} = runAnvil()

@@ -282,9 +89,32 @@ suite "Onchain group manager":
         raiseAssert errStr
       ,
     )
-    (await manager2.init()).isErrOr:
+    let e = await manager2.init()
+    (e).isErrOr:
       raiseAssert "Expected error when contract address doesn't match"
+
+    echo "---"
+    discard "persisted data: contract address mismatch"
+    echo e.error
+    echo "---"
+
+  asyncTest "should error if contract does not exist":
+    var triggeredError = false
+
+    let manager = await setup()
+    manager.ethContractAddress = "0x0000000000000000000000000000000000000000"
+    manager.onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
+      echo "---"
+      discard
+        "Failed to get the deployed block number. Have you set the correct contract address?: No response from the Web3 provider"
+      echo msg
+      echo "---"
+      triggeredError = true
+
+    discard await manager.init()
+
+    check triggeredError
+
   asyncTest "should error when keystore path and password are provided but file doesn't exist":
     let manager = await setup()
     manager.keystorePath = some("/inexistent/file")

@@ -2,34 +2,21 @@

 {.push raises: [].}

+import stew/results, stint
+
 import
   ./rln/waku_rln_relay_utils,
-  waku/[waku_keystore/protocol_types, waku_rln_relay, waku_rln_relay/rln]
+  waku/[
+    waku_keystore/protocol_types,
+    waku_rln_relay,
+    waku_rln_relay/rln,
+    waku_rln_relay/protocol_types,
+  ],
+  ../waku_keystore/utils,
+  testutils/unittests

-import testutils/unittests
-import stew/results, stint
 from std/times import epochTime

-func fromStrToBytesLe(v: string): seq[byte] =
-  try:
-    return @(hexToUint[256](v).toBytesLE())
-  except ValueError:
-    # this should never happen
-    return @[]
-
-func defaultIdentityCredential*(): IdentityCredential =
-  # zero out the values we don't need
-  return IdentityCredential(
-    idTrapdoor: default(IdentityTrapdoor),
-    idNullifier: default(IdentityNullifier),
-    idSecretHash: fromStrToBytesLe(
-      "7984f7c054ad7793d9f31a1e9f29eaa8d05966511e546bced89961eb8874ab9"
-    ),
-    idCommitment: fromStrToBytesLe(
-      "51c31de3bff7e52dc7b2eb34fc96813bacf38bde92d27fe326ce5d8296322a7"
-    ),
-  )
-
 func defaultRateCommitment*(): RateCommitment =
   let idCredential = defaultIdentityCredential()
   return RateCommitment(idCommitment: idCredential.idCommitment, userMessageLimit: 100)

@@ -11,12 +11,27 @@ import
   libp2p/protocols/pubsub/pubsub
 import
   waku/[waku_core, waku_node, waku_rln_relay],
-  ../testlib/wakucore,
-  ../testlib/wakunode,
+  ../testlib/[wakucore, futures, wakunode],
   ./rln/waku_rln_relay_utils

 from std/times import epochTime

+proc buildWakuRlnConfig(
+    credIndex: uint,
+    epochSizeSec: uint64,
+    treeFilename: string,
+    userMessageLimit: uint64 = 1,
+): WakuRlnConfig =
+  let treePath = genTempPath("rln_tree", treeFilename)
+  # Off-chain
+  return WakuRlnConfig(
+    rlnRelayDynamic: false,
+    rlnRelayCredIndex: some(credIndex.uint),
+    rlnRelayUserMessageLimit: userMessageLimit,
+    rlnEpochSizeSec: epochSizeSec,
+    rlnRelayTreePath: treePath,
+  )
+
 procSuite "WakuNode - RLN relay":
   # NOTE: we set the rlnRelayUserMessageLimit to 1 to make the tests easier to reason about
   asyncTest "testing rln-relay with valid proof":
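
buildWakuRlnConfig centralises the off-chain WakuRlnConfig that used to be spelled out inline in each test; the clearNullifierLog and spam-detection tests below call it like this (values taken from those tests):

    await node1.mountRelay(@[DefaultPubsubTopic])
    let wakuRlnConfig1 = buildWakuRlnConfig(
      credIndex = 1, epochSizeSec = 5, treeFilename = "wakunode_10"
    )
    await node1.mountRlnRelay(wakuRlnConfig1)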

@@ -467,78 +482,47 @@ procSuite "WakuNode - RLN relay":
     await node3.stop()

   asyncTest "clearNullifierLog: should clear epochs > MaxEpochGap":
+    # Given two nodes
     let
-      # publisher node
+      contentTopic = ContentTopic("/waku/2/default-content/proto")
+      pubsubTopicSeq = @[DefaultPubsubTopic]
       nodeKey1 = generateSecp256k1Key()
       node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
-      # Relay node
       nodeKey2 = generateSecp256k1Key()
       node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
-      # Subscriber
-      nodeKey3 = generateSecp256k1Key()
-      node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0))
-
-      contentTopic = ContentTopic("/waku/2/default-content/proto")
-
-    # set up 2 nodes
-    # node1
-    await node1.mountRelay(@[DefaultPubsubTopic])
-
-    # mount rlnrelay in off-chain mode
-    let wakuRlnConfig1 = WakuRlnConfig(
-      rlnRelayDynamic: false,
-      rlnRelayCredIndex: some(1.uint),
-      rlnRelayUserMessageLimit: 1,
-      rlnEpochSizeSec: 1,
-      rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"),
-    )
-
+      epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4
+
+    # Given both nodes mount relay and rlnrelay
+    await node1.mountRelay(pubsubTopicSeq)
+    let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
     await node1.mountRlnRelay(wakuRlnConfig1)

-    await node1.start()
-
-    # node 2
+    # Mount rlnrelay in node2 in off-chain mode
     await node2.mountRelay(@[DefaultPubsubTopic])
-
-    # mount rlnrelay in off-chain mode
-    let wakuRlnConfig2 = WakuRlnConfig(
-      rlnRelayDynamic: false,
-      rlnRelayCredIndex: some(2.uint),
-      rlnRelayUserMessageLimit: 1,
-      rlnEpochSizeSec: 1,
-      rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"),
-    )
-
+    let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
     await node2.mountRlnRelay(wakuRlnConfig2)

-    await node2.start()
+    # Given the two nodes are started and connected
+    waitFor allFutures(node1.start(), node2.start())
     await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

-    # get the current epoch time
-    let time = epochTime()
-    # create some messages with rate limit proofs
+    # Given some messages
     var
       wm1 = WakuMessage(payload: "message 1".toBytes(), contentTopic: contentTopic)
-      # another message in the same epoch as wm1, it will break the messaging rate limit
       wm2 = WakuMessage(payload: "message 2".toBytes(), contentTopic: contentTopic)
-      # wm3 points to the next epoch
       wm3 = WakuMessage(payload: "message 3".toBytes(), contentTopic: contentTopic)
+      wm4 = WakuMessage(payload: "message 4".toBytes(), contentTopic: contentTopic)
+      wm5 = WakuMessage(payload: "message 5".toBytes(), contentTopic: contentTopic)
+      wm6 = WakuMessage(payload: "message 6".toBytes(), contentTopic: contentTopic)

-    node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr:
-      raiseAssert $error
-    node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr:
-      raiseAssert $error
-
-    node1.wakuRlnRelay.unsafeAppendRLNProof(
-      wm3, time + float64(node1.wakuRlnRelay.rlnEpochSizeSec * 2)
-    ).isOkOr:
-      raiseAssert $error
-
-    # relay handler for node2
-    var completionFut1 = newFuture[bool]()
-    var completionFut2 = newFuture[bool]()
-    var completionFut3 = newFuture[bool]()
+    # And node2 mounts a relay handler that completes the respective future when a message is received
+    var
+      completionFut1 = newFuture[bool]()
+      completionFut2 = newFuture[bool]()
+      completionFut3 = newFuture[bool]()
+      completionFut4 = newFuture[bool]()
+      completionFut5 = newFuture[bool]()
+      completionFut6 = newFuture[bool]()
     proc relayHandler(
         topic: PubsubTopic, msg: WakuMessage
     ): Future[void] {.async, gcsafe.} =

@@ -550,25 +534,133 @@ procSuite "WakuNode - RLN relay":
         completionFut2.complete(true)
       if msg == wm3:
         completionFut3.complete(true)
+      if msg == wm4:
+        completionFut4.complete(true)
+      if msg == wm5:
+        completionFut5.complete(true)
+      if msg == wm6:
+        completionFut6.complete(true)

-    # mount the relay handler for node2
     node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
-    await sleepAsync(2000.millis)

+    # Given all messages have an rln proof and are published by the node 1
+    let publishSleepDuration: Duration = 5000.millis
+    let time = epochTime()
+
+    # Epoch 1
+    node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr:
+      raiseAssert $error
+    # Message wm2 is published in the same epoch as wm1, so it'll be considered spam
+    node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr:
+      raiseAssert $error
     discard await node1.publish(some(DefaultPubsubTopic), wm1)
     discard await node1.publish(some(DefaultPubsubTopic), wm2)
-    discard await node1.publish(some(DefaultPubsubTopic), wm3)
-
-    let
-      res1 = await completionFut1.withTimeout(10.seconds)
-      res2 = await completionFut2.withTimeout(10.seconds)
-      res3 = await completionFut3.withTimeout(10.seconds)
-
+    await sleepAsync(publishSleepDuration)
     check:
-      res1 == true
-      res2 == false
-      res3 == true
+      node1.wakuRlnRelay.nullifierLog.len() == 0
+      node2.wakuRlnRelay.nullifierLog.len() == 1

+    # Epoch 2
+    node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, epochTime()).isOkOr:
+      raiseAssert $error
+    discard await node1.publish(some(DefaultPubsubTopic), wm3)
+    await sleepAsync(publishSleepDuration)
+    check:
+      node1.wakuRlnRelay.nullifierLog.len() == 0
       node2.wakuRlnRelay.nullifierLog.len() == 2

-    await node1.stop()
-    await node2.stop()
+    # Epoch 3
+    node1.wakuRlnRelay.unsafeAppendRLNProof(wm4, epochTime()).isOkOr:
+      raiseAssert $error
+    discard await node1.publish(some(DefaultPubsubTopic), wm4)
+    await sleepAsync(publishSleepDuration)
+    check:
+      node1.wakuRlnRelay.nullifierLog.len() == 0
+      node2.wakuRlnRelay.nullifierLog.len() == 3
+
+    # Epoch 4
+    node1.wakuRlnRelay.unsafeAppendRLNProof(wm5, epochTime()).isOkOr:
+      raiseAssert $error
+    discard await node1.publish(some(DefaultPubsubTopic), wm5)
+    await sleepAsync(publishSleepDuration)
+    check:
+      node1.wakuRlnRelay.nullifierLog.len() == 0
+      node2.wakuRlnRelay.nullifierLog.len() == 4
+
+    # Epoch 5
+    node1.wakuRlnRelay.unsafeAppendRLNProof(wm6, epochTime()).isOkOr:
+      raiseAssert $error
+    discard await node1.publish(some(DefaultPubsubTopic), wm6)
+    await sleepAsync(publishSleepDuration)
+    check:
+      node1.wakuRlnRelay.nullifierLog.len() == 0
+      node2.wakuRlnRelay.nullifierLog.len() == 4
+
+    # Then the node 2 should have cleared the nullifier log for epochs > MaxEpochGap
+    # Therefore, with 4 max epochs, the first 4 messages will be published (except wm2, which shares epoch with wm1)
+    check:
+      (await completionFut1.waitForResult()).value() == true
+      (await completionFut2.waitForResult()).isErr()
+      (await completionFut3.waitForResult()).value() == true
+      (await completionFut4.waitForResult()).value() == true
+      (await completionFut5.waitForResult()).value() == true
+      (await completionFut6.waitForResult()).value() == true
+
+    # Cleanup
+    waitFor allFutures(node1.stop(), node2.stop())
+
+  asyncTest "Spam Detection and Slashing (currently gossipsub score decrease)":
+    # Given two nodes
+    let
+      contentTopic = ContentTopic("/waku/2/default-content/proto")
+      pubsubTopicSeq = @[DefaultPubsubTopic]
+      nodeKey1 = generateSecp256k1Key()
+      node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
+      nodeKey2 = generateSecp256k1Key()
+      node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
+      epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4
+
+    # Given both nodes mount relay and rlnrelay
+    # Mount rlnrelay in node1 in off-chain mode
+    await node1.mountRelay(pubsubTopicSeq)
+    let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
+    await node1.mountRlnRelay(wakuRlnConfig1)
+
+    # Mount rlnrelay in node2 in off-chain mode
+    await node2.mountRelay(@[DefaultPubsubTopic])
+    let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
+    await node2.mountRlnRelay(wakuRlnConfig2)
+
+    # Given the two nodes are started and connected
+    waitFor allFutures(node1.start(), node2.start())
+    await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+
+    # Given some messages with rln proofs
+    let time = epochTime()
+    var
+      msg1 = WakuMessage(payload: "message 1".toBytes(), contentTopic: contentTopic)
+      msg2 = WakuMessage(payload: "message 2".toBytes(), contentTopic: contentTopic)
+
+    node1.wakuRlnRelay.unsafeAppendRLNProof(msg1, time).isOkOr:
+      raiseAssert $error
+    # Message wm2 is published in the same epoch as wm1, so it'll be considered spam
+    node1.wakuRlnRelay.unsafeAppendRLNProof(msg2, time).isOkOr:
+      raiseAssert $error
+
+    # When publishing the first message (valid)
+    discard await node1.publish(some(DefaultPubsubTopic), msg1)
+    await sleepAsync(FUTURE_TIMEOUT_SCORING) # Wait for scoring
+
+    # Then the score of node2 should increase
+    check:
+      node1.wakuRelay.peerStats[node2.switch.peerInfo.peerId].score == 0.1
+      node2.wakuRelay.peerStats[node1.switch.peerInfo.peerId].score == 1.1
+
+    # When publishing the second message (spam)
+    discard await node1.publish(some(DefaultPubsubTopic), msg2)
+    await sleepAsync(FUTURE_TIMEOUT_SCORING)
+
+    # Then the score of node2 should decrease
+    check:
+      node1.wakuRelay.peerStats[node2.switch.peerInfo.peerId].score == 0.1
+      node2.wakuRelay.peerStats[node1.switch.peerInfo.peerId].score == -99.4
|
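
# The gossipsub score drop exercised above is the observable effect of RLN's
# double-signaling rule: two distinct proofs for one nullifier within a single
# epoch mark the second message as spam (and, in full RLN, allow recovering the
# sender's secret). An illustrative sketch with simplified types; the real
# validator also tracks the full proof metadata:
import std/[options, tables]

type
  SpamNullifier = array[32, byte]
  ProofShare = tuple[shareX, shareY: array[32, byte]]

proc detectDoubleSignal(
    seen: var Table[SpamNullifier, ProofShare],
    nullifier: SpamNullifier,
    share: ProofShare,
): Option[ProofShare] =
  # Same nullifier with a different share inside the epoch window: spam.
  # The two shares together are what would permit secret recovery (slashing).
  if nullifier in seen and seen[nullifier] != share:
    return some(seen[nullifier])
  seen[nullifier] = share
  none(ProofShare)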

226 tests/waku_rln_relay/utils_onchain.nim Normal file

@@ -0,0 +1,226 @@
{.used.}

{.push raises: [].}

import
  std/[options, os, osproc, sequtils, deques, streams, strutils, tempfiles, strformat],
  stew/[results, byteutils],
  testutils/unittests,
  chronos,
  chronicles,
  stint,
  web3,
  json,
  libp2p/crypto/crypto,
  eth/keys

import
  waku/[
    waku_rln_relay,
    waku_rln_relay/protocol_types,
    waku_rln_relay/constants,
    waku_rln_relay/contract,
    waku_rln_relay/rln,
  ],
  ../testlib/common,
  ./utils

const CHAIN_ID* = 1337

proc generateCredentials*(rlnInstance: ptr RLN): IdentityCredential =
  let credRes = membershipKeyGen(rlnInstance)
  return credRes.get()

proc getRateCommitment*(
    idCredential: IdentityCredential, userMessageLimit: UserMessageLimit
): RlnRelayResult[RawRateCommitment] =
  return RateCommitment(
    idCommitment: idCredential.idCommitment, userMessageLimit: userMessageLimit
  ).toLeaf()

proc generateCredentials*(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] =
  var credentials: seq[IdentityCredential]
  for i in 0 ..< n:
    credentials.add(generateCredentials(rlnInstance))
  return credentials
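
# Illustrative only (not part of the change set): how the two helpers above
# combine to produce Merkle-tree leaves for registration. The proc name is
# hypothetical, `rlnInstance` is assumed to exist (see `setup` below), and the
# message limit of 20 is an arbitrary value for this sketch.
proc exampleRateCommitments(rlnInstance: ptr RLN): seq[RawRateCommitment] =
  let credentials = generateCredentials(rlnInstance, 5)
  # Each leaf commits to the pair (idCommitment, userMessageLimit)
  credentials.mapIt(getRateCommitment(it, UserMessageLimit(20)).get())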

# A util function used for testing purposes.
# It deploys the membership contract on Anvil (or any Eth client available at
# the EthClient address) and must be edited if used for a different contract.
# Unlike rln-v1, no standalone Poseidon hasher contract is needed: PoseidonT3
# and LazyIMT are deployed as libraries and linked into the contract bytecode.
proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
  let web3 = await newWeb3(ethClientAddress)
  debug "web3 connected to", ethClientAddress

  # fetch the list of registered accounts
  let accounts = await web3.provider.eth_accounts()
  web3.defaultAccount = accounts[1]
  let add = web3.defaultAccount
  debug "contract deployer account address ", add

  let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
  debug "Initial account balance: ", balance

  # deploy the PoseidonT3 hasher library bytecode
  let poseidonT3Receipt = await web3.deployContract(PoseidonT3)
  let poseidonT3Address = poseidonT3Receipt.contractAddress.get()
  let poseidonAddressStripped = strip0xPrefix($poseidonT3Address)

  # deploy the LazyIMT library bytecode, linked against PoseidonT3
  let lazyImtReceipt = await web3.deployContract(
    LazyIMT.replace("__$PoseidonT3$__", poseidonAddressStripped)
  )
  let lazyImtAddress = lazyImtReceipt.contractAddress.get()
  let lazyImtAddressStripped = strip0xPrefix($lazyImtAddress)

  # deploy the waku rlnv2 contract, linked against both libraries
  let wakuRlnContractReceipt = await web3.deployContract(
    WakuRlnV2Contract.replace("__$PoseidonT3$__", poseidonAddressStripped).replace(
      "__$LazyIMT$__", lazyImtAddressStripped
    )
  )
  let wakuRlnContractAddress = wakuRlnContractReceipt.contractAddress.get()
  let wakuRlnAddressStripped = strip0xPrefix($wakuRlnContractAddress)

  debug "Address of the deployed rlnv2 contract: ", wakuRlnContractAddress

  # the proxy constructor input is the implementation address concatenated
  # with the initializer call bytes (impl & init_bytes)
  let contractInput = encode(wakuRlnContractAddress).data & Erc1967ProxyContractInput
  debug "contractInput", contractInput
  let proxyReceipt =
    await web3.deployContract(Erc1967Proxy, contractInput = contractInput)

  debug "proxy receipt", proxyReceipt
  let proxyAddress = proxyReceipt.contractAddress.get()

  let newBalance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
  debug "Account balance after the contract deployment: ", newBalance

  await web3.close()
  debug "disconnected from ", ethClientAddress

  return proxyAddress

proc createEthAccount*(
    ethAmount: UInt256 = 1000.u256
): Future[(keys.PrivateKey, Address)] {.async.} =
  let web3 = await newWeb3(EthClient)
  let accounts = await web3.provider.eth_accounts()
  let gasPrice = int(await web3.provider.eth_gasPrice())
  web3.defaultAccount = accounts[0]

  let pk = keys.PrivateKey.random(rng[])
  let acc = Address(toCanonicalAddress(pk.toPublicKey()))

  var tx: EthSend
  tx.source = accounts[0]
  tx.value = some(ethToWei(ethAmount))
  tx.to = some(acc)
  tx.gasPrice = some(gasPrice)

  # Send ethAmount to acc
  discard await web3.send(tx)
  let balance = await web3.provider.eth_getBalance(acc, "latest")
  assert balance == ethToWei(ethAmount),
    fmt"Balance is {balance} but expected {ethToWei(ethAmount)}"

  return (pk, acc)

proc getAnvilPath*(): string =
  var anvilPath = ""
  if existsEnv("XDG_CONFIG_HOME"):
    anvilPath = joinPath(anvilPath, os.getEnv("XDG_CONFIG_HOME", ""))
  else:
    anvilPath = joinPath(anvilPath, os.getEnv("HOME", ""))
  anvilPath = joinPath(anvilPath, ".foundry/bin/anvil")
  return $anvilPath

# Runs the Anvil daemon
proc runAnvil*(port: int = 8540, chainId: string = "1337"): Process =
  # Passed options are
  # --port       Port to listen on.
  # --gas-limit  Sets the block gas limit in WEI.
  # --balance    The default account balance, specified in ether.
  # --chain-id   Chain ID of the network.
  # See the Anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details
  try:
    let anvilPath = getAnvilPath()
    debug "Anvil path", anvilPath
    let runAnvil = startProcess(
      anvilPath,
      args = [
        "--port",
        $port,
        "--gas-limit",
        "300000000000000",
        "--balance",
        "1000000000",
        "--chain-id",
        $CHAIN_ID,
      ],
      options = {poUsePath},
    )
    let anvilPID = runAnvil.processID

    # We read stdout from Anvil to see when the daemon is ready
    var anvilStartLog: string
    var cmdline: string
    while true:
      try:
        if runAnvil.outputstream.readLine(cmdline):
          anvilStartLog.add(cmdline)
          if cmdline.contains("Listening on 127.0.0.1:" & $port):
            break
      except Exception, CatchableError:
        break
    debug "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog
    return runAnvil
  except: # TODO: Fix "BareExcept" warning
    error "Anvil daemon run failed", err = getCurrentExceptionMsg()

# Stops the Anvil daemon
proc stopAnvil*(runAnvil: Process) {.used.} =
  let anvilPID = runAnvil.processID
  # We wait for the daemon to exit
  try:
    # We terminate the Anvil daemon by sending a SIGTERM signal to the runAnvil
    # PID to trigger RPC server termination and clean-up
    kill(runAnvil)
    debug "Sent SIGTERM to Anvil", anvilPID = anvilPID
  except:
    error "Anvil daemon termination failed: ", err = getCurrentExceptionMsg()
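
# Illustrative only: the intended pairing of runAnvil/stopAnvil. Suites
# typically start the daemon once and tear it down when done; `withAnvil` is a
# hypothetical convenience wrapper, not part of this change set.
proc withAnvil(body: proc(): Future[void] {.gcsafe.}) {.async.} =
  let anvil = runAnvil()
  try:
    await body()
  finally:
    # always reached, so a failing body cannot leak a daemon
    stopAnvil(anvil)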

proc setup*(
    ethClientAddress: string = EthClient, ethAmount: UInt256 = 10.u256
): Future[OnchainGroupManager] {.async.} =
  let rlnInstanceRes =
    createRlnInstance(tree_path = genTempPath("rln_tree", "group_manager_onchain"))
  check:
    rlnInstanceRes.isOk()

  let rlnInstance = rlnInstanceRes.get()

  let contractAddress = await uploadRLNContract(ethClientAddress)
  # connect to the eth client
  let web3 = await newWeb3(ethClientAddress)

  let accounts = await web3.provider.eth_accounts()
  web3.defaultAccount = accounts[0]

  var pk = none(string)
  let (privateKey, _) = await createEthAccount(ethAmount)
  pk = some($privateKey)

  let manager = OnchainGroupManager(
    ethClientUrl: ethClientAddress,
    ethContractAddress: $contractAddress,
    chainId: CHAIN_ID,
    ethPrivateKey: pk,
    rlnInstance: rlnInstance,
    onFatalErrorAction: proc(errStr: string) =
      raiseAssert errStr
    ,
  )

  return manager

{.pop.}

86 tests/waku_rln_relay/utils_static.nim Normal file

@@ -0,0 +1,86 @@
{.used.}

import
  std/[sequtils, tempfiles],
  stew/byteutils,
  stew/shims/net as stewNet,
  chronos,
  libp2p/switch,
  libp2p/protocols/pubsub/pubsub

from std/times import epochTime

import
  ../../../waku/[node/waku_node, node/peer_manager, waku_core, waku_node, waku_rln_relay],
  ../waku_store/store_utils,
  ../waku_archive/archive_utils,
  ../testlib/[wakucore, futures, assertions]

proc setupStaticRln*(
    node: WakuNode,
    identifier: uint,
    rlnRelayEthContractAddress: Option[string] = none(string),
) {.async.} =
  await node.mountRlnRelay(
    WakuRlnConfig(
      rlnRelayDynamic: false,
      rlnRelayCredIndex: some(identifier),
      rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier),
      rlnEpochSizeSec: 1,
    )
  )

proc setupRelayWithStaticRln*(
    node: WakuNode, identifier: uint, pubsubTopics: seq[string]
) {.async.} =
  await node.mountRelay(pubsubTopics)
  await setupStaticRln(node, identifier)

proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bool] =
  var completionFut = newFuture[bool]()
  proc relayHandler(
      topic: PubsubTopic, msg: WakuMessage
  ): Future[void] {.async, gcsafe.} =
    if topic == pubsubTopic:
      completionFut.complete(true)

  node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler))
  return completionFut

proc sendRlnMessage*(
    client: WakuNode,
    pubsubTopic: string,
    contentTopic: string,
    completionFuture: Future[bool],
    payload: seq[byte] = "Hello".toBytes(),
): Future[bool] {.async.} =
  var message = WakuMessage(payload: payload, contentTopic: contentTopic)
  let appendResult = client.wakuRlnRelay.appendRLNProof(message, epochTime())
  # Assignment to a variable is required, or the call crashes
  assertResultOk(appendResult)
  discard await client.publish(some(pubsubTopic), message)
  let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
  return isCompleted

proc sendRlnMessageWithInvalidProof*(
    client: WakuNode,
    pubsubTopic: string,
    contentTopic: string,
    completionFuture: Future[bool],
    payload: seq[byte] = "Hello".toBytes(),
): Future[bool] {.async.} =
  let
    extraBytes: seq[byte] = @[byte(1), 2, 3]
    rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof(
      concat(payload, extraBytes),
        # we add extra bytes to invalidate proof verification against the original payload
      client.wakuRlnRelay.getCurrentEpoch(),
    )
    rateLimitProof = rateLimitProofRes.get().encode().buffer
    message =
      WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof)

  discard await client.publish(some(pubsubTopic), message)
  let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
  return isCompleted
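
# Illustrative composition of the helpers above, assuming the testlib helpers
# newTestWakuNode/generateSecp256k1Key are in scope; the test name and content
# topic are arbitrary for this sketch.
asyncTest "rln relays a message carrying a valid proof":
  let
    server = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    client = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))

  # distinct credential indices so the two nodes are different group members
  await setupRelayWithStaticRln(server, 1, @[DefaultPubsubTopic])
  await setupRelayWithStaticRln(client, 2, @[DefaultPubsubTopic])
  await allFutures(server.start(), client.start())
  await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()])

  let completionFut = subscribeCompletionHandler(server, DefaultPubsubTopic)
  let relayed =
    await sendRlnMessage(client, DefaultPubsubTopic, "/waku/2/test-content/proto", completionFut)
  check relayed == true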