feat(rlnv2): clean fork of rlnv2 (#2828)

* chore(rlnv2): contract interface changes (#2770)
* fix: tests
* fix: remove stuint[32]
* chore(submodule): update zerokit submodule to v0.5.1 (#2782)
* fix: remove cond comp for lightpush test
* fix: ci and nonceManager
Author: Aaryamann Challani (2024-06-20 15:05:21 +05:30), committed by Ivan Folgueira Bande
parent 31daabab84
commit a02832fe12
30 changed files with 834 additions and 1649 deletions


@ -54,12 +54,11 @@ jobs:
strategy:
fail-fast: false
matrix:
rln_version: [1, 2]
os: [ubuntu-latest, macos-13]
runs-on: ${{ matrix.os }}
timeout-minutes: 60
name: build-${{ matrix.os }}-rln-v${{ matrix.rln_version }}
name: build-${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v3
@ -78,7 +77,7 @@ jobs:
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Build binaries
run: make RLN_V${{matrix.rln_version}}=true V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
test:
needs: changes
@ -86,12 +85,11 @@ jobs:
strategy:
fail-fast: false
matrix:
rln_version: [1, 2]
os: [ubuntu-latest, macos-13]
runs-on: ${{ matrix.os }}
timeout-minutes: 60
name: test-${{ matrix.os }}-rln-v${{ matrix.rln_version }}
name: test-${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v3
@ -120,7 +118,7 @@ jobs:
export MAKEFLAGS="-j1"
export NIMFLAGS="--colors:off -d:chronicles_colors:none"
make RLN_V${{matrix.rln_version}}=true V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2
build-docker-image:
needs: changes


@ -22,12 +22,11 @@ jobs:
build-docker-image:
strategy:
matrix:
rln_version : [1, 2]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
timeout-minutes: 60
name: docker-build-${{ matrix.os }}-rln-v${{ matrix.rln_version }}
name: docker-build-${{ matrix.os }}
outputs:
image: ${{ steps.build.outputs.image }}
steps:
@ -67,12 +66,12 @@ jobs:
if: ${{ steps.secrets.outcome == 'success' }}
run: |
make RLN_V${{matrix.rln_version}}=true -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres" wakunode2
make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres" wakunode2
SHORT_REF=$(git rev-parse --short HEAD)
TAG=$([ "${PR_NUMBER}" == "" ] && echo "${SHORT_REF}" || echo "${PR_NUMBER}")
IMAGE=quay.io/wakuorg/nwaku-pr:${TAG}-rln-v${{matrix.rln_version}}
IMAGE=quay.io/wakuorg/nwaku-pr:${TAG}
echo "image=${IMAGE}" >> $GITHUB_OUTPUT
echo "commit_hash=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT

.gitmodules vendored

@ -143,7 +143,7 @@
path = vendor/zerokit
url = https://github.com/vacp2p/zerokit.git
ignore = dirty
branch = v0.3.4
branch = v0.5.1
[submodule "vendor/nim-regex"]
path = vendor/nim-regex
url = https://github.com/nitely/nim-regex.git


@ -136,14 +136,10 @@ clean: | clean-libbacktrace
##################
## RLN ##
##################
.PHONY: librln shouldUseRLNV2
.PHONY: librln
LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
ifeq ($(RLN_V2),true)
LIBRLN_VERSION := v0.4.4
else
LIBRLN_VERSION := v0.3.7
endif
LIBRLN_VERSION := v0.5.1
ifeq ($(OS),Windows_NT)
LIBRLN_FILE := rln.lib
@ -155,12 +151,7 @@ $(LIBRLN_FILE):
echo -e $(BUILD_MSG) "$@" && \
./scripts/build_rln.sh $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(LIBRLN_FILE)
shouldUseRLNV2:
ifeq ($(RLN_V2),true)
$(eval NIM_PARAMS += -d:rln_v2)
endif
librln: | $(LIBRLN_FILE) shouldUseRLNV2
librln: | $(LIBRLN_FILE)
$(eval NIM_PARAMS += --passL:$(LIBRLN_FILE) --passL:-lm)
clean-librln:
@ -320,7 +311,7 @@ endif
rebuild-nat-libs: | clean-cross nat-libs
libwaku-android-precheck: shouldUseRLNV2
libwaku-android-precheck:
ifndef ANDROID_NDK_HOME
$(error ANDROID_NDK_HOME is not set)
endif


@ -542,7 +542,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
echo "rln-relay preparation is in progress..."
when defined(rln_v2):
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
@ -553,16 +552,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
)
else:
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
)
waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))


@ -19,9 +19,9 @@ host_triplet=$(rustc --version --verbose | awk '/host:/{print $2}')
tarball="${host_triplet}"
# use arkzkey feature for v0.4.4
# use arkzkey feature for v0.5.1
# TODO: update this script in the future when arkzkey is default
if [[ "${rln_version}" == "v0.4.4" ]]; then
if [[ "${rln_version}" == "v0.5.1" ]]; then
tarball+="-arkzkey-rln.tar.gz"
else
tarball+="-rln.tar.gz"
@ -52,6 +52,6 @@ else
exit 1
fi
# if submodule version = version in Makefile, build rln
cargo build --release -p rln --manifest-path "${build_dir}/rln/Cargo.toml"
cargo build --release -p rln --manifest-path "${build_dir}/rln/Cargo.toml" --features arkzkey
cp "${build_dir}/target/release/librln.a" "${output_filename}"
fi


@ -135,7 +135,6 @@ suite "RLN Proofs as a Lightpush Service":
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
# mount rln-relay
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -143,13 +142,7 @@ suite "RLN Proofs as a Lightpush Service":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
)
await allFutures(server.start(), client.start())
await server.start()


@ -95,7 +95,6 @@ proc sendRlnMessage*(
let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
return isCompleted
when defined(rln_v2):
proc sendRlnMessageWithInvalidProof*(
client: WakuNode,
pubsubTopic: string,
@ -112,33 +111,8 @@ when defined(rln_v2):
messageId = MessageId(0),
)
rateLimitProof = rateLimitProofRes.get().encode().buffer
message = WakuMessage(
payload: @payload, contentTopic: contentTopic, proof: rateLimitProof
)
discard await client.publish(some(pubsubTopic), message)
let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
return isCompleted
else:
proc sendRlnMessageWithInvalidProof*(
client: WakuNode,
pubsubTopic: string,
contentTopic: string,
completionFuture: Future[bool],
payload: seq[byte] = "Hello".toBytes(),
): Future[bool] {.async.} =
let
extraBytes: seq[byte] = @[byte(1), 2, 3]
rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof(
concat(payload, extraBytes),
# we add extra bytes to invalidate proof verification against original payload
client.wakuRlnRelay.getCurrentEpoch(),
)
rateLimitProof = rateLimitProofRes.get().encode().buffer
message = WakuMessage(
payload: @payload, contentTopic: contentTopic, proof: rateLimitProof
)
message =
WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof)
discard await client.publish(some(pubsubTopic), message)
let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
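A hedged usage sketch for the helper above (DefaultPubsubTopic/DefaultContentTopic and the completionFuture wiring are assumptions borrowed from the surrounding test suites; the false expectation assumes relay drops messages whose proof does not verify):
# hypothetical call from a test body; not part of this diff
let delivered = await client.sendRlnMessageWithInvalidProof(
  DefaultPubsubTopic, DefaultContentTopic, completionFuture
)
check:
  delivered == false # the tampered proof should fail verification, so no delivery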


@ -14,14 +14,11 @@ proc unsafeAppendRLNProof*(
let input = msg.toRLNSignal()
let epoch = rlnPeer.calcEpoch(senderEpochTime)
when defined(rln_v2):
# we do not fetch a nonce from the nonce manager,
# instead we use 0 as the nonce
let proof = rlnPeer.groupManager.generateProof(input, epoch, 0).valueOr:
return err("could not generate rln-v2 proof: " & $error)
else:
let proof = rlnPeer.groupManager.generateProof(input, epoch).valueOr:
return err("could not generate rln proof: " & $error)
msg.proof = proof.encode().buffer
return ok()
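A hedged usage sketch for unsafeAppendRLNProof (the WakuMessage construction and the epochTime() input are assumptions; per the body above, the fixed nonce 0 deliberately bypasses the nonce manager, so the helper is only suitable for tests):
import std/times
# hypothetical usage; the rlnPeer is the node's mounted WakuRLNRelay
var msg = WakuMessage(payload: "hello".toBytes(), contentTopic: DefaultContentTopic)
node.wakuRlnRelay.unsafeAppendRLNProof(msg, epochTime()).isOkOr:
  raiseAssert "proof generation failed: " & $error
# msg.proof now carries an rln-v2 proof generated with messageId 0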


@ -5,7 +5,5 @@ import
./test_rln_group_manager_static,
./test_waku_rln_relay,
./test_wakunode_rln_relay,
./test_rln_nonce_manager
when defined(rln_v2):
import ./rln_v2/test_rln_relay_v2_serde
./test_rln_nonce_manager,
./test_rln_serde


@ -30,13 +30,12 @@ proc generateCredentials(rlnInstance: ptr RLN): IdentityCredential =
let credRes = membershipKeyGen(rlnInstance)
return credRes.get()
when defined(rln_v2):
proc getRateCommitment(
idCredential: IdentityCredential, userMessageLimit: UserMessageLimit
): RateCommitment =
): RlnRelayResult[RawRateCommitment] =
return RateCommitment(
idCommitment: idCredential.idCommitment, userMessageLimit: userMessageLimit
)
).toLeaf()
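Note the interface shift here: the tree no longer stores bare identity commitments; toLeaf() reduces the (idCommitment, userMessageLimit) pair to the RawRateCommitment leaf (in zerokit this is, to our understanding, a Poseidon hash of the pair). A minimal usage sketch with an illustrative limit:
# sketch: derive the raw Merkle leaf for a previously generated credential;
# UserMessageLimit(20) mirrors the default used in the static group constants
let leaf: RawRateCommitment = getRateCommitment(credentials, UserMessageLimit(20)).valueOr:
  raiseAssert $error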
proc generateCredentials(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] =
var credentials: seq[IdentityCredential]
@ -61,48 +60,38 @@ proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
debug "Initial account balance: ", balance
when defined(rln_v2):
# deploy registry contract with its constructor inputs
let receipt = await web3.deployContract(RegistryContractCode)
else:
# deploy the poseidon hash contract and gets its address
let
hasherReceipt = await web3.deployContract(PoseidonHasherCode)
hasherAddress = hasherReceipt.contractAddress.get
debug "hasher address: ", hasherAddress
# deploy poseidon hasher bytecode
let poseidonT3Receipt = await web3.deployContract(PoseidonT3)
let poseidonT3Address = poseidonT3Receipt.contractAddress.get()
let poseidonAddressStripped = strip0xPrefix($poseidonT3Address)
# encode registry contract inputs to 32 bytes zero-padded
let
hasherAddressEncoded = encode(hasherAddress).data
# this is the contract constructor input
contractInput = hasherAddressEncoded
# deploy lazy imt bytecode
let lazyImtReceipt = await web3.deployContract(LazyIMT.replace("__$PoseidonT3$__", poseidonAddressStripped))
let lazyImtAddress = lazyImtReceipt.contractAddress.get()
let lazyImtAddressStripped = strip0xPrefix($lazyImtAddress)
debug "encoded hasher address: ", hasherAddressEncoded
debug "encoded contract input:", contractInput
# deploy waku rlnv2 contract
let wakuRlnContractReceipt = await web3.deployContract(WakuRlnV2Contract.replace("__$PoseidonT3$__", poseidonAddressStripped).replace("__$LazyIMT$__", lazyImtAddressStripped))
let wakuRlnContractAddress = wakuRlnContractReceipt.contractAddress.get()
let wakuRlnAddressStripped = strip0xPrefix($wakuRlnContractAddress)
# deploy registry contract with its constructor inputs
let receipt =
await web3.deployContract(RegistryContractCode, contractInput = contractInput)
debug "Address of the deployed rlnv2 contract: ", wakuRlnContractAddress
let contractAddress = receipt.contractAddress.get()
# need to send concat: impl & init_bytes
let contractInput = encode(wakuRlnContractAddress).data & Erc1967ProxyContractInput
debug "contractInput", contractInput
let proxyReceipt = await web3.deployContract(Erc1967Proxy, contractInput = contractInput)
debug "Address of the deployed registry contract: ", contractAddress
debug "proxy receipt", proxyReceipt
let proxyAddress = proxyReceipt.contractAddress.get()
let registryContract = web3.contractSender(WakuRlnRegistry, contractAddress)
when defined(rln_v2):
let initReceipt = await registryContract.initialize().send()
let newStorageReceipt = await registryContract.newStorage(20.u256).send()
else:
let newStorageReceipt = await registryContract.newStorage().send()
debug "Receipt of the newStorage transaction: ", newStorageReceipt
let newBalance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
debug "Account balance after the contract deployment: ", newBalance
await web3.close()
debug "disconnected from ", ethClientAddress
return contractAddress
return proxyAddress
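The deployment sequence above (PoseidonT3, then LazyIMT, then WakuRlnV2, then an ERC1967 proxy pointing at it) links libraries the way solc expects: each placeholder in the creation bytecode is replaced by the deployed library address without its 0x prefix. A condensed restatement of that substitution:
# condensed from the code above; the __$...$__ markers are solc link placeholders
let linkedBytecode = WakuRlnV2Contract
  .replace("__$PoseidonT3$__", poseidonAddressStripped)
  .replace("__$LazyIMT$__", lazyImtAddressStripped)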
proc createEthAccount(): Future[(keys.PrivateKey, Address)] {.async.} =
let web3 = await newWeb3(EthClient)
@ -187,7 +176,7 @@ proc stopAnvil(runAnvil: Process) {.used.} =
proc setup(): Future[OnchainGroupManager] {.async.} =
let rlnInstanceRes =
createRlnInstance(tree_path = genTempPath("rln_tree", "group_manager_onchain"))
require:
check:
rlnInstanceRes.isOk()
let rlnInstance = rlnInstanceRes.get()
@ -223,8 +212,7 @@ suite "Onchain group manager":
check:
manager.ethRpc.isSome()
manager.rlnContract.isSome()
manager.membershipFee.isSome()
manager.wakuRlnContract.isSome()
manager.initialized
manager.rlnContractDeployedBlockNumber > 0
@ -287,6 +275,8 @@ suite "Onchain group manager":
asyncTest "startGroupSync: should sync to the state of the group":
let manager = await setup()
let credentials = generateCredentials(manager.rlnInstance)
let rateCommitment = getRateCommitment(credentials, UserMessageLimit(1)).valueOr:
raiseAssert $error
(await manager.init()).isOkOr:
raiseAssert $error
@ -297,28 +287,17 @@ suite "Onchain group manager":
proc generateCallback(fut: Future[void]): OnRegisterCallback =
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
require:
check:
registrations.len == 1
registrations[0].index == 0
when defined(rln_v2):
require:
registrations[0].rateCommitment ==
getRateCommitment(credentials, UserMessageLimit(1))
else:
require:
registrations[0].idCommitment == credentials.idCommitment
require:
registrations[0].index == 0
registrations[0].rateCommitment == rateCommitment
fut.complete()
return callback
try:
manager.onRegister(generateCallback(fut))
when defined(rln_v2):
await manager.register(credentials, UserMessageLimit(1))
else:
await manager.register(credentials)
(await manager.startGroupSync()).isOkOr:
raiseAssert $error
except Exception, CatchableError:
@ -355,16 +334,9 @@ suite "Onchain group manager":
): OnRegisterCallback =
var futureIndex = 0
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
when defined(rln_v2):
let rateCommitment = getRateCommitment(credentials[futureIndex], UserMessageLimit(1))
if registrations.len == 1 and
registrations[0].rateCommitment ==
getRateCommitment(credentials[futureIndex], UserMessageLimit(1)) and
registrations[0].index == MembershipIndex(futureIndex):
futs[futureIndex].complete()
futureIndex += 1
else:
if registrations.len == 1 and
registrations[0].idCommitment == credentials[futureIndex].idCommitment and
registrations[0].rateCommitment == rateCommitment.get() and
registrations[0].index == MembershipIndex(futureIndex):
futs[futureIndex].complete()
futureIndex += 1
@ -377,10 +349,7 @@ suite "Onchain group manager":
raiseAssert $error
for i in 0 ..< credentials.len():
when defined(rln_v2):
await manager.register(credentials[i], UserMessageLimit(1))
else:
await manager.register(credentials[i])
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
@ -399,14 +368,11 @@ suite "Onchain group manager":
let dummyCommitment = default(IDCommitment)
try:
when defined(rln_v2):
await manager.register(
RateCommitment(
idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(1)
)
)
else:
await manager.register(dummyCommitment)
except CatchableError:
assert true
except Exception:
@ -426,14 +392,11 @@ suite "Onchain group manager":
raiseAssert $error
try:
when defined(rln_v2):
await manager.register(
RateCommitment(
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
)
)
else:
await manager.register(idCommitment)
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
@ -448,23 +411,16 @@ suite "Onchain group manager":
asyncTest "register: callback is called":
let manager = await setup()
let idCommitment = generateCredentials(manager.rlnInstance).idCommitment
let idCredentials = generateCredentials(manager.rlnInstance)
let idCommitment = idCredentials.idCommitment
let fut = newFuture[void]()
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
require:
let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1))
check:
registrations.len == 1
when defined(rln_v2):
require:
registrations[0].rateCommitment ==
RateCommitment(
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
)
else:
require:
registrations[0].idCommitment == idCommitment
require:
registrations[0].rateCommitment == rateCommitment.get()
registrations[0].index == 0
fut.complete()
@ -474,18 +430,15 @@ suite "Onchain group manager":
try:
(await manager.startGroupSync()).isOkOr:
raiseAssert $error
when defined(rln_v2):
await manager.register(
RateCommitment(
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
)
)
else:
await manager.register(idCommitment)
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
check await fut.withTimeout(5.seconds)
await fut
await manager.stop()
@ -511,29 +464,20 @@ suite "Onchain group manager":
let fut = newFuture[void]()
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
when defined(rln_v2):
if registrations.len == 1 and
registrations[0].rateCommitment ==
getRateCommitment(credentials, UserMessageLimit(1)) and
registrations[0].index == 0:
manager.idCredentials = some(credentials)
fut.complete()
else:
if registrations.len == 1 and
registrations[0].idCommitment == credentials.idCommitment and
getRateCommitment(credentials, UserMessageLimit(1)).get() and
registrations[0].index == 0:
manager.idCredentials = some(credentials)
fut.complete()
manager.onRegister(callback)
try:
(await manager.startGroupSync()).isOkOr:
raiseAssert $error
when defined(rln_v2):
await manager.register(credentials, UserMessageLimit(1))
else:
await manager.register(credentials)
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
@ -546,14 +490,11 @@ suite "Onchain group manager":
debug "epoch in bytes", epochHex = epoch.inHex()
# generate proof
when defined(rln_v2):
let validProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(1)
)
else:
let validProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
require:
check:
validProofRes.isOk()
let validProof = validProofRes.get()
@ -576,7 +517,6 @@ suite "Onchain group manager":
## Assume the registration occurred out of band
manager.idCredentials = some(credentials)
manager.membershipIndex = some(MembershipIndex(0))
when defined(rln_v2):
manager.userMessageLimit = some(UserMessageLimit(1))
let messageBytes = "Hello".toBytes()
@ -586,15 +526,10 @@ suite "Onchain group manager":
debug "epoch in bytes", epochHex = epoch.inHex()
# generate proof
when defined(rln_v2):
let validProofRes = manager.generateProof(
let validProof = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(0)
)
else:
let validProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
require:
validProofRes.isOk()
let validProof = validProofRes.get()
).valueOr:
raiseAssert $error
# validate the root (should be false)
let validated = manager.validateRoot(validProof.merkleRoot)
@ -612,29 +547,20 @@ suite "Onchain group manager":
let fut = newFuture[void]()
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
when defined(rln_v2):
if registrations.len == 1 and
registrations[0].rateCommitment ==
getRateCommitment(credentials, UserMessageLimit(1)) and
registrations[0].index == 0:
manager.idCredentials = some(credentials)
fut.complete()
else:
if registrations.len == 1 and
registrations[0].idCommitment == credentials.idCommitment and
getRateCommitment(credentials, UserMessageLimit(1)).get() and
registrations[0].index == 0:
manager.idCredentials = some(credentials)
fut.complete()
manager.onRegister(callback)
try:
(await manager.startGroupSync()).isOkOr:
raiseAssert $error
when defined(rln_v2):
await manager.register(credentials, UserMessageLimit(1))
else:
await manager.register(credentials)
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
await fut
@ -646,23 +572,16 @@ suite "Onchain group manager":
debug "epoch in bytes", epochHex = epoch.inHex()
# generate proof
when defined(rln_v2):
let validProofRes = manager.generateProof(
let validProof = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(0)
)
else:
let validProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
require:
validProofRes.isOk()
let validProof = validProofRes.get()
).valueOr:
raiseAssert $error
# verify the proof (should be true)
let verifiedRes = manager.verifyProof(messageBytes, validProof)
require:
verifiedRes.isOk()
let verified = manager.verifyProof(messageBytes, validProof).valueOr:
raiseAssert $error
check:
verifiedRes.get()
check: verified
await manager.stop()
asyncTest "verifyProof: should reject invalid proof":
@ -675,10 +594,8 @@ suite "Onchain group manager":
let idCredential = generateCredentials(manager.rlnInstance)
try:
when defined(rln_v2):
await manager.register(getRateCommitment(idCredential, UserMessageLimit(1)))
else:
await manager.register(idCredential.idCommitment)
await manager.register(RateCommitment(idCommitment: idCredential.idCommitment,
userMessageLimit: UserMessageLimit(1)))
except Exception, CatchableError:
assert false,
"exception raised when calling startGroupSync: " & getCurrentExceptionMsg()
@ -688,7 +605,6 @@ suite "Onchain group manager":
## Assume the registration occurred out of band
manager.idCredentials = some(idCredential2)
manager.membershipIndex = some(MembershipIndex(0))
when defined(rln_v2):
manager.userMessageLimit = some(UserMessageLimit(1))
let messageBytes = "Hello".toBytes()
@ -698,14 +614,11 @@ suite "Onchain group manager":
debug "epoch in bytes", epochHex = epoch.inHex()
# generate proof
when defined(rln_v2):
let invalidProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(0)
)
else:
let invalidProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
require:
check:
invalidProofRes.isOk()
let invalidProof = invalidProofRes.get()
@ -734,16 +647,9 @@ suite "Onchain group manager":
): OnRegisterCallback =
var futureIndex = 0
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
when defined(rln_v2):
if registrations.len == 1 and
registrations[0].rateCommitment ==
getRateCommitment(credentials[futureIndex], UserMessageLimit(1)) and
registrations[0].index == MembershipIndex(futureIndex):
futs[futureIndex].complete()
futureIndex += 1
else:
if registrations.len == 1 and
registrations[0].idCommitment == credentials[futureIndex].idCommitment and
getRateCommitment(credentials[futureIndex], UserMessageLimit(1)).get() and
registrations[0].index == MembershipIndex(futureIndex):
futs[futureIndex].complete()
futureIndex += 1
@ -756,17 +662,14 @@ suite "Onchain group manager":
raiseAssert $error
for i in 0 ..< credentials.len():
when defined(rln_v2):
await manager.register(credentials[i], UserMessageLimit(1))
else:
await manager.register(credentials[i])
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
await allFutures(futures)
# At this point, we should have a full root queue, 5 roots, and partial buffer of 1 root
require:
check:
manager.validRoots.len() == credentialCount - 1
manager.validRootBuffer.len() == 1


@ -94,14 +94,11 @@ suite "Static group manager":
let dummyCommitment = default(IDCommitment)
try:
when defined(rln_v2):
await manager.register(
RateCommitment(
idCommitment: dummyCommitment, userMessageLimit: DefaultUserMessageLimit
)
)
else:
await manager.register(dummyCommitment)
except ValueError:
assert true
except Exception, CatchableError:
@ -117,14 +114,11 @@ suite "Static group manager":
let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
raiseAssert $error
try:
when defined(rln_v2):
await manager.register(
RateCommitment(
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
)
)
else:
await manager.register(idCommitment)
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
@ -143,15 +137,10 @@ suite "Static group manager":
require:
registrations.len == 1
registrations[0].index == 10
when defined(rln_v2):
require:
registrations[0].rateCommitment ==
RateCommitment(
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
)
else:
require:
registrations[0].idCommitment == idCommitment
).toLeaf().get()
callbackCalled = true
fut.complete()
@ -161,14 +150,11 @@ suite "Static group manager":
raiseAssert $error
(await manager.startGroupSync()).isOkOr:
raiseAssert $error
when defined(rln_v2):
await manager.register(
RateCommitment(
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
)
)
else:
await manager.register(idCommitment)
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
@ -215,15 +201,11 @@ suite "Static group manager":
require:
withdrawals.len == 1
withdrawals[0].index == 0
when defined(rln_v2):
require:
withdrawals[0].rateCommitment ==
RateCommitment(
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
)
else:
require:
withdrawals[0].idCommitment == idCommitment
).toLeaf().get()
callbackCalled = true
fut.complete()


@ -6,10 +6,10 @@ else:
{.push raises: [].}
import
../rln/waku_rln_relay_utils,
../../../waku/waku_keystore/protocol_types,
../../../waku/waku_rln_relay,
../../../waku/waku_rln_relay/rln
./rln/waku_rln_relay_utils,
../../waku/waku_keystore/protocol_types,
../../waku/waku_rln_relay,
../../waku/waku_rln_relay/rln
import testutils/unittests
import stew/results, stint


@ -525,14 +525,11 @@ suite "Waku rln relay":
let rln = rlnInstance.get()
# create a Merkle tree
when defined(rln_v2):
let rateCommitments =
groupIDCommitments.mapIt(RateCommitment(idCommitment: it, userMessageLimit: 20))
let leaves = rateCommitments.toLeaves().valueOr:
raiseAssert $error
let membersAdded = rln.insertMembers(0, leaves)
else:
let membersAdded = rln.insertMembers(0, groupIDCommitments)
assert membersAdded, "members should be added"
let rawRoot = rln.getMerkleRoot().valueOr:
@ -691,7 +688,6 @@ suite "Waku rln relay":
asyncTest "validateMessageAndUpdateLog test":
let index = MembershipIndex(5)
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index),
@ -699,13 +695,7 @@ suite "Waku rln relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"),
)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
@ -749,7 +739,6 @@ suite "Waku rln relay":
let index1 = MembershipIndex(5)
let index2 = MembershipIndex(6)
when defined(rln_v2):
let rlnConf1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index1),
@ -757,18 +746,10 @@ suite "Waku rln relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"),
)
else:
let rlnConf1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index1),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"),
)
let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
when defined(rln_v2):
let rlnConf2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index2),
@ -776,13 +757,7 @@ suite "Waku rln relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
)
else:
let rlnConf2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index2),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
)
let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
# get the current epoch time


@ -40,7 +40,6 @@ procSuite "WakuNode - RLN relay":
await node1.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -48,13 +47,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
)
else:
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
@ -62,7 +55,6 @@ procSuite "WakuNode - RLN relay":
# node 2
await node2.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
@ -70,13 +62,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"),
)
else:
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
@ -84,7 +70,6 @@ procSuite "WakuNode - RLN relay":
# node 3
await node3.mountRelay(@[DefaultPubsubTopic])
when defined(rln_v2):
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
@ -92,13 +77,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"),
)
else:
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
@ -162,7 +141,6 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
for index, node in nodes:
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index.uint + 1),
@ -170,13 +148,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index.uint + 1),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
)
await node.mountRlnRelay(wakuRlnConfig)
# start them
@ -263,7 +235,6 @@ procSuite "WakuNode - RLN relay":
await node1.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -271,13 +242,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"),
)
else:
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
@ -285,7 +250,6 @@ procSuite "WakuNode - RLN relay":
# node 2
await node2.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
@ -293,13 +257,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"),
)
else:
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
@ -307,7 +265,6 @@ procSuite "WakuNode - RLN relay":
# node 3
await node3.mountRelay(@[DefaultPubsubTopic])
when defined(rln_v2):
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
@ -315,13 +272,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"),
)
else:
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
@ -354,17 +305,11 @@ procSuite "WakuNode - RLN relay":
input = concat(payload, contentTopicBytes)
extraBytes: seq[byte] = @[byte(1), 2, 3]
when defined(rln_v2):
let nonceManager = node1.wakuRlnRelay.nonceManager
let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(
concat(input, extraBytes), epoch, MessageId(0)
)
else:
let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(
concat(input, extraBytes),
# we add extra bytes to invalidate proof verification against original payload
epoch,
)
assert rateLimitProofRes.isOk(), $rateLimitProofRes.error
# check the proof is generated correctly outside when block to avoid duplication
let rateLimitProof = rateLimitProofRes.get().encode().buffer
@ -406,7 +351,6 @@ procSuite "WakuNode - RLN relay":
await node1.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -414,13 +358,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"),
)
else:
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
@ -429,7 +367,6 @@ procSuite "WakuNode - RLN relay":
await node2.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
@ -437,13 +374,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"),
)
else:
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
@ -451,7 +382,6 @@ procSuite "WakuNode - RLN relay":
await node3.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
@ -459,13 +389,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"),
)
else:
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
@ -562,7 +486,6 @@ procSuite "WakuNode - RLN relay":
await node1.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -570,13 +493,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"),
)
else:
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
@ -585,7 +502,6 @@ procSuite "WakuNode - RLN relay":
await node2.mountRelay(@[DefaultPubsubTopic])
# mount rlnrelay in off-chain mode
when defined(rln_v2):
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
@ -593,13 +509,7 @@ procSuite "WakuNode - RLN relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"),
)
else:
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()


@ -219,7 +219,6 @@ suite "Waku v2 Rest API - Relay":
let node = testWakuNode()
await node.start()
await node.mountRelay()
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -227,13 +226,7 @@ suite "Waku v2 Rest API - Relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
# RPC server setup
@ -443,7 +436,6 @@ suite "Waku v2 Rest API - Relay":
let node = testWakuNode()
await node.start()
await node.mountRelay()
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -451,13 +443,7 @@ suite "Waku v2 Rest API - Relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
# RPC server setup
@ -502,7 +488,6 @@ suite "Waku v2 Rest API - Relay":
let node = testWakuNode()
await node.start()
await node.mountRelay()
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -510,13 +495,7 @@ suite "Waku v2 Rest API - Relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
# RPC server setup
@ -557,7 +536,6 @@ suite "Waku v2 Rest API - Relay":
let node = testWakuNode()
await node.start()
await node.mountRelay()
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -565,13 +543,7 @@ suite "Waku v2 Rest API - Relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
# RPC server setup
@ -619,7 +591,6 @@ suite "Waku v2 Rest API - Relay":
let node = testWakuNode()
await node.start()
await node.mountRelay()
when defined(rln_v2):
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
@ -627,13 +598,7 @@ suite "Waku v2 Rest API - Relay":
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
else:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
# RPC server setup


@ -67,10 +67,7 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
# 5. register on-chain
try:
when defined(rln_v2):
waitFor groupManager.register(credential, conf.rlnRelayUserMessageLimit)
else:
waitFor groupManager.register(credential)
except Exception, CatchableError:
error "failure while registering credentials on-chain",
error = getCurrentExceptionMsg()
@ -82,11 +79,9 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
chainId = $groupManager.chainId.get(),
contractAddress = conf.rlnRelayEthContractAddress,
membershipIndex = groupManager.membershipIndex.get()
when defined(rln_v2):
info "Your user message limit is", userMessageLimit = conf.rlnRelayUserMessageLimit
# 6. write to keystore
when defined(rln_v2):
let keystoreCred = KeystoreMembership(
membershipContract: MembershipContract(
chainId: $groupManager.chainId.get(), address: conf.rlnRelayEthContractAddress
@ -95,14 +90,7 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
identityCredential: credential,
userMessageLimit: conf.rlnRelayUserMessageLimit,
)
else:
let keystoreCred = KeystoreMembership(
membershipContract: MembershipContract(
chainId: $groupManager.chainId.get(), address: conf.rlnRelayEthContractAddress
),
treeIndex: groupManager.membershipIndex.get(),
identityCredential: credential,
)
let persistRes = addMembershipCredentials(
conf.rlnRelayCredPath, keystoreCred, conf.rlnRelayCredPassword, RLNAppInfo

vendor/zerokit vendored

@ -1 +1 @@
Subproject commit 0ad1ed296d49e85598e0ec0bae7c220885e47912
Subproject commit 85d71a5427ee78528d6420c04b67c7825e3c6e91


@ -195,7 +195,6 @@ proc setupProtocols(
quit(QuitFailure)
if conf.rlnRelay:
when defined(rln_v2):
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
@ -208,18 +207,6 @@ proc setupProtocols(
rlnEpochSizeSec: conf.rlnEpochSizeSec,
onFatalErrorAction: onFatalErrorAction,
)
else:
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayTreePath: conf.rlnRelayTreePath,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
onFatalErrorAction: onFatalErrorAction,
)
try:
waitFor node.mountRlnRelay(rlnConf)


@ -15,9 +15,7 @@ type
IdentitySecretHash* = seq[byte] #array[32, byte]
# hash of identity key as defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership
IDCommitment* = seq[byte] #array[32, byte]
when defined(rln_v2):
type UserMessageLimit* = uint64
UserMessageLimit* = uint64
type IdentityCredential* = object
idTrapdoor*: IdentityTrapdoor
@ -103,10 +101,8 @@ type KeystoreMembership* = ref object of RootObj
membershipContract*: MembershipContract
treeIndex*: MembershipIndex
identityCredential*: IdentityCredential
when defined(rln_v2):
userMessageLimit*: UserMessageLimit
when defined(rln_v2):
proc `$`*(m: KeystoreMembership): string =
return
"KeystoreMembership(chainId: " & m.membershipContract.chainId &
@ -114,14 +110,6 @@ when defined(rln_v2):
$m.treeIndex & ", userMessageLimit: " & $m.userMessageLimit &
", identityCredential: " & $m.identityCredential & ")"
else:
proc `$`*(m: KeystoreMembership): string =
return
"KeystoreMembership(chainId: " & m.membershipContract.chainId &
", contractAddress: " & m.membershipContract.address & ", treeIndex: " &
$m.treeIndex & ", identityCredential: " & $m.identityCredential & ")"
when defined(rln_v2):
proc `==`*(x, y: KeystoreMembership): bool =
return
x.membershipContract.chainId == y.membershipContract.chainId and
@ -132,17 +120,6 @@ when defined(rln_v2):
x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and
x.identityCredential.idCommitment == y.identityCredential.idCommitment
else:
proc `==`*(x, y: KeystoreMembership): bool =
return
x.membershipContract.chainId == y.membershipContract.chainId and
x.membershipContract.address == y.membershipContract.address and
x.treeIndex == y.treeIndex and
x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and
x.identityCredential.idNullifier == y.identityCredential.idNullifier and
x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and
x.identityCredential.idCommitment == y.identityCredential.idCommitment
proc hash*(m: KeystoreMembership): string =
# hash together the chainId, address and treeIndex
return


@ -27,7 +27,6 @@ const
const DefaultRlnTreePath* = "rln_tree.db"
when defined(rln_v2):
const
# pre-processed "rln/waku-rln-relay/v2.0.0" to array[32, byte]
DefaultRlnIdentifier*: RlnIdentifier = [
@ -60048,15 +60047,11 @@ const StaticGroupKeys* =
]
# StaticGroupMerkleRoot is the root of the Merkle tree constructed from the StaticGroupKeys above
# only identity commitments are used for the Merkle tree construction
# rln-v2: rate commitments are used for the Merkle tree construction, defaulting the UserMessageLimit to 20
# the root is created locally, using createMembershipList proc from waku_rln_relay_utils module, and the result is hardcoded in here
when defined(rln_v2):
const StaticGroupMerkleRoot* =
"2c149e48886b5ba3da2edf8db8d7a364ae7a25618489c04cf0c0380f7cdd4d6f"
else:
const StaticGroupMerkleRoot* =
"1e534adab58f7d300aaeecae57a25e0a0b18c368a09f720280da92b288950901"
const MaxClockGapSeconds* = 20.0 # the maximum clock difference between peers in seconds

File diff suppressed because one or more lines are too long


@ -30,7 +30,6 @@ proc inHex*(
valueHex = "0" & valueHex
return toLowerAscii(valueHex)
when defined(rln_v2):
proc toUserMessageLimit*(v: UInt256): UserMessageLimit =
return cast[UserMessageLimit](v)
@ -56,7 +55,6 @@ proc serialize*(v: uint64): array[32, byte] =
discard output.copyFrom(bytes)
return output
when defined(rln_v2):
proc serialize*(
idSecretHash: IdentitySecretHash,
memIndex: MembershipIndex,
@ -83,28 +81,12 @@ when defined(rln_v2):
)
return output
else:
proc serialize*(
idSecretHash: IdentitySecretHash,
memIndex: MembershipIndex,
epoch: Epoch,
msg: openArray[byte],
): seq[byte] =
## a private proc to convert RateLimitProof and the data to a byte seq
## this conversion is used in the proofGen proc
## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian)
let lenPrefMsg = encodeLengthPrefix(msg)
let output = concat(@idSecretHash, @memIndexBytes, @epoch, lenPrefMsg)
return output
proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
## a private proc to convert RateLimitProof and data to a byte seq
## this conversion is used in the proof verification proc
## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
let lenPrefMsg = encodeLengthPrefix(@data)
when defined(rln_v2):
var proofBytes = concat(
@(proof.proof),
@(proof.merkleRoot),
@ -114,17 +96,6 @@ proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
@(proof.nullifier),
lenPrefMsg,
)
else:
var proofBytes = concat(
@(proof.proof),
@(proof.merkleRoot),
@(proof.epoch),
@(proof.shareX),
@(proof.shareY),
@(proof.nullifier),
@(proof.rlnIdentifier),
lenPrefMsg,
)
return proofBytes
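Both layouts above end in signal_len<8> | signal<var>. A self-contained sketch of that length-prefix convention, an 8-byte little-endian length followed by the payload (per the kilic/rln reference linked above; the module's actual encodeLengthPrefix may differ in detail):
proc encodeLengthPrefixSketch(msg: openArray[byte]): seq[byte] =
  ## [ signal_len<8, little-endian> | signal<var> ]
  var prefix = newSeq[byte](8)
  let length = uint64(msg.len)
  for i in 0 ..< 8:
    prefix[i] = byte((length shr (8 * i)) and 0xff)
  return prefix & @msg

echo encodeLengthPrefixSketch("Hello".toOpenArrayByte(0, 4))
# -> @[5, 0, 0, 0, 0, 0, 0, 0, 72, 101, 108, 108, 111]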


@ -15,10 +15,7 @@ export options, chronos, results, protocol_types, protocol_metrics, deques
type Membership* = object
index*: MembershipIndex
when defined(rln_v2):
rateCommitment*: RateCommitment
else:
idCommitment*: IDCommitment
rateCommitment*: RawRateCommitment
type OnRegisterCallback* = proc(registrations: seq[Membership]): Future[void] {.gcsafe.}
type OnWithdrawCallback* = proc(withdrawals: seq[Membership]): Future[void] {.gcsafe.}
@ -35,7 +32,6 @@ type GroupManager* = ref object of RootObj
latestIndex*: MembershipIndex
validRoots*: Deque[MerkleNode]
onFatalErrorAction*: OnFatalErrorHandler
when defined(rln_v2):
userMessageLimit*: Option[UserMessageLimit]
# This proc is used to initialize the group manager
@ -53,7 +49,6 @@ method startGroupSync*(
# This proc is used to register a new identity commitment into the merkle tree
# The user may or may not have the identity secret to this commitment
# It should be used when detecting new members in the group, and syncing the group state
when defined(rln_v2):
method register*(
g: GroupManager, rateCommitment: RateCommitment
): Future[void] {.base, async: (raises: [Exception]).} =
@ -61,18 +56,9 @@ when defined(rln_v2):
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)
else:
method register*(
g: GroupManager, idCommitment: IDCommitment
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)
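These base methods raise until a concrete manager overrides them. A hypothetical override sketch (MyGroupManager and its atomicBatch call are illustrative only, loosely modeled on the onchain manager later in this diff):
# not part of this diff; illustrates the expected override shape
method register*(
    g: MyGroupManager, rateCommitment: RateCommitment
): Future[void] {.async: (raises: [Exception]).} =
  let leaf = rateCommitment.toLeaf().valueOr:
    raise newException(ValueError, "failed to derive leaf: " & $error)
  await g.atomicBatch(g.latestIndex, @[leaf])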
# This proc is used to register a new identity commitment into the merkle tree
# The user should have the identity secret to this commitment
# It should be used when the user wants to join the group
when defined(rln_v2):
method register*(
g: GroupManager,
credentials: IdentityCredential,
@ -82,28 +68,12 @@ when defined(rln_v2):
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)
else:
method register*(
g: GroupManager, credentials: IdentityCredential
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)
# This proc is used to register a batch of new identity commitments into the merkle tree
# The user may or may not have the identity secret to these commitments
# It should be used when detecting a batch of new members in the group, and syncing the group state
when defined(rln_v2):
method registerBatch*(
g: GroupManager, rateCommitments: seq[RateCommitment]
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet"
)
else:
method registerBatch*(
g: GroupManager, idCommitments: seq[IDCommitment]
g: GroupManager, rateCommitments: seq[RawRateCommitment]
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet"
@ -133,7 +103,6 @@ method withdrawBatch*(
)
# This proc is used to insert and remove a set of commitments from the merkle tree
when defined(rln_v2):
method atomicBatch*(
g: GroupManager,
rateCommitments: seq[RateCommitment],
@ -143,15 +112,6 @@ when defined(rln_v2):
CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet"
)
else:
method atomicBatch*(
g: GroupManager,
idCommitments: seq[IDCommitment],
toRemoveIndices: seq[MembershipIndex],
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet"
)
method stop*(g: GroupManager): Future[void] {.base, async.} =
raise
@ -216,7 +176,6 @@ method verifyProof*(
return err("proof verification failed: " & $proofVerifyRes.error())
return ok(proofVerifyRes.value())
when defined(rln_v2):
method generateProof*(
g: GroupManager,
data: openArray[byte],
@ -245,26 +204,6 @@ when defined(rln_v2):
return err("proof generation failed: " & $error)
return ok(proof)
else:
method generateProof*(
g: GroupManager, data: openArray[byte], epoch: Epoch
): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
## generates a proof for the given data and epoch
## the proof is generated using the current merkle root
if g.idCredentials.isNone():
return err("identity credentials are not set")
if g.membershipIndex.isNone():
return err("membership index is not set")
waku_rln_proof_generation_duration_seconds.nanosecondTime:
let proof = proofGen(
rlnInstance = g.rlnInstance,
data = data,
memKeys = g.idCredentials.get(),
memIndex = g.membershipIndex.get(),
epoch = epoch,
).valueOr:
return err("proof generation failed: " & $error)
return ok(proof)
method isReady*(g: GroupManager): Future[bool] {.base, async.} =
raise newException(


@ -31,74 +31,34 @@ logScope:
topics = "waku rln_relay onchain_group_manager"
# using the when predicate does not work within the contract macro, hence need to dupe
when defined(rln_v2):
contract(WakuRlnRegistry):
# this describes the storage slot to use
proc usingStorageIndex(): Uint16 {.pure.}
# this map contains the address of a given storage slot
proc storages(index: Uint16): Address {.pure.}
# this serves as an entrypoint into the rln storage contract
contract(WakuRlnContract):
# this serves as an entrypoint into the rln membership set
proc register(
storageIndex: Uint16, idCommitment: Uint256, userMessageLimit: Uint256
idCommitment: UInt256, userMessageLimit: UInt32
)
# this creates a new storage on the rln registry
proc newStorage(maxMessageLimit: Uint256)
# Initializes the implementation contract (only used in unit tests)
proc initialize()
# membership contract interface
contract(RlnStorage):
proc initialize(maxMessageLimit: UInt256)
# this event is raised when a new member is registered
proc MemberRegistered(
idCommitment: Uint256, userMessageLimit: Uint256, index: Uint256
rateCommitment: UInt256, index: Uint32
) {.event.}
# this constant contains the membership deposit of the contract
proc MEMBERSHIP_DEPOSIT(): Uint256 {.pure.}
# this map denotes existence of a given user
proc memberExists(idCommitment: Uint256): Uint256 {.view.}
# this function denotes existence of a given user
proc memberExists(idCommitment: Uint256): UInt256 {.view.}
# this constant describes the next index of a new member
proc idCommitmentIndex(): Uint256 {.view.}
proc commitmentIndex(): UInt256 {.view.}
# this constant describes the block number this contract was deployed on
proc deployedBlockNumber(): Uint256 {.view.}
else:
contract(WakuRlnRegistry):
# this describes the storage slot to use
proc usingStorageIndex(): Uint16 {.pure.}
# this map contains the address of a given storage slot
proc storages(index: Uint16): Address {.pure.}
# this serves as an entrypoint into the rln storage contract
proc register(storageIndex: Uint16, idCommitment: Uint256)
# this creates a new storage on the rln registry
proc newStorage()
# membership contract interface
contract(RlnStorage):
# this event is raised when a new member is registered
proc MemberRegistered(idCommitment: Uint256, index: Uint256) {.event.}
# this constant contains the membership deposit of the contract
proc MEMBERSHIP_DEPOSIT(): Uint256 {.pure.}
# this map denotes existence of a given user
proc memberExists(idCommitment: Uint256): Uint256 {.view.}
# this constant describes the next index of a new member
proc idCommitmentIndex(): Uint256 {.view.}
# this constant describes the block number this contract was deployed on
proc deployedBlockNumber(): Uint256 {.view.}
proc deployedBlockNumber(): UInt256 {.view.}
type
RegistryContractWithSender = Sender[WakuRlnRegistry]
RlnContractWithSender = Sender[RlnStorage]
WakuRlnContractWithSender = Sender[WakuRlnContract]
OnchainGroupManager* = ref object of GroupManager
ethClientUrl*: string
ethPrivateKey*: Option[string]
ethContractAddress*: string
ethRpc*: Option[Web3]
rlnContract*: Option[RlnContractWithSender]
rlnContractDeployedBlockNumber*: BlockNumber
registryContract*: Option[RegistryContractWithSender]
usingStorageIndex: Option[Uint16]
membershipFee*: Option[Uint256]
wakuRlnContract*: Option[WakuRlnContractWithSender]
latestProcessedBlock*: BlockNumber
registrationTxHash*: Option[TxHash]
chainId*: Option[Quantity]
@ -157,24 +117,17 @@ proc setMetadata*(
return err("failed to persist rln metadata: " & getCurrentExceptionMsg())
return ok()
when defined(rln_v2):
method atomicBatch*(
g: OnchainGroupManager,
start: MembershipIndex,
rateCommitments = newSeq[RateCommitment](),
rateCommitments = newSeq[RawRateCommitment](),
toRemoveIndices = newSeq[MembershipIndex](),
): Future[void] {.async: (raises: [Exception]), base.} =
initializedGuard(g)
# convert the rateCommitment struct to a leaf value
let leaves = rateCommitments.toLeaves().valueOr:
raise newException(
ValueError, "failed to convert rateCommitments to leaves: " & $error
)
waku_rln_membership_insertion_duration_seconds.nanosecondTime:
let operationSuccess =
g.rlnInstance.atomicWrite(some(start), leaves, toRemoveIndices)
g.rlnInstance.atomicWrite(some(start), rateCommitments, toRemoveIndices)
if not operationSuccess:
raise newException(CatchableError, "atomic batch operation failed")
# TODO: when slashing is enabled, we need to track slashed members
@ -184,76 +137,33 @@ when defined(rln_v2):
var membersSeq = newSeq[Membership]()
for i in 0 ..< rateCommitments.len:
var index = start + MembershipIndex(i)
trace "registering member", rateCommitment = rateCommitments[i], index = index
debug "registering member to callback", rateCommitment = rateCommitments[i], index = index
let member = Membership(rateCommitment: rateCommitments[i], index: index)
membersSeq.add(member)
await g.registerCb.get()(membersSeq)
g.validRootBuffer = g.slideRootQueue()
else:
method atomicBatch*(
g: OnchainGroupManager,
start: MembershipIndex,
idCommitments = newSeq[IDCommitment](),
toRemoveIndices = newSeq[MembershipIndex](),
): Future[void] {.async: (raises: [Exception]), base.} =
initializedGuard(g)
waku_rln_membership_insertion_duration_seconds.nanosecondTime:
let operationSuccess =
g.rlnInstance.atomicWrite(some(start), idCommitments, toRemoveIndices)
if not operationSuccess:
raise newException(ValueError, "atomic batch operation failed")
# TODO: when slashing is enabled, we need to track slashed members
waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
if g.registerCb.isSome():
var membersSeq = newSeq[Membership]()
for i in 0 ..< idCommitments.len:
var index = start + MembershipIndex(i)
trace "registering member", idCommitment = idCommitments[i], index = index
let member = Membership(idCommitment: idCommitments[i], index: index)
membersSeq.add(member)
await g.registerCb.get()(membersSeq)
g.validRootBuffer = g.slideRootQueue()
when defined(rln_v2):
method register*(
g: OnchainGroupManager, rateCommitment: RateCommitment
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
await g.registerBatch(@[rateCommitment])
try:
let leaf = rateCommitment.toLeaf().get()
await g.registerBatch(@[leaf])
except CatchableError:
raise newException(ValueError, getCurrentExceptionMsg())
else:
method register*(
g: OnchainGroupManager, idCommitment: IDCommitment
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
await g.registerBatch(@[idCommitment])
when defined(rln_v2):
method registerBatch*(
g: OnchainGroupManager, rateCommitments: seq[RateCommitment]
g: OnchainGroupManager, rateCommitments: seq[RawRateCommitment]
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
await g.atomicBatch(g.latestIndex, rateCommitments)
g.latestIndex += MembershipIndex(rateCommitments.len)
else:
method registerBatch*(
g: OnchainGroupManager, idCommitments: seq[IDCommitment]
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
await g.atomicBatch(g.latestIndex, idCommitments)
g.latestIndex += MembershipIndex(idCommitments.len)
when defined(rln_v2):
method register*(
g: OnchainGroupManager,
identityCredential: IdentityCredential,
@ -262,23 +172,20 @@ when defined(rln_v2):
initializedGuard(g)
let ethRpc = g.ethRpc.get()
let registryContract = g.registryContract.get()
let membershipFee = g.membershipFee.get()
let wakuRlnContract = g.wakuRlnContract.get()
var gasPrice: int
g.retryWrapper(gasPrice, "Failed to get gas price"):
int(await ethRpc.provider.eth_gasPrice()) * 2
let idCommitment = identityCredential.idCommitment.toUInt256()
let storageIndex = g.usingStorageIndex.get()
debug "registering the member",
idCommitment = idCommitment,
storageIndex = storageIndex,
userMessageLimit = userMessageLimit
var txHash: TxHash
g.retryWrapper(txHash, "Failed to register the member"):
await registryContract
.register(storageIndex, idCommitment, u256(userMessageLimit))
await wakuRlnContract
.register(idCommitment, userMessageLimit.stuint(32))
.send(gasPrice = gasPrice)
# wait for the transaction to be mined
@ -289,80 +196,30 @@ when defined(rln_v2):
g.registrationTxHash = some(txHash)
# the receipt topic holds the hash of signature of the raised events
# TODO: make this robust. search within the event list for the event
debug "ts receipt", tsReceipt
let firstTopic = tsReceipt.logs[0].topics[0]
# the hash of the signature of MemberRegistered(uint256,uint256,uint256) event is equal to the following hex value
# the first topic must equal the keccak256 hash of the MemberRegistered(uint256,uint32) event signature, computed inline below
if firstTopic !=
cast[FixedBytes[32]](keccak256.digest(
"MemberRegistered(uint256,uint256,uint256)"
"MemberRegistered(uint256,uint32)"
).data):
raise newException(ValueError, "unexpected event signature")
# the arguments of the raised event i.e., MemberRegistered are encoded inside the data field
# data = pk encoded as 256 bits || index encoded as 256 bits || userMessageLimit encoded as 256 bits
# data = rateCommitment as one 32-byte ABI word || index (uint32) left-padded to a 32-byte word
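# e.g. with index 5 the 64-byte data decodes as:
#   bytes [0, 32)  -> rateCommitment (one ABI word)
#   bytes [32, 64) -> 0x00...05 (the uint32 index left-padded to a word)
# hence membershipIndex is read from arguments[32 ..^ 1] below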
let arguments = tsReceipt.logs[0].data
debug "tx log data", arguments = arguments
let
argumentsBytes = arguments
# In TX log data, uints are encoded in big endian
membershipIndex = UInt256.fromBytesBE(argumentsBytes[64 ..^ 1])
membershipIndex = UInt256.fromBytesBE(arguments[32 ..^ 1])
debug "parsed membershipIndex", membershipIndex
g.userMessageLimit = some(userMessageLimit)
g.membershipIndex = some(membershipIndex.toMembershipIndex())
# don't handle member insertion into the tree here, it will be handled by the event listener
return
else:
method register*(
g: OnchainGroupManager, credentials: IdentityCredential
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
let ethRpc = g.ethRpc.get()
let registryContract = g.registryContract.get()
let membershipFee = g.membershipFee.get()
var gasPrice: int
g.retryWrapper(gasPrice, "Failed to get gas price"):
int(await ethRpc.provider.eth_gasPrice()) * 2
let idCommitment = credentials.idCommitment.toUInt256()
let storageIndex = g.usingStorageIndex.get()
debug "registering the member",
idCommitment = idCommitment, storageIndex = storageIndex
var txHash: TxHash
g.retryWrapper(txHash, "Failed to register the member"):
await registryContract.register(storageIndex, idCommitment).send(
gasPrice = gasPrice
)
# wait for the transaction to be mined
var tsReceipt: ReceiptObject
g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
await ethRpc.getMinedTransactionReceipt(txHash)
debug "registration transaction mined", txHash = txHash
g.registrationTxHash = some(txHash)
# the receipt topic holds the hash of signature of the raised events
# TODO: make this robust. search within the event list for the event
let firstTopic = tsReceipt.logs[0].topics[0]
# the hash of the signature of MemberRegistered(uint256,uint256) event is equal to the following hex value
if firstTopic !=
cast[FixedBytes[32]](keccak256.digest("MemberRegistered(uint256,uint256)").data):
raise newException(ValueError, "unexpected event signature")
# the arguments of the raised event i.e., MemberRegistered are encoded inside the data field
# data = pk encoded as 256 bits || index encoded as 256 bits
let arguments = tsReceipt.logs[0].data
debug "tx log data", arguments = arguments
let
argumentsBytes = arguments
# In TX log data, uints are encoded in big endian
eventIndex = UInt256.fromBytesBE(argumentsBytes[32 ..^ 1])
g.membershipIndex = some(eventIndex.toMembershipIndex())
# don't handle member insertion into the tree here, it will be handled by the event listener
return
method withdraw*(
g: OnchainGroupManager, idCommitment: IDCommitment
@ -381,10 +238,8 @@ proc parseEvent(
): GroupManagerResult[Membership] =
## parses the `data` parameter of the `MemberRegistered` event `log`
## returns an error if it cannot parse the `data` parameter
var idComm: UInt256
var rateCommitment: UInt256
var index: UInt256
when defined(rln_v2):
var userMessageLimit: UInt256
var data: string
# Remove the 0x prefix
try:
@ -396,29 +251,17 @@ proc parseEvent(
)
var offset = 0
try:
# Parse the idComm
offset += decode(data, offset, idComm)
when defined(rln_v2):
# Parse the userMessageLimit
offset += decode(data, offset, userMessageLimit)
# Parse the rateCommitment
offset += decode(data, offset, rateCommitment)
# Parse the index
offset += decode(data, offset, index)
when defined(rln_v2):
return ok(
Membership(
rateCommitment: RateCommitment(
idCommitment: idComm.toIDCommitment(),
userMessageLimit: userMessageLimit.toUserMessageLimit(),
),
rateCommitment: rateCommitment.toRateCommitment(),
index: index.toMembershipIndex(),
)
)
else:
return ok(
Membership(
idCommitment: idComm.toIDCommitment(), index: index.toMembershipIndex()
)
)
except CatchableError:
return err("failed to parse the data field of the MemberRegistered event")
@ -456,11 +299,11 @@ proc getRawEvents(
initializedGuard(g)
let ethRpc = g.ethRpc.get()
let rlnContract = g.rlnContract.get()
let wakuRlnContract = g.wakuRlnContract.get()
var events: JsonNode
g.retryWrapper(events, "Failed to get the events"):
await rlnContract.getJsonLogs(
await wakuRlnContract.getJsonLogs(
MemberRegistered,
fromBlock = some(fromBlock.blockId()),
toBlock = some(toBlock.blockId()),
@ -501,7 +344,6 @@ proc handleEvents(
try:
let startIndex = blockTable[blockNumber].filterIt(not it[1])[0][0].index
let removalIndices = members.filterIt(it[1]).mapIt(it[0].index)
when defined(rln_v2):
let rateCommitments = members.mapIt(it[0].rateCommitment)
await g.atomicBatch(
start = startIndex,
@ -509,16 +351,8 @@ proc handleEvents(
toRemoveIndices = removalIndices,
)
g.latestIndex = startIndex + MembershipIndex(rateCommitments.len)
trace "new members added to the Merkle tree", commitments = rateCommitments
else:
let idCommitments = members.mapIt(it[0].idCommitment)
await g.atomicBatch(
start = startIndex,
idCommitments = idCommitments,
toRemoveIndices = removalIndices,
)
g.latestIndex = startIndex + MembershipIndex(idCommitments.len)
trace "new members added to the Merkle tree", commitments = idCommitments
trace "new members added to the Merkle tree", commitments = rateCommitments.mapIt(it.inHex)
except CatchableError:
error "failed to insert members into the tree", error = getCurrentExceptionMsg()
raise newException(ValueError, "failed to insert members into the tree")
@ -720,23 +554,11 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
ethRpc.defaultAccount =
ethRpc.privateKey.get().toPublicKey().toCanonicalAddress().Address
let registryAddress = web3.fromHex(web3.Address, g.ethContractAddress)
let registryContract = ethRpc.contractSender(WakuRlnRegistry, registryAddress)
# get the current storage index
var usingStorageIndex: Uint16
g.retryWrapper(usingStorageIndex, "Failed to get the storage index"):
await registryContract.usingStorageIndex().call()
g.usingStorageIndex = some(usingStorageIndex)
var rlnContractAddress: Address
g.retryWrapper(rlnContractAddress, "Failed to get the rln contract address"):
await registryContract.storages(usingStorageIndex).call()
let rlnContract = ethRpc.contractSender(RlnStorage, rlnContractAddress)
let contractAddress = web3.fromHex(web3.Address, g.ethContractAddress)
let wakuRlnContract = ethRpc.contractSender(WakuRlnContract, contractAddress)
g.ethRpc = some(ethRpc)
g.rlnContract = some(rlnContract)
g.registryContract = some(registryContract)
g.wakuRlnContract = some(wakuRlnContract)
if g.keystorePath.isSome() and g.keystorePassword.isSome():
if not fileExists(g.keystorePath.get()):
@ -759,11 +581,10 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
return err("failed to get the keystore credentials: " & $error)
g.membershipIndex = some(keystoreCred.treeIndex)
when defined(rln_v2):
g.userMessageLimit = some(keystoreCred.userMessageLimit)
# now we check on the contract if the commitment actually has a membership
try:
let membershipExists = await rlnContract
let membershipExists = await wakuRlnContract
.memberExists(keystoreCred.identityCredential.idCommitment.toUInt256())
.call()
if membershipExists == 0:
@ -786,16 +607,10 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
g.latestProcessedBlock = metadata.lastProcessedBlock
g.validRoots = metadata.validRoots.toDeque()
# check if the contract exists by calling a static function
var membershipFee: Uint256
g.retryWrapper(membershipFee, "Failed to get the membership deposit"):
await rlnContract.MEMBERSHIP_DEPOSIT().call()
g.membershipFee = some(membershipFee)
var deployedBlockNumber: Uint256
g.retryWrapper(deployedBlockNumber, "Failed to get the deployed block number"):
await rlnContract.deployedBlockNumber().call()
debug "using rln storage", deployedBlockNumber, rlnContractAddress
await wakuRlnContract.deployedBlockNumber().call()
debug "using rln contract", deployedBlockNumber, rlnContractAddress = contractAddress
g.rlnContractDeployedBlockNumber = cast[BlockNumber](deployedBlockNumber)
g.latestProcessedBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber)
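# Construction sketch for this manager (only fields from the type above; the
# client URL and contract address are illustrative placeholders):
#   let manager = OnchainGroupManager(
#     ethClientUrl: "ws://127.0.0.1:8545",
#     ethContractAddress: "0x0000000000000000000000000000000000000000",
#     ethPrivateKey: none(string),
#   )
#   (await manager.init()).isOkOr:
#     return err("failed to init the group manager: " & $error)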

View File

@ -33,12 +33,10 @@ method init*(g: StaticGroupManager): Future[GroupManagerResult[void]] {.async.}
"Invalid membership index. Must be within 0 and " & $(groupSize - 1) & "but was " &
$membershipIndex
)
when defined(rln_v2):
g.userMessageLimit = some(DefaultUserMessageLimit)
g.idCredentials = some(groupKeys[membershipIndex])
# Seed the received commitments into the merkle tree
when defined(rln_v2):
let rateCommitments = groupKeys.mapIt(
RateCommitment(
idCommitment: it.idCommitment, userMessageLimit: g.userMessageLimit.get()
@ -47,11 +45,6 @@ method init*(g: StaticGroupManager): Future[GroupManagerResult[void]] {.async.}
let leaves = rateCommitments.toLeaves().valueOr:
return err("Failed to convert rate commitments to leaves: " & $error)
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, leaves)
else:
let idCommitments = groupKeys.mapIt(it.idCommitment)
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, idCommitments)
if not membersInserted:
return err("Failed to insert members into the merkle tree")
discard g.slideRootQueue()
@ -68,34 +61,22 @@ method startGroupSync*(
# No-op
return ok()
when defined(rln_v2):
method register*(
g: StaticGroupManager, rateCommitment: RateCommitment
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
await g.registerBatch(@[rateCommitment])
let leaf = rateCommitment.toLeaf().get()
else:
method register*(
g: StaticGroupManager, idCommitment: IDCommitment
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
await g.registerBatch(@[leaf])
await g.registerBatch(@[idCommitment])
when defined(rln_v2):
method registerBatch*(
g: StaticGroupManager, rateCommitments: seq[RateCommitment]
g: StaticGroupManager, rateCommitments: seq[RawRateCommitment]
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
let leavesRes = rateCommitments.toLeaves()
if not leavesRes.isOk():
raise newException(ValueError, "Failed to convert rate commitments to leaves")
let leaves = cast[seq[seq[byte]]](leavesRes.get())
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, leaves)
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, rateCommitments)
if not membersInserted:
raise newException(ValueError, "Failed to insert members into the merkle tree")
@ -113,37 +94,8 @@ when defined(rln_v2):
discard g.slideRootQueue()
g.latestIndex += MembershipIndex(rateCommitments.len)
return
else:
method registerBatch*(
g: StaticGroupManager, idCommitments: seq[IDCommitment]
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, idCommitments)
if not membersInserted:
raise newException(ValueError, "Failed to insert members into the merkle tree")
if g.registerCb.isSome():
var memberSeq = newSeq[Membership]()
for i in 0 ..< idCommitments.len:
memberSeq.add(
Membership(
idCommitment: idCommitments[i],
index: g.latestIndex + MembershipIndex(i) + 1,
)
)
await g.registerCb.get()(memberSeq)
discard g.slideRootQueue()
g.latestIndex += MembershipIndex(idCommitments.len)
return
when defined(rln_v2):
method withdraw*(
g: StaticGroupManager, idSecretHash: IdentitySecretHash
): Future[void] {.async: (raises: [Exception]).} =
@ -157,7 +109,8 @@ when defined(rln_v2):
let index = MembershipIndex(i)
let rateCommitment = RateCommitment(
idCommitment: idCommitment, userMessageLimit: g.userMessageLimit.get()
)
).toLeaf().valueOr:
raise newException(ValueError, "Failed to parse rateCommitment")
let memberRemoved = g.rlnInstance.removeMember(index)
if not memberRemoved:
raise newException(ValueError, "Failed to remove member from the merkle tree")
@ -168,28 +121,6 @@ when defined(rln_v2):
return
else:
method withdraw*(
g: StaticGroupManager, idSecretHash: IdentitySecretHash
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
let groupKeys = g.groupKeys
for i in 0 ..< groupKeys.len:
if groupKeys[i].idSecretHash == idSecretHash:
let idCommitment = groupKeys[i].idCommitment
let index = MembershipIndex(i)
let memberRemoved = g.rlnInstance.removeMember(index)
if not memberRemoved:
raise newException(ValueError, "Failed to remove member from the merkle tree")
if g.withdrawCb.isSome():
let withdrawCb = g.withdrawCb.get()
await withdrawCb((@[Membership(idCommitment: idCommitment, index: index)]))
return
method withdrawBatch*(
g: StaticGroupManager, idSecretHashes: seq[IdentitySecretHash]
): Future[void] {.async: (raises: [Exception]).} =

View File

@ -45,9 +45,10 @@ proc getNonce*(n: NonceManager): NonceManagerResult[Nonce] =
if now - n.lastNonceTime >= n.epoch:
retNonce = 0
n.nextNonce = retNonce + 1
n.lastNonceTime = now
n.nextNonce = retNonce + 1
if retNonce >= n.nonceLimit:
return err(
NonceManagerError(

View File

@ -3,7 +3,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[options, tables, deques], stew/arrayops, chronos, web3, eth/keys
import std/[options, tables, deques], stew/arrayops, stint, chronos, web3, eth/keys
import ../waku_core, ../waku_keystore, ../common/protobuf
export waku_keystore, waku_core
@ -16,20 +16,20 @@ type RLNResult* = RlnRelayResult[ptr RLN]
type
MerkleNode* = array[32, byte]
# Each node of the Merkle tee is a Poseidon hash which is a 32 byte value
# Each node of the Merkle tree is a Poseidon hash which is a 32 byte value
Nullifier* = array[32, byte]
Epoch* = array[32, byte]
RlnIdentifier* = array[32, byte]
ZKSNARK* = array[128, byte]
when defined(rln_v2):
type
MessageId* = uint64
ExternalNullifier* = array[32, byte]
type RateCommitment* = object
RateCommitment* = object
idCommitment*: IDCommitment
userMessageLimit*: UserMessageLimit
RawRateCommitment* = seq[byte]
proc toRateCommitment*(rateCommitmentUint: UInt256): RawRateCommitment =
return RawRateCommitment(@(rateCommitmentUint.toBytesLE()))
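# Usage sketch: a MemberRegistered log carries the rate commitment as a
# big-endian UInt256 on the wire, while the tree stores little-endian leaf bytes:
#   let leaf: RawRateCommitment = rateCommitmentUint.toRateCommitment()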
# Custom data types defined for waku rln relay -------------------------
type RateLimitProof* = object
@ -51,7 +51,6 @@ type RateLimitProof* = object
epoch*: Epoch
## Application specific RLN Identifier
rlnIdentifier*: RlnIdentifier
when defined(rln_v2):
## the external nullifier used for the generation of the `proof` (derived from poseidon([epoch, rln_identifier]))
externalNullifier*: ExternalNullifier
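## Derivation sketch, assuming the poseidon helper shown further below:
##   let externalNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr:
##     return err("could not derive the external nullifier: " & $error)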

View File

@ -161,7 +161,6 @@ proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] =
return ok(output)
when defined(rln_v2):
proc toLeaf*(rateCommitment: RateCommitment): RlnRelayResult[seq[byte]] =
let idCommitment = rateCommitment.idCommitment
var userMessageLimit: array[32, byte]
@ -200,7 +199,6 @@ proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
)
)
when defined(rln_v2):
proc proofGen*(
rlnInstance: ptr RLN,
data: openArray[byte],
@ -277,75 +275,6 @@ when defined(rln_v2):
)
return ok(output)
else:
proc proofGen*(
rlnInstance: ptr RLN,
data: openArray[byte],
memKeys: IdentityCredential,
memIndex: MembershipIndex,
epoch: Epoch,
): RateLimitProofResult =
# serialize inputs
let serializedInputs = serialize(
idSecretHash = memKeys.idSecretHash,
memIndex = memIndex,
epoch = epoch,
msg = data,
)
var inputBuffer = toBuffer(serializedInputs)
debug "input buffer ", inputBuffer = repr(inputBuffer)
# generate the proof
var proof: Buffer
let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
# check whether the generate_proof call is done successfully
if not proofIsSuccessful:
return err("could not generate the proof")
var proofValue = cast[ptr array[320, byte]](proof.`ptr`)
let proofBytes: array[320, byte] = proofValue[]
debug "proof content", proofHex = proofValue[].toHex
## parse the proof as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]
let
proofOffset = 128
rootOffset = proofOffset + 32
epochOffset = rootOffset + 32
shareXOffset = epochOffset + 32
shareYOffset = shareXOffset + 32
nullifierOffset = shareYOffset + 32
rlnIdentifierOffset = nullifierOffset + 32
var
zkproof: ZKSNARK
proofRoot, shareX, shareY: MerkleNode
epoch: Epoch
nullifier: Nullifier
rlnIdentifier: RlnIdentifier
discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1])
discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1])
discard epoch.copyFrom(proofBytes[rootOffset .. epochOffset - 1])
discard shareX.copyFrom(proofBytes[epochOffset .. shareXOffset - 1])
discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1])
discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1])
discard
rlnIdentifier.copyFrom(proofBytes[nullifierOffset .. rlnIdentifierOffset - 1])
let output = RateLimitProof(
proof: zkproof,
merkleRoot: proofRoot,
epoch: epoch,
shareX: shareX,
shareY: shareY,
nullifier: nullifier,
rlnIdentifier: rlnIdentifier,
)
return ok(output)
# validRoots should contain the sequence of roots within the acceptable window.
# By default it is empty, in which case the validity check of the proof's root is skipped
proc proofVerify*(
@ -357,7 +286,6 @@ proc proofVerify*(
## verifies the proof, returns an error if the proof verification fails
## returns true if the proof is valid
var normalizedProof = proof
when defined(rln_v2):
# normalizing ensures the proof is verified against the derived value of the
# externalNullifier; verification will fail if a malicious peer attaches an
# invalid epoch+rlnIdentifier pair

View File

@ -23,10 +23,8 @@ import
./conversion_utils,
./constants,
./protocol_types,
./protocol_metrics
when defined(rln_v2):
import ./nonce_manager
./protocol_metrics,
./nonce_manager
import
../common/error_handling,
@ -47,7 +45,6 @@ type WakuRlnConfig* = object
rlnRelayTreePath*: string
rlnEpochSizeSec*: uint64
onFatalErrorAction*: OnFatalErrorHandler
when defined(rln_v2):
rlnRelayUserMessageLimit*: uint64
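# Construction sketch using only the fields visible in this hunk (the full
# type carries more fields above; the handler signature is assumed):
#   let conf = WakuRlnConfig(
#     rlnRelayTreePath: "/tmp/rln_tree",
#     rlnEpochSizeSec: 1,
#     rlnRelayUserMessageLimit: 20,
#     onFatalErrorAction: proc(errStr: string) = quit(QuitFailure),
#   )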
proc createMembershipList*(
@ -93,7 +90,6 @@ type WakuRLNRelay* = ref object of RootObj
rlnMaxEpochGap*: uint64
groupManager*: GroupManager
onFatalErrorAction*: OnFatalErrorHandler
when defined(rln_v2):
nonceManager*: NonceManager
proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch =
@ -307,14 +303,11 @@ proc appendRLNProof*(
let input = msg.toRLNSignal()
let epoch = rlnPeer.calcEpoch(senderEpochTime)
when defined(rln_v2):
let nonce = rlnPeer.nonceManager.getNonce().valueOr:
return err("could not get new message id to generate an rln proof: " & $error)
let proof = rlnPeer.groupManager.generateProof(input, epoch, nonce).valueOr:
return err("could not generate rln-v2 proof: " & $error)
else:
let proof = rlnPeer.groupManager.generateProof(input, epoch).valueOr:
return err("could not generate rln proof: " & $error)
msg.proof = proof.encode().buffer
return ok()
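# Usage sketch (assumes a mounted rlnPeer and `msg: var WakuMessage`):
#   if rlnPeer.appendRLNProof(msg, epochTime()).isErr():
#     discard # per-epoch message limit hit, or proof generation failed
# Within one epoch the nonce increments 0, 1, 2, ... and getNonce() errors once
# it reaches the configured limit; the next epoch resets it to 0.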
@ -445,7 +438,6 @@ proc mount(
(await groupManager.startGroupSync()).isOkOr:
return err("could not start the group sync: " & $error)
when defined(rln_v2):
return ok(
WakuRLNRelay(
groupManager: groupManager,
@ -457,16 +449,7 @@ proc mount(
onFatalErrorAction: conf.onFatalErrorAction,
)
)
else:
return ok(
WakuRLNRelay(
groupManager: groupManager,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
rlnMaxEpochGap:
max(uint64(MaxClockGapSeconds / float64(conf.rlnEpochSizeSec)), 1),
onFatalErrorAction: conf.onFatalErrorAction,
)
)
proc isReady*(rlnPeer: WakuRLNRelay): Future[bool] {.async: (raises: [Exception]).} =
## returns true if the rln-relay protocol is ready to relay messages