mirror of https://github.com/logos-messaging/logos-messaging-nim.git, synced 2026-01-03 14:33:12 +00:00

feat(rlnv2): rlnv2 fork feature branch

This commit is contained in:
parent 9cc37d037f
commit ed23c04090
10 .github/workflows/ci.yml vendored
@@ -53,12 +53,11 @@ jobs:
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
strategy:
matrix:
rln_version: [1, 2]
os: [ubuntu-latest, macos-13]
runs-on: ${{ matrix.os }}
timeout-minutes: 60

name: build-${{ matrix.os }}-rln-v${{ matrix.rln_version }}
name: build-${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -77,19 +76,18 @@ jobs:
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}

- name: Build binaries
run: make RLN_V${{matrix.rln_version}}=true V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools

test:
needs: changes
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
strategy:
matrix:
rln_version: [1, 2]
os: [ubuntu-latest, macos-13]
runs-on: ${{ matrix.os }}
timeout-minutes: 60

name: test-${{ matrix.os }}-rln-v${{ matrix.rln_version }}
name: test-${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v3
@@ -118,7 +116,7 @@ jobs:
postgres_enabled=1
fi

make RLN_V${{matrix.rln_version}}=true V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2

build-docker-image:
needs: changes
7 .github/workflows/container-image.yml vendored
@@ -22,12 +22,11 @@ jobs:
build-docker-image:
strategy:
matrix:
rln_version : [1, 2]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
timeout-minutes: 60

name: docker-build-${{ matrix.os }}-rln-v${{ matrix.rln_version }}
name: docker-build-${{ matrix.os }}
outputs:
image: ${{ steps.build.outputs.image }}
steps:
@@ -67,12 +66,12 @@ jobs:
if: ${{ steps.secrets.outcome == 'success' }}
run: |

make RLN_V${{matrix.rln_version}}=true -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres" wakunode2
make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres" wakunode2

SHORT_REF=$(git rev-parse --short HEAD)

TAG=$([ "${PR_NUMBER}" == "" ] && echo "${SHORT_REF}" || echo "${PR_NUMBER}")
IMAGE=quay.io/wakuorg/nwaku-pr:${TAG}-rln-v${{matrix.rln_version}}
IMAGE=quay.io/wakuorg/nwaku-pr:${TAG}

echo "image=${IMAGE}" >> $GITHUB_OUTPUT
echo "commit_hash=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
11 Makefile
@@ -139,11 +139,7 @@ clean: | clean-libbacktrace
.PHONY: librln shouldUseRLNV2

LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
ifeq ($(RLN_V2),true)
LIBRLN_VERSION := v0.4.4
else
LIBRLN_VERSION := v0.3.7
endif

ifeq ($(OS),Windows_NT)
LIBRLN_FILE := rln.lib
@@ -155,12 +151,7 @@ $(LIBRLN_FILE):
echo -e $(BUILD_MSG) "$@" && \
./scripts/build_rln.sh $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(LIBRLN_FILE)

shouldUseRLNV2:
ifeq ($(RLN_V2),true)
$(eval NIM_PARAMS += -d:rln_v2)
endif

librln: | $(LIBRLN_FILE) shouldUseRLNV2
librln: | $(LIBRLN_FILE)
$(eval NIM_PARAMS += --passL:$(LIBRLN_FILE) --passL:-lm)

clean-librln:
@@ -542,27 +542,16 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =

echo "rln-relay preparation is in progress..."

when defined(rln_v2):
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
)
else:
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
)
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
)

waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))
@@ -95,51 +95,25 @@ proc sendRlnMessage*(
let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
return isCompleted

when defined(rln_v2):
proc sendRlnMessageWithInvalidProof*(
client: WakuNode,
pubsubTopic: string,
contentTopic: string,
completionFuture: Future[bool],
payload: seq[byte] = "Hello".toBytes(),
): Future[bool] {.async.} =
let
extraBytes: seq[byte] = @[byte(1), 2, 3]
rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof(
concat(payload, extraBytes),
# we add extra bytes to invalidate proof verification against original payload
client.wakuRlnRelay.getCurrentEpoch(),
messageId = MessageId(0),
)
rateLimitProof = rateLimitProofRes.get().encode().buffer
message = WakuMessage(
payload: @payload, contentTopic: contentTopic, proof: rateLimitProof
)
proc sendRlnMessageWithInvalidProof*(
client: WakuNode,
pubsubTopic: string,
contentTopic: string,
completionFuture: Future[bool],
payload: seq[byte] = "Hello".toBytes(),
): Future[bool] {.async.} =
let
extraBytes: seq[byte] = @[byte(1), 2, 3]
rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof(
concat(payload, extraBytes),
# we add extra bytes to invalidate proof verification against original payload
client.wakuRlnRelay.getCurrentEpoch(),
messageId = MessageId(0),
)
rateLimitProof = rateLimitProofRes.get().encode().buffer
message =
WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof)

discard await client.publish(some(pubsubTopic), message)
let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
return isCompleted

else:
proc sendRlnMessageWithInvalidProof*(
client: WakuNode,
pubsubTopic: string,
contentTopic: string,
completionFuture: Future[bool],
payload: seq[byte] = "Hello".toBytes(),
): Future[bool] {.async.} =
let
extraBytes: seq[byte] = @[byte(1), 2, 3]
rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof(
concat(payload, extraBytes),
# we add extra bytes to invalidate proof verification against original payload
client.wakuRlnRelay.getCurrentEpoch(),
)
rateLimitProof = rateLimitProofRes.get().encode().buffer
message = WakuMessage(
payload: @payload, contentTopic: contentTopic, proof: rateLimitProof
)

discard await client.publish(some(pubsubTopic), message)
let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
return isCompleted
discard await client.publish(some(pubsubTopic), message)
let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT)
return isCompleted
@@ -14,14 +14,11 @@ proc unsafeAppendRLNProof*(
let input = msg.toRLNSignal()
let epoch = rlnPeer.calcEpoch(senderEpochTime)

when defined(rln_v2):
# we do not fetch a nonce from the nonce manager,
# instead we use 0 as the nonce
let proof = rlnPeer.groupManager.generateProof(input, epoch, 0).valueOr:
return err("could not generate rln-v2 proof: " & $error)
else:
let proof = rlnPeer.groupManager.generateProof(input, epoch).valueOr:
return err("could not generate rln proof: " & $error)
# we do not fetch a nonce from the nonce manager,
# instead we use 0 as the nonce
let proof = rlnPeer.groupManager.generateProof(input, epoch, 0).valueOr:
return err("could not generate rln-v2 proof: " & $error)

msg.proof = proof.encode().buffer
return ok()
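To make the resulting control flow easier to follow, here is a minimal, indented sketch of how the proof-generation path reads after this hunk. It is reconstructed from the lines above; the enclosing unsafeAppendRLNProof signature, its imports, and the variables in scope are assumed rather than shown.

# Illustrative reconstruction of the post-change body (not authoritative).
# `rlnPeer`, `msg` and `senderEpochTime` are assumed to be in scope.
let input = msg.toRLNSignal()
let epoch = rlnPeer.calcEpoch(senderEpochTime)

# no nonce is fetched from the nonce manager; 0 is always used as the message nonce
let proof = rlnPeer.groupManager.generateProof(input, epoch, 0).valueOr:
  return err("could not generate rln-v2 proof: " & $error)

msg.proof = proof.encode().buffer
return ok()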
@@ -5,7 +5,5 @@ import
./test_rln_group_manager_static,
./test_waku_rln_relay,
./test_wakunode_rln_relay,
./test_rln_nonce_manager

when defined(rln_v2):
import ./rln_v2/test_rln_relay_v2_serde
./test_rln_nonce_manager,
./test_rln_serde
@ -30,13 +30,12 @@ proc generateCredentials(rlnInstance: ptr RLN): IdentityCredential =
|
||||
let credRes = membershipKeyGen(rlnInstance)
|
||||
return credRes.get()
|
||||
|
||||
when defined(rln_v2):
|
||||
proc getRateCommitment(
|
||||
idCredential: IdentityCredential, userMessageLimit: UserMessageLimit
|
||||
): RateCommitment =
|
||||
return RateCommitment(
|
||||
idCommitment: idCredential.idCommitment, userMessageLimit: userMessageLimit
|
||||
)
|
||||
proc getRateCommitment(
|
||||
idCredential: IdentityCredential, userMessageLimit: UserMessageLimit
|
||||
): RateCommitment =
|
||||
return RateCommitment(
|
||||
idCommitment: idCredential.idCommitment, userMessageLimit: userMessageLimit
|
||||
)
|
||||
|
||||
proc generateCredentials(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] =
|
||||
var credentials: seq[IdentityCredential]
|
||||
@ -61,39 +60,15 @@ proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
|
||||
let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
|
||||
debug "Initial account balance: ", balance
|
||||
|
||||
when defined(rln_v2):
|
||||
# deploy registry contract with its constructor inputs
|
||||
let receipt = await web3.deployContract(RegistryContractCode)
|
||||
else:
|
||||
# deploy the poseidon hash contract and gets its address
|
||||
let
|
||||
hasherReceipt = await web3.deployContract(PoseidonHasherCode)
|
||||
hasherAddress = hasherReceipt.contractAddress.get
|
||||
debug "hasher address: ", hasherAddress
|
||||
|
||||
# encode registry contract inputs to 32 bytes zero-padded
|
||||
let
|
||||
hasherAddressEncoded = encode(hasherAddress).data
|
||||
# this is the contract constructor input
|
||||
contractInput = hasherAddressEncoded
|
||||
|
||||
debug "encoded hasher address: ", hasherAddressEncoded
|
||||
debug "encoded contract input:", contractInput
|
||||
|
||||
# deploy registry contract with its constructor inputs
|
||||
let receipt =
|
||||
await web3.deployContract(RegistryContractCode, contractInput = contractInput)
|
||||
|
||||
# deploy registry contract with its constructor inputs
|
||||
let receipt = await web3.deployContract(RegistryContractCode)
|
||||
let contractAddress = receipt.contractAddress.get()
|
||||
|
||||
debug "Address of the deployed registry contract: ", contractAddress
|
||||
|
||||
let registryContract = web3.contractSender(WakuRlnRegistry, contractAddress)
|
||||
when defined(rln_v2):
|
||||
let initReceipt = await registryContract.initialize().send()
|
||||
let newStorageReceipt = await registryContract.newStorage(20.u256).send()
|
||||
else:
|
||||
let newStorageReceipt = await registryContract.newStorage().send()
|
||||
let initReceipt = await registryContract.initialize().send()
|
||||
let newStorageReceipt = await registryContract.newStorage(20.u256).send()
|
||||
|
||||
debug "Receipt of the newStorage transaction: ", newStorageReceipt
|
||||
let newBalance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
|
||||
@ -299,13 +274,10 @@ suite "Onchain group manager":
|
||||
require:
|
||||
registrations.len == 1
|
||||
registrations[0].index == 0
|
||||
when defined(rln_v2):
|
||||
require:
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials, UserMessageLimit(1))
|
||||
else:
|
||||
require:
|
||||
registrations[0].idCommitment == credentials.idCommitment
|
||||
require:
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials, UserMessageLimit(1))
|
||||
|
||||
require:
|
||||
registrations[0].index == 0
|
||||
fut.complete()
|
||||
@ -314,10 +286,7 @@ suite "Onchain group manager":
|
||||
|
||||
try:
|
||||
manager.onRegister(generateCallback(fut))
|
||||
when defined(rln_v2):
|
||||
await manager.register(credentials, UserMessageLimit(1))
|
||||
else:
|
||||
await manager.register(credentials)
|
||||
await manager.register(credentials, UserMessageLimit(1))
|
||||
(await manager.startGroupSync()).isOkOr:
|
||||
raiseAssert $error
|
||||
except Exception, CatchableError:
|
||||
@ -354,19 +323,12 @@ suite "Onchain group manager":
|
||||
): OnRegisterCallback =
|
||||
var futureIndex = 0
|
||||
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
when defined(rln_v2):
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials[futureIndex], UserMessageLimit(1)) and
|
||||
registrations[0].index == MembershipIndex(futureIndex):
|
||||
futs[futureIndex].complete()
|
||||
futureIndex += 1
|
||||
else:
|
||||
if registrations.len == 1 and
|
||||
registrations[0].idCommitment == credentials[futureIndex].idCommitment and
|
||||
registrations[0].index == MembershipIndex(futureIndex):
|
||||
futs[futureIndex].complete()
|
||||
futureIndex += 1
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials[futureIndex], UserMessageLimit(1)) and
|
||||
registrations[0].index == MembershipIndex(futureIndex):
|
||||
futs[futureIndex].complete()
|
||||
futureIndex += 1
|
||||
|
||||
return callback
|
||||
|
||||
@ -376,10 +338,7 @@ suite "Onchain group manager":
|
||||
raiseAssert $error
|
||||
|
||||
for i in 0 ..< credentials.len():
|
||||
when defined(rln_v2):
|
||||
await manager.register(credentials[i], UserMessageLimit(1))
|
||||
else:
|
||||
await manager.register(credentials[i])
|
||||
await manager.register(credentials[i], UserMessageLimit(1))
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
@ -398,14 +357,11 @@ suite "Onchain group manager":
|
||||
let dummyCommitment = default(IDCommitment)
|
||||
|
||||
try:
|
||||
when defined(rln_v2):
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
else:
|
||||
await manager.register(dummyCommitment)
|
||||
)
|
||||
except CatchableError:
|
||||
assert true
|
||||
except Exception:
|
||||
@ -425,14 +381,11 @@ suite "Onchain group manager":
|
||||
raiseAssert $error
|
||||
|
||||
try:
|
||||
when defined(rln_v2):
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
else:
|
||||
await manager.register(idCommitment)
|
||||
)
|
||||
except Exception, CatchableError:
|
||||
assert false,
|
||||
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||
@ -454,16 +407,11 @@ suite "Onchain group manager":
|
||||
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
require:
|
||||
registrations.len == 1
|
||||
when defined(rln_v2):
|
||||
require:
|
||||
registrations[0].rateCommitment ==
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
else:
|
||||
require:
|
||||
registrations[0].idCommitment == idCommitment
|
||||
require:
|
||||
registrations[0].rateCommitment ==
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
registrations[0].index == 0
|
||||
fut.complete()
|
||||
|
||||
@ -473,14 +421,11 @@ suite "Onchain group manager":
|
||||
try:
|
||||
(await manager.startGroupSync()).isOkOr:
|
||||
raiseAssert $error
|
||||
when defined(rln_v2):
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
|
||||
)
|
||||
else:
|
||||
await manager.register(idCommitment)
|
||||
)
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
@ -510,29 +455,20 @@ suite "Onchain group manager":
|
||||
let fut = newFuture[void]()
|
||||
|
||||
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
when defined(rln_v2):
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials, UserMessageLimit(1)) and
|
||||
registrations[0].index == 0:
|
||||
manager.idCredentials = some(credentials)
|
||||
fut.complete()
|
||||
else:
|
||||
if registrations.len == 1 and
|
||||
registrations[0].idCommitment == credentials.idCommitment and
|
||||
registrations[0].index == 0:
|
||||
manager.idCredentials = some(credentials)
|
||||
fut.complete()
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials, UserMessageLimit(1)) and
|
||||
registrations[0].index == 0:
|
||||
manager.idCredentials = some(credentials)
|
||||
fut.complete()
|
||||
|
||||
|
||||
manager.onRegister(callback)
|
||||
|
||||
try:
|
||||
(await manager.startGroupSync()).isOkOr:
|
||||
raiseAssert $error
|
||||
when defined(rln_v2):
|
||||
await manager.register(credentials, UserMessageLimit(1))
|
||||
else:
|
||||
await manager.register(credentials)
|
||||
await manager.register(credentials, UserMessageLimit(1))
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
@ -545,12 +481,9 @@ suite "Onchain group manager":
|
||||
debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
|
||||
# generate proof
|
||||
when defined(rln_v2):
|
||||
let validProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(1)
|
||||
)
|
||||
else:
|
||||
let validProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
|
||||
let validProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(1)
|
||||
)
|
||||
|
||||
require:
|
||||
validProofRes.isOk()
|
||||
@ -575,8 +508,7 @@ suite "Onchain group manager":
|
||||
## Assume the registration occured out of band
|
||||
manager.idCredentials = some(credentials)
|
||||
manager.membershipIndex = some(MembershipIndex(0))
|
||||
when defined(rln_v2):
|
||||
manager.userMessageLimit = some(UserMessageLimit(1))
|
||||
manager.userMessageLimit = some(UserMessageLimit(1))
|
||||
|
||||
let messageBytes = "Hello".toBytes()
|
||||
|
||||
@ -585,12 +517,9 @@ suite "Onchain group manager":
|
||||
debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
|
||||
# generate proof
|
||||
when defined(rln_v2):
|
||||
let validProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
)
|
||||
else:
|
||||
let validProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
|
||||
let validProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
)
|
||||
require:
|
||||
validProofRes.isOk()
|
||||
let validProof = validProofRes.get()
|
||||
@ -611,29 +540,20 @@ suite "Onchain group manager":
|
||||
let fut = newFuture[void]()
|
||||
|
||||
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
when defined(rln_v2):
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials, UserMessageLimit(1)) and
|
||||
registrations[0].index == 0:
|
||||
manager.idCredentials = some(credentials)
|
||||
fut.complete()
|
||||
else:
|
||||
if registrations.len == 1 and
|
||||
registrations[0].idCommitment == credentials.idCommitment and
|
||||
registrations[0].index == 0:
|
||||
manager.idCredentials = some(credentials)
|
||||
fut.complete()
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials, UserMessageLimit(1)) and
|
||||
registrations[0].index == 0:
|
||||
manager.idCredentials = some(credentials)
|
||||
fut.complete()
|
||||
|
||||
|
||||
manager.onRegister(callback)
|
||||
|
||||
try:
|
||||
(await manager.startGroupSync()).isOkOr:
|
||||
raiseAssert $error
|
||||
when defined(rln_v2):
|
||||
await manager.register(credentials, UserMessageLimit(1))
|
||||
else:
|
||||
await manager.register(credentials)
|
||||
await manager.register(credentials, UserMessageLimit(1))
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
await fut
|
||||
@ -645,12 +565,9 @@ suite "Onchain group manager":
|
||||
debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
|
||||
# generate proof
|
||||
when defined(rln_v2):
|
||||
let validProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
)
|
||||
else:
|
||||
let validProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
|
||||
let validProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
)
|
||||
require:
|
||||
validProofRes.isOk()
|
||||
let validProof = validProofRes.get()
|
||||
@ -674,10 +591,7 @@ suite "Onchain group manager":
|
||||
let idCredential = generateCredentials(manager.rlnInstance)
|
||||
|
||||
try:
|
||||
when defined(rln_v2):
|
||||
await manager.register(getRateCommitment(idCredential, UserMessageLimit(1)))
|
||||
else:
|
||||
await manager.register(idCredential.idCommitment)
|
||||
await manager.register(getRateCommitment(idCredential, UserMessageLimit(1)))
|
||||
except Exception, CatchableError:
|
||||
assert false,
|
||||
"exception raised when calling startGroupSync: " & getCurrentExceptionMsg()
|
||||
@ -687,8 +601,7 @@ suite "Onchain group manager":
|
||||
## Assume the registration occured out of band
|
||||
manager.idCredentials = some(idCredential2)
|
||||
manager.membershipIndex = some(MembershipIndex(0))
|
||||
when defined(rln_v2):
|
||||
manager.userMessageLimit = some(UserMessageLimit(1))
|
||||
manager.userMessageLimit = some(UserMessageLimit(1))
|
||||
|
||||
let messageBytes = "Hello".toBytes()
|
||||
|
||||
@ -697,12 +610,9 @@ suite "Onchain group manager":
|
||||
debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
|
||||
# generate proof
|
||||
when defined(rln_v2):
|
||||
let invalidProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
)
|
||||
else:
|
||||
let invalidProofRes = manager.generateProof(data = messageBytes, epoch = epoch)
|
||||
let invalidProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
)
|
||||
|
||||
require:
|
||||
invalidProofRes.isOk()
|
||||
@ -733,19 +643,12 @@ suite "Onchain group manager":
|
||||
): OnRegisterCallback =
|
||||
var futureIndex = 0
|
||||
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
when defined(rln_v2):
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials[futureIndex], UserMessageLimit(1)) and
|
||||
registrations[0].index == MembershipIndex(futureIndex):
|
||||
futs[futureIndex].complete()
|
||||
futureIndex += 1
|
||||
else:
|
||||
if registrations.len == 1 and
|
||||
registrations[0].idCommitment == credentials[futureIndex].idCommitment and
|
||||
registrations[0].index == MembershipIndex(futureIndex):
|
||||
futs[futureIndex].complete()
|
||||
futureIndex += 1
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials[futureIndex], UserMessageLimit(1)) and
|
||||
registrations[0].index == MembershipIndex(futureIndex):
|
||||
futs[futureIndex].complete()
|
||||
futureIndex += 1
|
||||
|
||||
return callback
|
||||
|
||||
@ -755,10 +658,7 @@ suite "Onchain group manager":
|
||||
raiseAssert $error
|
||||
|
||||
for i in 0 ..< credentials.len():
|
||||
when defined(rln_v2):
|
||||
await manager.register(credentials[i], UserMessageLimit(1))
|
||||
else:
|
||||
await manager.register(credentials[i])
|
||||
await manager.register(credentials[i], UserMessageLimit(1))
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
|
||||
@ -94,14 +94,11 @@ suite "Static group manager":
|
||||
let dummyCommitment = default(IDCommitment)
|
||||
|
||||
try:
|
||||
when defined(rln_v2):
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: dummyCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: dummyCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
else:
|
||||
await manager.register(dummyCommitment)
|
||||
)
|
||||
except ValueError:
|
||||
assert true
|
||||
except Exception, CatchableError:
|
||||
@ -117,14 +114,11 @@ suite "Static group manager":
|
||||
let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
|
||||
raiseAssert $error
|
||||
try:
|
||||
when defined(rln_v2):
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
else:
|
||||
await manager.register(idCommitment)
|
||||
)
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
|
||||
@ -143,15 +137,10 @@ suite "Static group manager":
|
||||
require:
|
||||
registrations.len == 1
|
||||
registrations[0].index == 10
|
||||
when defined(rln_v2):
|
||||
require:
|
||||
registrations[0].rateCommitment ==
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
else:
|
||||
require:
|
||||
registrations[0].idCommitment == idCommitment
|
||||
registrations[0].rateCommitment ==
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
callbackCalled = true
|
||||
fut.complete()
|
||||
|
||||
@ -161,14 +150,11 @@ suite "Static group manager":
|
||||
raiseAssert $error
|
||||
(await manager.startGroupSync()).isOkOr:
|
||||
raiseAssert $error
|
||||
when defined(rln_v2):
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
await manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
else:
|
||||
await manager.register(idCommitment)
|
||||
)
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
@ -215,15 +201,11 @@ suite "Static group manager":
|
||||
require:
|
||||
withdrawals.len == 1
|
||||
withdrawals[0].index == 0
|
||||
when defined(rln_v2):
|
||||
require:
|
||||
withdrawals[0].rateCommitment ==
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
else:
|
||||
require:
|
||||
withdrawals[0].idCommitment == idCommitment
|
||||
withdrawals[0].rateCommitment ==
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: DefaultUserMessageLimit
|
||||
)
|
||||
|
||||
callbackCalled = true
|
||||
fut.complete()
|
||||
|
||||
|
||||
@ -6,10 +6,10 @@ else:
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
../rln/waku_rln_relay_utils,
|
||||
../../../waku/waku_keystore/protocol_types,
|
||||
../../../waku/waku_rln_relay,
|
||||
../../../waku/waku_rln_relay/rln
|
||||
./rln/waku_rln_relay_utils,
|
||||
../../waku/waku_keystore/protocol_types,
|
||||
../../waku/waku_rln_relay,
|
||||
../../waku/waku_rln_relay/rln
|
||||
|
||||
import testutils/unittests
|
||||
import stew/results, stint
|
||||
@ -525,14 +525,11 @@ suite "Waku rln relay":
|
||||
let rln = rlnInstance.get()
|
||||
|
||||
# create a Merkle tree
|
||||
when defined(rln_v2):
|
||||
let rateCommitments =
|
||||
groupIDCommitments.mapIt(RateCommitment(idCommitment: it, userMessageLimit: 20))
|
||||
let leaves = rateCommitments.toLeaves().valueOr:
|
||||
raiseAssert $error
|
||||
let membersAdded = rln.insertMembers(0, leaves)
|
||||
else:
|
||||
let membersAdded = rln.insertMembers(0, groupIDCommitments)
|
||||
let rateCommitments =
|
||||
groupIDCommitments.mapIt(RateCommitment(idCommitment: it, userMessageLimit: 20))
|
||||
let leaves = rateCommitments.toLeaves().valueOr:
|
||||
raiseAssert $error
|
||||
let membersAdded = rln.insertMembers(0, leaves)
|
||||
|
||||
assert membersAdded, "members should be added"
|
||||
let rawRoot = rln.getMerkleRoot().valueOr:
|
||||
@ -691,21 +688,14 @@ suite "Waku rln relay":
|
||||
asyncTest "validateMessageAndUpdateLog test":
|
||||
let index = MembershipIndex(5)
|
||||
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"),
|
||||
)
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"),
|
||||
)
|
||||
|
||||
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
|
||||
raiseAssert $error
|
||||
|
||||
@ -749,40 +739,25 @@ suite "Waku rln relay":
|
||||
let index1 = MembershipIndex(5)
|
||||
let index2 = MembershipIndex(6)
|
||||
|
||||
when defined(rln_v2):
|
||||
let rlnConf1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index1),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"),
|
||||
)
|
||||
else:
|
||||
let rlnConf1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index1),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"),
|
||||
)
|
||||
let rlnConf1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index1),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"),
|
||||
)
|
||||
|
||||
let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr:
|
||||
raiseAssert "failed to create waku rln relay: " & $error
|
||||
|
||||
when defined(rln_v2):
|
||||
let rlnConf2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index2),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
|
||||
)
|
||||
else:
|
||||
let rlnConf2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index2),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
|
||||
)
|
||||
let rlnConf2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index2),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
|
||||
)
|
||||
|
||||
let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr:
|
||||
raiseAssert "failed to create waku rln relay: " & $error
|
||||
# get the current epoch time
|
||||
|
||||
@ -40,21 +40,14 @@ procSuite "WakuNode - RLN relay":
|
||||
await node1.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
|
||||
)
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
|
||||
)
|
||||
|
||||
await node1.mountRlnRelay(wakuRlnConfig1)
|
||||
|
||||
await node1.start()
|
||||
@ -62,21 +55,14 @@ procSuite "WakuNode - RLN relay":
|
||||
# node 2
|
||||
await node2.mountRelay(@[DefaultPubsubTopic])
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"),
|
||||
)
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"),
|
||||
)
|
||||
|
||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
||||
|
||||
await node2.start()
|
||||
@ -84,21 +70,14 @@ procSuite "WakuNode - RLN relay":
|
||||
# node 3
|
||||
await node3.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"),
|
||||
)
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"),
|
||||
)
|
||||
|
||||
await node3.mountRlnRelay(wakuRlnConfig3)
|
||||
|
||||
await node3.start()
|
||||
@ -162,21 +141,14 @@ procSuite "WakuNode - RLN relay":
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
for index, node in nodes:
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index.uint + 1),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index.uint + 1),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
|
||||
)
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(index.uint + 1),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
|
||||
)
|
||||
|
||||
await node.mountRlnRelay(wakuRlnConfig)
|
||||
|
||||
# start them
|
||||
@ -263,21 +235,14 @@ procSuite "WakuNode - RLN relay":
|
||||
await node1.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"),
|
||||
)
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"),
|
||||
)
|
||||
|
||||
await node1.mountRlnRelay(wakuRlnConfig1)
|
||||
|
||||
await node1.start()
|
||||
@ -285,21 +250,14 @@ procSuite "WakuNode - RLN relay":
|
||||
# node 2
|
||||
await node2.mountRelay(@[DefaultPubsubTopic])
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"),
|
||||
)
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"),
|
||||
)
|
||||
|
||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
||||
|
||||
await node2.start()
|
||||
@ -307,21 +265,14 @@ procSuite "WakuNode - RLN relay":
|
||||
# node 3
|
||||
await node3.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"),
|
||||
)
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"),
|
||||
)
|
||||
|
||||
await node3.mountRlnRelay(wakuRlnConfig3)
|
||||
await node3.start()
|
||||
|
||||
@ -354,17 +305,11 @@ procSuite "WakuNode - RLN relay":
|
||||
input = concat(payload, contentTopicBytes)
|
||||
extraBytes: seq[byte] = @[byte(1), 2, 3]
|
||||
|
||||
when defined(rln_v2):
|
||||
let nonceManager = node1.wakuRlnRelay.nonceManager
|
||||
let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(
|
||||
concat(input, extraBytes), epoch, MessageId(0)
|
||||
)
|
||||
else:
|
||||
let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(
|
||||
concat(input, extraBytes),
|
||||
# we add extra bytes to invalidate proof verification against original payload
|
||||
epoch,
|
||||
)
|
||||
let nonceManager = node1.wakuRlnRelay.nonceManager
|
||||
let rateLimitProofRes = node1.wakuRlnRelay.groupManager.generateProof(
|
||||
concat(input, extraBytes), epoch, MessageId(0)
|
||||
)
|
||||
|
||||
assert rateLimitProofRes.isOk(), $rateLimitProofRes.error
|
||||
# check the proof is generated correctly outside when block to avoid duplication
|
||||
let rateLimitProof = rateLimitProofRes.get().encode().buffer
|
||||
@ -406,21 +351,14 @@ procSuite "WakuNode - RLN relay":
|
||||
await node1.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"),
|
||||
)
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"),
|
||||
)
|
||||
|
||||
await node1.mountRlnRelay(wakuRlnConfig1)
|
||||
|
||||
await node1.start()
|
||||
@ -429,21 +367,14 @@ procSuite "WakuNode - RLN relay":
|
||||
await node2.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"),
|
||||
)
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"),
|
||||
)
|
||||
|
||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
||||
await node2.start()
|
||||
|
||||
@ -451,21 +382,14 @@ procSuite "WakuNode - RLN relay":
|
||||
await node3.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"),
|
||||
)
|
||||
let wakuRlnConfig3 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(3.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"),
|
||||
)
|
||||
|
||||
await node3.mountRlnRelay(wakuRlnConfig3)
|
||||
|
||||
await node3.start()
|
||||
@ -562,21 +486,14 @@ procSuite "WakuNode - RLN relay":
|
||||
await node1.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"),
|
||||
)
|
||||
let wakuRlnConfig1 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_10"),
|
||||
)
|
||||
|
||||
await node1.mountRlnRelay(wakuRlnConfig1)
|
||||
|
||||
await node1.start()
|
||||
@ -585,21 +502,14 @@ procSuite "WakuNode - RLN relay":
|
||||
await node2.mountRelay(@[DefaultPubsubTopic])
|
||||
|
||||
# mount rlnrelay in off-chain mode
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"),
|
||||
)
|
||||
let wakuRlnConfig2 = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(2.uint),
|
||||
rlnRelayUserMessageLimit: 1,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_11"),
|
||||
)
|
||||
|
||||
await node2.mountRlnRelay(wakuRlnConfig2)
|
||||
|
||||
await node2.start()
|
||||
|
||||
@ -219,21 +219,14 @@ suite "Waku v2 Rest API - Relay":
|
||||
let node = testWakuNode()
|
||||
await node.start()
|
||||
await node.mountRelay()
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
|
||||
await node.mountRlnRelay(wakuRlnConfig)
|
||||
|
||||
# RPC server setup
|
||||
@ -443,21 +436,14 @@ suite "Waku v2 Rest API - Relay":
|
||||
let node = testWakuNode()
|
||||
await node.start()
|
||||
await node.mountRelay()
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
|
||||
await node.mountRlnRelay(wakuRlnConfig)
|
||||
|
||||
# RPC server setup
|
||||
@ -502,21 +488,14 @@ suite "Waku v2 Rest API - Relay":
|
||||
let node = testWakuNode()
|
||||
await node.start()
|
||||
await node.mountRelay()
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
|
||||
await node.mountRlnRelay(wakuRlnConfig)
|
||||
|
||||
# RPC server setup
|
||||
@ -557,21 +536,14 @@ suite "Waku v2 Rest API - Relay":
|
||||
let node = testWakuNode()
|
||||
await node.start()
|
||||
await node.mountRelay()
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
|
||||
await node.mountRlnRelay(wakuRlnConfig)
|
||||
|
||||
# RPC server setup
|
||||
@ -619,21 +591,14 @@ suite "Waku v2 Rest API - Relay":
|
||||
let node = testWakuNode()
|
||||
await node.start()
|
||||
await node.mountRelay()
|
||||
when defined(rln_v2):
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
else:
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
rlnRelayDynamic: false,
|
||||
rlnRelayCredIndex: some(1.uint),
|
||||
rlnRelayUserMessageLimit: 20,
|
||||
rlnEpochSizeSec: 1,
|
||||
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
|
||||
)
|
||||
|
||||
await node.mountRlnRelay(wakuRlnConfig)
|
||||
|
||||
# RPC server setup
|
||||
|
||||
@@ -67,10 +67,7 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =

# 5. register on-chain
try:
when defined(rln_v2):
waitFor groupManager.register(credential, conf.rlnRelayUserMessageLimit)
else:
waitFor groupManager.register(credential)
waitFor groupManager.register(credential, conf.rlnRelayUserMessageLimit)
except Exception, CatchableError:
error "failure while registering credentials on-chain",
error = getCurrentExceptionMsg()
@@ -82,27 +79,18 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
chainId = $groupManager.chainId.get(),
contractAddress = conf.rlnRelayEthContractAddress,
membershipIndex = groupManager.membershipIndex.get()
when defined(rln_v2):
info "Your user message limit is", userMessageLimit = conf.rlnRelayUserMessageLimit
info "Your user message limit is", userMessageLimit = conf.rlnRelayUserMessageLimit

# 6. write to keystore
when defined(rln_v2):
let keystoreCred = KeystoreMembership(
membershipContract: MembershipContract(
chainId: $groupManager.chainId.get(), address: conf.rlnRelayEthContractAddress
),
treeIndex: groupManager.membershipIndex.get(),
identityCredential: credential,
userMessageLimit: conf.rlnRelayUserMessageLimit,
)
else:
let keystoreCred = KeystoreMembership(
membershipContract: MembershipContract(
chainId: $groupManager.chainId.get(), address: conf.rlnRelayEthContractAddress
),
treeIndex: groupManager.membershipIndex.get(),
identityCredential: credential,
)
let keystoreCred = KeystoreMembership(
membershipContract: MembershipContract(
chainId: $groupManager.chainId.get(), address: conf.rlnRelayEthContractAddress
),
treeIndex: groupManager.membershipIndex.get(),
identityCredential: credential,
userMessageLimit: conf.rlnRelayUserMessageLimit,
)

let persistRes = addMembershipCredentials(
conf.rlnRelayCredPath, keystoreCred, conf.rlnRelayCredPassword, RLNAppInfo
@@ -195,31 +195,18 @@ proc setupProtocols(
quit(QuitFailure)

if conf.rlnRelay:
when defined(rln_v2):
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayTreePath: conf.rlnRelayTreePath,
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
onFatalErrorAction: onFatalErrorAction,
)
else:
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayTreePath: conf.rlnRelayTreePath,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
onFatalErrorAction: onFatalErrorAction,
)
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayTreePath: conf.rlnRelayTreePath,
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
onFatalErrorAction: onFatalErrorAction,
)

try:
waitFor node.mountRlnRelay(rlnConf)
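For readability, here is a consolidated, indented sketch of the single WakuRlnConfig that setupProtocols builds after this change when conf.rlnRelay is enabled. Field names are copied from the hunk above; the indentation and the surrounding proc are assumed.

# Illustrative reconstruction of the post-change block (not authoritative).
if conf.rlnRelay:
  let rlnConf = WakuRlnConfig(
    rlnRelayDynamic: conf.rlnRelayDynamic,
    rlnRelayCredIndex: conf.rlnRelayCredIndex,
    rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
    rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
    rlnRelayCredPath: conf.rlnRelayCredPath,
    rlnRelayCredPassword: conf.rlnRelayCredPassword,
    rlnRelayTreePath: conf.rlnRelayTreePath,
    rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
    rlnEpochSizeSec: conf.rlnEpochSizeSec,
    onFatalErrorAction: onFatalErrorAction,
  )
  waitFor node.mountRlnRelay(rlnConf)  # wrapped in try/except in the hunk above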
@@ -15,9 +15,7 @@ type
IdentitySecretHash* = seq[byte] #array[32, byte]
# hash of identity key as defined ed in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership
IDCommitment* = seq[byte] #array[32, byte]

when defined(rln_v2):
type UserMessageLimit* = uint64
UserMessageLimit* = uint64

type IdentityCredential* = object
idTrapdoor*: IdentityTrapdoor
@@ -103,45 +101,24 @@ type KeystoreMembership* = ref object of RootObj
membershipContract*: MembershipContract
treeIndex*: MembershipIndex
identityCredential*: IdentityCredential
when defined(rln_v2):
userMessageLimit*: UserMessageLimit
userMessageLimit*: UserMessageLimit

when defined(rln_v2):
proc `$`*(m: KeystoreMembership): string =
return
"KeystoreMembership(chainId: " & m.membershipContract.chainId &
", contractAddress: " & m.membershipContract.address & ", treeIndex: " &
$m.treeIndex & ", userMessageLimit: " & $m.userMessageLimit &
", identityCredential: " & $m.identityCredential & ")"
proc `$`*(m: KeystoreMembership): string =
return
"KeystoreMembership(chainId: " & m.membershipContract.chainId &
", contractAddress: " & m.membershipContract.address & ", treeIndex: " &
$m.treeIndex & ", userMessageLimit: " & $m.userMessageLimit &
", identityCredential: " & $m.identityCredential & ")"

else:
proc `$`*(m: KeystoreMembership): string =
return
"KeystoreMembership(chainId: " & m.membershipContract.chainId &
", contractAddress: " & m.membershipContract.address & ", treeIndex: " &
$m.treeIndex & ", identityCredential: " & $m.identityCredential & ")"

when defined(rln_v2):
proc `==`*(x, y: KeystoreMembership): bool =
return
x.membershipContract.chainId == y.membershipContract.chainId and
x.membershipContract.address == y.membershipContract.address and
x.treeIndex == y.treeIndex and x.userMessageLimit == y.userMessageLimit and
x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and
x.identityCredential.idNullifier == y.identityCredential.idNullifier and
x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and
x.identityCredential.idCommitment == y.identityCredential.idCommitment

else:
proc `==`*(x, y: KeystoreMembership): bool =
return
x.membershipContract.chainId == y.membershipContract.chainId and
x.membershipContract.address == y.membershipContract.address and
x.treeIndex == y.treeIndex and
x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and
x.identityCredential.idNullifier == y.identityCredential.idNullifier and
x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and
x.identityCredential.idCommitment == y.identityCredential.idCommitment
proc `==`*(x, y: KeystoreMembership): bool =
return
x.membershipContract.chainId == y.membershipContract.chainId and
x.membershipContract.address == y.membershipContract.address and
x.treeIndex == y.treeIndex and x.userMessageLimit == y.userMessageLimit and
x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and
x.identityCredential.idNullifier == y.identityCredential.idNullifier and
x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and
x.identityCredential.idCommitment == y.identityCredential.idCommitment

proc hash*(m: KeystoreMembership): string =
# hash together the chainId, address and treeIndex
File diff suppressed because one or more lines are too long
@ -30,9 +30,8 @@ proc inHex*(
valueHex = "0" & valueHex
return toLowerAscii(valueHex)

when defined(rln_v2):
proc toUserMessageLimit*(v: UInt256): UserMessageLimit =
return cast[UserMessageLimit](v)
proc toUserMessageLimit*(v: UInt256): UserMessageLimit =
return cast[UserMessageLimit](v)

proc encodeLengthPrefix*(input: openArray[byte]): seq[byte] =
## returns length prefixed version of the input
@ -56,75 +55,47 @@ proc serialize*(v: uint64): array[32, byte] =
discard output.copyFrom(bytes)
return output

when defined(rln_v2):
proc serialize*(
idSecretHash: IdentitySecretHash,
memIndex: MembershipIndex,
userMessageLimit: UserMessageLimit,
messageId: MessageId,
externalNullifier: ExternalNullifier,
msg: openArray[byte],
): seq[byte] =
## a private proc to convert RateLimitProof and the data to a byte seq
## this conversion is used in the proofGen proc
## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian)
let userMessageLimitBytes = userMessageLimit.serialize()
let messageIdBytes = messageId.serialize()
let lenPrefMsg = encodeLengthPrefix(msg)
let output = concat(
@idSecretHash,
@memIndexBytes,
@userMessageLimitBytes,
@messageIdBytes,
@externalNullifier,
lenPrefMsg,
)
return output
proc serialize*(
idSecretHash: IdentitySecretHash,
memIndex: MembershipIndex,
userMessageLimit: UserMessageLimit,
messageId: MessageId,
externalNullifier: ExternalNullifier,
msg: openArray[byte],
): seq[byte] =
## a private proc to convert RateLimitProof and the data to a byte seq
## this conversion is used in the proofGen proc
## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian)
let userMessageLimitBytes = userMessageLimit.serialize()
let messageIdBytes = messageId.serialize()
let lenPrefMsg = encodeLengthPrefix(msg)
let output = concat(
@idSecretHash,
@memIndexBytes,
@userMessageLimitBytes,
@messageIdBytes,
@externalNullifier,
lenPrefMsg,
)
return output

else:
proc serialize*(
idSecretHash: IdentitySecretHash,
memIndex: MembershipIndex,
epoch: Epoch,
msg: openArray[byte],
): seq[byte] =
## a private proc to convert RateLimitProof and the data to a byte seq
## this conversion is used in the proofGen proc
## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian)
let lenPrefMsg = encodeLengthPrefix(msg)
let output = concat(@idSecretHash, @memIndexBytes, @epoch, lenPrefMsg)
return output

proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
## a private proc to convert RateLimitProof and data to a byte seq
## this conversion is used in the proof verification proc
## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
let lenPrefMsg = encodeLengthPrefix(@data)
when defined(rln_v2):
var proofBytes = concat(
@(proof.proof),
@(proof.merkleRoot),
@(proof.externalNullifier),
@(proof.shareX),
@(proof.shareY),
@(proof.nullifier),
lenPrefMsg,
)
else:
var proofBytes = concat(
@(proof.proof),
@(proof.merkleRoot),
@(proof.epoch),
@(proof.shareX),
@(proof.shareY),
@(proof.nullifier),
@(proof.rlnIdentifier),
lenPrefMsg,
)
var proofBytes = concat(
@(proof.proof),
@(proof.merkleRoot),
@(proof.externalNullifier),
@(proof.shareX),
@(proof.shareY),
@(proof.nullifier),
lenPrefMsg,
)

return proofBytes

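# --- Illustrative sketch (not part of the diff) -----------------------------
# The serialize procs above lay out the proof inputs as fixed-size
# little-endian fields followed by a length-prefixed signal. The standalone
# snippet below mirrors that idea using only the Nim standard library; the
# names toBytesLE and lengthPrefixed are hypothetical helpers, not part of
# waku_rln_relay.
proc toBytesLE(v: uint64): array[8, byte] =
  # encode a uint64 as 8 little-endian bytes
  for i in 0 ..< 8:
    result[i] = byte((v shr (8 * i)) and 0xff'u64)

proc lengthPrefixed(msg: openArray[byte]): seq[byte] =
  # 8-byte little-endian length prefix followed by the raw message bytes,
  # matching the layout described by encodeLengthPrefix above
  result = @(toBytesLE(uint64(msg.len)))
  for b in msg:
    result.add b

when isMainModule:
  let signal = @[byte 1, 2, 3]
  doAssert lengthPrefixed(signal).len == 8 + signal.len
# ---------------------------------------------------------------------------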
@ -15,10 +15,7 @@ export options, chronos, results, protocol_types, protocol_metrics, deques

type Membership* = object
index*: MembershipIndex
when defined(rln_v2):
rateCommitment*: RateCommitment
else:
idCommitment*: IDCommitment
rateCommitment*: RateCommitment

type OnRegisterCallback* = proc(registrations: seq[Membership]): Future[void] {.gcsafe.}
type OnWithdrawCallback* = proc(withdrawals: seq[Membership]): Future[void] {.gcsafe.}
@ -35,8 +32,7 @@ type GroupManager* = ref object of RootObj
latestIndex*: MembershipIndex
validRoots*: Deque[MerkleNode]
onFatalErrorAction*: OnFatalErrorHandler
when defined(rln_v2):
userMessageLimit*: Option[UserMessageLimit]
userMessageLimit*: Option[UserMessageLimit]

# This proc is used to initialize the group manager
# Any initialization logic should be implemented here
@ -53,61 +49,35 @@ method startGroupSync*(
# This proc is used to register a new identity commitment into the merkle tree
# The user may or may not have the identity secret to this commitment
# It should be used when detecting new members in the group, and syncing the group state
when defined(rln_v2):
method register*(
g: GroupManager, rateCommitment: RateCommitment
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)

else:
method register*(
g: GroupManager, idCommitment: IDCommitment
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)
method register*(
g: GroupManager, rateCommitment: RateCommitment
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)

# This proc is used to register a new identity commitment into the merkle tree
# The user should have the identity secret to this commitment
# It should be used when the user wants to join the group
when defined(rln_v2):
method register*(
g: GroupManager,
credentials: IdentityCredential,
userMessageLimit: UserMessageLimit,
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)
method register*(
g: GroupManager,
credentials: IdentityCredential,
userMessageLimit: UserMessageLimit,
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)

else:
method register*(
g: GroupManager, credentials: IdentityCredential
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "register proc for " & $g.type & " is not implemented yet"
)

# This proc is used to register a batch of new identity commitments into the merkle tree
# The user may or may not have the identity secret to these commitments
# It should be used when detecting a batch of new members in the group, and syncing the group state
when defined(rln_v2):
method registerBatch*(
g: GroupManager, rateCommitments: seq[RateCommitment]
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet"
)

else:
method registerBatch*(
g: GroupManager, idCommitments: seq[IDCommitment]
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet"
)
method registerBatch*(
g: GroupManager, rateCommitments: seq[RateCommitment]
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet"
)

# This proc is used to set a callback that will be called when a new identity commitment is registered
# The callback may be called multiple times, and should be used for any post processing
@ -133,25 +103,15 @@ method withdrawBatch*(
)

# This proc is used to insert and remove a set of commitments from the merkle tree
when defined(rln_v2):
method atomicBatch*(
g: GroupManager,
rateCommitments: seq[RateCommitment],
toRemoveIndices: seq[MembershipIndex],
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet"
)
method atomicBatch*(
g: GroupManager,
rateCommitments: seq[RateCommitment],
toRemoveIndices: seq[MembershipIndex],
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet"
)

else:
method atomicBatch*(
g: GroupManager,
idCommitments: seq[IDCommitment],
toRemoveIndices: seq[MembershipIndex],
): Future[void] {.base, async: (raises: [Exception]).} =
raise newException(
CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet"
)

method stop*(g: GroupManager): Future[void] {.base, async.} =
raise
@ -216,55 +176,34 @@ method verifyProof*(
return err("proof verification failed: " & $proofVerifyRes.error())
return ok(proofVerifyRes.value())

when defined(rln_v2):
method generateProof*(
g: GroupManager,
data: openArray[byte],
epoch: Epoch,
messageId: MessageId,
rlnIdentifier = DefaultRlnIdentifier,
): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
## generates a proof for the given data and epoch
## the proof is generated using the current merkle root
if g.idCredentials.isNone():
return err("identity credentials are not set")
if g.membershipIndex.isNone():
return err("membership index is not set")
if g.userMessageLimit.isNone():
return err("user message limit is not set")
waku_rln_proof_generation_duration_seconds.nanosecondTime:
let proof = proofGen(
rlnInstance = g.rlnInstance,
data = data,
membership = g.idCredentials.get(),
index = g.membershipIndex.get(),
epoch = epoch,
userMessageLimit = g.userMessageLimit.get(),
messageId = messageId,
).valueOr:
return err("proof generation failed: " & $error)
return ok(proof)
method generateProof*(
g: GroupManager,
data: openArray[byte],
epoch: Epoch,
messageId: MessageId,
rlnIdentifier = DefaultRlnIdentifier,
): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
## generates a proof for the given data and epoch
## the proof is generated using the current merkle root
if g.idCredentials.isNone():
return err("identity credentials are not set")
if g.membershipIndex.isNone():
return err("membership index is not set")
if g.userMessageLimit.isNone():
return err("user message limit is not set")
waku_rln_proof_generation_duration_seconds.nanosecondTime:
let proof = proofGen(
rlnInstance = g.rlnInstance,
data = data,
membership = g.idCredentials.get(),
index = g.membershipIndex.get(),
epoch = epoch,
userMessageLimit = g.userMessageLimit.get(),
messageId = messageId,
).valueOr:
return err("proof generation failed: " & $error)
return ok(proof)

else:
method generateProof*(
g: GroupManager, data: openArray[byte], epoch: Epoch
): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
## generates a proof for the given data and epoch
## the proof is generated using the current merkle root
if g.idCredentials.isNone():
return err("identity credentials are not set")
if g.membershipIndex.isNone():
return err("membership index is not set")
waku_rln_proof_generation_duration_seconds.nanosecondTime:
let proof = proofGen(
rlnInstance = g.rlnInstance,
data = data,
memKeys = g.idCredentials.get(),
memIndex = g.membershipIndex.get(),
epoch = epoch,
).valueOr:
return err("proof generation failed: " & $error)
return ok(proof)

method isReady*(g: GroupManager): Future[bool] {.base, async.} =
raise newException(

@ -31,60 +31,36 @@ logScope:
|
||||
topics = "waku rln_relay onchain_group_manager"
|
||||
|
||||
# using the when predicate does not work within the contract macro, hence need to dupe
|
||||
when defined(rln_v2):
|
||||
contract(WakuRlnRegistry):
|
||||
# this describes the storage slot to use
|
||||
proc usingStorageIndex(): Uint16 {.pure.}
|
||||
# this map contains the address of a given storage slot
|
||||
proc storages(index: Uint16): Address {.pure.}
|
||||
# this serves as an entrypoint into the rln storage contract
|
||||
proc register(
|
||||
storageIndex: Uint16, idCommitment: Uint256, userMessageLimit: Uint256
|
||||
)
|
||||
contract(WakuRlnRegistry):
|
||||
# this describes the storage slot to use
|
||||
proc usingStorageIndex(): Uint16 {.pure.}
|
||||
# this map contains the address of a given storage slot
|
||||
proc storages(index: Uint16): Address {.pure.}
|
||||
# this serves as an entrypoint into the rln storage contract
|
||||
proc register(
|
||||
storageIndex: Uint16, idCommitment: Uint256, userMessageLimit: Uint256
|
||||
)
|
||||
|
||||
# this creates a new storage on the rln registry
|
||||
proc newStorage(maxMessageLimit: Uint256)
|
||||
# Initializes the implementation contract (only used in unit tests)
|
||||
proc initialize()
|
||||
# this creates a new storage on the rln registry
|
||||
proc newStorage(maxMessageLimit: Uint256)
|
||||
# Initializes the implementation contract (only used in unit tests)
|
||||
proc initialize()
|
||||
|
||||
# membership contract interface
|
||||
contract(RlnStorage):
|
||||
# this event is raised when a new member is registered
|
||||
proc MemberRegistered(
|
||||
idCommitment: Uint256, userMessageLimit: Uint256, index: Uint256
|
||||
) {.event.}
|
||||
# membership contract interface
|
||||
contract(RlnStorage):
|
||||
# this event is raised when a new member is registered
|
||||
proc MemberRegistered(
|
||||
idCommitment: Uint256, userMessageLimit: Uint256, index: Uint256
|
||||
) {.event.}
|
||||
|
||||
# this constant contains the membership deposit of the contract
|
||||
proc MEMBERSHIP_DEPOSIT(): Uint256 {.pure.}
|
||||
# this map denotes existence of a given user
|
||||
proc memberExists(idCommitment: Uint256): Uint256 {.view.}
|
||||
# this constant describes the next index of a new member
|
||||
proc idCommitmentIndex(): Uint256 {.view.}
|
||||
# this constant describes the block number this contract was deployed on
|
||||
proc deployedBlockNumber(): Uint256 {.view.}
|
||||
else:
|
||||
contract(WakuRlnRegistry):
|
||||
# this describes the storage slot to use
|
||||
proc usingStorageIndex(): Uint16 {.pure.}
|
||||
# this map contains the address of a given storage slot
|
||||
proc storages(index: Uint16): Address {.pure.}
|
||||
# this serves as an entrypoint into the rln storage contract
|
||||
proc register(storageIndex: Uint16, idCommitment: Uint256)
|
||||
# this creates a new storage on the rln registry
|
||||
proc newStorage()
|
||||
|
||||
# membership contract interface
|
||||
contract(RlnStorage):
|
||||
# this event is raised when a new member is registered
|
||||
proc MemberRegistered(idCommitment: Uint256, index: Uint256) {.event.}
|
||||
# this constant contains the membership deposit of the contract
|
||||
proc MEMBERSHIP_DEPOSIT(): Uint256 {.pure.}
|
||||
# this map denotes existence of a given user
|
||||
proc memberExists(idCommitment: Uint256): Uint256 {.view.}
|
||||
# this constant describes the next index of a new member
|
||||
proc idCommitmentIndex(): Uint256 {.view.}
|
||||
# this constant describes the block number this contract was deployed on
|
||||
proc deployedBlockNumber(): Uint256 {.view.}
|
||||
# this constant contains the membership deposit of the contract
|
||||
proc MEMBERSHIP_DEPOSIT(): Uint256 {.pure.}
|
||||
# this map denotes existence of a given user
|
||||
proc memberExists(idCommitment: Uint256): Uint256 {.view.}
|
||||
# this constant describes the next index of a new member
|
||||
proc idCommitmentIndex(): Uint256 {.view.}
|
||||
# this constant describes the block number this contract was deployed on
|
||||
proc deployedBlockNumber(): Uint256 {.view.}
|
||||
|
||||
type
|
||||
RegistryContractWithSender = Sender[WakuRlnRegistry]
|
||||
@ -157,212 +133,112 @@ proc setMetadata*(
|
||||
return err("failed to persist rln metadata: " & getCurrentExceptionMsg())
|
||||
return ok()
|
||||
|
||||
when defined(rln_v2):
|
||||
method atomicBatch*(
|
||||
g: OnchainGroupManager,
|
||||
start: MembershipIndex,
|
||||
rateCommitments = newSeq[RateCommitment](),
|
||||
toRemoveIndices = newSeq[MembershipIndex](),
|
||||
): Future[void] {.async: (raises: [Exception]), base.} =
|
||||
initializedGuard(g)
|
||||
method atomicBatch*(
|
||||
g: OnchainGroupManager,
|
||||
start: MembershipIndex,
|
||||
rateCommitments = newSeq[RateCommitment](),
|
||||
toRemoveIndices = newSeq[MembershipIndex](),
|
||||
): Future[void] {.async: (raises: [Exception]), base.} =
|
||||
initializedGuard(g)
|
||||
|
||||
# convert the rateCommitment struct to a leaf value
|
||||
let leaves = rateCommitments.toLeaves().valueOr:
|
||||
raise newException(
|
||||
ValueError, "failed to convert rateCommitments to leaves: " & $error
|
||||
)
|
||||
# convert the rateCommitment struct to a leaf value
|
||||
let leaves = rateCommitments.toLeaves().valueOr:
|
||||
raise newException(
|
||||
ValueError, "failed to convert rateCommitments to leaves: " & $error
|
||||
)
|
||||
|
||||
waku_rln_membership_insertion_duration_seconds.nanosecondTime:
|
||||
let operationSuccess =
|
||||
g.rlnInstance.atomicWrite(some(start), leaves, toRemoveIndices)
|
||||
if not operationSuccess:
|
||||
raise newException(CatchableError, "atomic batch operation failed")
|
||||
# TODO: when slashing is enabled, we need to track slashed members
|
||||
waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
|
||||
waku_rln_membership_insertion_duration_seconds.nanosecondTime:
|
||||
let operationSuccess =
|
||||
g.rlnInstance.atomicWrite(some(start), leaves, toRemoveIndices)
|
||||
if not operationSuccess:
|
||||
raise newException(CatchableError, "atomic batch operation failed")
|
||||
# TODO: when slashing is enabled, we need to track slashed members
|
||||
waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
|
||||
|
||||
if g.registerCb.isSome():
|
||||
var membersSeq = newSeq[Membership]()
|
||||
for i in 0 ..< rateCommitments.len:
|
||||
var index = start + MembershipIndex(i)
|
||||
trace "registering member", rateCommitment = rateCommitments[i], index = index
|
||||
let member = Membership(rateCommitment: rateCommitments[i], index: index)
|
||||
membersSeq.add(member)
|
||||
await g.registerCb.get()(membersSeq)
|
||||
if g.registerCb.isSome():
|
||||
var membersSeq = newSeq[Membership]()
|
||||
for i in 0 ..< rateCommitments.len:
|
||||
var index = start + MembershipIndex(i)
|
||||
trace "registering member", rateCommitment = rateCommitments[i], index = index
|
||||
let member = Membership(rateCommitment: rateCommitments[i], index: index)
|
||||
membersSeq.add(member)
|
||||
await g.registerCb.get()(membersSeq)
|
||||
|
||||
g.validRootBuffer = g.slideRootQueue()
|
||||
g.validRootBuffer = g.slideRootQueue()
|
||||
|
||||
else:
|
||||
method atomicBatch*(
|
||||
g: OnchainGroupManager,
|
||||
start: MembershipIndex,
|
||||
idCommitments = newSeq[IDCommitment](),
|
||||
toRemoveIndices = newSeq[MembershipIndex](),
|
||||
): Future[void] {.async: (raises: [Exception]), base.} =
|
||||
initializedGuard(g)
|
||||
method register*(
|
||||
g: OnchainGroupManager, rateCommitment: RateCommitment
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
waku_rln_membership_insertion_duration_seconds.nanosecondTime:
|
||||
let operationSuccess =
|
||||
g.rlnInstance.atomicWrite(some(start), idCommitments, toRemoveIndices)
|
||||
if not operationSuccess:
|
||||
raise newException(ValueError, "atomic batch operation failed")
|
||||
# TODO: when slashing is enabled, we need to track slashed members
|
||||
waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
|
||||
await g.registerBatch(@[rateCommitment])
|
||||
|
||||
if g.registerCb.isSome():
|
||||
var membersSeq = newSeq[Membership]()
|
||||
for i in 0 ..< idCommitments.len:
|
||||
var index = start + MembershipIndex(i)
|
||||
trace "registering member", idCommitment = idCommitments[i], index = index
|
||||
let member = Membership(idCommitment: idCommitments[i], index: index)
|
||||
membersSeq.add(member)
|
||||
await g.registerCb.get()(membersSeq)
|
||||
method registerBatch*(
|
||||
g: OnchainGroupManager, rateCommitments: seq[RateCommitment]
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
g.validRootBuffer = g.slideRootQueue()
|
||||
await g.atomicBatch(g.latestIndex, rateCommitments)
|
||||
g.latestIndex += MembershipIndex(rateCommitments.len)
|
||||
|
||||
when defined(rln_v2):
|
||||
method register*(
|
||||
g: OnchainGroupManager, rateCommitment: RateCommitment
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
method register*(
|
||||
g: OnchainGroupManager,
|
||||
identityCredential: IdentityCredential,
|
||||
userMessageLimit: UserMessageLimit,
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
await g.registerBatch(@[rateCommitment])
|
||||
let ethRpc = g.ethRpc.get()
|
||||
let registryContract = g.registryContract.get()
|
||||
let membershipFee = g.membershipFee.get()
|
||||
|
||||
else:
|
||||
method register*(
|
||||
g: OnchainGroupManager, idCommitment: IDCommitment
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
var gasPrice: int
|
||||
g.retryWrapper(gasPrice, "Failed to get gas price"):
|
||||
int(await ethRpc.provider.eth_gasPrice()) * 2
|
||||
let idCommitment = identityCredential.idCommitment.toUInt256()
|
||||
|
||||
await g.registerBatch(@[idCommitment])
|
||||
let storageIndex = g.usingStorageIndex.get()
|
||||
debug "registering the member",
|
||||
idCommitment = idCommitment,
|
||||
storageIndex = storageIndex,
|
||||
userMessageLimit = userMessageLimit
|
||||
var txHash: TxHash
|
||||
g.retryWrapper(txHash, "Failed to register the member"):
|
||||
await registryContract
|
||||
.register(storageIndex, idCommitment, u256(userMessageLimit))
|
||||
.send(gasPrice = gasPrice)
|
||||
|
||||
when defined(rln_v2):
|
||||
method registerBatch*(
|
||||
g: OnchainGroupManager, rateCommitments: seq[RateCommitment]
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
# wait for the transaction to be mined
|
||||
var tsReceipt: ReceiptObject
|
||||
g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
|
||||
await ethRpc.getMinedTransactionReceipt(txHash)
|
||||
debug "registration transaction mined", txHash = txHash
|
||||
g.registrationTxHash = some(txHash)
|
||||
# the receipt topic holds the hash of signature of the raised events
|
||||
# TODO: make this robust. search within the event list for the event
|
||||
let firstTopic = tsReceipt.logs[0].topics[0]
|
||||
# the hash of the signature of MemberRegistered(uint256,uint256,uint256) event is equal to the following hex value
|
||||
if firstTopic !=
|
||||
cast[FixedBytes[32]](keccak256.digest(
|
||||
"MemberRegistered(uint256,uint256,uint256)"
|
||||
).data):
|
||||
raise newException(ValueError, "unexpected event signature")
|
||||
|
||||
await g.atomicBatch(g.latestIndex, rateCommitments)
|
||||
g.latestIndex += MembershipIndex(rateCommitments.len)
|
||||
# the arguments of the raised event i.e., MemberRegistered are encoded inside the data field
|
||||
# data = pk encoded as 256 bits || index encoded as 256 bits || userMessageLimit encoded as 256 bits
|
||||
let arguments = tsReceipt.logs[0].data
|
||||
debug "tx log data", arguments = arguments
|
||||
let
|
||||
argumentsBytes = arguments
|
||||
# In TX log data, uints are encoded in big endian
|
||||
membershipIndex = UInt256.fromBytesBE(argumentsBytes[64 ..^ 1])
|
||||
|
||||
else:
|
||||
method registerBatch*(
|
||||
g: OnchainGroupManager, idCommitments: seq[IDCommitment]
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
g.userMessageLimit = some(userMessageLimit)
|
||||
g.membershipIndex = some(membershipIndex.toMembershipIndex())
|
||||
|
||||
await g.atomicBatch(g.latestIndex, idCommitments)
|
||||
g.latestIndex += MembershipIndex(idCommitments.len)
|
||||
# don't handle member insertion into the tree here, it will be handled by the event listener
|
||||
return
|
||||
|
||||
when defined(rln_v2):
|
||||
method register*(
|
||||
g: OnchainGroupManager,
|
||||
identityCredential: IdentityCredential,
|
||||
userMessageLimit: UserMessageLimit,
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
let ethRpc = g.ethRpc.get()
|
||||
let registryContract = g.registryContract.get()
|
||||
let membershipFee = g.membershipFee.get()
|
||||
|
||||
var gasPrice: int
|
||||
g.retryWrapper(gasPrice, "Failed to get gas price"):
|
||||
int(await ethRpc.provider.eth_gasPrice()) * 2
|
||||
let idCommitment = identityCredential.idCommitment.toUInt256()
|
||||
|
||||
let storageIndex = g.usingStorageIndex.get()
|
||||
debug "registering the member",
|
||||
idCommitment = idCommitment,
|
||||
storageIndex = storageIndex,
|
||||
userMessageLimit = userMessageLimit
|
||||
var txHash: TxHash
|
||||
g.retryWrapper(txHash, "Failed to register the member"):
|
||||
await registryContract
|
||||
.register(storageIndex, idCommitment, u256(userMessageLimit))
|
||||
.send(gasPrice = gasPrice)
|
||||
|
||||
# wait for the transaction to be mined
|
||||
var tsReceipt: ReceiptObject
|
||||
g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
|
||||
await ethRpc.getMinedTransactionReceipt(txHash)
|
||||
debug "registration transaction mined", txHash = txHash
|
||||
g.registrationTxHash = some(txHash)
|
||||
# the receipt topic holds the hash of signature of the raised events
|
||||
# TODO: make this robust. search within the event list for the event
|
||||
let firstTopic = tsReceipt.logs[0].topics[0]
|
||||
# the hash of the signature of MemberRegistered(uint256,uint256,uint256) event is equal to the following hex value
|
||||
if firstTopic !=
|
||||
cast[FixedBytes[32]](keccak256.digest(
|
||||
"MemberRegistered(uint256,uint256,uint256)"
|
||||
).data):
|
||||
raise newException(ValueError, "unexpected event signature")
|
||||
|
||||
# the arguments of the raised event i.e., MemberRegistered are encoded inside the data field
|
||||
# data = pk encoded as 256 bits || index encoded as 256 bits || userMessageLimit encoded as 256 bits
|
||||
let arguments = tsReceipt.logs[0].data
|
||||
debug "tx log data", arguments = arguments
|
||||
let
|
||||
argumentsBytes = arguments
|
||||
# In TX log data, uints are encoded in big endian
|
||||
membershipIndex = UInt256.fromBytesBE(argumentsBytes[64 ..^ 1])
|
||||
|
||||
g.userMessageLimit = some(userMessageLimit)
|
||||
g.membershipIndex = some(membershipIndex.toMembershipIndex())
|
||||
|
||||
# don't handle member insertion into the tree here, it will be handled by the event listener
|
||||
return
|
||||
|
||||
else:
|
||||
method register*(
|
||||
g: OnchainGroupManager, credentials: IdentityCredential
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
let ethRpc = g.ethRpc.get()
|
||||
let registryContract = g.registryContract.get()
|
||||
let membershipFee = g.membershipFee.get()
|
||||
|
||||
var gasPrice: int
|
||||
g.retryWrapper(gasPrice, "Failed to get gas price"):
|
||||
int(await ethRpc.provider.eth_gasPrice()) * 2
|
||||
let idCommitment = credentials.idCommitment.toUInt256()
|
||||
|
||||
let storageIndex = g.usingStorageIndex.get()
|
||||
debug "registering the member",
|
||||
idCommitment = idCommitment, storageIndex = storageIndex
|
||||
var txHash: TxHash
|
||||
g.retryWrapper(txHash, "Failed to register the member"):
|
||||
await registryContract.register(storageIndex, idCommitment).send(
|
||||
gasPrice = gasPrice
|
||||
)
|
||||
|
||||
# wait for the transaction to be mined
|
||||
var tsReceipt: ReceiptObject
|
||||
g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
|
||||
await ethRpc.getMinedTransactionReceipt(txHash)
|
||||
debug "registration transaction mined", txHash = txHash
|
||||
g.registrationTxHash = some(txHash)
|
||||
# the receipt topic holds the hash of signature of the raised events
|
||||
# TODO: make this robust. search within the event list for the event
|
||||
let firstTopic = tsReceipt.logs[0].topics[0]
|
||||
# the hash of the signature of MemberRegistered(uint256,uint256) event is equal to the following hex value
|
||||
if firstTopic !=
|
||||
cast[FixedBytes[32]](keccak256.digest("MemberRegistered(uint256,uint256)").data):
|
||||
raise newException(ValueError, "unexpected event signature")
|
||||
|
||||
# the arguments of the raised event i.e., MemberRegistered are encoded inside the data field
|
||||
# data = pk encoded as 256 bits || index encoded as 256 bits
|
||||
let arguments = tsReceipt.logs[0].data
|
||||
debug "tx log data", arguments = arguments
|
||||
let
|
||||
argumentsBytes = arguments
|
||||
# In TX log data, uints are encoded in big endian
|
||||
eventIndex = UInt256.fromBytesBE(argumentsBytes[32 ..^ 1])
|
||||
|
||||
g.membershipIndex = some(eventIndex.toMembershipIndex())
|
||||
|
||||
# don't handle member insertion into the tree here, it will be handled by the event listener
|
||||
return
|
||||
|
||||
method withdraw*(
g: OnchainGroupManager, idCommitment: IDCommitment
@ -383,8 +259,7 @@ proc parseEvent(
## returns an error if it cannot parse the `data` parameter
var idComm: UInt256
var index: UInt256
when defined(rln_v2):
var userMessageLimit: UInt256
var userMessageLimit: UInt256
var data: string
# Remove the 0x prefix
try:
@ -398,27 +273,20 @@ proc parseEvent(
try:
# Parse the idComm
offset += decode(data, offset, idComm)
when defined(rln_v2):
# Parse the userMessageLimit
offset += decode(data, offset, userMessageLimit)
# Parse the userMessageLimit
offset += decode(data, offset, userMessageLimit)
# Parse the index
offset += decode(data, offset, index)
when defined(rln_v2):
return ok(
Membership(
rateCommitment: RateCommitment(
idCommitment: idComm.toIDCommitment(),
userMessageLimit: userMessageLimit.toUserMessageLimit(),
),
index: index.toMembershipIndex(),
)
)
else:
return ok(
Membership(
idCommitment: idComm.toIDCommitment(), index: index.toMembershipIndex()
)
return ok(
Membership(
rateCommitment: RateCommitment(
idCommitment: idComm.toIDCommitment(),
userMessageLimit: userMessageLimit.toUserMessageLimit(),
),
index: index.toMembershipIndex(),
)
)

except CatchableError:
return err("failed to parse the data field of the MemberRegistered event")

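# --- Illustrative sketch (not part of the diff) -----------------------------
# parseEvent above reads consecutive 256-bit (32-byte) ABI words out of the
# MemberRegistered event data. The standalone snippet shows that word-by-word
# slicing with only the standard library; splitWords is a hypothetical helper,
# not repo code.
proc splitWords(data: openArray[byte]): seq[array[32, byte]] =
  # split ABI-encoded event data into consecutive 32-byte words
  doAssert data.len mod 32 == 0
  for wordStart in countup(0, data.len - 1, 32):
    var word: array[32, byte]
    for i in 0 ..< 32:
      word[i] = data[wordStart + i]
    result.add word

when isMainModule:
  var blob = newSeq[byte](96)   # idCommitment | userMessageLimit | index
  blob[95] = 7                  # e.g. membership index 7 in big-endian form
  doAssert splitWords(blob).len == 3
# ---------------------------------------------------------------------------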
@ -501,24 +369,15 @@ proc handleEvents(
|
||||
try:
|
||||
let startIndex = blockTable[blockNumber].filterIt(not it[1])[0][0].index
|
||||
let removalIndices = members.filterIt(it[1]).mapIt(it[0].index)
|
||||
when defined(rln_v2):
|
||||
let rateCommitments = members.mapIt(it[0].rateCommitment)
|
||||
await g.atomicBatch(
|
||||
start = startIndex,
|
||||
rateCommitments = rateCommitments,
|
||||
toRemoveIndices = removalIndices,
|
||||
)
|
||||
g.latestIndex = startIndex + MembershipIndex(rateCommitments.len)
|
||||
trace "new members added to the Merkle tree", commitments = rateCommitments
|
||||
else:
|
||||
let idCommitments = members.mapIt(it[0].idCommitment)
|
||||
await g.atomicBatch(
|
||||
start = startIndex,
|
||||
idCommitments = idCommitments,
|
||||
toRemoveIndices = removalIndices,
|
||||
)
|
||||
g.latestIndex = startIndex + MembershipIndex(idCommitments.len)
|
||||
trace "new members added to the Merkle tree", commitments = idCommitments
|
||||
let rateCommitments = members.mapIt(it[0].rateCommitment)
|
||||
await g.atomicBatch(
|
||||
start = startIndex,
|
||||
rateCommitments = rateCommitments,
|
||||
toRemoveIndices = removalIndices,
|
||||
)
|
||||
g.latestIndex = startIndex + MembershipIndex(rateCommitments.len)
|
||||
trace "new members added to the Merkle tree", commitments = rateCommitments
|
||||
|
||||
except CatchableError:
|
||||
error "failed to insert members into the tree", error = getCurrentExceptionMsg()
|
||||
raise newException(ValueError, "failed to insert members into the tree")
|
||||
@ -759,8 +618,7 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
|
||||
return err("failed to get the keystore credentials: " & $error)
|
||||
|
||||
g.membershipIndex = some(keystoreCred.treeIndex)
|
||||
when defined(rln_v2):
|
||||
g.userMessageLimit = some(keystoreCred.userMessageLimit)
|
||||
g.userMessageLimit = some(keystoreCred.userMessageLimit)
|
||||
# now we check on the contract if the commitment actually has a membership
|
||||
try:
|
||||
let membershipExists = await rlnContract
|
||||
|
||||
@ -33,25 +33,18 @@ method init*(g: StaticGroupManager): Future[GroupManagerResult[void]] {.async.}
|
||||
"Invalid membership index. Must be within 0 and " & $(groupSize - 1) & "but was " &
|
||||
$membershipIndex
|
||||
)
|
||||
when defined(rln_v2):
|
||||
g.userMessageLimit = some(DefaultUserMessageLimit)
|
||||
g.userMessageLimit = some(DefaultUserMessageLimit)
|
||||
|
||||
g.idCredentials = some(groupKeys[membershipIndex])
|
||||
# Seed the received commitments into the merkle tree
|
||||
when defined(rln_v2):
|
||||
let rateCommitments = groupKeys.mapIt(
|
||||
RateCommitment(
|
||||
idCommitment: it.idCommitment, userMessageLimit: g.userMessageLimit.get()
|
||||
)
|
||||
let rateCommitments = groupKeys.mapIt(
|
||||
RateCommitment(
|
||||
idCommitment: it.idCommitment, userMessageLimit: g.userMessageLimit.get()
|
||||
)
|
||||
let leaves = rateCommitments.toLeaves().valueOr:
|
||||
return err("Failed to convert rate commitments to leaves: " & $error)
|
||||
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, leaves)
|
||||
else:
|
||||
let idCommitments = groupKeys.mapIt(it.idCommitment)
|
||||
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, idCommitments)
|
||||
if not membersInserted:
|
||||
return err("Failed to insert members into the merkle tree")
|
||||
)
|
||||
let leaves = rateCommitments.toLeaves().valueOr:
|
||||
return err("Failed to convert rate commitments to leaves: " & $error)
|
||||
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, leaves)
|
||||
|
||||
discard g.slideRootQueue()
|
||||
|
||||
@ -68,127 +61,67 @@ method startGroupSync*(
|
||||
# No-op
|
||||
return ok()
|
||||
|
||||
when defined(rln_v2):
|
||||
method register*(
|
||||
g: StaticGroupManager, rateCommitment: RateCommitment
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
method register*(
|
||||
g: StaticGroupManager, rateCommitment: RateCommitment
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
await g.registerBatch(@[rateCommitment])
|
||||
await g.registerBatch(@[rateCommitment])
|
||||
|
||||
else:
|
||||
method register*(
|
||||
g: StaticGroupManager, idCommitment: IDCommitment
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
await g.registerBatch(@[idCommitment])
|
||||
method registerBatch*(
|
||||
g: StaticGroupManager, rateCommitments: seq[RateCommitment]
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
when defined(rln_v2):
|
||||
method registerBatch*(
|
||||
g: StaticGroupManager, rateCommitments: seq[RateCommitment]
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
let leavesRes = rateCommitments.toLeaves()
|
||||
if not leavesRes.isOk():
|
||||
raise newException(ValueError, "Failed to convert rate commitments to leaves")
|
||||
let leaves = cast[seq[seq[byte]]](leavesRes.get())
|
||||
|
||||
let leavesRes = rateCommitments.toLeaves()
|
||||
if not leavesRes.isOk():
|
||||
raise newException(ValueError, "Failed to convert rate commitments to leaves")
|
||||
let leaves = cast[seq[seq[byte]]](leavesRes.get())
|
||||
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, leaves)
|
||||
if not membersInserted:
|
||||
raise newException(ValueError, "Failed to insert members into the merkle tree")
|
||||
|
||||
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, leaves)
|
||||
if not membersInserted:
|
||||
raise newException(ValueError, "Failed to insert members into the merkle tree")
|
||||
|
||||
if g.registerCb.isSome():
|
||||
var memberSeq = newSeq[Membership]()
|
||||
for i in 0 ..< rateCommitments.len:
|
||||
memberSeq.add(
|
||||
Membership(
|
||||
rateCommitment: rateCommitments[i],
|
||||
index: g.latestIndex + MembershipIndex(i) + 1,
|
||||
)
|
||||
if g.registerCb.isSome():
|
||||
var memberSeq = newSeq[Membership]()
|
||||
for i in 0 ..< rateCommitments.len:
|
||||
memberSeq.add(
|
||||
Membership(
|
||||
rateCommitment: rateCommitments[i],
|
||||
index: g.latestIndex + MembershipIndex(i) + 1,
|
||||
)
|
||||
await g.registerCb.get()(memberSeq)
|
||||
)
|
||||
await g.registerCb.get()(memberSeq)
|
||||
|
||||
discard g.slideRootQueue()
|
||||
discard g.slideRootQueue()
|
||||
|
||||
g.latestIndex += MembershipIndex(rateCommitments.len)
|
||||
g.latestIndex += MembershipIndex(rateCommitments.len)
|
||||
return
|
||||
|
||||
return
|
||||
method withdraw*(
|
||||
g: StaticGroupManager, idSecretHash: IdentitySecretHash
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
else:
|
||||
method registerBatch*(
|
||||
g: StaticGroupManager, idCommitments: seq[IDCommitment]
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
let groupKeys = g.groupKeys
|
||||
|
||||
let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, idCommitments)
|
||||
if not membersInserted:
|
||||
raise newException(ValueError, "Failed to insert members into the merkle tree")
|
||||
for i in 0 ..< groupKeys.len:
|
||||
if groupKeys[i].idSecretHash == idSecretHash:
|
||||
let idCommitment = groupKeys[i].idCommitment
|
||||
let index = MembershipIndex(i)
|
||||
let rateCommitment = RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: g.userMessageLimit.get()
|
||||
)
|
||||
let memberRemoved = g.rlnInstance.removeMember(index)
|
||||
if not memberRemoved:
|
||||
raise newException(ValueError, "Failed to remove member from the merkle tree")
|
||||
|
||||
if g.registerCb.isSome():
|
||||
var memberSeq = newSeq[Membership]()
|
||||
for i in 0 ..< idCommitments.len:
|
||||
memberSeq.add(
|
||||
Membership(
|
||||
idCommitment: idCommitments[i],
|
||||
index: g.latestIndex + MembershipIndex(i) + 1,
|
||||
)
|
||||
)
|
||||
await g.registerCb.get()(memberSeq)
|
||||
if g.withdrawCb.isSome():
|
||||
let withdrawCb = g.withdrawCb.get()
|
||||
await withdrawCb(@[Membership(rateCommitment: rateCommitment, index: index)])
|
||||
|
||||
discard g.slideRootQueue()
|
||||
|
||||
g.latestIndex += MembershipIndex(idCommitments.len)
|
||||
|
||||
return
|
||||
|
||||
when defined(rln_v2):
|
||||
method withdraw*(
|
||||
g: StaticGroupManager, idSecretHash: IdentitySecretHash
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
let groupKeys = g.groupKeys
|
||||
|
||||
for i in 0 ..< groupKeys.len:
|
||||
if groupKeys[i].idSecretHash == idSecretHash:
|
||||
let idCommitment = groupKeys[i].idCommitment
|
||||
let index = MembershipIndex(i)
|
||||
let rateCommitment = RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: g.userMessageLimit.get()
|
||||
)
|
||||
let memberRemoved = g.rlnInstance.removeMember(index)
|
||||
if not memberRemoved:
|
||||
raise newException(ValueError, "Failed to remove member from the merkle tree")
|
||||
|
||||
if g.withdrawCb.isSome():
|
||||
let withdrawCb = g.withdrawCb.get()
|
||||
await withdrawCb(@[Membership(rateCommitment: rateCommitment, index: index)])
|
||||
|
||||
return
|
||||
|
||||
else:
|
||||
method withdraw*(
|
||||
g: StaticGroupManager, idSecretHash: IdentitySecretHash
|
||||
): Future[void] {.async: (raises: [Exception]).} =
|
||||
initializedGuard(g)
|
||||
|
||||
let groupKeys = g.groupKeys
|
||||
|
||||
for i in 0 ..< groupKeys.len:
|
||||
if groupKeys[i].idSecretHash == idSecretHash:
|
||||
let idCommitment = groupKeys[i].idCommitment
|
||||
let index = MembershipIndex(i)
|
||||
let memberRemoved = g.rlnInstance.removeMember(index)
|
||||
if not memberRemoved:
|
||||
raise newException(ValueError, "Failed to remove member from the merkle tree")
|
||||
|
||||
if g.withdrawCb.isSome():
|
||||
let withdrawCb = g.withdrawCb.get()
|
||||
await withdrawCb((@[Membership(idCommitment: idCommitment, index: index)]))
|
||||
|
||||
return
|
||||
return
|
||||
|
||||
method withdrawBatch*(
|
||||
g: StaticGroupManager, idSecretHashes: seq[IdentitySecretHash]
|
||||
|
||||
@ -21,13 +21,9 @@ type
|
||||
Epoch* = array[32, byte]
|
||||
RlnIdentifier* = array[32, byte]
|
||||
ZKSNARK* = array[128, byte]
|
||||
|
||||
when defined(rln_v2):
|
||||
type
|
||||
MessageId* = uint64
|
||||
ExternalNullifier* = array[32, byte]
|
||||
|
||||
type RateCommitment* = object
|
||||
MessageId* = uint64
|
||||
ExternalNullifier* = array[32, byte]
|
||||
RateCommitment* = object
|
||||
idCommitment*: IDCommitment
|
||||
userMessageLimit*: UserMessageLimit
|
||||
|
||||
@ -51,9 +47,8 @@ type RateLimitProof* = object
|
||||
epoch*: Epoch
|
||||
## Application specific RLN Identifier
|
||||
rlnIdentifier*: RlnIdentifier
|
||||
when defined(rln_v2):
|
||||
## the external nullifier used for the generation of the `proof` (derived from poseidon([epoch, rln_identifier]))
|
||||
externalNullifier*: ExternalNullifier
|
||||
## the external nullifier used for the generation of the `proof` (derived from poseidon([epoch, rln_identifier]))
|
||||
externalNullifier*: ExternalNullifier
|
||||
|
||||
type ProofMetadata* = object
|
||||
nullifier*: Nullifier
|
||||
|
||||
@ -161,32 +161,31 @@ proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] =

return ok(output)

when defined(rln_v2):
proc toLeaf*(rateCommitment: RateCommitment): RlnRelayResult[seq[byte]] =
let idCommitment = rateCommitment.idCommitment
var userMessageLimit: array[32, byte]
try:
discard userMessageLimit.copyFrom(
toBytes(rateCommitment.userMessageLimit, Endianness.littleEndian)
)
except CatchableError:
return err(
"could not convert the user message limit to bytes: " & getCurrentExceptionMsg()
)
let leaf = poseidon(@[@idCommitment, @userMessageLimit]).valueOr:
return err("could not convert the rate commitment to a leaf")
var retLeaf = newSeq[byte](leaf.len)
for i in 0 ..< leaf.len:
retLeaf[i] = leaf[i]
return ok(retLeaf)
proc toLeaf*(rateCommitment: RateCommitment): RlnRelayResult[seq[byte]] =
let idCommitment = rateCommitment.idCommitment
var userMessageLimit: array[32, byte]
try:
discard userMessageLimit.copyFrom(
toBytes(rateCommitment.userMessageLimit, Endianness.littleEndian)
)
except CatchableError:
return err(
"could not convert the user message limit to bytes: " & getCurrentExceptionMsg()
)
let leaf = poseidon(@[@idCommitment, @userMessageLimit]).valueOr:
return err("could not convert the rate commitment to a leaf")
var retLeaf = newSeq[byte](leaf.len)
for i in 0 ..< leaf.len:
retLeaf[i] = leaf[i]
return ok(retLeaf)

proc toLeaves*(rateCommitments: seq[RateCommitment]): RlnRelayResult[seq[seq[byte]]] =
var leaves = newSeq[seq[byte]]()
for rateCommitment in rateCommitments:
let leaf = toLeaf(rateCommitment).valueOr:
return err("could not convert the rate commitment to a leaf: " & $error)
leaves.add(leaf)
return ok(leaves)
proc toLeaves*(rateCommitments: seq[RateCommitment]): RlnRelayResult[seq[seq[byte]]] =
var leaves = newSeq[seq[byte]]()
for rateCommitment in rateCommitments:
let leaf = toLeaf(rateCommitment).valueOr:
return err("could not convert the rate commitment to a leaf: " & $error)
leaves.add(leaf)
return ok(leaves)

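# --- Illustrative sketch (not part of the diff) -----------------------------
# toLeaf above derives the tree leaf as poseidon(idCommitment,
# userMessageLimit), with the limit padded to a 32-byte little-endian value.
# The padding step in isolation, standard library only; limitToLeafInput is a
# hypothetical name, not repo code.
proc limitToLeafInput(userMessageLimit: uint64): array[32, byte] =
  # the low 8 bytes carry the limit in little-endian order, the rest stay zero
  for i in 0 ..< 8:
    result[i] = byte((userMessageLimit shr (8 * i)) and 0xff'u64)

when isMainModule:
  let padded = limitToLeafInput(20'u64)   # e.g. a limit of 20 messages per epoch
  doAssert padded[0] == 20 and padded[31] == 0
# ---------------------------------------------------------------------------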
proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
|
||||
let externalNullifier = poseidon(@[@(proof.epoch), @(proof.rlnIdentifier)]).valueOr:
|
||||
@ -200,151 +199,81 @@ proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
|
||||
)
|
||||
)
|
||||
|
||||
when defined(rln_v2):
|
||||
proc proofGen*(
|
||||
rlnInstance: ptr RLN,
|
||||
data: openArray[byte],
|
||||
membership: IdentityCredential,
|
||||
userMessageLimit: UserMessageLimit,
|
||||
messageId: MessageId,
|
||||
index: MembershipIndex,
|
||||
epoch: Epoch,
|
||||
rlnIdentifier = DefaultRlnIdentifier,
|
||||
): RateLimitProofResult =
|
||||
# obtain the external nullifier
|
||||
let externalNullifierRes = poseidon(@[@(epoch), @(rlnIdentifier)])
|
||||
proc proofGen*(
|
||||
rlnInstance: ptr RLN,
|
||||
data: openArray[byte],
|
||||
membership: IdentityCredential,
|
||||
userMessageLimit: UserMessageLimit,
|
||||
messageId: MessageId,
|
||||
index: MembershipIndex,
|
||||
epoch: Epoch,
|
||||
rlnIdentifier = DefaultRlnIdentifier,
|
||||
): RateLimitProofResult =
|
||||
# obtain the external nullifier
|
||||
let externalNullifierRes = poseidon(@[@(epoch), @(rlnIdentifier)])
|
||||
|
||||
if externalNullifierRes.isErr():
|
||||
return err("could not construct the external nullifier")
|
||||
if externalNullifierRes.isErr():
|
||||
return err("could not construct the external nullifier")
|
||||
|
||||
# serialize inputs
|
||||
let serializedInputs = serialize(
|
||||
idSecretHash = membership.idSecretHash,
|
||||
memIndex = index,
|
||||
userMessageLimit = userMessageLimit,
|
||||
messageId = messageId,
|
||||
externalNullifier = externalNullifierRes.get(),
|
||||
msg = data,
|
||||
)
|
||||
var inputBuffer = toBuffer(serializedInputs)
|
||||
# serialize inputs
|
||||
let serializedInputs = serialize(
|
||||
idSecretHash = membership.idSecretHash,
|
||||
memIndex = index,
|
||||
userMessageLimit = userMessageLimit,
|
||||
messageId = messageId,
|
||||
externalNullifier = externalNullifierRes.get(),
|
||||
msg = data,
|
||||
)
|
||||
var inputBuffer = toBuffer(serializedInputs)
|
||||
|
||||
debug "input buffer ", inputBuffer = repr(inputBuffer)
|
||||
debug "input buffer ", inputBuffer = repr(inputBuffer)
|
||||
|
||||
# generate the proof
|
||||
var proof: Buffer
|
||||
let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
|
||||
# check whether the generate_proof call is done successfully
|
||||
if not proofIsSuccessful:
|
||||
return err("could not generate the proof")
|
||||
# generate the proof
|
||||
var proof: Buffer
|
||||
let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
|
||||
# check whether the generate_proof call is done successfully
|
||||
if not proofIsSuccessful:
|
||||
return err("could not generate the proof")
|
||||
|
||||
var proofValue = cast[ptr array[320, byte]](proof.`ptr`)
|
||||
let proofBytes: array[320, byte] = proofValue[]
|
||||
debug "proof content", proofHex = proofValue[].toHex
|
||||
var proofValue = cast[ptr array[320, byte]](proof.`ptr`)
|
||||
let proofBytes: array[320, byte] = proofValue[]
|
||||
debug "proof content", proofHex = proofValue[].toHex
|
||||
|
||||
## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]
## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]

let
proofOffset = 128
rootOffset = proofOffset + 32
externalNullifierOffset = rootOffset + 32
shareXOffset = externalNullifierOffset + 32
shareYOffset = shareXOffset + 32
nullifierOffset = shareYOffset + 32
let
proofOffset = 128
rootOffset = proofOffset + 32
externalNullifierOffset = rootOffset + 32
shareXOffset = externalNullifierOffset + 32
shareYOffset = shareXOffset + 32
nullifierOffset = shareYOffset + 32

var
zkproof: ZKSNARK
proofRoot, shareX, shareY: MerkleNode
externalNullifier: ExternalNullifier
nullifier: Nullifier
var
zkproof: ZKSNARK
proofRoot, shareX, shareY: MerkleNode
externalNullifier: ExternalNullifier
nullifier: Nullifier

discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1])
discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1])
discard
externalNullifier.copyFrom(proofBytes[rootOffset .. externalNullifierOffset - 1])
discard shareX.copyFrom(proofBytes[externalNullifierOffset .. shareXOffset - 1])
discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1])
discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1])
discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1])
discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1])
discard
externalNullifier.copyFrom(proofBytes[rootOffset .. externalNullifierOffset - 1])
discard shareX.copyFrom(proofBytes[externalNullifierOffset .. shareXOffset - 1])
discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1])
discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1])

let output = RateLimitProof(
proof: zkproof,
merkleRoot: proofRoot,
externalNullifier: externalNullifier,
epoch: epoch,
rlnIdentifier: rlnIdentifier,
shareX: shareX,
shareY: shareY,
nullifier: nullifier,
)
return ok(output)

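# --- Illustrative sketch (not part of the diff) -----------------------------
# The offsets above cut zerokit's 320-byte proof blob into
# [ proof<128> | root<32> | external_nullifier<32> | share_x<32> |
#   share_y<32> | nullifier<32> ]. A standalone check of that offset
# arithmetic; fieldRanges is a hypothetical helper, not repo code.
proc fieldRanges(): seq[(string, int, int)] =
  # (name, start, pastEnd) byte ranges for each field of the proof blob
  var offset = 0
  for (name, size) in [("proof", 128), ("root", 32), ("externalNullifier", 32),
                       ("shareX", 32), ("shareY", 32), ("nullifier", 32)]:
    result.add((name, offset, offset + size))
    offset += size

when isMainModule:
  let ranges = fieldRanges()
  doAssert ranges[^1][2] == 320   # the six fields cover the whole 320 bytes
# ---------------------------------------------------------------------------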
else:
  proc proofGen*(
      rlnInstance: ptr RLN,
      data: openArray[byte],
      memKeys: IdentityCredential,
      memIndex: MembershipIndex,
      epoch: Epoch,
  ): RateLimitProofResult =
    # serialize inputs
    let serializedInputs = serialize(
      idSecretHash = memKeys.idSecretHash,
      memIndex = memIndex,
      epoch = epoch,
      msg = data,
    )
    var inputBuffer = toBuffer(serializedInputs)

    debug "input buffer ", inputBuffer = repr(inputBuffer)

    # generate the proof
    var proof: Buffer
    let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
    # check whether the generate_proof call is done successfully
    if not proofIsSuccessful:
      return err("could not generate the proof")

    var proofValue = cast[ptr array[320, byte]](proof.`ptr`)
    let proofBytes: array[320, byte] = proofValue[]
    debug "proof content", proofHex = proofValue[].toHex

    ## parse the proof as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]

    let
      proofOffset = 128
      rootOffset = proofOffset + 32
      epochOffset = rootOffset + 32
      shareXOffset = epochOffset + 32
      shareYOffset = shareXOffset + 32
      nullifierOffset = shareYOffset + 32
      rlnIdentifierOffset = nullifierOffset + 32

    var
      zkproof: ZKSNARK
      proofRoot, shareX, shareY: MerkleNode
      epoch: Epoch
      nullifier: Nullifier
      rlnIdentifier: RlnIdentifier

    discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1])
    discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1])
    discard epoch.copyFrom(proofBytes[rootOffset .. epochOffset - 1])
    discard shareX.copyFrom(proofBytes[epochOffset .. shareXOffset - 1])
    discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1])
    discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1])
    discard
      rlnIdentifier.copyFrom(proofBytes[nullifierOffset .. rlnIdentifierOffset - 1])

    let output = RateLimitProof(
      proof: zkproof,
      merkleRoot: proofRoot,
      epoch: epoch,
      shareX: shareX,
      shareY: shareY,
      nullifier: nullifier,
      rlnIdentifier: rlnIdentifier,
    )

    return ok(output)
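The v1 and v2 layouts above differ only in the middle fields: v2 replaces the epoch<32> chunk with a derived external_nullifier<32> and drops the trailing rln_identifier<32>, so, taking the widths in the comments at face value, the parsed region shrinks from 320 to 288 bytes. A minimal, self-contained sketch of the offset arithmetic (illustrative only; it does not depend on the zerokit types used in the real code):

# Generic offset walk over a serialized proof: given the field widths from the
# layout comments above, return each field as its own byte seq.
proc splitFields(blob: openArray[byte], widths: openArray[int]): seq[seq[byte]] =
  var offset = 0
  for w in widths:
    result.add blob[offset .. offset + w - 1]
    offset += w

when isMainModule:
  const v1Widths = [128, 32, 32, 32, 32, 32, 32] # ... | epoch | ... | rln_identifier
  const v2Widths = [128, 32, 32, 32, 32, 32]     # ... | external_nullifier | ...
  var v1Blob: array[320, byte]
  var v2Blob: array[288, byte]
  doAssert splitFields(v1Blob, v1Widths).len == 7
  doAssert splitFields(v2Blob, v2Widths).len == 6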
# validRoots should contain a sequence of roots in the acceptable window.
# By default it is set to an empty sequence of roots, which means the validity check for the proof's root is skipped.
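A toy illustration of the validRoots semantics described in the comment above (not the real implementation): an empty window accepts any root, otherwise the proof's root must be one of the supplied roots.

# Illustrative only: mirrors the documented default (empty validRoots skips
# the root check); the real proofVerify operates on MerkleNode values.
proc rootIsAcceptable(proofRoot: string, validRoots: seq[string]): bool =
  validRoots.len == 0 or proofRoot in validRoots

when isMainModule:
  doAssert rootIsAcceptable("r1", @[])             # empty window: check skipped
  doAssert rootIsAcceptable("r1", @["r0", "r1"])
  doAssert not rootIsAcceptable("r9", @["r0", "r1"])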
@@ -357,14 +286,13 @@ proc proofVerify*(
  ## verifies the proof, returns an error if the proof verification fails
  ## returns true if the proof is valid
  var normalizedProof = proof
  when defined(rln_v2):
    # normalize the proof so that verification runs against the derived value of
    # the externalNullifier: verification will fail if a malicious peer attaches
    # an invalid epoch + rlnIdentifier pair
    normalizedProof.externalNullifier = poseidon(
      @[@(proof.epoch), @(proof.rlnIdentifier)]
    ).valueOr:
      return err("could not construct the external nullifier")
  var
    proofBytes = serialize(normalizedProof, data)
    proofBuffer = proofBytes.toBuffer()
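The normalization step above is the heart of the v2 check: the verifier ignores whatever external nullifier the sender attached and recomputes it from the claimed epoch and rlnIdentifier, so a mismatched pair cannot verify. A toy sketch of that idea, with a stand-in hash in place of Poseidon (names here are illustrative, not the real API):

# `derive` stands in for poseidon(@[@(epoch), @(rlnIdentifier)]); any binding
# of the two inputs is enough to illustrate the normalization.
type ToyProof = object
  epoch, rlnIdentifier, externalNullifier: string

proc derive(epoch, rlnIdentifier: string): string =
  "H(" & epoch & "|" & rlnIdentifier & ")"

proc normalize(p: ToyProof): ToyProof =
  result = p
  # drop the sender-supplied value and recompute it from the claimed inputs
  result.externalNullifier = derive(p.epoch, p.rlnIdentifier)

when isMainModule:
  let forged = ToyProof(epoch: "e42", rlnIdentifier: "waku", externalNullifier: "bogus")
  doAssert normalize(forged).externalNullifier == derive("e42", "waku")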
@@ -23,10 +23,8 @@ import
  ./conversion_utils,
  ./constants,
  ./protocol_types,
  ./protocol_metrics

when defined(rln_v2):
  import ./nonce_manager
  ./protocol_metrics,
  ./nonce_manager

import
  ../common/error_handling,
@@ -47,8 +45,7 @@ type WakuRlnConfig* = object
  rlnRelayTreePath*: string
  rlnEpochSizeSec*: uint64
  onFatalErrorAction*: OnFatalErrorHandler
  when defined(rln_v2):
    rlnRelayUserMessageLimit*: uint64

proc createMembershipList*(
    rln: ptr RLN, n: int
@@ -93,8 +90,7 @@ type WakuRLNRelay* = ref object of RootObj
  rlnMaxEpochGap*: uint64
  groupManager*: GroupManager
  onFatalErrorAction*: OnFatalErrorHandler
  when defined(rln_v2):
    nonceManager*: NonceManager

proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch =
  ## gets time `t` as `float64` with subseconds resolution in the fractional part
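A hedged sketch of the epoch derivation the docstring implies: the epoch index is the timestamp divided by the configured epoch length. The body of calcEpoch is not part of this hunk, and the real proc also serializes the index into the 32-byte Epoch type.

# Illustrative only: integer epoch index from a float timestamp.
proc toyEpochIndex(t: float64, epochSizeSec: uint64): uint64 =
  uint64(t / epochSizeSec.float64)

when isMainModule:
  doAssert toyEpochIndex(1_700_000_000.5, 1) == 1_700_000_000'u64
  doAssert toyEpochIndex(1_700_000_000.5, 600) == 2_833_333'u64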
@@ -307,14 +303,11 @@ proc appendRLNProof*(
  let input = msg.toRLNSignal()
  let epoch = rlnPeer.calcEpoch(senderEpochTime)

  when defined(rln_v2):
    let nonce = rlnPeer.nonceManager.getNonce().valueOr:
      return err("could not get new message id to generate an rln proof: " & $error)
    let proof = rlnPeer.groupManager.generateProof(input, epoch, nonce).valueOr:
      return err("could not generate rln-v2 proof: " & $error)
  else:
    let proof = rlnPeer.groupManager.generateProof(input, epoch).valueOr:
      return err("could not generate rln proof: " & $error)

  msg.proof = proof.encode().buffer
  return ok()
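For context, a toy version of the per-epoch rate limiting that the nonce manager provides to appendRLNProof: it hands out at most userMessageLimit message ids per epoch window. Names and behaviour are illustrative; the real NonceManager lives in ./nonce_manager and also guards nonce reuse across epoch rollovers.

import std/[options, times]

type ToyNonceManager = object
  limit: uint64        # corresponds to rlnRelayUserMessageLimit
  epochSizeSec: float  # corresponds to rlnEpochSizeSec
  epochStart: float
  nextNonce: uint64

proc init(T: type ToyNonceManager, limit: uint64, epochSizeSec: float): T =
  T(limit: limit, epochSizeSec: epochSizeSec, epochStart: epochTime())

proc getNonce(nm: var ToyNonceManager): Option[uint64] =
  let now = epochTime()
  if now - nm.epochStart >= nm.epochSizeSec:
    # new epoch window: reset the message counter
    nm.epochStart = now
    nm.nextNonce = 0
  if nm.nextNonce >= nm.limit:
    return none(uint64)  # over the per-epoch message limit, no proof possible
  result = some(nm.nextNonce)
  inc nm.nextNonce

ToyNonceManager.init(20, 600.0) then mirrors the NonceManager.init(conf.rlnRelayUserMessageLimit, conf.rlnEpochSizeSec.float) call in the mount code below.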
@@ -445,28 +438,18 @@ proc mount(
  (await groupManager.startGroupSync()).isOkOr:
    return err("could not start the group sync: " & $error)

  when defined(rln_v2):
    return ok(
      WakuRLNRelay(
        groupManager: groupManager,
        nonceManager:
          NonceManager.init(conf.rlnRelayUserMessageLimit, conf.rlnEpochSizeSec.float),
        rlnEpochSizeSec: conf.rlnEpochSizeSec,
        rlnMaxEpochGap:
          max(uint64(MaxClockGapSeconds / float64(conf.rlnEpochSizeSec)), 1),
        onFatalErrorAction: conf.onFatalErrorAction,
      )
    )
  else:
    return ok(
      WakuRLNRelay(
        groupManager: groupManager,
        rlnEpochSizeSec: conf.rlnEpochSizeSec,
        rlnMaxEpochGap:
          max(uint64(MaxClockGapSeconds / float64(conf.rlnEpochSizeSec)), 1),
        onFatalErrorAction: conf.onFatalErrorAction,
      )
    )
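The rlnMaxEpochGap expression above simply converts the allowed clock skew into a number of epochs, clamped to at least one. A worked example; MaxClockGapSeconds is assumed to be 20.0 here purely for illustration, the real constant is defined in the rln-relay constants module:

proc maxEpochGap(maxClockGapSeconds: float64, epochSizeSec: uint64): uint64 =
  max(uint64(maxClockGapSeconds / float64(epochSizeSec)), 1)

when isMainModule:
  doAssert maxEpochGap(20.0, 1) == 20    # 1 s epochs: accept up to 20 epochs of skew
  doAssert maxEpochGap(20.0, 600) == 1   # long epochs: never drop below one epoch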
proc isReady*(rlnPeer: WakuRLNRelay): Future[bool] {.async: (raises: [Exception]).} =
  ## returns true if the rln-relay protocol is ready to relay messages