mirror of https://github.com/logos-messaging/logos-messaging-nim.git
synced 2026-01-07 16:33:08 +00:00

feat: deprecate sync / local merkle tree (#3312)

parent cc66c7fe78
commit d86babac3a

Makefile (2 changes)
@@ -165,7 +165,7 @@ nimbus-build-system-nimble-dir:
 .PHONY: librln

 LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
-LIBRLN_VERSION := v0.5.1
+LIBRLN_VERSION := v0.7.0

 ifeq ($(detected_OS),Windows)
 LIBRLN_FILE := rln.lib

@@ -3,7 +3,7 @@
 {.push raises: [].}

 import
-  std/[options, sequtils, deques],
+  std/[options, sequtils, deques, random],
   results,
   stew/byteutils,
   testutils/unittests,
@@ -13,7 +13,8 @@ import
   web3,
   libp2p/crypto/crypto,
   eth/keys,
-  tests/testlib/testasync
+  tests/testlib/testasync,
+  tests/testlib/testutils

 import
   waku/[
@@ -47,7 +48,6 @@ suite "Onchain group manager":
       manager.ethRpc.isSome()
       manager.wakuRlnContract.isSome()
       manager.initialized
-      manager.rlnContractDeployedBlockNumber > 0.Quantity
       manager.rlnRelayMaxMessageLimit == 100

   asyncTest "should error on initialization when chainId does not match":
@@ -97,18 +97,13 @@ suite "Onchain group manager":
       echo "---"

   asyncTest "should error if contract does not exist":
-    var triggeredError = false
-
     manager.ethContractAddress = "0x0000000000000000000000000000000000000000"
-    manager.onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
-      echo "---"
-      discard
-        "Failed to get the deployed block number. Have you set the correct contract address?: No response from the Web3 provider"
-      echo msg
-      echo "---"
-      triggeredError = true

-    discard await manager.init()
+    var triggeredError = false
+    try:
+      discard await manager.init()
+    except CatchableError:
+      triggeredError = true

     check triggeredError

@@ -119,103 +114,71 @@ suite "Onchain group manager":
     (await manager.init()).isErrOr:
       raiseAssert "Expected error when keystore file doesn't exist"

-  asyncTest "startGroupSync: should start group sync":
+  asyncTest "trackRootChanges: start tracking roots":
     (await manager.init()).isOkOr:
       raiseAssert $error
-    (await manager.startGroupSync()).isOkOr:
-      raiseAssert $error
+    discard manager.trackRootChanges()

-  asyncTest "startGroupSync: should guard against uninitialized state":
-    (await manager.startGroupSync()).isErrOr:
-      raiseAssert "Expected error when not initialized"
+  asyncTest "trackRootChanges: should guard against uninitialized state":
+    try:
+      discard manager.trackRootChanges()
+    except CatchableError:
+      check getCurrentExceptionMsg().len == 38

-  asyncTest "startGroupSync: should sync to the state of the group":
+  asyncTest "trackRootChanges: should sync to the state of the group":
     let credentials = generateCredentials(manager.rlnInstance)
-    let rateCommitment = getRateCommitment(credentials, UserMessageLimit(1)).valueOr:
-      raiseAssert $error
     (await manager.init()).isOkOr:
       raiseAssert $error

-    let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
-      raiseAssert $error
+    let merkleRootBefore = manager.fetchMerkleRoot()

-    let fut = newFuture[void]("startGroupSync")
-
-    proc generateCallback(fut: Future[void]): OnRegisterCallback =
-      proc callback(registrations: seq[Membership]): Future[void] {.async.} =
-        check:
-          registrations.len == 1
-          registrations[0].index == 0
-          registrations[0].rateCommitment == rateCommitment
-        fut.complete()
-
-      return callback
-
     try:
-      manager.onRegister(generateCallback(fut))
       await manager.register(credentials, UserMessageLimit(1))
-      (await manager.startGroupSync()).isOkOr:
-        raiseAssert $error
     except Exception, CatchableError:
       assert false, "exception raised: " & getCurrentExceptionMsg()

-    await fut
+    discard await withTimeout(trackRootChanges(manager), 15.seconds)

-    let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
+    let merkleRootAfter = manager.fetchMerkleRoot()

+    let metadataSetRes = manager.setMetadata()
+    assert metadataSetRes.isOk(), metadataSetRes.error
+
+    let metadataOpt = getMetadata(manager.rlnInstance).valueOr:
       raiseAssert $error

-    let metadataOpt = manager.rlnInstance.getMetadata().valueOr:
-      raiseAssert $error
+    assert metadataOpt.isSome(), "metadata is not set"
+    let metadata = metadataOpt.get()

     check:
-      metadataOpt.get().validRoots == manager.validRoots.toSeq()
+      metadata.validRoots == manager.validRoots.toSeq()
       merkleRootBefore != merkleRootAfter

-  asyncTest "startGroupSync: should fetch history correctly":
+  asyncTest "trackRootChanges: should fetch history correctly":
+    # TODO: We can't use `trackRootChanges()` directly in this test because its current implementation
+    # relies on a busy loop rather than event-based monitoring. As a result, some root changes
+    # may be missed, leading to inconsistent test results (i.e., it may randomly return true or false).
+    # To ensure reliability, we use the `updateRoots()` function to validate the `validRoots` window
+    # after each registration.
     const credentialCount = 6
     let credentials = generateCredentials(manager.rlnInstance, credentialCount)
     (await manager.init()).isOkOr:
       raiseAssert $error

-    let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
-      raiseAssert $error
+    let merkleRootBefore = manager.fetchMerkleRoot()

-    type TestGroupSyncFuts = array[0 .. credentialCount - 1, Future[void]]
-    var futures: TestGroupSyncFuts
-    for i in 0 ..< futures.len():
-      futures[i] = newFuture[void]()
-    proc generateCallback(
-        futs: TestGroupSyncFuts, credentials: seq[IdentityCredential]
-    ): OnRegisterCallback =
-      var futureIndex = 0
-      proc callback(registrations: seq[Membership]): Future[void] {.async.} =
-        let rateCommitment =
-          getRateCommitment(credentials[futureIndex], UserMessageLimit(1))
-        if registrations.len == 1 and
-            registrations[0].rateCommitment == rateCommitment.get() and
-            registrations[0].index == MembershipIndex(futureIndex):
-          futs[futureIndex].complete()
-          futureIndex += 1
-
-      return callback
-
     try:
-      manager.onRegister(generateCallback(futures, credentials))
-      (await manager.startGroupSync()).isOkOr:
-        raiseAssert $error
-
       for i in 0 ..< credentials.len():
         await manager.register(credentials[i], UserMessageLimit(1))
+        discard await manager.updateRoots()
     except Exception, CatchableError:
       assert false, "exception raised: " & getCurrentExceptionMsg()

-    await allFutures(futures)
-
-    let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
-      raiseAssert $error
+    let merkleRootAfter = manager.fetchMerkleRoot()

     check:
       merkleRootBefore != merkleRootAfter
-      manager.validRootBuffer.len() == credentialCount - AcceptableRootWindowSize
+      manager.validRoots.len() == credentialCount

   asyncTest "register: should guard against uninitialized state":
     let dummyCommitment = default(IDCommitment)
@@ -232,14 +195,12 @@ suite "Onchain group manager":
       assert false, "exception raised: " & getCurrentExceptionMsg()

   asyncTest "register: should register successfully":
+    # TODO :- similar to ```trackRootChanges: should fetch history correctly```
     (await manager.init()).isOkOr:
       raiseAssert $error
-    (await manager.startGroupSync()).isOkOr:
-      raiseAssert $error

     let idCommitment = generateCredentials(manager.rlnInstance).idCommitment
-    let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
-      raiseAssert $error
+    let merkleRootBefore = manager.fetchMerkleRoot()

     try:
       await manager.register(
@@ -251,10 +212,10 @@ suite "Onchain group manager":
       assert false,
         "exception raised when calling register: " & getCurrentExceptionMsg()

-    let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
-      raiseAssert $error
+    let merkleRootAfter = manager.fetchMerkleRoot()
     check:
-      merkleRootAfter.inHex() != merkleRootBefore.inHex()
+      merkleRootAfter != merkleRootBefore
       manager.latestIndex == 1

   asyncTest "register: callback is called":
@@ -264,19 +225,19 @@ suite "Onchain group manager":
     let fut = newFuture[void]()

     proc callback(registrations: seq[Membership]): Future[void] {.async.} =
-      let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1))
+      let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1)).get()
       check:
         registrations.len == 1
-        registrations[0].rateCommitment == rateCommitment.get()
+        registrations[0].rateCommitment == rateCommitment
         registrations[0].index == 0
       fut.complete()

-    manager.onRegister(callback)
     (await manager.init()).isOkOr:
       raiseAssert $error

+    manager.onRegister(callback)
+
     try:
-      (await manager.startGroupSync()).isOkOr:
-        raiseAssert $error
       await manager.register(
         RateCommitment(
           idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
@@ -298,38 +259,43 @@ suite "Onchain group manager":
       assert false, "exception raised: " & getCurrentExceptionMsg()

   asyncTest "validateRoot: should validate good root":
-    let credentials = generateCredentials(manager.rlnInstance)
-    (await manager.init()).isOkOr:
-      raiseAssert $error
+    let idCredentials = generateCredentials(manager.rlnInstance)
+    let idCommitment = idCredentials.idCommitment

     let fut = newFuture[void]()

     proc callback(registrations: seq[Membership]): Future[void] {.async.} =
       if registrations.len == 1 and
           registrations[0].rateCommitment ==
-          getRateCommitment(credentials, UserMessageLimit(1)).get() and
+          getRateCommitment(idCredentials, UserMessageLimit(1)).get() and
           registrations[0].index == 0:
-        manager.idCredentials = some(credentials)
+        manager.idCredentials = some(idCredentials)
         fut.complete()

     manager.onRegister(callback)

+    (await manager.init()).isOkOr:
+      raiseAssert $error
+
     try:
-      (await manager.startGroupSync()).isOkOr:
-        raiseAssert $error
-      await manager.register(credentials, UserMessageLimit(1))
+      await manager.register(idCredentials, UserMessageLimit(1))
     except Exception, CatchableError:
       assert false, "exception raised: " & getCurrentExceptionMsg()

     await fut

+    let rootUpdated = await manager.updateRoots()
+
+    if rootUpdated:
+      let proofResult = await manager.fetchMerkleProofElements()
+      if proofResult.isErr():
+        error "Failed to fetch Merkle proof", error = proofResult.error
+      manager.merkleProofCache = proofResult.get()
+
     let messageBytes = "Hello".toBytes()

-    # prepare the epoch
     let epoch = default(Epoch)
     debug "epoch in bytes", epochHex = epoch.inHex()

-    # generate proof
     let validProofRes = manager.generateProof(
       data = messageBytes, epoch = epoch, messageId = MessageId(1)
     )
@@ -338,38 +304,39 @@ suite "Onchain group manager":
       validProofRes.isOk()
     let validProof = validProofRes.get()

-    # validate the root (should be true)
     let validated = manager.validateRoot(validProof.merkleRoot)

     check:
       validated

   asyncTest "validateRoot: should reject bad root":
+    let idCredentials = generateCredentials(manager.rlnInstance)
+    let idCommitment = idCredentials.idCommitment
+
     (await manager.init()).isOkOr:
       raiseAssert $error
-    (await manager.startGroupSync()).isOkOr:
-      raiseAssert $error
-
-    let credentials = generateCredentials(manager.rlnInstance)

-    ## Assume the registration occured out of band
-    manager.idCredentials = some(credentials)
-    manager.membershipIndex = some(MembershipIndex(0))
     manager.userMessageLimit = some(UserMessageLimit(1))
+    manager.membershipIndex = some(MembershipIndex(0))
+    manager.idCredentials = some(idCredentials)
+
+    manager.merkleProofCache = newSeq[byte](640)
+    for i in 0 ..< 640:
+      manager.merkleProofCache[i] = byte(rand(255))

     let messageBytes = "Hello".toBytes()

-    # prepare the epoch
     let epoch = default(Epoch)
     debug "epoch in bytes", epochHex = epoch.inHex()

-    # generate proof
-    let validProof = manager.generateProof(
-      data = messageBytes, epoch = epoch, messageId = MessageId(0)
-    ).valueOr:
-      raiseAssert $error
+    let validProofRes = manager.generateProof(
+      data = messageBytes, epoch = epoch, messageId = MessageId(1)
+    )
+    check:
+      validProofRes.isOk()
+    let validProof = validProofRes.get()

-    # validate the root (should be false)
     let validated = manager.validateRoot(validProof.merkleRoot)

     check:
@@ -393,13 +360,19 @@ suite "Onchain group manager":
     manager.onRegister(callback)

     try:
-      (await manager.startGroupSync()).isOkOr:
-        raiseAssert $error
       await manager.register(credentials, UserMessageLimit(1))
     except Exception, CatchableError:
       assert false, "exception raised: " & getCurrentExceptionMsg()
     await fut

+    let rootUpdated = await manager.updateRoots()
+
+    if rootUpdated:
+      let proofResult = await manager.fetchMerkleProofElements()
+      if proofResult.isErr():
+        error "Failed to fetch Merkle proof", error = proofResult.error
+      manager.merkleProofCache = proofResult.get()
+
     let messageBytes = "Hello".toBytes()

     # prepare the epoch
@@ -412,7 +385,6 @@ suite "Onchain group manager":
     ).valueOr:
       raiseAssert $error

-    # verify the proof (should be true)
     let verified = manager.verifyProof(messageBytes, validProof).valueOr:
       raiseAssert $error

@@ -422,31 +394,23 @@ suite "Onchain group manager":
   asyncTest "verifyProof: should reject invalid proof":
     (await manager.init()).isOkOr:
       raiseAssert $error
-    (await manager.startGroupSync()).isOkOr:
-      raiseAssert $error

     let idCredential = generateCredentials(manager.rlnInstance)

     try:
-      await manager.register(
-        RateCommitment(
-          idCommitment: idCredential.idCommitment, userMessageLimit: UserMessageLimit(1)
-        )
-      )
+      await manager.register(idCredential, UserMessageLimit(1))
     except Exception, CatchableError:
       assert false,
         "exception raised when calling startGroupSync: " & getCurrentExceptionMsg()

-    let idCredential2 = generateCredentials(manager.rlnInstance)
-
-    ## Assume the registration occured out of band
-    manager.idCredentials = some(idCredential2)
-    manager.membershipIndex = some(MembershipIndex(0))
-    manager.userMessageLimit = some(UserMessageLimit(1))

     let messageBytes = "Hello".toBytes()

-    # prepare the epoch
+    let rootUpdated = await manager.updateRoots()
+
+    manager.merkleProofCache = newSeq[byte](640)
+    for i in 0 ..< 640:
+      manager.merkleProofCache[i] = byte(rand(255))
+
     let epoch = default(Epoch)
     debug "epoch in bytes", epochHex = epoch.inHex()

@@ -466,8 +430,8 @@ suite "Onchain group manager":
     check:
       verified == false

-  asyncTest "backfillRootQueue: should backfill roots in event of chain reorg":
-    const credentialCount = 6
+  asyncTest "root queue should be updated correctly":
+    const credentialCount = 12
     let credentials = generateCredentials(manager.rlnInstance, credentialCount)
     (await manager.init()).isOkOr:
       raiseAssert $error
@@ -493,33 +457,17 @@ suite "Onchain group manager":

     try:
       manager.onRegister(generateCallback(futures, credentials))
-      (await manager.startGroupSync()).isOkOr:
-        raiseAssert $error

       for i in 0 ..< credentials.len():
         await manager.register(credentials[i], UserMessageLimit(1))
+        discard await manager.updateRoots()
     except Exception, CatchableError:
       assert false, "exception raised: " & getCurrentExceptionMsg()

     await allFutures(futures)

-    # At this point, we should have a full root queue, 5 roots, and partial buffer of 1 root
     check:
-      manager.validRoots.len() == credentialCount - 1
-      manager.validRootBuffer.len() == 1
-
-    # We can now simulate a chain reorg by calling backfillRootQueue
-    let expectedLastRoot = manager.validRootBuffer[0]
-    try:
-      await manager.backfillRootQueue(1)
-    except Exception, CatchableError:
-      assert false, "exception raised: " & getCurrentExceptionMsg()
-
-    # We should now have 5 roots in the queue, and no partial buffer
-    check:
-      manager.validRoots.len() == credentialCount - 1
-      manager.validRootBuffer.len() == 0
-      manager.validRoots[credentialCount - 2] == expectedLastRoot
+      manager.validRoots.len() == credentialCount

   asyncTest "isReady should return false if ethRpc is none":
     (await manager.init()).isOkOr:
@@ -536,25 +484,9 @@ suite "Onchain group manager":
     check:
       isReady == false

-  asyncTest "isReady should return false if lastSeenBlockHead > lastProcessed":
-    (await manager.init()).isOkOr:
-      raiseAssert $error
-
-    var isReady = true
-    try:
-      isReady = await manager.isReady()
-    except Exception, CatchableError:
-      assert false, "exception raised: " & getCurrentExceptionMsg()
-
-    check:
-      isReady == false
-
   asyncTest "isReady should return true if ethRpc is ready":
     (await manager.init()).isOkOr:
       raiseAssert $error
-    # node can only be ready after group sync is done
-    (await manager.startGroupSync()).isOkOr:
-      raiseAssert $error

     var isReady = false
     try:
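For test authors migrating off the removed API, a small before/after sketch in Nim (nothing beyond what the hunks above already show; `manager` is an initialized OnchainGroupManager inside an async test):

# before: event-driven group sync
#   (await manager.startGroupSync()).isOkOr:
#     raiseAssert $error
# after: poll-based root tracking, bounded in tests with a timeout
discard await withTimeout(trackRootChanges(manager), 15.seconds)
# ...or refresh the root window once per registration:
discard await manager.updateRoots()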
@@ -529,7 +529,6 @@ procSuite "WakuNode - RLN relay":

   xasyncTest "clearNullifierLog: should clear epochs > MaxEpochGap":
     ## This is skipped because is flaky and made CI randomly fail but is useful to run manually
-
     # Given two nodes
     let
       contentTopic = ContentTopic("/waku/2/default-content/proto")

vendor/zerokit (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit b9d27039c3266af108882d7a8bafc37400d29855
+Subproject commit ba467d370c56b7432522227de22fbd664d44ef3e

@@ -27,9 +27,6 @@ proc inHex*(
     valueHex = "0" & valueHex
   return toLowerAscii(valueHex)

-proc toUserMessageLimit*(v: UInt256): UserMessageLimit =
-  return cast[UserMessageLimit](v)
-
 proc encodeLengthPrefix*(input: openArray[byte]): seq[byte] =
   ## returns length prefixed version of the input
   ## with the following format [len<8>|input<var>]
@@ -78,6 +75,31 @@ proc serialize*(
   )
   return output

+proc serialize*(witness: RLNWitnessInput): seq[byte] =
+  ## Serializes the RLN witness into a byte array following zerokit's expected format.
+  ## The serialized format includes:
+  ## - identity_secret (32 bytes, little-endian with zero padding)
+  ## - user_message_limit (32 bytes, little-endian with zero padding)
+  ## - message_id (32 bytes, little-endian with zero padding)
+  ## - merkle tree depth (8 bytes, little-endian) = path_elements.len / 32
+  ## - path_elements (each 32 bytes, ordered bottom-to-top)
+  ## - merkle tree depth again (8 bytes, little-endian)
+  ## - identity_path_index (sequence of bits as bytes, 0 = left, 1 = right)
+  ## - x (32 bytes, little-endian with zero padding)
+  ## - external_nullifier (32 bytes, little-endian with zero padding)
+  var buffer: seq[byte]
+  buffer.add(@(witness.identity_secret))
+  buffer.add(@(witness.user_message_limit))
+  buffer.add(@(witness.message_id))
+  buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian))
+  for element in witness.path_elements:
+    buffer.add(element)
+  buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian))
+  buffer.add(witness.identity_path_index)
+  buffer.add(@(witness.x))
+  buffer.add(@(witness.external_nullifier))
+  return buffer
+
 proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
   ## a private proc to convert RateLimitProof and data to a byte seq
   ## this conversion is used in the proof verification proc
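For orientation, a quick size check of the witness layout documented above. This is a sketch only: `expectedWitnessLen` is a hypothetical helper, and depth 20 is assumed because the on-chain group manager below derives a 20-level path (640-byte proof cache).

proc expectedWitnessLen(depth: int): int =
  result = 3 * 32            # identity_secret + user_message_limit + message_id
  result += 8 + depth * 32   # tree depth prefix + path_elements
  result += 8 + depth        # tree depth again + identity_path_index bits
  result += 32 + 32          # x + external_nullifier

doAssert expectedWitnessLen(20) == 836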
@@ -133,3 +155,25 @@ func `+`*(a, b: Quantity): Quantity {.borrow.}

 func u256*(n: Quantity): UInt256 {.inline.} =
   n.uint64.stuint(256)
+
+proc uint64ToField*(n: uint64): array[32, byte] =
+  var output: array[32, byte]
+  let bytes = toBytes(n, Endianness.littleEndian)
+  output[0 ..< bytes.len] = bytes
+  return output
+
+proc UInt256ToField*(v: UInt256): array[32, byte] =
+  return cast[array[32, byte]](v) # already doesn't use `result`
+
+proc seqToField*(s: seq[byte]): array[32, byte] =
+  var output: array[32, byte]
+  let len = min(s.len, 32)
+  for i in 0 ..< len:
+    output[i] = s[i]
+  return output
+
+proc uint64ToIndex*(index: MembershipIndex, depth: int): seq[byte] =
+  var output = newSeq[byte](depth)
+  for i in 0 ..< depth:
+    output[i] = byte((index shr i) and 1) # LSB-first bit decomposition
+  return output
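A small worked example of the LSB-first decomposition performed by `uint64ToIndex` (illustrative values only): membership index 5 is 0b101, so at depth 4 the path bits come out lowest bit first.

doAssert uint64ToIndex(MembershipIndex(5), 4) == @[byte 1, 0, 1, 0]
doAssert uint64ToIndex(MembershipIndex(0), 3) == @[byte 0, 0, 0]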
@@ -145,7 +145,6 @@ method validateRoot*(
     g: GroupManager, root: MerkleNode
 ): bool {.base, gcsafe, raises: [].} =
   ## validates the root against the valid roots queue
-  # Check if the root is in the valid roots queue
   if g.indexOfRoot(root) >= 0:
     return true
   return false
@@ -175,7 +174,7 @@ method verifyProof*(

 method generateProof*(
     g: GroupManager,
-    data: openArray[byte],
+    data: seq[byte],
     epoch: Epoch,
     messageId: MessageId,
     rlnIdentifier = DefaultRlnIdentifier,
@@ -189,6 +188,7 @@ method generateProof*(
     return err("membership index is not set")
   if g.userMessageLimit.isNone():
     return err("user message limit is not set")
+
   waku_rln_proof_generation_duration_seconds.nanosecondTime:
     let proof = proofGen(
       rlnInstance = g.rlnInstance,
@@ -201,8 +201,6 @@ method generateProof*(
     ).valueOr:
       return err("proof generation failed: " & $error)

-  waku_rln_remaining_proofs_per_epoch.dec()
-  waku_rln_total_generated_proofs.inc()
   return ok(proof)

 method isReady*(g: GroupManager): Future[bool] {.base, async.} =
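Because `generateProof` now takes `data: seq[byte]` rather than `openArray[byte]`, callers must hand over a materialized seq. A minimal call-site sketch, assuming an in-scope `manager` as in the tests above:

let messageBytes: seq[byte] = "Hello".toBytes()
let proofRes = manager.generateProof(
  data = messageBytes, epoch = default(Epoch), messageId = MessageId(1)
)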
@@ -10,19 +10,18 @@ import
   nimcrypto/keccak as keccak,
   stint,
   json,
-  std/tables,
+  std/[strutils, tables, algorithm],
   stew/[byteutils, arrayops],
-  sequtils,
-  strutils
+  sequtils
 import
   ../../../waku_keystore,
   ../../rln,
+  ../../rln/rln_interface,
   ../../conversion_utils,
   ../group_manager_base,
   ./retry_wrapper

-from strutils import parseHexInt
-
 export group_manager_base

 logScope:
@@ -31,19 +30,23 @@ logScope:
 # using the when predicate does not work within the contract macro, hence need to dupe
 contract(WakuRlnContract):
   # this serves as an entrypoint into the rln membership set
-  proc register(idCommitment: UInt256, userMessageLimit: EthereumUInt32)
+  proc register(idCommitment: UInt256, userMessageLimit: UInt32)
   # Initializes the implementation contract (only used in unit tests)
   proc initialize(maxMessageLimit: UInt256)
   # this event is raised when a new member is registered
-  proc MemberRegistered(rateCommitment: UInt256, index: EthereumUInt32) {.event.}
+  proc MemberRegistered(rateCommitment: UInt256, index: UInt32) {.event.}
   # this function denotes existence of a given user
-  proc memberExists(idCommitment: Uint256): UInt256 {.view.}
+  proc memberExists(idCommitment: UInt256): UInt256 {.view.}
   # this constant describes the next index of a new member
   proc commitmentIndex(): UInt256 {.view.}
   # this constant describes the block number this contract was deployed on
   proc deployedBlockNumber(): UInt256 {.view.}
   # this constant describes max message limit of rln contract
   proc MAX_MESSAGE_LIMIT(): UInt256 {.view.}
+  # this function returns the merkleProof for a given index
+  # proc merkleProofElements(index: UInt40): seq[byte] {.view.}
+  # this function returns the merkle root
+  proc root(): UInt256 {.view.}

 type
   WakuRlnContractWithSender = Sender[WakuRlnContract]
|||||||
ethPrivateKey*: Option[string]
|
ethPrivateKey*: Option[string]
|
||||||
ethContractAddress*: string
|
ethContractAddress*: string
|
||||||
ethRpc*: Option[Web3]
|
ethRpc*: Option[Web3]
|
||||||
rlnContractDeployedBlockNumber*: BlockNumber
|
|
||||||
wakuRlnContract*: Option[WakuRlnContractWithSender]
|
wakuRlnContract*: Option[WakuRlnContractWithSender]
|
||||||
latestProcessedBlock*: BlockNumber
|
|
||||||
registrationTxHash*: Option[TxHash]
|
registrationTxHash*: Option[TxHash]
|
||||||
chainId*: uint
|
chainId*: uint
|
||||||
keystorePath*: Option[string]
|
keystorePath*: Option[string]
|
||||||
keystorePassword*: Option[string]
|
keystorePassword*: Option[string]
|
||||||
registrationHandler*: Option[RegistrationHandler]
|
registrationHandler*: Option[RegistrationHandler]
|
||||||
# this buffer exists to backfill appropriate roots for the merkle tree,
|
latestProcessedBlock*: BlockNumber
|
||||||
# in event of a reorg. we store 5 in the buffer. Maybe need to revisit this,
|
merkleProofCache*: seq[byte]
|
||||||
# because the average reorg depth is 1 to 2 blocks.
|
|
||||||
validRootBuffer*: Deque[MerkleNode]
|
|
||||||
# interval loop to shut down gracefully
|
|
||||||
blockFetchingActive*: bool
|
|
||||||
|
|
||||||
const DefaultKeyStorePath* = "rlnKeystore.json"
|
|
||||||
const DefaultKeyStorePassword* = "password"
|
|
||||||
|
|
||||||
const DefaultBlockPollRate* = 6.seconds
|
|
||||||
|
|
||||||
template initializedGuard(g: OnchainGroupManager): untyped =
|
|
||||||
if not g.initialized:
|
|
||||||
raise newException(CatchableError, "OnchainGroupManager is not initialized")
|
|
||||||
|
|
||||||
proc resultifiedInitGuard(g: OnchainGroupManager): GroupManagerResult[void] =
|
|
||||||
try:
|
|
||||||
initializedGuard(g)
|
|
||||||
return ok()
|
|
||||||
except CatchableError:
|
|
||||||
return err("OnchainGroupManager is not initialized")
|
|
||||||
|
|
||||||
template retryWrapper(
|
|
||||||
g: OnchainGroupManager, res: auto, errStr: string, body: untyped
|
|
||||||
): auto =
|
|
||||||
retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction):
|
|
||||||
body
|
|
||||||
|
|
||||||
proc setMetadata*(
|
proc setMetadata*(
|
||||||
g: OnchainGroupManager, lastProcessedBlock = none(BlockNumber)
|
g: OnchainGroupManager, lastProcessedBlock = none(BlockNumber)
|
||||||
@@ -112,33 +87,109 @@ proc setMetadata*(
     return err("failed to persist rln metadata: " & getCurrentExceptionMsg())
   return ok()

-method atomicBatch*(
-    g: OnchainGroupManager,
-    start: MembershipIndex,
-    rateCommitments = newSeq[RawRateCommitment](),
-    toRemoveIndices = newSeq[MembershipIndex](),
-): Future[void] {.async: (raises: [Exception]), base.} =
-  initializedGuard(g)
+proc fetchMerkleProofElements*(
+    g: OnchainGroupManager
+): Future[Result[seq[byte], string]] {.async.} =
+  try:
+    let membershipIndex = g.membershipIndex.get()
+    let index40 = stuint(membershipIndex, 40)

-  waku_rln_membership_insertion_duration_seconds.nanosecondTime:
-    let operationSuccess =
-      g.rlnInstance.atomicWrite(some(start), rateCommitments, toRemoveIndices)
-  if not operationSuccess:
-    raise newException(CatchableError, "atomic batch operation failed")
-  # TODO: when slashing is enabled, we need to track slashed members
-  waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
+    let methodSig = "merkleProofElements(uint40)"
+    let methodIdDigest = keccak.keccak256.digest(methodSig)
+    let methodId = methodIdDigest.data[0 .. 3]

-  if g.registerCb.isSome():
-    var membersSeq = newSeq[Membership]()
-    for i in 0 ..< rateCommitments.len:
-      var index = start + MembershipIndex(i)
-      debug "registering member to callback",
-        rateCommitment = rateCommitments[i], index = index
-      let member = Membership(rateCommitment: rateCommitments[i], index: index)
-      membersSeq.add(member)
-    await g.registerCb.get()(membersSeq)
+    var paddedParam = newSeq[byte](32)
+    let indexBytes = index40.toBytesBE()
+    for i in 0 ..< min(indexBytes.len, paddedParam.len):
+      paddedParam[paddedParam.len - indexBytes.len + i] = indexBytes[i]

-  g.validRootBuffer = g.slideRootQueue()
+    var callData = newSeq[byte]()
+    for b in methodId:
+      callData.add(b)
+    callData.add(paddedParam)
+
+    var tx: TransactionArgs
+    tx.to = Opt.some(fromHex(Address, g.ethContractAddress))
+    tx.data = Opt.some(callData)
+
+    let responseBytes = await g.ethRpc.get().provider.eth_call(tx, "latest")
+
+    return ok(responseBytes)
+  except CatchableError:
+    error "Failed to fetch Merkle proof elements", error = getCurrentExceptionMsg()
+    return err("Failed to fetch merkle proof elements: " & getCurrentExceptionMsg())
+
+proc fetchMerkleRoot*(
+    g: OnchainGroupManager
+): Future[Result[UInt256, string]] {.async.} =
+  try:
+    let merkleRootInvocation = g.wakuRlnContract.get().root()
+    let merkleRoot = await merkleRootInvocation.call()
+    return ok(merkleRoot)
+  except CatchableError:
+    error "Failed to fetch Merkle root", error = getCurrentExceptionMsg()
+    return err("Failed to fetch merkle root: " & getCurrentExceptionMsg())
+
+template initializedGuard(g: OnchainGroupManager): untyped =
+  if not g.initialized:
+    raise newException(CatchableError, "OnchainGroupManager is not initialized")
+
+template retryWrapper(
+    g: OnchainGroupManager, res: auto, errStr: string, body: untyped
+): auto =
+  retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction):
+    body
+
+method validateRoot*(g: OnchainGroupManager, root: MerkleNode): bool =
+  if g.validRoots.find(root) >= 0:
+    return true
+  return false
+
+proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} =
+  let rootRes = await g.fetchMerkleRoot()
+  if rootRes.isErr():
+    return false
+
+  let merkleRoot = UInt256ToField(rootRes.get())
+  if g.validRoots.len == 0:
+    g.validRoots.addLast(merkleRoot)
+    return true
+
+  if g.validRoots[g.validRoots.len - 1] != merkleRoot:
+    if g.validRoots.len > AcceptableRootWindowSize:
+      discard g.validRoots.popFirst()
+    g.validRoots.addLast(merkleRoot)
+    return true
+
+  return false
+
+proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError]).} =
+  try:
+    initializedGuard(g)
+    let ethRpc = g.ethRpc.get()
+    let wakuRlnContract = g.wakuRlnContract.get()
+
+    const rpcDelay = 5.seconds
+
+    while true:
+      let rootUpdated = await g.updateRoots()
+
+      if rootUpdated:
+        if g.membershipIndex.isNone():
+          error "membershipIndex is not set; skipping proof update"
+        else:
+          let proofResult = await g.fetchMerkleProofElements()
+          if proofResult.isErr():
+            error "Failed to fetch Merkle proof", error = proofResult.error
+          g.merkleProofCache = proofResult.get()
+
+      # also need update registerd membership
+      let memberCount = cast[int64](await wakuRlnContract.commitmentIndex().call())
+      waku_rln_number_registered_memberships.set(float64(memberCount))
+
+      await sleepAsync(rpcDelay)
+  except CatchableError:
+    error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg()

 method register*(
     g: OnchainGroupManager, rateCommitment: RateCommitment
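A minimal wiring sketch for the new polling flow. Assumptions: `manager` is an initialized OnchainGroupManager and the code runs inside an async proc; this mirrors what the updated tests above do rather than prescribing an API.

(await manager.init()).isOkOr:
  raiseAssert $error

# fire-and-forget polling, as in "trackRootChanges: start tracking roots"
discard manager.trackRootChanges()

# ...or drive a single refresh by hand, as the register/validateRoot tests do
if await manager.updateRoots():
  manager.merkleProofCache = (await manager.fetchMerkleProofElements()).valueOr:
    raiseAssert error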
@@ -147,18 +198,14 @@ method register*(

   try:
     let leaf = rateCommitment.toLeaf().get()
-    await g.registerBatch(@[leaf])
+    if g.registerCb.isSome():
+      let idx = g.latestIndex
+      debug "registering member via callback", rateCommitment = leaf, index = idx
+      await g.registerCb.get()(@[Membership(rateCommitment: leaf, index: idx)])
+    g.latestIndex.inc()
   except CatchableError:
     raise newException(ValueError, getCurrentExceptionMsg())

-method registerBatch*(
-    g: OnchainGroupManager, rateCommitments: seq[RawRateCommitment]
-): Future[void] {.async: (raises: [Exception]).} =
-  initializedGuard(g)
-
-  await g.atomicBatch(g.latestIndex, rateCommitments)
-  g.latestIndex += MembershipIndex(rateCommitments.len)
-
 method register*(
     g: OnchainGroupManager,
     identityCredential: IdentityCredential,
@@ -212,8 +259,19 @@ method register*(
   debug "parsed membershipIndex", membershipIndex
   g.userMessageLimit = some(userMessageLimit)
   g.membershipIndex = some(membershipIndex.toMembershipIndex())
+  g.idCredentials = some(identityCredential)
+
+  let rateCommitment = RateCommitment(
+      idCommitment: identityCredential.idCommitment, userMessageLimit: userMessageLimit
+    )
+    .toLeaf()
+    .get()
+
+  if g.registerCb.isSome():
+    let member = Membership(rateCommitment: rateCommitment, index: g.latestIndex)
+    await g.registerCb.get()(@[member])
+  g.latestIndex.inc()

-  # don't handle member insertion into the tree here, it will be handled by the event listener
   return

 method withdraw*(
@@ -226,304 +284,173 @@ method withdrawBatch*(
 ): Future[void] {.async: (raises: [Exception]).} =
   initializedGuard(g)

-  # TODO: after slashing is enabled on the contract, use atomicBatch internally
+proc getRootFromProofAndIndex(
+    g: OnchainGroupManager, elements: seq[byte], bits: seq[byte]
+): GroupManagerResult[array[32, byte]] =
+  # this is a helper function to get root from merkle proof elements and index
+  # it's currently not used anywhere, but can be used to verify the root from the proof and index
+  # Compute leaf hash from idCommitment and messageLimit
+  let messageLimitField = uint64ToField(g.userMessageLimit.get())
+  let leafHashRes = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField])
+  if leafHashRes.isErr():
+    return err("Failed to compute leaf hash: " & leafHashRes.error)

-proc parseEvent(
-    event: type MemberRegistered, log: JsonNode
-): GroupManagerResult[Membership] =
-  ## parses the `data` parameter of the `MemberRegistered` event `log`
-  ## returns an error if it cannot parse the `data` parameter
-  var rateCommitment: UInt256
-  var index: UInt256
-  var data: seq[byte]
-  try:
-    data = hexToSeqByte(log["data"].getStr())
-  except ValueError:
-    return err(
-      "failed to parse the data field of the MemberRegistered event: " &
-        getCurrentExceptionMsg()
-    )
-  var offset = 0
-  try:
-    # Parse the rateCommitment
-    offset += decode(data, 0, offset, rateCommitment)
-    # Parse the index
-    offset += decode(data, 0, offset, index)
-    return ok(
-      Membership(
-        rateCommitment: rateCommitment.toRateCommitment(),
-        index: index.toMembershipIndex(),
-      )
-    )
-  except CatchableError:
-    return err("failed to parse the data field of the MemberRegistered event")
+  var hash = leafHashRes.get()
+  for i in 0 ..< bits.len:
+    let sibling = elements[i * 32 .. (i + 1) * 32 - 1]

-type BlockTable* = OrderedTable[BlockNumber, seq[(Membership, bool)]]
+    let hashRes =
+      if bits[i] == 0:
+        poseidon(@[@hash, sibling])
+      else:
+        poseidon(@[sibling, @hash])

-proc backfillRootQueue*(
-    g: OnchainGroupManager, len: uint
-): Future[void] {.async: (raises: [Exception]).} =
-  if len > 0:
-    # backfill the tree's acceptable roots
-    for i in 0 .. len - 1:
-      # remove the last root
-      g.validRoots.popLast()
-    for i in 0 .. len - 1:
-      # add the backfilled root
-      g.validRoots.addLast(g.validRootBuffer.popLast())
+    hash = hashRes.valueOr:
+      return err("Failed to compute poseidon hash: " & error)
+    hash = hashRes.get()

-proc insert(
-    blockTable: var BlockTable,
-    blockNumber: BlockNumber,
-    member: Membership,
-    removed: bool,
-) =
-  let memberTuple = (member, removed)
-  if blockTable.hasKeyOrPut(blockNumber, @[memberTuple]):
-    try:
-      blockTable[blockNumber].add(memberTuple)
-    except KeyError: # qed
-      error "could not insert member into block table",
-        blockNumber = blockNumber, member = member
+  return ok(hash)

-proc getRawEvents(
-    g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber
-): Future[JsonNode] {.async: (raises: [Exception]).} =
-  initializedGuard(g)
+method generateProof*(
+    g: OnchainGroupManager,
+    data: seq[byte],
+    epoch: Epoch,
+    messageId: MessageId,
+    rlnIdentifier = DefaultRlnIdentifier,
+): GroupManagerResult[RateLimitProof] {.gcsafe, raises: [].} =
+  ## Generates an RLN proof using the cached Merkle proof and custom witness
+  # Ensure identity credentials and membership index are set
+  if g.idCredentials.isNone():
+    return err("identity credentials are not set")
+  if g.membershipIndex.isNone():
+    return err("membership index is not set")
+  if g.userMessageLimit.isNone():
+    return err("user message limit is not set")

-  let ethRpc = g.ethRpc.get()
-  let wakuRlnContract = g.wakuRlnContract.get()
+  if (g.merkleProofCache.len mod 32) != 0:
+    return err("Invalid merkle proof cache length")

-  var eventStrs: seq[JsonString]
-  g.retryWrapper(eventStrs, "Failed to get the events"):
-    await wakuRlnContract.getJsonLogs(
-      MemberRegistered,
-      fromBlock = Opt.some(fromBlock.blockId()),
-      toBlock = Opt.some(toBlock.blockId()),
-    )
+  let identity_secret = seqToField(g.idCredentials.get().idSecretHash)
+  let user_message_limit = uint64ToField(g.userMessageLimit.get())
+  let message_id = uint64ToField(messageId)
+  var path_elements = newSeq[byte](0)

-  var events = newJArray()
-  for eventStr in eventStrs:
-    events.add(parseJson(eventStr.string))
-  return events
+  if (g.merkleProofCache.len mod 32) != 0:
+    return err("Invalid merkle proof cache length")

-proc getBlockTable(
-    g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber
-): Future[BlockTable] {.async: (raises: [Exception]).} =
-  initializedGuard(g)
+  let identity_path_index = uint64ToIndex(g.membershipIndex.get(), 20)
+  for i in 0 ..< g.merkleProofCache.len div 32:
+    let chunk = g.merkleProofCache[i * 32 .. (i + 1) * 32 - 1]
+    path_elements.add(chunk.reversed())

-  var blockTable = default(BlockTable)
+  let x = keccak.keccak256.digest(data)

-  let events = await g.getRawEvents(fromBlock, toBlock)
+  let extNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr:
+    return err("Failed to compute external nullifier: " & error)

-  if events.len == 0:
-    trace "no events found"
-    return blockTable
+  let witness = RLNWitnessInput(
+    identity_secret: identity_secret,
+    user_message_limit: user_message_limit,
+    message_id: message_id,
+    path_elements: path_elements,
+    identity_path_index: identity_path_index,
+    x: x,
+    external_nullifier: extNullifier,
+  )

-  for event in events:
-    let blockNumber = parseHexInt(event["blockNumber"].getStr()).BlockNumber
-    let removed = event["removed"].getBool()
-    let parsedEventRes = parseEvent(MemberRegistered, event)
-    if parsedEventRes.isErr():
-      error "failed to parse the MemberRegistered event", error = parsedEventRes.error()
-      raise newException(ValueError, "failed to parse the MemberRegistered event")
-    let parsedEvent = parsedEventRes.get()
-    blockTable.insert(blockNumber, parsedEvent, removed)
+  let serializedWitness = serialize(witness)

-  return blockTable
+  var input_witness_buffer = toBuffer(serializedWitness)

-proc handleEvents(
-    g: OnchainGroupManager, blockTable: BlockTable
-): Future[void] {.async: (raises: [Exception]).} =
-  initializedGuard(g)
+  # Generate the proof using the zerokit API
+  var output_witness_buffer: Buffer
+  let witness_success = generate_proof_with_witness(
+    g.rlnInstance, addr input_witness_buffer, addr output_witness_buffer
+  )

-  for blockNumber, members in blockTable.pairs():
-    try:
-      let startIndex = blockTable[blockNumber].filterIt(not it[1])[0][0].index
-      let removalIndices = members.filterIt(it[1]).mapIt(it[0].index)
-      let rateCommitments = members.mapIt(it[0].rateCommitment)
-      await g.atomicBatch(
-        start = startIndex,
-        rateCommitments = rateCommitments,
-        toRemoveIndices = removalIndices,
-      )
-      g.latestIndex = startIndex + MembershipIndex(rateCommitments.len)
-      trace "new members added to the Merkle tree",
-        commitments = rateCommitments.mapIt(it.inHex)
-    except CatchableError:
-      error "failed to insert members into the tree", error = getCurrentExceptionMsg()
-      raise newException(ValueError, "failed to insert members into the tree")
+  if not witness_success:
+    return err("Failed to generate proof")

-  return
+  # Parse the proof into a RateLimitProof object
+  var proofValue = cast[ptr array[320, byte]](output_witness_buffer.`ptr`)
+  let proofBytes: array[320, byte] = proofValue[]

-proc handleRemovedEvents(
-    g: OnchainGroupManager, blockTable: BlockTable
-): Future[void] {.async: (raises: [Exception]).} =
-  initializedGuard(g)
+  ## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]
+  let
+    proofOffset = 128
+    rootOffset = proofOffset + 32
+    externalNullifierOffset = rootOffset + 32
+    shareXOffset = externalNullifierOffset + 32
+    shareYOffset = shareXOffset + 32
+    nullifierOffset = shareYOffset + 32

-  # count number of blocks that have been removed
-  var numRemovedBlocks: uint = 0
-  for blockNumber, members in blockTable.pairs():
-    if members.anyIt(it[1]):
-      numRemovedBlocks += 1
+  var
+    zkproof: ZKSNARK
+    proofRoot, shareX, shareY: MerkleNode
+    externalNullifier: ExternalNullifier
+    nullifier: Nullifier

-  await g.backfillRootQueue(numRemovedBlocks)
+  discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1])
+  discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1])
+  discard
+    externalNullifier.copyFrom(proofBytes[rootOffset .. externalNullifierOffset - 1])
+  discard shareX.copyFrom(proofBytes[externalNullifierOffset .. shareXOffset - 1])
+  discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1])
+  discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1])

-proc getAndHandleEvents(
-    g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber
-): Future[bool] {.async: (raises: [Exception]).} =
-  initializedGuard(g)
-  let blockTable = await g.getBlockTable(fromBlock, toBlock)
-  try:
-    await g.handleEvents(blockTable)
-    await g.handleRemovedEvents(blockTable)
-  except CatchableError:
-    error "failed to handle events", error = getCurrentExceptionMsg()
-    raise newException(ValueError, "failed to handle events")
+  # Create the RateLimitProof object
+  let output = RateLimitProof(
+    proof: zkproof,
+    merkleRoot: proofRoot,
+    externalNullifier: externalNullifier,
+    epoch: epoch,
+    rlnIdentifier: rlnIdentifier,
+    shareX: shareX,
+    shareY: shareY,
+    nullifier: nullifier,
+  )

-  g.latestProcessedBlock = toBlock
-  return true
+  debug "Proof generated successfully"

-proc runInInterval(g: OnchainGroupManager, cb: proc, interval: Duration) =
-  g.blockFetchingActive = false
+  waku_rln_remaining_proofs_per_epoch.dec()
+  waku_rln_total_generated_proofs.inc()
+  return ok(output)

-  proc runIntervalLoop() {.async, gcsafe.} =
-    g.blockFetchingActive = true
+method verifyProof*(
+    g: OnchainGroupManager, # verifier context
+    input: seq[byte], # raw message data (signal)
+    proof: RateLimitProof, # proof received from the peer
+): GroupManagerResult[bool] {.gcsafe, raises: [].} =
+  ## -- Verifies an RLN rate-limit proof against the set of valid Merkle roots --

-    while g.blockFetchingActive:
-      var retCb: bool
-      g.retryWrapper(retCb, "Failed to run the interval block fetching loop"):
-        await cb()
-      await sleepAsync(interval)
+  var normalizedProof = proof

-  # using asyncSpawn is OK here since
-  # we make use of the error handling provided by
-  # OnFatalErrorHandler
-  asyncSpawn runIntervalLoop()
+  normalizedProof.externalNullifier = poseidon(
+    @[@(proof.epoch), @(proof.rlnIdentifier)]
+  ).valueOr:
+    return err("Failed to compute external nullifier: " & error)

-proc getNewBlockCallback(g: OnchainGroupManager): proc =
-  let ethRpc = g.ethRpc.get()
-  proc wrappedCb(): Future[bool] {.async, gcsafe.} =
-    var latestBlock: BlockNumber
-    g.retryWrapper(latestBlock, "Failed to get the latest block number"):
-      cast[BlockNumber](await ethRpc.provider.eth_blockNumber())
+  let proofBytes = serialize(normalizedProof, input)
+  let proofBuffer = proofBytes.toBuffer()

-    if latestBlock <= g.latestProcessedBlock:
-      return
-    # get logs from the last block
-    # inc by 1 to prevent double processing
-    let fromBlock = g.latestProcessedBlock + 1
-    var handleBlockRes: bool
-    g.retryWrapper(handleBlockRes, "Failed to handle new block"):
-      await g.getAndHandleEvents(fromBlock, latestBlock)
+  let rootsBytes = serialize(g.validRoots.items().toSeq())
+  let rootsBuffer = rootsBytes.toBuffer()

-    # cannot use isOkOr here because results in a compile-time error that
-    # shows the error is void for some reason
-    let setMetadataRes = g.setMetadata()
-    if setMetadataRes.isErr():
-      error "failed to persist rln metadata", error = setMetadataRes.error
+  var validProof: bool # out-param
+  let ffiOk = verify_with_roots(
+    g.rlnInstance, # RLN context created at init()
+    addr proofBuffer, # (proof + signal)
+    addr rootsBuffer, # valid Merkle roots
+    addr validProof # will be set by the FFI call
+    ,
+  )

-    return handleBlockRes
+  if not ffiOk:
+    return err("could not verify the proof")
+  else:
+    trace "Proof verified successfully !"

-  return wrappedCb
+  return ok(validProof)
|
|
||||||
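The verifyProof method above validates an incoming RateLimitProof against g.validRoots, a window of recently observed on-chain Merkle roots, rather than against a locally maintained tree. A minimal, self-contained sketch of that root-window idea follows; the window size, type names and helper names are illustrative assumptions, not this module's API.

import std/[deques, sequtils]

const AcceptableRootWindowSize = 5 # assumed window size, for illustration only

type MerkleRoot = array[32, byte]

proc updateRoots(validRoots: var Deque[MerkleRoot], newRoot: MerkleRoot) =
  ## Keep only the most recent roots; older roots fall out of the window.
  validRoots.addLast(newRoot)
  while validRoots.len > AcceptableRootWindowSize:
    validRoots.popFirst()

proc isAcceptedRoot(validRoots: Deque[MerkleRoot], root: MerkleRoot): bool =
  ## A proof is acceptable only while its root is still inside the window.
  validRoots.toSeq().anyIt(it == root)

when isMainModule:
  var roots = initDeque[MerkleRoot]()
  var r1, r2: MerkleRoot
  r1[0] = 1
  r2[0] = 2
  roots.updateRoots(r1)
  roots.updateRoots(r2)
  assert roots.isAcceptedRoot(r1)
  assert roots.isAcceptedRoot(r2)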
proc startListeningToEvents(
    g: OnchainGroupManager
): Future[void] {.async: (raises: [Exception]).} =
  initializedGuard(g)

  let ethRpc = g.ethRpc.get()
  let newBlockCallback = g.getNewBlockCallback()
  g.runInInterval(newBlockCallback, DefaultBlockPollRate)

proc batchAwaitBlockHandlingFuture(
    g: OnchainGroupManager, futs: seq[Future[bool]]
): Future[void] {.async: (raises: [Exception]).} =
  for fut in futs:
    try:
      var handleBlockRes: bool
      g.retryWrapper(handleBlockRes, "Failed to handle block"):
        await fut
    except CatchableError:
      raise newException(
        CatchableError, "could not fetch events from block: " & getCurrentExceptionMsg()
      )
proc startOnchainSync(
    g: OnchainGroupManager
): Future[void] {.async: (raises: [Exception]).} =
  initializedGuard(g)

  let ethRpc = g.ethRpc.get()

  # static block chunk size
  let blockChunkSize = 2_000.BlockNumber
  # delay between rpc calls to not overload the rate limit
  let rpcDelay = 200.milliseconds
  # max number of futures to run concurrently
  let maxFutures = 10

  var fromBlock: BlockNumber =
    if g.latestProcessedBlock > g.rlnContractDeployedBlockNumber:
      info "syncing from last processed block", blockNumber = g.latestProcessedBlock
      g.latestProcessedBlock + 1
    else:
      info "syncing from rln contract deployed block",
        blockNumber = g.rlnContractDeployedBlockNumber
      g.rlnContractDeployedBlockNumber

  var futs = newSeq[Future[bool]]()
  var currentLatestBlock: BlockNumber
  g.retryWrapper(currentLatestBlock, "Failed to get the latest block number"):
    cast[BlockNumber](await ethRpc.provider.eth_blockNumber())

  try:
    # we always want to sync from last processed block => latest
    # chunk events
    while true:
      # if the fromBlock is less than 2k blocks behind the current block
      # then fetch the new toBlock
      if fromBlock >= currentLatestBlock:
        break

      if fromBlock + blockChunkSize > currentLatestBlock:
        g.retryWrapper(currentLatestBlock, "Failed to get the latest block number"):
          cast[BlockNumber](await ethRpc.provider.eth_blockNumber())

      let toBlock = min(fromBlock + blockChunkSize, currentLatestBlock)
      debug "fetching events", fromBlock = fromBlock, toBlock = toBlock
      await sleepAsync(rpcDelay)
      futs.add(g.getAndHandleEvents(fromBlock, toBlock))
      if futs.len >= maxFutures or toBlock == currentLatestBlock:
        await g.batchAwaitBlockHandlingFuture(futs)
        g.setMetadata(lastProcessedBlock = some(toBlock)).isOkOr:
          error "failed to persist rln metadata", error = $error
        futs = newSeq[Future[bool]]()
      fromBlock = toBlock + 1
  except CatchableError:
    raise newException(
      CatchableError,
      "failed to get the history/reconcile missed blocks: " & getCurrentExceptionMsg(),
    )

  # listen to blockheaders and contract events
  try:
    await g.startListeningToEvents()
  except CatchableError:
    raise newException(
      ValueError, "failed to start listening to events: " & getCurrentExceptionMsg()
    )
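The removed startOnchainSync above walks the contract's event history in fixed-size chunks (blockChunkSize = 2_000) up to the chain head before switching to per-block polling. A small sketch of that chunking arithmetic, using plain uint64 values instead of the codebase's BlockNumber type:

proc blockChunks(fromBlock, latestBlock, chunkSize: uint64): seq[(uint64, uint64)] =
  ## Split [fromBlock, latestBlock] into inclusive (from, to) ranges spanning at
  ## most chunkSize + 1 blocks, similar to the chunking loop above.
  var start = fromBlock
  while start <= latestBlock:
    let stop = min(start + chunkSize, latestBlock)
    result.add((start, stop))
    start = stop + 1

when isMainModule:
  # e.g. contract deployed at block 10_000, chain head at 14_500, 2_000-block chunks
  let chunks = blockChunks(10_000, 14_500, 2_000)
  assert chunks ==
    @[(10_000'u64, 12_000'u64), (12_001'u64, 14_001'u64), (14_002'u64, 14_500'u64)]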
method startGroupSync*(
    g: OnchainGroupManager
): Future[GroupManagerResult[void]] {.async.} =
  ?resultifiedInitGuard(g)
  # Get archive history
  try:
    await startOnchainSync(g)
    return ok()
  except CatchableError, Exception:
    return err("failed to start group sync: " & getCurrentExceptionMsg())

method onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} =
  g.registerCb = some(cb)
@ -609,53 +536,27 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
  let metadata = metadataGetOptRes.get().get()
  if metadata.chainId != uint(g.chainId):
    return err("persisted data: chain id mismatch")

  if metadata.contractAddress != g.ethContractAddress.toLower():
    return err("persisted data: contract address mismatch")
  g.latestProcessedBlock = metadata.lastProcessedBlock.BlockNumber
  g.validRoots = metadata.validRoots.toDeque()

  var deployedBlockNumber: Uint256
  g.retryWrapper(
    deployedBlockNumber,
    "Failed to get the deployed block number. Have you set the correct contract address?",
  ):
    await wakuRlnContract.deployedBlockNumber().call()
  debug "using rln contract", deployedBlockNumber, rlnContractAddress = contractAddress
  g.rlnContractDeployedBlockNumber = cast[BlockNumber](deployedBlockNumber)
  g.latestProcessedBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber)
  g.rlnRelayMaxMessageLimit =
    cast[uint64](await wakuRlnContract.MAX_MESSAGE_LIMIT().call())

  proc onDisconnect() {.async.} =
    error "Ethereum client disconnected"
    let fromBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber)
    info "reconnecting with the Ethereum client, and restarting group sync",
      fromBlock = fromBlock
    var newEthRpc: Web3
    g.retryWrapper(newEthRpc, "Failed to reconnect with the Ethereum client"):
      await newWeb3(g.ethClientUrl)
    newEthRpc.ondisconnect = ethRpc.ondisconnect
    g.ethRpc = some(newEthRpc)

    try:
      await g.startOnchainSync()
    except CatchableError, Exception:
      g.onFatalErrorAction(
        "failed to restart group sync" & ": " & getCurrentExceptionMsg()
      )

  ethRpc.ondisconnect = proc() =
    asyncSpawn onDisconnect()

  waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
  g.initialized = true

  return ok()

method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} =
  g.blockFetchingActive = false

  if g.ethRpc.isSome():
    g.ethRpc.get().ondisconnect = nil
    await g.ethRpc.get().close()
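init above leans on the codebase's retryWrapper template for every RPC interaction (fetching the deployed block number, reconnecting the Web3 client). As a rough illustration of the retry-with-delay idea only, and not of the actual template, a minimal async helper could look like this; flakyRpc and the attempt counts are invented for the example.

import std/asyncdispatch

var calls = 0

proc flakyRpc(): Future[int] {.async.} =
  ## Stand-in for an RPC call that fails a couple of times before succeeding.
  inc calls
  if calls < 3:
    raise newException(IOError, "transient failure")
  return 42

proc fetchWithRetries(attempts: int, delayMs: int): Future[int] {.async.} =
  ## Retry the call a fixed number of times, sleeping between attempts,
  ## and give up with an error once the attempts are exhausted.
  for attempt in 1 .. attempts:
    try:
      return await flakyRpc()
    except CatchableError:
      await sleepAsync(delayMs)
  raise newException(ValueError, "operation failed after " & $attempts & " attempts")

when isMainModule:
  assert waitFor(fetchWithRetries(5, 10)) == 42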
@ -665,26 +566,13 @@ method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} =
  g.initialized = false

proc isSyncing*(g: OnchainGroupManager): Future[bool] {.async, gcsafe.} =
  let ethRpc = g.ethRpc.get()

  var syncing: SyncingStatus
  g.retryWrapper(syncing, "Failed to get the syncing status"):
    await ethRpc.provider.eth_syncing()
  return syncing.syncing

method isReady*(g: OnchainGroupManager): Future[bool] {.async.} =
  initializedGuard(g)

  if g.ethRpc.isNone():
    return false

  var currentBlock: BlockNumber
  g.retryWrapper(currentBlock, "Failed to get the current block number"):
    cast[BlockNumber](await g.ethRpc.get().provider.eth_blockNumber())

  # the node is still able to process messages if it is behind the latest block by a factor of the valid roots
  if u256(g.latestProcessedBlock.uint64) < (u256(currentBlock) - u256(g.validRoots.len)):
    return false

  return not (await g.isSyncing())

  if g.wakuRlnContract.isNone():
    return false

  return true
@ -85,6 +85,7 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger =
  var cumulativeProofsVerified = 0.float64
  var cumulativeProofsGenerated = 0.float64
  var cumulativeProofsRemaining = 100.float64
  var cumulativeRegisteredMember = 0.float64

  when defined(metrics):
    logMetrics = proc() =
@ -107,6 +108,9 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger =
      let freshProofsRemainingCount = parseAndAccumulate(
        waku_rln_remaining_proofs_per_epoch, cumulativeProofsRemaining
      )
      let freshRegisteredMemberCount = parseAndAccumulate(
        waku_rln_number_registered_memberships, cumulativeRegisteredMember
      )

      info "Total messages", count = freshMsgCount
      info "Total spam messages", count = freshSpamCount
@ -116,5 +120,6 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger =
      info "Total proofs verified", count = freshProofsVerifiedCount
      info "Total proofs generated", count = freshProofsGeneratedCount
      info "Total proofs remaining", count = freshProofsRemainingCount
      info "Total registered members", count = freshRegisteredMemberCount

  return logMetrics
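The logger keeps per-metric cumulative variables (cumulativeProofsGenerated, cumulativeRegisteredMember, and so on) and reports the running totals on every tick. A toy version of that accumulate-then-report pattern, deliberately independent of the actual parseAndAccumulate helper and of the metrics library:

type CumulativeCounter = object
  total: float64

proc accumulate(c: var CumulativeCounter, sample: float64): float64 =
  ## Fold the latest sample into the running total and return the new total.
  c.total += sample
  c.total

when isMainModule:
  var registeredMembers = CumulativeCounter()
  discard registeredMembers.accumulate(3)
  let fresh = registeredMembers.accumulate(2)
  echo "Total registered members ", fresh # 5.0
  assert fresh == 5.0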
@ -52,6 +52,20 @@ type RateLimitProof* = object
  ## the external nullifier used for the generation of the `proof` (derived from poseidon([epoch, rln_identifier]))
  externalNullifier*: ExternalNullifier

type UInt40* = StUint[40]
type UInt32* = StUint[32]

type
  Field = array[32, byte] # Field element representation (256 bits)

  RLNWitnessInput* = object
    identity_secret*: Field
    user_message_limit*: Field
    message_id*: Field
    path_elements*: seq[byte]
    identity_path_index*: seq[byte]
    x*: Field
    external_nullifier*: Field

type ProofMetadata* = object
  nullifier*: Nullifier
  shareX*: MerkleNode
@ -130,6 +130,21 @@ proc generate_proof*(
  ## integers wrapped in <> indicate value sizes in bytes
  ## the return bool value indicates the success or failure of the operation

proc generate_proof_with_witness*(
  ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer
): bool {.importc: "generate_rln_proof_with_witness".}
## rln-v2
## "witness" term refer to collection of secret inputs with proper serialization
## input_buffer has to be serialized as [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements<Vec<32>> | identity_path_index<Vec<1>> | x<32> | external_nullifier<32> ]
## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]
## rln-v1
## input_buffer has to be serialized as [ id_key<32> | path_elements<Vec<32>> | identity_path_index<Vec<1>> | x<32> | epoch<32> | rln_identifier<32> ]
## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]
## integers wrapped in <> indicate value sizes in bytes
## path_elements and identity_path_index serialize a merkle proof and are vectors of elements of 32 and 1 bytes respectively
## the return bool value indicates the success or failure of the operation

proc verify*(
  ctx: ptr RLN, proof_buffer: ptr Buffer, proof_is_valid_ptr: ptr bool
): bool {.importc: "verify_rln_proof".}
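generate_rln_proof_with_witness expects input_buffer in the flat rln-v2 layout documented above. A small sketch of producing that layout from an RLNWitnessInput-like object; Field and RLNWitnessInput here are local stand-ins mirroring the type added earlier, serializeWitness is illustrative rather than the module's API, and path_elements / identity_path_index are assumed to already carry zerokit's own vector framing since the object stores them as raw bytes.

type
  Field = array[32, byte]
  RLNWitnessInput = object
    identity_secret: Field
    user_message_limit: Field
    message_id: Field
    path_elements: seq[byte]
    identity_path_index: seq[byte]
    x: Field
    external_nullifier: Field

proc serializeWitness(w: RLNWitnessInput): seq[byte] =
  ## Flatten the witness in the documented order:
  ## [ identity_secret<32> | user_message_limit<32> | message_id<32> |
  ##   path_elements<Vec<32>> | identity_path_index<Vec<1>> | x<32> | external_nullifier<32> ]
  result.add w.identity_secret
  result.add w.user_message_limit
  result.add w.message_id
  result.add w.path_elements
  result.add w.identity_path_index
  result.add w.x
  result.add w.external_nullifier

when isMainModule:
  let w = RLNWitnessInput(
    path_elements: newSeq[byte](0), identity_path_index: newSeq[byte](0)
  )
  assert serializeWitness(w).len == 5 * 32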
@ -98,6 +98,7 @@ type WakuRLNRelay* = ref object of RootObj
  onFatalErrorAction*: OnFatalErrorHandler
  nonceManager*: NonceManager
  epochMonitorFuture*: Future[void]
  rootChangesFuture*: Future[void]

proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch =
  ## gets time `t` as `flaot64` with subseconds resolution in the fractional part

@ -252,6 +253,7 @@ proc validateMessage*(
      waku_rln_errors_total.inc(labelValues = ["proof_verification"])
      warn "invalid message: proof verification failed", payloadLen = msg.payload.len
      return MessageValidationResult.Invalid

    if not proofVerificationRes.value():
      # invalid proof
      warn "invalid message: invalid proof", payloadLen = msg.payload.len
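calcEpoch above maps a wall-clock timestamp onto a discrete RLN epoch. As a rough sketch of the usual mapping, assuming an epoch index is simply floor(t / epochSizeSec) (the actual Epoch type and its byte encoding live in the surrounding module):

import std/times

proc toEpochIndex(t: float64, epochSizeSec: float64): uint64 =
  ## Index of the epoch that contains timestamp `t` (seconds, with sub-second
  ## resolution in the fractional part), assuming fixed-length epochs.
  uint64(t / epochSizeSec)

when isMainModule:
  let now = epochTime() # float seconds since the Unix epoch
  let idx = toEpochIndex(now, 1.0) # 1-second epochs, purely as an example
  assert idx > 0'u64
  # two timestamps inside the same second map to the same epoch index
  assert toEpochIndex(100.2, 1.0) == toEpochIndex(100.9, 1.0)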
@ -467,9 +469,6 @@ proc mount(
  # Initialize the groupManager
  (await groupManager.init()).isOkOr:
    return err("could not initialize the group manager: " & $error)
  # Start the group sync
  (await groupManager.startGroupSync()).isOkOr:
    return err("could not start the group sync: " & $error)

  wakuRlnRelay = WakuRLNRelay(
    groupManager: groupManager,
@ -479,6 +478,11 @@ proc mount(
    onFatalErrorAction: conf.onFatalErrorAction,
  )

  # track root changes on smart contract merkle tree
  if groupManager of OnchainGroupManager:
    let onchainManager = cast[OnchainGroupManager](groupManager)
    wakuRlnRelay.rootChangesFuture = onchainManager.trackRootChanges()

  # Start epoch monitoring in the background
  wakuRlnRelay.epochMonitorFuture = monitorEpochs(wakuRlnRelay)
  return ok(wakuRlnRelay)
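mount now keeps the Future returned by trackRootChanges in rootChangesFuture, next to epochMonitorFuture, so the background task can be awaited or cancelled on shutdown instead of being fire-and-forget. A stripped-down sketch of that keep-the-future pattern using std/asyncdispatch; MiniRelay and its fields are stand-ins that only mirror the names above.

import std/asyncdispatch

type MiniRelay = ref object
  running: bool
  rootChangesFuture: Future[void]

proc trackRootChanges(r: MiniRelay): Future[void] {.async.} =
  ## Poll for root changes until the relay is stopped.
  while r.running:
    # ... fetch the current Merkle root from the contract and refresh the window ...
    await sleepAsync(10)

proc start(r: MiniRelay) =
  r.running = true
  # keep a handle to the background task instead of discarding it
  r.rootChangesFuture = trackRootChanges(r)

proc stop(r: MiniRelay) {.async.} =
  r.running = false
  if not r.rootChangesFuture.isNil:
    await r.rootChangesFuture # wait for the loop to wind down cleanly

when isMainModule:
  let relay = MiniRelay()
  relay.start()
  waitFor sleepAsync(30)
  waitFor relay.stop()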