mirror of https://github.com/waku-org/nwaku.git
feat(rln-relay): abstract group management into its own api (#1465)
* feat(rln-relay): group manager api
* fix(rln-relay): static gm, method async works now
* fix(rln-relay): cb naming, ensure merkle root changed
* chore(rln-relay): static group manager fully tested
* chore(rln-relay): split ffi into its own module, split conversion_utils
* chore(rln-relay): refactor onchain group
* fix(rln-relay): throw error if privatekey is malformed
* chore(rln-relay): convert methods to procs, remove warnings, formatting
* chore(rln-relay): remove comment
* style(rln-relay): fmt test
* feat(rln-relay): complete onchain group manager. pending tests
* fix(rln-relay): onchain implementation works now
* fix(rln-relay): reg index
* fix(rln): imports
* fix(rln-relay): revert method to proc conv
* fix(rln-relay): s/ffi/rln
* fix(rln-relay): remove rln/ from gitignore
* fix(rln-relay): s/ffi/rln
This commit is contained in:
parent 2df6cb1645
commit 605cf1c38c
@@ -29,7 +29,6 @@
 /metrics/prometheus
 /metrics/waku-sim-all-nodes-grafana-dashboard.json
 
-rln
 *.log
 package-lock.json
 package.json
@@ -50,4 +49,4 @@ nimbus-build-system.paths
 *.db-wal
 *.sqlite3
 *.sqlite3-shm
 *.sqlite3-wal
@@ -0,0 +1,352 @@
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  testutils/unittests,
  stew/results,
  options,
  ../../waku/v2/protocol/waku_rln_relay/protocol_types,
  ../../waku/v2/protocol/waku_rln_relay/constants,
  ../../waku/v2/protocol/waku_rln_relay/contract,
  ../../waku/v2/protocol/waku_rln_relay/rln,
  ../../waku/v2/protocol/waku_rln_relay/conversion_utils,
  ../../waku/v2/protocol/waku_rln_relay/group_manager/on_chain/group_manager

import
  std/[osproc, streams, strutils, sequtils],
  chronos, chronicles, stint, web3, json,
  stew/shims/net as stewNet,
  libp2p/crypto/crypto,
  eth/keys,
  ../test_helpers,
  ./test_utils

from posix import kill, SIGINT

proc generateCredentials(rlnInstance: ptr RLN): IdentityCredential =
  let credRes = membershipKeyGen(rlnInstance)
  return credRes.get()

proc generateCredentials(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] =
  var credentials: seq[IdentityCredential]
  for i in 0 ..< n:
    credentials.add(generateCredentials(rlnInstance))
  return credentials

# a util function used for testing purposes
# it deploys the membership contract on Ganache (or any Eth client available at the EthClient address)
# must be edited if used for a contract other than the membership contract
proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
  let web3 = await newWeb3(ethClientAddress)
  debug "web3 connected to", ethClientAddress

  # fetch the list of registered accounts
  let accounts = await web3.provider.eth_accounts()
  web3.defaultAccount = accounts[1]
  let add = web3.defaultAccount
  debug "contract deployer account address ", add

  let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
  debug "Initial account balance: ", balance

  # deploy the poseidon hash contract and get its address
  let
    hasherReceipt = await web3.deployContract(PoseidonHasherCode)
    hasherAddress = hasherReceipt.contractAddress.get
  debug "hasher address: ", hasherAddress

  # encode the membership contract inputs to 32 bytes, zero-padded
  let
    membershipFeeEncoded = encode(MembershipFee).data
    depthEncoded = encode(MerkleTreeDepth.u256).data
    hasherAddressEncoded = encode(hasherAddress).data
    # this is the contract constructor input
    contractInput = membershipFeeEncoded & depthEncoded & hasherAddressEncoded

  debug "encoded membership fee: ", membershipFeeEncoded
  debug "encoded depth: ", depthEncoded
  debug "encoded hasher address: ", hasherAddressEncoded
  debug "encoded contract input:", contractInput

  # deploy the membership contract with its constructor inputs
  let receipt = await web3.deployContract(MembershipContractCode,
                                          contractInput = contractInput)
  let contractAddress = receipt.contractAddress.get
  debug "Address of the deployed membership contract: ", contractAddress

  let newBalance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
  debug "Account balance after the contract deployment: ", newBalance

  await web3.close()
  debug "disconnected from ", ethClientAddress

  return contractAddress

proc createEthAccount(): Future[(keys.PrivateKey, Address)] {.async.} =
  let theRNG = keys.newRng()

  let web3 = await newWeb3(EthClient)
  let accounts = await web3.provider.eth_accounts()
  let gasPrice = int(await web3.provider.eth_gasPrice())
  web3.defaultAccount = accounts[0]

  let pk = keys.PrivateKey.random(theRNG[])
  let acc = Address(toCanonicalAddress(pk.toPublicKey()))

  var tx: EthSend
  tx.source = accounts[0]
  tx.value = some(ethToWei(10.u256))
  tx.to = some(acc)
  tx.gasPrice = some(gasPrice)

  # send 10 ETH to acc
  discard await web3.send(tx)
  let balance = await web3.provider.eth_getBalance(acc, "latest")
  assert(balance == ethToWei(10.u256))

  return (pk, acc)

# Runs the Ganache daemon
proc runGanache(): Process =
  # We run "node node_modules/ganache/dist/node/cli.js" directly rather than "npx ganache", so that the daemon does not spawn in a new child process.
  # This way, we can send a SIGINT signal directly to the corresponding PID to gracefully terminate Ganache without dealing with multiple processes.
  # The passed options are
  # --port                   Port to listen on.
  # --miner.blockGasLimit    Sets the block gas limit in WEI.
  # --wallet.defaultBalance  The default account balance, specified in ether.
  # See the ganache documentation at https://www.npmjs.com/package/ganache for more details
  try:
    let runGanache = startProcess("npx", args = ["--yes", "ganache", "--port", "8540", "--miner.blockGasLimit", "300000000000000", "--wallet.defaultBalance", "10000"], options = {poUsePath})
    let ganachePID = runGanache.processID

    # We read stdout from Ganache to see when the daemon is ready
    var ganacheStartLog: string
    var cmdline: string
    while true:
      try:
        if runGanache.outputStream.readLine(cmdline):
          ganacheStartLog.add(cmdline)
          if cmdline.contains("Listening on 127.0.0.1:8540"):
            break
      except:
        break
    debug "Ganache daemon is running and ready", pid=ganachePID, startLog=ganacheStartLog
    return runGanache
  except:
    error "Ganache daemon run failed"

# Stops the Ganache daemon
proc stopGanache(runGanache: Process) {.used.} =
  let ganachePID = runGanache.processID

  # We gracefully terminate the Ganache daemon by sending a SIGINT signal to the runGanache PID to trigger RPC server termination and clean-up
  let returnCodeSIGINT = kill(ganachePID.int32, SIGINT)
  debug "Sent SIGINT to Ganache", ganachePID=ganachePID, returnCode=returnCodeSIGINT

  # We wait for the daemon to exit
  try:
    let returnCodeExit = runGanache.waitForExit()
    debug "Ganache daemon terminated", returnCode=returnCodeExit
    debug "Ganache daemon run log", log=runGanache.outputStream.readAll()
  except:
    error "Ganache daemon termination failed"

proc setup(): Future[OnchainGroupManager] {.async.} =
  let rlnInstanceRes = createRlnInstance()
  require:
    rlnInstanceRes.isOk()

  let rlnInstance = rlnInstanceRes.get()

  let contractAddress = await uploadRLNContract(EthClient)
  # connect to the eth client
  let web3 = await newWeb3(EthClient)

  let accounts = await web3.provider.eth_accounts()
  web3.defaultAccount = accounts[1]

  let (pk, _) = await createEthAccount()

  let onchainConfig = OnchainGroupManagerConfig(ethClientUrl: EthClient,
                                                ethContractAddress: $contractAddress,
                                                ethPrivateKey: some($pk))

  let manager {.used.} = OnchainGroupManager(config: onchainConfig,
                                             rlnInstance: rlnInstance)

  return manager

suite "Onchain group manager":
  # We run Ganache
  let runGanache {.used.} = runGanache()

  asyncTest "should initialize successfully":
    let manager = await setup()
    await manager.init()

    check:
      manager.config.ethRpc.isSome()
      manager.config.rlnContract.isSome()
      manager.config.membershipFee.isSome()
      manager.initialized

  asyncTest "startGroupSync: should start group sync":
    let manager = await setup()

    await manager.init()
    await manager.startGroupSync()

  asyncTest "startGroupSync: should guard against uninitialized state":
    let manager = await setup()

    expect(ValueError):
      await manager.startGroupSync()

  asyncTest "startGroupSync: should sync to the state of the group":
    let manager = await setup()
    let credentials = generateCredentials(manager.rlnInstance)

    manager.idCredentials = some(credentials)
    await manager.init()

    let merkleRootBeforeRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootBeforeRes.isOk()
    let merkleRootBefore = merkleRootBeforeRes.get()

    let future = newFuture[void]("startGroupSync")

    proc generateCallback(fut: Future[void], idCommitment: IDCommitment): OnRegisterCallback =
      proc callback(registrations: seq[Membership]): Future[void] {.async.} =
        require:
          registrations.len == 1
          registrations[0].idCommitment == idCommitment
          registrations[0].index == 0
        fut.complete()
      return callback

    manager.onRegister(generateCallback(future, credentials.idCommitment))
    await manager.startGroupSync()

    await future

    let merkleRootAfterRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootAfterRes.isOk()
    let merkleRootAfter = merkleRootAfterRes.get()

    check:
      merkleRootBefore != merkleRootAfter

  asyncTest "startGroupSync: should fetch history correctly":
    let manager = await setup()
    let credentials = generateCredentials(manager.rlnInstance, 5)
    await manager.init()

    let merkleRootBeforeRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootBeforeRes.isOk()
    let merkleRootBefore = merkleRootBeforeRes.get()

    var futures = [newFuture[void](), newFuture[void](), newFuture[void](), newFuture[void](), newFuture[void]()]

    proc generateCallback(futs: array[0..4, Future[system.void]], credentials: seq[IdentityCredential]): OnRegisterCallback =
      var futureIndex = 0
      proc callback(registrations: seq[Membership]): Future[void] {.async.} =
        require:
          registrations.len == 1
          registrations[0].idCommitment == credentials[futureIndex].idCommitment
          registrations[0].index == MembershipIndex(futureIndex)
        futs[futureIndex].complete()
        futureIndex += 1
      return callback

    manager.onRegister(generateCallback(futures, credentials))
    await manager.startGroupSync()

    for i in 0 ..< credentials.len():
      await manager.register(credentials[i])

    await allFutures(futures)

    let merkleRootAfterRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootAfterRes.isOk()
    let merkleRootAfter = merkleRootAfterRes.get()

    check:
      merkleRootBefore != merkleRootAfter

  asyncTest "register: should guard against uninitialized state":
    let manager = await setup()
    let dummyCommitment = default(IDCommitment)

    expect(ValueError):
      await manager.register(dummyCommitment)

  asyncTest "register: should register successfully":
    let manager = await setup()
    await manager.init()
    await manager.startGroupSync()

    let idCommitment = generateCredentials(manager.rlnInstance).idCommitment
    let merkleRootBeforeRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootBeforeRes.isOk()
    let merkleRootBefore = merkleRootBeforeRes.get()
    await manager.register(idCommitment)
    let merkleRootAfterRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootAfterRes.isOk()
    let merkleRootAfter = merkleRootAfterRes.get()
    check:
      merkleRootAfter.inHex() != merkleRootBefore.inHex()
      manager.latestIndex == 1

  asyncTest "register: callback is called":
    let manager = await setup()

    var callbackCalled = false
    let idCommitment = generateCredentials(manager.rlnInstance).idCommitment

    let fut = newFuture[void]()

    proc callback(registrations: seq[Membership]): Future[void] {.async.} =
      require:
        registrations.len == 1
        registrations[0].idCommitment == idCommitment
        registrations[0].index == 0
      callbackCalled = true
      fut.complete()

    manager.onRegister(callback)
    await manager.init()
    await manager.startGroupSync()

    await manager.register(idCommitment)

    await fut
    check:
      callbackCalled

  asyncTest "withdraw: should guard against uninitialized state":
    let manager = await setup()
    let idSecretHash = generateCredentials(manager.rlnInstance).idSecretHash

    expect(ValueError):
      await manager.withdraw(idSecretHash)

  ################################
  ## Terminating/removing Ganache
  ################################

  # We stop the Ganache daemon
  stopGanache(runGanache)
@@ -0,0 +1,183 @@
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  testutils/unittests,
  stew/results,
  options,
  ../../waku/v2/protocol/waku_rln_relay/protocol_types,
  ../../waku/v2/protocol/waku_rln_relay/rln,
  ../../waku/v2/protocol/waku_rln_relay/conversion_utils,
  ../../waku/v2/protocol/waku_rln_relay/group_manager/static/group_manager

import
  stew/shims/net,
  chronos,
  libp2p/crypto/crypto,
  eth/keys,
  discovery/dnsdisc/builder

proc generateCredentials(rlnInstance: ptr RLN): IdentityCredential =
  let credRes = membershipKeyGen(rlnInstance)
  return credRes.get()

proc generateCredentials(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] =
  var credentials: seq[IdentityCredential]
  for i in 0 ..< n:
    credentials.add(generateCredentials(rlnInstance))
  return credentials

suite "Static group manager":
  setup:
    let rlnInstanceRes = createRlnInstance()
    require:
      rlnInstanceRes.isOk()

    let rlnInstance = rlnInstanceRes.get()
    let credentials = generateCredentials(rlnInstance, 10)

    let staticConfig = StaticGroupManagerConfig(groupSize: 10,
                                                membershipIndex: 5,
                                                groupKeys: credentials)

    let manager {.used.} = StaticGroupManager(config: staticConfig,
                                              rlnInstance: rlnInstance)

  asyncTest "should initialize successfully":
    let merkleRootBeforeRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootBeforeRes.isOk()
    let merkleRootBefore = merkleRootBeforeRes.get()

    await manager.init()
    let merkleRootAfterRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootAfterRes.isOk()
    let merkleRootAfter = merkleRootAfterRes.get()
    check:
      manager.idCredentials.isSome()
      manager.config.groupKeys.len == 10
      manager.config.groupSize == 10
      manager.config.membershipIndex == 5
      manager.config.groupKeys[5] == manager.idCredentials.get()
      manager.latestIndex == 9
      merkleRootAfter.inHex() != merkleRootBefore.inHex()

  asyncTest "startGroupSync: should start group sync":
    await manager.init()
    await manager.startGroupSync()

  asyncTest "startGroupSync: should guard against uninitialized state":
    let staticConfig = StaticGroupManagerConfig(groupSize: 0,
                                                membershipIndex: 0,
                                                groupKeys: @[])

    let manager = StaticGroupManager(config: staticConfig,
                                     rlnInstance: rlnInstance)

    expect(ValueError):
      await manager.startGroupSync()

  asyncTest "register: should guard against uninitialized state":
    let staticConfig = StaticGroupManagerConfig(groupSize: 0,
                                                membershipIndex: 0,
                                                groupKeys: @[])

    let manager = StaticGroupManager(config: staticConfig,
                                     rlnInstance: rlnInstance)

    let dummyCommitment = default(IDCommitment)

    expect(ValueError):
      await manager.register(dummyCommitment)

  asyncTest "register: should register successfully":
    await manager.init()
    await manager.startGroupSync()

    let idCommitment = generateCredentials(manager.rlnInstance).idCommitment
    let merkleRootBeforeRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootBeforeRes.isOk()
    let merkleRootBefore = merkleRootBeforeRes.get()
    await manager.register(idCommitment)
    let merkleRootAfterRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootAfterRes.isOk()
    let merkleRootAfter = merkleRootAfterRes.get()
    check:
      merkleRootAfter.inHex() != merkleRootBefore.inHex()
      manager.latestIndex == 10

  asyncTest "register: callback is called":
    var callbackCalled = false
    let idCommitment = generateCredentials(manager.rlnInstance).idCommitment

    let fut = newFuture[void]()

    proc callback(registrations: seq[Membership]): Future[void] {.async.} =
      require:
        registrations.len == 1
        registrations[0].idCommitment == idCommitment
        registrations[0].index == 10
      callbackCalled = true
      fut.complete()

    manager.onRegister(callback)
    await manager.init()
    await manager.startGroupSync()

    await manager.register(idCommitment)

    await fut
    check:
      callbackCalled

  asyncTest "withdraw: should guard against uninitialized state":
    let idSecretHash = credentials[0].idSecretHash

    expect(ValueError):
      await manager.withdraw(idSecretHash)

  asyncTest "withdraw: should withdraw successfully":
    await manager.init()
    await manager.startGroupSync()

    let idSecretHash = credentials[0].idSecretHash
    let merkleRootBeforeRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootBeforeRes.isOk()
    let merkleRootBefore = merkleRootBeforeRes.get()
    await manager.withdraw(idSecretHash)
    let merkleRootAfterRes = manager.rlnInstance.getMerkleRoot()
    require:
      merkleRootAfterRes.isOk()
    let merkleRootAfter = merkleRootAfterRes.get()
    check:
      merkleRootAfter.inHex() != merkleRootBefore.inHex()

  asyncTest "withdraw: callback is called":
    var callbackCalled = false
    let idSecretHash = credentials[0].idSecretHash
    let idCommitment = credentials[0].idCommitment
    let fut = newFuture[void]()

    proc callback(withdrawals: seq[Membership]): Future[void] {.async.} =
      require:
        withdrawals.len == 1
        withdrawals[0].idCommitment == idCommitment
        withdrawals[0].index == 0
      callbackCalled = true
      fut.complete()

    manager.onWithdraw(callback)
    await manager.init()
    await manager.startGroupSync()

    await manager.withdraw(idSecretHash)

    await fut
    check:
      callbackCalled
@@ -3,6 +3,7 @@ import
   ./waku_rln_relay/constants,
   ./waku_rln_relay/protocol_types,
   ./waku_rln_relay/protocol_metrics,
+  ./waku_rln_relay/conversion_utils,
   ./waku_rln_relay/utils,
   ./waku_rln_relay/contract
@@ -11,5 +12,6 @@ export
   constants,
   protocol_types,
   protocol_metrics,
+  conversion_utils,
   utils,
   contract
@@ -0,0 +1,161 @@
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  std/[sequtils],
  web3,
  chronicles,
  stew/[arrayops, results, endians2],
  stint
import
  ./protocol_types
import
  ../../utils/keyfile

export
  web3,
  chronicles,
  stint

logScope:
  topics = "waku rln_relay conversion_utils"

proc toUInt256*(idCommitment: IDCommitment): UInt256 =
  let pk = UInt256.fromBytesLE(idCommitment)
  return pk

proc toIDCommitment*(idCommitmentUint: UInt256): IDCommitment =
  let pk = IDCommitment(idCommitmentUint.toBytesLE())
  return pk

proc inHex*(value: array[32, byte]): string =
  var valueHex = (UInt256.fromBytesLE(value)).toHex()
  # We pad leading zeroes
  while valueHex.len < value.len * 2:
    valueHex = "0" & valueHex
  return valueHex

proc toMembershipIndex*(v: UInt256): MembershipIndex =
  let membershipIndex: MembershipIndex = cast[MembershipIndex](v)
  return membershipIndex
proc appendLength*(input: openArray[byte]): seq[byte] =
  ## returns a length-prefixed version of the input
  ## with the following format: [len<8>|input<var>]
  ## len: an 8-byte value that represents the number of bytes in `input`,
  ## serialized in little-endian
  ## input: the supplied `input`
  let
    # the length is serialized in little-endian
    len = toBytes(uint64(input.len), Endianness.littleEndian)
    output = concat(@len, @input)
  return output
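# For instance (an illustrative note, not part of the original diff):
# appendLength(@[0x01'u8, 0x02'u8]) yields
#   @[2'u8, 0, 0, 0, 0, 0, 0, 0, 1, 2]
# i.e. the 8-byte little-endian length (2) followed by the payload bytes.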
proc serialize*(idSecretHash: IdentitySecretHash, memIndex: MembershipIndex, epoch: Epoch,
                msg: openArray[byte]): seq[byte] =
  ## a private proc to convert the rate-limit proof inputs and the data to a byte seq
  ## this conversion is used in the proofGen proc
  ## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
  ## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
  let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian)
  let lenPrefMsg = appendLength(msg)
  let output = concat(@idSecretHash, @memIndexBytes, @epoch, lenPrefMsg)
  return output
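# Size check (illustrative): the serialized buffer is always
# 32 + 8 + 32 + 8 + msg.len == 80 + msg.len bytes, matching the
# [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ] layout above.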
proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
  ## a private proc to convert a RateLimitProof and data to a byte seq
  ## this conversion is used in the proof verification proc
  ## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
  let lenPrefMsg = appendLength(@data)
  var proofBytes = concat(@(proof.proof),
                          @(proof.merkleRoot),
                          @(proof.epoch),
                          @(proof.shareX),
                          @(proof.shareY),
                          @(proof.nullifier),
                          @(proof.rlnIdentifier),
                          lenPrefMsg)

  return proofBytes

# Serializes a sequence of MerkleNodes
proc serialize*(roots: seq[MerkleNode]): seq[byte] =
  var rootsBytes: seq[byte] = @[]
  for root in roots:
    rootsBytes = concat(rootsBytes, @root)
  return rootsBytes

proc serializeIdCommitments*(idComms: seq[IDCommitment]): seq[byte] =
  ## serializes a seq of IDCommitments to a byte seq
  ## the serialization is based on https://github.com/status-im/nwaku/blob/37bd29fbc37ce5cf636734e7dd410b1ed27b88c8/waku/v2/protocol/waku_rln_relay/rln.nim#L142
  ## the order of serialization is |id_commitment_len<8>|id_commitment<var>|
  var idCommsBytes = newSeq[byte]()

  # serialize the idComms, with their length prefixed
  let len = toBytes(uint64(idComms.len), Endianness.littleEndian)
  idCommsBytes.add(len)

  for idComm in idComms:
    idCommsBytes = concat(idCommsBytes, @idComm)

  return idCommsBytes

# Converts a sequence of tuples containing 4 strings (i.e. identity trapdoor, nullifier, secret hash and commitment) to IdentityCredentials
proc toIdentityCredentials*(groupKeys: seq[(string, string, string, string)]): RlnRelayResult[seq[IdentityCredential]] =
  ## groupKeys is a sequence of membership key tuples in the form of (identity trapdoor, identity nullifier, identity secret hash, identity commitment), all in hexadecimal format
  ## the toIdentityCredentials proc populates a sequence of IdentityCredentials using the supplied groupKeys
  ## Returns an error if the conversion fails

  var groupIdCredentials = newSeq[IdentityCredential]()

  for i in 0..groupKeys.len-1:
    try:
      let
        idTrapdoor = hexToUint[IdentityTrapdoor.len*8](groupKeys[i][0]).toBytesLE()
        idNullifier = hexToUint[IdentityNullifier.len*8](groupKeys[i][1]).toBytesLE()
        idSecretHash = hexToUint[IdentitySecretHash.len*8](groupKeys[i][2]).toBytesLE()
        idCommitment = hexToUint[IDCommitment.len*8](groupKeys[i][3]).toBytesLE()
      groupIdCredentials.add(IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash,
                                                idCommitment: idCommitment))
    except ValueError as err:
      warn "could not convert the group key to bytes", err = err.msg
      return err("could not convert the group key to bytes: " & err.msg)
  return ok(groupIdCredentials)

# Converts a sequence of tuples containing 2 strings (i.e. identity secret hash and commitment) to IdentityCredentials
proc toIdentityCredentials*(groupKeys: seq[(string, string)]): RlnRelayResult[seq[IdentityCredential]] =
  ## groupKeys is a sequence of membership key tuples in the form of (identity secret hash, identity commitment), both in hexadecimal format
  ## the toIdentityCredentials proc populates a sequence of IdentityCredentials using the supplied groupKeys
  ## Returns an error if the conversion fails

  var groupIdCredentials = newSeq[IdentityCredential]()

  for i in 0..groupKeys.len-1:
    try:
      let
        idSecretHash = hexToUint[IdentitySecretHash.len*8](groupKeys[i][0]).toBytesLE()
        idCommitment = hexToUint[IDCommitment.len*8](groupKeys[i][1]).toBytesLE()
      groupIdCredentials.add(IdentityCredential(idSecretHash: idSecretHash,
                                                idCommitment: idCommitment))
    except ValueError as err:
      warn "could not convert the group key to bytes", err = err.msg
      return err("could not convert the group key to bytes: " & err.msg)
  return ok(groupIdCredentials)
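# A hypothetical usage sketch (the hex strings are placeholders, not real
# credentials): loading a static group from (identity secret hash,
# identity commitment) pairs, each a 32-byte value in hex:
#
#   let groupKeys = @[("<64-hex-char secret hash>", "<64-hex-char commitment>")]
#   let credentialsRes = toIdentityCredentials(groupKeys)
#   if credentialsRes.isOk():
#     let credentials = credentialsRes.get()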
proc toEpoch*(t: uint64): Epoch =
  ## converts `t` to `Epoch` in little-endian order
  let bytes = toBytes(t, Endianness.littleEndian)
  debug "bytes", bytes = bytes
  var epoch: Epoch
  discard epoch.copyFrom(bytes)
  return epoch

proc fromEpoch*(epoch: Epoch): uint64 =
  ## decodes bytes of `epoch` (in little-endian) to uint64
  let t = fromBytesLE(uint64, array[32, byte](epoch))
  return t
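# Roundtrip sanity sketch (illustrative): since both directions use
# little-endian byte order, the two procs are inverses:
#
#   doAssert fromEpoch(toEpoch(1670000000'u64)) == 1670000000'u64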
@@ -0,0 +1,6 @@
import
  group_manager/[static, on_chain]

export
  static,
  on_chain
@@ -0,0 +1,84 @@
import
  ../protocol_types
import
  options,
  chronos,
  stew/results

export
  options,
  chronos,
  results,
  protocol_types

# This module contains the GroupManager interface
# The GroupManager is responsible for managing the group state
# It should be used to register new members and to withdraw existing members
# It should also be used to sync the group state with the rest of the group members

type Membership* = object
  idCommitment*: IDCommitment
  index*: MembershipIndex

type OnRegisterCallback* = proc (registrations: seq[Membership]): Future[void] {.gcsafe.}
type OnWithdrawCallback* = proc (withdrawals: seq[Membership]): Future[void] {.gcsafe.}

type GroupManagerResult*[T] = Result[T, string]

type
  GroupManager*[Config] = ref object of RootObj
    idCredentials*: Option[IdentityCredential]
    registerCb*: Option[OnRegisterCallback]
    withdrawCb*: Option[OnWithdrawCallback]
    config*: Config
    rlnInstance*: ptr RLN
    initialized*: bool
    latestIndex*: MembershipIndex

# This proc is used to initialize the group manager
# Any initialization logic should be implemented here
method init*(g: GroupManager): Future[void] {.base,gcsafe.} =
  return err("init proc for " & $g.kind & " is not implemented yet")

# This proc is used to start the group sync process
# It should be used to sync the group state with the rest of the group members
method startGroupSync*(g: GroupManager): Future[void] {.base,gcsafe.} =
  return err("startGroupSync proc for " & $g.kind & " is not implemented yet")

# This proc is used to register a new identity commitment into the merkle tree
# The user may or may not have the identity secret to this commitment
# It should be used when detecting new members in the group, and syncing the group state
method register*(g: GroupManager, idCommitment: IDCommitment): Future[void] {.base,gcsafe.} =
  return err("register proc for " & $g.kind & " is not implemented yet")

# This proc is used to register a new identity commitment into the merkle tree
# The user should have the identity secret to this commitment
# It should be used when the user wants to join the group
method register*(g: GroupManager, credentials: IdentityCredential): Future[void] {.base,gcsafe.} =
  return err("register proc for " & $g.kind & " is not implemented yet")

# This proc is used to register a batch of new identity commitments into the merkle tree
# The user may or may not have the identity secrets to these commitments
# It should be used when detecting a batch of new members in the group, and syncing the group state
method registerBatch*(g: GroupManager, idCommitments: seq[IDCommitment]): Future[void] {.base,gcsafe.} =
  return err("registerBatch proc for " & $g.kind & " is not implemented yet")

# This proc is used to set a callback that will be called when a new identity commitment is registered
# The callback may be called multiple times, and should be used for any post-processing
method onRegister*(g: GroupManager, cb: OnRegisterCallback) {.base,gcsafe.} =
  g.registerCb = some(cb)

# This proc is used to withdraw/remove an identity commitment from the merkle tree
# The user should have the identity secret hash to this commitment, by either deriving it, or owning it
method withdraw*(g: GroupManager, identitySecretHash: IdentitySecretHash): Future[void] {.base,gcsafe.} =
  return err("withdraw proc for " & $g.kind & " is not implemented yet")

# This proc is used to withdraw/remove a batch of identity commitments from the merkle tree
# The user should have the identity secret hashes to these commitments, by either deriving them, or owning them
method withdrawBatch*(g: GroupManager, identitySecretHashes: seq[IdentitySecretHash]): Future[void] {.base,gcsafe.} =
  return err("withdrawBatch proc for " & $g.kind & " is not implemented yet")

# This proc is used to set a callback that will be called when an identity commitment is withdrawn
# The callback may be called multiple times, and should be used for any post-processing
method onWithdraw*(g: GroupManager, cb: OnWithdrawCallback) {.base,gcsafe.} =
  g.withdrawCb = some(cb)
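# A minimal usage sketch of this interface (illustrative; it mirrors the
# wiring used by the tests in this commit and is not itself part of the diff):
#
#   let manager = StaticGroupManager(config: staticConfig,
#                                    rlnInstance: rlnInstance)
#   manager.onRegister(proc (registrations: seq[Membership]): Future[void] {.async.} =
#     debug "new registrations", count = registrations.len
#   )
#   await manager.init()
#   await manager.startGroupSync()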
@@ -0,0 +1,3 @@
import on_chain/group_manager

export group_manager
@@ -0,0 +1,328 @@
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  web3,
  web3/ethtypes,
  eth/keys as keys,
  chronicles,
  stint,
  json,
  std/tables,
  stew/[byteutils, arrayops],
  sequtils
import
  ../../rln,
  ../../conversion_utils,
  ../group_manager_base

from strutils import parseHexInt

export group_manager_base

logScope:
  topics = "waku rln_relay onchain_group_manager"

# membership contract interface
contract(RlnContract):
  proc register(pubkey: Uint256) {.payable.} # external payable
  proc MemberRegistered(pubkey: Uint256, index: Uint256) {.event.}
  proc MEMBERSHIP_DEPOSIT(): Uint256
  # TODO the following are to be supported
  # proc registerBatch(pubkeys: seq[Uint256]) # external payable
  # proc withdraw(secret: Uint256, pubkeyIndex: Uint256, receiver: Address)
  # proc withdrawBatch(secrets: seq[Uint256], pubkeyIndex: seq[Uint256], receiver: seq[Address])

type
  RlnContractWithSender = Sender[RlnContract]
  OnchainGroupManagerConfig* = object
    ethClientUrl*: string
    ethPrivateKey*: Option[string]
    ethContractAddress*: string
    ethRpc*: Option[Web3]
    rlnContract*: Option[RlnContractWithSender]
    membershipFee*: Option[Uint256]
    membershipIndex*: Option[MembershipIndex]
    latestProcessedBlock*: Option[BlockNumber]

  OnchainGroupManager* = ref object of GroupManager[OnchainGroupManagerConfig]

template initializedGuard*(g: OnchainGroupManager): untyped =
  if not g.initialized:
    raise newException(ValueError, "OnchainGroupManager is not initialized")

proc register*(g: OnchainGroupManager, idCommitment: IDCommitment): Future[void] {.async.} =
  initializedGuard(g)

  let memberInserted = g.rlnInstance.insertMember(idCommitment)
  if not memberInserted:
    raise newException(ValueError, "member insertion failed")

  if g.registerCb.isSome():
    await g.registerCb.get()(@[Membership(idCommitment: idCommitment, index: g.latestIndex)])

  g.latestIndex += 1

  return

proc registerBatch*(g: OnchainGroupManager, idCommitments: seq[IDCommitment]): Future[void] {.async.} =
  initializedGuard(g)

  let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, idCommitments)
  if not membersInserted:
    raise newException(ValueError, "Failed to insert members into the merkle tree")

  if g.registerCb.isSome():
    var membersSeq = newSeq[Membership]()
    for i in 0 ..< idCommitments.len():
      var index = g.latestIndex + MembershipIndex(i)
      debug "registering member", idCommitment = idCommitments[i], index = index, latestIndex = g.latestIndex
      let member = Membership(idCommitment: idCommitments[i], index: index)
      membersSeq.add(member)
    await g.registerCb.get()(membersSeq)

  g.latestIndex += MembershipIndex(idCommitments.len())

  return

proc register*(g: OnchainGroupManager, identityCredentials: IdentityCredential): Future[void] {.async.} =
  initializedGuard(g)

  let ethRpc = g.config.ethRpc.get()
  let rlnContract = g.config.rlnContract.get()
  let membershipFee = g.config.membershipFee.get()

  let gasPrice = int(await ethRpc.provider.eth_gasPrice()) * 2
  let idCommitment = identityCredentials.idCommitment.toUInt256()

  var txHash: TxHash
  try: # send the registration transaction and check if any error occurs
    txHash = await rlnContract.register(idCommitment).send(value = membershipFee,
                                                           gasPrice = gasPrice)
  except ValueError as e:
    raise newException(ValueError, "could not register the member: " & e.msg)

  let tsReceipt = await ethRpc.getMinedTransactionReceipt(txHash)

  # the receipt's topic holds the hash of the signature of the raised events
  # TODO: make this robust. search within the event list for the event
  let firstTopic = tsReceipt.logs[0].topics[0]
  # the hash of the signature of the MemberRegistered(uint256,uint256) event is equal to the following hex value
  if firstTopic[0..65] != "0x5a92c2530f207992057b9c3e544108ffce3beda4a63719f316967c49bf6159d2":
    raise newException(ValueError, "unexpected event signature")
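  # Note (illustrative, not part of the original diff): in Ethereum logs,
  # topics[0] is the Keccak-256 hash of the canonical event signature, so the
  # constant above corresponds to keccak256("MemberRegistered(uint256,uint256)").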
  # the arguments of the raised event, i.e. MemberRegistered, are encoded inside the data field
  # data = pk encoded as 256 bits || index encoded as 256 bits
  let arguments = tsReceipt.logs[0].data
  debug "tx log data", arguments=arguments
  let
    argumentsBytes = arguments.hexToSeqByte()
    # In TX log data, uints are encoded in big endian
    eventIndex = UInt256.fromBytesBE(argumentsBytes[32..^1])

  g.config.membershipIndex = some(eventIndex.toMembershipIndex())

  # don't handle member insertion into the tree here; it will be handled by the event listener
  return

proc withdraw*(g: OnchainGroupManager, idCommitment: IDCommitment): Future[void] {.async.} =
  initializedGuard(g)

  # TODO: after slashing is enabled on the contract

proc withdrawBatch*(g: OnchainGroupManager, idCommitments: seq[IDCommitment]): Future[void] {.async.} =
  initializedGuard(g)

  # TODO: after slashing is enabled on the contract

proc parseEvent*(event: type MemberRegistered,
                 log: JsonNode): GroupManagerResult[Membership] =
  ## parses the `data` parameter of the `MemberRegistered` event `log`
  ## returns an error if it cannot parse the `data` parameter
  var idComm: UInt256
  var index: UInt256
  var data: string
  # Remove the 0x prefix
  try:
    data = strip0xPrefix(log["data"].getStr())
  except CatchableError:
    return err("failed to parse the data field of the MemberRegistered event: " & getCurrentExceptionMsg())
  var offset = 0
  try:
    # Parse the idComm
    offset += decode(data, offset, idComm)
    # Parse the index
    offset += decode(data, offset, index)
    return ok(Membership(idCommitment: idComm.toIDCommitment(), index: index.toMembershipIndex()))
  except:
    return err("failed to parse the data field of the MemberRegistered event")

type BlockTable* = OrderedTable[BlockNumber, seq[Membership]]

proc getEvents*(g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: Option[BlockNumber] = none(BlockNumber)): Future[BlockTable] {.async.} =
  initializedGuard(g)

  let ethRpc = g.config.ethRpc.get()
  let rlnContract = g.config.rlnContract.get()

  var normalizedToBlock: BlockNumber
  if toBlock.isSome():
    var value = toBlock.get()
    if value == 0:
      # set to the latest block
      value = cast[BlockNumber](await ethRpc.provider.eth_blockNumber())
    normalizedToBlock = value
  else:
    normalizedToBlock = fromBlock

  var blockTable = default(BlockTable)
  let events = await rlnContract.getJsonLogs(MemberRegistered, fromBlock = some(fromBlock.blockId()), toBlock = some(normalizedToBlock.blockId()))
  if events.len == 0:
    debug "no events found"
    return blockTable

  for event in events:
    let blockNumber = parseHexInt(event["blockNumber"].getStr()).uint
    let parsedEventRes = parseEvent(MemberRegistered, event)
    if parsedEventRes.isErr():
      error "failed to parse the MemberRegistered event", error=parsedEventRes.error()
      raise newException(ValueError, "failed to parse the MemberRegistered event")
    let parsedEvent = parsedEventRes.get()

    if blockTable.hasKey(blockNumber):
      blockTable[blockNumber].add(parsedEvent)
    else:
      blockTable[blockNumber] = @[parsedEvent]

  return blockTable

proc seedBlockTableIntoTree*(g: OnchainGroupManager, blockTable: BlockTable): Future[void] {.async.} =
  initializedGuard(g)

  for blockNumber, members in blockTable.pairs():
    let latestIndex = g.latestIndex
    let startingIndex = members[0].index
    try:
      await g.registerBatch(members.mapIt(it.idCommitment))
    except:
      error "failed to insert members into the tree"
      raise newException(ValueError, "failed to insert members into the tree")
    debug "new members added to the Merkle tree", commitments=members.mapIt(it.idCommitment.inHex()), startingIndex=startingIndex
    let lastIndex = startingIndex + members.len.uint - 1
    let indexGap = startingIndex - latestIndex
    if not (toSeq(startingIndex..lastIndex) == members.mapIt(it.index)):
      raise newException(ValueError, "membership indices are not sequential")
    if indexGap != 1.uint and lastIndex != latestIndex:
      warn "membership index gap, may have lost connection", lastIndex, currIndex=latestIndex, indexGap = indexGap
    g.config.latestProcessedBlock = some(blockNumber)

  return

proc getEventsAndSeedIntoTree*(g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: Option[BlockNumber] = none(BlockNumber)): Future[void] {.async.} =
  initializedGuard(g)

  let events = await g.getEvents(fromBlock, toBlock)
  await g.seedBlockTableIntoTree(events)
  return

proc getNewHeadCallback*(g: OnchainGroupManager): BlockHeaderHandler =
  proc newHeadCallback(blockheader: BlockHeader) {.gcsafe.} =
    let latestBlock = blockheader.number.uint
    debug "block received", blockNumber = latestBlock
    # get logs from the last block
    try:
      asyncSpawn g.getEventsAndSeedIntoTree(latestBlock)
    except CatchableError:
      warn "failed to handle log: ", error=getCurrentExceptionMsg()
  return newHeadCallback

proc newHeadErrCallback(error: CatchableError) =
  warn "failed to get new head", error=error.msg

proc startListeningToEvents*(g: OnchainGroupManager): Future[void] {.async.} =
  initializedGuard(g)

  let ethRpc = g.config.ethRpc.get()
  let newHeadCallback = g.getNewHeadCallback()
  try:
    discard await ethRpc.subscribeForBlockHeaders(newHeadCallback, newHeadErrCallback)
  except:
    raise newException(ValueError, "failed to subscribe to block headers: " & getCurrentExceptionMsg())

proc startOnchainSync*(g: OnchainGroupManager, fromBlock: BlockNumber = BlockNumber(0)): Future[void] {.async.} =
  initializedGuard(g)

  try:
    await g.getEventsAndSeedIntoTree(fromBlock, some(fromBlock))
  except:
    raise newException(ValueError, "failed to get the history/reconcile missed blocks: " & getCurrentExceptionMsg())

  # listen to block headers and contract events
  try:
    await g.startListeningToEvents()
  except:
    raise newException(ValueError, "failed to start listening to events: " & getCurrentExceptionMsg())

proc startGroupSync*(g: OnchainGroupManager): Future[void] {.async.} =
  initializedGuard(g)
  # Get archive history
  try:
    await startOnchainSync(g)
  except:
    raise newException(ValueError, "failed to start onchain sync service: " & getCurrentExceptionMsg())

  if g.config.ethPrivateKey.isSome() and g.idCredentials.isSome():
    debug "registering commitment on contract"
    await g.register(g.idCredentials.get())

  return

proc onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} =
  g.registerCb = some(cb)

proc onWithdraw*(g: OnchainGroupManager, cb: OnWithdrawCallback) {.gcsafe.} =
  g.withdrawCb = some(cb)

proc init*(g: OnchainGroupManager): Future[void] {.async.} =
  var ethRpc: Web3
  var contract: RlnContractWithSender
  # check if the Ethereum client is reachable
  try:
    ethRpc = await newWeb3(g.config.ethClientUrl)
  except:
    raise newException(ValueError, "could not connect to the Ethereum client")

  let contractAddress = web3.fromHex(web3.Address, g.config.ethContractAddress)
  contract = ethRpc.contractSender(RlnContract, contractAddress)

  # check if the contract exists by calling a static function
  var membershipFee: Uint256
  try:
    membershipFee = await contract.MEMBERSHIP_DEPOSIT().call()
  except:
    raise newException(ValueError, "could not get the membership deposit")

  if g.config.ethPrivateKey.isSome():
    let pk = string(g.config.ethPrivateKey.get())
    let pkParseRes = keys.PrivateKey.fromHex(pk)
    if pkParseRes.isErr():
      raise newException(ValueError, "could not parse the private key")
    ethRpc.privateKey = some(pkParseRes.get())

  g.config.ethRpc = some(ethRpc)
  g.config.rlnContract = some(contract)
  g.config.membershipFee = some(membershipFee)

  ethRpc.ondisconnect = proc() =
    error "Ethereum client disconnected"
    let fromBlock = g.config.latestProcessedBlock.get()
    info "reconnecting with the Ethereum client, and restarting group sync", fromBlock = fromBlock
    try:
      asyncSpawn g.startOnchainSync(fromBlock)
    except:
      error "failed to restart group sync"

  g.initialized = true
@@ -0,0 +1,3 @@
import static/group_manager

export group_manager
@@ -0,0 +1,110 @@
import
  ../group_manager_base,
  ../../rln,
  std/sequtils

export
  group_manager_base

type
  StaticGroupManagerConfig* = object
    groupKeys*: seq[IdentityCredential]
    groupSize*: uint
    membershipIndex*: MembershipIndex

  StaticGroupManager* = ref object of GroupManager[StaticGroupManagerConfig]

template initializedGuard*(g: StaticGroupManager): untyped =
  if not g.initialized:
    raise newException(ValueError, "StaticGroupManager is not initialized")

proc init*(g: StaticGroupManager): Future[void] {.async,gcsafe.} =
  let
    groupSize = g.config.groupSize
    groupKeys = g.config.groupKeys
    membershipIndex = g.config.membershipIndex

  if membershipIndex < MembershipIndex(0) or membershipIndex >= MembershipIndex(groupSize):
    raise newException(ValueError, "Invalid membership index. Must be within 0 and " & $(groupSize - 1) & " but was " & $membershipIndex)
  g.idCredentials = some(groupKeys[membershipIndex])

  # Seed the received commitments into the merkle tree
  let idCommitments = groupKeys.mapIt(it.idCommitment)
  let membersInserted = g.rlnInstance.insertMembers(g.latestIndex, idCommitments)
  if not membersInserted:
    raise newException(ValueError, "Failed to insert members into the merkle tree")

  g.latestIndex += MembershipIndex(idCommitments.len() - 1)

  g.initialized = true

  return

proc startGroupSync*(g: StaticGroupManager): Future[void] =
  initializedGuard(g)
  var retFuture = newFuture[void]("StaticGroupManager.startGroupSync")
  # No-op
  retFuture.complete()
  return retFuture

proc register*(g: StaticGroupManager, idCommitment: IDCommitment): Future[void] {.async.} =
  initializedGuard(g)

  let memberInserted = g.rlnInstance.insertMember(idCommitment)
  if not memberInserted:
    raise newException(ValueError, "Failed to insert member into the merkle tree")

  g.latestIndex += 1

  if g.registerCb.isSome():
    await g.registerCb.get()(@[Membership(idCommitment: idCommitment, index: g.latestIndex)])
  return

proc registerBatch*(g: StaticGroupManager, idCommitments: seq[IDCommitment]): Future[void] {.async.} =
  initializedGuard(g)

  let membersInserted = g.rlnInstance.insertMembers(g.latestIndex + 1, idCommitments)
  if not membersInserted:
    raise newException(ValueError, "Failed to insert members into the merkle tree")

  if g.registerCb.isSome():
    var memberSeq = newSeq[Membership]()
    for i in 0..<idCommitments.len():
      memberSeq.add(Membership(idCommitment: idCommitments[i], index: g.latestIndex + MembershipIndex(i)))
    await g.registerCb.get()(memberSeq)

  g.latestIndex += MembershipIndex(idCommitments.len() - 1)

  return

proc withdraw*(g: StaticGroupManager, idSecretHash: IdentitySecretHash): Future[void] {.async.} =
  initializedGuard(g)

  let groupKeys = g.config.groupKeys

  for i in 0..<groupKeys.len():
    if groupKeys[i].idSecretHash == idSecretHash:
      let idCommitment = groupKeys[i].idCommitment
      let index = MembershipIndex(i)
      let memberRemoved = g.rlnInstance.removeMember(index)
      if not memberRemoved:
        raise newException(ValueError, "Failed to remove member from the merkle tree")

      if g.withdrawCb.isSome():
        await g.withdrawCb.get()(@[Membership(idCommitment: idCommitment, index: index)])

      return

proc withdrawBatch*(g: StaticGroupManager, idSecretHashes: seq[IdentitySecretHash]): Future[void] {.async.} =
  initializedGuard(g)

  # call withdraw on each idSecretHash
  for idSecretHash in idSecretHashes:
    await g.withdraw(idSecretHash)

proc onRegister*(g: StaticGroupManager, cb: OnRegisterCallback) {.gcsafe.} =
  g.registerCb = some(cb)

proc onWithdraw*(g: StaticGroupManager, cb: OnWithdrawCallback) {.gcsafe.} =
  g.withdrawCb = some(cb)
@@ -11,7 +11,8 @@ import
   ./constants,
   ../../utils/collector
 
-export metrics
+export
+  metrics
 
 logScope:
   topics = "waku rln_relay"
@ -1,152 +1,7 @@
## Nim wrappers for the functions defined in librln
import
  rln/rln_interface,
  rln/wrappers

export
  rln_interface,
  wrappers

@ -0,0 +1,161 @@
## Nim wrappers for the functions defined in librln
import
  ../protocol_types

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

## Buffer struct is taken from
# https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs
type Buffer* = object
  `ptr`*: ptr uint8
  len*: uint

proc toBuffer*(x: openArray[byte]): Buffer =
  ## converts the input to a Buffer object
  ## the Buffer object is used to communicate data with the rln lib
  var temp = @x
  let baseAddr = cast[pointer](x)
  let output = Buffer(`ptr`: cast[ptr uint8](baseAddr), len: uint(temp.len))
  return output
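# A lifetime note and micro-sketch (assumes `rln: ptr RLN` obtained from
# new_circuit below): the returned Buffer aliases the caller's bytes rather
# than owning a copy, so the source must stay alive until the librln call
# that consumes it returns, e.g.:
#
#   var leaf: array[32, byte]          # a serialized 32-byte leaf
#   var leafBuffer = leaf.toBuffer()
#   # `leaf` must remain in scope while the FFI call runs
#   discard update_next_member(rln, addr leafBuffer)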

######################################################################
## RLN Zerokit module APIs
######################################################################

#------------------------------ Merkle Tree operations -----------------------------------------
proc update_next_member*(ctx: ptr RLN, input_buffer: ptr Buffer): bool {.importc: "set_next_leaf".}
## adds an element in the merkle tree to the next available position
## input_buffer points to the id commitment byte seq
## the return bool value indicates the success or failure of the operation

proc delete_member*(ctx: ptr RLN, index: uint): bool {.importc: "delete_leaf".}
## index is the position of the id commitment key to be deleted from the tree
## the deleted id commitment key is replaced with a zero leaf
## the return bool value indicates the success or failure of the operation

proc get_root*(ctx: ptr RLN, output_buffer: ptr Buffer): bool {.importc: "get_root".}
## get_root populates the passed pointer output_buffer with the current tree root
## the output_buffer holds the Merkle tree root of size 32 bytes
## the return bool value indicates the success or failure of the operation

proc get_merkle_proof*(ctx: ptr RLN, index: uint, output_buffer: ptr Buffer): bool {.importc: "get_proof".}
## populates the passed pointer output_buffer with the merkle proof for the leaf at position index in the tree stored by ctx
## the output_buffer holds a serialized Merkle proof (vector of 32-byte nodes)
## the return bool value indicates the success or failure of the operation

proc set_leaf*(ctx: ptr RLN, index: uint, input_buffer: ptr Buffer): bool {.importc: "set_leaf".}
## sets the leaf at position index in the tree stored by ctx to the value passed by input_buffer
## the input_buffer holds a serialized leaf of 32 bytes
## the return bool value indicates the success or failure of the operation

proc init_tree_with_leaves*(ctx: ptr RLN, input_buffer: ptr Buffer): bool {.importc: "init_tree_with_leaves".}
## sets multiple leaves in the tree stored by ctx to the value passed by input_buffer
## the input_buffer holds a serialized vector of leaves (32 bytes each)
## the input_buffer content is prefixed by an 8-byte integer indicating the number of leaves
## leaves are set one after the other starting from index 0
## the return bool value indicates the success or failure of the operation

proc set_leaves_from*(ctx: ptr RLN, index: uint, input_buffer: ptr Buffer): bool {.importc: "set_leaves_from".}
## sets multiple leaves in the tree stored by ctx to the value passed by input_buffer
## the input_buffer holds a serialized vector of leaves (32 bytes each)
## the input_buffer content is prefixed by an 8-byte integer indicating the number of leaves
## leaves are set one after the other starting from index `index`
## the return bool value indicates the success or failure of the operation
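# A sketch of the buffer layout set_leaves_from expects: two 32-byte leaves
# inserted starting at index 5, with the little-endian 8-byte leaf count in
# front (`leafA`, `leafB` and `rln` are assumed values):
#
#   var serialized: seq[byte]
#   serialized.add(@[2'u8, 0, 0, 0, 0, 0, 0, 0])  # leaf count = 2, little-endian
#   serialized.add(@leafA)                        # 32 bytes
#   serialized.add(@leafB)                        # 32 bytes
#   var leavesBuffer = serialized.toBuffer()
#   discard set_leaves_from(rln, 5'u, addr leavesBuffer)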

proc reset_tree*(ctx: ptr RLN, tree_height: uint): bool {.importc: "set_tree".}
## resets the tree stored by ctx to the empty tree (all leaves set to 0) of height tree_height
## the return bool value indicates the success or failure of the operation

#----------------------------------------------------------------------------------------------

#-------------------------------- zkSNARKs operations -----------------------------------------
proc key_gen*(ctx: ptr RLN, output_buffer: ptr Buffer): bool {.importc: "extended_key_gen".}
## generates identity trapdoor, identity nullifier, identity secret hash and id commitment tuple serialized inside output_buffer as | identity_trapdoor<32> | identity_nullifier<32> | identity_secret_hash<32> | id_commitment<32> |
## identity secret hash is the poseidon hash of [identity_trapdoor, identity_nullifier]
## id commitment is the poseidon hash of the identity secret hash
## the return bool value indicates the success or failure of the operation

proc seeded_key_gen*(ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer): bool {.importc: "seeded_extended_key_gen".}
## generates identity trapdoor, identity nullifier, identity secret hash and id commitment tuple serialized inside output_buffer as | identity_trapdoor<32> | identity_nullifier<32> | identity_secret_hash<32> | id_commitment<32> | using ChaCha20
## seeded with an arbitrarily long seed serialized in input_buffer
## The input seed provided by the user is hashed using Keccak256 before being passed to ChaCha20 as seed.
## identity secret hash is the poseidon hash of [identity_trapdoor, identity_nullifier]
## id commitment is the poseidon hash of the identity secret hash
## the return bool value indicates the success or failure of the operation

proc generate_proof*(ctx: ptr RLN,
                     input_buffer: ptr Buffer,
                     output_buffer: ptr Buffer): bool {.importc: "generate_rln_proof".}
## input_buffer has to be serialized as [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]
## integers wrapped in <> indicate value sizes in bytes
## the return bool value indicates the success or failure of the operation
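# Worked size check for the generate_proof layout above: with a 5-byte
# signal the input buffer is 32 + 8 + 32 + 8 + 5 = 85 bytes, and the output
# is always 128 + 6*32 = 320 bytes, matching the fixed 320-byte parse in
# the higher-level proofGen of the rln/wrappers module below.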

proc verify*(ctx: ptr RLN,
             proof_buffer: ptr Buffer,
             proof_is_valid_ptr: ptr bool): bool {.importc: "verify_rln_proof".}
## proof_buffer has to be serialized as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
## the return bool value indicates the success or failure of the call to the verify function
## the verification result of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure

proc verify_with_roots*(ctx: ptr RLN,
                        proof_buffer: ptr Buffer,
                        roots_buffer: ptr Buffer,
                        proof_is_valid_ptr: ptr bool): bool {.importc: "verify_with_roots".}
## proof_buffer has to be serialized as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
## roots_buffer contains the concatenation of 32-byte little-endian serializations of root values
## the return bool value indicates the success or failure of the call to the verify function
## the verification result of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure

proc zk_prove*(ctx: ptr RLN,
               input_buffer: ptr Buffer,
               output_buffer: ptr Buffer): bool {.importc: "prove".}
## Computes the zkSNARK proof and stores it in output_buffer for input values stored in input_buffer
## input_buffer is serialized as input_data as [ id_key<32> | path_elements<Vec<32>> | identity_path_index<Vec<1>> | x<32> | epoch<32> | rln_identifier<32> ]
## output_buffer holds the proof data and should be parsed as [ proof<128> ]
## path_elements and identity_path_index serialize a merkle proof for id_key and are vectors of elements of 32 and 1 bytes, respectively (notation: Vec<>)
## x is the x coordinate of the Shamir secret share for which the proof is computed
## epoch is the input epoch (equivalently, the nullifier)
## the return bool value indicates the success or failure of the operation

proc zk_verify*(ctx: ptr RLN,
                proof_buffer: ptr Buffer,
                proof_is_valid_ptr: ptr bool): bool {.importc: "verify".}
## Verifies the zkSNARK proof passed in proof_buffer
## input_buffer is serialized as input_data as [ proof<128> ]
## the verification result of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure
## the return bool value indicates the success or failure of the operation

#----------------------------------------------------------------------------------------------

#-------------------------------- Common procedures -------------------------------------------
proc new_circuit*(tree_height: uint, input_buffer: ptr Buffer, ctx: ptr (ptr RLN)): bool {.importc: "new".}
## creates an instance of rln object as defined by the zerokit RLN lib
## tree_height represents the depth of the Merkle tree
## input_buffer contains a serialization of the path where the circuit resources can be found (.r1cs, .wasm, .zkey and optionally the verification_key.json)
## ctx holds the final created rln object
## the return bool value indicates the success or failure of the operation

proc new_circuit_from_data*(tree_height: uint, circom_buffer: ptr Buffer, zkey_buffer: ptr Buffer, vk_buffer: ptr Buffer, ctx: ptr (ptr RLN)): bool {.importc: "new_with_params".}
## creates an instance of rln object as defined by the zerokit RLN lib by passing the required inputs as byte arrays
## tree_height represents the depth of the Merkle tree
## circom_buffer contains the bytes read from the Circom .wasm circuit
## zkey_buffer contains the bytes read from the .zkey proving key
## vk_buffer contains the bytes read from the verification_key.json
## ctx holds the final created rln object
## the return bool value indicates the success or failure of the operation

proc hash*(ctx: ptr RLN,
           input_buffer: ptr Buffer,
           output_buffer: ptr Buffer): bool {.importc: "hash".}
## it hashes (sha256) the plain text supplied in input_buffer and then maps it to a field element
## this proc is used to map arbitrary signals to a field element for the sake of proof generation
## input_buffer holds the hash input as a byte seq
## the hash output is generated and populated inside output_buffer
## the output_buffer contains the 32-byte hash output

@ -0,0 +1,235 @@
import
  chronicles,
  options,
  stew/[arrayops, results],
  nimcrypto/utils

import
  ./rln_interface,
  ../conversion_utils,
  ../protocol_types,
  ../protocol_metrics,
  ../constants
import
  ../../../utils/time

logScope:
  topics = "waku rln_relay ffi"

proc membershipKeyGen*(ctxPtr: ptr RLN): RlnRelayResult[IdentityCredential] =
  ## generates an IdentityCredential that can be used for the registration into the rln membership contract
  ## Returns an error if the key generation fails

  # keysBufferPtr will hold the generated identity tuple i.e., trapdoor, nullifier, secret hash and commitment
  var
    keysBuffer: Buffer
    keysBufferPtr = addr(keysBuffer)
    done = key_gen(ctxPtr, keysBufferPtr)

  # check whether the keys are generated successfully
  if(done == false):
    return err("error in key generation")

  var generatedKeys = cast[ptr array[4*32, byte]](keysBufferPtr.`ptr`)[]
  # the four 32-byte components of the identity tuple add up to 4*32 bytes
  if (generatedKeys.len != 4*32):
    return err("generated keys are of invalid length")

  # TODO define a separate proc to decode the generated keys to the secret and public components
  var
    idTrapdoor: array[32, byte]
    idNullifier: array[32, byte]
    idSecretHash: array[32, byte]
    idCommitment: array[32, byte]
  for (i, x) in idTrapdoor.mpairs: x = generatedKeys[i+0*32]
  for (i, x) in idNullifier.mpairs: x = generatedKeys[i+1*32]
  for (i, x) in idSecretHash.mpairs: x = generatedKeys[i+2*32]
  for (i, x) in idCommitment.mpairs: x = generatedKeys[i+3*32]

  var
    identityCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)

  return ok(identityCredential)

proc createRLNInstanceLocal*(d: int = MerkleTreeDepth): RLNResult =
  ## generates an instance of RLN
  ## An RLN instance supports both the zkSNARKs logic and the Merkle tree data structure and operations
  ## d indicates the depth of the Merkle tree
  ## Returns an error if the instance creation fails
  var
    rlnInstance: ptr RLN
    merkleDepth: csize_t = uint(d)
    resourcesPathBuffer = RlnResourceFolder.toOpenArrayByte(0, RlnResourceFolder.high).toBuffer()

  # create an instance of RLN
  let res = new_circuit(merkleDepth, addr resourcesPathBuffer, addr rlnInstance)
  # check whether the circuit parameters are generated successfully
  if (res == false):
    debug "error in parameters generation"
    return err("error in parameters generation")
  return ok(rlnInstance)

proc createRLNInstance*(d: int = MerkleTreeDepth): RLNResult =
  ## Wraps the rln instance creation for metrics
  ## Returns an error if the instance creation fails
  var res: RLNResult
  waku_rln_instance_creation_duration_seconds.nanosecondTime:
    res = createRLNInstanceLocal(d)
  return res
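# A minimal sketch tying the procs above together (inHex comes from
# ../conversion_utils; the logging calls assume the chronicles import):
#
#   let rlnRes = createRLNInstance()
#   if rlnRes.isErr():
#     error "could not create RLN instance", err = rlnRes.error()
#   else:
#     let rln = rlnRes.get()
#     let credRes = membershipKeyGen(rln)
#     if credRes.isOk():
#       debug "generated credential",
#         idCommitment = credRes.get().idCommitment.inHex()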

proc hash*(rlnInstance: ptr RLN, data: openArray[byte]): MerkleNode =
  ## a thin layer on top of the Nim wrapper of the Poseidon hasher
  debug "hash input", hashhex = data.toHex()
  var lenPrefData = appendLength(data)
  var
    hashInputBuffer = lenPrefData.toBuffer()
    outputBuffer: Buffer # will hold the hash output

  debug "hash input buffer length", bufflen = hashInputBuffer.len
  let
    hashSuccess = hash(rlnInstance, addr hashInputBuffer, addr outputBuffer)

  # check whether the hash call is done successfully
  if not hashSuccess:
    debug "error in hash"
    return default(MerkleNode)

  let
    output = cast[ptr MerkleNode](outputBuffer.`ptr`)[]

  return output
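# Usage sketch: map an application signal to a field element before proof
# generation (toBytes here is assumed from stew/byteutils; appendLength,
# used internally above, is imported from ../conversion_utils):
#
#   let signal = "some message".toBytes()
#   let signalHash: MerkleNode = rln.hash(signal)  # `rln: ptr RLN` assumed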

proc proofGen*(rlnInstance: ptr RLN, data: openArray[byte],
               memKeys: IdentityCredential, memIndex: MembershipIndex,
               epoch: Epoch): RateLimitProofResult =

  # serialize inputs
  let serializedInputs = serialize(idSecretHash = memKeys.idSecretHash,
                                   memIndex = memIndex,
                                   epoch = epoch,
                                   msg = data)
  var inputBuffer = toBuffer(serializedInputs)

  debug "input buffer ", inputBuffer = repr(inputBuffer)

  # generate the proof
  var proof: Buffer
  let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
  # check whether the generate_proof call is done successfully
  if not proofIsSuccessful:
    return err("could not generate the proof")

  var proofValue = cast[ptr array[320, byte]](proof.`ptr`)
  let proofBytes: array[320, byte] = proofValue[]
  debug "proof content", proofHex = proofValue[].toHex

  ## parse the proof as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]

  let
    proofOffset = 128
    rootOffset = proofOffset + 32
    epochOffset = rootOffset + 32
    shareXOffset = epochOffset + 32
    shareYOffset = shareXOffset + 32
    nullifierOffset = shareYOffset + 32
    rlnIdentifierOffset = nullifierOffset + 32

  var
    zkproof: ZKSNARK
    proofRoot, shareX, shareY: MerkleNode
    epoch: Epoch
    nullifier: Nullifier
    rlnIdentifier: RlnIdentifier

  discard zkproof.copyFrom(proofBytes[0..proofOffset-1])
  discard proofRoot.copyFrom(proofBytes[proofOffset..rootOffset-1])
  discard epoch.copyFrom(proofBytes[rootOffset..epochOffset-1])
  discard shareX.copyFrom(proofBytes[epochOffset..shareXOffset-1])
  discard shareY.copyFrom(proofBytes[shareXOffset..shareYOffset-1])
  discard nullifier.copyFrom(proofBytes[shareYOffset..nullifierOffset-1])
  discard rlnIdentifier.copyFrom(proofBytes[nullifierOffset..rlnIdentifierOffset-1])

  let output = RateLimitProof(proof: zkproof,
                              merkleRoot: proofRoot,
                              epoch: epoch,
                              shareX: shareX,
                              shareY: shareY,
                              nullifier: nullifier,
                              rlnIdentifier: rlnIdentifier)

  return ok(output)

# validRoots should contain a sequence of roots in the acceptable window.
# By default it is set to an empty sequence of roots, which means the validity check of the proof's root is skipped
proc proofVerify*(rlnInstance: ptr RLN,
                  data: openArray[byte],
                  proof: RateLimitProof,
                  validRoots: seq[MerkleNode] = @[]): RlnRelayResult[bool] =
  ## verifies the proof, returns an error if the proof verification fails
  ## returns true if the proof is valid
  var
    proofBytes = serialize(proof, data)
    proofBuffer = proofBytes.toBuffer()
    validProof: bool
    rootsBytes = serialize(validRoots)
    rootsBuffer = rootsBytes.toBuffer()

  trace "serialized proof", proof = proofBytes.toHex()

  let verifyIsSuccessful = verify_with_roots(rlnInstance, addr proofBuffer, addr rootsBuffer, addr validProof)
  if not verifyIsSuccessful:
    # something went wrong in the verification call
    warn "could not verify validity of the proof", proof = proof
    return err("could not verify the proof")

  if not validProof:
    return ok(false)
  else:
    return ok(true)
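# End-to-end sketch of proofGen and proofVerify (assumes a live
# `rln: ptr RLN`, a credential `cred` registered at `index`, a `signal`
# byte seq, and getCurrentEpoch as defined later in this changeset):
#
#   let proofRes = rln.proofGen(data = signal, memKeys = cred,
#                               memIndex = index, epoch = getCurrentEpoch())
#   if proofRes.isOk():
#     # with validRoots left empty the root check is skipped, as noted above
#     let verified = rln.proofVerify(data = signal, proof = proofRes.get())
#     if verified.isOk() and verified.get():
#       debug "rate-limit proof verified"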

proc insertMember*(rlnInstance: ptr RLN, idComm: IDCommitment): bool =
  ## inserts a member to the tree
  ## returns true if the member is inserted successfully
  ## returns false if the member could not be inserted
  var pkBuffer = toBuffer(idComm)
  let pkBufferPtr = addr pkBuffer

  # add the member to the tree
  let memberAdded = update_next_member(rlnInstance, pkBufferPtr)
  return memberAdded

proc insertMembers*(rlnInstance: ptr RLN,
                    index: MembershipIndex,
                    idComms: seq[IDCommitment]): bool =
  ## Inserts multiple members, i.e., identity commitments
  ## returns true if the insertion is successful
  ## returns false if any of the insertions fails
  ## Note: This proc is atomic, i.e., if any of the insertions fails, all the previous insertions are rolled back

  # serialize the idComms
  let idCommsBytes = serializeIdCommitments(idComms)

  var idCommsBuffer = idCommsBytes.toBuffer()
  let idCommsBufferPtr = addr idCommsBuffer
  # add the members to the tree
  let membersAdded = set_leaves_from(rlnInstance, index, idCommsBufferPtr)
  return membersAdded

proc removeMember*(rlnInstance: ptr RLN, index: MembershipIndex): bool =
  let deletion_success = delete_member(rlnInstance, index)
  return deletion_success

proc getMerkleRoot*(rlnInstance: ptr RLN): MerkleNodeResult =
  # read the Merkle Tree root after insertion
  var
    root {.noinit.}: Buffer = Buffer()
    rootPtr = addr(root)
    getRootSuccessful = getRoot(rlnInstance, rootPtr)
  if not getRootSuccessful:
    return err("could not get the root")
  if root.len != 32:
    return err("wrong output size")

  var rootValue = cast[ptr MerkleNode](root.`ptr`)[]
  return ok(rootValue)
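# Batch-insertion sketch: seed the tree and read back the root in hex
# (`commitments: seq[IDCommitment]` is assumed; inHex comes from
# ../conversion_utils):
#
#   if not rln.insertMembers(0, commitments):
#     error "failed to insert members"
#   else:
#     let rootRes = rln.getMerkleRoot()
#     if rootRes.isOk():
#       debug "merkle root", root = rootRes.get().inHex()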

@ -14,9 +14,10 @@ import
  libp2p/protocols/pubsub/rpc/messages,
  libp2p/protocols/pubsub/pubsub,
  stew/results,
-  stew/[byteutils, arrayops, endians2]
+  stew/[byteutils, arrayops]
import
  ./rln,
+  ./conversion_utils,
  ./constants,
  ./protocol_types,
  ./protocol_metrics

@ -57,94 +58,23 @@ contract(MembershipContract):
  # proc withdraw(secret: Uint256, pubkeyIndex: Uint256, receiver: Address)
  # proc withdrawBatch( secrets: seq[Uint256], pubkeyIndex: seq[Uint256], receiver: seq[Address])

-proc toUInt256*(idCommitment: IDCommitment): UInt256 =
-  let pk = UInt256.fromBytesLE(idCommitment)
-  return pk
-
-proc toIDCommitment*(idCommitmentUint: UInt256): IDCommitment =
-  let pk = IDCommitment(idCommitmentUint.toBytesLE())
-  return pk
-
-proc inHex*(value: IdentityTrapdoor or IdentityNullifier or IdentitySecretHash or IDCommitment or MerkleNode or Nullifier or Epoch or RlnIdentifier): string =
-  var valueHex = (UInt256.fromBytesLE(value)).toHex()
+proc inHex*(value:
+    IdentityTrapdoor or
+    IdentityNullifier or
+    IdentitySecretHash or
+    IDCommitment or
+    MerkleNode or
+    Nullifier or
+    Epoch or
+    RlnIdentifier
+    ): string =
+  var valueHex = (UInt256.fromBytesLE(value)).toHex()
  # We pad leading zeroes
  while valueHex.len < value.len * 2:
    valueHex = "0" & valueHex
  return valueHex

-proc toMembershipIndex(v: UInt256): MembershipIndex =
-  let membershipIndex: MembershipIndex = cast[MembershipIndex](v)
-  return membershipIndex
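# Worked example of the inHex padding: value.len is 32 for an IDCommitment,
# so the loop left-pads the stint toHex output with '0' until it reaches
# 32 * 2 = 64 hex characters; a commitment equal to 0x1a2b therefore
# renders as 60 zeroes followed by "1a2b".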

proc register*(idComm: IDCommitment, ethAccountAddress: Option[Address], ethAccountPrivKey: keys.PrivateKey, ethClientAddress: string, membershipContractAddress: Address, registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)): Future[Result[MembershipIndex, string]] {.async.} =
  # TODO may need to also get eth Account Private Key as PrivateKey
  ## registers the idComm into the membership contract whose address is in rlnPeer.membershipContractAddress

@ -216,216 +146,6 @@ proc register*(rlnPeer: WakuRLNRelay, registrationHandler: Option[RegistrationHa
    return err(regResult.error())
  return ok(true)

-proc appendLength*(input: openArray[byte]): seq[byte] =
-  ## returns a length-prefixed version of the input
-  ## with the following format [len<8>|input<var>]
-  ## len: 8-byte value that represents the number of bytes in the `input`
-  ## len is serialized in little-endian
-  ## input: the supplied `input`
-  let
-    # the length should be serialized in little-endian
-    len = toBytes(uint64(input.len), Endianness.littleEndian)
-    output = concat(@len, @input)
-  return output
-
-proc serialize(idSecretHash: IdentitySecretHash, memIndex: MembershipIndex, epoch: Epoch,
-               msg: openArray[byte]): seq[byte] =
-  ## a private proc to convert the proof generation inputs and the data to a byte seq
-  ## this conversion is used in the proofGen proc
-  ## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146
-  ## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal<var> ]
-  let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian)
-  let lenPrefMsg = appendLength(msg)
-  let output = concat(@idSecretHash, @memIndexBytes, @epoch, lenPrefMsg)
-  return output
-
-proc serialize(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
-  ## a private proc to convert a RateLimitProof and data to a byte seq
-  ## this conversion is used in the proof verification proc
-  ## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal<var> ]
-  let lenPrefMsg = appendLength(@data)
-  var proofBytes = concat(@(proof.proof),
-                          @(proof.merkleRoot),
-                          @(proof.epoch),
-                          @(proof.shareX),
-                          @(proof.shareY),
-                          @(proof.nullifier),
-                          @(proof.rlnIdentifier),
-                          lenPrefMsg)
-
-  return proofBytes
-
-# Serializes a sequence of MerkleNodes
-proc serialize(roots: seq[MerkleNode]): seq[byte] =
-  var rootsBytes: seq[byte] = @[]
-  for root in roots:
-    rootsBytes = concat(rootsBytes, @root)
-  return rootsBytes
-
-proc serializeIdCommitments*(idComms: seq[IDCommitment]): seq[byte] =
-  ## serializes a seq of IDCommitments to a byte seq
-  ## the serialization is based on https://github.com/status-im/nwaku/blob/37bd29fbc37ce5cf636734e7dd410b1ed27b88c8/waku/v2/protocol/waku_rln_relay/rln.nim#L142
-  ## the order of serialization is |id_commitment_len<8>|id_commitment<var>|
-  var idCommsBytes = newSeq[byte]()
-
-  # serialize the idComms, with the length prefixed
-  let len = toBytes(uint64(idComms.len), Endianness.littleEndian)
-  idCommsBytes.add(len)
-
-  for idComm in idComms:
-    idCommsBytes = concat(idCommsBytes, @idComm)
-
-  return idCommsBytes

proc updateValidRootQueue*(wakuRlnRelay: WakuRLNRelay, root: MerkleNode): void =
  ## updates the valid Merkle root queue with the latest root and pops the oldest one when the capacity of `AcceptableRootWindowSize` is reached
  let overflowCount = wakuRlnRelay.validMerkleRoots.len() - AcceptableRootWindowSize

@ -468,50 +188,6 @@ proc validateRoot*(wakuRlnRelay: WakuRLNRelay, root: MerkleNode): bool =
  ## Validate against the window of roots stored in wakuRlnRelay.validMerkleRoots
  return root in wakuRlnRelay.validMerkleRoots

-# Converts a sequence of tuples containing 4 strings (i.e. identity trapdoor, nullifier, secret hash and commitment) to an IdentityCredential
-proc toIdentityCredentials*(groupKeys: seq[(string, string, string, string)]): RlnRelayResult[seq[
-    IdentityCredential]] =
-  ## groupKeys is a sequence of membership key tuples in the form of (identity trapdoor, nullifier, secret hash, commitment), all in hexadecimal format
-  ## the toIdentityCredentials proc populates a sequence of IdentityCredentials using the supplied groupKeys
-  ## Returns an error if the conversion fails
-
-  var groupIdCredentials = newSeq[IdentityCredential]()
-
-  for i in 0..groupKeys.len-1:
-    try:
-      let
-        idTrapdoor = hexToUint[IdentityTrapdoor.len*8](groupKeys[i][0]).toBytesLE()
-        idNullifier = hexToUint[IdentityNullifier.len*8](groupKeys[i][1]).toBytesLE()
-        idSecretHash = hexToUint[IdentitySecretHash.len*8](groupKeys[i][2]).toBytesLE()
-        idCommitment = hexToUint[IDCommitment.len*8](groupKeys[i][3]).toBytesLE()
-      groupIdCredentials.add(IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash,
-          idCommitment: idCommitment))
-    except ValueError as err:
-      warn "could not convert the group key to bytes", err = err.msg
-      return err("could not convert the group key to bytes: " & err.msg)
-  return ok(groupIdCredentials)
-
-# Converts a sequence of tuples containing 2 strings (i.e. identity secret hash and commitment) to an IdentityCredential
-proc toIdentityCredentials*(groupKeys: seq[(string, string)]): RlnRelayResult[seq[
-    IdentityCredential]] =
-  ## groupKeys is a sequence of membership key tuples in the form of (identity secret hash, identity commitment), all in hexadecimal format
-  ## the toIdentityCredentials proc populates a sequence of IdentityCredentials using the supplied groupKeys
-  ## Returns an error if the conversion fails
-
-  var groupIdCredentials = newSeq[IdentityCredential]()
-
-  for i in 0..groupKeys.len-1:
-    try:
-      let
-        idSecretHash = hexToUint[IdentitySecretHash.len*8](groupKeys[i][0]).toBytesLE()
-        idCommitment = hexToUint[IDCommitment.len*8](groupKeys[i][1]).toBytesLE()
-      groupIdCredentials.add(IdentityCredential(idSecretHash: idSecretHash,
-          idCommitment: idCommitment))
-    except ValueError as err:
-      warn "could not convert the group key to bytes", err = err.msg
-      return err("could not convert the group key to bytes: " & err.msg)
-  return ok(groupIdCredentials)

proc calcMerkleRoot*(list: seq[IDCommitment]): RlnRelayResult[string] =
  ## returns the root of the Merkle tree that is computed from the supplied list
  ## the root is in hexadecimal format

@ -604,6 +280,12 @@ proc rlnRelayStaticSetUp*(rlnRelayMembershipIndex: MembershipIndex): RlnRelayRes

  return ok((groupIDCommitmentsOpt, groupIdCredentialsOpt, memIndexOpt))

+proc calcEpoch*(t: float64): Epoch =
+  ## gets time `t` as `float64` with subseconds resolution in the fractional part
+  ## and returns its corresponding rln `Epoch` value
+  let e = uint64(t/EpochUnitSeconds)
+  return toEpoch(e)
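# Worked example for calcEpoch, assuming EpochUnitSeconds = 10 seconds (the
# concrete value is defined in ./constants): t = 1_670_000_123.4 gives
# e = uint64(1_670_000_123.4 / 10) = 167_000_012, which toEpoch then
# serializes little-endian into the 32-byte Epoch array.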

proc hasDuplicate*(rlnPeer: WakuRLNRelay, msg: WakuMessage): RlnRelayResult[bool] =
  ## returns true if there is another message in the `nullifierLog` of the `rlnPeer` with the same
  ## epoch and nullifier as `msg`'s epoch and nullifier but different Shamir secret shares

@ -680,25 +362,6 @@ proc updateLog*(rlnPeer: WakuRLNRelay, msg: WakuMessage): RlnRelayResult[bool] =
|
||||||
except KeyError as e:
|
except KeyError as e:
|
||||||
return err("the epoch was not found")
|
return err("the epoch was not found")
|
||||||
|
|
||||||
proc toEpoch*(t: uint64): Epoch =
|
|
||||||
## converts `t` to `Epoch` in little-endian order
|
|
||||||
let bytes = toBytes(t, Endianness.littleEndian)
|
|
||||||
debug "bytes", bytes = bytes
|
|
||||||
var epoch: Epoch
|
|
||||||
discard epoch.copyFrom(bytes)
|
|
||||||
return epoch
|
|
||||||
|
|
||||||
proc fromEpoch*(epoch: Epoch): uint64 =
  ## decodes bytes of `epoch` (in little-endian) to uint64
  let t = fromBytesLE(uint64, array[32, byte](epoch))
  return t

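The two codecs are inverses, so a round trip should be lossless:

  let e = toEpoch(167300000'u64)           # 8 little-endian bytes into a zeroed Epoch
  doAssert fromEpoch(e) == 167300000'u64   # decodes back to the original value
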
proc calcEpoch*(t: float64): Epoch =
  ## gets time `t` as `float64` with subseconds resolution in the fractional part
  ## and returns its corresponding rln `Epoch` value
  let e = uint64(t/EpochUnitSeconds)
  return toEpoch(e)

proc getCurrentEpoch*(): Epoch =
  ## gets the current rln Epoch time
  return calcEpoch(epochTime())

@@ -1000,8 +663,8 @@ proc handleGroupUpdates*(rlnPeer: WakuRLNRelay) {.async, gcsafe.} =

proc addRLNRelayValidator*(wakuRlnRelay: WakuRLNRelay,
                           wakuRelay: WakuRelay,
                           pubsubTopic: PubsubTopic,
                           contentTopic: ContentTopic,
                           spamHandler: Option[SpamHandler] = none(SpamHandler)) =
  ## this procedure is a thin wrapper for the pubsub addValidator method
  ## it sets a validator for the waku messages published on the supplied pubsubTopic and contentTopic
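
A hedged usage sketch, assuming `SpamHandler` is a closure taking a `WakuMessage`; the content topic string and the handler body are placeholders (the pubsub topic shown is the conventional default Waku topic):

  let handler: SpamHandler = proc(msg: WakuMessage) {.gcsafe, closure.} =
    debug "spam message detected and dropped"  # placeholder reaction

  wakuRlnRelay.addRLNRelayValidator(wakuRelay,
                                    pubsubTopic = "/waku/2/default-waku/proto",
                                    contentTopic = "/toy-chat/2/example/proto",
                                    spamHandler = some(handler))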
@@ -1220,7 +883,7 @@ proc mount(wakuRelay: WakuRelay,
           registrationHandler: Option[RegistrationHandler] = none(RegistrationHandler)
           ): Future[RlnRelayResult[WakuRlnRelay]] {.async.} =

  if not conf.rlnRelayDynamic:
    info " setting up waku-rln-relay in off-chain mode... "
    # set up rln relay inputs
    let staticSetupRes = rlnRelayStaticSetUp(MembershipIndex(conf.rlnRelayMembershipIndex))

@@ -1233,11 +896,11 @@ proc mount(wakuRelay: WakuRelay,
    else:
      # mount rlnrelay in off-chain mode with a static group of users
      let mountRes = mountRlnRelayStatic(wakuRelay,
                                         group = groupOpt.get(),
                                         memIdCredential = idCredentialOpt.get(),
                                         memIndex = memIndexOpt.get(),
                                         pubsubTopic = conf.rlnRelayPubsubTopic,
                                         contentTopic = conf.rlnRelayContentTopic,
                                         spamHandler = spamHandler)

      if mountRes.isErr():

@@ -1325,26 +988,26 @@ proc mount(wakuRelay: WakuRelay,
      if credentials.isSome():
        # mount rln-relay in on-chain mode, with credentials that were read or generated
        rlnRelayRes = await mountRlnRelayDynamic(wakuRelay,
                                                 memContractAddr = ethMemContractAddress,
                                                 ethClientAddr = ethClientAddr,
                                                 ethAccountAddress = ethAccountAddressOpt,
                                                 ethAccountPrivKeyOpt = ethAccountPrivKeyOpt,
                                                 pubsubTopic = conf.rlnRelayPubsubTopic,
                                                 contentTopic = conf.rlnRelayContentTopic,
                                                 spamHandler = spamHandler,
                                                 registrationHandler = registrationHandler,
                                                 memIdCredential = some(credentials.get().identityCredential),
                                                 memIndex = some(credentials.get().rlnIndex))
      else:
        # mount rln-relay in on-chain mode, with the provided private key
        rlnRelayRes = await mountRlnRelayDynamic(wakuRelay,
                                                 memContractAddr = ethMemContractAddress,
                                                 ethClientAddr = ethClientAddr,
                                                 ethAccountAddress = ethAccountAddressOpt,
                                                 ethAccountPrivKeyOpt = ethAccountPrivKeyOpt,
                                                 pubsubTopic = conf.rlnRelayPubsubTopic,
                                                 contentTopic = conf.rlnRelayContentTopic,
                                                 spamHandler = spamHandler,
                                                 registrationHandler = registrationHandler)

        persistCredentials = true

@@ -1354,13 +1017,13 @@ proc mount(wakuRelay: WakuRelay,
      # a new credential will be generated during the mount process but will not be persisted
      info "no need to persist or use a persisted rln-relay credential"
      rlnRelayRes = await mountRlnRelayDynamic(wakuRelay,
                                               memContractAddr = ethMemContractAddress,
                                               ethClientAddr = ethClientAddr,
                                               ethAccountAddress = ethAccountAddressOpt,
                                               ethAccountPrivKeyOpt = ethAccountPrivKeyOpt,
                                               pubsubTopic = conf.rlnRelayPubsubTopic,
                                               contentTopic = conf.rlnRelayContentTopic,
                                               spamHandler = spamHandler,
                                               registrationHandler = registrationHandler)

    if rlnRelayRes.isErr():

@@ -1368,7 +1031,7 @@ proc mount(wakuRelay: WakuRelay,
    let wakuRlnRelay = rlnRelayRes.get()
    if persistCredentials:
      # persist rln credential
      credentials = some(RlnMembershipCredentials(rlnIndex: wakuRlnRelay.membershipIndex,
                                                  identityCredential: wakuRlnRelay.identityCredential))
      if writeRlnCredentials(rlnRelayCredPath, credentials.get(), conf.rlnRelayCredentialsPassword).isErr():
        return err("error in storing rln credentials")

@@ -1393,11 +1056,11 @@ proc new*(T: type WakuRlnRelay,
    return err("The relay protocol does not support the configured pubsub topic")

  debug "rln-relay input validation passed"
  waku_rln_relay_mounting_duration_seconds.nanosecondTime:
    let rlnRelayRes = await mount(
      wakuRelay,
      conf,
      spamHandler,
      registrationHandler
    )
  return rlnRelayRes
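
A hedged sketch of calling the constructor, assuming its parameters mirror the `mount` call it forwards to above:

  # sketch; parameter names are assumed from the mount call above
  let rlnRelayRes = await WakuRlnRelay.new(wakuRelay,
                                           conf,
                                           spamHandler = none(SpamHandler),
                                           registrationHandler = none(RegistrationHandler))
  if rlnRelayRes.isErr():
    error "failed to mount waku rln-relay", err = rlnRelayRes.error()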