Integrates proof generation and verification into wakunode2 (#735)

* WIP

* WIP: fixes a bug

* adds test for static group formation

* adds static group creation when rln-relay is enabled

* adds createStatic group

* wip: adds group formation to mount rlnrelay

* adds createMembershipList utility function

* adds doc strings and todos

* cleans up the code and adds comments

* defaults createRLNInstance depth argument to 32

* renames Depth

* distinguishes between onchain and offchain modes

* updates index boundaries

* updates log levels

* updates docstring

* updates log level of displayed membership keys

* relocates a todo

* activates all the tests

* fixes some comments and todos

* extracts some utils procs for better debugging

* adds todo

* moves calculateMerkleRoot and toMembershipKeyPairs to the rln utils

* makes calls to the utils functions

* adds unit test for createMembershipList

* adds unittest for toMembershipKeyPairs and calcMerkleRoot

* cleans up the code and fixes tree root value

* reverts an unwanted change

* minor

* adds comments and cleans up the code

* updates config message

* adds more comments

* fixes a minor value mismatch

* edits the size of group

* minor rewording

* defines a const var for the group keys

* replaces the sequence literal with the StaticGroupKeys const

* adds a rudimentary unittest

* adds todos

* adds more comments

* replaces uint with MembershipIndex type

* fixes rln relay mem index config message

* adds rln relay setup proc

* decouples relay and rln-relay

* uses MemIndexType instead of uint

* brings back the rlnRelayEnabled flag to mountRlnRelay

* deletes commented codes

* adds rln relay topic validator inside the rln relay mounting procedure

* adds rln-relay-pubsub-topic cli option

* adds a static rln-relay topic

* deletes rlnrelayEnabled argument

* adds pubsub topic for rln-relay

* deletes static pubsub topic

* mounts relay before rlnrelay in the tests

* logs rln relay pubsub topic

* cleans up the code

* edits rlnrelay setup

* uninitializes the input parameter of rlnrelay setup

* adds comments

* removes unused comments

* compiles addRLNRelayValidator when the RLN compilation flag is set

* adds comment about topic validator

* minor

* more modifications to the description of add validator

* adds pubsubtopic field to wakuRlnRelay type

* WIP: shaping the test

* Checks whether rln relay pubsub topic is within the supported topics of relay protocol

* minor

* WIP: unit test for actual proof

* fixes a bug

* removes a redundant proc

* refines the test for actual proof

* breaks lines to 80 chars

* defines NonSpamProof type

* adds a return

* defines Epoch type

* WIP: proof gen

* implements actual proof gen

* adds proto enc and init

* adds notes about proof structure

* adds NonSpamProof to wakumessage

* adds proof gen

* WIP: non working tests for protobuf

* fixes the protobuf encoding issue

* discards the output of copyFrom

* WIP: hash unittest and proofVrfy and ProofGen

* integrates proofVrfy

* uses toBuffer inside the hash proc

* adds comment

* fixes a bug

* removes proof field initialization

* cleans up the test

* generalizes input from byte seq to byte openArray

* adds toBuffer

* adds a bad test

* cleans up unused tests

* adds integration test

* adds comments

* cleans up

* adds description to the integration test

* adds test for unhappy path

* tidies up the tests

* tidies up the hash unit test

* renames a few var

* uses a const for the waku rln relay pubsub topic

* minor refinement

* deletes an obsolete comment

* comment revision

* adds comments

* cleans up and adds docstrings

* proofGen returns proofRes instead of proof

* removes extra sleepAsync

* fixes two bugs

* returns reject when proof is not verified

* addresses comments

* adds comments

* links to rln doc

* more comments

* fixes space format

* uncomments v2 tests

* dnsclient branch update

* undo branch update

* minor spacing fix

* makes proof field conditional
Sanaz Taheri Boshrooyeh 2021-10-19 17:37:29 -07:00 committed by GitHub
parent 165e235158
commit 6efba0dc56
8 changed files with 568 additions and 68 deletions

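For orientation before the diff: the publish path added here generates a RateLimitProof via the rln wrappers and attaches it to the WakuMessage before relaying, while receiving peers verify the proof inside a pubsub topic validator. A condensed, illustrative sketch of that flow follows; proc and field names are those added in this diff, and it assumes a node, a 32-byte payload, an epoch, a contentTopic, and rlnRelayPubSubTopic set up as in the tests below, with the caller running in an async context.

when defined(rln):
  # node has waku-rln-relay mounted in off-chain mode (see mountRlnRelay in the diff)
  let rlnPeer = node.wakuRlnRelay
  # generate a rate-limit (non-spam) proof over the payload for the current epoch
  let proofRes = rlnPeer.rlnInstance.proofGen(data = payload,
                                              memKeys = rlnPeer.membershipKeyPair,
                                              memIndex = rlnPeer.membershipIndex,
                                              epoch = epoch)
  if proofRes.isOk():
    # attach the proof; relaying peers check it in the topic validator
    # installed by addRLNRelayValidator during mountRlnRelay
    let msg = WakuMessage(payload: @payload,
                          contentTopic: contentTopic,
                          proof: proofRes.value)
    await node.publish(rlnRelayPubSubTopic, msg)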

@ -305,9 +305,8 @@ suite "time-window history query":
let
version = 0'u32
payload = @[byte 0, 1, 2]
proof = @[byte 0, 1, 2, 3]
timestamp = float64(10)
msg = WakuMessage(payload: payload, version: version, proof: proof, timestamp: timestamp)
msg = WakuMessage(payload: payload, version: version, timestamp: timestamp)
pb = msg.encode()
# Decoding
@ -327,8 +326,7 @@ suite "time-window history query":
let
version = 0'u32
payload = @[byte 0, 1, 2]
proof = @[byte 0, 1, 2, 3]
msg = WakuMessage(payload: payload, version: version, proof: proof)
msg = WakuMessage(payload: payload, version: version)
pb = msg.encode()
# Decoding


@ -267,6 +267,8 @@ procSuite "Waku rln relay":
# create a group of 100 membership keys
let
(groupKeys, root) = createMembershipList(100)
check groupKeys.len == 100
let
# convert the keys to MembershipKeyPair structs
groupKeyPairs = groupKeys.toMembershipKeyPairs()
# extract the id commitments
@ -590,6 +592,22 @@ suite "Waku rln relay":
debug "hash output", hashOutputHex
test "hash utils":
# create an RLN instance
var rlnInstance = createRLNInstance()
check:
rlnInstance.isOk == true
let rln = rlnInstance.value
# prepare the input
# TODO should add support for arbitrary messages, the following input is artificial
var hashInput : array[32, byte]
for x in hashInput.mitems: x = 1
debug "sample_hash_input_bytes", hashInputHex=hashInput.toHex()
let hash = rln.hash(hashInput)
doAssert("53a6338cdbf02f0563cec1898e354d0d272c8f98b606c538945c6f41ef101828" == hash.toHex())
test "generate_proof and verify Nim Wrappers":
# create an RLN instance
@ -633,7 +651,7 @@ suite "Waku rln relay":
var epochBytes : array[32,byte]
for x in epochBytes.mitems : x = 0
var epochHex = epochBytes.toHex()
debug "epoch in bytes", epochHex
debug "epoch", epochHex
# serialize message and epoch
@ -732,4 +750,145 @@ suite "Waku rln relay":
# check that the correct number of key pairs is created
groupKeyPairs.len == StaticGroupSize
# compare the calculated root against the correct root
root == STATIC_GROUP_MERKLE_ROOT
root == STATIC_GROUP_MERKLE_ROOT
test "RateLimitProof Protobuf encode/init test":
var
proof: ZKSNARK
merkleRoot: MerkleNode
epoch: Epoch
shareX: MerkleNode
shareY: MerkleNode
nullifier: Nullifier
# populate fields with dummy values
for x in proof.mitems : x = 1
for x in merkleRoot.mitems : x = 2
for x in epoch.mitems : x = 3
for x in shareX.mitems : x = 4
for x in shareY.mitems : x = 5
for x in nullifier.mitems : x = 6
let
nsp = RateLimitProof(proof: proof,
merkleRoot: merkleRoot,
epoch: epoch,
shareX: shareX,
shareY: shareY,
nullifier: nullifier)
protobuf = nsp.encode()
decodednsp = RateLimitProof.init(protobuf.buffer)
check:
decodednsp.isErr == false
decodednsp.value == nsp
test "test proofVerify and proofGen for a valid proof":
var rlnInstance = createRLNInstance()
check:
rlnInstance.isOk == true
var rln = rlnInstance.value
let
# create a membership key pair
memKeys = membershipKeyGen(rln).get()
# peer's index in the Merkle Tree
index = 5
# Create a Merkle tree with random members
for i in 0..10:
var member_is_added: bool = false
if (i == index):
# insert the current peer's pk
member_is_added = rln.insertMember(memKeys.idCommitment)
else:
# create a new key pair
let memberKeys = rln.membershipKeyGen()
member_is_added = rln.insertMember(memberKeys.get().idCommitment)
# check the member is added
doAssert(member_is_added)
# prepare the message
# TODO this message format is artificial (to bypass the Poseidon hasher issue)
# TODO in practice we should be able to pick messages of arbitrary size and format
var messageBytes {.noinit.}: array[32, byte]
for x in messageBytes.mitems: x = 1
debug "message", messageHex=messageBytes.toHex()
# prepare the epoch
var epoch : Epoch
for x in epoch.mitems : x = 0
debug "epoch", epochHex=epoch.toHex()
# hash the message
let msgHash = rln.hash(messageBytes)
debug "message hash", mh=byteutils.toHex(msgHash)
# generate proof
let proofRes = rln.proofGen(data = msgHash,
memKeys = memKeys,
memIndex = MembershipIndex(index),
epoch = epoch)
doAssert(proofRes.isOk())
let proof = proofRes.value
# verify the proof
let verified = rln.proofVerify(data = messageBytes,
proof = proof)
check verified == true
test "test proofVerify and proofGen for an invalid proof":
var rlnInstance = createRLNInstance()
check:
rlnInstance.isOk == true
var rln = rlnInstance.value
let
# create a membership key pair
memKeys = membershipKeyGen(rln).get()
# peer's index in the Merkle Tree
index = 5
# Create a Merkle tree with random members
for i in 0..10:
var member_is_added: bool = false
if (i == index):
# insert the current peer's pk
member_is_added = rln.insertMember(memKeys.idCommitment)
else:
# create a new key pair
let memberKeys = rln.membershipKeyGen()
member_is_added = rln.insertMember(memberKeys.get().idCommitment)
# check the member is added
doAssert(member_is_added)
# prepare the message
# TODO this message format is artificial (to bypass the Poseidon hasher issue)
# TODO in practice we should be able to pick messages of arbitrary size and format
var messageBytes {.noinit.}: array[32, byte]
for x in messageBytes.mitems: x = 1
debug "message", messageHex=messageBytes.toHex()
# prepare the epoch
var epoch : Epoch
for x in epoch.mitems : x = 0
debug "epoch in bytes", epochHex=epoch.toHex()
# hash the message
let msgHash = rln.hash(messageBytes)
debug "message hash", mh=byteutils.toHex(msgHash)
let badIndex = 4
# generate proof
let proofRes = rln.proofGen(data = msgHash,
memKeys = memKeys,
memIndex = MembershipIndex(badIndex),
epoch = epoch)
doAssert(proofRes.isOk())
let proof = proofRes.value
# verify the proof (should not be verified)
let verified = rln.proofVerify(data = messageBytes,
proof = proof)
check verified == false


@ -23,6 +23,11 @@ import
../../waku/v2/node/wakunode2,
../test_helpers
when defined(rln):
import ../../waku/v2/protocol/waku_rln_relay/[waku_rln_relay_utils, waku_rln_relay_types]
const RLNRELAY_PUBSUB_TOPIC = "waku/2/rlnrelay/proto"
procSuite "WakuNode":
let rng = keys.newRng()
asyncTest "Message published with content filter is retrievable":
@ -581,7 +586,7 @@ procSuite "WakuNode":
await node3.stop()
when defined(rln):
asyncTest "testing rln-relay with mocked zkp":
asyncTest "testing rln-relay with valid proof":
let
# publisher node
@ -594,39 +599,85 @@ procSuite "WakuNode":
nodeKey3 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node3 = WakuNode.new(nodeKey3, ValidIpAddress.init("0.0.0.0"), Port(60003))
pubSubTopic = "defaultTopic"
contentTopic1 = ContentTopic("/waku/2/default-content/proto")
payload = "hello world".toBytes()
message1 = WakuMessage(payload: payload, contentTopic: contentTopic1)
rlnRelayPubSubTopic = RLNRELAY_PUBSUB_TOPIC
contentTopic = ContentTopic("/waku/2/default-content/proto")
# start all the nodes
await node1.start()
node1.mountRelay(@[pubSubTopic])
# set up three nodes
# node1
node1.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt1, memKeyPairOpt1, memIndexOpt1) = rlnRelaySetUp(1) # set up rln relay inputs
# mount rlnrelay in off-chain mode
waitFor node1.mountRlnRelay(groupOpt = groupOpt1,
memKeyPairOpt = memKeyPairOpt1,
memIndexOpt= memIndexOpt1,
onchainMode = false,
pubsubTopic = rlnRelayPubSubTopic)
await node1.start()
# node 2
node2.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt2, memKeyPairOpt2, memIndexOpt2) = rlnRelaySetUp(2) # set up rln relay inputs
# mount rlnrelay in off-chain mode
waitFor node2.mountRlnRelay(groupOpt = groupOpt2,
memKeyPairOpt = memKeyPairOpt2,
memIndexOpt= memIndexOpt2,
onchainMode = false,
pubsubTopic = rlnRelayPubSubTopic)
await node2.start()
node2.mountRelay(@[pubSubTopic])
node2.addRLNRelayValidator(pubSubTopic)
# node 3
node3.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt3, memKeyPairOpt3, memIndexOpt3) = rlnRelaySetUp(3) # set up rln relay inputs
# mount rlnrelay in off-chain mode
waitFor node3.mountRlnRelay(groupOpt = groupOpt3,
memKeyPairOpt = memKeyPairOpt3,
memIndexOpt= memIndexOpt3,
onchainMode = false,
pubsubTopic = rlnRelayPubSubTopic)
await node3.start()
node3.mountRelay(@[pubSubTopic])
# connect them together
await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
var completionFut = newFuture[bool]()
proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
let msg = WakuMessage.init(data)
if msg.isOk():
let val = msg.value()
debug "The received topic:", topic
if topic == pubSubTopic:
if topic == rlnRelayPubSubTopic:
completionFut.complete(true)
node3.subscribe(pubSubTopic, relayHandler)
# mount the relay handler
node3.subscribe(rlnRelayPubSubTopic, relayHandler)
await sleepAsync(2000.millis)
await node1.publish(pubSubTopic, message1, rlnRelayEnabled = true)
# prepare the message payload
var payload {.noinit.}: array[32, byte]
for x in payload.mitems: x = 1
# prepare the epoch
var epoch {.noinit.}: Epoch
for x in epoch.mitems: x = 2
# prepare the proof
let rateLimitProofRes = node1.wakuRlnRelay.rlnInstance.proofGen(data = payload,
memKeys = node1.wakuRlnRelay.membershipKeyPair,
memIndex = node1.wakuRlnRelay.membershipIndex,
epoch = epoch)
doAssert(rateLimitProofRes.isOk())
let rateLimitProof = rateLimitProofRes.value
let message = WakuMessage(payload: @payload,
contentTopic: contentTopic,
proof: rateLimitProof)
## node1 publishes a message with a non-spam proof, the message is then relayed to node2 which in turn
## verifies the non-spam proof of the message and relays the message to node3
## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
await node1.publish(rlnRelayPubSubTopic, message)
await sleepAsync(2000.millis)
@ -636,6 +687,108 @@ procSuite "WakuNode":
await node1.stop()
await node2.stop()
await node3.stop()
asyncTest "testing rln-relay with invalid proof":
let
# publisher node
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.new(nodeKey1, ValidIpAddress.init("0.0.0.0"), Port(60000))
# Relay node
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"), Port(60002))
# Subscriber
nodeKey3 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node3 = WakuNode.new(nodeKey3, ValidIpAddress.init("0.0.0.0"), Port(60003))
rlnRelayPubSubTopic = RLNRELAY_PUBSUB_TOPIC
contentTopic = ContentTopic("/waku/2/default-content/proto")
# set up three nodes
# node1
node1.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt1, memKeyPairOpt1, memIndexOpt1) = rlnRelaySetUp(1) # set up rln relay inputs
# mount rlnrelay in off-chain mode
waitFor node1.mountRlnRelay(groupOpt = groupOpt1,
memKeyPairOpt = memKeyPairOpt1,
memIndexOpt= memIndexOpt1,
onchainMode = false,
pubsubTopic = rlnRelayPubSubTopic)
await node1.start()
# node 2
node2.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt2, memKeyPairOpt2, memIndexOpt2) = rlnRelaySetUp(2) # set up rln relay inputs
# mount rlnrelay in off-chain mode
waitFor node2.mountRlnRelay(groupOpt = groupOpt2,
memKeyPairOpt = memKeyPairOpt2,
memIndexOpt= memIndexOpt2,
onchainMode = false,
pubsubTopic = rlnRelayPubSubTopic)
await node2.start()
# node 3
node3.mountRelay(@[rlnRelayPubSubTopic])
let (groupOpt3, memKeyPairOpt3, memIndexOpt3) = rlnRelaySetUp(3) # set up rln relay inputs
# mount rlnrelay in off-chain mode
waitFor node3.mountRlnRelay(groupOpt = groupOpt3,
memKeyPairOpt = memKeyPairOpt3,
memIndexOpt= memIndexOpt3,
onchainMode = false,
pubsubTopic = rlnRelayPubSubTopic)
await node3.start()
# connect them together
await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
# define a custom relay handler
var completionFut = newFuture[bool]()
proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
let msg = WakuMessage.init(data)
if msg.isOk():
let val = msg.value()
debug "The received topic:", topic
if topic == rlnRelayPubSubTopic:
completionFut.complete(true)
# mount the relay handler
node3.subscribe(rlnRelayPubSubTopic, relayHandler)
await sleepAsync(2000.millis)
# prepare the message payload
var payload {.noinit.}: array[32, byte]
for x in payload.mitems: x = 1
# prepare the epoch
var epoch {.noinit.}: Epoch
for x in epoch.mitems: x = 2
# prepare the proof
let rateLimitProofRes = node1.wakuRlnRelay.rlnInstance.proofGen(data = payload,
memKeys = node1.wakuRlnRelay.membershipKeyPair,
memIndex = MembershipIndex(4),
epoch = epoch)
doAssert(rateLimitProofRes.isOk())
let rateLimitProof = rateLimitProofRes.value
let message = WakuMessage(payload: @payload,
contentTopic: contentTopic,
proof: rateLimitProof)
## node1 publishes a message with an invalid non-spam proof, the message is then relayed to node2 which in turn
## attempts to verify the non-spam proof and fails, hence it does not relay the message to node3, so the relayHandler of node3
## never gets called
## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
await node1.publish(rlnRelayPubSubTopic, message)
await sleepAsync(2000.millis)
check:
# the relayHandler of node3 never gets called
(await completionFut.withTimeout(10.seconds)) == false
await node1.stop()
await node2.stop()
await node3.stop()
asyncTest "Relay protocol is started correctly":
let


@ -37,7 +37,7 @@ when defined(rln):
import
libp2p/protocols/pubsub/rpc/messages,
web3,
../protocol/waku_rln_relay/[rln, waku_rln_relay_utils, waku_rln_relay_utils]
../protocol/waku_rln_relay/[rln, waku_rln_relay_utils]
declarePublicCounter waku_node_messages, "number of messages received", ["type"]
declarePublicGauge waku_node_filters, "number of content filter subscriptions"
@ -288,14 +288,13 @@ proc unsubscribe*(node: WakuNode, request: FilterRequest) {.async, gcsafe.} =
waku_node_filters.set(node.filters.len.int64)
proc publish*(node: WakuNode, topic: Topic, message: WakuMessage, rlnRelayEnabled: bool = false) {.async, gcsafe.} =
proc publish*(node: WakuNode, topic: Topic, message: WakuMessage) {.async, gcsafe.} =
## Publish a `WakuMessage` to a PubSub topic. `WakuMessage` should contain a
## `contentTopic` field for light node functionality. This field may be also
## be omitted.
##
## Status: Implemented.
## When rlnRelayEnabled is true, a zkp will be generated and attached to the message (it is an experimental feature)
if node.wakuRelay.isNil:
error "Invalid API call to `publish`. WakuRelay not mounted. Try `lightpush` instead."
# @TODO improved error handling
@ -305,15 +304,6 @@ proc publish*(node: WakuNode, topic: Topic, message: WakuMessage, rlnRelayEnabl
debug "publish", topic=topic, contentTopic=message.contentTopic
var publishingMessage = message
when defined(rln):
if rlnRelayEnabled:
# if rln relay is enabled then a proof must be generated and added to the waku message
let
proof = proofGen(message.payload)
## TODO here since the message is immutable we have to make a copy of it and then attach the proof to its duplicate
## TODO however, it might be better to change message type to mutable (i.e., var) so that we can add the proof field to the original message
publishingMessage = WakuMessage(payload: message.payload, contentTopic: message.contentTopic, version: message.version, proof: proof)
let data = message.encode().buffer
discard await wakuRelay.publish(topic, data)
@ -417,9 +407,10 @@ when defined(rln):
let msg = WakuMessage.init(message.data)
if msg.isOk():
# check the proof
if proofVrfy(msg.value().payload, msg.value().proof):
if node.wakuRlnRelay.rlnInstance.proofVerify(msg.value().payload, msg.value().proof):
return ValidationResult.Accept
# set a validator for the pubsubTopic
return ValidationResult.Reject
# set a validator for the supplied pubsubTopic
let pb = PubSub(node.wakuRelay)
pb.addValidator(pubsubTopic, validator)
@ -503,14 +494,16 @@ when defined(rln):
let member_is_added = rln.insertMember(member)
doAssert(member_is_added)
# create the WakuRLNRelay
var rlnPeer = WakuRLNRelay(membershipKeyPair: memKeyPair,
membershipIndex: memIndex,
membershipContractAddress: memContractAdd,
ethClientAddress: ethClientAddr,
ethAccountAddress: ethAccAddr,
rlnInstance: rln)
rlnInstance: rln,
pubsubTopic: pubsubTopic)
if onchainMode:
# register the rln-relay peer to the membership contract
let is_successful = await rlnPeer.register()


@ -9,7 +9,8 @@
{.push raises: [Defect].}
import
libp2p/protobuf/minprotobuf
libp2p/protobuf/minprotobuf,
waku_rln_relay/waku_rln_relay_types
type
ContentTopic* = string
@ -23,7 +24,10 @@ type
# the proof field indicates that the message is not a spam
# this field will be used in the rln-relay protocol
# XXX Experimental, this is part of https://rfc.vac.dev/spec/17/ spec and not yet part of WakuMessage spec
proof*: seq[byte]
when defined(rln):
proof*: RateLimitProof
else:
proof*: seq[byte]
# Encoding and decoding -------------------------------------------------------
@ -34,9 +38,15 @@ proc init*(T: type WakuMessage, buffer: seq[byte]): ProtoResult[T] =
discard ? pb.getField(1, msg.payload)
discard ? pb.getField(2, msg.contentTopic)
discard ? pb.getField(3, msg.version)
discard ? pb.getField(4, msg.timestamp)
# XXX Experimental, this is part of https://rfc.vac.dev/spec/17/ spec and not yet part of WakuMessage spec
discard ? pb.getField(21, msg.proof)
when defined(rln):
var proofBytes: seq[byte]
discard ? pb.getField(21, proofBytes)
msg.proof = ? RateLimitProof.init(proofBytes)
else:
discard ? pb.getField(21, msg.proof)
ok(msg)
@ -47,4 +57,7 @@ proc encode*(message: WakuMessage): ProtoBuffer =
result.write(2, message.contentTopic)
result.write(3, message.version)
result.write(4, message.timestamp)
result.write(21, message.proof)
when defined(rln):
result.write(21, message.proof.encode())
else:
result.write(21, message.proof)
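As a sanity check on the conditional proof field above, a minimal hypothetical round trip (not part of this diff) could look like the sketch below when compiled with -d:rln; the import paths are assumptions based on the test files in this PR, and a default all-zero RateLimitProof is used.

import ../../waku/v2/protocol/waku_message                                 # assumed path
import ../../waku/v2/protocol/waku_rln_relay/waku_rln_relay_types          # assumed path
let msg = WakuMessage(payload: @[byte 0, 1, 2],
                      contentTopic: ContentTopic("/waku/2/default-content/proto"),
                      proof: RateLimitProof())  # default, all-zero proof
let decoded = WakuMessage.init(msg.encode().buffer)
assert decoded.isOk()
assert decoded.value().proof == msg.proof  # field 21 survives the encode/decode cycle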


@ -46,7 +46,8 @@ proc generate_proof*(ctx: RLN[Bn256],
input_buffer: ptr Buffer,
auth: ptr Auth,
output_buffer: ptr Buffer): bool {.importc: "generate_proof".}
## output_buffer holds the proof data and should be parsed as |proof<256>|root<32>|epoch<32>|share_x<32>|share_y<32>|nullifier<32>|
## numbers are in bytes
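## (i.e. 256 + 5*32 = 416 bytes in total; see the array[416, byte] cast in proofGen further down in this diff)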
proc verify*(ctx: RLN[Bn256],
proof_buffer: ptr Buffer,
result_ptr: ptr uint32): bool {.importc: "verify".}


@ -3,31 +3,65 @@
import
options, chronos, stint,
web3,
eth/keys
eth/keys,
libp2p/protobuf/minprotobuf,
stew/arrayops
## Bn256 and RLN are Nim wrappers for the data types used in
## Bn256 and RLN are Nim wrappers for the data types used in
## the rln library https://github.com/kilic/rln/blob/3bbec368a4adc68cd5f9bfae80b17e1bbb4ef373/src/ffi.rs
type Bn256* = pointer
type RLN*[E] = pointer
type IDKey* = array[32, byte]
type IDCommitment* = array[32, byte]
# represents a Merkle tree node which is the output of
# Poseidon hash function implemented by rln lib
type MerkleNode* = array[32,byte]
type
# identity key as defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership
IDKey* = array[32, byte]
# hash of identity key as defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership
IDCommitment* = array[32, byte]
type
MerkleNode* = array[32,byte] # Each node of the Merkle tree is a Poseidon hash, which is a 32-byte value
Nullifier* = array[32,byte]
ZKSNARK* = array[256, byte]
Epoch* = array[32,byte]
# Custom data types defined for waku rln relay -------------------------
type MembershipKeyPair* = object
# node's identity key (a secret key) which is selected randomly
## user's identity key (a secret key) which is selected randomly
## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership
idKey*: IDKey
# hash of node's identity key generated by
# hash of user's identity key generated by
# Poseidon hash function implemented in rln lib
# more details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership
idCommitment*: IDCommitment
type WakuRLNRelay* = object
type RateLimitProof* = object
## RateLimitProof holds the public inputs to rln circuit as
## defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Public-Inputs
## the `proof` field carries the actual zkSNARK proof
proof*: ZKSNARK
## the root of Merkle tree used for the generation of the `proof`
merkleRoot*: MerkleNode
## the epoch used for the generation of the `proof`
epoch*: Epoch
## shareX and shareY are shares of user's identity key
## these shares are created using Shamir secret sharing scheme
## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Linear-Equation-amp-SSS
shareX*: MerkleNode
shareY*: MerkleNode
## nullifier enables linking two messages published during the same epoch
## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Nullifiers
nullifier*: Nullifier
type MembershipIndex* = uint
type WakuRLNRelay* = ref object
membershipKeyPair*: MembershipKeyPair
membershipIndex*: uint # index of peers in the Merkle tree
# membershipIndex denotes the index of a leaf in the Merkle tree
# that contains the pk of the current peer
# this index is used to retrieve the peer's authentication path
membershipIndex*: MembershipIndex
membershipContractAddress*: Address
ethClientAddress*: string
ethAccountAddress*: Address
@ -36,8 +70,8 @@ type WakuRLNRelay* = object
# TODO may need to make ethAccountPrivateKey mandatory
ethAccountPrivateKey*: Option[PrivateKey]
rlnInstance*: RLN[Bn256]
pubsubTopic*: string # the pubsub topic for which rln relay is mounted
type MembershipIndex* = uint
# inputs of the membership contract constructor
# TODO may be able to make these constants private and put them inside the waku_rln_relay_utils
@ -69,3 +103,46 @@ const
# the root is created locally, using the createMembershipList proc from the waku_rln_relay_utils module, and the result is hardcoded here
STATIC_GROUP_MERKLE_ROOT* = "a1877a553eff12e1b21632a0545a916a5c5b8060ad7cc6c69956741134397b2d"
# Protobufs enc and init
proc init*(T: type RateLimitProof, buffer: seq[byte]): ProtoResult[T] =
var nsp: RateLimitProof
let pb = initProtoBuffer(buffer)
var proof: seq[byte]
discard ? pb.getField(1, proof)
discard nsp.proof.copyFrom(proof)
var merkleRoot: seq[byte]
discard ? pb.getField(2, merkleRoot)
discard nsp.merkleRoot.copyFrom(merkleRoot)
var epoch: seq[byte]
discard ? pb.getField(3, epoch)
discard nsp.epoch.copyFrom(epoch)
var shareX: seq[byte]
discard ? pb.getField(4, shareX)
discard nsp.shareX.copyFrom(shareX)
var shareY: seq[byte]
discard ? pb.getField(5, shareY)
discard nsp.shareY.copyFrom(shareY)
var nullifier: seq[byte]
discard ? pb.getField(6, nullifier)
discard nsp.nullifier.copyFrom(nullifier)
return ok(nsp)
proc encode*(nsp: RateLimitProof): ProtoBuffer =
var output = initProtoBuffer()
output.write(1, nsp.proof)
output.write(2, nsp.merkleRoot)
output.write(3, nsp.epoch)
output.write(4, nsp.shareX)
output.write(5, nsp.shareY)
output.write(6, nsp.nullifier)
return output


@ -5,7 +5,7 @@ import
chronicles, options, chronos, stint,
web3,
stew/results,
stew/byteutils,
stew/[byteutils, arrayops],
rln,
waku_rln_relay_types
@ -14,6 +14,7 @@ logScope:
type RLNResult* = Result[RLN[Bn256], string]
type MerkleNodeResult* = Result[MerkleNode, string]
type RateLimitProofResult* = Result[RateLimitProof, string]
# membership contract interface
contract(MembershipContract):
# TODO define a return type of bool for register method to signify a successful registration
@ -102,17 +103,123 @@ proc register*(rlnPeer: WakuRLNRelay): Future[bool] {.async.} =
await web3.close()
return true
proc proofGen*(data: seq[byte]): seq[byte] =
# TODO to implement the actual proof generation logic
return "proof".toBytes()
proc toBuffer*(x: openArray[byte]): Buffer =
## converts the input to a Buffer object
## the Buffer object is used to communicate data with the rln lib
var temp = @x
let output = Buffer(`ptr`: addr(temp[0]), len: uint(temp.len))
return output
proc proofVrfy*(data, proof: seq[byte]): bool =
# TODO to implement the actual proof verification logic
return true
proc hash*(rlnInstance: RLN[Bn256], data: openArray[byte]): MerkleNode =
## a thin layer on top of the Nim wrapper of the Poseidon hasher
debug "hash input", hashhex=data.toHex()
var
hashInputBuffer = data.toBuffer()
outputBuffer: Buffer # will hold the hash output
numOfInputs = 1.uint # the number of hash inputs, which can be 1 or 2
debug "hash input buffer length", bufflen=hashInputBuffer.len
let
hashSuccess = hash(rlnInstance, addr hashInputBuffer, numOfInputs, addr outputBuffer)
output = cast[ptr MerkleNode](outputBuffer.`ptr`)[]
return output
proc proofGen*(rlnInstance: RLN[Bn256], data: openArray[byte], memKeys: MembershipKeyPair, memIndex: MembershipIndex, epoch: Epoch): RateLimitProofResult =
var skBuffer = toBuffer(memKeys.idKey)
# peer's index in the Merkle Tree
var index = memIndex
# prepare the authentication object with peer's index and sk
var authObj: Auth = Auth(secret_buffer: addr skBuffer, index: index)
# serialize message and epoch
# TODO add a proc for serializing
var epochMessage = @epoch & @data
# convert the seq to an array
var inputBytes{.noinit.}: array[64, byte] # holds epoch||Message
for (i, x) in inputBytes.mpairs: x = epochMessage[i]
debug "serialized epoch and message ", inputHex=inputBytes.toHex()
# put the serialized epoch||message into a buffer
var inputBuffer = toBuffer(inputBytes)
# generate the proof
var proof: Buffer
let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr authObj, addr proof)
# check whether the generate_proof call is done successfully
if not proofIsSuccessful:
return err("could not generate the proof")
var proofValue = cast[ptr array[416,byte]] (proof.`ptr`)
let proofBytes: array[416,byte] = proofValue[]
debug "proof content", proofHex=proofValue[].toHex
## parse the proof as |zkSNARKs<256>|root<32>|epoch<32>|share_x<32>|share_y<32>|nullifier<32>|
let
proofOffset = 256
rootOffset = proofOffset + 32
epochOffset = rootOffset + 32
shareXOffset = epochOffset + 32
shareYOffset = shareXOffset + 32
nullifierOffset = shareYOffset + 32
var
zkproof: ZKSNARK
proofRoot, shareX, shareY: MerkleNode
epoch: Epoch
nullifier: Nullifier
discard zkproof.copyFrom(proofBytes[0..proofOffset-1])
discard proofRoot.copyFrom(proofBytes[proofOffset..rootOffset-1])
discard epoch.copyFrom(proofBytes[rootOffset..epochOffset-1])
discard shareX.copyFrom(proofBytes[epochOffset..shareXOffset-1])
discard shareY.copyFrom(proofBytes[shareXOffset..shareYOffset-1])
discard nullifier.copyFrom(proofBytes[shareYOffset..nullifierOffset-1])
let output = RateLimitProof(proof: zkproof,
merkleRoot: proofRoot,
epoch: epoch,
shareX: shareX,
shareY: shareY,
nullifier: nullifier)
return ok(output)
proc serializeProof(proof: RateLimitProof): seq[byte] =
## a private proc to convert RateLimitProof to a byte seq
## this conversion is used in the proof verification proc
var proofBytes = concat(@(proof.proof),
@(proof.merkleRoot),
@(proof.epoch),
@(proof.shareX),
@(proof.shareY),
@(proof.nullifier))
return proofBytes
proc proofVerify*(rlnInstance: RLN[Bn256], data: openArray[byte], proof: RateLimitProof): bool =
# TODO proof should be checked against the data
var
proofBytes = serializeProof(proof)
proofBuffer = proofBytes.toBuffer()
f = 0.uint32
debug "serialized proof", proof=proofBytes.toHex()
let verifyIsSuccessful = verify(rlnInstance, addr proofBuffer, addr f)
if not verifyIsSuccessful:
# something went wrong in verification
return false
# f = 0 means the proof is verified
if f == 0:
return true
return false
proc insertMember*(rlnInstance: RLN[Bn256], idComm: IDCommitment): bool =
var temp = idComm
var pkBuffer = Buffer(`ptr`: addr(temp[0]), len: 32)
var pkBuffer = toBuffer(idComm)
let pkBufferPtr = addr pkBuffer
# add the member to the tree
@ -132,9 +239,8 @@ proc getMerkleRoot*(rlnInstance: RLN[Bn256]): MerkleNodeResult =
if (not get_root_successful): return err("could not get the root")
if (not (root.len == 32)): return err("wrong output size")
var rootValue = cast[ptr array[32,byte]] (root.`ptr`)
let merkleNode = rootValue[]
return ok(merkleNode)
var rootValue = cast[ptr MerkleNode] (root.`ptr`)[]
return ok(rootValue)
proc toMembershipKeyPairs*(groupKeys: seq[(string, string)]): seq[MembershipKeyPair] {.raises: [Defect, ValueError]} =
## groupKeys is a sequence of membership key tuples in the form of (identity key, identity commitment), all in hexadecimal format