mirror of https://github.com/waku-org/nwaku.git
commit e846609ac7 (parent 44936038dd)
@@ -2,7 +2,7 @@
 {.used.}
 
 import
-  std/options, sequtils,
+  std/options, sequtils, times,
   testutils/unittests, chronos, chronicles, stint, web3,
   stew/byteutils, stew/shims/net as stewNet,
   libp2p/crypto/crypto,
@@ -188,7 +188,7 @@ procSuite "Waku rln relay":
 
     # initialize the WakuRLNRelay
     var rlnPeer = WakuRLNRelay(membershipKeyPair: membershipKeyPair.get(),
-      membershipIndex: uint(0),
+      membershipIndex: MembershipIndex(0),
       ethClientAddress: EthClient,
       ethAccountAddress: ethAccountAddress,
       membershipContractAddress: contractAddress)
@@ -759,3 +759,144 @@ suite "Waku rln relay":
     let verified = rln.proofVerify(data = messageBytes,
                                    proof = proof)
     check verified == false
+
+  test "toEpoch and fromEpoch consistency check":
+    # check edge cases
+    let
+      time = uint64.high
+      epoch = time.toEpoch()
+      decodedTime = epoch.fromEpoch()
+    check time == decodedTime
+    debug "encoded and decoded time", time=time, epoch=epoch, decodedTime=decodedTime
+
+  test "Epoch comparison":
+    # check edge cases
+    let
+      time1 = uint64.high
+      time2 = uint64.high - 1
+      epoch1 = time1.toEpoch()
+      epoch2 = time2.toEpoch()
+    check compare(epoch1, epoch2) == int64(1)
+    check compare(epoch2, epoch1) == int64(-1)
+
+  test "updateLog and hasDuplicate tests":
+    let
+      wakurlnrelay = WakuRLNRelay()
+      epoch = getCurrentEpoch()
+
+    # create some dummy nullifiers and secret shares
+    var nullifier1: Nullifier
+    for index, x in nullifier1.mpairs: nullifier1[index] = 1
+    var shareX1: MerkleNode
+    for index, x in shareX1.mpairs: shareX1[index] = 1
+    let shareY1 = shareX1
+
+    var nullifier2: Nullifier
+    for index, x in nullifier2.mpairs: nullifier2[index] = 2
+    var shareX2: MerkleNode
+    for index, x in shareX2.mpairs: shareX2[index] = 2
+    let shareY2 = shareX2
+
+    let nullifier3 = nullifier1
+    var shareX3: MerkleNode
+    for index, x in shareX3.mpairs: shareX3[index] = 3
+    let shareY3 = shareX3
+
+    let
+      wm1 = WakuMessage(proof: RateLimitProof(epoch: epoch, nullifier: nullifier1, shareX: shareX1, shareY: shareY1))
+      wm2 = WakuMessage(proof: RateLimitProof(epoch: epoch, nullifier: nullifier2, shareX: shareX2, shareY: shareY2))
+      wm3 = WakuMessage(proof: RateLimitProof(epoch: epoch, nullifier: nullifier3, shareX: shareX3, shareY: shareY3))
+
+    # check whether hasDuplicate correctly finds records with the same nullifiers but different secret shares
+    # no duplicate for wm1 should be found, since the log is empty
+    let result1 = wakurlnrelay.hasDuplicate(wm1)
+    check:
+      result1.isOk
+      # no duplicate is found
+      result1.value == false
+    # add it to the log
+    discard wakurlnrelay.updateLog(wm1)
+
+    # no duplicate for wm2 should be found, its nullifier differs from wm1's
+    let result2 = wakurlnrelay.hasDuplicate(wm2)
+    check:
+      result2.isOk
+      # no duplicate is found
+      result2.value == false
+    # add it to the log
+    discard wakurlnrelay.updateLog(wm2)
+
+    # wm3 has the same nullifier as wm1 but different secret shares, so it should be detected as a duplicate
+    let result3 = wakurlnrelay.hasDuplicate(wm3)
+    check:
+      result3.isOk
+      # it is a duplicate
+      result3.value == true
+
+  test "validateMessage test":
+    # set up a wakurlnrelay peer with a static group ----------
+
+    # create a group of 100 membership keys
+    let
+      (groupKeys, root) = createMembershipList(100)
+      # convert the keys to MembershipKeyPair structs
+      groupKeyPairs = groupKeys.toMembershipKeyPairs()
+      # extract the id commitments
+      groupIDCommitments = groupKeyPairs.mapIt(it.idCommitment)
+    debug "groupKeyPairs", groupKeyPairs
+    debug "groupIDCommitments", groupIDCommitments
+
+    # index indicates the position of a membership key pair in the static list of group keys i.e., groupKeyPairs
+    # the corresponding key pair will be used to mount rlnRelay on the current node
+    # index also represents the index of the leaf in the Merkle tree that contains the node's commitment key
+    let index = MembershipIndex(5)
+
+    # create an RLN instance
+    var rlnInstance = createRLNInstance()
+    doAssert(rlnInstance.isOk)
+    var rln = rlnInstance.value
+
+    # add members
+    discard rln.addAll(groupIDCommitments)
+
+    let
+      wakuRlnRelay = WakuRLNRelay(membershipIndex: index, membershipKeyPair: groupKeyPairs[index], rlnInstance: rln)
+
+    # get the current epoch time
+    let time = epochTime()
+
+    # create some messages from the same peer and append an rln proof to them, except for wm4
+    var
+      wm1 = WakuMessage(payload: "Valid message".toBytes())
+      proofAdded1 = wakuRlnRelay.appendRLNProof(wm1, time)
+      # another message in the same epoch as wm1, it will break the messaging rate limit
+      wm2 = WakuMessage(payload: "Spam".toBytes())
+      proofAdded2 = wakuRlnRelay.appendRLNProof(wm2, time)
+      # wm3 points to the next epoch
+      wm3 = WakuMessage(payload: "Valid message".toBytes())
+      proofAdded3 = wakuRlnRelay.appendRLNProof(wm3, time+EPOCH_UNIT_SECONDS)
+      wm4 = WakuMessage(payload: "Invalid message".toBytes())
+
+    # check that the proofs are added
+    check:
+      proofAdded1
+      proofAdded2
+      proofAdded3
+
+    # validate messages
+    # the validateMessage proc checks the validity of the message fields and adds it to the log (if valid)
+    let
+      msgValidate1 = wakuRlnRelay.validateMessage(wm1)
+      # wm2 is published within the same epoch as wm1 and should be found as spam
+      msgValidate2 = wakuRlnRelay.validateMessage(wm2)
+      # a valid message should be validated successfully
+      msgValidate3 = wakuRlnRelay.validateMessage(wm3)
+      # wm4 has no rln proof and should not be validated
+      msgValidate4 = wakuRlnRelay.validateMessage(wm4)
+
+    check:
+      msgValidate1 == MessageValidationResult.Valid
+      msgValidate2 == MessageValidationResult.Spam
+      msgValidate3 == MessageValidationResult.Valid
+      msgValidate4 == MessageValidationResult.Invalid
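Note on the epoch helpers exercised above: toEpoch and fromEpoch (added to waku_rln_relay_utils later in this commit) are a plain little-endian uint64-to-byte-array round trip, while calcEpoch first divides the wall-clock time by EPOCH_UNIT_SECONDS. That is why the tests can feed uint64.high straight into toEpoch, and why two raw values that differ by 1 yield compare results of int64(1) and int64(-1). A minimal sketch of how the helpers fit together (illustration only, not part of the commit; the import path and the rln build flag are assumptions):

    # illustration only: assumes -d:rln and this module's import path
    import waku/v2/protocol/waku_rln_relay/waku_rln_relay_utils
    from times import epochTime

    let
      t = epochTime()       # seconds since the Unix epoch, with a fractional part
      e = calcEpoch(t)      # uint64(t / EPOCH_UNIT_SECONDS), encoded little-endian into an Epoch
      raw = e.fromEpoch()   # decodes back to the uint64 epoch counter
    assert raw.toEpoch() == e   # toEpoch/fromEpoch round-trip losslessly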
@@ -24,8 +24,10 @@ import
   ../test_helpers
 
 when defined(rln):
-  import ../../waku/v2/protocol/waku_rln_relay/[waku_rln_relay_utils, waku_rln_relay_types]
+  import
+    ../../waku/v2/protocol/waku_rln_relay/[waku_rln_relay_utils, waku_rln_relay_types]
+  from times import epochTime
 
 const RLNRELAY_PUBSUB_TOPIC = "waku/2/rlnrelay/proto"
 template sourceDir: string = currentSourcePath.parentDir()
 const KEY_PATH = sourceDir / "resources/test_key.pem"
@@ -658,12 +660,10 @@ procSuite "WakuNode":
     await sleepAsync(2000.millis)
 
     # prepare the message payload
-    var payload {.noinit.}: array[32, byte]
-    for x in payload.mitems: x = 1
+    let payload = "Hello".toBytes()
 
     # prepare the epoch
-    var epoch {.noinit.}: Epoch
-    for x in epoch.mitems: x = 2
+    let epoch = getCurrentEpoch()
 
     # prepare the proof
     let rateLimitProofRes = node1.wakuRlnRelay.rlnInstance.proofGen(data = payload,
@@ -678,8 +678,8 @@ procSuite "WakuNode":
       proof: rateLimitProof)
 
 
-    ## node1 publishes a message with a non-spam proof, the message is then relayed to node2 which in turn
-    ## verifies the non-spam proof of the message and relays the message to node3
+    ## node1 publishes a message with a rate limit proof, the message is then relayed to node2 which in turn
+    ## verifies the rate limit proof of the message and relays the message to node3
     ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
     await node1.publish(rlnRelayPubSubTopic, message)
     await sleepAsync(2000.millis)
@@ -759,12 +759,10 @@ procSuite "WakuNode":
     await sleepAsync(2000.millis)
 
     # prepare the message payload
-    var payload {.noinit.}: array[32, byte]
-    for x in payload.mitems: x = 1
+    let payload = "Hello".toBytes()
 
     # prepare the epoch
-    var epoch {.noinit.}: Epoch
-    for x in epoch.mitems: x = 2
+    let epoch = getCurrentEpoch()
 
     # prepare the proof
     let rateLimitProofRes = node1.wakuRlnRelay.rlnInstance.proofGen(data = payload,
@@ -779,8 +777,8 @@ procSuite "WakuNode":
       proof: rateLimitProof)
 
 
-    ## node1 publishes a message with an invalid non-spam proof, the message is then relayed to node2 which in turn
-    ## attempts to verify the non-spam proof and fails hence does not relay the message to node3, thus the relayHandler of node3
+    ## node1 publishes a message with an invalid rln proof, the message is then relayed to node2 which in turn
+    ## attempts to verify the rate limit proof and fails, hence does not relay the message to node3, thus the relayHandler of node3
     ## never gets called
     ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
     await node1.publish(rlnRelayPubSubTopic, message)
@@ -794,6 +792,131 @@ procSuite "WakuNode":
     await node2.stop()
     await node3.stop()
 
+  asyncTest "testing rln-relay double-signaling detection":
+
+    let
+      # publisher node
+      nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
+      node1 = WakuNode.new(nodeKey1, ValidIpAddress.init("0.0.0.0"), Port(60000))
+      # relay node
+      nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
+      node2 = WakuNode.new(nodeKey2, ValidIpAddress.init("0.0.0.0"), Port(60002))
+      # subscriber node
+      nodeKey3 = crypto.PrivateKey.random(Secp256k1, rng[])[]
+      node3 = WakuNode.new(nodeKey3, ValidIpAddress.init("0.0.0.0"), Port(60003))
+
+      rlnRelayPubSubTopic = RLNRELAY_PUBSUB_TOPIC
+      contentTopic = ContentTopic("/waku/2/default-content/proto")
+
+    # set up three nodes
+    # node1
+    node1.mountRelay(@[rlnRelayPubSubTopic])
+    let (groupOpt1, memKeyPairOpt1, memIndexOpt1) = rlnRelaySetUp(1) # set up rln relay inputs
+    # mount rlnrelay in off-chain mode
+    waitFor node1.mountRlnRelay(groupOpt = groupOpt1,
+                                memKeyPairOpt = memKeyPairOpt1,
+                                memIndexOpt = memIndexOpt1,
+                                onchainMode = false,
+                                pubsubTopic = rlnRelayPubSubTopic)
+    await node1.start()
+
+    # node 2
+    node2.mountRelay(@[rlnRelayPubSubTopic])
+    let (groupOpt2, memKeyPairOpt2, memIndexOpt2) = rlnRelaySetUp(2) # set up rln relay inputs
+    # mount rlnrelay in off-chain mode
+    waitFor node2.mountRlnRelay(groupOpt = groupOpt2,
+                                memKeyPairOpt = memKeyPairOpt2,
+                                memIndexOpt = memIndexOpt2,
+                                onchainMode = false,
+                                pubsubTopic = rlnRelayPubSubTopic)
+    await node2.start()
+
+    # node 3
+    node3.mountRelay(@[rlnRelayPubSubTopic])
+    let (groupOpt3, memKeyPairOpt3, memIndexOpt3) = rlnRelaySetUp(3) # set up rln relay inputs
+    # mount rlnrelay in off-chain mode
+    waitFor node3.mountRlnRelay(groupOpt = groupOpt3,
+                                memKeyPairOpt = memKeyPairOpt3,
+                                memIndexOpt = memIndexOpt3,
+                                onchainMode = false,
+                                pubsubTopic = rlnRelayPubSubTopic)
+    await node3.start()
+
+    # connect the nodes together: node1 <-> node2 <-> node3
+    await node1.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+    await node3.connectToNodes(@[node2.peerInfo.toRemotePeerInfo()])
+
+    # get the current epoch time
+    let time = epochTime()
+    # create some messages with rate limit proofs
+    var
+      wm1 = WakuMessage(payload: "message 1".toBytes())
+      proofAdded1 = node3.wakuRlnRelay.appendRLNProof(wm1, time)
+      # another message in the same epoch as wm1, it will break the messaging rate limit
+      wm2 = WakuMessage(payload: "message2".toBytes())
+      proofAdded2 = node3.wakuRlnRelay.appendRLNProof(wm2, time)
+      # wm3 points to the next epoch
+      wm3 = WakuMessage(payload: "message 3".toBytes())
+      proofAdded3 = node3.wakuRlnRelay.appendRLNProof(wm3, time+EPOCH_UNIT_SECONDS)
+      wm4 = WakuMessage(payload: "message4".toBytes())
+
+    # check that the proofs are added correctly
+    check:
+      proofAdded1
+      proofAdded2
+      proofAdded3
+
+    # relay handler for node3
+    var completionFut1 = newFuture[bool]()
+    var completionFut2 = newFuture[bool]()
+    var completionFut3 = newFuture[bool]()
+    var completionFut4 = newFuture[bool]()
+    proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+      let msg = WakuMessage.init(data)
+      if msg.isOk():
+        let wm = msg.value()
+        debug "The received topic:", topic
+        if topic == rlnRelayPubSubTopic:
+          if wm == wm1:
+            completionFut1.complete(true)
+          if wm == wm2:
+            completionFut2.complete(true)
+          if wm == wm3:
+            completionFut3.complete(true)
+          if wm == wm4:
+            completionFut4.complete(true)
+
+    # mount the relay handler for node3
+    node3.subscribe(rlnRelayPubSubTopic, relayHandler)
+    await sleepAsync(2000.millis)
+
+    ## node1 publishes and relays 4 messages to node2
+    ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
+    ## node2 relays either wm1 or wm2 to node3, depending on which message arrives at node2 first
+    ## node2 should detect the other of wm1 and wm2 as spam and not relay it
+    ## node2 should relay wm3 to node3
+    ## node2 should not relay wm4 because it has no valid rln proof
+    await node1.publish(rlnRelayPubSubTopic, wm1)
+    await node1.publish(rlnRelayPubSubTopic, wm2)
+    await node1.publish(rlnRelayPubSubTopic, wm3)
+    await node1.publish(rlnRelayPubSubTopic, wm4)
+    await sleepAsync(2000.millis)
+
+    let
+      res1 = await completionFut1.withTimeout(10.seconds)
+      res2 = await completionFut2.withTimeout(10.seconds)
+
+    check:
+      (res1 or res2) == true # either wm1 or wm2 is relayed
+      (res1 and res2) == false # one of wm1 and wm2 is found as spam, hence not relayed
+      (await completionFut2.withTimeout(10.seconds)) == true
+      (await completionFut3.withTimeout(10.seconds)) == true
+      (await completionFut4.withTimeout(10.seconds)) == false
+
+    await node1.stop()
+    await node2.stop()
+    await node3.stop()
   asyncTest "Relay protocol is started correctly":
     let
       nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
@@ -2,7 +2,7 @@
 
 # libtool - Provide generalized library-building support services.
 # Generated automatically by config.status (libbacktrace) version-unused
-# Libtool was configured on host fv-az129-255:
+# Libtool was configured on host fv-az190-306:
 # NOTE: Changes made to this file will be lost: look at ltmain.sh.
 #
 # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
@@ -440,10 +440,20 @@ when defined(rln):
     proc validator(topic: string, message: messages.Message): Future[ValidationResult] {.async.} =
       let msg = WakuMessage.init(message.data)
       if msg.isOk():
-        # check the proof
-        if node.wakuRlnRelay.rlnInstance.proofVerify(msg.value().payload, msg.value().proof):
-          return ValidationResult.Accept
-      return ValidationResult.Reject
+        let
+          wakumessage = msg.value()
+          # validate the message
+          validationRes = node.wakuRlnRelay.validateMessage(wakumessage)
+        case validationRes:
+          of Valid:
+            info "message validity is verified, relaying:", wakumessage=wakumessage
+            return ValidationResult.Accept
+          of Invalid:
+            info "message validity could not be verified, discarding:", wakumessage=wakumessage
+            return ValidationResult.Reject
+          of Spam:
+            info "A spam message is found! discarding:", wakumessage=wakumessage
+            return ValidationResult.Reject
     # set a validator for the supplied pubsubTopic
     let pb = PubSub(node.wakuRelay)
     pb.addValidator(pubsubTopic, validator)
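Only the Valid outcome is accepted by this validator; both Invalid and Spam map to ValidationResult.Reject, so a spam message is dropped at the relaying node and never reaches the subscriber. This is the behaviour the double-signaling test above checks for.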
@@ -9,8 +9,9 @@
 {.push raises: [Defect].}
 
 import
-  libp2p/protobuf/minprotobuf,
-  waku_rln_relay/waku_rln_relay_types
+  libp2p/protobuf/minprotobuf
+when defined(rln):
+  import waku_rln_relay/waku_rln_relay_types
 
 type
   ContentTopic* = string
@@ -1,6 +1,7 @@
 {.push raises: [Defect].}
 
 import
+  std/tables,
   options, chronos, stint,
   web3,
   eth/keys,
@@ -56,6 +57,11 @@ type RateLimitProof* = object
 
 type MembershipIndex* = uint
 
+type ProofMetadata* = object
+  nullifier*: Nullifier
+  shareX*: MerkleNode
+  shareY*: MerkleNode
+
 type WakuRLNRelay* = ref object
   membershipKeyPair*: MembershipKeyPair
   # membershipIndex denotes the index of a leaf in the Merkle tree
@@ -71,7 +77,11 @@ type WakuRLNRelay* = ref object
   ethAccountPrivateKey*: Option[PrivateKey]
   rlnInstance*: RLN[Bn256]
   pubsubTopic*: string # the pubsub topic for which rln relay is mounted
+
+  # the log of nullifiers and Shamir shares of the past messages grouped per epoch
+  nullifierLog*: Table[Epoch, seq[ProofMetadata]]
+
 type MessageValidationResult* {.pure.} = enum
   Valid, Invalid, Spam
 
 # inputs of the membership contract constructor
 # TODO may be able to make these constants private and put them inside the waku_rln_relay_utils
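For orientation (illustration only, not part of the diff): nullifierLog keeps one ProofMetadata record per received message, grouped under the message's epoch; the hasDuplicate and updateLog procs added to waku_rln_relay_utils below read and extend these per-epoch buckets. A minimal sketch of the shape, assuming the Epoch, ProofMetadata and RateLimitProof types from this module and a received proof named `proof`:

    import std/tables

    # `proof` stands for the RateLimitProof carried by a received WakuMessage
    var log: Table[Epoch, seq[ProofMetadata]]
    let md = ProofMetadata(nullifier: proof.nullifier, shareX: proof.shareX, shareY: proof.shareY)
    # create the epoch's bucket on first use, then append the record
    if not log.hasKey(proof.epoch):
      log[proof.epoch] = newSeq[ProofMetadata]()
    log[proof.epoch].add(md)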
@@ -103,8 +113,12 @@ const
   # the root is created locally, using createMembershipList proc from waku_rln_relay_utils module, and the result is hardcoded in here
   STATIC_GROUP_MERKLE_ROOT* = "a1877a553eff12e1b21632a0545a916a5c5b8060ad7cc6c69956741134397b2d"
 
-# Protobufs enc and init
+const EPOCH_UNIT_SECONDS* = float64(2)
+const MAX_CLOCK_GAP_SECONDS* = 20.0 # the maximum clock difference between peers
+# maximum allowed gap between the epochs of messages' RateLimitProofs
+const MAX_EPOCH_GAP* = int64(MAX_CLOCK_GAP_SECONDS/EPOCH_UNIT_SECONDS)
+
+# Protobufs enc and init
 proc init*(T: type RateLimitProof, buffer: seq[byte]): ProtoResult[T] =
   var nsp: RateLimitProof
   let pb = initProtoBuffer(buffer)
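With the constants above, MAX_EPOCH_GAP works out to int64(20.0 / 2) = 10: as the validateMessage proc added below shows, a message is marked Invalid when the absolute difference between its epoch and the locally computed epoch is 10 or more, i.e. when the sender's clock appears to be roughly 20 seconds or more away from the receiver's.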
@@ -1,13 +1,14 @@
 {.push raises: [Defect].}
 
 import
-  std/sequtils,
+  std/sequtils, tables, times,
   chronicles, options, chronos, stint,
   web3,
   stew/results,
   stew/[byteutils, arrayops, endians2],
   rln,
-  waku_rln_relay_types
+  waku_rln_relay_types,
+  ../waku_message
 
 logScope:
   topics = "wakurlnrelayutils"
@@ -338,4 +339,159 @@ proc rlnRelaySetUp*(rlnRelayMemIndex: MembershipIndex): (Option[seq[IDCommitment
     memKeyPairOpt = some(groupKeyPairs[rlnRelayMemIndex])
     memIndexOpt= some(rlnRelayMemIndex)
 
-  return (groupOpt, memKeyPairOpt, memIndexOpt)
+  return (groupOpt, memKeyPairOpt, memIndexOpt)
+
+proc hasDuplicate*(rlnPeer: WakuRLNRelay, msg: WakuMessage): Result[bool, string] =
+  ## returns true if there is another message in the `nullifierLog` of the `rlnPeer` with the same
+  ## epoch and nullifier as `msg`'s epoch and nullifier but different Shamir secret shares
+  ## otherwise, returns false
+  ## emits an error string if `KeyError` occurs (never happens, it is just to avoid raising an unnecessary `KeyError` exception)
+
+  # extract the proof metadata of the supplied `msg`
+  let proofMD = ProofMetadata(nullifier: msg.proof.nullifier, shareX: msg.proof.shareX, shareY: msg.proof.shareY)
+
+  # check if the epoch exists
+  if not rlnPeer.nullifierLog.hasKey(msg.proof.epoch):
+    return ok(false)
+  try:
+    if rlnPeer.nullifierLog[msg.proof.epoch].contains(proofMD):
+      # there is an identical record, ignore the msg
+      return ok(false)
+
+    # check for a message with the same nullifier but different secret shares
+    let matched = rlnPeer.nullifierLog[msg.proof.epoch].filterIt((it.nullifier == proofMD.nullifier) and ((it.shareX != proofMD.shareX) or (it.shareY != proofMD.shareY)))
+
+    if matched.len != 0:
+      # there is a duplicate
+      return ok(true)
+
+    # there is no duplicate
+    return ok(false)
+
+  except KeyError as e:
+    return err("the epoch was not found")
+
+proc updateLog*(rlnPeer: WakuRLNRelay, msg: WakuMessage): Result[bool, string] =
+  ## extracts the `ProofMetadata` of the supplied message `msg` and
+  ## saves it in the `nullifierLog` of the `rlnPeer`
+
+  let proofMD = ProofMetadata(nullifier: msg.proof.nullifier, shareX: msg.proof.shareX, shareY: msg.proof.shareY)
+  debug "proof metadata", proofMD=proofMD
+
+  # check if the epoch exists
+  if not rlnPeer.nullifierLog.hasKey(msg.proof.epoch):
+    rlnPeer.nullifierLog[msg.proof.epoch] = @[proofMD]
+    return ok(true)
+
+  try:
+    # check if an identical record exists
+    if rlnPeer.nullifierLog[msg.proof.epoch].contains(proofMD):
+      return ok(true)
+    # add proofMD to the log
+    rlnPeer.nullifierLog[msg.proof.epoch].add(proofMD)
+    return ok(true)
+  except KeyError as e:
+    return err("the epoch was not found")
+
+proc toEpoch*(t: uint64): Epoch =
+  ## converts `t` to `Epoch` in little-endian order
+  let bytes = toBytes(t, Endianness.littleEndian)
+  debug "bytes", bytes=bytes
+  var epoch: Epoch
+  discard epoch.copyFrom(bytes)
+  return epoch
+
+proc fromEpoch*(epoch: Epoch): uint64 =
+  ## decodes the bytes of `epoch` (in little-endian) to uint64
+  let t = fromBytesLE(uint64, array[32, byte](epoch))
+  return t
+
+proc calcEpoch*(t: float64): Epoch =
+  ## gets time `t` as `float64` with subsecond resolution in the fractional part
+  ## and returns its corresponding rln `Epoch` value
+  let e = uint64(t/EPOCH_UNIT_SECONDS)
+  return toEpoch(e)
+
+proc getCurrentEpoch*(): Epoch =
+  ## gets the current rln Epoch time
+  return calcEpoch(epochTime())
+
+proc compare*(e1, e2: Epoch): int64 =
+  ## returns the difference between the two rln `Epoch`s `e1` and `e2`
+  ## i.e., e1 - e2
+
+  # convert epochs to their corresponding unsigned numerical values
+  let
+    epoch1 = fromEpoch(e1)
+    epoch2 = fromEpoch(e2)
+  return int64(epoch1) - int64(epoch2)
+
+
+proc validateMessage*(rlnPeer: WakuRLNRelay, msg: WakuMessage): MessageValidationResult =
+  ## validates the supplied `msg` based on the waku-rln-relay routing protocol i.e.,
+  ## the `msg`'s epoch is within MAX_EPOCH_GAP of the current epoch
+  ## the `msg` has a valid rate limit proof
+  ## the `msg` does not violate the rate limit
+
+  # check whether the `msg`'s epoch is far from the current epoch
+  # it corresponds to the validation of the rln external nullifier
+  let
+    # get the current rln epoch
+    epoch = getCurrentEpoch()
+    msgEpoch = msg.proof.epoch
+    # calculate the gap
+    gap = compare(epoch, msgEpoch)
+
+  # validate the epoch
+  if abs(gap) >= MAX_EPOCH_GAP:
+    # the message's epoch is too old or too far ahead
+    # accept messages whose epoch is within +-MAX_EPOCH_GAP of the current epoch
+    return MessageValidationResult.Invalid
+
+  # verify the proof
+  if not rlnPeer.rlnInstance.proofVerify(msg.payload, msg.proof):
+    # invalid proof
+    return MessageValidationResult.Invalid
+
+  # check if double messaging has happened
+  let hasDup = rlnPeer.hasDuplicate(msg)
+  if hasDup.isOk and hasDup.value == true:
+    return MessageValidationResult.Spam
+
+  # insert the message into the log
+  # the result of `updateLog` is discarded because message insertion is guaranteed by the implementation i.e.,
+  # it will never error out
+  discard rlnPeer.updateLog(msg)
+  return MessageValidationResult.Valid
+
+
+proc appendRLNProof*(rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64): bool =
+  ## returns true if it can create and append a `RateLimitProof` to the supplied `msg`
+  ## returns false otherwise
+  ## `senderEpochTime` indicates the number of seconds passed since the Unix epoch. The fractional part holds sub-seconds.
+  ## the `epoch` field of `RateLimitProof` is derived from the provided `senderEpochTime` (using `calcEpoch()`)
+
+  let
+    contentTopicBytes = msg.contentTopic.toBytes
+    input = concat(msg.payload, contentTopicBytes)
+
+  var proof: RateLimitProofResult = proofGen(rlnInstance = rlnPeer.rlnInstance, data = input,
+                                             memKeys = rlnPeer.membershipKeyPair,
+                                             memIndex = rlnPeer.membershipIndex,
+                                             epoch = calcEpoch(senderEpochTime))
+
+  if proof.isErr:
+    return false
+
+  msg.proof = proof.value
+  return true
+
+proc addAll*(rlnInstance: RLN[Bn256], list: seq[IDCommitment]): bool =
+  # add members to the Merkle tree of the `rlnInstance`
+  for i in 0..list.len-1:
+    let member = list[i]
+    let member_is_added = rlnInstance.insertMember(member)
+    if not member_is_added:
+      return false
+  return true
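Taken together, appendRLNProof and validateMessage form the publish-and-validate flow that the tests earlier in this commit exercise: the sender derives the epoch from its current time and attaches a proof, and the receiver checks the epoch gap, verifies the proof, and then runs the double-signaling check against its nullifierLog. A minimal sketch (illustration only, not part of the commit; it assumes an already-mounted WakuRLNRelay instance named rlnPeer and the imports used by this module):

    # illustration only: `rlnPeer` is assumed to be an initialised WakuRLNRelay
    from times import epochTime
    from stew/byteutils import toBytes

    var msg = WakuMessage(payload: "hello".toBytes())

    # sender side: derive the epoch from the current time and attach a RateLimitProof
    if not rlnPeer.appendRLNProof(msg, epochTime()):
      echo "could not generate a rate limit proof"

    # receiver side: epoch-gap check, proof verification, then the double-signaling check
    case rlnPeer.validateMessage(msg)
    of MessageValidationResult.Valid:
      discard   # relay the message
    of MessageValidationResult.Invalid:
      discard   # drop: stale/future epoch or failed proof verification
    of MessageValidationResult.Spam:
      discard   # drop: a second message from the same member in the same epoch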