2023-06-08 11:52:21 +00:00
|
|
|
import
|
|
|
|
std/json
|
2023-01-16 12:56:18 +00:00
|
|
|
import
|
|
|
|
chronicles,
|
|
|
|
options,
|
2023-08-23 12:53:30 +00:00
|
|
|
eth/keys,
|
|
|
|
stew/[arrayops, byteutils, results, endians2],
|
|
|
|
std/[sequtils, strformat, strutils, tables],
|
2023-01-16 12:56:18 +00:00
|
|
|
nimcrypto/utils
|
|
|
|
|
|
|
|
import
|
|
|
|
./rln_interface,
|
|
|
|
../conversion_utils,
|
|
|
|
../protocol_types,
|
2023-03-20 10:51:35 +00:00
|
|
|
../protocol_metrics
|
2023-01-16 12:56:18 +00:00
|
|
|
import
|
2023-04-19 14:39:52 +00:00
|
|
|
../../waku_core,
|
|
|
|
../../waku_keystore
|
2023-01-16 12:56:18 +00:00
|
|
|
|
|
|
|
# chronicles log topic applied to every log entry emitted from this module
logScope:
  topics = "waku rln_relay ffi"
|
|
|
|
|
|
|
|
proc membershipKeyGen*(ctxPtr: ptr RLN): RlnRelayResult[IdentityCredential] =
  ## generates a IdentityCredential that can be used for the registration into the rln membership contract
  ## The zerokit `key_gen` call writes the four 32-byte components
  ## (trapdoor, nullifier, secret hash, commitment) back-to-back into `keysBuffer`.
  ## Returns an error if the key generation fails

  # keysBufferPtr will hold the generated identity tuple i.e., trapdoor, nullifier, secret hash and commitment
  var
    keysBuffer: Buffer
    keysBufferPtr = addr(keysBuffer)
    done = key_gen(ctxPtr, keysBufferPtr)

  # check whether the keys are generated successfully
  if not done:
    return err("error in key generation")

  # the FFI layer must hand back exactly 4 x 32 bytes
  if keysBuffer.len != 4*32:
    return err("keysBuffer is of invalid length")

  var generatedKeys = cast[ptr array[4*32, byte]](keysBufferPtr.`ptr`)[]

  # split the flat 128-byte buffer into its four 32-byte components,
  # using copyFrom for consistency with the proof parsing code below
  var
    idTrapdoor: array[32, byte]
    idNullifier: array[32, byte]
    idSecretHash: array[32, byte]
    idCommitment: array[32, byte]
  discard idTrapdoor.copyFrom(generatedKeys[0*32 .. 1*32 - 1])
  discard idNullifier.copyFrom(generatedKeys[1*32 .. 2*32 - 1])
  discard idSecretHash.copyFrom(generatedKeys[2*32 .. 3*32 - 1])
  discard idCommitment.copyFrom(generatedKeys[3*32 .. 4*32 - 1])

  let identityCredential = IdentityCredential(idTrapdoor: @idTrapdoor,
                                              idNullifier: @idNullifier,
                                              idSecretHash: @idSecretHash,
                                              idCommitment: @idCommitment)

  return ok(identityCredential)
|
|
|
|
|
2023-06-08 11:52:21 +00:00
|
|
|
type RlnTreeConfig = ref object of RootObj
  ## Merkle-tree section of the zerokit JSON configuration.
  ## Field names are intentionally snake_case: they are emitted verbatim as
  ## JSON keys by the `%` overload for RlnConfig below.
  cache_capacity: int # number of tree nodes kept in the in-memory cache
  mode: string # tree backend mode string passed through to zerokit
  compression: bool # whether the on-disk tree is compressed
  flush_every_ms: int # flush interval for persisting the tree to disk
  path: string # filesystem path of the persisted Merkle tree
|
|
|
|
|
|
|
|
type RlnConfig = ref object of RootObj
  ## Top-level zerokit configuration; serialized to JSON and handed to
  ## `new_circuit` when creating an RLN instance.
  resources_folder: string # folder containing circuit resources for the chosen tree depth
  tree_config: RlnTreeConfig # nested Merkle-tree settings (see RlnTreeConfig)
|
|
|
|
|
|
|
|
proc `%`(c: RlnConfig): JsonNode =
  ## Serializes an `RlnConfig` into the JSON document expected by zerokit.
  ## The nested tree_config node is assembled inline, so no dedicated `%`
  ## overload is needed for `RlnTreeConfig`.
  let treeConfigNode = %[
    ("cache_capacity", %c.tree_config.cache_capacity),
    ("mode", %c.tree_config.mode),
    ("compression", %c.tree_config.compression),
    ("flush_every_ms", %c.tree_config.flush_every_ms),
    ("path", %c.tree_config.path)
  ]
  result = %[
    ("resources_folder", %c.resources_folder),
    ("tree_config", treeConfigNode)
  ]
|
|
|
|
|
|
|
|
proc createRLNInstanceLocal(d = MerkleTreeDepth,
                            tree_path = DefaultRlnTreePath): RLNResult =
  ## generates an instance of RLN
  ## An RLN instance supports both zkSNARKs logics and Merkle tree data structure and operations
  ## d indicates the depth of Merkle tree
  ## tree_path indicates the path of the Merkle tree
  ## Returns an error if the instance creation fails

  # JSON configuration consumed by zerokit's new_circuit call
  let rln_config = RlnConfig(
    resources_folder: "tree_height_" & $d & "/",
    tree_config: RlnTreeConfig(
      cache_capacity: 15_000,
      mode: "high_throughput",
      compression: false,
      flush_every_ms: 500,
      # fall back to the default path when the caller passes an empty string
      path: if tree_path != "": tree_path else: DefaultRlnTreePath
    )
  )

  var serialized_rln_config = $(%rln_config)

  var
    rlnInstance: ptr RLN
    merkleDepth: csize_t = uint(d)
    configBuffer = serialized_rln_config.toOpenArrayByte(0, serialized_rln_config.high).toBuffer()

  # create an instance of RLN
  let res = new_circuit(merkleDepth, addr configBuffer, addr rlnInstance)
  # check whether the circuit parameters are generated successfully
  # (idiomatic `not res` instead of `res == false`)
  if not res:
    debug "error in parameters generation"
    return err("error in parameters generation")
  return ok(rlnInstance)
|
|
|
|
|
2023-06-08 11:52:21 +00:00
|
|
|
proc createRLNInstance*(d = MerkleTreeDepth,
                        tree_path = DefaultRlnTreePath): RLNResult =
  ## Metrics-instrumented wrapper around `createRLNInstanceLocal`:
  ## records the instance-creation duration, then forwards the result.
  ## Returns an error if the instance creation fails
  var creationResult: RLNResult
  waku_rln_instance_creation_duration_seconds.nanosecondTime:
    creationResult = createRLNInstanceLocal(d, tree_path)
  return creationResult
|
|
|
|
|
2023-03-13 14:39:33 +00:00
|
|
|
proc sha256*(data: openArray[byte]): RlnRelayResult[MerkleNode] =
  ## a thin layer on top of the Nim wrapper of the sha256 hasher
  ## the input is length-prefixed (via encodeLengthPrefix) before hashing,
  ## matching the serialization convention of the zerokit FFI
  ## returns an error if the underlying FFI hash call fails
  var lenPrefData = encodeLengthPrefix(data)
  var
    hashInputBuffer = lenPrefData.toBuffer()
    outputBuffer: Buffer # will hold the hash output

  trace "sha256 hash input buffer length", bufflen = hashInputBuffer.len
  let
    hashSuccess = sha256(addr hashInputBuffer, addr outputBuffer)

  # check whether the hash call is done successfully
  if not hashSuccess:
    return err("error in sha256 hash")

  # copy the 32-byte digest out of the FFI-owned output buffer
  let
    output = cast[ptr MerkleNode](outputBuffer.`ptr`)[]

  return ok(output)
|
|
|
|
|
|
|
|
proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] =
  ## a thin layer on top of the Nim wrapper of the poseidon hasher
  ## each element of `data` is one field input to the hash; the whole
  ## sequence is serialized into the layout expected by the FFI
  ## returns an error if the underlying FFI hash call fails
  var inputBytes = serialize(data)
  var
    hashInputBuffer = inputBytes.toBuffer()
    outputBuffer: Buffer # will hold the hash output

  let
    hashSuccess = poseidon(addr hashInputBuffer, addr outputBuffer)

  # check whether the hash call is done successfully
  if not hashSuccess:
    return err("error in poseidon hash")

  # copy the 32-byte digest out of the FFI-owned output buffer
  let
    output = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]

  return ok(output)
|
|
|
|
|
2024-02-01 18:56:47 +00:00
|
|
|
when defined(rln_v2):
|
2024-02-09 11:01:45 +00:00
|
|
|
  func toLeaf*(rateCommitment: RateCommitment): RlnRelayResult[MerkleNode] {.inline.} =
    ## Hashes a RateCommitment (idCommitment, userMessageLimit) into a
    ## Merkle tree leaf using the poseidon hasher.
    ## Returns an error if the hash call fails.
    let idCommitment = rateCommitment.idCommitment
    let userMessageLimit = rateCommitment.userMessageLimit
    # BUG FIX: the previous `cast[seq[byte]](userMessageLimit)` reinterpreted
    # the integer's bits as a seq descriptor (undefined behaviour); serialize
    # the limit explicitly to its little-endian byte representation instead.
    let leafRes = poseidon(@[@idCommitment, @(userMessageLimit.toBytes(Endianness.littleEndian))])
    return leafRes
|
|
|
|
|
|
|
|
  func toLeaves*(rateCommitments: seq[RateCommitment]): RlnRelayResult[seq[MerkleNode]] {.inline.} =
    ## Converts a sequence of RateCommitments into Merkle tree leaves.
    ## Returns an error if any commitment fails to hash.
    # BUG FIX: `newSeq[MerkleNode](len)` pre-filled the seq with zeroed leaves
    # and the subsequent `add` appended after them, yielding a result of double
    # length with a zero prefix; reserve capacity only.
    var leaves = newSeqOfCap[MerkleNode](rateCommitments.len)
    for rateCommitment in rateCommitments:
      let leafRes = toLeaf(rateCommitment)
      if leafRes.isErr():
        return err("could not convert the rate commitment to a leaf: " & leafRes.error)
      leaves.add(leafRes.get())
    return ok(leaves)
|
|
|
|
|
2024-02-01 18:56:47 +00:00
|
|
|
  # TODO: collocate this proc with the definition of the RateLimitProof
  # and the ProofMetadata types
  proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
    ## Extracts the nullifier-related fields of a v2 proof into ProofMetadata.
    ## In the rln_v2 build the proof already carries the external nullifier,
    ## so no hashing is required here (contrast with the non-v2 branch below).
    return ok(ProofMetadata(
      nullifier: proof.nullifier,
      shareX: proof.shareX,
      shareY: proof.shareY,
      externalNullifier: proof.externalNullifier
    ))
|
|
|
|
else:
|
|
|
|
  proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
    ## Extracts the nullifier-related fields of a proof into ProofMetadata.
    ## The v1 proof does not carry the external nullifier directly, so it is
    ## derived here as poseidon(epoch, rlnIdentifier).
    ## Returns an error if the derivation fails.
    let externalNullifierRes = poseidon(@[@(proof.epoch),
                                          @(proof.rlnIdentifier)])
    if externalNullifierRes.isErr():
      return err("could not construct the external nullifier")
    return ok(ProofMetadata(
      nullifier: proof.nullifier,
      shareX: proof.shareX,
      shareY: proof.shareY,
      externalNullifier: externalNullifierRes.get()
    ))
|
|
|
|
|
|
|
|
when defined(rln_v2):
|
|
|
|
  proc proofGen*(rlnInstance: ptr RLN,
                 data: openArray[byte],
                 membership: IdentityCredential,
                 userMessageLimit: UserMessageLimit,
                 messageId: MessageId,
                 index: MembershipIndex,
                 epoch: Epoch,
                 rlnIdentifier = DefaultRlnIdentifier): RateLimitProofResult =
    ## Generates an RLN v2 rate-limit proof over `data` for the member at
    ## tree `index`, bound to the given epoch/rlnIdentifier and messageId.
    ## Returns an error if the external nullifier cannot be derived or if
    ## the zerokit proof generation fails.

    # obtain the external nullifier as poseidon(epoch, rlnIdentifier)
    let externalNullifierRes = poseidon(@[@(epoch),
                                          @(rlnIdentifier)])

    if externalNullifierRes.isErr():
      return err("could not construct the external nullifier")

    # serialize inputs into the flat layout expected by generate_proof
    let serializedInputs = serialize(idSecretHash = membership.idSecretHash,
                                     memIndex = index,
                                     userMessageLimit = userMessageLimit,
                                     messageId = messageId,
                                     externalNullifier = externalNullifierRes.get(),
                                     msg = data)
    var inputBuffer = toBuffer(serializedInputs)

    debug "input buffer ", inputBuffer= repr(inputBuffer)

    # generate the proof
    var proof: Buffer
    let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
    # check whether the generate_proof call is done successfully
    if not proofIsSuccessful:
      return err("could not generate the proof")

    # the proof is a fixed 320-byte blob owned by the FFI layer; copy it out
    var proofValue = cast[ptr array[320, byte]] (proof.`ptr`)
    let proofBytes: array[320, byte] = proofValue[]
    debug "proof content", proofHex = proofValue[].toHex

    ## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]

    let
      proofOffset = 128
      rootOffset = proofOffset + 32
      externalNullifierOffset = rootOffset + 32
      shareXOffset = externalNullifierOffset + 32
      shareYOffset = shareXOffset + 32
      nullifierOffset = shareYOffset + 32

    var
      zkproof: ZKSNARK
      proofRoot, shareX, shareY: MerkleNode
      externalNullifier: ExternalNullifier
      nullifier: Nullifier

    # copy each fixed-size field out of the flat proof blob; the slice
    # boundaries must follow the layout documented above exactly
    discard zkproof.copyFrom(proofBytes[0..proofOffset-1])
    discard proofRoot.copyFrom(proofBytes[proofOffset..rootOffset-1])
    discard externalNullifier.copyFrom(proofBytes[rootOffset..externalNullifierOffset-1])
    discard shareX.copyFrom(proofBytes[externalNullifierOffset..shareXOffset-1])
    discard shareY.copyFrom(proofBytes[shareXOffset..shareYOffset-1])
    discard nullifier.copyFrom(proofBytes[shareYOffset..nullifierOffset-1])

    let output = RateLimitProof(proof: zkproof,
                                merkleRoot: proofRoot,
                                externalNullifier: externalNullifier,
                                shareX: shareX,
                                shareY: shareY,
                                nullifier: nullifier)
    return ok(output)
|
|
|
|
else:
|
|
|
|
  proc proofGen*(rlnInstance: ptr RLN, data: openArray[byte],
                 memKeys: IdentityCredential, memIndex: MembershipIndex,
                 epoch: Epoch): RateLimitProofResult =
    ## Generates an RLN v1 rate-limit proof over `data` for the member at
    ## tree index `memIndex`, bound to the given epoch.
    ## Returns an error if the zerokit proof generation fails.

    # serialize inputs into the flat layout expected by generate_proof
    let serializedInputs = serialize(idSecretHash = memKeys.idSecretHash,
                                     memIndex = memIndex,
                                     epoch = epoch,
                                     msg = data)
    var inputBuffer = toBuffer(serializedInputs)

    debug "input buffer ", inputBuffer= repr(inputBuffer)

    # generate the proof
    var proof: Buffer
    let proofIsSuccessful = generate_proof(rlnInstance, addr inputBuffer, addr proof)
    # check whether the generate_proof call is done successfully
    if not proofIsSuccessful:
      return err("could not generate the proof")

    # the proof is a fixed 320-byte blob owned by the FFI layer; copy it out
    var proofValue = cast[ptr array[320, byte]] (proof.`ptr`)
    let proofBytes: array[320, byte] = proofValue[]
    debug "proof content", proofHex = proofValue[].toHex

    ## parse the proof as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]

    let
      proofOffset = 128
      rootOffset = proofOffset + 32
      epochOffset = rootOffset + 32
      shareXOffset = epochOffset + 32
      shareYOffset = shareXOffset + 32
      nullifierOffset = shareYOffset + 32
      rlnIdentifierOffset = nullifierOffset + 32

    var
      zkproof: ZKSNARK
      proofRoot, shareX, shareY: MerkleNode
      epoch: Epoch
      nullifier: Nullifier
      rlnIdentifier: RlnIdentifier

    # copy each fixed-size field out of the flat proof blob; the slice
    # boundaries must follow the layout documented above exactly
    discard zkproof.copyFrom(proofBytes[0..proofOffset-1])
    discard proofRoot.copyFrom(proofBytes[proofOffset..rootOffset-1])
    discard epoch.copyFrom(proofBytes[rootOffset..epochOffset-1])
    discard shareX.copyFrom(proofBytes[epochOffset..shareXOffset-1])
    discard shareY.copyFrom(proofBytes[shareXOffset..shareYOffset-1])
    discard nullifier.copyFrom(proofBytes[shareYOffset..nullifierOffset-1])
    discard rlnIdentifier.copyFrom(proofBytes[nullifierOffset..rlnIdentifierOffset-1])

    let output = RateLimitProof(proof: zkproof,
                                merkleRoot: proofRoot,
                                epoch: epoch,
                                shareX: shareX,
                                shareY: shareY,
                                nullifier: nullifier,
                                rlnIdentifier: rlnIdentifier)

    return ok(output)
|
2023-01-16 12:56:18 +00:00
|
|
|
|
|
|
|
# validRoots should contain a sequence of roots in the acceptable windows.
# As default, it is set to an empty sequence of roots. This implies that the validity check for the proof's root is skipped
proc proofVerify*(rlnInstance: ptr RLN,
                  data: openArray[byte],
                  proof: RateLimitProof,
                  validRoots: seq[MerkleNode] = @[]): RlnRelayResult[bool] =
  ## verifies the proof, returns an error if the proof verification fails
  ## returns ok(true) if the proof is valid, ok(false) otherwise
  var
    proofBytes = serialize(proof, data)
    proofBuffer = proofBytes.toBuffer()
    validProof: bool
    rootsBytes = serialize(validRoots)
    rootsBuffer = rootsBytes.toBuffer()

  trace "serialized proof", proof = byteutils.toHex(proofBytes)

  let verifyIsSuccessful = verify_with_roots(rlnInstance, addr proofBuffer, addr rootsBuffer, addr validProof)
  if not verifyIsSuccessful:
    # something went wrong in verification call
    warn "could not verify validity of the proof", proof=proof
    return err("could not verify the proof")

  # return the verdict directly instead of the redundant if/else branches
  return ok(validProof)
|
|
|
|
|
|
|
|
proc insertMember*(rlnInstance: ptr RLN, idComm: IDCommitment): bool =
  ## Inserts a member (identity commitment) into the next free leaf of the tree.
  ## Returns true if the member was inserted successfully, false otherwise.
  var commitmentBuffer = toBuffer(idComm)
  # hand the serialized commitment over to the zerokit tree
  result = update_next_member(rlnInstance, addr commitmentBuffer)
|
|
|
|
|
2023-06-12 10:00:07 +00:00
|
|
|
proc getMember*(rlnInstance: ptr RLN, index: MembershipIndex): RlnRelayResult[IDCommitment] =
  ## returns the member at the given index
  ## returns an error if the index is out of bounds
  ## returns the member if the index is valid
  var
    idCommitment {.noinit.}: Buffer = Buffer()
    idCommitmentPtr = addr(idCommitment)
    memberRetrieved = get_leaf(rlnInstance, index, idCommitmentPtr)

  if not memberRetrieved:
    return err("could not get the member")

  # BUG FIX: `not idCommitment.len == 32` parsed as
  # `(not idCommitment.len) == 32` (bitwise complement of the length),
  # so the size check never fired
  if idCommitment.len != 32:
    return err("wrong output size")

  # copy the 32-byte commitment out of the FFI-owned buffer
  let idCommitmentValue = (cast[ptr array[32, byte]](idCommitment.`ptr`))[]

  return ok(@idCommitmentValue)
|
|
|
|
|
2023-05-18 05:12:08 +00:00
|
|
|
proc atomicWrite*(rlnInstance: ptr RLN,
                  index = none(MembershipIndex),
                  idComms = newSeq[IDCommitment](),
                  toRemoveIndices = newSeq[MembershipIndex]()): bool =
  ## Insert multiple members i.e., identity commitments, and remove multiple members
  ## in a single atomic zerokit operation
  ## `index` is the leaf index at which insertion starts; none() defaults to 0
  ## returns true if the operation is successful
  ## returns false if the operation fails

  let startIndex = if index.isNone(): MembershipIndex(0) else: index.get()

  # serialize the idComms
  let idCommsBytes = serialize(idComms)
  var idCommsBuffer = idCommsBytes.toBuffer()
  let idCommsBufferPtr = addr idCommsBuffer

  # serialize the toRemoveIndices
  let indicesBytes = serialize(toRemoveIndices)
  var indicesBuffer = indicesBytes.toBuffer()
  let indicesBufferPtr = addr indicesBuffer

  # single FFI call performs all insertions and removals atomically
  let operationSuccess = atomic_write(rlnInstance,
                                      startIndex,
                                      idCommsBufferPtr,
                                      indicesBufferPtr)
  return operationSuccess
|
2023-01-16 12:56:18 +00:00
|
|
|
|
|
|
|
proc insertMembers*(rlnInstance: ptr RLN,
                    index: MembershipIndex,
                    idComms: seq[IDCommitment]): bool =
  ## Inserts multiple identity commitments starting at leaf `index`.
  ## Returns true if all insertions succeed, false otherwise.
  ## Note: this is atomic — if any insertion fails, none are applied.
  atomicWrite(rlnInstance, index = some(index), idComms = idComms)
|
2023-01-16 12:56:18 +00:00
|
|
|
|
|
|
|
proc removeMember*(rlnInstance: ptr RLN, index: MembershipIndex): bool =
  ## Deletes the member stored at `index` from the Merkle tree.
  ## Returns true when the underlying zerokit call succeeds.
  result = delete_member(rlnInstance, index)
|
2023-01-16 12:56:18 +00:00
|
|
|
|
2023-03-31 13:45:04 +00:00
|
|
|
proc removeMembers*(rlnInstance: ptr RLN, indices: seq[MembershipIndex]): bool =
  ## Deletes all members at the given `indices` in one atomic operation.
  atomicWrite(rlnInstance, idComms = @[], toRemoveIndices = indices)
|
2023-03-31 13:45:04 +00:00
|
|
|
|
2023-01-16 12:56:18 +00:00
|
|
|
proc getMerkleRoot*(rlnInstance: ptr RLN): MerkleNodeResult =
  ## reads the current Merkle Tree root from the RLN instance
  ## returns an error if the root cannot be fetched or has the wrong size
  var
    root {.noinit.}: Buffer = Buffer()
    rootPtr = addr(root)
    getRootSuccessful = getRoot(rlnInstance, rootPtr)
  if not getRootSuccessful:
    return err("could not get the root")
  # BUG FIX: `not root.len == 32` parsed as `(not root.len) == 32`
  # (bitwise complement of the length), so the size check never fired
  if root.len != 32:
    return err("wrong output size")

  # copy the 32-byte root out of the FFI-owned buffer
  var rootValue = cast[ptr MerkleNode] (root.`ptr`)[]
  return ok(rootValue)
|
2023-06-16 06:03:41 +00:00
|
|
|
|
|
|
|
type
  RlnMetadata* = object
    ## Chain-synchronization state persisted in the RLN instance's store
    ## (see `serialize(RlnMetadata)` / `getMetadata` for the wire format).
    lastProcessedBlock*: uint64 # last chain block whose membership events were applied
    chainId*: uint64 # chain id of the membership contract's network
    contractAddress*: string # hex-encoded membership contract address
    validRoots*: seq[MerkleNode] # window of recently valid Merkle roots
|
2023-06-16 06:03:41 +00:00
|
|
|
|
|
|
|
proc serialize(metadata: RlnMetadata): seq[byte] =
  ## serializes the metadata
  ## layout: |lastProcessedBlock<8>|chainId<8>|contractAddress<20>|validRootsLen<8>|validRoots<32*len>|
  ## NOTE(review): the integer fields use stew `toBytes` with its default
  ## endianness — confirm this matches the `fromBytes` calls in `getMetadata`
  ## and the explicit littleEndian used by `MerkleNodeSeq.deserialize`.
  return concat(@(metadata.lastProcessedBlock.toBytes()),
                @(metadata.chainId.toBytes()),
                # the address is stored as its raw 20 bytes (hex-decoded, lowercased first)
                @(hexToSeqByte(toLower(metadata.contractAddress))),
                @(uint64(metadata.validRoots.len()).toBytes()),
                @(serialize(metadata.validRoots)))
|
|
|
|
|
|
|
|
# named alias so a `deserialize` overload can be attached to seq[MerkleNode]
type MerkleNodeSeq = seq[MerkleNode]
|
|
|
|
|
|
|
|
proc deserialize*(T: type MerkleNodeSeq, merkleNodeByteSeq: seq[byte]): T =
  ## Deserializes a byte seq into a seq of MerkleNodes.
  ## Wire format: |merkle_node_len<8, little-endian>|merkle_node[len]|
  let rootCount = uint64.fromBytes(merkleNodeByteSeq[0..7], Endianness.littleEndian)
  trace "length of valid roots", len = rootCount

  var deserializedRoots = newSeq[MerkleNode]()
  for rootIdx in 0'u64 ..< rootCount:
    # each root is a fixed 32-byte slice following the 8-byte length prefix
    let startByte = 8 + rootIdx * 32
    let rawRoot = merkleNodeByteSeq[startByte .. startByte + 31]
    trace "raw root", rawRoot = rawRoot
    var node: MerkleNode
    discard node.copyFrom(rawRoot)
    deserializedRoots.add(node)
  return deserializedRoots
|
2023-06-16 06:03:41 +00:00
|
|
|
|
|
|
|
proc setMetadata*(rlnInstance: ptr RLN, metadata: RlnMetadata): RlnRelayResult[void] =
  ## Persists the given metadata into the RLN instance's store.
  ## Returns an error if the underlying FFI call fails, ok() otherwise.

  # serialize the metadata into its flat wire format
  let metadataBytes = serialize(metadata)
  trace "setting metadata", metadata = metadata, metadataBytes = metadataBytes, len = metadataBytes.len
  var serializedBuffer = metadataBytes.toBuffer()

  # hand the serialized metadata over to zerokit
  if not set_metadata(rlnInstance, addr serializedBuffer):
    return err("could not set the metadata")
  return ok()
|
|
|
|
|
|
|
|
proc getMetadata*(rlnInstance: ptr RLN): RlnRelayResult[RlnMetadata] =
  ## gets the metadata of the RLN instance
  ## returns an error if the metadata could not be retrieved
  ## returns the metadata if the metadata is retrieved successfully

  # read the metadata
  var
    metadata {.noinit.}: Buffer = Buffer()
    metadataPtr = addr(metadata)
    getMetadataSuccessful = get_metadata(rlnInstance, metadataPtr)
  if not getMetadataSuccessful:
    return err("could not get the metadata")
  trace "metadata length", metadataLen = metadata.len

  # fixed byte offsets of the serialized fields (see `serialize(RlnMetadata)`)
  let
    lastProcessedBlockOffset = 0
    chainIdOffset = lastProcessedBlockOffset + 8
    contractAddressOffset = chainIdOffset + 8
    validRootsOffset = contractAddressOffset + 20

  var
    lastProcessedBlock: uint64
    chainId: uint64
    contractAddress: string
    validRoots: MerkleNodeSeq

  # 8 + 8 + 20 + 8 + (5*32) = 204
  # NOTE(review): this cast assumes the buffer holds exactly 5 valid roots
  # (204 bytes total); if zerokit ever returns a different number of roots,
  # or no metadata at all (metadata.len == 0), this fixed-size read is wrong —
  # confirm the upstream guarantee before relying on it.
  var metadataBytes = cast[ptr array[204, byte]](metadata.`ptr`)[]
  trace "received metadata bytes", metadataBytes = metadataBytes, len = metadataBytes.len

  # decode the fixed-width fields from the copied byte array
  lastProcessedBlock = uint64.fromBytes(metadataBytes[lastProcessedBlockOffset..chainIdOffset-1])
  chainId = uint64.fromBytes(metadataBytes[chainIdOffset..contractAddressOffset-1])
  contractAddress = byteutils.toHex(metadataBytes[contractAddressOffset..validRootsOffset - 1])
  let validRootsBytes = metadataBytes[validRootsOffset..metadataBytes.high]
  validRoots = MerkleNodeSeq.deserialize(validRootsBytes)

  # re-attach the 0x prefix dropped during serialization
  return ok(RlnMetadata(lastProcessedBlock: lastProcessedBlock,
                        chainId: chainId,
                        contractAddress: "0x" & contractAddress,
                        validRoots: validRoots))
|