mirror of https://github.com/waku-org/nwaku.git
chore: adding lint job to the CI (#2925)
Commit 086cc8edd2 (parent 8d107b0ded)
@@ -141,3 +141,25 @@ jobs:
       nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
       test_type: node-optional
       debug: waku*
+
+  lint:
+    name: "Lint"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
+
+      - name: Check nph formatting
+        # Pin nph to a specific version to avoid sudden style differences.
+        # Updating nph version should be accompanied with running the new
+        # version on the fluffy directory.
+        run: |
+          VERSION="v0.5.1"
+          ARCHIVE="nph-linux_x64.tar.gz"
+          curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
+          tar -xzf ${ARCHIVE}
+          shopt -s extglob # Enable extended globbing
+          ./nph examples waku tests tools apps *.@(nim|nims|nimble)
+          git diff --exit-code
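The job downloads a pinned nph release, runs it over the listed directories, and then relies on `git diff --exit-code` to fail the run if the formatter changed anything. As a rough illustration (the snippet below is not part of the commit), this is the kind of layout drift the check catches: nph joins short multi-line imports, so committing the first form would leave the work tree dirty after the `./nph` step and the Lint job would fail.

# Illustration only, not taken from the diff: a layout nph rewrites.
import
  std/[options, strutils]
# After the `./nph` step in the Lint job this becomes
#   import std/[options, strutils]
# and the resulting change makes `git diff --exit-code` return non-zero.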
config.nims (29 changed lines)

@@ -9,7 +9,8 @@ if defined(windows):
   # increase stack size
   switch("passL", "-Wl,--stack,8388608")
   # https://github.com/nim-lang/Nim/issues/4057
-  --tlsEmulation:off
+  --tlsEmulation:
+    off
   if defined(i386):
     # set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM
     switch("passL", "-Wl,--large-address-aware")
@@ -60,14 +61,18 @@ else:
   switch("passC", "-mno-avx512f")
   switch("passL", "-mno-avx512f")
 
---threads:on
---opt:speed
---excessiveStackTrace:on
+--threads:
+  on
+--opt:
+  speed
+--excessiveStackTrace:
+  on
 # enable metric collection
---define:metrics
+--define:
+  metrics
 # for heap-usage-by-instance-type metrics and object base-type strings
---define:nimTypeNames
+--define:
+  nimTypeNames
 
 switch("define", "withoutPCRE")
 
@@ -75,13 +80,17 @@ switch("define", "withoutPCRE")
 # "--debugger:native" build. It can be increased with `ulimit -n 1024`.
 if not defined(macosx) and not defined(android):
   # add debugging symbols and original files and line numbers
-  --debugger:native
+  --debugger:
+    native
 if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace):
   # light-weight stack traces using libbacktrace and libunwind
-  --define:nimStackTraceOverride
+  --define:
+    nimStackTraceOverride
   switch("import", "libbacktrace")
 
---define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9
+--define:
+  nimOldCaseObjects
+# https://github.com/status-im/nim-confutils/issues/9
 
 # `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'"
 switch("warning", "CaseTransition:off")
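The config.nims hunks are pure formatting: nph splits NimScript command-syntax switches such as `--define:metrics` onto two lines. Assuming the usual NimScript `--` template (this reasoning is not spelled out in the commit), both spellings expand to the same `switch` call, so the build flags are unchanged. A config.nims-style sketch:

# Sketch for a config.nims, not taken from the diff: all three forms below
# should end up as the same switch("define", "metrics") call, so nph's
# two-line layout only changes formatting, not behaviour.
--define:metrics

--define:
  metrics

switch("define", "metrics")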
@@ -1,7 +1,6 @@
 {.push raises: [].}
 
-import
-  waku/[common/logging, factory/[waku, networks_config, external_config]]
+import waku/[common/logging, factory/[waku, networks_config, external_config]]
 import
   std/[options, strutils, os, sequtils],
   stew/shims/net as stewNet,
@@ -2,9 +2,7 @@
 
 import
   results,
-  waku/[common/logging,
-    waku_node,
-    waku_rln_relay,],
+  waku/[common/logging, waku_node, waku_rln_relay],
   ./erc_5564_interface as StealthCommitmentFFI,
   ./node_spec,
   ./wire_spec
@@ -1,2 +1 @@
-import
-  ./test_rpc_codec
+import ./test_rpc_codec
@@ -217,6 +217,7 @@ proc setup(): Future[OnchainGroupManager] {.async.} =
     rlnInstance: rlnInstance,
     onFatalErrorAction: proc(errStr: string) =
       raiseAssert errStr
+    ,
   )
 
   return manager
@@ -279,6 +280,7 @@ suite "Onchain group manager":
     rlnInstance: manager.rlnInstance,
     onFatalErrorAction: proc(errStr: string) =
       raiseAssert errStr
+    ,
   )
   (await manager2.init()).isErrOr:
     raiseAssert "Expected error when contract address doesn't match"
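In the two test hunks above, the only change is the field separator that nph places on its own line when an object-construction argument is an anonymous proc. A minimal sketch of that layout with hypothetical names (Config and onError are illustrative, not from the codebase):

# Hypothetical example of the "dangling comma" layout nph produces when a
# constructor field is an inline proc; the comma closes the field after the body.
type Config = object
  onError: proc(msg: string)

let cfg = Config(
  onError: proc(msg: string) =
    echo msg
  ,
)
cfg.onError("something went wrong")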
@@ -797,8 +799,8 @@ suite "Onchain group manager":
     try:
       await node.mountRlnRelay(wakuRlnConfig)
     except CatchableError as e:
-      check e.msg == "failed to mount WakuRlnRelay: rln-relay-user-message-limit can't be exceed then MAX_MESSAGE_LIMIT set by rln contract"
+      check e.msg ==
+        "failed to mount WakuRlnRelay: rln-relay-user-message-limit can't be exceed then MAX_MESSAGE_LIMIT set by rln contract"
 
 ################################
 ## Terminating/removing Anvil
@@ -63,7 +63,7 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
     keystorePath: none(string),
     keystorePassword: none(string),
     ethPrivateKey: some(conf.rlnRelayEthPrivateKey),
-    onFatalErrorAction: onFatalErrorAction
+    onFatalErrorAction: onFatalErrorAction,
   )
   try:
     (waitFor groupManager.init()).isOkOr:
@@ -4,8 +4,6 @@
 ## Rate limit is applied separately by each peer upon first use. Also time period is counted distinct per peer.
 ## It will use compensating replenish mode for peers to balance the load and allow fair usage of a service.
 
-
-
 {.push raises: [].}
 
 import std/[options, tables], chronos/timer, libp2p/stream/connection, libp2p/utility
@@ -1,14 +1,10 @@
-import
-  json_serialization,
-  std/options
-import
-  ../waku_core
+import json_serialization, std/options
+import ../waku_core
 
 # Implementing the RFC:
 # https://github.com/vacp2p/rfc/tree/master/content/docs/rfcs/73
 
 type
   EligibilityProof* = object
     proofOfPayment*: Option[seq[byte]]
 
@@ -1,10 +1,5 @@
-import
-  std/options
-import
-  ../common/protobuf,
-  ../waku_core,
-  ./rpc
-
+import std/options
+import ../common/protobuf, ../waku_core, ./rpc
 
 # Codec for EligibilityProof
 
@@ -28,7 +23,6 @@ proc decode*(T: type EligibilityProof, buffer: seq[byte]): ProtobufResult[T] =
     epRpc.proofOfPayment = some(proofOfPayment)
   ok(epRpc)
 
-
 # Codec for EligibilityStatus
 
 proc encode*(esRpc: EligibilityStatus): ProtoBuffer =
@@ -55,5 +49,3 @@ proc decode*(T: type EligibilityStatus, buffer: seq[byte]): ProtobufResult[T] =
     else:
       esRpc.statusDesc = some(description)
   ok(esRpc)
-
-
@@ -426,6 +426,5 @@ proc readValue*(
       reader.raiseUnexpectedValue("Field `requestId` is missing")
 
   value = FilterSubscriptionResponse(
-    requestId: requestId.get(),
-    statusDesc: statusDesc.get(""),
+    requestId: requestId.get(), statusDesc: statusDesc.get("")
   )
@@ -33,7 +33,8 @@ proc parseHash*(input: Option[string]): Result[Option[WakuMessageHash], string]
     return err("waku message hash parsing error: " & error)
 
   if decodedBytes.len != 32:
-    return err("waku message hash parsing error: invalid hash length: " & $decodedBytes.len)
+    return
+      err("waku message hash parsing error: invalid hash length: " & $decodedBytes.len)
 
   let hash: WakuMessageHash = fromBytes(decodedBytes)
 
@@ -30,8 +30,8 @@ const DefaultRlnTreePath* = "rln_tree.db"
 const
   # pre-processed "rln/waku-rln-relay/v2.0.0" to array[32, byte]
   DefaultRlnIdentifier*: RlnIdentifier = [
-    114, 108, 110, 47, 119, 97, 107, 117, 45, 114, 108, 110, 45, 114, 101, 108, 97,
-    121, 47, 118, 50, 46, 48, 46, 48, 0, 0, 0, 0, 0, 0, 0,
+    114, 108, 110, 47, 119, 97, 107, 117, 45, 114, 108, 110, 45, 114, 101, 108, 97, 121,
+    47, 118, 50, 46, 48, 46, 48, 0, 0, 0, 0, 0, 0, 0,
   ]
   DefaultUserMessageLimit* = UserMessageLimit(20)
 
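The comment in this hunk says the constant is the string "rln/waku-rln-relay/v2.0.0" pre-processed into array[32, byte]. A small sketch (not part of the commit) of how such a padded identifier can be reproduced, which also explains the trailing zeros:

# Sketch, not from the diff: copy the ASCII bytes of the identifier string into
# a 32-byte array; the unused tail stays zero, matching the trailing 0s above.
const rlnIdString = "rln/waku-rln-relay/v2.0.0"
var id: array[32, byte]
for i, c in rlnIdString:
  id[i] = byte(c)
doAssert id[0] == 114 and id[24] == 48 and id[31] == 0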
@@ -60052,7 +60052,6 @@ const StaticGroupKeys* =
 const StaticGroupMerkleRoot* =
   "2c149e48886b5ba3da2edf8db8d7a364ae7a25618489c04cf0c0380f7cdd4d6f"
 
-
 const MaxClockGapSeconds* = 20.0 # the maximum clock difference between peers in seconds
 
 # RLN Keystore defaults
@@ -1,4 +1,3 @@
-
 # This contract code is used in deployment, note: this is not the deployedBytecode, it includes constructor args.
 # Ref: https://github.com/waku-org/waku-rlnv2-contract
 const PoseidonT3* =
@@ -78,7 +78,6 @@ proc serialize*(
   )
   return output
 
-
 proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
   ## a private proc to convert RateLimitProof and data to a byte seq
   ## this conversion is used in the proof verification proc
@@ -30,15 +30,11 @@ logScope:
 # using the when predicate does not work within the contract macro, hence need to dupe
 contract(WakuRlnContract):
   # this serves as an entrypoint into the rln membership set
-  proc register(
-    idCommitment: UInt256, userMessageLimit: UInt32
-  )
+  proc register(idCommitment: UInt256, userMessageLimit: UInt32)
   # Initializes the implementation contract (only used in unit tests)
   proc initialize(maxMessageLimit: UInt256)
   # this event is raised when a new member is registered
-  proc MemberRegistered(
-    rateCommitment: UInt256, index: Uint32
-  ) {.event.}
+  proc MemberRegistered(rateCommitment: UInt256, index: Uint32) {.event.}
 
   # this function denotes existence of a given user
   proc memberExists(idCommitment: Uint256): UInt256 {.view.}
@@ -136,7 +132,8 @@ method atomicBatch*(
   var membersSeq = newSeq[Membership]()
   for i in 0 ..< rateCommitments.len:
     var index = start + MembershipIndex(i)
-    debug "registering member to callback", rateCommitment = rateCommitments[i], index = index
+    debug "registering member to callback",
+      rateCommitment = rateCommitments[i], index = index
     let member = Membership(rateCommitment: rateCommitments[i], index: index)
     membersSeq.add(member)
   await g.registerCb.get()(membersSeq)
@@ -154,7 +151,6 @@ method register*(
   except CatchableError:
     raise newException(ValueError, getCurrentExceptionMsg())
 
-
 method registerBatch*(
     g: OnchainGroupManager, rateCommitments: seq[RawRateCommitment]
 ): Future[void] {.async: (raises: [Exception]).} =
@@ -179,13 +175,12 @@ method register*(
   let idCommitment = identityCredential.idCommitment.toUInt256()
 
   debug "registering the member",
-    idCommitment = idCommitment,
-    userMessageLimit = userMessageLimit
+    idCommitment = idCommitment, userMessageLimit = userMessageLimit
   var txHash: TxHash
   g.retryWrapper(txHash, "Failed to register the member"):
-    await wakuRlnContract
-    .register(idCommitment, userMessageLimit.stuint(32))
-    .send(gasPrice = gasPrice)
+    await wakuRlnContract.register(idCommitment, userMessageLimit.stuint(32)).send(
+      gasPrice = gasPrice
+    )
 
   # wait for the transaction to be mined
   var tsReceipt: ReceiptObject
@@ -199,9 +194,7 @@ method register*(
   let firstTopic = tsReceipt.logs[0].topics[0]
   # the hash of the signature of MemberRegistered(uint256,uint32) event is equal to the following hex value
   if firstTopic !=
-      cast[FixedBytes[32]](keccak256.digest(
-        "MemberRegistered(uint256,uint32)"
-      ).data):
+      cast[FixedBytes[32]](keccak256.digest("MemberRegistered(uint256,uint32)").data):
     raise newException(ValueError, "unexpected event signature")
 
   # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field
@@ -219,7 +212,6 @@ method register*(
   # don't handle member insertion into the tree here, it will be handled by the event listener
   return
 
-
 method withdraw*(
     g: OnchainGroupManager, idCommitment: IDCommitment
 ): Future[void] {.async: (raises: [Exception]).} =
@@ -260,7 +252,6 @@ proc parseEvent(
         index: index.toMembershipIndex(),
       )
     )
-
   except CatchableError:
     return err("failed to parse the data field of the MemberRegistered event")
 
@@ -350,8 +341,8 @@ proc handleEvents(
         toRemoveIndices = removalIndices,
       )
       g.latestIndex = startIndex + MembershipIndex(rateCommitments.len)
-      trace "new members added to the Merkle tree", commitments = rateCommitments.mapIt(it.inHex)
+      trace "new members added to the Merkle tree",
+        commitments = rateCommitments.mapIt(it.inHex)
     except CatchableError:
       error "failed to insert members into the tree", error = getCurrentExceptionMsg()
       raise newException(ValueError, "failed to insert members into the tree")
@@ -545,10 +536,14 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
 
   # Set the chain id
   if g.chainId == 0:
-    warn "Chain ID not set in config, using RPC Provider's Chain ID", providerChainId = fetchedChainId
+    warn "Chain ID not set in config, using RPC Provider's Chain ID",
+      providerChainId = fetchedChainId
 
   if g.chainId != 0 and g.chainId != fetchedChainId:
-    return err("The RPC Provided a Chain ID which is different than the provided Chain ID: provided = " & $g.chainId & ", actual = " & $fetchedChainId)
+    return err(
+      "The RPC Provided a Chain ID which is different than the provided Chain ID: provided = " &
+        $g.chainId & ", actual = " & $fetchedChainId
+    )
 
   g.chainId = fetchedChainId
 
@@ -614,12 +609,16 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
     g.validRoots = metadata.validRoots.toDeque()
 
   var deployedBlockNumber: Uint256
-  g.retryWrapper(deployedBlockNumber, "Failed to get the deployed block number. Have you set the correct contract address?"):
+  g.retryWrapper(
+    deployedBlockNumber,
+    "Failed to get the deployed block number. Have you set the correct contract address?",
+  ):
     await wakuRlnContract.deployedBlockNumber().call()
   debug "using rln contract", deployedBlockNumber, rlnContractAddress = contractAddress
   g.rlnContractDeployedBlockNumber = cast[BlockNumber](deployedBlockNumber)
   g.latestProcessedBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber)
-  g.rlnRelayMaxMessageLimit = cast[uint64](await wakuRlnContract.MAX_MESSAGE_LIMIT().call())
+  g.rlnRelayMaxMessageLimit =
+    cast[uint64](await wakuRlnContract.MAX_MESSAGE_LIMIT().call())
 
   proc onDisconnect() {.async.} =
     error "Ethereum client disconnected"
@@ -70,7 +70,6 @@ method register*(
 
   await g.registerBatch(@[leaf])
 
-
 method registerBatch*(
     g: StaticGroupManager, rateCommitments: seq[RawRateCommitment]
 ): Future[void] {.async: (raises: [Exception]).} =
@@ -23,6 +23,7 @@ type
   RateCommitment* = object
     idCommitment*: IDCommitment
     userMessageLimit*: UserMessageLimit
 
   RawRateCommitment* = seq[byte]
 
 proc toRateCommitment*(rateCommitmentUint: UInt256): RawRateCommitment =