Mirror of https://github.com/logos-messaging/logos-messaging-nim.git, synced 2026-01-03 06:23:10 +00:00
chore: tidy up based on hints
Used compiler hints to tidy up some code.
parent f68d79996e
commit ccc5927c7a

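The hints in question are Nim compiler hints. As a minimal sketch (not from this repository, hypothetical names), the most common case addressed in the hunks below is a binding that is declared but never read:

# hints_demo.nim — hypothetical example; compile with `nim c --hints:on hints_demo.nim`
proc loadConf(): string =
  "chat2bridge.toml"

proc main() =
  let
    rng = 42                  # Hint: 'rng' is declared but not used [XDeclaredButNotUsed]
    conf = loadConf()
  echo "loaded ", conf

main()

Most of the edits below fall into a few such categories: unused bindings, duplicate imports, and redundant type conversions.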
@@ -249,9 +249,7 @@ proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} =
when isMainModule:
  import waku/common/utils/nat, waku/waku_api/message_cache

-  let
-    rng = newRng()
-    conf = Chat2MatterbridgeConf.load()
+  let conf = Chat2MatterbridgeConf.load()

  if conf.logLevel != LogLevel.NONE:
    setLogLevel(conf.logLevel)

@@ -51,7 +51,6 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
  let
    nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
    ip = parseIpAddress("0.0.0.0")
    flags = CapabilitiesBitfield.init(relay = true)

  let relayShards = RelayShards.init(clusterId, shardId).valueOr:
    error "Relay shards initialization failed", error = error

@@ -43,7 +43,6 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
  let
    nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get()
    ip = parseIpAddress("0.0.0.0")
    flags = CapabilitiesBitfield.init(relay = true)

  let relayShards = RelayShards.init(clusterId, shardId).valueOr:
    error "Relay shards initialization failed", error = error

@@ -5,7 +5,7 @@
when defined(linux):
  {.passl: "-Wl,-soname,libwaku.so".}

-import std/[json, atomics, strformat, options, atomics]
+import std/[json, atomics, strformat, options]
import chronicles, chronos, chronos/threadsync
import
  waku/common/base64,

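The old import line listed std/atomics twice, which the compiler reports as a duplicate module import. A minimal sketch of the same situation, unrelated to libwaku:

# duplicate_import_demo.nim — hypothetical example of a duplicated module in an import bracket
import std/[strformat, options, options]   # Warning: duplicate import of 'options'

echo fmt"answer: {some(42).get()}"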
@@ -305,7 +305,7 @@ proc waku_relay_subscribe(
  handleRequest(
    ctx,
    RequestType.RELAY,
-    RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)),
+    RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, cb),
    callback,
    userData,
  )

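`cb` already has the handler type at this call site, so wrapping it in `WakuRelayHandler(...)` converts a value to its own type, which the compiler hints about. A sketch with made-up names (`Handler`, `register`) rather than the libwaku ones:

# redundant_conv_demo.nim — hypothetical types illustrating a same-type conversion
type Handler = proc (msg: string)

proc register(h: Handler) =
  h("hello")

let cb: Handler = proc (msg: string) = echo "got: ", msg
register(Handler(cb))   # Hint: conversion from Handler to itself is pointless
register(cb)            # equivalent call without the no-op conversion

The same reasoning applies to the unsubscribe hunk that follows.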
@@ -347,7 +347,7 @@ proc waku_relay_unsubscribe(
    ctx,
    RequestType.RELAY,
    RelayRequest.createShared(
-      RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx))
+      RelayMsgType.UNSUBSCRIBE, pubSubTopic, onReceivedMessage(ctx)
    ),
    callback,
    userData,

@@ -32,7 +32,6 @@ type WakuContext* = object
    running: Atomic[bool] # To control when the threads are running

const git_version* {.strdefine.} = "n/a"
const versionString = "version / git commit hash: " & waku.git_version

template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) =
  if isNil(ctx[].eventCallback):

@@ -1,7 +1,6 @@
{.push raises: [].}

import std/options
import chronos/timer
import metrics, setting

export metrics

@@ -538,7 +538,7 @@ proc build*(
  let dns4DomainName =
    if builder.dns4DomainName.isSome():
      let d = builder.dns4DomainName.get()
-      if d.string != "":
+      if d != "":
        some(d)
      else:
        none(string)

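The dropped `.string` suffix suggests `dns4DomainName` was once a distinct string type; with the field now an ordinary string, the conversion is a self-conversion the compiler hints about. A sketch with a hypothetical `DomainName` type, showing when the conversion matters and when it is a no-op:

# distinct_string_demo.nim — hypothetical DomainName type
type DomainName = distinct string

let wrapped = DomainName("status.example.org")
echo wrapped.string.len    # conversion needed: `distinct` hides string's procs

let plain = "status.example.org"
echo plain.string.len      # Hint: conversion from string to itself is pointless
echo plain.len             # the tidy version

The same pattern shows up again in the validateNoEmptyStrings and natStrategy hunks below.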
@@ -69,7 +69,7 @@ proc networkConfiguration*(
  ## `udpPort` is only supplied to satisfy underlying APIs but is not
  ## actually a supported transport for libp2p traffic.
  let natRes = setupNat(
-    conf.natStrategy.string,
+    conf.natStrategy,
    clientId,
    Port(uint16(conf.p2pTcpPort) + portsShift),
    Port(uint16(conf.p2pTcpPort) + portsShift),

@@ -174,7 +174,7 @@ proc validateNodeKey(wakuConf: WakuConf): Result[void, string] =

proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =
  if wakuConf.endpointConf.dns4DomainName.isSome() and
-      isEmptyOrWhiteSpace(wakuConf.endpointConf.dns4DomainName.get().string):
+      isEmptyOrWhiteSpace(wakuConf.endpointConf.dns4DomainName.get()):
    return err("dns4-domain-name is an empty string, set it to none(string) instead")

  if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio):

@@ -545,7 +545,6 @@ proc connectToRelayPeers*(pm: PeerManager) {.async.} =
    return

  var (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec)
-  let totalRelayPeers = inRelayPeers.len + outRelayPeers.len

  if inRelayPeers.len > pm.inRelayPeersTarget:
    await pm.pruneInRelayConns(inRelayPeers.len - pm.inRelayPeersTarget)

@@ -321,20 +321,20 @@ proc subscribe*(
    error "Invalid API call to `subscribe`. WakuRelay not mounted."
    return err("Invalid API call to `subscribe`. WakuRelay not mounted.")

-  let (pubsubTopic, contentTopicOp) =
+  let pubsubTopic =
    case subscription.kind
    of ContentSub:
      if node.wakuAutoSharding.isSome():
        let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
          error "Autosharding error", error = error
          return err("Autosharding error: " & error)
-        ($shard, some(subscription.topic))
+        $shard
      else:
        return err(
          "Static sharding is used, relay subscriptions must specify a pubsub topic"
        )
    of PubsubSub:
-      (subscription.topic, none(ContentTopic))
+      subscription.topic
    else:
      return err("Unsupported subscription type in relay subscribe")

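The tuple's second element (`contentTopicOp`) was never read, so the `case` expression now yields just the pubsub topic. A sketch, with hypothetical `SubKind`/`pickTopic` names, of a Nim `case` used as an expression the way the new code does:

# case_expr_demo.nim — hypothetical example of `case` as an expression
type SubKind = enum ContentSub, PubsubSub

proc pickTopic(kind: SubKind, topic: string): string =
  let pubsubTopic =
    case kind
    of ContentSub:
      "/waku/2/rs/1/0"   # stand-in for a shard derived by autosharding
    of PubsubSub:
      topic
  return pubsubTopic

echo pickTopic(PubsubSub, "/waku/2/default-waku/proto")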
@@ -1347,7 +1347,7 @@ proc mountLibp2pPing*(node: WakuNode) {.async: (raises: []).} =

  try:
    node.libp2pPing = Ping.new(rng = node.rng)
-  except Exception as e:
+  except Exception as _:
    error "failed to create ping", error = getCurrentExceptionMsg()

  if node.started:

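The handler names the exception `e` but reads the message via `getCurrentExceptionMsg()`, so the binding is never used; the new code discards the name. A small sketch of the same pattern (assuming the compiler flags the unused binding like any other declaration):

# unused_except_demo.nim — hypothetical example of an unused exception binding
import std/strutils

try:
  discard parseInt("not a number")
except ValueError as e:    # 'e' is never read; likely flagged as declared but not used
  echo "parse failed: ", getCurrentExceptionMsg()

Dropping the name (or binding `_`, as the diff does) keeps the handler while silencing the hint.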
@@ -21,8 +21,7 @@ import
  ../rest_serdes,
  ./types

-from std/times import getTime
-from std/times import toUnix
+from std/times import getTime, toUnix

export types

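Two `from std/times import ...` lines for the same module collapse into one clause, since a single `from` import can list several symbols. For instance:

# times_import_demo.nim — combined `from` import, as in the change above
from std/times import getTime, toUnix

echo getTime().toUnix()   # current Unix timestamp as int64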
@@ -31,7 +31,7 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] =

  var pkColumns = newSeq[string]()
  proc queryRowCallback(s: ptr sqlite3_stmt) =
-    let colName = cstring sqlite3_column_text(s, 0)
+    let colName = sqlite3_column_text(s, 0)
    pkColumns.add($colName)

  let query =

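`sqlite3_column_text` evidently already yields a `cstring` in this binding, so the extra `cstring ...` was a self-conversion; it is the `$` that actually copies the C string into a Nim string. A sketch with a stand-in for the C call:

# cstring_demo.nim — `cVersion` is a hypothetical stand-in for a C binding returning cstring
proc cVersion(): cstring =
  cstring("3.45.1")

let raw: cstring = cVersion()
let redundant = cstring(raw)   # Hint: conversion from cstring to itself is pointless
let copied: string = $raw      # `$` copies into a GC-managed Nim string
echo copied.len, " ", redundant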
@@ -31,7 +31,7 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] =

  var pkColumns = newSeq[string]()
  proc queryRowCallback(s: ptr sqlite3_stmt) =
-    let colName = cstring sqlite3_column_text(s, 0)
+    let colName = sqlite3_column_text(s, 0)
    pkColumns.add($colName)

  let query =

@@ -122,7 +122,6 @@ proc cleanUp*(fs: FilterSubscriptions) =
  let now = Moment.now()
  fs.peersSubscribed.keepItIf(now - val.lastSeen <= fs.subscriptionTimeout)

-  var filtersToRemove: seq[FilterCriterion] = @[]
  for filterCriterion, subscribedPeers in fs.subscriptions.mpairs:
    subscribedPeers.keepItIf(fs.isSubscribed(it) == true)

@@ -45,8 +45,6 @@ const
  dialFailure = "dial_failure"
  peerNotFoundFailure = "peer_not_found_failure"
  decodeRpcFailure = "decode_rpc_failure"
  retrievePeersDiscv5Error = "retrieve_peers_discv5_failure"
  pxFailure = "px_failure"

type
  WakuPeerExchangeResult*[T] = Result[T, PeerExchangeResponseStatus]

@@ -276,7 +276,7 @@ proc initRelayObservers(w: WakuRelay) =

  proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
    for msg in msgs.messages:
-      let (msg_id_short, topic, wakuMessage, msgSize) = decodeRpcMessageInfo(peer, msg).valueOr:
+      let (_, topic, wakuMessage, msgSize) = decodeRpcMessageInfo(peer, msg).valueOr:
        continue
      # message receive log happens in onValidated observer as onRecv is called before checks
      updateMetrics(peer, topic, wakuMessage, msgSize, onRecv = true)

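`msg_id_short` was unpacked but never used; replacing it with `_` discards that element without binding a name. A sketch with a hypothetical `decode` helper:

# tuple_unpack_demo.nim — hypothetical decode() returning a tuple
proc decode(raw: string): (string, string, int) =
  ("abcd1234", "/waku/2/rs/1/0", raw.len)

let (msgId, topic, size) = decode("payload")   # Hint: 'msgId' is declared but not used
echo topic, " ", size

let (_, topic2, size2) = decode("payload")     # `_` discards the unused element
echo topic2, " ", size2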
@@ -179,7 +179,6 @@ method generateProof*(
    messageId: MessageId,
    rlnIdentifier = DefaultRlnIdentifier,
): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} =
-  var lastProcessedEpoch {.global.}: Epoch
  ## generates a proof for the given data and epoch
  ## the proof is generated using the current merkle root
  if g.idCredentials.isNone():

@@ -555,8 +555,7 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
  let parsedPk = keys.PrivateKey.fromHex(pk).valueOr:
    return err("failed to parse the private key" & ": " & $error)
  ethRpc.privateKey = Opt.some(parsedPk)
-  ethRpc.defaultAccount =
-    ethRpc.privateKey.get().toPublicKey().toCanonicalAddress().Address
+  ethRpc.defaultAccount = ethRpc.privateKey.get().toPublicKey().toCanonicalAddress()

  let contractAddress = web3.fromHex(web3.Address, g.ethContractAddress)
  let wakuRlnContract = ethRpc.contractSender(WakuRlnContract, contractAddress)

@@ -382,12 +382,10 @@ proc generateRlnValidator*(

    let
      proof = toHex(msgProof.proof)
      epoch = fromEpoch(msgProof.epoch)
      root = inHex(msgProof.merkleRoot)
      shareX = inHex(msgProof.shareX)
      shareY = inHex(msgProof.shareY)
      nullifier = inHex(msgProof.nullifier)
      payload = string.fromBytes(message.payload)
    case validationRes
    of Valid:
      trace "message validity is verified, relaying:",