Merge branch 'master' into release/v0.37

commit 2d9cc0a561
Darshan K, 2025-10-15 14:28:14 +05:30 (committed via GitHub)
95 changed files with 519 additions and 522 deletions

View File

@@ -54,9 +54,9 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-22.04, macos-13]
+        os: [ubuntu-22.04, macos-15]

     runs-on: ${{ matrix.os }}
-    timeout-minutes: 60
+    timeout-minutes: 45
     name: build-${{ matrix.os }}
     steps:
@@ -92,9 +92,9 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-22.04, macos-13]
+        os: [ubuntu-22.04, macos-15]

     runs-on: ${{ matrix.os }}
-    timeout-minutes: 60
+    timeout-minutes: 45
     name: test-${{ matrix.os }}
     steps:

View File

@@ -34,10 +34,10 @@ jobs:
     needs: tag-name
     strategy:
       matrix:
-        os: [ubuntu-22.04, macos-13]
+        os: [ubuntu-22.04, macos-15]
         arch: [amd64]
         include:
-          - os: macos-13
+          - os: macos-15
            arch: arm64
     runs-on: ${{ matrix.os }}
     steps:

View File

@@ -14,10 +14,10 @@ jobs:
   build-and-upload:
     strategy:
       matrix:
-        os: [ubuntu-22.04, macos-13]
+        os: [ubuntu-22.04, macos-15]
         arch: [amd64]
         include:
-          - os: macos-13
+          - os: macos-15
            arch: arm64
     runs-on: ${{ matrix.os }}
     timeout-minutes: 60

View File

@@ -177,7 +177,7 @@ nimbus-build-system-nimble-dir:

 .PHONY: librln

 LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
-LIBRLN_VERSION := v0.7.0
+LIBRLN_VERSION := v0.8.0
 ifeq ($(detected_OS),Windows)
 LIBRLN_FILE := rln.lib

View File

@@ -24,7 +24,7 @@ proc benchmark(
   except Exception, CatchableError:
     assert false, "exception raised: " & getCurrentExceptionMsg()

-  debug "registration finished",
+  info "registration finished",
     iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds

   discard await manager.updateRoots()
@@ -33,7 +33,7 @@ proc benchmark(
     quit(QuitFailure)

   let epoch = default(Epoch)
-  debug "epoch in bytes", epochHex = epoch.inHex()
+  info "epoch in bytes", epochHex = epoch.inHex()

   let data: seq[byte] = newSeq[byte](1024)
   var proofGenTimes: seq[times.Duration] = @[]
@@ -50,7 +50,7 @@ proc benchmark(
     let ok = manager.verifyProof(data, proof).valueOr:
       raiseAssert $error
     proofVerTimes.add(getTime() - verify_time)
-    debug "iteration finished",
+    info "iteration finished",
       iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds

   echo "Proof generation times: ", sum(proofGenTimes) div len(proofGenTimes)

View File

@@ -194,9 +194,9 @@ proc publish(c: Chat, line: string) =
     # for future version when we support more than one rln protected content topic,
     # we should check the message content topic as well
     if c.node.wakuRlnRelay.appendRLNProof(message, float64(time)).isErr():
-      debug "could not append rate limit proof to the message"
+      info "could not append rate limit proof to the message"
     else:
-      debug "rate limit proof is appended to the message"
+      info "rate limit proof is appended to the message"
     let proof = RateLimitProof.init(message.proof).valueOr:
       error "could not decode the RLN proof"
       return
@@ -406,7 +406,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
    )
  elif conf.dnsDiscoveryUrl != "":
    # No pre-selected fleet. Discover nodes via DNS using user config
-    debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
+    info "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
    dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)

  var discoveredNodes: seq[RemotePeerInfo]
@@ -528,7 +528,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
    info "WakuRLNRelay is enabled"

    proc spamHandler(wakuMessage: WakuMessage) {.gcsafe, closure.} =
-      debug "spam handler is called"
+      info "spam handler is called"
      let chatLineResult = getChatLine(wakuMessage.payload)
      echo "spam message is found and discarded : " & chatLineResult
      chat.prompt = false
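The isErr() branch above follows the nim-results convention used throughout this codebase. A hedged, self-contained sketch, with appendProofStub as a hypothetical stand-in for appendRLNProof:

  import results, chronicles

  # Hypothetical stand-in for appendRLNProof: mutate the message or fail.
  proc appendProofStub(msg: var string): Result[void, string] =
    if msg.len == 0:
      return err("empty message")
    msg.add(" [rln-proof]")
    ok()

  var message = "hello"
  if appendProofStub(message).isErr():
    info "could not append rate limit proof to the message"
  else:
    info "rate limit proof is appended to the message"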

View File

@@ -197,7 +197,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
   cmb.running = true

-  debug "Start polling Matterbridge"
+  info "Start polling Matterbridge"

   # Start Matterbridge polling (@TODO: use streaming interface)
   proc mbHandler(jsonNode: JsonNode) {.async.} =
@@ -207,7 +207,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
   asyncSpawn cmb.pollMatterbridge(mbHandler)

   # Start Waku v2 node
-  debug "Start listening on Waku v2"
+  info "Start listening on Waku v2"
   await cmb.nodev2.start()

   # Always mount relay for bridge

View File

@@ -501,7 +501,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
    )
  elif conf.dnsDiscoveryUrl != "":
    # No pre-selected fleet. Discover nodes via DNS using user config
-    debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
+    info "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
    dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)

  var discoveredNodes: seq[RemotePeerInfo]

View File

@@ -130,7 +130,7 @@ when isMainModule:
     error "Starting waku failed", error = error
     quit(QuitFailure)

-  debug "Setting up shutdown hooks"
+  info "Setting up shutdown hooks"

  proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
    await waku.stop()

View File

@@ -73,7 +73,7 @@ proc selectRandomCapablePeer*(
     let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
     let randomPeer = supportivePeers[rndPeerIndex]

-    debug "Dialing random peer",
+    info "Dialing random peer",
       idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)

     supportivePeers.delete(rndPeerIndex .. rndPeerIndex)
@@ -82,12 +82,12 @@ proc selectRandomCapablePeer*(
     if (await connOpt.withTimeout(10.seconds)):
       if connOpt.value().isSome():
         found = some(randomPeer)
-        debug "Dialing successful",
+        info "Dialing successful",
           peer = constructMultiaddrStr(randomPeer), codec = codec
       else:
-        debug "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec
+        info "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec
     else:
-      debug "Timeout dialing service peer",
+      info "Timeout dialing service peer",
         peer = constructMultiaddrStr(randomPeer), codec = codec

   return found
@@ -105,8 +105,8 @@ proc tryCallAllPxPeers*(
   var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability)
   lpt_px_peers.set(supportivePeers.len)

-  debug "Found supportive peers count", count = supportivePeers.len()
-  debug "Found supportive peers", supportivePeers = $supportivePeers
+  info "Found supportive peers count", count = supportivePeers.len()
+  info "Found supportive peers", supportivePeers = $supportivePeers

   if supportivePeers.len == 0:
     return none(seq[RemotePeerInfo])
@@ -116,7 +116,7 @@ proc tryCallAllPxPeers*(
     let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
     let randomPeer = supportivePeers[rndPeerIndex]

-    debug "Dialing random peer",
+    info "Dialing random peer",
       idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)

     supportivePeers.delete(rndPeerIndex, rndPeerIndex)
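Both procs above use the same sample-without-replacement loop (note one uses the slice form of delete, the other the two-argument form, which is deprecated in recent Nim stdlibs). A standalone sketch on plain strings, with RemotePeerInfo and the actual dialing omitted:

  import std/[random, sequtils]
  import chronicles

  var supportivePeers = @["peerA", "peerB", "peerC"]
  randomize()
  while supportivePeers.len > 0:
    let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
    let randomPeer = supportivePeers[rndPeerIndex]
    info "Dialing random peer", idx = $rndPeerIndex, peer = randomPeer
    # slice form of delete; delete(first, last) is the deprecated spelling
    supportivePeers.delete(rndPeerIndex .. rndPeerIndex)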

View File

@@ -213,7 +213,7 @@ proc setConnectedPeersMetrics(
       continue

     var customPeerInfo = allPeers[peerIdStr]
-    debug "connected to peer", peer = customPeerInfo[]
+    info "connected to peer", peer = customPeerInfo[]

     # after connection, get supported protocols
     let lp2pPeerStore = node.switch.peerStore
@@ -358,7 +358,7 @@ proc retrieveDynamicBootstrapNodes(
   if dnsDiscoveryUrl != "":
     # DNS discovery
-    debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl
+    info "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl

     var nameServers: seq[TransportAddress]
     for ip in dnsAddrsNameServers:
@@ -376,7 +376,7 @@ proc retrieveDynamicBootstrapNodes(
       return (await value.findPeers()).mapErr(e => $e)
     warn "Failed to init Waku DNS discovery"

-  debug "No method for retrieving dynamic bootstrap nodes specified."
+  info "No method for retrieving dynamic bootstrap nodes specified."
  ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default

proc getBootstrapFromDiscDns(
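The `mapErr(e => $e)` call above converts a Result's error type. A minimal sketch assuming nim-results and std/sugar, with findPeersStub as a hypothetical stand-in for the DNS discovery call:

  import results
  import std/sugar

  # Hypothetical stand-in: fails with a numeric error code.
  proc findPeersStub(): Result[seq[string], int] =
    err(404)

  # mapErr turns Result[seq[string], int] into Result[seq[string], string].
  let converted: Result[seq[string], string] = findPeersStub().mapErr(e => $e)
  assert converted.isErr and converted.error == "404"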

View File

@@ -123,7 +123,7 @@ proc areProtocolsSupported(
   for rawProtocol in toValidateProtocols:
     let protocolTag = ProtocolsTable[rawProtocol]
-    debug "Checking if protocol is supported", expected_protocol_tag = protocolTag
+    info "Checking if protocol is supported", expected_protocol_tag = protocolTag

     var protocolSupported = false
     for nodeProtocol in nodeProtocols:

View File

@@ -60,7 +60,7 @@ when isMainModule:
     error "Starting waku failed", error = error
     quit(QuitFailure)

-  debug "Setting up shutdown hooks"
+  info "Setting up shutdown hooks"

  proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
    await waku.stop()
    quit(QuitSuccess)

View File

@@ -46,7 +46,7 @@ proc setup*(): Waku =
   if conf.relay:
     conf.rlnRelay = twnNetworkConf.rlnRelay

-  debug "Starting node"
+  info "Starting node"
   var waku = (waitFor Waku.new(conf)).valueOr:
     error "Waku initialization failed", error = error
     quit(QuitFailure)

View File

@@ -52,7 +52,7 @@ proc sendThruWaku*(
   (await self.waku.node.publish(some(DefaultPubsubTopic), message)).isOkOr:
     return err("failed to publish message: " & $error)

-  debug "rate limit proof is appended to the message"
+  info "rate limit proof is appended to the message"

   return ok()
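The publish call above uses nim-results' isOkOr early-return template, in which `error` is injected into the body. A hedged sketch, with publishStub as a hypothetical stand-in for node.publish:

  import results, chronicles

  # Hypothetical stand-in for node.publish.
  proc publishStub(payload: string): Result[void, string] =
    if payload.len > 1024:
      return err("payload too large")
    ok()

  proc sendStub(payload: string): Result[void, string] =
    publishStub(payload).isOkOr:
      return err("failed to publish message: " & $error)
    info "rate limit proof is appended to the message"
    return ok()

  discard sendStub("hello")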

View File

@@ -129,7 +129,7 @@ proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} =
     await sleepAsync(WatchdogTimeinterval)

     if ctx.running.load == false:
-      debug "Watchdog thread exiting because WakuContext is not running"
+      info "Watchdog thread exiting because WakuContext is not running"
       break

     let wakuCallback = proc(
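A sketch of the watchdog polling loop above, assuming the chronos runtime this codebase uses; the Atomic[bool] flag and 1-second interval are illustrative stand-ins for WakuContext.running and WatchdogTimeinterval:

  import std/atomics
  import chronos, chronicles

  var running: Atomic[bool]
  running.store(true)

  proc watchdogBody() {.async.} =
    while true:
      await sleepAsync(1.seconds)
      if running.load == false:
        info "Watchdog thread exiting because WakuContext is not running"
        break

  running.store(false)  # flag cleared, so the loop exits on its next tick
  waitFor watchdogBody()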

View File

@@ -19,13 +19,7 @@ host_triplet=$(rustc --version --verbose | awk '/host:/{print $2}')

 tarball="${host_triplet}"

-# use arkzkey feature for v0.7.0
-# TODO: update this script in the future when arkzkey is default
-if [[ "${rln_version}" == "v0.7.0" ]]; then
-  tarball+="-arkzkey-rln.tar.gz"
-else
-  tarball+="-rln.tar.gz"
-fi
+tarball+="-rln.tar.gz"

 # Download the prebuilt rln library if it is available
 if curl --silent --fail-with-body -L \

View File

@@ -595,13 +595,13 @@ suite "Waku Filter - End to End":
       await peers[index].mountFilterClient()

       ## connect switches
-      debug "establish connection", peerId = peers[index].peerInfo.peerId
+      info "establish connection", peerId = peers[index].peerInfo.peerId
       await server.switch.connect(
         peers[index].switch.peerInfo.peerId, peers[index].switch.peerInfo.listenAddrs
       )

-    debug "adding subscription"
+    info "adding subscription"
     (
       await wakuFilter.subscriptions.addSubscription(

View File

@@ -160,7 +160,7 @@ suite "RLN Proofs as a Lightpush Service":
       "exception raised when calling register: " & getCurrentExceptionMsg()

   let rootUpdated1 = waitFor manager1.updateRoots()
-  debug "Updated root for node1", rootUpdated1
+  info "Updated root for node1", rootUpdated1

   if rootUpdated1:
     let proofResult = waitFor manager1.fetchMerkleProofElements()

View File

@@ -160,7 +160,7 @@ suite "RLN Proofs as a Lightpush Service":
       "exception raised when calling register: " & getCurrentExceptionMsg()

   let rootUpdated1 = waitFor manager1.updateRoots()
-  debug "Updated root for node1", rootUpdated1
+  info "Updated root for node1", rootUpdated1

   if rootUpdated1:
     let proofResult = waitFor manager1.fetchMerkleProofElements()

View File

@@ -22,7 +22,7 @@ suite "Waku Keepalive":
     var completionFut = newFuture[bool]()

     proc pingHandler(peerId: PeerID) {.async, gcsafe.} =
-      debug "Ping received"
+      info "Ping received"

       check:
         peerId == node1.switch.peerInfo.peerId

View File

@@ -18,6 +18,7 @@ import
     factory/waku_conf,
     factory/conf_builder/conf_builder,
     factory/builder,
+    common/logging,
   ],
   ./common
@@ -72,6 +73,8 @@ proc newTestWakuNode*(
     clusterId = DefaultClusterId,
     subscribeShards = @[DefaultShardId],
 ): WakuNode =
+  logging.setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT)
+
   var resolvedExtIp = extIp

   # Update extPort to default value if it's missing and there's an extIp or a DNS domain
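The setupLog call added here forces verbose text logging for every test node. A hypothetical sketch of the shape of such a helper using chronicles' runtime filtering (not the repo's actual implementation, and it assumes chronicles is compiled with -d:chronicles_runtime_filtering=on):

  import chronicles

  # Hypothetical setupLog-like helper: apply a runtime level filter to the
  # compiled-in chronicles sinks.
  proc setupLogSketch(level: LogLevel) =
    setLogLevel(level)

  setupLogSketch(LogLevel.DEBUG)
  debug "verbose test logging enabled"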

View File

@@ -63,7 +63,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let putRes = await driver.put(
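This shuffle-then-log idiom repeats across every test in the store suites below; a standalone sketch on plain strings (the real code logs message payloads), assuming std/random and chronicles:

  import std/random
  import chronicles

  var messages = @["msg1", "msg2", "msg3"]
  randomize()
  shuffle(messages)

  # Logged at info so the failing insertion order shows up in default output.
  info "randomized message insertion sequence", sequence = messages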
@@ -99,7 +99,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -150,7 +150,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -189,7 +189,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -230,7 +230,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -286,7 +286,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -370,7 +370,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -427,7 +427,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -482,7 +482,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -524,7 +524,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -566,7 +566,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -606,7 +606,7 @@ suite "Postgres driver - queries":
     ]
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -655,7 +655,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -700,7 +700,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -788,7 +788,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -875,7 +875,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -919,7 +919,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it))
@@ -956,7 +956,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -996,7 +996,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1081,7 +1081,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1124,7 +1124,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1167,7 +1167,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1212,7 +1212,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1258,7 +1258,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1307,7 +1307,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1388,7 +1388,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1469,7 +1469,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1551,7 +1551,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1634,7 +1634,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1680,7 +1680,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1727,7 +1727,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1764,7 +1764,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (

View File

@@ -43,7 +43,7 @@ suite "Queue driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -85,7 +85,7 @@ suite "Queue driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -129,7 +129,7 @@ suite "Queue driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -175,7 +175,7 @@ suite "Queue driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -218,7 +218,7 @@ suite "Queue driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -313,7 +313,7 @@ suite "SQLite driver - query by pubsub topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -377,7 +377,7 @@ suite "SQLite driver - query by pubsub topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -439,7 +439,7 @@ suite "SQLite driver - query by pubsub topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -489,7 +489,7 @@ suite "Queue driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -536,7 +536,7 @@ suite "Queue driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -581,7 +581,7 @@ suite "Queue driver - query by cursor":
     ]
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -633,7 +633,7 @@ suite "Queue driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -683,7 +683,7 @@ suite "Queue driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -776,7 +776,7 @@ suite "Queue driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -870,7 +870,7 @@ suite "Queue driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -922,7 +922,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -967,7 +967,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -1057,7 +1057,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1107,7 +1107,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -1155,7 +1155,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = await driver.put(
@@ -1205,7 +1205,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = waitFor driver.put(
@@ -1256,7 +1256,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = await driver.put(
@@ -1310,7 +1310,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       let retFut = await driver.put(
@@ -1396,7 +1396,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1484,7 +1484,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1573,7 +1573,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1663,7 +1663,7 @@ suite "Queue driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:

View File

@@ -36,7 +36,7 @@ suite "SQLite driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -79,7 +79,7 @@ suite "SQLite driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -136,7 +136,7 @@ suite "SQLite driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -181,7 +181,7 @@ suite "SQLite driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -228,7 +228,7 @@ suite "SQLite driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -272,7 +272,7 @@ suite "SQLite driver - query by content topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -369,7 +369,7 @@ suite "SQLite driver - query by pubsub topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -432,7 +432,7 @@ suite "SQLite driver - query by pubsub topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -493,7 +493,7 @@ suite "SQLite driver - query by pubsub topic":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -542,7 +542,7 @@ suite "SQLite driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -590,7 +590,7 @@ suite "SQLite driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -636,7 +636,7 @@ suite "SQLite driver - query by cursor":
     ]
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -689,7 +689,7 @@ suite "SQLite driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -740,7 +740,7 @@ suite "SQLite driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -834,7 +834,7 @@ suite "SQLite driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -927,7 +927,7 @@ suite "SQLite driver - query by cursor":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -978,7 +978,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1024,7 +1024,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1115,7 +1115,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1164,7 +1164,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1213,7 +1213,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1264,7 +1264,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1316,7 +1316,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1371,7 +1371,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1458,7 +1458,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1545,7 +1545,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1633,7 +1633,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1722,7 +1722,7 @@ suite "SQLite driver - query by time range":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:

View File

@@ -90,7 +90,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -131,7 +131,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -186,7 +186,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -229,7 +229,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -274,7 +274,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -334,7 +334,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -426,7 +426,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -487,7 +487,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -546,7 +546,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -592,7 +592,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -638,7 +638,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -682,7 +682,7 @@ suite "Postgres driver - queries":
     ]
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -736,7 +736,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -785,7 +785,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -877,7 +877,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -968,7 +968,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1016,7 +1016,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it))
@@ -1057,7 +1057,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1101,7 +1101,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1190,7 +1190,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1237,7 +1237,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1284,7 +1284,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1333,7 +1333,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1383,7 +1383,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1436,7 +1436,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+    info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

     for msg in messages:
       require (
@@ -1521,7 +1521,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1606,7 +1606,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1692,7 +1692,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
       sequence = messages.mapIt(it[1].payload)

     for row in messages:
@@ -1779,7 +1779,7 @@ suite "Postgres driver - queries":
     var messages = expected
     shuffle(messages)

-    debug "randomized message insertion sequence",
+    info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1830,7 +1830,7 @@ suite "Postgres driver - queries":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1882,7 +1882,7 @@ suite "Postgres driver - queries":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1924,7 +1924,7 @@ suite "Postgres driver - queries":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
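The hunks above and most that follow make one mechanical change: the store-driver tests now log their randomized insertion order with chronicles `info` instead of `debug`, keeping the shuffled sequence visible at the default log level when a run fails. A minimal sketch of the pattern, with a stand-in message type rather than the project's own:

```nim
import std/[random, sequtils]
import chronicles

type Msg = object
  payload: string

var messages = @[Msg(payload: "a"), Msg(payload: "b"), Msg(payload: "c")]
randomize()
shuffle(messages)

# chronicles renders named arguments as structured key=value fields.
# At the default runtime level (INFO) this line is emitted, while a
# `debug` line would be filtered out or, with a compile-time setting
# such as -d:chronicles_log_level=INFO, stripped from the binary.
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
```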


@ -49,7 +49,7 @@ suite "Queue driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -95,7 +95,7 @@ suite "Queue driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -143,7 +143,7 @@ suite "Queue driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -193,7 +193,7 @@ suite "Queue driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -240,7 +240,7 @@ suite "Queue driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -343,7 +343,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -409,7 +409,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -473,7 +473,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -525,7 +525,7 @@ suite "Queue driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -576,7 +576,7 @@ suite "Queue driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -625,7 +625,7 @@ suite "Queue driver - query by cursor":
] ]
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -682,7 +682,7 @@ suite "Queue driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -736,7 +736,7 @@ suite "Queue driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -833,7 +833,7 @@ suite "Queue driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -929,7 +929,7 @@ suite "Queue driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -983,7 +983,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -1032,7 +1032,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -1126,7 +1126,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1178,7 +1178,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -1230,7 +1230,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = await driver.put( let retFut = await driver.put(
@ -1284,7 +1284,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = waitFor driver.put( let retFut = waitFor driver.put(
@ -1339,7 +1339,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = await driver.put( let retFut = await driver.put(
@ -1397,7 +1397,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
let retFut = await driver.put( let retFut = await driver.put(
@ -1487,7 +1487,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1577,7 +1577,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1668,7 +1668,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1760,7 +1760,7 @@ suite "Queue driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:


@ -38,7 +38,7 @@ suite "SQLite driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -85,7 +85,7 @@ suite "SQLite driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -146,7 +146,7 @@ suite "SQLite driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -195,7 +195,7 @@ suite "SQLite driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -246,7 +246,7 @@ suite "SQLite driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -294,7 +294,7 @@ suite "SQLite driver - query by content topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -399,7 +399,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -466,7 +466,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -531,7 +531,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -584,7 +584,7 @@ suite "SQLite driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -636,7 +636,7 @@ suite "SQLite driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -686,7 +686,7 @@ suite "SQLite driver - query by cursor":
] ]
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -744,7 +744,7 @@ suite "SQLite driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -799,7 +799,7 @@ suite "SQLite driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -897,7 +897,7 @@ suite "SQLite driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -994,7 +994,7 @@ suite "SQLite driver - query by cursor":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1049,7 +1049,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1099,7 +1099,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1194,7 +1194,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1247,7 +1247,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1300,7 +1300,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1355,7 +1355,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1411,7 +1411,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1470,7 +1470,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
for msg in messages: for msg in messages:
require ( require (
@ -1561,7 +1561,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1652,7 +1652,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1744,7 +1744,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:
@ -1837,7 +1837,7 @@ suite "SQLite driver - query by time range":
var messages = expected var messages = expected
shuffle(messages) shuffle(messages)
debug "randomized message insertion sequence", info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload) sequence = messages.mapIt(it[1].payload)
for row in messages: for row in messages:


@ -160,7 +160,7 @@ suite "Onchain group manager":
try: try:
for i in 0 ..< credentials.len(): for i in 0 ..< credentials.len():
debug "Registering credential", index = i, credential = credentials[i] info "Registering credential", index = i, credential = credentials[i]
waitFor manager.register(credentials[i], UserMessageLimit(20)) waitFor manager.register(credentials[i], UserMessageLimit(20))
discard waitFor manager.updateRoots() discard waitFor manager.updateRoots()
except Exception, CatchableError: except Exception, CatchableError:
@ -282,7 +282,7 @@ suite "Onchain group manager":
let messageBytes = "Hello".toBytes() let messageBytes = "Hello".toBytes()
let epoch = default(Epoch) let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex() info "epoch in bytes", epochHex = epoch.inHex()
let validProofRes = manager.generateProof( let validProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(1) data = messageBytes, epoch = epoch, messageId = MessageId(1)
@ -315,7 +315,7 @@ suite "Onchain group manager":
let messageBytes = "Hello".toBytes() let messageBytes = "Hello".toBytes()
let epoch = default(Epoch) let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex() info "epoch in bytes", epochHex = epoch.inHex()
let validProofRes = manager.generateProof( let validProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(1) data = messageBytes, epoch = epoch, messageId = MessageId(1)
@ -365,7 +365,7 @@ suite "Onchain group manager":
# prepare the epoch # prepare the epoch
let epoch = default(Epoch) let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex() info "epoch in bytes", epochHex = epoch.inHex()
# generate proof # generate proof
let validProof = manager.generateProof( let validProof = manager.generateProof(
@ -400,7 +400,7 @@ suite "Onchain group manager":
manager.merkleProofCache[i] = byte(rand(255)) manager.merkleProofCache[i] = byte(rand(255))
let epoch = default(Epoch) let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex() info "epoch in bytes", epochHex = epoch.inHex()
# generate proof # generate proof
let invalidProofRes = manager.generateProof( let invalidProofRes = manager.generateProof(
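The group-manager tests wrap each registration in the try/except shape shown above, turning any raised exception into a failed assertion that carries the original message. A generic sketch of that shape; `register` here is a hypothetical stand-in for `manager.register`:

```nim
import chronicles

proc register(index: int) =
  # Hypothetical stand-in for manager.register(credential, limit).
  if index < 0:
    raise newException(CatchableError, "negative index: " & $index)

for i in 0 ..< 3:
  try:
    info "Registering credential", index = i
    register(i)
  except CatchableError:
    assert false, "exception raised when calling register: " & getCurrentExceptionMsg()
```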


@ -53,7 +53,7 @@ suite "Waku rln relay":
check: check:
# the id trapdoor, nullifier, secret hash and commitment together are 4*32 bytes # the id trapdoor, nullifier, secret hash and commitment together are 4*32 bytes
generatedKeys.len == 4 * 32 generatedKeys.len == 4 * 32
debug "generated keys: ", generatedKeys info "generated keys: ", generatedKeys
test "membership Key Generation": test "membership Key Generation":
# create an RLN instance # create an RLN instance
@ -77,7 +77,7 @@ suite "Waku rln relay":
idCredential.idSecretHash != empty idCredential.idSecretHash != empty
idCredential.idCommitment != empty idCredential.idCommitment != empty
debug "the generated identity credential: ", idCredential info "the generated identity credential: ", idCredential
test "setMetadata rln utils": test "setMetadata rln utils":
# create an RLN instance which also includes an empty Merkle tree # create an RLN instance which also includes an empty Merkle tree
@ -162,7 +162,7 @@ suite "Waku rln relay":
hashOutput = cast[ptr array[32, byte]](outputBuffer.`ptr`)[] hashOutput = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
hashOutputHex = hashOutput.toHex() hashOutputHex = hashOutput.toHex()
debug "hash output", hashOutputHex info "hash output", hashOutputHex
test "sha256 hash utils": test "sha256 hash utils":
# create an RLN instance # create an RLN instance
@ -255,7 +255,7 @@ suite "Waku rln relay":
decodedEpoch = epochBytes.fromEpoch() decodedEpoch = epochBytes.fromEpoch()
check: check:
epoch == decodedEpoch epoch == decodedEpoch
debug "encoded and decode time", info "encoded and decode time",
epoch = epoch, epochBytes = epochBytes, decodedEpoch = decodedEpoch epoch = epoch, epochBytes = epochBytes, decodedEpoch = decodedEpoch
test "Epoch comparison, epoch1 > epoch2": test "Epoch comparison, epoch1 > epoch2":
@ -547,7 +547,7 @@ suite "Waku rln relay":
idCredential.idSecretHash != empty idCredential.idSecretHash != empty
idCredential.idCommitment != empty idCredential.idCommitment != empty
debug "the generated identity credential: ", idCredential info "the generated identity credential: ", idCredential
let index = MembershipIndex(1) let index = MembershipIndex(1)


@ -72,7 +72,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots() let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1 info "Updated root for node1", rootUpdated1
# node 2 # node 2
(await node2.mountRelay()).isOkOr: (await node2.mountRelay()).isOkOr:
@ -85,7 +85,7 @@ procSuite "WakuNode - RLN relay":
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots() let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2 info "Updated root for node2", rootUpdated2
# node 3 # node 3
(await node3.mountRelay()).isOkOr: (await node3.mountRelay()).isOkOr:
@ -98,7 +98,7 @@ procSuite "WakuNode - RLN relay":
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots() let rootUpdated3 = waitFor manager3.updateRoots()
debug "Updated root for node3", rootUpdated3 info "Updated root for node3", rootUpdated3
# connect them together # connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -108,7 +108,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler( proc relayHandler(
topic: PubsubTopic, msg: WakuMessage topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} = ): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic info "The received topic:", topic
if topic == DefaultPubsubTopic: if topic == DefaultPubsubTopic:
completionFut.complete(true) completionFut.complete(true)
@ -139,7 +139,7 @@ procSuite "WakuNode - RLN relay":
.isOk() .isOk()
) )
debug " Nodes participating in the test", info " Nodes participating in the test",
node1 = shortLog(node1.switch.peerInfo.peerId), node1 = shortLog(node1.switch.peerInfo.peerId),
node2 = shortLog(node2.switch.peerInfo.peerId), node2 = shortLog(node2.switch.peerInfo.peerId),
node3 = shortLog(node3.switch.peerInfo.peerId) node3 = shortLog(node3.switch.peerInfo.peerId)
@ -189,7 +189,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated = waitFor manager.updateRoots() let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", node = index + 1, rootUpdated = rootUpdated info "Updated root for node", node = index + 1, rootUpdated = rootUpdated
# connect them together # connect them together
await nodes[0].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()]) await nodes[0].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()])
@ -308,7 +308,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots() let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1 info "Updated root for node1", rootUpdated1
# node 2 # node 2
(await node2.mountRelay()).isOkOr: (await node2.mountRelay()).isOkOr:
@ -321,7 +321,7 @@ procSuite "WakuNode - RLN relay":
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots() let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2 info "Updated root for node2", rootUpdated2
# node 3 # node 3
(await node3.mountRelay()).isOkOr: (await node3.mountRelay()).isOkOr:
@ -334,7 +334,7 @@ procSuite "WakuNode - RLN relay":
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots() let rootUpdated3 = waitFor manager3.updateRoots()
debug "Updated root for node3", rootUpdated3 info "Updated root for node3", rootUpdated3
# connect them together # connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -345,7 +345,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler( proc relayHandler(
topic: PubsubTopic, msg: WakuMessage topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} = ): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic info "The received topic:", topic
if topic == DefaultPubsubTopic: if topic == DefaultPubsubTopic:
completionFut.complete(true) completionFut.complete(true)
@ -425,7 +425,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots() let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1 info "Updated root for node1", rootUpdated1
# node 2 # node 2
(await node2.mountRelay()).isOkOr: (await node2.mountRelay()).isOkOr:
@ -440,7 +440,7 @@ procSuite "WakuNode - RLN relay":
# Registration is mandatory before sending messages with rln-relay # Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots() let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2 info "Updated root for node2", rootUpdated2
# node 3 # node 3
(await node3.mountRelay()).isOkOr: (await node3.mountRelay()).isOkOr:
@ -455,7 +455,7 @@ procSuite "WakuNode - RLN relay":
# Registration is mandatory before sending messages with rln-relay # Registration is mandatory before sending messages with rln-relay
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots() let rootUpdated3 = waitFor manager3.updateRoots()
debug "Updated root for node3", rootUpdated3 info "Updated root for node3", rootUpdated3
# connect the nodes together node1 <-> node2 <-> node3 # connect the nodes together node1 <-> node2 <-> node3
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -510,7 +510,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler( proc relayHandler(
topic: PubsubTopic, msg: WakuMessage topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} = ): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic info "The received topic:", topic
if topic == DefaultPubsubTopic: if topic == DefaultPubsubTopic:
if msg == wm1: if msg == wm1:
completionFut1.complete(true) completionFut1.complete(true)
@ -592,7 +592,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots() let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1 info "Updated root for node1", rootUpdated1
# Mount rlnrelay in node2 in off-chain mode # Mount rlnrelay in node2 in off-chain mode
(await node2.mountRelay()).isOkOr: (await node2.mountRelay()).isOkOr:
@ -604,7 +604,7 @@ procSuite "WakuNode - RLN relay":
# Registration is mandatory before sending messages with rln-relay # Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots() let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2 info "Updated root for node2", rootUpdated2
# Given the two nodes are started and connected # Given the two nodes are started and connected
waitFor allFutures(node1.start(), node2.start()) waitFor allFutures(node1.start(), node2.start())
@ -636,7 +636,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler( proc relayHandler(
topic: PubsubTopic, msg: WakuMessage topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} = ): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic info "The received topic:", topic
if topic == DefaultPubsubTopic: if topic == DefaultPubsubTopic:
if msg == wm1: if msg == wm1:
completionFut1.complete(true) completionFut1.complete(true)
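Each relay test above installs a `relayHandler` that logs the delivery (now at info) and resolves a completion future that the test awaits. A minimal chronos sketch of that pattern; the topic string is a placeholder, not the real DefaultPubsubTopic value:

```nim
import chronos
import chronicles

let completionFut = newFuture[bool]("completion")

proc relayHandler(topic: string) {.async.} =
  info "The received topic:", topic
  if topic == "/waku/2/example/proto": # placeholder pubsub topic
    completionFut.complete(true)

waitFor relayHandler("/waku/2/example/proto")
# The tests bound the wait with a timeout rather than blocking forever.
assert waitFor(completionFut.withTimeout(1.seconds))
```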


@ -238,7 +238,7 @@ proc deployTestToken*(
return err( return err(
"Failed to get TestToken contract address from deploy script output: " & $error "Failed to get TestToken contract address from deploy script output: " & $error
) )
debug "Address of the TestToken contract", testTokenAddress info "Address of the TestToken contract", testTokenAddress
let testTokenAddressBytes = hexToByteArray[20](testTokenAddress) let testTokenAddressBytes = hexToByteArray[20](testTokenAddress)
let testTokenAddressAddress = Address(testTokenAddressBytes) let testTokenAddressAddress = Address(testTokenAddressBytes)
@ -334,7 +334,7 @@ proc executeForgeContractDeployScripts*(
return err("Submodule path does not exist: " & submodulePath) return err("Submodule path does not exist: " & submodulePath)
let forgePath = getForgePath() let forgePath = getForgePath()
debug "Forge path", forgePath info "Forge path", forgePath
# Verify forge executable exists # Verify forge executable exists
if not fileExists(forgePath): if not fileExists(forgePath):
@ -363,7 +363,7 @@ proc executeForgeContractDeployScripts*(
if priceCalculatorAddressRes.isErr(): if priceCalculatorAddressRes.isErr():
error "Failed to get LinearPriceCalculator contract address from deploy script output" error "Failed to get LinearPriceCalculator contract address from deploy script output"
let priceCalculatorAddress = priceCalculatorAddressRes.get() let priceCalculatorAddress = priceCalculatorAddressRes.get()
debug "Address of the LinearPriceCalculator contract", priceCalculatorAddress info "Address of the LinearPriceCalculator contract", priceCalculatorAddress
putEnv("PRICE_CALCULATOR_ADDRESS", priceCalculatorAddress) putEnv("PRICE_CALCULATOR_ADDRESS", priceCalculatorAddress)
let forgeCmdWakuRln = let forgeCmdWakuRln =
@ -382,7 +382,7 @@ proc executeForgeContractDeployScripts*(
error "Failed to get WakuRlnV2 contract address from deploy script output" error "Failed to get WakuRlnV2 contract address from deploy script output"
##TODO: raise exception here? ##TODO: raise exception here?
let wakuRlnV2Address = wakuRlnV2AddressRes.get() let wakuRlnV2Address = wakuRlnV2AddressRes.get()
debug "Address of the WakuRlnV2 contract", wakuRlnV2Address info "Address of the WakuRlnV2 contract", wakuRlnV2Address
putEnv("WAKURLNV2_ADDRESS", wakuRlnV2Address) putEnv("WAKURLNV2_ADDRESS", wakuRlnV2Address)
# Deploy Proxy contract # Deploy Proxy contract
@ -490,7 +490,7 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
# See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details # See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details
try: try:
let anvilPath = getAnvilPath() let anvilPath = getAnvilPath()
debug "Anvil path", anvilPath info "Anvil path", anvilPath
let runAnvil = startProcess( let runAnvil = startProcess(
anvilPath, anvilPath,
args = [ args = [
@ -518,7 +518,7 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
break break
except Exception, CatchableError: except Exception, CatchableError:
break break
debug "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog info "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog
return runAnvil return runAnvil
except: # TODO: Fix "BareExcept" warning except: # TODO: Fix "BareExcept" warning
error "Anvil daemon run failed", err = getCurrentExceptionMsg() error "Anvil daemon run failed", err = getCurrentExceptionMsg()
@ -526,11 +526,11 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
# Stops Anvil daemon # Stops Anvil daemon
proc stopAnvil*(runAnvil: Process) {.used.} = proc stopAnvil*(runAnvil: Process) {.used.} =
if runAnvil.isNil: if runAnvil.isNil:
debug "stopAnvil called with nil Process" info "stopAnvil called with nil Process"
return return
let anvilPID = runAnvil.processID let anvilPID = runAnvil.processID
debug "Stopping Anvil daemon", anvilPID = anvilPID info "Stopping Anvil daemon", anvilPID = anvilPID
try: try:
# Send termination signals # Send termination signals
@ -542,9 +542,9 @@ proc stopAnvil*(runAnvil: Process) {.used.} =
# Close Process object to release resources # Close Process object to release resources
close(runAnvil) close(runAnvil)
debug "Anvil daemon stopped", anvilPID = anvilPID info "Anvil daemon stopped", anvilPID = anvilPID
except Exception as e: except Exception as e:
debug "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg info "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg
proc setupOnchainGroupManager*( proc setupOnchainGroupManager*(
ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256 ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256
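The Anvil helpers above follow the usual std/osproc lifecycle: start the daemon, record its pid, and on shutdown signal the process before closing the handle. A hedged sketch with a throwaway `sleep` process standing in for Anvil:

```nim
import std/osproc
import chronicles

let daemon = startProcess("/bin/sleep", args = ["30"])
let pid = daemon.processID
info "daemon is running", pid

terminate(daemon)            # send the termination signal, as stopAnvil does
discard waitForExit(daemon)  # reap the child so it does not linger
close(daemon)                # release the Process object's resources
info "daemon stopped", pid
```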


@ -274,7 +274,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated = waitFor manager.updateRoots() let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated info "Updated root for node", rootUpdated
let proofRes = waitFor manager.fetchMerkleProofElements() let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr(): if proofRes.isErr():
@ -525,7 +525,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated = waitFor manager.updateRoots() let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated info "Updated root for node", rootUpdated
let proofRes = waitFor manager.fetchMerkleProofElements() let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr(): if proofRes.isErr():
@ -597,7 +597,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated = waitFor manager.updateRoots() let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated info "Updated root for node", rootUpdated
let proofRes = waitFor manager.fetchMerkleProofElements() let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr(): if proofRes.isErr():
@ -659,7 +659,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated = waitFor manager.updateRoots() let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated info "Updated root for node", rootUpdated
let proofRes = waitFor manager.fetchMerkleProofElements() let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr(): if proofRes.isErr():
@ -734,7 +734,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg() "exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated = waitFor manager.updateRoots() let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated info "Updated root for node", rootUpdated
let proofRes = waitFor manager.fetchMerkleProofElements() let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr(): if proofRes.isErr():


@ -45,7 +45,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
quit(1) quit(1)
let credential = credentialRes.get() let credential = credentialRes.get()
debug "credentials", info "credentials",
idTrapdoor = credential.idTrapdoor.inHex(), idTrapdoor = credential.idTrapdoor.inHex(),
idNullifier = credential.idNullifier.inHex(), idNullifier = credential.idNullifier.inHex(),
idSecretHash = credential.idSecretHash.inHex(), idSecretHash = credential.idSecretHash.inHex(),
@ -90,7 +90,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
error = getCurrentExceptionMsg() error = getCurrentExceptionMsg()
quit(1) quit(1)
debug "Transaction hash", txHash = groupManager.registrationTxHash.get() info "Transaction hash", txHash = groupManager.registrationTxHash.get()
info "Your membership has been registered on-chain.", info "Your membership has been registered on-chain.",
chainId = $groupManager.chainId, chainId = $groupManager.chainId,

vendor/zerokit (vendored)

@ -1 +1 @@
Subproject commit ba467d370c56b7432522227de22fbd664d44ef3e Subproject commit dc0b31752c91e7b4fefc441cfa6a8210ad7dba7b


@ -280,7 +280,7 @@ proc dbConnQuery*(
query_count.inc(labelValues = [metricLabel]) query_count.inc(labelValues = [metricLabel])
if "insert" notin ($query).toLower(): if "insert" notin ($query).toLower():
debug "dbConnQuery", info "dbConnQuery",
requestId, requestId,
query = $query, query = $query,
args, args,
@ -321,7 +321,7 @@ proc dbConnQueryPrepared*(
query_count.inc(labelValues = [stmtName]) query_count.inc(labelValues = [stmtName])
if "insert" notin stmtName.toLower(): if "insert" notin stmtName.toLower():
debug "dbConnQueryPrepared", info "dbConnQueryPrepared",
requestId, requestId,
stmtName, stmtName,
paramValues, paramValues,


@ -57,9 +57,9 @@ proc close*(pool: PgAsyncPool): Future[Result[void, string]] {.async.} =
# wait for the connections to be released and close them, without # wait for the connections to be released and close them, without
# blocking the async runtime # blocking the async runtime
debug "close PgAsyncPool" info "close PgAsyncPool"
await allFutures(pool.conns.mapIt(it.futBecomeFree)) await allFutures(pool.conns.mapIt(it.futBecomeFree))
debug "closing all connection PgAsyncPool" info "closing all connection PgAsyncPool"
for i in 0 ..< pool.conns.len: for i in 0 ..< pool.conns.len:
if pool.conns[i].isPgDbConnOpen(): if pool.conns[i].isPgDbConnOpen():
@ -128,7 +128,7 @@ proc pgQuery*(
defer: defer:
let queryDuration = getNowInNanosecondTime() - queryStartTime let queryDuration = getNowInNanosecondTime() - queryStartTime
if queryDuration > SlowQueryThreshold.nanos: if queryDuration > SlowQueryThreshold.nanos:
debug "pgQuery slow query", info "pgQuery slow query",
query_duration_secs = (queryDuration / 1_000_000_000), query, requestId query_duration_secs = (queryDuration / 1_000_000_000), query, requestId
(await dbConnWrapper.dbConnQuery(sql(query), args, rowCallback, requestId)).isOkOr: (await dbConnWrapper.dbConnQuery(sql(query), args, rowCallback, requestId)).isOkOr:
@ -162,7 +162,7 @@ proc runStmt*(
defer: defer:
let queryDuration = getNowInNanosecondTime() - queryStartTime let queryDuration = getNowInNanosecondTime() - queryStartTime
if queryDuration > SlowQueryThreshold.nanos: if queryDuration > SlowQueryThreshold.nanos:
debug "runStmt slow query", info "runStmt slow query",
query_duration = queryDuration / 1_000_000_000, query_duration = queryDuration / 1_000_000_000,
query = stmtDefinition, query = stmtDefinition,
requestId requestId
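Both pool entry points above use the same guard: timestamp the query and, in a `defer` block, log at info only when the elapsed time crosses SlowQueryThreshold. A self-contained sketch of that guard; the one-second threshold is an assumption, not necessarily the project's value:

```nim
import std/times
import chronicles

let SlowQueryThreshold = initDuration(seconds = 1)

proc runQuery(query: string) =
  let queryStartTime = getTime()
  defer:
    let queryDuration = getTime() - queryStartTime
    if queryDuration > SlowQueryThreshold:
      info "slow query",
        query_duration_secs = queryDuration.inMilliseconds.float / 1000.0, query
  # ... execute the query against the pool here ...

runQuery("SELECT 1")
```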


@ -383,7 +383,7 @@ proc listSqlScripts(path: string): DatabaseResult[seq[string]] =
if isSqlScript(scriptPath): if isSqlScript(scriptPath):
scripts.add(scriptPath) scripts.add(scriptPath)
else: else:
debug "invalid migration script", file = scriptPath info "invalid migration script", file = scriptPath
except OSError: except OSError:
return err("failed to list migration scripts: " & getCurrentExceptionMsg()) return err("failed to list migration scripts: " & getCurrentExceptionMsg())
@ -448,7 +448,7 @@ proc migrate*(
let userVersion = ?db.getUserVersion() let userVersion = ?db.getUserVersion()
if userVersion == targetVersion: if userVersion == targetVersion:
debug "database schema is up to date", info "database schema is up to date",
userVersion = userVersion, targetVersion = targetVersion userVersion = userVersion, targetVersion = targetVersion
return ok() return ok()
@ -466,7 +466,7 @@ proc migrate*(
migrationScriptsPaths = sortMigrationScripts(migrationScriptsPaths) migrationScriptsPaths = sortMigrationScripts(migrationScriptsPaths)
if migrationScriptsPaths.len <= 0: if migrationScriptsPaths.len <= 0:
debug "no scripts to be run" info "no scripts to be run"
return ok() return ok()
let scripts = ?loadMigrationScripts(migrationScriptsPaths) let scripts = ?loadMigrationScripts(migrationScriptsPaths)
@ -474,7 +474,7 @@ proc migrate*(
# Run the migration scripts # Run the migration scripts
for script in scripts: for script in scripts:
for statement in script.breakIntoStatements(): for statement in script.breakIntoStatements():
debug "executing migration statement", statement = statement info "executing migration statement", statement = statement
let execRes = db.query(statement, NoopRowHandler) let execRes = db.query(statement, NoopRowHandler)
if execRes.isErr(): if execRes.isErr():
@ -482,12 +482,12 @@ proc migrate*(
statement = statement, error = execRes.error statement = statement, error = execRes.error
return err("failed to execute migration statement") return err("failed to execute migration statement")
debug "migration statement executed succesfully", statement = statement info "migration statement executed succesfully", statement = statement
# Update user_version # Update user_version
?db.setUserVersion(targetVersion) ?db.setUserVersion(targetVersion)
debug "database user_version updated", userVersion = targetVersion info "database user_version updated", userVersion = targetVersion
ok() ok()
proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] = proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] =
@ -495,11 +495,11 @@ proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] =
# TODO: Run vacuuming conditionally based on database page stats # TODO: Run vacuuming conditionally based on database page stats
# if (pageCount > 0 and freelistCount > 0): # if (pageCount > 0 and freelistCount > 0):
debug "starting sqlite database vacuuming" info "starting sqlite database vacuuming"
let resVacuum = db.vacuum() let resVacuum = db.vacuum()
if resVacuum.isErr(): if resVacuum.isErr():
return err("failed to execute vacuum: " & resVacuum.error) return err("failed to execute vacuum: " & resVacuum.error)
debug "finished sqlite database vacuuming" info "finished sqlite database vacuuming"
ok() ok()
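The migration loop above runs each script one statement at a time, logging every statement (now at info) before executing it. A simplified sketch of that flow; the semicolon split is illustrative, not the project's actual `breakIntoStatements`:

```nim
import std/strutils
import chronicles

proc breakIntoStatements(script: string): seq[string] =
  # Naive stand-in: real SQL splitting must respect quoted strings, etc.
  for raw in script.split(';'):
    let statement = raw.strip()
    if statement.len > 0:
      result.add(statement & ";")

let script = "CREATE TABLE t(x INT); INSERT INTO t VALUES (1);"
for statement in script.breakIntoStatements():
  info "executing migration statement", statement = statement
  # db.query(statement, NoopRowHandler) would run here, aborting on error.
```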


@ -24,7 +24,7 @@ template heartbeat*(name: string, interval: Duration, body: untyped): untyped =
info "Missed multiple heartbeats", info "Missed multiple heartbeats",
heartbeat = name, delay = delay, hinterval = itv heartbeat = name, delay = delay, hinterval = itv
else: else:
debug "Missed heartbeat", heartbeat = name, delay = delay, hinterval = itv info "Missed heartbeat", heartbeat = name, delay = delay, hinterval = itv
nextHeartbeat = now + itv nextHeartbeat = now + itv


@@ -60,14 +60,14 @@ proc shardingPredicate*(
 ): Option[WakuDiscv5Predicate] =
 ## Filter peers based on relay sharding information
 let typedRecord = record.toTyped().valueOr:
-debug "peer filtering failed", reason = error
+info "peer filtering failed", reason = error
 return none(WakuDiscv5Predicate)
 let nodeShard = typedRecord.relaySharding().valueOr:
-debug "no relay sharding information, peer filtering disabled"
+info "no relay sharding information, peer filtering disabled"
 return none(WakuDiscv5Predicate)
-debug "peer filtering updated"
+info "peer filtering updated"
 let predicate = proc(record: waku_enr.Record): bool =
 bootnodes.contains(record) or # Temp. Bootnode exception
@@ -124,7 +124,7 @@ proc updateAnnouncedMultiAddress*(
 wd.protocol.updateRecord([(MultiaddrEnrField, encodedAddrs)]).isOkOr:
 return err("failed to update multiaddress in ENR: " & $error)
-debug "ENR updated successfully with new multiaddress",
+info "ENR updated successfully with new multiaddress",
 enrUri = wd.protocol.localNode.record.toUri(), enr = $(wd.protocol.localNode.record)
 return ok()
@@ -312,15 +312,15 @@ proc subscriptionsListener(wd: WakuDiscoveryV5) {.async.} =
 let subRes = wd.updateENRShards(subs, true)
 if subRes.isErr():
-debug "ENR shard addition failed", reason = $subRes.error
+info "ENR shard addition failed", reason = $subRes.error
 if unsubRes.isErr():
-debug "ENR shard removal failed", reason = $unsubRes.error
+info "ENR shard removal failed", reason = $unsubRes.error
 if subRes.isErr() and unsubRes.isErr():
 continue
-debug "ENR updated successfully",
+info "ENR updated successfully",
 enrUri = wd.protocol.localNode.record.toUri(),
 enr = $(wd.protocol.localNode.record)
@@ -335,7 +335,7 @@ proc start*(wd: WakuDiscoveryV5): Future[Result[void, string]] {.async: (raises:
 info "Starting discovery v5 service"
-debug "start listening on udp port", address = $wd.conf.address, port = $wd.conf.port
+info "start listening on udp port", address = $wd.conf.address, port = $wd.conf.port
 try:
 wd.protocol.open()
 except CatchableError:
@@ -349,7 +349,7 @@ proc start*(wd: WakuDiscoveryV5): Future[Result[void, string]] {.async: (raises:
 asyncSpawn wd.searchLoop()
 asyncSpawn wd.subscriptionsListener()
-debug "Successfully started discovery v5 service"
+info "Successfully started discovery v5 service"
 info "Discv5: discoverable ENR ",
 enrUri = wd.protocol.localNode.record.toUri(), enr = $(wd.protocol.localNode.record)
@@ -365,7 +365,7 @@ proc stop*(wd: WakuDiscoveryV5): Future[void] {.async.} =
 trace "Stop listening on discv5 port"
 await wd.protocol.closeWait()
-debug "Successfully stopped discovery v5 service"
+info "Successfully stopped discovery v5 service"
 ## Helper functions
@@ -395,7 +395,7 @@ proc addBootstrapNode*(bootstrapAddr: string, bootstrapEnrs: var seq[enr.Record]
 let enrRes = parseBootstrapAddress(bootstrapAddr)
 if enrRes.isErr():
-debug "ignoring invalid bootstrap address", reason = enrRes.error
+info "ignoring invalid bootstrap address", reason = enrRes.error
 return
 bootstrapEnrs.add(enrRes.value)
@@ -422,7 +422,7 @@ proc setupDiscoveryV5*(
 for enr in discv5BootstrapEnrs:
 let peerInfo = enr.toRemotePeerInfo().valueOr:
-debug "could not convert discv5 bootstrap node to peerInfo, not adding peer to Peer Store",
+info "could not convert discv5 bootstrap node to peerInfo, not adding peer to Peer Store",
 enr = enr.toUri(), error = error
 continue
 nodePeerManager.addPeer(peerInfo, PeerOrigin.Discv5)
View File
@@ -37,7 +37,7 @@ type WakuDnsDiscovery* = object
 #####################
 proc emptyResolver*(domain: string): Future[string] {.async, gcsafe.} =
-debug "Empty resolver called", domain = domain
+info "Empty resolver called", domain = domain
 return ""
 proc findPeers*(
@@ -83,13 +83,13 @@ proc init*(
 ): Result[T, cstring] =
 ## Initialise Waku peer discovery via DNS
-debug "init WakuDnsDiscovery", locationUrl = locationUrl
+info "init WakuDnsDiscovery", locationUrl = locationUrl
 let
 client = ?Client.init(locationUrl)
 wakuDnsDisc = WakuDnsDiscovery(client: client, resolver: resolver)
-debug "init success"
+info "init success"
 return ok(wakuDnsDisc)
@@ -100,7 +100,7 @@ proc retrieveDynamicBootstrapNodes*(
 if dnsDiscoveryUrl != "":
 # DNS discovery
-debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl
+info "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl
 var nameServers: seq[TransportAddress]
 for ip in dnsAddrsNameServers:
@@ -118,5 +118,5 @@ proc retrieveDynamicBootstrapNodes*(
 return (await value.findPeers()).mapErr(e => $e)
 warn "Failed to init Waku DNS discovery"
-debug "No method for retrieving dynamic bootstrap nodes specified."
+info "No method for retrieving dynamic bootstrap nodes specified."
 ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default
View File
@@ -282,7 +282,7 @@ proc setupProtocols(
 # only peers with populated records
 .mapIt(toRemotePeerInfo(it.record.get()))
-debug "adding exchanged peers",
+info "adding exchanged peers",
 src = peer, topic = topic, numPeers = exchangedPeers.len
 for peer in exchangedPeers:
@@ -301,7 +301,7 @@ proc setupProtocols(
 else:
 @[]
-debug "Shards created from content topics",
+info "Shards created from content topics",
 contentTopics = conf.contentTopics, shards = autoShards
 let confShards = conf.subscribeShards.mapIt(
@@ -310,7 +310,7 @@ proc setupProtocols(
 let shards = confShards & autoShards
 if conf.relay:
-debug "Setting max message size", num_bytes = conf.maxMessageSizeBytes
+info "Setting max message size", num_bytes = conf.maxMessageSizeBytes
 (
 await mountRelay(
@@ -500,7 +500,7 @@ proc setupNode*(
 error "cluster id mismatch configured shards"
 return err("cluster id mismatch configured shards")
-debug "Setting up storage"
+info "Setting up storage"
 ## Peer persistence
 var peerStore: Option[WakuPeerStorage]
@@ -509,13 +509,13 @@ proc setupNode*(
 error "Setting up storage failed", error = "failed to setup peer store " & error
 return err("Setting up storage failed: " & error)
-debug "Initializing node"
+info "Initializing node"
 let node = initNode(wakuConf, netConfig, rng, record, peerStore, relay).valueOr:
 error "Initializing node failed", error = error
 return err("Initializing node failed: " & error)
-debug "Mounting protocols"
+info "Mounting protocols"
 try:
 (await node.setupProtocols(wakuConf)).isOkOr:
View File
@@ -53,7 +53,7 @@ proc withinTimeWindow*(msg: WakuMessage): bool =
 proc addSignedShardsValidator*(
 w: WakuRelay, protectedShards: seq[ProtectedShard], clusterId: uint16
 ) =
-debug "adding validator to signed shards", protectedShards, clusterId
+info "adding validator to signed shards", protectedShards, clusterId
 proc validator(
 topic: string, msg: WakuMessage
@@ -72,7 +72,7 @@ proc addSignedShardsValidator*(
 outcome = errors.ValidationResult.Accept
 if outcome != errors.ValidationResult.Accept:
-debug "signed topic validation failed",
+info "signed topic validation failed",
 topic = topic, publicShardKey = protectedShard.key
 waku_msg_validator_signed_outcome.inc(labelValues = [$outcome])
 return outcome
View File
@@ -82,12 +82,12 @@ proc setupSwitchServices(
 waku: Waku, conf: WakuConf, circuitRelay: Relay, rng: ref HmacDrbgContext
 ) =
 proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} =
-debug "circuit relay handler new reserve event",
+info "circuit relay handler new reserve event",
 addrs_before = $(waku.node.announcedAddresses), addrs = $addresses
 waku.node.announcedAddresses.setLen(0) ## remove previous addresses
 waku.node.announcedAddresses.add(addresses)
-debug "waku node announced addresses updated",
+info "waku node announced addresses updated",
 announcedAddresses = waku.node.announcedAddresses
 if not isNil(waku.wakuDiscv5):
@@ -297,7 +297,7 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
 waku[].node.enr.update(parsedPk, extraFields = enrFields).isOkOr:
 return err("failed to update multiaddress in ENR updateAddressInENR: " & $error)
-debug "Waku node ENR updated successfully with new multiaddress",
+info "Waku node ENR updated successfully with new multiaddress",
 enr = waku[].node.enr.toUri(), record = $(waku[].node.enr)
 ## Now update the ENR infor in discv5
@@ -305,7 +305,7 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
 waku[].wakuDiscv5.protocol.localNode.record = waku[].node.enr
 let enr = waku[].wakuDiscv5.protocol.localNode.record
-debug "Waku discv5 ENR updated successfully with new multiaddress",
+info "Waku discv5 ENR updated successfully with new multiaddress",
 enr = enr.toUri(), record = $(enr)
 return ok()
@@ -365,7 +365,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
 warn "startWaku: waku node already started"
 return ok()
-debug "Retrieve dynamic bootstrap nodes"
+info "Retrieve dynamic bootstrap nodes"
 let conf = waku[].conf
 if conf.dnsDiscoveryConf.isSome():
View File
@@ -42,16 +42,16 @@ proc mountLegacyLightPush*(
 let pushHandler =
 if node.wakuRelay.isNil:
-debug "mounting legacy lightpush without relay (nil)"
+info "mounting legacy lightpush without relay (nil)"
 legacy_lightpush_protocol.getNilPushHandler()
 else:
-debug "mounting legacy lightpush with relay"
+info "mounting legacy lightpush with relay"
 let rlnPeer =
 if isNil(node.wakuRlnRelay):
-debug "mounting legacy lightpush without rln-relay"
+info "mounting legacy lightpush without rln-relay"
 none(WakuRLNRelay)
 else:
-debug "mounting legacy lightpush with rln-relay"
+info "mounting legacy lightpush with rln-relay"
 some(node.wakuRlnRelay)
 legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
@@ -157,16 +157,16 @@ proc mountLightPush*(
 let pushHandler =
 if node.wakuRelay.isNil():
-debug "mounting lightpush v2 without relay (nil)"
+info "mounting lightpush v2 without relay (nil)"
 lightpush_protocol.getNilPushHandler()
 else:
-debug "mounting lightpush with relay"
+info "mounting lightpush with relay"
 let rlnPeer =
 if isNil(node.wakuRlnRelay):
-debug "mounting lightpush without rln-relay"
+info "mounting lightpush without rln-relay"
 none(WakuRLNRelay)
 else:
-debug "mounting lightpush with rln-relay"
+info "mounting lightpush with rln-relay"
 some(node.wakuRlnRelay)
 lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
View File
@@ -49,7 +49,7 @@ proc pingPeer(node: WakuNode, peerId: PeerId): Future[Result[void, string]] {.as
 try:
 await stream.close()
 except CatchableError as e:
-debug "Error closing ping connection", peerId = peerId, error = e.msg
+info "Error closing ping connection", peerId = peerId, error = e.msg
 # Perform ping
 let pingDuration = await node.libp2pPing.ping(stream)
View File
@@ -157,7 +157,7 @@ proc unsubscribe*(
 warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic
 return ok()
-debug "unsubscribe", pubsubTopic, contentTopicOp
+info "unsubscribe", pubsubTopic, contentTopicOp
 node.wakuRelay.unsubscribe(pubsubTopic)
 node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic))
@@ -250,7 +250,7 @@ proc mountRlnRelay*(
 let validator = generateRlnValidator(rlnRelay, spamHandler)
 # register rln validator as default validator
-debug "Registering RLN validator"
+info "Registering RLN validator"
 node.wakuRelay.addValidator(validator, "RLN validation failed")
 node.wakuRlnRelay = rlnRelay
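The validator registered above follows the relay validator shape visible in earlier hunks of this commit; a hedged sketch with an illustrative body:

    proc validator(
        topic: string, msg: WakuMessage
    ): Future[errors.ValidationResult] {.async.} =
      # Returning Reject here would drop the message before further relaying.
      return errors.ValidationResult.Accept

    node.wakuRelay.addValidator(validator, "RLN validation failed")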
View File
@@ -15,12 +15,12 @@ template projectRoot(): string =
 const PeerStoreMigrationPath: string = projectRoot / "migrations" / "sent_msgs"
 proc migrate*(db: SqliteDatabase): DatabaseResult[void] =
-debug "starting peer store's sqlite database migration for sent messages"
+info "starting peer store's sqlite database migration for sent messages"
 let migrationRes =
 migrate(db, TargetSchemaVersion, migrationsScriptsDir = PeerStoreMigrationPath)
 if migrationRes.isErr():
 return err("failed to execute migration scripts: " & migrationRes.error)
-debug "finished peer store's sqlite database migration for sent messages"
+info "finished peer store's sqlite database migration for sent messages"
 ok()
View File
@@ -76,7 +76,7 @@ proc performDeliveryFeedback(
 success, dir, comment, msg_hash
 return
-debug "recv monitor performDeliveryFeedback",
+info "recv monitor performDeliveryFeedback",
 success, dir, comment, msg_hash = shortLog(msgHash)
 self.deliveryCb(success, dir, comment, msgHash, msg)
@@ -129,7 +129,7 @@ proc msgChecker(self: RecvMonitor) {.async.} =
 method onSubscribe(
 self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
 ) {.gcsafe, raises: [].} =
-debug "onSubscribe", pubsubTopic, contentTopics
+info "onSubscribe", pubsubTopic, contentTopics
 self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest):
 contentTopicsOfInterest[].add(contentTopics)
 do:
@@ -138,7 +138,7 @@ method onSubscribe(
 method onUnsubscribe(
 self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
 ) {.gcsafe, raises: [].} =
-debug "onUnsubscribe", pubsubTopic, contentTopics
+info "onUnsubscribe", pubsubTopic, contentTopics
 self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest):
 let remainingCTopics =
View File
@@ -90,7 +90,7 @@ proc performFeedbackAndCleanup(
 return
 for hash, deliveryInfo in msgsToDiscard:
-debug "send monitor performFeedbackAndCleanup",
+info "send monitor performFeedbackAndCleanup",
 success, dir, comment, msg_hash = shortLog(hash)
 self.deliveryCb(success, dir, comment, hash, deliveryInfo.msg)
@@ -170,14 +170,14 @@ proc processMessages(self: SendMonitor) {.async.} =
 let pubsubTopic = deliveryInfo.pubsubTopic
 let msg = deliveryInfo.msg
 if not self.wakuRelay.isNil():
-debug "trying to publish again with wakuRelay", msgHash, pubsubTopic
+info "trying to publish again with wakuRelay", msgHash, pubsubTopic
 (await self.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
 error "could not publish with wakuRelay.publish",
 msgHash, pubsubTopic, error = $error
 continue
 if not self.wakuLightpushClient.isNil():
-debug "trying to publish again with wakuLightpushClient", msgHash, pubsubTopic
+info "trying to publish again with wakuLightpushClient", msgHash, pubsubTopic
 (await self.wakuLightpushClient.publishToAny(pubsubTopic, msg)).isOkOr:
 error "could not publish with publishToAny", error = $error
 continue
@@ -196,7 +196,7 @@ method onMessagePublished(
 ## When publishing a message either through relay or lightpush, we want to add some extra effort
 ## to make sure it is received to one store node. Hence, keep track of those published messages.
-debug "onMessagePublished"
+info "onMessagePublished"
 let msgHash = computeMessageHash(pubSubTopic, msg)
 if not self.publishedMessages.hasKey(msgHash):
View File
@@ -583,7 +583,7 @@ proc reconnectPeers*(
 ## Reconnect to peers registered for this protocol. This will update connectedness.
 ## Especially useful to resume connections from persistent storage after a restart.
-debug "Reconnecting peers", proto = proto
+info "Reconnecting peers", proto = proto
 # Proto is not persisted, we need to iterate over all peers.
 for peerInfo in pm.switch.peerStore.peers(protocolMatcher(proto)):
@@ -594,7 +594,7 @@ proc reconnectPeers*(
 continue
 if backoffTime > ZeroDuration:
-debug "Backing off before reconnect",
+info "Backing off before reconnect",
 peerId = peerInfo.peerId, backoffTime = backoffTime
 # We disconnected recently and still need to wait for a backoff period before connecting
 await sleepAsync(backoffTime)
@@ -682,7 +682,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
 let inRelayPeers = pm.connectedPeers(WakuRelayCodec)[0]
 if inRelayPeers.len > pm.inRelayPeersTarget and
 peerStore.hasPeer(peerId, WakuRelayCodec):
-debug "disconnecting relay peer because reached max num in-relay peers",
+info "disconnecting relay peer because reached max num in-relay peers",
 peerId = peerId,
 inRelayPeers = inRelayPeers.len,
 inRelayPeersTarget = pm.inRelayPeersTarget
@@ -698,7 +698,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
 # pm.colocationLimit == 0 disables the ip colocation limit
 if pm.colocationLimit != 0 and peersBehindIp.len > pm.colocationLimit:
 for peerId in peersBehindIp[0 ..< (peersBehindIp.len - pm.colocationLimit)]:
-debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip
+info "Pruning connection due to ip colocation", peerId = peerId, ip = ip
 asyncSpawn(pm.switch.disconnect(peerId))
 peerStore.delete(peerId)
@@ -721,7 +721,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
 # we don't want to await for the callback to finish
 asyncSpawn pm.onConnectionChange(peerId, Left)
 of Identified:
-debug "event identified", peerId = peerId
+info "event identified", peerId = peerId
 peerStore[ConnectionBook][peerId] = connectedness
 peerStore[DirectionBook][peerId] = direction
@@ -861,7 +861,7 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
 let relayCount = connectablePeers.len
-debug "Sharded Peer Management",
+info "Sharded Peer Management",
 shard = shard,
 connectable = $connectableCount & "/" & $shardCount,
 relayConnectable = $relayCount & "/" & $shardCount,
View File
@@ -22,12 +22,12 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
 ## If not `targetVersion` is provided, it defaults to `SchemaVersion`.
 ##
 ## NOTE: Down migration it is not currently supported
-debug "starting peer store's sqlite database migration"
+info "starting peer store's sqlite database migration"
 let migrationRes =
 migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath)
 if migrationRes.isErr():
 return err("failed to execute migration scripts: " & migrationRes.error)
-debug "finished peer store's sqlite database migration"
+info "finished peer store's sqlite database migration"
 ok()
View File
@@ -31,7 +31,7 @@ proc getSecureKey(path: string): TLSPrivateKey {.raises: [Defect, IOError].} =
 let key = TLSPrivateKey.init(stringkey)
 return key
 except TLSStreamProtocolError as exc:
-debug "exception raised from getSecureKey", err = exc.msg
+info "exception raised from getSecureKey", err = exc.msg
 proc getSecureCert(path: string): TLSCertificate {.raises: [Defect, IOError].} =
 trace "Certificate path is.", path = path
@@ -40,7 +40,7 @@ proc getSecureCert(path: string): TLSCertificate {.raises: [Defect, IOError].} =
 let cert = TLSCertificate.init(stringCert)
 return cert
 except TLSStreamProtocolError as exc:
-debug "exception raised from getSecureCert", err = exc.msg
+info "exception raised from getSecureCert", err = exc.msg
 proc withWssTransport*(
 b: SwitchBuilder, secureKeyPath: string, secureCertPath: string
View File
@@ -17,7 +17,7 @@ proc installDebugInfoV1Handler(router: var RestRouter, node: WakuNode) =
 let info = node.info().toDebugWakuInfo()
 let resp = RestApiResponse.jsonResponse(info, status = Http200)
 if resp.isErr():
-debug "An error occurred while building the json respose", error = resp.error
+info "An error occurred while building the json respose", error = resp.error
 return RestApiResponse.internalServerError()
 return resp.get()
View File
@@ -222,7 +222,7 @@ proc installFilterPostSubscriptionsHandler(
 contentBody: Option[ContentBody]
 ) -> RestApiResponse:
 ## Subscribes a node to a list of contentTopics of a pubsubTopic
-debug "post", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
+info "post", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
 return await filterPostPutSubscriptionRequestHandler(
 node, contentBody, cache, discHandler
@@ -238,7 +238,7 @@ proc installFilterPutSubscriptionsHandler(
 contentBody: Option[ContentBody]
 ) -> RestApiResponse:
 ## Modifies a subscribtion of a node to a list of contentTopics of a pubsubTopic
-debug "put", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
+info "put", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
 return await filterPostPutSubscriptionRequestHandler(
 node, contentBody, cache, discHandler
@@ -254,7 +254,7 @@ proc installFilterDeleteSubscriptionsHandler(
 contentBody: Option[ContentBody]
 ) -> RestApiResponse:
 ## Subscribes a node to a list of contentTopics of a PubSub topic
-debug "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
+info "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
 let decodedBody = decodeRequestBody[FilterUnsubscribeRequest](contentBody)
@@ -306,7 +306,7 @@ proc installFilterDeleteAllSubscriptionsHandler(
 contentBody: Option[ContentBody]
 ) -> RestApiResponse:
 ## Subscribes a node to a list of contentTopics of a PubSub topic
-debug "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody
+info "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody
 let decodedBody = decodeRequestBody[FilterUnsubscribeAllRequest](contentBody)
@@ -357,7 +357,7 @@ proc installFilterPingSubscriberHandler(
 requestId: string
 ) -> RestApiResponse:
 ## Checks if a node has valid subscription or not.
-debug "get", ROUTE_FILTER_SUBSCRIBER_PING, requestId
+info "get", ROUTE_FILTER_SUBSCRIBER_PING, requestId
 let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
 let handler = discHandler.valueOr:
@@ -397,7 +397,7 @@ proc installFilterGetMessagesHandler(
 ## Returns all WakuMessages received on a specified content topic since the
 ## last time this method was called
 ## TODO: ability to specify a return message limit, maybe use cursor to control paging response.
-debug "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic
+info "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic
 if contentTopic.isErr():
 return RestApiResponse.badRequest("Missing contentTopic")
View File
@@ -25,7 +25,7 @@ proc installHealthApiHandler*(
 if healthReportFut.completed():
 let healthReport = healthReportFut.read()
 return RestApiResponse.jsonResponse(healthReport, Http200).valueOr:
-debug "An error ocurred while building the json healthReport response",
+info "An error ocurred while building the json healthReport response",
 error = error
 return
 RestApiResponse.internalServerError("Failed to serialize health report")
View File
@@ -48,7 +48,7 @@ proc installLightPushRequestHandler*(
 contentBody: Option[ContentBody]
 ) -> RestApiResponse:
 ## Send a request to push a waku message
-debug "post", ROUTE_LIGHTPUSH, contentBody
+info "post", ROUTE_LIGHTPUSH, contentBody
 let decodedBody = decodeRequestBody[PushRequest](contentBody)
View File
@@ -205,7 +205,7 @@ proc installStoreApiHandlers*(
 pageSize: Option[string],
 ascending: Option[string]
 ) -> RestApiResponse:
-debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr
+info "REST-GET /store/v1/messages ", peer_addr = $peerAddr
 # All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding)
 # Example:
View File
@@ -66,7 +66,7 @@ proc installLightPushRequestHandler*(
 contentBody: Option[ContentBody]
 ) -> RestApiResponse:
 ## Send a request to push a waku message
-debug "post received", ROUTE_LIGHTPUSH
+info "post received", ROUTE_LIGHTPUSH
 trace "content body", ROUTE_LIGHTPUSH, contentBody
 let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr:
View File
@@ -61,7 +61,7 @@ proc installRelayApiHandlers*(
 ) -> RestApiResponse:
 ## Subscribes a node to a list of PubSub topics
-debug "post_waku_v2_relay_v1_subscriptions"
+info "post_waku_v2_relay_v1_subscriptions"
 # Check the request body
 if contentBody.isNone():
@@ -90,7 +90,7 @@ proc installRelayApiHandlers*(
 contentBody: Option[ContentBody]
 ) -> RestApiResponse:
 # ## Subscribes a node to a list of PubSub topics
-# debug "delete_waku_v2_relay_v1_subscriptions"
+# info "delete_waku_v2_relay_v1_subscriptions"
 # Check the request body
 if contentBody.isNone():
@@ -124,7 +124,7 @@ proc installRelayApiHandlers*(
 # ## Returns all WakuMessages received on a PubSub topic since the
 # ## last time this method was called
 # ## TODO: ability to specify a return message limit
-# debug "get_waku_v2_relay_v1_messages", topic=topic
+# info "get_waku_v2_relay_v1_messages", topic=topic
 if pubsubTopic.isErr():
 return RestApiResponse.badRequest()
@@ -132,13 +132,13 @@ proc installRelayApiHandlers*(
 let messages = cache.getMessages(pubSubTopic, clear = true)
 if messages.isErr():
-debug "Not subscribed to topic", topic = pubSubTopic
+info "Not subscribed to topic", topic = pubSubTopic
 return RestApiResponse.notFound()
 let data = RelayGetMessagesResponse(messages.get().map(toRelayWakuMessage))
 let resp = RestApiResponse.jsonResponse(data, status = Http200)
 if resp.isErr():
-debug "An error ocurred while building the json respose", error = resp.error
+info "An error ocurred while building the json respose", error = resp.error
 return RestApiResponse.internalServerError()
 return resp.get()
@@ -185,7 +185,7 @@ proc installRelayApiHandlers*(
 logMessageInfo(node.wakuRelay, "rest", pubsubTopic, "none", message, onRecv = true)
 # if we reach here its either a non-RLN message or a RLN message with a valid proof
-debug "Publishing message",
+info "Publishing message",
 pubSubTopic = pubSubTopic, rln = not node.wakuRlnRelay.isNil()
 if not (waitFor node.publish(some(pubSubTopic), message).withTimeout(futTimeout)):
 error "Failed to publish message to topic", pubSubTopic = pubSubTopic
@@ -203,7 +203,7 @@ proc installRelayApiHandlers*(
 ) -> RestApiResponse:
 ## Subscribes a node to a list of content topics.
-debug "post_waku_v2_relay_v1_auto_subscriptions"
+info "post_waku_v2_relay_v1_auto_subscriptions"
 let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr:
 return error
@@ -228,7 +228,7 @@ proc installRelayApiHandlers*(
 ) -> RestApiResponse:
 ## Unsubscribes a node from a list of content topics.
-debug "delete_waku_v2_relay_v1_auto_subscriptions"
+info "delete_waku_v2_relay_v1_auto_subscriptions"
 let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr:
 return error
@@ -253,19 +253,19 @@ proc installRelayApiHandlers*(
 ## Returns all WakuMessages received on a content topic since the
 ## last time this method was called.
-debug "get_waku_v2_relay_v1_auto_messages", contentTopic = contentTopic
+info "get_waku_v2_relay_v1_auto_messages", contentTopic = contentTopic
 let contentTopic = contentTopic.valueOr:
 return RestApiResponse.badRequest($error)
 let messages = cache.getAutoMessages(contentTopic, clear = true).valueOr:
-debug "Not subscribed to topic", topic = contentTopic
+info "Not subscribed to topic", topic = contentTopic
 return RestApiResponse.notFound(contentTopic)
 let data = RelayGetMessagesResponse(messages.map(toRelayWakuMessage))
 return RestApiResponse.jsonResponse(data, status = Http200).valueOr:
-debug "An error ocurred while building the json respose", error = error
+info "An error ocurred while building the json respose", error = error
 return RestApiResponse.internalServerError($error)
 router.api(MethodOptions, ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC) do() -> RestApiResponse:
@@ -311,7 +311,7 @@ proc installRelayApiHandlers*(
 logMessageInfo(node.wakuRelay, "rest", pubsubTopic, "none", message, onRecv = true)
 # if we reach here its either a non-RLN message or a RLN message with a valid proof
-debug "Publishing message",
+info "Publishing message",
 contentTopic = message.contentTopic, rln = not node.wakuRlnRelay.isNil()
 var publishFut = node.publish(some($pubsubTopic), message)
View File
@@ -24,7 +24,7 @@ template unrecognizedFieldWarning*(field: typed) =
 # TODO: There should be a different notification mechanism for informing the
 # caller of a deserialization routine for unexpected fields.
 # The chonicles import in this module should be removed.
-debug "JSON field not recognized by the current version of nwaku. Consider upgrading",
+info "JSON field not recognized by the current version of nwaku. Consider upgrading",
 fieldName, typeName = typetraits.name(typeof field)
 type SerdesResult*[T] = Result[T, cstring]
View File
@@ -45,7 +45,7 @@ proc performStoreQuery(
 let res = futRes.get().toHex()
 if res.statusCode == uint32(ErrorCode.TOO_MANY_REQUESTS):
-debug "Request rate limit reached on peer ", storePeer
+info "Request rate limit reached on peer ", storePeer
 return RestApiResponse.tooManyRequests("Request rate limit reached")
 let resp = RestApiResponse.jsonResponse(res, status = Http200).valueOr:
@@ -194,7 +194,7 @@ proc installStoreApiHandlers*(
 ) -> RestApiResponse:
 let peer = peerAddr.toOpt()
-debug "REST-GET /store/v3/messages ", peer_addr = $peer
+info "REST-GET /store/v3/messages ", peer_addr = $peer
 # All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding)
 # Example:
View File
@@ -256,7 +256,7 @@ proc periodicRetentionPolicy(self: WakuArchive) {.async.} =
 let policy = self.retentionPolicy.get()
 while true:
-debug "executing message retention policy"
+info "executing message retention policy"
 (await policy.execute(self.driver)).isOkOr:
 waku_archive_errors.inc(labelValues = [retPolicyFailure])
 error "failed execution of retention policy", error = error
View File
@@ -60,7 +60,7 @@ proc new*(
 return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)
 let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
-debug "sqlite database page stats",
+info "sqlite database page stats",
 pageSize = pageSize, pages = pageCount, freePages = freelistCount
 if vacuum and (pageCount > 0 and freelistCount > 0):
@@ -74,7 +74,7 @@ proc new*(
 if migrateRes.isErr():
 return err("error in migrate sqlite: " & $migrateRes.error)
-debug "setting up sqlite waku archive driver"
+info "setting up sqlite waku archive driver"
 let res = SqliteDriver.new(db)
 if res.isErr():
 return err("failed to init sqlite archive driver: " & res.error)
@@ -119,6 +119,6 @@ proc new*(
 "Postgres has been configured but not been compiled. Check compiler definitions."
 )
 else:
-debug "setting up in-memory waku archive driver"
+info "setting up in-memory waku archive driver"
 let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
 return ok(driver)
View File
@@ -56,13 +56,13 @@ proc breakIntoStatements*(script: string): seq[string] =
 proc migrate*(
 driver: PostgresDriver, targetVersion = SchemaVersion
 ): Future[DatabaseResult[void]] {.async.} =
-debug "starting message store's postgres database migration"
+info "starting message store's postgres database migration"
 let currentVersion = (await driver.getCurrentVersion()).valueOr:
 return err("migrate error could not retrieve current version: " & $error)
 if currentVersion == targetVersion:
-debug "database schema is up to date",
+info "database schema is up to date",
 currentVersion = currentVersion, targetVersion = targetVersion
 return ok()
@@ -85,15 +85,15 @@ proc migrate*(
 # Run the migration scripts
 for script in scripts:
 for statement in script.breakIntoStatements():
-debug "executing migration statement", statement = statement
+info "executing migration statement", statement = statement
 (await driver.performWriteQuery(statement)).isOkOr:
 error "failed to execute migration statement",
 statement = statement, error = error
 return err("failed to execute migration statement")
-debug "migration statement executed succesfully", statement = statement
+info "migration statement executed succesfully", statement = statement
-debug "finished message store's postgres database migration"
+info "finished message store's postgres database migration"
 return ok()
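The migration flow above applies each script one statement at a time so a failure can be reported per statement. A minimal sketch of the same loop, assuming a hypothetical loadScript helper that reads a single .sql migration file:

    let script = loadScript("...") # hypothetical helper; real script path elided
    for statement in script.breakIntoStatements():
      (await driver.performWriteQuery(statement)).isOkOr:
        return err("failed to execute migration statement: " & $error)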
View File
@ -1143,31 +1143,31 @@ proc performWriteQueryWithLock(
(await self.performWriteQuery(query)).isOkOr: (await self.performWriteQuery(query)).isOkOr:
if error.contains(COULD_NOT_ACQUIRE_ADVISORY_LOCK): if error.contains(COULD_NOT_ACQUIRE_ADVISORY_LOCK):
## We don't consider this as an error. Just someone else acquired the advisory lock ## We don't consider this as an error. Just someone else acquired the advisory lock
debug "skip performWriteQuery because the advisory lock is acquired by other" info "skip performWriteQuery because the advisory lock is acquired by other"
return ok() return ok()
if error.contains("already exists"): if error.contains("already exists"):
## expected to happen when trying to add a partition table constraint that already exists ## expected to happen when trying to add a partition table constraint that already exists
## e.g., constraint "constraint_name" for relation "messages_1720364735_1720364740" already exists ## e.g., constraint "constraint_name" for relation "messages_1720364735_1720364740" already exists
debug "skip already exists error", error = error info "skip already exists error", error = error
return ok() return ok()
if error.contains("is already a partition"): if error.contains("is already a partition"):
## expected to happen when a node tries to add a partition that is already attached, ## expected to happen when a node tries to add a partition that is already attached,
## e.g., "messages_1720364735_1720364740" is already a partition ## e.g., "messages_1720364735_1720364740" is already a partition
debug "skip is already a partition error", error = error info "skip is already a partition error", error = error
return ok() return ok()
if error.contains("does not exist"): if error.contains("does not exist"):
## expected to happen when trying to drop a constraint that has already been dropped by other ## expected to happen when trying to drop a constraint that has already been dropped by other
## constraint "constraint_name" of relation "messages_1720364735_1720364740" does not exist ## constraint "constraint_name" of relation "messages_1720364735_1720364740" does not exist
debug "skip does not exist error", error = error info "skip does not exist error", error = error
return ok() return ok()
debug "protected query ended with error", error = $error info "protected query ended with error", error = $error
return err("protected query ended with error:" & $error) return err("protected query ended with error:" & $error)
debug "protected query ended correctly" info "protected query ended correctly"
return ok() return ok()
proc addPartition( proc addPartition(
@ -1222,7 +1222,7 @@ proc addPartition(
(await self.performWriteQueryWithLock(dropConstraint)).isOkOr: (await self.performWriteQueryWithLock(dropConstraint)).isOkOr:
return err(fmt"error dropping constraint [{partitionName}]: " & $error) return err(fmt"error dropping constraint [{partitionName}]: " & $error)
debug "new partition added", query = createPartitionQuery info "new partition added", query = createPartitionQuery
self.partitionMngr.addPartitionInfo(partitionName, beginning, `end`) self.partitionMngr.addPartitionInfo(partitionName, beginning, `end`)
return ok() return ok()
@ -1230,7 +1230,7 @@ proc addPartition(
proc refreshPartitionsInfo( proc refreshPartitionsInfo(
self: PostgresDriver self: PostgresDriver
): Future[ArchiveDriverResult[void]] {.async.} = ): Future[ArchiveDriverResult[void]] {.async.} =
debug "refreshPartitionsInfo" info "refreshPartitionsInfo"
self.partitionMngr.clearPartitionInfo() self.partitionMngr.clearPartitionInfo()
let partitionNames = (await self.getPartitionsList()).valueOr: let partitionNames = (await self.getPartitionsList()).valueOr:
@ -1266,7 +1266,7 @@ proc loopPartitionFactory(
## Loop proc that continuously checks whether we need to create a new partition. ## Loop proc that continuously checks whether we need to create a new partition.
## Notice that the deletion of partitions is handled by the retention policy modules. ## Notice that the deletion of partitions is handled by the retention policy modules.
debug "starting loopPartitionFactory" info "starting loopPartitionFactory"
while true: while true:
trace "Check if a new partition is needed" trace "Check if a new partition is needed"
@ -1278,7 +1278,7 @@ proc loopPartitionFactory(
let now = times.now().toTime().toUnix() let now = times.now().toTime().toUnix()
if self.partitionMngr.isEmpty(): if self.partitionMngr.isEmpty():
debug "adding partition because now there aren't more partitions" info "adding partition because now there aren't more partitions"
(await self.addPartition(now)).isOkOr: (await self.addPartition(now)).isOkOr:
onFatalError("error when creating a new partition from empty state: " & $error) onFatalError("error when creating a new partition from empty state: " & $error)
else: else:
@ -1288,14 +1288,14 @@ proc loopPartitionFactory(
let newestPartition = newestPartitionRes.get() let newestPartition = newestPartitionRes.get()
if newestPartition.containsMoment(now): if newestPartition.containsMoment(now):
debug "creating a new partition for the future" info "creating a new partition for the future"
## The current used partition is the last one that was created. ## The current used partition is the last one that was created.
## Thus, let's create another partition for the future. ## Thus, let's create another partition for the future.
(await self.addPartition(newestPartition.getLastMoment())).isOkOr: (await self.addPartition(newestPartition.getLastMoment())).isOkOr:
onFatalError("could not add the next partition for 'now': " & $error) onFatalError("could not add the next partition for 'now': " & $error)
elif now >= newestPartition.getLastMoment(): elif now >= newestPartition.getLastMoment():
debug "creating a new partition to contain current messages" info "creating a new partition to contain current messages"
## There is no partition to contain the current time. ## There is no partition to contain the current time.
## This happens if the node has been stopped for quite a long time. ## This happens if the node has been stopped for quite a long time.
## Then, let's create the needed partition to contain 'now'. ## Then, let's create the needed partition to contain 'now'.
@ -1333,23 +1333,23 @@ proc removePartition(
## whose rows belong to the partition time range ## whose rows belong to the partition time range
let partitionName = partition.getName() let partitionName = partition.getName()
debug "beginning of removePartition", partitionName info "beginning of removePartition", partitionName
let partSize = (await self.getTableSize(partitionName)).valueOr("") let partSize = (await self.getTableSize(partitionName)).valueOr("")
## Detach and remove the partition concurrently to not block the parent table (messages) ## Detach and remove the partition concurrently to not block the parent table (messages)
let detachPartitionQuery = let detachPartitionQuery =
"ALTER TABLE messages DETACH PARTITION " & partitionName & " CONCURRENTLY;" "ALTER TABLE messages DETACH PARTITION " & partitionName & " CONCURRENTLY;"
debug "removeOldestPartition", query = detachPartitionQuery info "removeOldestPartition", query = detachPartitionQuery
(await self.performWriteQuery(detachPartitionQuery)).isOkOr: (await self.performWriteQuery(detachPartitionQuery)).isOkOr:
debug "detected error when trying to detach partition", error info "detected error when trying to detach partition", error
if ($error).contains("FINALIZE") or if ($error).contains("FINALIZE") or
($error).contains("already pending detach in part"): ($error).contains("already pending detach in part"):
## We assume the database is suggesting to use FINALIZE when detaching a partition ## We assume the database is suggesting to use FINALIZE when detaching a partition
let detachPartitionFinalizeQuery = let detachPartitionFinalizeQuery =
"ALTER TABLE messages DETACH PARTITION " & partitionName & " FINALIZE;" "ALTER TABLE messages DETACH PARTITION " & partitionName & " FINALIZE;"
debug "removeOldestPartition detaching with FINALIZE", info "removeOldestPartition detaching with FINALIZE",
query = detachPartitionFinalizeQuery query = detachPartitionFinalizeQuery
(await self.performWriteQuery(detachPartitionFinalizeQuery)).isOkOr: (await self.performWriteQuery(detachPartitionFinalizeQuery)).isOkOr:
return err(fmt"error in FINALIZE {detachPartitionFinalizeQuery}: " & $error) return err(fmt"error in FINALIZE {detachPartitionFinalizeQuery}: " & $error)
@ -1358,11 +1358,11 @@ proc removePartition(
## Drop the partition ## Drop the partition
let dropPartitionQuery = "DROP TABLE " & partitionName let dropPartitionQuery = "DROP TABLE " & partitionName
debug "removeOldestPartition drop partition", query = dropPartitionQuery info "removeOldestPartition drop partition", query = dropPartitionQuery
(await self.performWriteQuery(dropPartitionQuery)).isOkOr: (await self.performWriteQuery(dropPartitionQuery)).isOkOr:
    return err(fmt"error in {dropPartitionQuery}: " & $error)
-  debug "removed partition", partition_name = partitionName, partition_size = partSize
+  info "removed partition", partition_name = partitionName, partition_size = partSize
  self.partitionMngr.removeOldestPartitionName()
  ## Now delete rows from the messages_lookup table
@@ -1380,7 +1380,7 @@ proc removePartitionsOlderThan(
  ## Removes old partitions that don't contain the specified timestamp
  let tsInSec = Timestamp(float(tsInNanoSec) / 1_000_000_000)
-  debug "beginning of removePartitionsOlderThan", tsInSec
+  info "beginning of removePartitionsOlderThan", tsInSec
  var oldestPartition = self.partitionMngr.getOldestPartition().valueOr:
    return err("could not get oldest partition in removePartitionOlderThan: " & $error)
@@ -1411,7 +1411,7 @@ proc removeOldestPartition(
  ## The database contains a partition that would store current messages.
  if currentPartitionRes.get() == oldestPartition:
-    debug "Skipping to remove the current partition"
+    info "Skipping to remove the current partition"
    return ok()
  return await self.removePartition(oldestPartition)
@@ -1431,7 +1431,7 @@ method decreaseDatabaseSize*(
  if totalSizeOfDB <= targetSizeInBytes:
    return ok()
-  debug "start reducing database size",
+  info "start reducing database size",
    targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB
  while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition():
@@ -1450,7 +1450,7 @@ method decreaseDatabaseSize*(
    totalSizeOfDB = newCurrentSize
-    debug "reducing database size",
+    info "reducing database size",
      targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB
  return ok()
@@ -1527,22 +1527,22 @@ proc analyzeTableLoop(self: PostgresDriver) {.async.} =
  ## The database stats should be calculated regularly so that the planner
  ## picks up the proper indexes and we have better query performance.
  while true:
-    debug "analyzeTableLoop lock db"
+    info "analyzeTableLoop lock db"
    (await self.acquireDatabaseLock(AnalyzeTableLockId)).isOkOr:
      if error != EXPECTED_LOCK_ERROR:
        error "failed to acquire lock in analyzeTableLoop", error = error
      await sleepAsync(RunAnalyzeInterval)
      continue
-    debug "analyzeTableLoop start analysis"
+    info "analyzeTableLoop start analysis"
    (await self.performWriteQuery(AnalyzeQuery)).isOkOr:
      error "failed to run ANALYZE messages", error = error
-    debug "analyzeTableLoop unlock db"
+    info "analyzeTableLoop unlock db"
    (await self.releaseDatabaseLock(AnalyzeTableLockId)).isOkOr:
      error "failed to release lock analyzeTableLoop", error = error
-    debug "analyzeTableLoop analysis completed"
+    info "analyzeTableLoop analysis completed"
    await sleepAsync(RunAnalyzeInterval)
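Almost every hunk in this merge raises a module's debug logs to info (and, in waku_relay further down, lowers per-message notice logs to debug). As a reminder of why the level matters with chronicles, a minimal sketch (standard chronicles usage; values are illustrative):

    import chronicles

    # With chronicles, statements below the compile-time level are removed
    # entirely; release builds default to INFO, so promoting a statement from
    # debug to info is what makes it visible in default production logs.
    debug "analyzeTableLoop lock db"   # compiled out at -d:chronicles_log_level=INFO
    info "removed partition", partition_name = "messages_12", partition_size = 4096
    error "failed to run ANALYZE messages", error = "lock timeout"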
View File
@@ -55,7 +55,7 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
  ## If not `targetVersion` is provided, it defaults to `SchemaVersion`.
  ##
  ## NOTE: Down migration it is not currently supported
-  debug "starting message store's sqlite database migration"
+  info "starting message store's sqlite database migration"
  let userVersion = ?db.getUserVersion()
  let isSchemaVersion7 = ?db.isSchemaVersion7()
@@ -70,5 +70,5 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
  if migrationRes.isErr():
    return err("failed to execute migration scripts: " & migrationRes.error)
-  debug "finished message store's sqlite database migration"
+  info "finished message store's sqlite database migration"
  return ok()
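For orientation, migrate drives the store schema from the version recorded in sqlite up to SchemaVersion by replaying migration scripts. A minimal stand-alone sketch of that loop, using the stock db_sqlite API and a hypothetical migrationScript helper (not nwaku's actual implementation):

    import std/strutils
    import db_sqlite   # db_connector/db_sqlite on recent Nim versions

    proc migrate(db: DbConn, targetVersion: int) =
      # sqlite keeps the schema version in the user_version pragma
      var current = parseInt(db.getValue(sql"PRAGMA user_version"))
      while current < targetVersion:
        # db.exec(migrationScript(current))  # hypothetical: SQL upgrading current -> current+1
        inc current
        db.exec(sql("PRAGMA user_version = " & $current))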
View File
@@ -53,7 +53,7 @@ proc new*(T: type CapacityRetentionPolicy, capacity = DefaultCapacity): T =
method execute*(
    p: CapacityRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
-  debug "beginning executing message retention policy - capacity"
+  info "beginning executing message retention policy - capacity"
  let numMessages = (await driver.getMessagesCount()).valueOr:
    return err("failed to get messages count: " & error)
@@ -64,6 +64,6 @@ method execute*(
  (await driver.deleteOldestMessagesNotWithinLimit(limit = p.capacity + p.deleteWindow)).isOkOr:
    return err("deleting oldest messages failed: " & error)
-  debug "end executing message retention policy - capacity"
+  info "end executing message retention policy - capacity"
  return ok()
View File
@@ -18,10 +18,10 @@ proc new*(T: type SizeRetentionPolicy, size = DefaultRetentionSize): T =
method execute*(
    p: SizeRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
-  debug "beginning of executing message retention policy - size"
+  info "beginning of executing message retention policy - size"
  (await driver.decreaseDatabaseSize(p.sizeLimit)).isOkOr:
    return err("decreaseDatabaseSize failed: " & $error)
-  debug "end of executing message retention policy - size"
+  info "end of executing message retention policy - size"
  return ok()
View File
@@ -18,7 +18,7 @@ method execute*(
    p: TimeRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
  ## Delete messages that exceed the retention time by 10% and more (batch delete for efficiency)
-  debug "beginning of executing message retention policy - time"
+  info "beginning of executing message retention policy - time"
  let omtRes = await driver.getOldestMessageTimestamp()
  if omtRes.isErr():
@@ -35,5 +35,5 @@ method execute*(
  if res.isErr():
    return err("failed to delete oldest messages: " & res.error)
-  debug "end of executing message retention policy - time"
+  info "end of executing message retention policy - time"
  return ok()
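The 10% slack in the time policy is what turns deletions into occasional batches rather than per-message work. A minimal sketch of that threshold with nanosecond timestamps (constants illustrative):

    # a minimal sketch: trim only once the oldest message overshoots the
    # retention window by more than 10%, so deletes run as batches
    proc needsTrim(oldestNs, nowNs, retentionNs: int64): bool =
      nowNs - oldestNs > retentionNs + retentionNs div 10

    const retentionNs = 30'i64 * 24 * 3600 * 1_000_000_000  # 30 days
    doAssert needsTrim(0, retentionNs + retentionNs div 10 + 1, retentionNs)
    doAssert not needsTrim(0, retentionNs, retentionNs)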
View File
@@ -122,7 +122,7 @@ proc handleMessage*(
  let insertDuration = getTime().toUnixFloat() - insertStartTime
  waku_legacy_archive_insert_duration_seconds.observe(insertDuration)
-  debug "message archived",
+  info "message archived",
    msg_hash = msgHashHex,
    pubsubTopic = pubsubTopic,
    contentTopic = msg.contentTopic,
View File
@@ -62,7 +62,7 @@ proc new*(
    return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)
  let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
-  debug "sqlite database page stats",
+  info "sqlite database page stats",
    pageSize = pageSize, pages = pageCount, freePages = freelistCount
  if vacuum and (pageCount > 0 and freelistCount > 0):
@@ -76,7 +76,7 @@ proc new*(
  if migrateRes.isErr():
    return err("error in migrate sqlite: " & $migrateRes.error)
-  debug "setting up sqlite waku archive driver"
+  info "setting up sqlite waku archive driver"
  let res = SqliteDriver.new(db)
  if res.isErr():
    return err("failed to init sqlite archive driver: " & res.error)
@@ -99,6 +99,6 @@ proc new*(
      "Postgres has been configured but not been compiled. Check compiler definitions."
    )
  else:
-    debug "setting up in-memory waku archive driver"
+    info "setting up in-memory waku archive driver"
    let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
    return ok(driver)
View File
@@ -892,7 +892,7 @@ method decreaseDatabaseSize*(
  #   if totalSizeOfDB <= targetSizeInBytes:
  #     return ok()
-  #   debug "start reducing database size",
+  #   info "start reducing database size",
  #     targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB
  #   while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition():
@@ -911,7 +911,7 @@ method decreaseDatabaseSize*(
  #     totalSizeOfDB = newCurrentSize
-  #     debug "reducing database size",
+  #     info "reducing database size",
  #       targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB
  return ok()
View File
@@ -55,7 +55,7 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
  ## If not `targetVersion` is provided, it defaults to `SchemaVersion`.
  ##
  ## NOTE: Down migration it is not currently supported
-  debug "starting message store's sqlite database migration"
+  info "starting message store's sqlite database migration"
  let userVersion = ?db.getUserVersion()
  let isSchemaVersion7 = ?db.isSchemaVersion7()
@@ -70,5 +70,5 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
  if migrationRes.isErr():
    return err("failed to execute migration scripts: " & migrationRes.error)
-  debug "finished message store's sqlite database migration"
+  info "finished message store's sqlite database migration"
  return ok()
View File
@@ -199,7 +199,7 @@ proc relayShardingIndicesList*(record: TypedRecord): Option[RelayShards] =
    return none(RelayShards)
  let indexList = fromIndicesList(field).valueOr:
-    debug "invalid shards list", error = error
+    info "invalid shards list", error = error
    return none(RelayShards)
  some(indexList)
@@ -209,7 +209,7 @@ proc relayShardingBitVector*(record: TypedRecord): Option[RelayShards] =
    return none(RelayShards)
  let bitVector = fromBitVector(field).valueOr:
-    debug "invalid shards bit vector", error = error
+    info "invalid shards bit vector", error = error
    return none(RelayShards)
  some(bitVector)
@@ -241,7 +241,7 @@ proc containsShard*(r: Record, shard: RelayShard): bool =
proc containsShard*(r: Record, topic: PubsubTopic): bool =
  let parseRes = RelayShard.parse(topic)
  if parseRes.isErr():
-    debug "invalid static sharding topic", topic = topic, error = parseRes.error
+    info "invalid static sharding topic", topic = topic, error = parseRes.error
    return false
  containsShard(r, parseRes.value)
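RelayShard.parse above accepts static sharding pubsub topics of the form /waku/2/rs/<clusterId>/<shardId>. A minimal stand-alone sketch of such a parse (illustrative only, looser than the module's real validation):

    import std/strutils

    proc parseStaticShardTopic(topic: string): tuple[cluster, shard: uint16] =
      # "/waku/2/rs/1/0".split('/') -> @["", "waku", "2", "rs", "1", "0"]
      let parts = topic.split('/')
      doAssert parts.len == 6 and parts[1] == "waku" and parts[3] == "rs",
        "not a static sharding topic"
      (uint16(parts[4].parseUInt), uint16(parts[5].parseUInt))

    doAssert parseStaticShardTopic("/waku/2/rs/1/0") == (1'u16, 0'u16)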
View File
@@ -110,7 +110,7 @@ proc sendSubscribeRequest(
proc ping*(
    wfc: WakuFilterClient, servicePeer: RemotePeerInfo
): Future[FilterSubscribeResult] {.async.} =
-  debug "sending ping", servicePeer = shortLog($servicePeer)
+  info "sending ping", servicePeer = shortLog($servicePeer)
  let requestId = generateRequestId(wfc.rng)
  let filterSubscribeRequest = FilterSubscribeRequest.ping(requestId)
@@ -194,7 +194,7 @@ proc initProtocolHandler(wfc: WakuFilterClient) =
      let msg_hash =
        computeMessageHash(msgPush.pubsubTopic, msgPush.wakuMessage).to0xHex()
-      debug "Received message push",
+      info "Received message push",
        peerId = conn.peerId,
        msg_hash,
        payload = shortLog(msgPush.wakuMessage.payload),
View File
@@ -31,7 +31,7 @@ type WakuFilter* = ref object of LPProtocol
  peerConnections: Table[PeerId, Connection]
proc pingSubscriber(wf: WakuFilter, peerId: PeerID): FilterSubscribeResult =
-  debug "pinging subscriber", peerId = peerId
+  info "pinging subscriber", peerId = peerId
  if not wf.subscriptions.isSubscribed(peerId):
    error "pinging peer has no subscriptions", peerId = peerId
@@ -67,13 +67,13 @@ proc subscribe(
  let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it)))
-  debug "subscribing peer to filter criteria",
+  info "subscribing peer to filter criteria",
    peerId = peerId, filterCriteria = filterCriteria
  (await wf.subscriptions.addSubscription(peerId, filterCriteria)).isOkOr:
    return err(FilterSubscribeError.serviceUnavailable(error))
-  debug "correct subscription", peerId = peerId
+  info "correct subscription", peerId = peerId
  ok()
@@ -99,7 +99,7 @@ proc unsubscribe(
  let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it)))
-  debug "unsubscribing peer from filter criteria",
+  info "unsubscribing peer from filter criteria",
    peerId = peerId, filterCriteria = filterCriteria
  wf.subscriptions.removeSubscription(peerId, filterCriteria).isOkOr:
@@ -108,7 +108,7 @@ proc unsubscribe(
  ## Note: do not remove from peerRequestRateLimiter to prevent trick with subscribe/unsubscribe loop
  ## We remove only if peerManager removes the peer
-  debug "correct unsubscription", peerId = peerId
+  info "correct unsubscription", peerId = peerId
  ok()
@@ -116,10 +116,10 @@ proc unsubscribeAll(
    wf: WakuFilter, peerId: PeerID
): Future[FilterSubscribeResult] {.async.} =
  if not wf.subscriptions.isSubscribed(peerId):
-    debug "unsubscribing peer has no subscriptions", peerId = peerId
+    info "unsubscribing peer has no subscriptions", peerId = peerId
    return err(FilterSubscribeError.notFound())
-  debug "removing peer subscription", peerId = peerId
+  info "removing peer subscription", peerId = peerId
  await wf.subscriptions.removePeer(peerId)
  wf.subscriptions.cleanUp()
@@ -170,7 +170,7 @@ proc handleSubscribeRequest*(
proc pushToPeer(
    wf: WakuFilter, peerId: PeerId, buffer: seq[byte]
): Future[Result[void, string]] {.async.} =
-  debug "pushing message to subscribed peer", peerId = shortLog(peerId)
+  info "pushing message to subscribed peer", peerId = shortLog(peerId)
  let stream = (
    await wf.peerManager.getStreamByPeerIdAndProtocol(peerId, WakuFilterPushCodec)
@@ -180,7 +180,7 @@ proc pushToPeer(
  await stream.writeLp(buffer)
-  debug "published successful", peerId = shortLog(peerId), stream
+  info "published successful", peerId = shortLog(peerId), stream
  waku_service_network_bytes.inc(
    amount = buffer.len().int64, labelValues = [WakuFilterPushCodec, "out"]
  )
@@ -220,13 +220,13 @@ proc pushToPeers(
  await allFutures(pushFuts)
proc maintainSubscriptions*(wf: WakuFilter) {.async.} =
-  debug "maintaining subscriptions"
+  info "maintaining subscriptions"
  ## Remove subscriptions for peers that have been removed from peer store
  var peersToRemove: seq[PeerId]
  for peerId in wf.subscriptions.peersSubscribed.keys:
    if not wf.peerManager.switch.peerStore.hasPeer(peerId, WakuFilterPushCodec):
-      debug "peer has been removed from peer store, we will remove subscription",
+      info "peer has been removed from peer store, we will remove subscription",
        peerId = peerId
      peersToRemove.add(peerId)
@@ -245,7 +245,7 @@ proc handleMessage*(
) {.async.} =
  let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
-  debug "handling message", pubsubTopic = pubsubTopic, msg_hash = msgHash
+  info "handling message", pubsubTopic = pubsubTopic, msg_hash = msgHash
  let handleMessageStartTime = Moment.now()
@@ -288,7 +288,7 @@ proc handleMessage*(
proc initProtocolHandler(wf: WakuFilter) =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
-    debug "filter subscribe request handler triggered",
+    info "filter subscribe request handler triggered",
      peerId = shortLog(conn.peerId), conn
    var response: FilterSubscribeResponse
@@ -325,10 +325,10 @@ proc initProtocolHandler(wf: WakuFilter) =
          remote_peer_id = conn.peerId, err = getCurrentExceptionMsg()
        return
-      debug "sending filter subscribe response",
+      info "sending filter subscribe response",
        peer_id = shortLog(conn.peerId), response = response
    do:
-      debug "filter request rejected due rate limit exceeded",
+      info "filter request rejected due rate limit exceeded",
        peerId = shortLog(conn.peerId), limit = $wf.peerRequestRateLimiter.setting
      response = FilterSubscribeResponse(
        requestId: "N/A",
@@ -392,17 +392,17 @@ proc new*(
proc periodicSubscriptionsMaintenance(wf: WakuFilter) {.async.} =
  const MaintainSubscriptionsInterval = 1.minutes
-  debug "starting to maintain subscriptions"
+  info "starting to maintain subscriptions"
  while true:
    await wf.maintainSubscriptions()
    await sleepAsync(MaintainSubscriptionsInterval)
proc start*(wf: WakuFilter) {.async.} =
-  debug "starting filter protocol"
+  info "starting filter protocol"
  await procCall LPProtocol(wf).start()
  wf.subscriptionsManagerFut = wf.periodicSubscriptionsMaintenance()
proc stop*(wf: WakuFilter) {.async.} =
-  debug "stopping filter protocol"
+  info "stopping filter protocol"
  await wf.subscriptionsManagerFut.cancelAndWait()
  await procCall LPProtocol(wf).stop()
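The filter service keeps its maintenance loop as a stored future that start/stop manage explicitly. A minimal self-contained sketch of that chronos pattern (WakuFilter specifics elided; interval illustrative):

    import chronos

    var maintenanceFut: Future[void]

    proc maintainOnce() {.async.} =
      discard # e.g. drop subscriptions of peers gone from the peer store

    proc maintenanceLoop() {.async.} =
      while true:
        await maintainOnce()
        await sleepAsync(1.minutes)        # MaintainSubscriptionsInterval

    proc start() {.async.} =
      maintenanceFut = maintenanceLoop()   # fire-and-forget, but keep the handle

    proc stop() {.async.} =
      await maintenanceFut.cancelAndWait() # cancel the loop before shutdown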
View File
@@ -85,7 +85,7 @@ proc findSubscribedPeers*(
    if s.isSubscribed(peer):
      foundPeers.add(peer)
-  debug "findSubscribedPeers result",
+  info "findSubscribedPeers result",
    filter_criterion = filterCriterion,
    subscr_set = s.subscriptions,
    found_peers = foundPeers
@@ -94,29 +94,29 @@ proc findSubscribedPeers*(
proc removePeer*(s: FilterSubscriptions, peerId: PeerID) {.async.} =
  ## Remove all subscriptions for a given peer
-  debug "removePeer",
+  info "removePeer",
    currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), peerId = peerId
  s.peersSubscribed.del(peerId)
-  debug "removePeer after deletion",
+  info "removePeer after deletion",
    currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), peerId = peerId
proc removePeers*(s: FilterSubscriptions, peerIds: seq[PeerID]) {.async.} =
  ## Remove all subscriptions for a given list of peers
-  debug "removePeers",
+  info "removePeers",
    currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)),
    peerIds = peerIds.mapIt(shortLog(it))
  for peer in peerIds:
    await s.removePeer(peer)
-  debug "removePeers after deletion",
+  info "removePeers after deletion",
    currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)),
    peerIds = peerIds.mapIt(shortLog(it))
proc cleanUp*(fs: FilterSubscriptions) =
-  debug "cleanUp", currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it))
+  info "cleanUp", currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it))
  ## Remove all subscriptions for peers that have not been seen for a while
  let now = Moment.now()
@@ -128,7 +128,7 @@ proc cleanUp*(fs: FilterSubscriptions) =
  fs.subscriptions.keepItIf(val.len > 0)
-  debug "after cleanUp",
+  info "after cleanUp",
    currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it))
proc refreshSubscription*(s: var FilterSubscriptions, peerId: PeerID) =
@@ -162,7 +162,7 @@ proc addSubscription*(
    peersOfSub[].incl(peerId)
    peerData.criteriaCount += 1
-  debug "subscription added correctly",
+  info "subscription added correctly",
    new_peer = shortLog(peerId), subscr_set = s.subscriptions
  return ok()
View File
@@ -127,7 +127,7 @@ proc initProtocolHandler(wl: WakuLightPush) =
      except CatchableError:
        error "lightpush failed handleRequest", error = getCurrentExceptionMsg()
    do:
-      debug "lightpush request rejected due rate limit exceeded",
+      info "lightpush request rejected due rate limit exceeded",
        peerId = conn.peerId, limit = $wl.requestRateLimiter.setting
      rpc = static(
View File
@@ -87,7 +87,7 @@ proc initProtocolHandler(wl: WakuLegacyLightPush) =
      except CatchableError:
        error "lightpush legacy handleRequest failed", error = getCurrentExceptionMsg()
    do:
-      debug "lightpush request rejected due rate limit exceeded",
+      info "lightpush request rejected due rate limit exceeded",
        peerId = conn.peerId, limit = $wl.requestRateLimiter.setting
      rpc = static(
View File
@@ -86,7 +86,7 @@ proc initProtocolHandler(m: WakuMetadata) =
      error "Response decoding error", error = error
      return
-    debug "Received WakuMetadata request",
+    info "Received WakuMetadata request",
      remoteClusterId = response.clusterId,
      remoteShards = response.shards,
      localClusterId = m.clusterId,
View File
@@ -95,7 +95,7 @@ proc encryptWithAd*(
  # Otherwise we return the input plaintext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object
  else:
    ciphertext = @plaintext
-    debug "encryptWithAd called with no encryption key set. Returning plaintext."
+    info "encryptWithAd called with no encryption key set. Returning plaintext."
  return ciphertext
@@ -133,7 +133,7 @@ proc decryptWithAd*(
    # We check if the input authorization tag matches the decryption authorization tag
    if inputAuthorizationTag != authorizationTag:
-      debug "decryptWithAd failed",
+      info "decryptWithAd failed",
        plaintext = plaintext,
        ciphertext = ciphertext,
        inputAuthorizationTag = inputAuthorizationTag,
@@ -155,7 +155,7 @@ proc decryptWithAd*(
  # Otherwise we return the input ciphertext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object
  else:
    plaintext = @ciphertext
-    debug "decryptWithAd called with no encryption key set. Returning ciphertext."
+    info "decryptWithAd called with no encryption key set. Returning ciphertext."
  return plaintext
@@ -359,6 +359,6 @@ proc decrypt*(
  trace "decrypt", tagIn = tagIn, tagOut = tagOut, nonce = state.nonce
  # We check if the authorization tag computed while decrypting is the same as the input tag
  if tagIn != tagOut:
-    debug "decrypt failed", plaintext = shortLog(plaintext)
+    info "decrypt failed", plaintext = shortLog(plaintext)
    raise newException(NoiseDecryptTagError, "decrypt tag authentication failed.")
  return plaintext
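Both pass-through branches above implement the Noise CipherState rule: with no key set, encrypt and decrypt return their input unchanged. A minimal sketch of that rule in isolation (types illustrative; the real CipherState also carries key, nonce and hash state):

    type CipherState = object
      hasKey: bool # sketch only; real object: key, nonce, ...

    proc encryptWithAd(cs: var CipherState, ad, plaintext: seq[byte]): seq[byte] =
      if not cs.hasKey:
        # Per http://www.noiseprotocol.org/noise.html#the-cipherstate-object,
        # EncryptWithAd with no key returns the plaintext untouched.
        return plaintext
      # Otherwise: AEAD-encrypt with (key, nonce, ad), then increment the nonce.
      doAssert false, "AEAD path elided in this sketch"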
View File
@@ -497,7 +497,7 @@ proc stepHandshake*(
  # If there are no more message patterns left for processing
  # we return an empty HandshakeStepResult
  if hs.msgPatternIdx > uint8(hs.handshakePattern.messagePatterns.len - 1):
-    debug "stepHandshake called more times than the number of message patterns present in handshake"
+    info "stepHandshake called more times than the number of message patterns present in handshake"
    return ok(hsStepResult)
  # We process the next handshake message pattern
@@ -663,7 +663,7 @@ proc readMessage*(
      # The message successfully decrypted, we can delete the first element of the inbound Message Nametag Buffer
      delete(inboundMessageNametagBuffer, 1)
    except NoiseDecryptTagError:
-      debug "A read message failed decryption. Returning empty message as plaintext."
+      info "A read message failed decryption. Returning empty message as plaintext."
      message = @[]
  return ok(message)
View File
@@ -249,7 +249,7 @@ proc initNametagsBuffer*(mntb: var MessageNametagBuffer) =
    mntb.counter += 1
  else:
    # We warn users if no secret is set
-    debug "The message nametags buffer has not a secret set"
+    info "The message nametags buffer has not a secret set"
# Deletes the first n elements in buffer and appends n new ones
proc delete*(mntb: var MessageNametagBuffer, n: int) =
@@ -272,7 +272,7 @@ proc delete*(mntb: var MessageNametagBuffer, n: int) =
    mntb.counter += 1
  else:
    # We warn users that no secret is set
-    debug "The message nametags buffer has no secret set"
+    info "The message nametags buffer has no secret set"
# Checks if the input messageNametag is contained in the input MessageNametagBuffer
proc checkNametag*(
@@ -486,7 +486,7 @@ proc serializePayloadV2*(self: PayloadV2): Result[seq[byte], cstring] =
    serializedHandshakeMessage.add serializedPk
  # If we are processing more than 256 byte, we return an error
  if serializedHandshakeMessageLen > uint8.high.int:
-    debug "PayloadV2 malformed: too many public keys contained in the handshake message"
+    info "PayloadV2 malformed: too many public keys contained in the handshake message"
    return err("Too many public keys in handshake message")
  # We get the transport message byte length
@@ -542,7 +542,7 @@ proc deserializePayloadV2*(
  # We read the Handshake Message lenght (1 byte)
  var handshakeMessageLen = payload[i].uint64
  if handshakeMessageLen > uint8.high.uint64:
-    debug "Payload malformed: too many public keys contained in the handshake message"
+    info "Payload malformed: too many public keys contained in the handshake message"
    return err("Too many public keys in handshake message")
  i += 1
View File
@@ -83,7 +83,7 @@ proc getEnrsFromCache(
    wpx: WakuPeerExchange, numPeers: uint64
): seq[enr.Record] {.gcsafe.} =
  if wpx.enrCache.len() == 0:
-    debug "peer exchange ENR cache is empty"
+    info "peer exchange ENR cache is empty"
    return @[]
  # copy and shuffle
@@ -100,11 +100,11 @@ proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool =
    return false
  if peer.enr.isNone():
-    debug "peer has no ENR", peer = $peer
+    info "peer has no ENR", peer = $peer
    return false
  if cluster.isSome() and peer.enr.get().isClusterMismatched(cluster.get()):
-    debug "peer has mismatching cluster", peer = $peer
+    info "peer has mismatching cluster", peer = $peer
    return false
  return true
@@ -176,7 +176,7 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
      return
    let enrs = wpx.getEnrsFromCache(decBuf.get().request.numPeers)
-    debug "peer exchange request received"
+    info "peer exchange request received"
    trace "px enrs to respond", enrs = $enrs
    try:
      (await wpx.respond(enrs, conn)).isErrOr:
View File
@@ -175,7 +175,7 @@ proc initProtocolHandler(w: WakuRelay) =
    ## main protocol handler that gets triggered on every
    ## connection for a protocol string
    ## e.g. ``/wakusub/0.0.1``, etc...
-    debug "Incoming WakuRelay connection", connection = conn, protocol = proto
+    info "Incoming WakuRelay connection", connection = conn, protocol = proto
    try:
      await w.handleConn(conn, proto)
@@ -204,7 +204,7 @@ proc logMessageInfo*(
  let payloadSize = float64(msg.payload.len)
  if onRecv:
-    notice "received relay message",
+    debug "received relay message",
      my_peer_id = w.switch.peerInfo.peerId,
      msg_hash = msg_hash,
      msg_id = msg_id_short,
@@ -213,7 +213,7 @@ proc logMessageInfo*(
      receivedTime = getNowInNanosecondTime(),
      payloadSizeBytes = payloadSize
  else:
-    notice "sent relay message",
+    debug "sent relay message",
      my_peer_id = w.switch.peerInfo.peerId,
      msg_hash = msg_hash,
      msg_id = msg_id_short,
@@ -380,7 +380,7 @@ proc getPubSubPeersInMesh*(
    return ok(allPeers)
  if not w.mesh.hasKey(pubsubTopic):
-    debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic",
+    info "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic",
      pubsubTopic = pubsubTopic
    return ok(initHashSet[PubSubPeer]())
@@ -461,12 +461,12 @@ proc topicsHealthLoop(wakuRelay: WakuRelay) {.async.} =
    await sleepAsync(10.seconds)
method start*(w: WakuRelay) {.async, base.} =
-  debug "start"
+  info "start"
  await procCall GossipSub(w).start()
  w.topicHealthLoopHandle = w.topicsHealthLoop()
method stop*(w: WakuRelay) {.async, base.} =
-  debug "stop"
+  info "stop"
  await procCall GossipSub(w).stop()
  if not w.topicHealthLoopHandle.isNil():
    await w.topicHealthLoopHandle.cancelAndWait()
@@ -538,7 +538,7 @@ proc validateMessage*(
  return ok()
proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler) =
-  debug "subscribe", pubsubTopic = pubsubTopic
+  info "subscribe", pubsubTopic = pubsubTopic
  # We need to wrap the handler since gossipsub doesnt understand WakuMessage
  let topicHandler = proc(
@@ -580,7 +580,7 @@ proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandle
proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) =
  ## Unsubscribe all handlers on this pubsub topic
-  debug "unsubscribe all", pubsubTopic = pubsubTopic
+  info "unsubscribe all", pubsubTopic = pubsubTopic
  procCall GossipSub(w).unsubscribeAll(pubsubTopic)
  w.topicValidator.del(pubsubTopic)
@@ -604,7 +604,7 @@ proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) =
    error "exception in unsubscribe", pubsubTopic, error = getCurrentExceptionMsg()
    return
-  debug "unsubscribe", pubsubTopic
+  info "unsubscribe", pubsubTopic
  procCall GossipSub(w).unsubscribe(pubsubTopic, topicHandler)
  procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator)
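Note the direction flips here: per-message receive/send logs drop from notice to debug (they fire for every relayed message), while lifecycle logs rise to info. Separately, subscribe wraps the Waku-level handler because gossipsub only moves raw bytes; a minimal sketch of that wrapping (types and decode stub are illustrative, not nwaku's real signatures):

    import chronos, std/options

    type
      WakuMessage = object
        payload: seq[byte] # sketch; the real type is protobuf-backed
      WakuRelayHandler = proc(topic: string, msg: WakuMessage): Future[void]
      TopicHandler = proc(topic: string, data: seq[byte]): Future[void]

    proc decodeWakuMessage(data: seq[byte]): Option[WakuMessage] =
      some(WakuMessage(payload: data)) # stand-in for the protobuf decode

    proc wrap(handler: WakuRelayHandler): TopicHandler =
      return proc(topic: string, data: seq[byte]) {.async.} =
        let msg = decodeWakuMessage(data)
        if msg.isNone():
          return # real code logs the decode failure
        await handler(topic, msg.get())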
View File
@@ -139,7 +139,7 @@ proc batchRequest*(
proc advertiseAll(
    self: WakuRendezVous
): Future[Result[void, string]] {.async: (raises: []).} =
-  debug "waku rendezvous advertisements started"
+  info "waku rendezvous advertisements started"
  let shards = self.getShards()
@@ -171,14 +171,14 @@ proc advertiseAll(
    if fut.failed():
      warn "a rendezvous advertisement failed", cause = fut.error.msg
-  debug "waku rendezvous advertisements finished"
+  info "waku rendezvous advertisements finished"
  return ok()
proc initialRequestAll*(
    self: WakuRendezVous
): Future[Result[void, string]] {.async: (raises: []).} =
-  debug "waku rendezvous initial requests started"
+  info "waku rendezvous initial requests started"
  let shards = self.getShards()
@@ -218,12 +218,12 @@ proc initialRequestAll*(
      rendezvousPeerFoundTotal.inc()
      self.peerManager.addPeer(record)
-  debug "waku rendezvous initial request finished"
+  info "waku rendezvous initial request finished"
  return ok()
proc periodicRegistration(self: WakuRendezVous) {.async.} =
-  debug "waku rendezvous periodic registration started",
+  info "waku rendezvous periodic registration started",
    interval = self.registrationInterval
  # infinite loop
@@ -231,7 +231,7 @@ proc periodicRegistration(self: WakuRendezVous) {.async.} =
    await sleepAsync(self.registrationInterval)
    (await self.advertiseAll()).isOkOr:
-      debug "waku rendezvous advertisements failed", error = error
+      info "waku rendezvous advertisements failed", error = error
      if self.registrationInterval > MaxRegistrationInterval:
        self.registrationInterval = MaxRegistrationInterval
@@ -242,7 +242,7 @@ proc periodicRegistration(self: WakuRendezVous) {.async.} =
      self.registrationInterval = DefaultRegistrationInterval
proc periodicRequests(self: WakuRendezVous) {.async.} =
-  debug "waku rendezvous periodic requests started", interval = self.requestInterval
+  info "waku rendezvous periodic requests started", interval = self.requestInterval
  # infinite loop
  while true:
@@ -288,7 +288,7 @@ proc new*(
  wrv.registrationInterval = DefaultRegistrationInterval
  wrv.requestInterval = DefaultRequestsInterval
-  debug "waku rendezvous initialized",
+  info "waku rendezvous initialized",
    clusterId = clusterId, shards = getShards(), capabilities = getCapabilities()
  return ok(wrv)
@@ -299,7 +299,7 @@ proc start*(self: WakuRendezVous) {.async: (raises: []).} =
  self.periodicRequestFut = self.periodicRequests()
-  debug "waku rendezvous discovery started"
+  info "waku rendezvous discovery started"
proc stopWait*(self: WakuRendezVous) {.async: (raises: []).} =
  if not self.periodicRegistrationFut.isNil():
@@ -308,4 +308,4 @@ proc stopWait*(self: WakuRendezVous) {.async: (raises: []).} =
  if not self.periodicRequestFut.isNil():
    await self.periodicRequestFut.cancelAndWait()
-  debug "waku rendezvous discovery stopped"
+  info "waku rendezvous discovery stopped"
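periodicRegistration grows its interval after a failed advertisement round, caps it at MaxRegistrationInterval, and resets to the default after success. A minimal sketch of that policy (chronos; the doubling step and constants are assumptions, the hunk only shows the cap and the reset):

    import chronos

    const
      DefaultRegistrationInterval = 1.minutes # illustrative values
      MaxRegistrationInterval = 8.minutes

    proc advertiseOnce(): bool = true # stand-in for advertiseAll()

    proc registrationLoop() {.async.} =
      var interval = DefaultRegistrationInterval
      while true:
        await sleepAsync(interval)
        if advertiseOnce():
          interval = DefaultRegistrationInterval # recover quickly on success
        else:
          interval = interval + interval         # back off on failure...
          if interval > MaxRegistrationInterval:
            interval = MaxRegistrationInterval   # ...but never beyond the cap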
View File
@@ -233,7 +233,7 @@ method register*(
      let leaf = rateCommitment.toLeaf().get()
      if g.registerCb.isSome():
        let idx = g.latestIndex
-        debug "registering member via callback", rateCommitment = leaf, index = idx
+        info "registering member via callback", rateCommitment = leaf, index = idx
        await g.registerCb.get()(@[Membership(rateCommitment: leaf, index: idx)])
      g.latestIndex.inc()
  except CatchableError:
@@ -253,10 +253,10 @@ method register*(
  g.retryWrapper(gasPrice, "Failed to get gas price"):
    int(await ethRpc.provider.eth_gasPrice()) * 2
  let idCommitmentHex = identityCredential.idCommitment.inHex()
-  debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
+  info "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
  let idCommitment = identityCredential.idCommitment.toUInt256()
  let idCommitmentsToErase: seq[UInt256] = @[]
-  debug "registering the member",
+  info "registering the member",
    idCommitment = idCommitment,
    userMessageLimit = userMessageLimit,
    idCommitmentsToErase = idCommitmentsToErase
@@ -270,11 +270,11 @@ method register*(
  var tsReceipt: ReceiptObject
  g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
    await ethRpc.getMinedTransactionReceipt(txHash)
-  debug "registration transaction mined", txHash = txHash
+  info "registration transaction mined", txHash = txHash
  g.registrationTxHash = some(txHash)
  # the receipt topic holds the hash of signature of the raised events
  # TODO: make this robust. search within the event list for the event
-  debug "ts receipt", receipt = tsReceipt[]
+  info "ts receipt", receipt = tsReceipt[]
  if tsReceipt.status.isNone():
    raise newException(ValueError, "Transaction failed: status is None")
@@ -285,7 +285,7 @@ method register*(
  ## Extract MembershipRegistered event from transaction logs (third event)
  let thirdTopic = tsReceipt.logs[2].topics[0]
-  debug "third topic", thirdTopic = thirdTopic
+  info "third topic", thirdTopic = thirdTopic
  if thirdTopic !=
      cast[FixedBytes[32]](keccak.keccak256.digest(
        "MembershipRegistered(uint256,uint256,uint32)"
@@ -294,7 +294,7 @@ method register*(
  ## Parse MembershipRegistered event data: rateCommitment(256) || membershipRateLimit(256) || index(32)
  let arguments = tsReceipt.logs[2].data
-  debug "tx log data", arguments = arguments
+  info "tx log data", arguments = arguments
  let
    ## Extract membership index from transaction log data (big endian)
    membershipIndex = UInt256.fromBytesBE(arguments[64 .. 95])
@@ -450,7 +450,7 @@ method generateProof*(
    nullifier: nullifier,
  )
-  debug "Proof generated successfully", proof = output
+  info "Proof generated successfully", proof = output
  waku_rln_remaining_proofs_per_epoch.dec()
  waku_rln_total_generated_proofs.inc()
@@ -486,7 +486,7 @@ method verifyProof*(
  if not ffiOk:
    return err("could not verify the proof")
  else:
-    debug "Proof verified successfully"
+    info "Proof verified successfully"
  return ok(validProof)
@@ -583,13 +583,13 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
    let idCommitmentBytes = keystoreCred.identityCredential.idCommitment
    let idCommitmentUInt256 = keystoreCred.identityCredential.idCommitment.toUInt256()
    let idCommitmentHex = idCommitmentBytes.inHex()
-    debug "Keystore idCommitment in bytes", idCommitmentBytes = idCommitmentBytes
-    debug "Keystore idCommitment in UInt256 ", idCommitmentUInt256 = idCommitmentUInt256
-    debug "Keystore idCommitment in hex ", idCommitmentHex = idCommitmentHex
+    info "Keystore idCommitment in bytes", idCommitmentBytes = idCommitmentBytes
+    info "Keystore idCommitment in UInt256 ", idCommitmentUInt256 = idCommitmentUInt256
+    info "Keystore idCommitment in hex ", idCommitmentHex = idCommitmentHex
    let idCommitment = keystoreCred.identityCredential.idCommitment
    let membershipExists = (await g.fetchMembershipStatus(idCommitment)).valueOr:
      return err("the commitment does not have a membership: " & error)
-    debug "membershipExists", membershipExists = membershipExists
+    info "membershipExists", membershipExists = membershipExists
    g.idCredentials = some(keystoreCred.identityCredential)
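The MembershipRegistered event data above is three ABI-encoded 32-byte words, with the index right-aligned in the third word (hence arguments[64 .. 95]). A minimal sketch of that slicing with plain integers (illustrative; the real code keeps the value as a UInt256):

    proc parseMembershipIndex(data: seq[byte]): uint32 =
      ## data = rateCommitment(32 bytes) || membershipRateLimit(32) || index word(32)
      doAssert data.len >= 96, "expected three 32-byte ABI words"
      # ABI integers are big-endian and left-padded, so a uint32 index
      # occupies the last 4 bytes of the third word (offsets 92..95)
      for b in data[92 .. 95]:
        result = (result shl 8) or uint32(b)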
View File
@@ -108,7 +108,7 @@ proc createRLNInstanceLocal(d = MerkleTreeDepth): RLNResult =
  let res = new_circuit(merkleDepth, addr configBuffer, addr rlnInstance)
  # check whether the circuit parameters are generated successfully
  if (res == false):
-    debug "error in parameters generation"
+    info "error in parameters generation"
    return err("error in parameters generation")
  return ok(rlnInstance)
View File
@@ -193,7 +193,7 @@ proc validateMessage*(
  let timeDiff = uint64(abs(currentTime - messageTime))
-  debug "time info",
+  info "time info",
    currentTime = currentTime, messageTime = messageTime, msgHash = msg.hash
  if timeDiff > rlnPeer.rlnMaxTimestampGap:
View File
@@ -123,7 +123,7 @@ proc initProtocolHandler(self: WakuStore) =
      waku_store_time_seconds.set(queryDuration, ["query-db-time"])
      successfulQuery = true
    do:
-      debug "store query request rejected due rate limit exceeded",
+      info "store query request rejected due rate limit exceeded",
        peerId = conn.peerId, limit = $self.requestRateLimiter.setting
      resBuf = (rejectReposnseBuffer, "rejected")
@@ -139,7 +139,7 @@ proc initProtocolHandler(self: WakuStore) =
    if successfulQuery:
      let writeDuration = getTime().toUnixFloat() - writeRespStartTime
      waku_store_time_seconds.set(writeDuration, ["send-store-resp-time"])
-      debug "after sending response",
+      info "after sending response",
        requestId = resBuf.requestId,
        queryDurationSecs = queryDuration,
        writeStreamDurationSecs = writeDuration
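Both store variants time the database query and the response write separately before publishing them as metrics. A minimal sketch of that wall-clock bookkeeping (std/times; the Prometheus gauge is stubbed out with an echo):

    import std/times

    proc timedStep(label: string, step: proc()) =
      let start = getTime().toUnixFloat()
      step()
      let duration = getTime().toUnixFloat() - start
      # real code: waku_store_time_seconds.set(duration, [label])
      echo label, ": ", duration, " s"

    timedStep("query-db-time", proc() = discard)        # run the archive query
    timedStep("send-store-resp-time", proc() = discard) # write the response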
View File
@@ -201,7 +201,7 @@ when defined(waku_exp_store_resume):
      lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0))
      now = getNanosecondTime(getTime().toUnixFloat())
-    debug "resuming with offline time window",
+    info "resuming with offline time window",
      lastSeenTime = lastSeenTime, currentTime = now
    let
@@ -218,10 +218,10 @@
    var res: WakuStoreResult[seq[WakuMessage]]
    if peerList.isSome():
-      debug "trying the candidate list to fetch the history"
+      info "trying the candidate list to fetch the history"
      res = await w.queryLoop(req, peerList.get())
    else:
-      debug "no candidate list is provided, selecting a random peer"
+      info "no candidate list is provided, selecting a random peer"
      # if no peerList is set then query from one of the peers stored in the peer manager
      let peerOpt = w.peerManager.selectPeer(WakuLegacyStoreCodec)
      if peerOpt.isNone():
@@ -229,11 +229,11 @@
        waku_legacy_store_errors.inc(labelValues = [peerNotFoundFailure])
        return err("no suitable remote peers")
-      debug "a peer is selected from peer manager"
+      info "a peer is selected from peer manager"
      res = await w.queryAll(req, peerOpt.get())
    if res.isErr():
-      debug "failed to resume the history"
+      info "failed to resume the history"
      return err("failed to resume the history")
    # Save the retrieved messages in the store
View File
@@ -142,7 +142,7 @@ proc initProtocolHandler(ws: WakuStore) =
      waku_legacy_store_time_seconds.set(queryDuration, ["query-db-time"])
      successfulQuery = true
    do:
-      debug "Legacy store query request rejected due rate limit exceeded",
+      info "Legacy store query request rejected due rate limit exceeded",
        peerId = conn.peerId, limit = $ws.requestRateLimiter.setting
      resBuf = (rejectResponseBuf, "rejected")
@@ -157,7 +157,7 @@ proc initProtocolHandler(ws: WakuStore) =
    if successfulQuery:
      let writeDuration = getTime().toUnixFloat() - writeRespStartTime
      waku_legacy_store_time_seconds.set(writeDuration, ["send-store-resp-time"])
-      debug "after sending response",
+      info "after sending response",
        requestId = resBuf.requestId,
        queryDurationSecs = queryDuration,
        writeStreamDurationSecs = writeDuration
View File
@@ -318,7 +318,7 @@ proc storeSynchronization*(
  let conn: Connection = connOpt.valueOr:
    return err("fail to dial remote " & $peer.peerId)
-  debug "sync session initialized",
+  info "sync session initialized",
    local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId
  (
@@ -331,7 +331,7 @@ proc storeSynchronization*(
    return err("sync request error: " & error)
-  debug "sync session ended gracefully",
+  info "sync session ended gracefully",
    local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId
  return ok()
@@ -354,7 +354,7 @@ proc initFillStorage(
    direction: PagingDirection.FORWARD,
  )
-  debug "initial storage filling started"
+  info "initial storage filling started"
  var storage = SeqStorage.new(DefaultStorageCap)
@@ -376,7 +376,7 @@ proc initFillStorage(
    query.cursor = response.cursor
-  debug "initial storage filling done", elements = storage.length()
+  info "initial storage filling done", elements = storage.length()
  return ok(storage)
@@ -430,21 +430,21 @@ proc new*(
  return ok(sync)
proc periodicSync(self: SyncReconciliation) {.async.} =
-  debug "periodic sync initialized", interval = $self.syncInterval
+  info "periodic sync initialized", interval = $self.syncInterval
  while true: # infinite loop
    await sleepAsync(self.syncInterval)
-    debug "periodic sync started"
+    info "periodic sync started"
    (await self.storeSynchronization()).isOkOr:
      error "periodic sync failed", err = error
      continue
-    debug "periodic sync done"
+    info "periodic sync done"
proc periodicPrune(self: SyncReconciliation) {.async.} =
-  debug "periodic prune initialized", interval = $self.syncInterval
+  info "periodic prune initialized", interval = $self.syncInterval
  # preventing sync and prune loops of happening at the same time.
  await sleepAsync((self.syncInterval div 2))
@@ -452,7 +452,7 @@ proc periodicPrune(self: SyncReconciliation) {.async.} =
  while true: # infinite loop
    await sleepAsync(self.syncInterval)
-    debug "periodic prune started"
+    info "periodic prune started"
    let time = getNowInNanosecondTime() - self.syncRange.nanos
@@ -460,7 +460,7 @@ proc periodicPrune(self: SyncReconciliation) {.async.} =
  total_messages_cached.set(self.storage.length())
-  debug "periodic prune done", elements_pruned = count
+  info "periodic prune done", elements_pruned = count
proc idsReceiverLoop(self: SyncReconciliation) {.async.} =
  while true: # infinite loop
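periodicPrune sleeps half an interval before entering its loop, so prune ticks land between sync ticks instead of coinciding with them. A minimal sketch of that phase offset (chronos; interval illustrative):

    import chronos

    const SyncInterval = 10.seconds # illustrative

    proc periodicSync() {.async.} =
      while true:
        await sleepAsync(SyncInterval)
        echo "sync tick"

    proc periodicPrune() {.async.} =
      # offset by half an interval so the two loops never fire together
      await sleepAsync(SyncInterval div 2)
      while true:
        await sleepAsync(SyncInterval)
        echo "prune tick"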
View File
@@ -74,7 +74,7 @@ proc openConnection(
  let conn: Connection = connOpt.valueOr:
    return err("fail to dial remote " & $peerId)
-  debug "transfer session initialized",
+  info "transfer session initialized",
    local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId
  return ok(conn)
@@ -103,7 +103,7 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
      ## sanity check, should not be possible
      self.outSessions[peerId].isClosedRemotely:
      ## quite possibly remote end has closed the connection, believing transfer to be done
-      debug "opening transfer connection to remote peer",
+      info "opening transfer connection to remote peer",
        my_peer_id = self.peerManager.switch.peerInfo.peerId, remote_peer_id = peerId
      let connection = (await self.openConnection(peerId)).valueOr:
@@ -188,7 +188,7 @@ proc initProtocolHandler(self: SyncTransfer) =
    await conn.close()
-    debug "transfer session ended",
+    info "transfer session ended",
      local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId
    return