Mirror of https://github.com/logos-messaging/logos-messaging-nim.git (synced 2026-01-02 14:03:06 +00:00)
Move log level from debug to info (#3622)

* convert all debug logs to info log level
* waku_relay protocol: move spammy notice logs to debug
parent 7e3617cd48
commit 7e5041d5e1
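For context on why this is a one-word change per call site: the log statements in this codebase come from the chronicles library, where each record carries a severity level and anything below the active level is filtered out. The sketch below is not part of this commit; the proc name and values are illustrative, and it assumes chronicles is compiled with runtime filtering enabled (`-d:chronicles_runtime_filtering:on`). It shows why promoting a record from `debug` to `info` makes it visible on a node running at the default INFO level.

```nim
# Minimal sketch of chronicles log-level filtering (assumed setup, not
# from this commit). Compile with: nim c -d:chronicles_runtime_filtering:on
import chronicles

# Drop every record below INFO, as a production node typically would.
setLogLevel(LogLevel.INFO)

proc finishRegistration(iter: int, elapsedMs: int64) =
  # Filtered out at INFO: the pre-commit behaviour.
  debug "registration finished", iter = iter, elapsed_ms = elapsedMs
  # Emitted at INFO: what this commit changes the calls to.
  info "registration finished", iter = iter, elapsed_ms = elapsedMs

finishRegistration(1, 42)
```

The hunks below apply that promotion mechanically across apps, library code, and test suites; the companion waku_relay notice-to-debug move mentioned in the commit message is not among the hunks captured here.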
@@ -24,7 +24,7 @@ proc benchmark(
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()

-debug "registration finished",
+info "registration finished",
iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds

discard await manager.updateRoots()

@@ -33,7 +33,7 @@ proc benchmark(
quit(QuitFailure)

let epoch = default(Epoch)
-debug "epoch in bytes", epochHex = epoch.inHex()
+info "epoch in bytes", epochHex = epoch.inHex()
let data: seq[byte] = newSeq[byte](1024)

var proofGenTimes: seq[times.Duration] = @[]

@@ -50,7 +50,7 @@ proc benchmark(
let ok = manager.verifyProof(data, proof).valueOr:
raiseAssert $error
proofVerTimes.add(getTime() - verify_time)
-debug "iteration finished",
+info "iteration finished",
iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds

echo "Proof generation times: ", sum(proofGenTimes) div len(proofGenTimes)

@@ -194,9 +194,9 @@ proc publish(c: Chat, line: string) =
# for future version when we support more than one rln protected content topic,
# we should check the message content topic as well
if c.node.wakuRlnRelay.appendRLNProof(message, float64(time)).isErr():
-debug "could not append rate limit proof to the message"
+info "could not append rate limit proof to the message"
else:
-debug "rate limit proof is appended to the message"
+info "rate limit proof is appended to the message"
let proof = RateLimitProof.init(message.proof).valueOr:
error "could not decode the RLN proof"
return

@@ -406,7 +406,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
)
elif conf.dnsDiscoveryUrl != "":
# No pre-selected fleet. Discover nodes via DNS using user config
-debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
+info "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)

var discoveredNodes: seq[RemotePeerInfo]

@@ -528,7 +528,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
info "WakuRLNRelay is enabled"

proc spamHandler(wakuMessage: WakuMessage) {.gcsafe, closure.} =
-debug "spam handler is called"
+info "spam handler is called"
let chatLineResult = getChatLine(wakuMessage.payload)
echo "spam message is found and discarded : " & chatLineResult
chat.prompt = false

@@ -197,7 +197,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =

cmb.running = true

-debug "Start polling Matterbridge"
+info "Start polling Matterbridge"

# Start Matterbridge polling (@TODO: use streaming interface)
proc mbHandler(jsonNode: JsonNode) {.async.} =

@@ -207,7 +207,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
asyncSpawn cmb.pollMatterbridge(mbHandler)

# Start Waku v2 node
-debug "Start listening on Waku v2"
+info "Start listening on Waku v2"
await cmb.nodev2.start()

# Always mount relay for bridge

@@ -501,7 +501,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
)
elif conf.dnsDiscoveryUrl != "":
# No pre-selected fleet. Discover nodes via DNS using user config
-debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
+info "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)

var discoveredNodes: seq[RemotePeerInfo]

@@ -130,7 +130,7 @@ when isMainModule:
error "Starting waku failed", error = error
quit(QuitFailure)

-debug "Setting up shutdown hooks"
+info "Setting up shutdown hooks"

proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
await waku.stop()

@@ -73,7 +73,7 @@ proc selectRandomCapablePeer*(
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
let randomPeer = supportivePeers[rndPeerIndex]

-debug "Dialing random peer",
+info "Dialing random peer",
idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)

supportivePeers.delete(rndPeerIndex .. rndPeerIndex)

@@ -82,12 +82,12 @@ proc selectRandomCapablePeer*(
if (await connOpt.withTimeout(10.seconds)):
if connOpt.value().isSome():
found = some(randomPeer)
-debug "Dialing successful",
+info "Dialing successful",
peer = constructMultiaddrStr(randomPeer), codec = codec
else:
-debug "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec
+info "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec
else:
-debug "Timeout dialing service peer",
+info "Timeout dialing service peer",
peer = constructMultiaddrStr(randomPeer), codec = codec

return found

@@ -105,8 +105,8 @@ proc tryCallAllPxPeers*(
var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability)

lpt_px_peers.set(supportivePeers.len)
-debug "Found supportive peers count", count = supportivePeers.len()
-debug "Found supportive peers", supportivePeers = $supportivePeers
+info "Found supportive peers count", count = supportivePeers.len()
+info "Found supportive peers", supportivePeers = $supportivePeers
if supportivePeers.len == 0:
return none(seq[RemotePeerInfo])

@@ -116,7 +116,7 @@ proc tryCallAllPxPeers*(
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
let randomPeer = supportivePeers[rndPeerIndex]

-debug "Dialing random peer",
+info "Dialing random peer",
idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)

supportivePeers.delete(rndPeerIndex, rndPeerIndex)

@@ -213,7 +213,7 @@ proc setConnectedPeersMetrics(
continue
var customPeerInfo = allPeers[peerIdStr]

-debug "connected to peer", peer = customPeerInfo[]
+info "connected to peer", peer = customPeerInfo[]

# after connection, get supported protocols
let lp2pPeerStore = node.switch.peerStore

@@ -358,7 +358,7 @@ proc retrieveDynamicBootstrapNodes(

if dnsDiscoveryUrl != "":
# DNS discovery
-debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl
+info "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl

var nameServers: seq[TransportAddress]
for ip in dnsAddrsNameServers:

@@ -376,7 +376,7 @@ proc retrieveDynamicBootstrapNodes(
return (await value.findPeers()).mapErr(e => $e)
warn "Failed to init Waku DNS discovery"

-debug "No method for retrieving dynamic bootstrap nodes specified."
+info "No method for retrieving dynamic bootstrap nodes specified."
ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default

proc getBootstrapFromDiscDns(

@@ -123,7 +123,7 @@ proc areProtocolsSupported(

for rawProtocol in toValidateProtocols:
let protocolTag = ProtocolsTable[rawProtocol]
-debug "Checking if protocol is supported", expected_protocol_tag = protocolTag
+info "Checking if protocol is supported", expected_protocol_tag = protocolTag

var protocolSupported = false
for nodeProtocol in nodeProtocols:

@@ -60,7 +60,7 @@ when isMainModule:
error "Starting waku failed", error = error
quit(QuitFailure)

-debug "Setting up shutdown hooks"
+info "Setting up shutdown hooks"
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
await waku.stop()
quit(QuitSuccess)

@@ -46,7 +46,7 @@ proc setup*(): Waku =
if conf.relay:
conf.rlnRelay = twnNetworkConf.rlnRelay

-debug "Starting node"
+info "Starting node"
var waku = (waitFor Waku.new(conf)).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)

@@ -52,7 +52,7 @@ proc sendThruWaku*(
(await self.waku.node.publish(some(DefaultPubsubTopic), message)).isOkOr:
return err("failed to publish message: " & $error)

-debug "rate limit proof is appended to the message"
+info "rate limit proof is appended to the message"

return ok()

@@ -129,7 +129,7 @@ proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} =
await sleepAsync(WatchdogTimeinterval)

if ctx.running.load == false:
-debug "Watchdog thread exiting because WakuContext is not running"
+info "Watchdog thread exiting because WakuContext is not running"
break

let wakuCallback = proc(

@@ -595,13 +595,13 @@ suite "Waku Filter - End to End":
await peers[index].mountFilterClient()

## connect switches
-debug "establish connection", peerId = peers[index].peerInfo.peerId
+info "establish connection", peerId = peers[index].peerInfo.peerId

await server.switch.connect(
peers[index].switch.peerInfo.peerId, peers[index].switch.peerInfo.listenAddrs
)

-debug "adding subscription"
+info "adding subscription"

(
await wakuFilter.subscriptions.addSubscription(

@@ -160,7 +160,7 @@ suite "RLN Proofs as a Lightpush Service":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated1 = waitFor manager1.updateRoots()
-debug "Updated root for node1", rootUpdated1
+info "Updated root for node1", rootUpdated1

if rootUpdated1:
let proofResult = waitFor manager1.fetchMerkleProofElements()

@@ -160,7 +160,7 @@ suite "RLN Proofs as a Lightpush Service":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated1 = waitFor manager1.updateRoots()
-debug "Updated root for node1", rootUpdated1
+info "Updated root for node1", rootUpdated1

if rootUpdated1:
let proofResult = waitFor manager1.fetchMerkleProofElements()

@@ -22,7 +22,7 @@ suite "Waku Keepalive":
var completionFut = newFuture[bool]()

proc pingHandler(peerId: PeerID) {.async, gcsafe.} =
-debug "Ping received"
+info "Ping received"

check:
peerId == node1.switch.peerInfo.peerId

@@ -63,7 +63,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let putRes = await driver.put(

@@ -99,7 +99,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -150,7 +150,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -189,7 +189,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -230,7 +230,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -286,7 +286,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -370,7 +370,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -427,7 +427,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -482,7 +482,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -524,7 +524,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -566,7 +566,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -606,7 +606,7 @@ suite "Postgres driver - queries":
]

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -655,7 +655,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -700,7 +700,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -788,7 +788,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -875,7 +875,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -919,7 +919,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it))

@@ -956,7 +956,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -996,7 +996,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1081,7 +1081,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1124,7 +1124,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1167,7 +1167,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1212,7 +1212,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1258,7 +1258,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1307,7 +1307,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1388,7 +1388,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1469,7 +1469,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1551,7 +1551,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1634,7 +1634,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1680,7 +1680,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1727,7 +1727,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1764,7 +1764,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -43,7 +43,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -85,7 +85,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -129,7 +129,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -175,7 +175,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -218,7 +218,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -313,7 +313,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -377,7 +377,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -439,7 +439,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -489,7 +489,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -536,7 +536,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -581,7 +581,7 @@ suite "Queue driver - query by cursor":
]

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -633,7 +633,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -683,7 +683,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -776,7 +776,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -870,7 +870,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -922,7 +922,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -967,7 +967,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -1057,7 +1057,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1107,7 +1107,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -1155,7 +1155,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = await driver.put(

@@ -1205,7 +1205,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -1256,7 +1256,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = await driver.put(

@@ -1310,7 +1310,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = await driver.put(

@@ -1396,7 +1396,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1484,7 +1484,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1573,7 +1573,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1663,7 +1663,7 @@ suite "Queue driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -36,7 +36,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -79,7 +79,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -136,7 +136,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -181,7 +181,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -228,7 +228,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -272,7 +272,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -369,7 +369,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -432,7 +432,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -493,7 +493,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -542,7 +542,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -590,7 +590,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -636,7 +636,7 @@ suite "SQLite driver - query by cursor":
]

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -689,7 +689,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -740,7 +740,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -834,7 +834,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -927,7 +927,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -978,7 +978,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1024,7 +1024,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1115,7 +1115,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1164,7 +1164,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1213,7 +1213,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1264,7 +1264,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1316,7 +1316,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1371,7 +1371,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1458,7 +1458,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1545,7 +1545,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1633,7 +1633,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1722,7 +1722,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -90,7 +90,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -131,7 +131,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -186,7 +186,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -229,7 +229,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -274,7 +274,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -334,7 +334,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -426,7 +426,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -487,7 +487,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -546,7 +546,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -592,7 +592,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -638,7 +638,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -682,7 +682,7 @@ suite "Postgres driver - queries":
]

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -736,7 +736,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -785,7 +785,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -877,7 +877,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -968,7 +968,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1016,7 +1016,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it))

@@ -1057,7 +1057,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1101,7 +1101,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1190,7 +1190,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1237,7 +1237,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1284,7 +1284,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1333,7 +1333,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1383,7 +1383,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1436,7 +1436,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1521,7 +1521,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1606,7 +1606,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1692,7 +1692,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1779,7 +1779,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -1830,7 +1830,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1882,7 +1882,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -1924,7 +1924,7 @@ suite "Postgres driver - queries":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@@ -49,7 +49,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -95,7 +95,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -143,7 +143,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -193,7 +193,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -240,7 +240,7 @@ suite "Queue driver - query by content topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -343,7 +343,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -409,7 +409,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -473,7 +473,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -525,7 +525,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -576,7 +576,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -625,7 +625,7 @@ suite "Queue driver - query by cursor":
]

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -682,7 +682,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -736,7 +736,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
+info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
let retFut = waitFor driver.put(

@@ -833,7 +833,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@@ -929,7 +929,7 @@ suite "Queue driver - query by cursor":
var messages = expected

shuffle(messages)
-debug "randomized message insertion sequence",
+info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -983,7 +983,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
@ -1032,7 +1032,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
@ -1126,7 +1126,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence",
|
||||
info "randomized message insertion sequence",
|
||||
sequence = messages.mapIt(it[1].payload)
|
||||
|
||||
for row in messages:
|
||||
@ -1178,7 +1178,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
@ -1230,7 +1230,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
let retFut = await driver.put(
|
||||
@ -1284,7 +1284,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
@ -1339,7 +1339,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
let retFut = await driver.put(
|
||||
@ -1397,7 +1397,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
let retFut = await driver.put(
|
||||
@ -1487,7 +1487,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence",
|
||||
info "randomized message insertion sequence",
|
||||
sequence = messages.mapIt(it[1].payload)
|
||||
|
||||
for row in messages:
|
||||
@ -1577,7 +1577,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence",
|
||||
info "randomized message insertion sequence",
|
||||
sequence = messages.mapIt(it[1].payload)
|
||||
|
||||
for row in messages:
|
||||
@ -1668,7 +1668,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence",
|
||||
info "randomized message insertion sequence",
|
||||
sequence = messages.mapIt(it[1].payload)
|
||||
|
||||
for row in messages:
|
||||
@ -1760,7 +1760,7 @@ suite "Queue driver - query by time range":
|
||||
var messages = expected
|
||||
|
||||
shuffle(messages)
|
||||
debug "randomized message insertion sequence",
|
||||
info "randomized message insertion sequence",
|
||||
sequence = messages.mapIt(it[1].payload)
|
||||
|
||||
for row in messages:
|
||||
|
||||
@ -38,7 +38,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -85,7 +85,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -146,7 +146,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -195,7 +195,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -246,7 +246,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -294,7 +294,7 @@ suite "SQLite driver - query by content topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -399,7 +399,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -466,7 +466,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -531,7 +531,7 @@ suite "SQLite driver - query by pubsub topic":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -584,7 +584,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -636,7 +636,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -686,7 +686,7 @@ suite "SQLite driver - query by cursor":
]

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -744,7 +744,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -799,7 +799,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -897,7 +897,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -994,7 +994,7 @@ suite "SQLite driver - query by cursor":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -1049,7 +1049,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -1099,7 +1099,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -1194,7 +1194,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -1247,7 +1247,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -1300,7 +1300,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -1355,7 +1355,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -1411,7 +1411,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -1470,7 +1470,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
info "randomized message insertion sequence", sequence = messages.mapIt(it.payload)

for msg in messages:
require (

@ -1561,7 +1561,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -1652,7 +1652,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -1744,7 +1744,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

@ -1837,7 +1837,7 @@ suite "SQLite driver - query by time range":
var messages = expected

shuffle(messages)
debug "randomized message insertion sequence",
info "randomized message insertion sequence",
sequence = messages.mapIt(it[1].payload)

for row in messages:

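The hunks above and below are mechanical, but the reason they matter comes from how Chronicles (the logging library used throughout this codebase) treats levels: records below the active level are filtered out, and depending on compile-time settings debug lines may not even be compiled in. A minimal sketch of the difference, not part of the diff itself:

# Illustrative only: with the common default of INFO (e.g. built with
# -d:chronicles_log_level=INFO), the debug line below disappears while
# the info line is emitted — which is why this commit promotes these logs.
import chronicles

proc demo() =
  debug "dropped at the default INFO level", detail = 42
  info "emitted at the default INFO level", detail = 42

demo()
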
@ -160,7 +160,7 @@ suite "Onchain group manager":

try:
for i in 0 ..< credentials.len():
debug "Registering credential", index = i, credential = credentials[i]
info "Registering credential", index = i, credential = credentials[i]
waitFor manager.register(credentials[i], UserMessageLimit(20))
discard waitFor manager.updateRoots()
except Exception, CatchableError:

@ -282,7 +282,7 @@ suite "Onchain group manager":
let messageBytes = "Hello".toBytes()

let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()
info "epoch in bytes", epochHex = epoch.inHex()

let validProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(1)

@ -315,7 +315,7 @@ suite "Onchain group manager":
let messageBytes = "Hello".toBytes()

let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()
info "epoch in bytes", epochHex = epoch.inHex()

let validProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(1)

@ -365,7 +365,7 @@ suite "Onchain group manager":

# prepare the epoch
let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()
info "epoch in bytes", epochHex = epoch.inHex()

# generate proof
let validProof = manager.generateProof(

@ -400,7 +400,7 @@ suite "Onchain group manager":
manager.merkleProofCache[i] = byte(rand(255))

let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()
info "epoch in bytes", epochHex = epoch.inHex()

# generate proof
let invalidProofRes = manager.generateProof(

@ -53,7 +53,7 @@ suite "Waku rln relay":
check:
# the id trapdoor, nullifier, secert hash and commitment together are 4*32 bytes
generatedKeys.len == 4 * 32
debug "generated keys: ", generatedKeys
info "generated keys: ", generatedKeys

test "membership Key Generation":
# create an RLN instance

@ -77,7 +77,7 @@ suite "Waku rln relay":
idCredential.idSecretHash != empty
idCredential.idCommitment != empty

debug "the generated identity credential: ", idCredential
info "the generated identity credential: ", idCredential

test "setMetadata rln utils":
# create an RLN instance which also includes an empty Merkle tree

@ -162,7 +162,7 @@ suite "Waku rln relay":
hashOutput = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
hashOutputHex = hashOutput.toHex()

debug "hash output", hashOutputHex
info "hash output", hashOutputHex

test "sha256 hash utils":
# create an RLN instance

@ -255,7 +255,7 @@ suite "Waku rln relay":
decodedEpoch = epochBytes.fromEpoch()
check:
epoch == decodedEpoch
debug "encoded and decode time",
info "encoded and decode time",
epoch = epoch, epochBytes = epochBytes, decodedEpoch = decodedEpoch

test "Epoch comparison, epoch1 > epoch2":

@ -547,7 +547,7 @@ suite "Waku rln relay":
idCredential.idSecretHash != empty
idCredential.idCommitment != empty

debug "the generated identity credential: ", idCredential
info "the generated identity credential: ", idCredential

let index = MembershipIndex(1)

@ -72,7 +72,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1
info "Updated root for node1", rootUpdated1

# node 2
(await node2.mountRelay()).isOkOr:

@ -85,7 +85,7 @@ procSuite "WakuNode - RLN relay":

let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2
info "Updated root for node2", rootUpdated2

# node 3
(await node3.mountRelay()).isOkOr:

@ -98,7 +98,7 @@ procSuite "WakuNode - RLN relay":

let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
debug "Updated root for node3", rootUpdated3
info "Updated root for node3", rootUpdated3

# connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

@ -108,7 +108,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic
info "The received topic:", topic
if topic == DefaultPubsubTopic:
completionFut.complete(true)

@ -139,7 +139,7 @@ procSuite "WakuNode - RLN relay":
.isOk()
)

debug " Nodes participating in the test",
info " Nodes participating in the test",
node1 = shortLog(node1.switch.peerInfo.peerId),
node2 = shortLog(node2.switch.peerInfo.peerId),
node3 = shortLog(node3.switch.peerInfo.peerId)

@ -189,7 +189,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", node = index + 1, rootUpdated = rootUpdated
info "Updated root for node", node = index + 1, rootUpdated = rootUpdated

# connect them together
await nodes[0].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()])

@ -308,7 +308,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1
info "Updated root for node1", rootUpdated1

# node 2
(await node2.mountRelay()).isOkOr:

@ -321,7 +321,7 @@ procSuite "WakuNode - RLN relay":

let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2
info "Updated root for node2", rootUpdated2

# node 3
(await node3.mountRelay()).isOkOr:

@ -334,7 +334,7 @@ procSuite "WakuNode - RLN relay":

let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
debug "Updated root for node3", rootUpdated3
info "Updated root for node3", rootUpdated3

# connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

@ -345,7 +345,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic
info "The received topic:", topic
if topic == DefaultPubsubTopic:
completionFut.complete(true)

@ -425,7 +425,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1
info "Updated root for node1", rootUpdated1

# node 2
(await node2.mountRelay()).isOkOr:

@ -440,7 +440,7 @@ procSuite "WakuNode - RLN relay":
# Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2
info "Updated root for node2", rootUpdated2

# node 3
(await node3.mountRelay()).isOkOr:

@ -455,7 +455,7 @@ procSuite "WakuNode - RLN relay":
# Registration is mandatory before sending messages with rln-relay
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
debug "Updated root for node3", rootUpdated3
info "Updated root for node3", rootUpdated3

# connect the nodes together node1 <-> node2 <-> node3
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

@ -510,7 +510,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic
info "The received topic:", topic
if topic == DefaultPubsubTopic:
if msg == wm1:
completionFut1.complete(true)

@ -592,7 +592,7 @@ procSuite "WakuNode - RLN relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated1 = waitFor manager1.updateRoots()
debug "Updated root for node1", rootUpdated1
info "Updated root for node1", rootUpdated1

# Mount rlnrelay in node2 in off-chain mode
(await node2.mountRelay()).isOkOr:

@ -604,7 +604,7 @@ procSuite "WakuNode - RLN relay":
# Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
debug "Updated root for node2", rootUpdated2
info "Updated root for node2", rootUpdated2

# Given the two nodes are started and connected
waitFor allFutures(node1.start(), node2.start())

@ -636,7 +636,7 @@ procSuite "WakuNode - RLN relay":
proc relayHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
debug "The received topic:", topic
info "The received topic:", topic
if topic == DefaultPubsubTopic:
if msg == wm1:
completionFut1.complete(true)

@ -238,7 +238,7 @@ proc deployTestToken*(
return err(
"Failed to get TestToken contract address from deploy script output: " & $error
)
debug "Address of the TestToken contract", testTokenAddress
info "Address of the TestToken contract", testTokenAddress

let testTokenAddressBytes = hexToByteArray[20](testTokenAddress)
let testTokenAddressAddress = Address(testTokenAddressBytes)

@ -334,7 +334,7 @@ proc executeForgeContractDeployScripts*(
return err("Submodule path does not exist: " & submodulePath)

let forgePath = getForgePath()
debug "Forge path", forgePath
info "Forge path", forgePath

# Verify forge executable exists
if not fileExists(forgePath):

@ -363,7 +363,7 @@ proc executeForgeContractDeployScripts*(
if priceCalculatorAddressRes.isErr():
error "Failed to get LinearPriceCalculator contract address from deploy script output"
let priceCalculatorAddress = priceCalculatorAddressRes.get()
debug "Address of the LinearPriceCalculator contract", priceCalculatorAddress
info "Address of the LinearPriceCalculator contract", priceCalculatorAddress
putEnv("PRICE_CALCULATOR_ADDRESS", priceCalculatorAddress)

let forgeCmdWakuRln =

@ -382,7 +382,7 @@ proc executeForgeContractDeployScripts*(
error "Failed to get WakuRlnV2 contract address from deploy script output"
##TODO: raise exception here?
let wakuRlnV2Address = wakuRlnV2AddressRes.get()
debug "Address of the WakuRlnV2 contract", wakuRlnV2Address
info "Address of the WakuRlnV2 contract", wakuRlnV2Address
putEnv("WAKURLNV2_ADDRESS", wakuRlnV2Address)

# Deploy Proxy contract

@ -490,7 +490,7 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
# See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details
try:
let anvilPath = getAnvilPath()
debug "Anvil path", anvilPath
info "Anvil path", anvilPath
let runAnvil = startProcess(
anvilPath,
args = [

@ -518,7 +518,7 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
break
except Exception, CatchableError:
break
debug "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog
info "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog
return runAnvil
except: # TODO: Fix "BareExcept" warning
error "Anvil daemon run failed", err = getCurrentExceptionMsg()

@ -526,11 +526,11 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
# Stops Anvil daemon
proc stopAnvil*(runAnvil: Process) {.used.} =
if runAnvil.isNil:
debug "stopAnvil called with nil Process"
info "stopAnvil called with nil Process"
return

let anvilPID = runAnvil.processID
debug "Stopping Anvil daemon", anvilPID = anvilPID
info "Stopping Anvil daemon", anvilPID = anvilPID

try:
# Send termination signals

@ -542,9 +542,9 @@ proc stopAnvil*(runAnvil: Process) {.used.} =

# Close Process object to release resources
close(runAnvil)
debug "Anvil daemon stopped", anvilPID = anvilPID
info "Anvil daemon stopped", anvilPID = anvilPID
except Exception as e:
debug "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg
info "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg

proc setupOnchainGroupManager*(
ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256

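The runAnvil/stopAnvil hunks above follow the usual external-daemon lifecycle: spawn the process, log its PID, then terminate and close it on teardown. A hedged sketch of that generic pattern with Nim's std/osproc — the binary path and arguments here are placeholders, not the repo's actual configuration:

# Illustrative sketch only; mirrors the start/stop shape of runAnvil/stopAnvil.
import std/osproc

proc startDaemon(path: string, args: openArray[string]): Process =
  # poUsePath resolves the binary via PATH; stderr is folded into stdout.
  startProcess(path, args = @args, options = {poUsePath, poStdErrToStdOut})

proc stopDaemon(p: Process) =
  if p.isNil:
    return
  p.terminate()            # ask the daemon to exit
  discard p.waitForExit()  # reap it so no zombie is left behind
  p.close()                # release pipes and OS handles

let anvil = startDaemon("anvil", ["--port", "8540", "--chain-id", "1234"])
stopDaemon(anvil)
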
@ -274,7 +274,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated
info "Updated root for node", rootUpdated

let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr():

@ -525,7 +525,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated
info "Updated root for node", rootUpdated

let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr():

@ -597,7 +597,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated
info "Updated root for node", rootUpdated

let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr():

@ -659,7 +659,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated
info "Updated root for node", rootUpdated

let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr():

@ -734,7 +734,7 @@ suite "Waku v2 Rest API - Relay":
"exception raised when calling register: " & getCurrentExceptionMsg()

let rootUpdated = waitFor manager.updateRoots()
debug "Updated root for node", rootUpdated
info "Updated root for node", rootUpdated

let proofRes = waitFor manager.fetchMerkleProofElements()
if proofRes.isErr():

@ -45,7 +45,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
quit(1)

let credential = credentialRes.get()
debug "credentials",
info "credentials",
idTrapdoor = credential.idTrapdoor.inHex(),
idNullifier = credential.idNullifier.inHex(),
idSecretHash = credential.idSecretHash.inHex(),

@ -90,7 +90,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
error = getCurrentExceptionMsg()
quit(1)

debug "Transaction hash", txHash = groupManager.registrationTxHash.get()
info "Transaction hash", txHash = groupManager.registrationTxHash.get()

info "Your membership has been registered on-chain.",
chainId = $groupManager.chainId,

@ -280,7 +280,7 @@ proc dbConnQuery*(
query_count.inc(labelValues = [metricLabel])

if "insert" notin ($query).toLower():
debug "dbConnQuery",
info "dbConnQuery",
requestId,
query = $query,
args,

@ -321,7 +321,7 @@ proc dbConnQueryPrepared*(
query_count.inc(labelValues = [stmtName])

if "insert" notin stmtName.toLower():
debug "dbConnQueryPrepared",
info "dbConnQueryPrepared",
requestId,
stmtName,
paramValues,

@ -57,9 +57,9 @@ proc close*(pool: PgAsyncPool): Future[Result[void, string]] {.async.} =
# wait for the connections to be released and close them, without
# blocking the async runtime

debug "close PgAsyncPool"
info "close PgAsyncPool"
await allFutures(pool.conns.mapIt(it.futBecomeFree))
debug "closing all connection PgAsyncPool"
info "closing all connection PgAsyncPool"

for i in 0 ..< pool.conns.len:
if pool.conns[i].isPgDbConnOpen():

@ -128,7 +128,7 @@ proc pgQuery*(
defer:
let queryDuration = getNowInNanosecondTime() - queryStartTime
if queryDuration > SlowQueryThreshold.nanos:
debug "pgQuery slow query",
info "pgQuery slow query",
query_duration_secs = (queryDuration / 1_000_000_000), query, requestId

(await dbConnWrapper.dbConnQuery(sql(query), args, rowCallback, requestId)).isOkOr:

@ -162,7 +162,7 @@ proc runStmt*(
defer:
let queryDuration = getNowInNanosecondTime() - queryStartTime
if queryDuration > SlowQueryThreshold.nanos:
debug "runStmt slow query",
info "runStmt slow query",
query_duration = queryDuration / 1_000_000_000,
query = stmtDefinition,
requestId

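The pgQuery and runStmt hunks above both use the same slow-query idiom: take a timestamp before the call and, in a deferred block, log only when the elapsed time crosses a threshold, so the hot path stays quiet. A minimal sketch of the idiom, substituting std/monotimes for the repo's getNowInNanosecondTime helper:

# Illustrative sketch; threshold and label names are assumptions, not repo values.
import std/[monotimes, times]
import chronicles

const SlowQueryThreshold = initDuration(seconds = 1)

template timed(label: string, body: untyped) =
  let start = getMonoTime()
  defer:
    let elapsed = getMonoTime() - start
    if elapsed > SlowQueryThreshold:
      info "slow query", query = label, elapsedMs = elapsed.inMilliseconds
  body

# Usage: timed "SELECT ...": runTheQuery()
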
@ -383,7 +383,7 @@ proc listSqlScripts(path: string): DatabaseResult[seq[string]] =
if isSqlScript(scriptPath):
scripts.add(scriptPath)
else:
debug "invalid migration script", file = scriptPath
info "invalid migration script", file = scriptPath
except OSError:
return err("failed to list migration scripts: " & getCurrentExceptionMsg())

@ -448,7 +448,7 @@ proc migrate*(
let userVersion = ?db.getUserVersion()

if userVersion == targetVersion:
debug "database schema is up to date",
info "database schema is up to date",
userVersion = userVersion, targetVersion = targetVersion
return ok()

@ -466,7 +466,7 @@ proc migrate*(
migrationScriptsPaths = sortMigrationScripts(migrationScriptsPaths)

if migrationScriptsPaths.len <= 0:
debug "no scripts to be run"
info "no scripts to be run"
return ok()

let scripts = ?loadMigrationScripts(migrationScriptsPaths)

@ -474,7 +474,7 @@ proc migrate*(
# Run the migration scripts
for script in scripts:
for statement in script.breakIntoStatements():
debug "executing migration statement", statement = statement
info "executing migration statement", statement = statement

let execRes = db.query(statement, NoopRowHandler)
if execRes.isErr():

@ -482,12 +482,12 @@ proc migrate*(
statement = statement, error = execRes.error
return err("failed to execute migration statement")

debug "migration statement executed succesfully", statement = statement
info "migration statement executed succesfully", statement = statement

# Update user_version
?db.setUserVersion(targetVersion)

debug "database user_version updated", userVersion = targetVersion
info "database user_version updated", userVersion = targetVersion
ok()

proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] =

@ -495,11 +495,11 @@ proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] =
# TODO: Run vacuuming conditionally based on database page stats
# if (pageCount > 0 and freelistCount > 0):

debug "starting sqlite database vacuuming"
info "starting sqlite database vacuuming"

let resVacuum = db.vacuum()
if resVacuum.isErr():
return err("failed to execute vacuum: " & resVacuum.error)

debug "finished sqlite database vacuuming"
info "finished sqlite database vacuuming"
ok()

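The migrate proc above gates on getUserVersion/setUserVersion, which in SQLite map to the built-in `PRAGMA user_version` counter: an integer stored in the database header that the migration scripts advance. A hedged sketch of what those two helpers boil down to — the exec/queryValue calls are hypothetical stand-ins for the repo's own query API:

# Sketch under stated assumptions, not the repo's actual implementation.
proc getUserVersion(db: SqliteDatabase): int64 =
  db.queryValue("PRAGMA user_version")          # hypothetical helper

proc setUserVersion(db: SqliteDatabase, v: int64) =
  db.exec("PRAGMA user_version = " & $v)        # hypothetical helper
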
@ -24,7 +24,7 @@ template heartbeat*(name: string, interval: Duration, body: untyped): untyped =
info "Missed multiple heartbeats",
heartbeat = name, delay = delay, hinterval = itv
else:
debug "Missed heartbeat", heartbeat = name, delay = delay, hinterval = itv
info "Missed heartbeat", heartbeat = name, delay = delay, hinterval = itv

nextHeartbeat = now + itv

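For context on the hunk above: heartbeat is a template that runs its body once per interval and logs whenever a beat is missed — a single miss was previously debug and is now info, while multiple misses were already info. A usage sketch, assuming the signature shown (name, interval, body); the node and proc names are hypothetical:

# Illustrative usage only.
import chronos

proc keepaliveLoop(node: WakuNode) {.async.} =
  heartbeat "relay-keepalive", 30.seconds:
    await node.pingRandomPeer()   # hypothetical periodic work
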
@ -60,14 +60,14 @@ proc shardingPredicate*(
): Option[WakuDiscv5Predicate] =
## Filter peers based on relay sharding information
let typedRecord = record.toTyped().valueOr:
debug "peer filtering failed", reason = error
info "peer filtering failed", reason = error
return none(WakuDiscv5Predicate)

let nodeShard = typedRecord.relaySharding().valueOr:
debug "no relay sharding information, peer filtering disabled"
info "no relay sharding information, peer filtering disabled"
return none(WakuDiscv5Predicate)

debug "peer filtering updated"
info "peer filtering updated"

let predicate = proc(record: waku_enr.Record): bool =
bootnodes.contains(record) or # Temp. Bootnode exception

@ -124,7 +124,7 @@ proc updateAnnouncedMultiAddress*(
wd.protocol.updateRecord([(MultiaddrEnrField, encodedAddrs)]).isOkOr:
return err("failed to update multiaddress in ENR: " & $error)

debug "ENR updated successfully with new multiaddress",
info "ENR updated successfully with new multiaddress",
enrUri = wd.protocol.localNode.record.toUri(), enr = $(wd.protocol.localNode.record)

return ok()

@ -312,15 +312,15 @@ proc subscriptionsListener(wd: WakuDiscoveryV5) {.async.} =
let subRes = wd.updateENRShards(subs, true)

if subRes.isErr():
debug "ENR shard addition failed", reason = $subRes.error
info "ENR shard addition failed", reason = $subRes.error

if unsubRes.isErr():
debug "ENR shard removal failed", reason = $unsubRes.error
info "ENR shard removal failed", reason = $unsubRes.error

if subRes.isErr() and unsubRes.isErr():
continue

debug "ENR updated successfully",
info "ENR updated successfully",
enrUri = wd.protocol.localNode.record.toUri(),
enr = $(wd.protocol.localNode.record)

@ -335,7 +335,7 @@ proc start*(wd: WakuDiscoveryV5): Future[Result[void, string]] {.async: (raises:

info "Starting discovery v5 service"

debug "start listening on udp port", address = $wd.conf.address, port = $wd.conf.port
info "start listening on udp port", address = $wd.conf.address, port = $wd.conf.port
try:
wd.protocol.open()
except CatchableError:

@ -349,7 +349,7 @@ proc start*(wd: WakuDiscoveryV5): Future[Result[void, string]] {.async: (raises:
asyncSpawn wd.searchLoop()
asyncSpawn wd.subscriptionsListener()

debug "Successfully started discovery v5 service"
info "Successfully started discovery v5 service"
info "Discv5: discoverable ENR ",
enrUri = wd.protocol.localNode.record.toUri(), enr = $(wd.protocol.localNode.record)

@ -365,7 +365,7 @@ proc stop*(wd: WakuDiscoveryV5): Future[void] {.async.} =
trace "Stop listening on discv5 port"
await wd.protocol.closeWait()

debug "Successfully stopped discovery v5 service"
info "Successfully stopped discovery v5 service"

## Helper functions

@ -395,7 +395,7 @@ proc addBootstrapNode*(bootstrapAddr: string, bootstrapEnrs: var seq[enr.Record]

let enrRes = parseBootstrapAddress(bootstrapAddr)
if enrRes.isErr():
debug "ignoring invalid bootstrap address", reason = enrRes.error
info "ignoring invalid bootstrap address", reason = enrRes.error
return

bootstrapEnrs.add(enrRes.value)

@ -422,7 +422,7 @@ proc setupDiscoveryV5*(

for enr in discv5BootstrapEnrs:
let peerInfo = enr.toRemotePeerInfo().valueOr:
debug "could not convert discv5 bootstrap node to peerInfo, not adding peer to Peer Store",
info "could not convert discv5 bootstrap node to peerInfo, not adding peer to Peer Store",
enr = enr.toUri(), error = error
continue
nodePeerManager.addPeer(peerInfo, PeerOrigin.Discv5)

@ -37,7 +37,7 @@ type WakuDnsDiscovery* = object
#####################

proc emptyResolver*(domain: string): Future[string] {.async, gcsafe.} =
debug "Empty resolver called", domain = domain
info "Empty resolver called", domain = domain
return ""

proc findPeers*(

@ -83,13 +83,13 @@ proc init*(
): Result[T, cstring] =
## Initialise Waku peer discovery via DNS

debug "init WakuDnsDiscovery", locationUrl = locationUrl
info "init WakuDnsDiscovery", locationUrl = locationUrl

let
client = ?Client.init(locationUrl)
wakuDnsDisc = WakuDnsDiscovery(client: client, resolver: resolver)

debug "init success"
info "init success"

return ok(wakuDnsDisc)

@ -100,7 +100,7 @@ proc retrieveDynamicBootstrapNodes*(

if dnsDiscoveryUrl != "":
# DNS discovery
debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl
info "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl

var nameServers: seq[TransportAddress]
for ip in dnsAddrsNameServers:

@ -118,5 +118,5 @@ proc retrieveDynamicBootstrapNodes*(
return (await value.findPeers()).mapErr(e => $e)
warn "Failed to init Waku DNS discovery"

debug "No method for retrieving dynamic bootstrap nodes specified."
info "No method for retrieving dynamic bootstrap nodes specified."
ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default

@ -282,7 +282,7 @@ proc setupProtocols(
# only peers with populated records
.mapIt(toRemotePeerInfo(it.record.get()))

debug "adding exchanged peers",
info "adding exchanged peers",
src = peer, topic = topic, numPeers = exchangedPeers.len

for peer in exchangedPeers:

@ -301,7 +301,7 @@ proc setupProtocols(
else:
@[]

debug "Shards created from content topics",
info "Shards created from content topics",
contentTopics = conf.contentTopics, shards = autoShards

let confShards = conf.subscribeShards.mapIt(

@ -310,7 +310,7 @@ proc setupProtocols(
let shards = confShards & autoShards

if conf.relay:
debug "Setting max message size", num_bytes = conf.maxMessageSizeBytes
info "Setting max message size", num_bytes = conf.maxMessageSizeBytes

(
await mountRelay(

@ -500,7 +500,7 @@ proc setupNode*(
error "cluster id mismatch configured shards"
return err("cluster id mismatch configured shards")

debug "Setting up storage"
info "Setting up storage"

## Peer persistence
var peerStore: Option[WakuPeerStorage]

@ -509,13 +509,13 @@ proc setupNode*(
error "Setting up storage failed", error = "failed to setup peer store " & error
return err("Setting up storage failed: " & error)

debug "Initializing node"
info "Initializing node"

let node = initNode(wakuConf, netConfig, rng, record, peerStore, relay).valueOr:
error "Initializing node failed", error = error
return err("Initializing node failed: " & error)

debug "Mounting protocols"
info "Mounting protocols"

try:
(await node.setupProtocols(wakuConf)).isOkOr:

@ -53,7 +53,7 @@ proc withinTimeWindow*(msg: WakuMessage): bool =
proc addSignedShardsValidator*(
w: WakuRelay, protectedShards: seq[ProtectedShard], clusterId: uint16
) =
debug "adding validator to signed shards", protectedShards, clusterId
info "adding validator to signed shards", protectedShards, clusterId

proc validator(
topic: string, msg: WakuMessage

@ -72,7 +72,7 @@ proc addSignedShardsValidator*(
outcome = errors.ValidationResult.Accept

if outcome != errors.ValidationResult.Accept:
debug "signed topic validation failed",
info "signed topic validation failed",
topic = topic, publicShardKey = protectedShard.key
waku_msg_validator_signed_outcome.inc(labelValues = [$outcome])
return outcome

@ -82,12 +82,12 @@ proc setupSwitchServices(
waku: Waku, conf: WakuConf, circuitRelay: Relay, rng: ref HmacDrbgContext
) =
proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} =
debug "circuit relay handler new reserve event",
info "circuit relay handler new reserve event",
addrs_before = $(waku.node.announcedAddresses), addrs = $addresses

waku.node.announcedAddresses.setLen(0) ## remove previous addresses
waku.node.announcedAddresses.add(addresses)
debug "waku node announced addresses updated",
info "waku node announced addresses updated",
announcedAddresses = waku.node.announcedAddresses

if not isNil(waku.wakuDiscv5):

@ -297,7 +297,7 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
waku[].node.enr.update(parsedPk, extraFields = enrFields).isOkOr:
return err("failed to update multiaddress in ENR updateAddressInENR: " & $error)

debug "Waku node ENR updated successfully with new multiaddress",
info "Waku node ENR updated successfully with new multiaddress",
enr = waku[].node.enr.toUri(), record = $(waku[].node.enr)

## Now update the ENR infor in discv5

@ -305,7 +305,7 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
waku[].wakuDiscv5.protocol.localNode.record = waku[].node.enr
let enr = waku[].wakuDiscv5.protocol.localNode.record

debug "Waku discv5 ENR updated successfully with new multiaddress",
info "Waku discv5 ENR updated successfully with new multiaddress",
enr = enr.toUri(), record = $(enr)

return ok()

@ -365,7 +365,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
warn "startWaku: waku node already started"
return ok()

debug "Retrieve dynamic bootstrap nodes"
info "Retrieve dynamic bootstrap nodes"
let conf = waku[].conf

if conf.dnsDiscoveryConf.isSome():

@ -42,16 +42,16 @@ proc mountLegacyLightPush*(

let pushHandler =
if node.wakuRelay.isNil:
debug "mounting legacy lightpush without relay (nil)"
info "mounting legacy lightpush without relay (nil)"
legacy_lightpush_protocol.getNilPushHandler()
else:
debug "mounting legacy lightpush with relay"
info "mounting legacy lightpush with relay"
let rlnPeer =
if isNil(node.wakuRlnRelay):
debug "mounting legacy lightpush without rln-relay"
info "mounting legacy lightpush without rln-relay"
none(WakuRLNRelay)
else:
debug "mounting legacy lightpush with rln-relay"
info "mounting legacy lightpush with rln-relay"
some(node.wakuRlnRelay)
legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)

@ -157,16 +157,16 @@ proc mountLightPush*(

let pushHandler =
if node.wakuRelay.isNil():
debug "mounting lightpush v2 without relay (nil)"
info "mounting lightpush v2 without relay (nil)"
lightpush_protocol.getNilPushHandler()
else:
debug "mounting lightpush with relay"
info "mounting lightpush with relay"
let rlnPeer =
if isNil(node.wakuRlnRelay):
debug "mounting lightpush without rln-relay"
info "mounting lightpush without rln-relay"
none(WakuRLNRelay)
else:
debug "mounting lightpush with rln-relay"
info "mounting lightpush with rln-relay"
some(node.wakuRlnRelay)
lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)

@ -49,7 +49,7 @@ proc pingPeer(node: WakuNode, peerId: PeerId): Future[Result[void, string]] {.as
try:
await stream.close()
except CatchableError as e:
debug "Error closing ping connection", peerId = peerId, error = e.msg
info "Error closing ping connection", peerId = peerId, error = e.msg

# Perform ping
let pingDuration = await node.libp2pPing.ping(stream)

@ -157,7 +157,7 @@ proc unsubscribe*(
warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic
return ok()

debug "unsubscribe", pubsubTopic, contentTopicOp
info "unsubscribe", pubsubTopic, contentTopicOp
node.wakuRelay.unsubscribe(pubsubTopic)
node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic))

@ -250,7 +250,7 @@ proc mountRlnRelay*(
let validator = generateRlnValidator(rlnRelay, spamHandler)

# register rln validator as default validator
debug "Registering RLN validator"
info "Registering RLN validator"
node.wakuRelay.addValidator(validator, "RLN validation failed")

node.wakuRlnRelay = rlnRelay

@ -15,12 +15,12 @@ template projectRoot(): string =
const PeerStoreMigrationPath: string = projectRoot / "migrations" / "sent_msgs"

proc migrate*(db: SqliteDatabase): DatabaseResult[void] =
debug "starting peer store's sqlite database migration for sent messages"
info "starting peer store's sqlite database migration for sent messages"

let migrationRes =
migrate(db, TargetSchemaVersion, migrationsScriptsDir = PeerStoreMigrationPath)
if migrationRes.isErr():
return err("failed to execute migration scripts: " & migrationRes.error)

debug "finished peer store's sqlite database migration for sent messages"
info "finished peer store's sqlite database migration for sent messages"
ok()

@ -76,7 +76,7 @@ proc performDeliveryFeedback(
success, dir, comment, msg_hash
return

debug "recv monitor performDeliveryFeedback",
info "recv monitor performDeliveryFeedback",
success, dir, comment, msg_hash = shortLog(msgHash)
self.deliveryCb(success, dir, comment, msgHash, msg)

@ -129,7 +129,7 @@ proc msgChecker(self: RecvMonitor) {.async.} =
method onSubscribe(
self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
) {.gcsafe, raises: [].} =
debug "onSubscribe", pubsubTopic, contentTopics
info "onSubscribe", pubsubTopic, contentTopics
self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest):
contentTopicsOfInterest[].add(contentTopics)
do:

@ -138,7 +138,7 @@ method onSubscribe(
method onUnsubscribe(
self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
) {.gcsafe, raises: [].} =
debug "onUnsubscribe", pubsubTopic, contentTopics
info "onUnsubscribe", pubsubTopic, contentTopics

self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest):
let remainingCTopics =

@ -90,7 +90,7 @@ proc performFeedbackAndCleanup(
return

for hash, deliveryInfo in msgsToDiscard:
debug "send monitor performFeedbackAndCleanup",
info "send monitor performFeedbackAndCleanup",
success, dir, comment, msg_hash = shortLog(hash)

self.deliveryCb(success, dir, comment, hash, deliveryInfo.msg)

@ -170,14 +170,14 @@ proc processMessages(self: SendMonitor) {.async.} =
let pubsubTopic = deliveryInfo.pubsubTopic
let msg = deliveryInfo.msg
if not self.wakuRelay.isNil():
debug "trying to publish again with wakuRelay", msgHash, pubsubTopic
info "trying to publish again with wakuRelay", msgHash, pubsubTopic
(await self.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
error "could not publish with wakuRelay.publish",
msgHash, pubsubTopic, error = $error
continue

if not self.wakuLightpushClient.isNil():
debug "trying to publish again with wakuLightpushClient", msgHash, pubsubTopic
info "trying to publish again with wakuLightpushClient", msgHash, pubsubTopic
(await self.wakuLightpushClient.publishToAny(pubsubTopic, msg)).isOkOr:
error "could not publish with publishToAny", error = $error
continue

@ -196,7 +196,7 @@ method onMessagePublished(
## When publishing a message either through relay or lightpush, we want to add some extra effort
## to make sure it is received to one store node. Hence, keep track of those published messages.

debug "onMessagePublished"
info "onMessagePublished"
let msgHash = computeMessageHash(pubSubTopic, msg)

if not self.publishedMessages.hasKey(msgHash):

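The processMessages hunk above encodes a republish strategy worth spelling out: prefer relay when it is mounted, fall back to the lightpush client otherwise, and on failure just log and move on so one bad message cannot stall the loop. A condensed sketch of that shape, reusing the field and proc names visible in the diff (the surrounding proc is a hedged simplification, not the actual code):

# Condensed sketch of the retry strategy shown above.
proc tryRepublish(self: SendMonitor, pubsubTopic: string, msg: WakuMessage) {.async.} =
  if not self.wakuRelay.isNil():
    (await self.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
      error "relay republish failed", error = $error
    return
  if not self.wakuLightpushClient.isNil():
    (await self.wakuLightpushClient.publishToAny(pubsubTopic, msg)).isOkOr:
      error "lightpush republish failed", error = $error
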
@ -583,7 +583,7 @@ proc reconnectPeers*(
|
||||
## Reconnect to peers registered for this protocol. This will update connectedness.
|
||||
## Especially useful to resume connections from persistent storage after a restart.
|
||||
|
||||
debug "Reconnecting peers", proto = proto
|
||||
info "Reconnecting peers", proto = proto
|
||||
|
||||
# Proto is not persisted, we need to iterate over all peers.
|
||||
for peerInfo in pm.switch.peerStore.peers(protocolMatcher(proto)):
|
||||
@ -594,7 +594,7 @@ proc reconnectPeers*(
|
||||
continue
|
||||
|
||||
if backoffTime > ZeroDuration:
|
||||
debug "Backing off before reconnect",
|
||||
info "Backing off before reconnect",
|
||||
peerId = peerInfo.peerId, backoffTime = backoffTime
|
||||
# We disconnected recently and still need to wait for a backoff period before connecting
|
||||
await sleepAsync(backoffTime)
|
||||
@ -682,7 +682,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
let inRelayPeers = pm.connectedPeers(WakuRelayCodec)[0]
if inRelayPeers.len > pm.inRelayPeersTarget and
peerStore.hasPeer(peerId, WakuRelayCodec):
debug "disconnecting relay peer because reached max num in-relay peers",
info "disconnecting relay peer because reached max num in-relay peers",
peerId = peerId,
inRelayPeers = inRelayPeers.len,
inRelayPeersTarget = pm.inRelayPeersTarget

@ -698,7 +698,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
# pm.colocationLimit == 0 disables the ip colocation limit
if pm.colocationLimit != 0 and peersBehindIp.len > pm.colocationLimit:
for peerId in peersBehindIp[0 ..< (peersBehindIp.len - pm.colocationLimit)]:
debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip
info "Pruning connection due to ip colocation", peerId = peerId, ip = ip
asyncSpawn(pm.switch.disconnect(peerId))
peerStore.delete(peerId)

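The pruning slice above keeps only `colocationLimit` connections per IP and disconnects the excess prefix. A tiny standalone illustration of the same arithmetic (ordering of the sequence is assumed here, not shown in the diff):

# peersBehindIp is treated as oldest-first in this sketch
let peersBehindIp = @["peerA", "peerB", "peerC", "peerD"]
let colocationLimit = 2

# drop the leading entries so at most colocationLimit remain
for peer in peersBehindIp[0 ..< (peersBehindIp.len - colocationLimit)]:
  echo "would disconnect ", peer # peerA, peerB
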
@ -721,7 +721,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
# we don't want to await for the callback to finish
asyncSpawn pm.onConnectionChange(peerId, Left)
of Identified:
debug "event identified", peerId = peerId
info "event identified", peerId = peerId

peerStore[ConnectionBook][peerId] = connectedness
peerStore[DirectionBook][peerId] = direction

@ -861,7 +861,7 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =

let relayCount = connectablePeers.len

debug "Sharded Peer Management",
info "Sharded Peer Management",
shard = shard,
connectable = $connectableCount & "/" & $shardCount,
relayConnectable = $relayCount & "/" & $shardCount,

@ -22,12 +22,12 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
## If not `targetVersion` is provided, it defaults to `SchemaVersion`.
##
## NOTE: Down migration it is not currently supported
debug "starting peer store's sqlite database migration"
info "starting peer store's sqlite database migration"

let migrationRes =
migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath)
if migrationRes.isErr():
return err("failed to execute migration scripts: " & migrationRes.error)

debug "finished peer store's sqlite database migration"
info "finished peer store's sqlite database migration"
ok()

@ -31,7 +31,7 @@ proc getSecureKey(path: string): TLSPrivateKey {.raises: [Defect, IOError].} =
let key = TLSPrivateKey.init(stringkey)
return key
except TLSStreamProtocolError as exc:
debug "exception raised from getSecureKey", err = exc.msg
info "exception raised from getSecureKey", err = exc.msg

proc getSecureCert(path: string): TLSCertificate {.raises: [Defect, IOError].} =
trace "Certificate path is.", path = path

@ -40,7 +40,7 @@ proc getSecureCert(path: string): TLSCertificate {.raises: [Defect, IOError].} =
let cert = TLSCertificate.init(stringCert)
return cert
except TLSStreamProtocolError as exc:
debug "exception raised from getSecureCert", err = exc.msg
info "exception raised from getSecureCert", err = exc.msg

proc withWssTransport*(
b: SwitchBuilder, secureKeyPath: string, secureCertPath: string

@ -17,7 +17,7 @@ proc installDebugInfoV1Handler(router: var RestRouter, node: WakuNode) =
let info = node.info().toDebugWakuInfo()
let resp = RestApiResponse.jsonResponse(info, status = Http200)
if resp.isErr():
debug "An error occurred while building the json respose", error = resp.error
info "An error occurred while building the json respose", error = resp.error
return RestApiResponse.internalServerError()

return resp.get()

@ -222,7 +222,7 @@ proc installFilterPostSubscriptionsHandler(
contentBody: Option[ContentBody]
) -> RestApiResponse:
## Subscribes a node to a list of contentTopics of a pubsubTopic
debug "post", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
info "post", ROUTE_FILTER_SUBSCRIPTIONS, contentBody

return await filterPostPutSubscriptionRequestHandler(
node, contentBody, cache, discHandler

@ -238,7 +238,7 @@ proc installFilterPutSubscriptionsHandler(
contentBody: Option[ContentBody]
) -> RestApiResponse:
## Modifies a subscribtion of a node to a list of contentTopics of a pubsubTopic
debug "put", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
info "put", ROUTE_FILTER_SUBSCRIPTIONS, contentBody

return await filterPostPutSubscriptionRequestHandler(
node, contentBody, cache, discHandler

@ -254,7 +254,7 @@ proc installFilterDeleteSubscriptionsHandler(
contentBody: Option[ContentBody]
) -> RestApiResponse:
## Subscribes a node to a list of contentTopics of a PubSub topic
debug "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody
info "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody

let decodedBody = decodeRequestBody[FilterUnsubscribeRequest](contentBody)

@ -306,7 +306,7 @@ proc installFilterDeleteAllSubscriptionsHandler(
contentBody: Option[ContentBody]
) -> RestApiResponse:
## Subscribes a node to a list of contentTopics of a PubSub topic
debug "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody
info "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody

let decodedBody = decodeRequestBody[FilterUnsubscribeAllRequest](contentBody)

@ -357,7 +357,7 @@ proc installFilterPingSubscriberHandler(
requestId: string
) -> RestApiResponse:
## Checks if a node has valid subscription or not.
debug "get", ROUTE_FILTER_SUBSCRIBER_PING, requestId
info "get", ROUTE_FILTER_SUBSCRIBER_PING, requestId

let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let handler = discHandler.valueOr:

@ -397,7 +397,7 @@ proc installFilterGetMessagesHandler(
## Returns all WakuMessages received on a specified content topic since the
## last time this method was called
## TODO: ability to specify a return message limit, maybe use cursor to control paging response.
debug "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic
info "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic

if contentTopic.isErr():
return RestApiResponse.badRequest("Missing contentTopic")

@ -25,7 +25,7 @@ proc installHealthApiHandler*(
if healthReportFut.completed():
let healthReport = healthReportFut.read()
return RestApiResponse.jsonResponse(healthReport, Http200).valueOr:
debug "An error ocurred while building the json healthReport response",
info "An error ocurred while building the json healthReport response",
error = error
return
RestApiResponse.internalServerError("Failed to serialize health report")

@ -48,7 +48,7 @@ proc installLightPushRequestHandler*(
contentBody: Option[ContentBody]
) -> RestApiResponse:
## Send a request to push a waku message
debug "post", ROUTE_LIGHTPUSH, contentBody
info "post", ROUTE_LIGHTPUSH, contentBody

let decodedBody = decodeRequestBody[PushRequest](contentBody)

@ -205,7 +205,7 @@ proc installStoreApiHandlers*(
pageSize: Option[string],
ascending: Option[string]
) -> RestApiResponse:
debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr
info "REST-GET /store/v1/messages ", peer_addr = $peerAddr

# All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding)
# Example:

@ -66,7 +66,7 @@ proc installLightPushRequestHandler*(
contentBody: Option[ContentBody]
) -> RestApiResponse:
## Send a request to push a waku message
debug "post received", ROUTE_LIGHTPUSH
info "post received", ROUTE_LIGHTPUSH
trace "content body", ROUTE_LIGHTPUSH, contentBody

let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr:

@ -61,7 +61,7 @@ proc installRelayApiHandlers*(
) -> RestApiResponse:
## Subscribes a node to a list of PubSub topics

debug "post_waku_v2_relay_v1_subscriptions"
info "post_waku_v2_relay_v1_subscriptions"

# Check the request body
if contentBody.isNone():

@ -90,7 +90,7 @@ proc installRelayApiHandlers*(
contentBody: Option[ContentBody]
) -> RestApiResponse:
# ## Subscribes a node to a list of PubSub topics
# debug "delete_waku_v2_relay_v1_subscriptions"
# info "delete_waku_v2_relay_v1_subscriptions"

# Check the request body
if contentBody.isNone():

@ -124,7 +124,7 @@ proc installRelayApiHandlers*(
# ## Returns all WakuMessages received on a PubSub topic since the
# ## last time this method was called
# ## TODO: ability to specify a return message limit
# debug "get_waku_v2_relay_v1_messages", topic=topic
# info "get_waku_v2_relay_v1_messages", topic=topic

if pubsubTopic.isErr():
return RestApiResponse.badRequest()

@ -132,13 +132,13 @@ proc installRelayApiHandlers*(

let messages = cache.getMessages(pubSubTopic, clear = true)
if messages.isErr():
debug "Not subscribed to topic", topic = pubSubTopic
info "Not subscribed to topic", topic = pubSubTopic
return RestApiResponse.notFound()

let data = RelayGetMessagesResponse(messages.get().map(toRelayWakuMessage))
let resp = RestApiResponse.jsonResponse(data, status = Http200)
if resp.isErr():
debug "An error ocurred while building the json respose", error = resp.error
info "An error ocurred while building the json respose", error = resp.error
return RestApiResponse.internalServerError()

return resp.get()

@ -185,7 +185,7 @@ proc installRelayApiHandlers*(
logMessageInfo(node.wakuRelay, "rest", pubsubTopic, "none", message, onRecv = true)

# if we reach here its either a non-RLN message or a RLN message with a valid proof
debug "Publishing message",
info "Publishing message",
pubSubTopic = pubSubTopic, rln = not node.wakuRlnRelay.isNil()
if not (waitFor node.publish(some(pubSubTopic), message).withTimeout(futTimeout)):
error "Failed to publish message to topic", pubSubTopic = pubSubTopic

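The publish call above is bounded with chronos `withTimeout`, which completes with `false` when the wrapped future does not finish in time. A self-contained sketch of that pattern (the sleep stands in for node.publish):

import chronos

proc demo() {.async.} =
  let fut = sleepAsync(2.seconds)        # stands in for node.publish(...)
  if not (await fut.withTimeout(500.milliseconds)):
    echo "publish timed out"             # taken here, since 2s > 500ms
  else:
    echo "published in time"

waitFor demo()
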
@ -203,7 +203,7 @@ proc installRelayApiHandlers*(
) -> RestApiResponse:
## Subscribes a node to a list of content topics.

debug "post_waku_v2_relay_v1_auto_subscriptions"
info "post_waku_v2_relay_v1_auto_subscriptions"

let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr:
return error

@ -228,7 +228,7 @@ proc installRelayApiHandlers*(
) -> RestApiResponse:
## Unsubscribes a node from a list of content topics.

debug "delete_waku_v2_relay_v1_auto_subscriptions"
info "delete_waku_v2_relay_v1_auto_subscriptions"

let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr:
return error

@ -253,19 +253,19 @@ proc installRelayApiHandlers*(
## Returns all WakuMessages received on a content topic since the
## last time this method was called.

debug "get_waku_v2_relay_v1_auto_messages", contentTopic = contentTopic
info "get_waku_v2_relay_v1_auto_messages", contentTopic = contentTopic

let contentTopic = contentTopic.valueOr:
return RestApiResponse.badRequest($error)

let messages = cache.getAutoMessages(contentTopic, clear = true).valueOr:
debug "Not subscribed to topic", topic = contentTopic
info "Not subscribed to topic", topic = contentTopic
return RestApiResponse.notFound(contentTopic)

let data = RelayGetMessagesResponse(messages.map(toRelayWakuMessage))

return RestApiResponse.jsonResponse(data, status = Http200).valueOr:
debug "An error ocurred while building the json respose", error = error
info "An error ocurred while building the json respose", error = error
return RestApiResponse.internalServerError($error)

router.api(MethodOptions, ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC) do() -> RestApiResponse:

@ -311,7 +311,7 @@ proc installRelayApiHandlers*(
logMessageInfo(node.wakuRelay, "rest", pubsubTopic, "none", message, onRecv = true)

# if we reach here its either a non-RLN message or a RLN message with a valid proof
debug "Publishing message",
info "Publishing message",
contentTopic = message.contentTopic, rln = not node.wakuRlnRelay.isNil()

var publishFut = node.publish(some($pubsubTopic), message)

@ -24,7 +24,7 @@ template unrecognizedFieldWarning*(field: typed) =
# TODO: There should be a different notification mechanism for informing the
# caller of a deserialization routine for unexpected fields.
# The chonicles import in this module should be removed.
debug "JSON field not recognized by the current version of nwaku. Consider upgrading",
info "JSON field not recognized by the current version of nwaku. Consider upgrading",
fieldName, typeName = typetraits.name(typeof field)

type SerdesResult*[T] = Result[T, cstring]

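Since the whole commit is about chronicles severity levels, a standalone reminder of how they behave may help: chronicles filters levels at compile time, so lines below the configured minimum are compiled out entirely, which is why promoting debug to info makes these events visible in default builds. A minimal sketch (the topic name is illustrative):

import chronicles

logScope:
  topics = "example"

debug "verbose detail, often compiled out", step = 1
info "normal operational event", step = 2
error "something went wrong", step = 3

# Select the minimum compiled-in level at build time, e.g.:
#   nim c -d:chronicles_log_level=INFO app.nim
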
@ -45,7 +45,7 @@ proc performStoreQuery(
let res = futRes.get().toHex()

if res.statusCode == uint32(ErrorCode.TOO_MANY_REQUESTS):
debug "Request rate limit reached on peer ", storePeer
info "Request rate limit reached on peer ", storePeer
return RestApiResponse.tooManyRequests("Request rate limit reached")

let resp = RestApiResponse.jsonResponse(res, status = Http200).valueOr:

@ -194,7 +194,7 @@ proc installStoreApiHandlers*(
) -> RestApiResponse:
let peer = peerAddr.toOpt()

debug "REST-GET /store/v3/messages ", peer_addr = $peer
info "REST-GET /store/v3/messages ", peer_addr = $peer

# All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding)
# Example:

@ -256,7 +256,7 @@ proc periodicRetentionPolicy(self: WakuArchive) {.async.} =
let policy = self.retentionPolicy.get()

while true:
debug "executing message retention policy"
info "executing message retention policy"
(await policy.execute(self.driver)).isOkOr:
waku_archive_errors.inc(labelValues = [retPolicyFailure])
error "failed execution of retention policy", error = error

@ -60,7 +60,7 @@ proc new*(
return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)

let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
debug "sqlite database page stats",
info "sqlite database page stats",
pageSize = pageSize, pages = pageCount, freePages = freelistCount

if vacuum and (pageCount > 0 and freelistCount > 0):

@ -74,7 +74,7 @@ proc new*(
if migrateRes.isErr():
return err("error in migrate sqlite: " & $migrateRes.error)

debug "setting up sqlite waku archive driver"
info "setting up sqlite waku archive driver"
let res = SqliteDriver.new(db)
if res.isErr():
return err("failed to init sqlite archive driver: " & res.error)

@ -119,6 +119,6 @@ proc new*(
"Postgres has been configured but not been compiled. Check compiler definitions."
)
else:
debug "setting up in-memory waku archive driver"
info "setting up in-memory waku archive driver"
let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
return ok(driver)

@ -56,13 +56,13 @@ proc breakIntoStatements*(script: string): seq[string] =
proc migrate*(
driver: PostgresDriver, targetVersion = SchemaVersion
): Future[DatabaseResult[void]] {.async.} =
debug "starting message store's postgres database migration"
info "starting message store's postgres database migration"

let currentVersion = (await driver.getCurrentVersion()).valueOr:
return err("migrate error could not retrieve current version: " & $error)

if currentVersion == targetVersion:
debug "database schema is up to date",
info "database schema is up to date",
currentVersion = currentVersion, targetVersion = targetVersion
return ok()

@ -85,15 +85,15 @@ proc migrate*(
# Run the migration scripts
for script in scripts:
for statement in script.breakIntoStatements():
debug "executing migration statement", statement = statement
info "executing migration statement", statement = statement

(await driver.performWriteQuery(statement)).isOkOr:
error "failed to execute migration statement",
statement = statement, error = error
return err("failed to execute migration statement")

debug "migration statement executed succesfully", statement = statement
info "migration statement executed succesfully", statement = statement

debug "finished message store's postgres database migration"
info "finished message store's postgres database migration"

return ok()

@ -1143,31 +1143,31 @@ proc performWriteQueryWithLock(
(await self.performWriteQuery(query)).isOkOr:
if error.contains(COULD_NOT_ACQUIRE_ADVISORY_LOCK):
## We don't consider this as an error. Just someone else acquired the advisory lock
debug "skip performWriteQuery because the advisory lock is acquired by other"
info "skip performWriteQuery because the advisory lock is acquired by other"
return ok()

if error.contains("already exists"):
## expected to happen when trying to add a partition table constraint that already exists
## e.g., constraint "constraint_name" for relation "messages_1720364735_1720364740" already exists
debug "skip already exists error", error = error
info "skip already exists error", error = error
return ok()

if error.contains("is already a partition"):
## expected to happen when a node tries to add a partition that is already attached,
## e.g., "messages_1720364735_1720364740" is already a partition
debug "skip is already a partition error", error = error
info "skip is already a partition error", error = error
return ok()

if error.contains("does not exist"):
## expected to happen when trying to drop a constraint that has already been dropped by other
## constraint "constraint_name" of relation "messages_1720364735_1720364740" does not exist
debug "skip does not exist error", error = error
info "skip does not exist error", error = error
return ok()

debug "protected query ended with error", error = $error
info "protected query ended with error", error = $error
return err("protected query ended with error:" & $error)

debug "protected query ended correctly"
info "protected query ended correctly"
return ok()

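The branches above all implement one idea: in a multi-node deployment, certain Postgres errors just mean another node already performed the DDL (or holds the lock), so they are treated as success. A standalone sketch of that classification:

import std/strutils

proc isBenignDdlError(err: string): bool =
  # another node already created/dropped the object
  err.contains("already exists") or
    err.contains("is already a partition") or
    err.contains("does not exist")

echo isBenignDdlError("""constraint "c" for relation "m_17" already exists""") # true
echo isBenignDdlError("connection refused")                                    # false
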
proc addPartition(

@ -1222,7 +1222,7 @@ proc addPartition(
(await self.performWriteQueryWithLock(dropConstraint)).isOkOr:
return err(fmt"error dropping constraint [{partitionName}]: " & $error)

debug "new partition added", query = createPartitionQuery
info "new partition added", query = createPartitionQuery

self.partitionMngr.addPartitionInfo(partitionName, beginning, `end`)
return ok()

@ -1230,7 +1230,7 @@ proc addPartition(
proc refreshPartitionsInfo(
self: PostgresDriver
): Future[ArchiveDriverResult[void]] {.async.} =
debug "refreshPartitionsInfo"
info "refreshPartitionsInfo"
self.partitionMngr.clearPartitionInfo()

let partitionNames = (await self.getPartitionsList()).valueOr:

@ -1266,7 +1266,7 @@ proc loopPartitionFactory(
## Loop proc that continuously checks whether we need to create a new partition.
## Notice that the deletion of partitions is handled by the retention policy modules.

debug "starting loopPartitionFactory"
info "starting loopPartitionFactory"

while true:
trace "Check if a new partition is needed"

@ -1278,7 +1278,7 @@ proc loopPartitionFactory(
let now = times.now().toTime().toUnix()

if self.partitionMngr.isEmpty():
debug "adding partition because now there aren't more partitions"
info "adding partition because now there aren't more partitions"
(await self.addPartition(now)).isOkOr:
onFatalError("error when creating a new partition from empty state: " & $error)
else:

@ -1288,14 +1288,14 @@ proc loopPartitionFactory(

let newestPartition = newestPartitionRes.get()
if newestPartition.containsMoment(now):
debug "creating a new partition for the future"
info "creating a new partition for the future"
## The current used partition is the last one that was created.
## Thus, let's create another partition for the future.

(await self.addPartition(newestPartition.getLastMoment())).isOkOr:
onFatalError("could not add the next partition for 'now': " & $error)
elif now >= newestPartition.getLastMoment():
debug "creating a new partition to contain current messages"
info "creating a new partition to contain current messages"
## There is no partition to contain the current time.
## This happens if the node has been stopped for quite a long time.
## Then, let's create the needed partition to contain 'now'.

@ -1333,23 +1333,23 @@ proc removePartition(
## whose rows belong to the partition time range

let partitionName = partition.getName()
debug "beginning of removePartition", partitionName
info "beginning of removePartition", partitionName

let partSize = (await self.getTableSize(partitionName)).valueOr("")

## Detach and remove the partition concurrently to not block the parent table (messages)
let detachPartitionQuery =
"ALTER TABLE messages DETACH PARTITION " & partitionName & " CONCURRENTLY;"
debug "removeOldestPartition", query = detachPartitionQuery
info "removeOldestPartition", query = detachPartitionQuery
(await self.performWriteQuery(detachPartitionQuery)).isOkOr:
debug "detected error when trying to detach partition", error
info "detected error when trying to detach partition", error

if ($error).contains("FINALIZE") or
($error).contains("already pending detach in part"):
## We assume the database is suggesting to use FINALIZE when detaching a partition
let detachPartitionFinalizeQuery =
"ALTER TABLE messages DETACH PARTITION " & partitionName & " FINALIZE;"
debug "removeOldestPartition detaching with FINALIZE",
info "removeOldestPartition detaching with FINALIZE",
query = detachPartitionFinalizeQuery
(await self.performWriteQuery(detachPartitionFinalizeQuery)).isOkOr:
return err(fmt"error in FINALIZE {detachPartitionFinalizeQuery}: " & $error)

@ -1358,11 +1358,11 @@ proc removePartition(

## Drop the partition
let dropPartitionQuery = "DROP TABLE " & partitionName
debug "removeOldestPartition drop partition", query = dropPartitionQuery
info "removeOldestPartition drop partition", query = dropPartitionQuery
(await self.performWriteQuery(dropPartitionQuery)).isOkOr:
return err(fmt"error in {dropPartitionQuery}: " & $error)

debug "removed partition", partition_name = partitionName, partition_size = partSize
info "removed partition", partition_name = partitionName, partition_size = partSize
self.partitionMngr.removeOldestPartitionName()

## Now delete rows from the messages_lookup table

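The removal sequence above boils down to two DDL statements: detach the partition (concurrently when possible, falling back to FINALIZE when Postgres asks for it) and then drop the detached table. Building those statements in isolation, with the example partition name taken from the comments above:

let partitionName = "messages_1720364735_1720364740" # example name

let detachConcurrently =
  "ALTER TABLE messages DETACH PARTITION " & partitionName & " CONCURRENTLY;"
let detachFinalize =
  "ALTER TABLE messages DETACH PARTITION " & partitionName & " FINALIZE;"
let dropPartition =
  "DROP TABLE " & partitionName

echo detachConcurrently
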
@ -1380,7 +1380,7 @@ proc removePartitionsOlderThan(
## Removes old partitions that don't contain the specified timestamp

let tsInSec = Timestamp(float(tsInNanoSec) / 1_000_000_000)
debug "beginning of removePartitionsOlderThan", tsInSec
info "beginning of removePartitionsOlderThan", tsInSec

var oldestPartition = self.partitionMngr.getOldestPartition().valueOr:
return err("could not get oldest partition in removePartitionOlderThan: " & $error)

@ -1411,7 +1411,7 @@ proc removeOldestPartition(
## The database contains a partition that would store current messages.

if currentPartitionRes.get() == oldestPartition:
debug "Skipping to remove the current partition"
info "Skipping to remove the current partition"
return ok()

return await self.removePartition(oldestPartition)

@ -1431,7 +1431,7 @@ method decreaseDatabaseSize*(
if totalSizeOfDB <= targetSizeInBytes:
return ok()

debug "start reducing database size",
info "start reducing database size",
targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB

while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition():

@ -1450,7 +1450,7 @@ method decreaseDatabaseSize*(

totalSizeOfDB = newCurrentSize

debug "reducing database size",
info "reducing database size",
targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB

return ok()

@ -1527,22 +1527,22 @@ proc analyzeTableLoop(self: PostgresDriver) {.async.} =
## The database stats should be calculated regularly so that the planner
## picks up the proper indexes and we have better query performance.
while true:
debug "analyzeTableLoop lock db"
info "analyzeTableLoop lock db"
(await self.acquireDatabaseLock(AnalyzeTableLockId)).isOkOr:
if error != EXPECTED_LOCK_ERROR:
error "failed to acquire lock in analyzeTableLoop", error = error
await sleepAsync(RunAnalyzeInterval)
continue

debug "analyzeTableLoop start analysis"
info "analyzeTableLoop start analysis"
(await self.performWriteQuery(AnalyzeQuery)).isOkOr:
error "failed to run ANALYZE messages", error = error

debug "analyzeTableLoop unlock db"
info "analyzeTableLoop unlock db"
(await self.releaseDatabaseLock(AnalyzeTableLockId)).isOkOr:
error "failed to release lock analyzeTableLoop", error = error

debug "analyzeTableLoop analysis completed"
info "analyzeTableLoop analysis completed"

await sleepAsync(RunAnalyzeInterval)

@ -55,7 +55,7 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
## If not `targetVersion` is provided, it defaults to `SchemaVersion`.
##
## NOTE: Down migration it is not currently supported
debug "starting message store's sqlite database migration"
info "starting message store's sqlite database migration"

let userVersion = ?db.getUserVersion()
let isSchemaVersion7 = ?db.isSchemaVersion7()

@ -70,5 +70,5 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
if migrationRes.isErr():
return err("failed to execute migration scripts: " & migrationRes.error)

debug "finished message store's sqlite database migration"
info "finished message store's sqlite database migration"
return ok()

@ -53,7 +53,7 @@ proc new*(T: type CapacityRetentionPolicy, capacity = DefaultCapacity): T =
method execute*(
p: CapacityRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
debug "beginning executing message retention policy - capacity"
info "beginning executing message retention policy - capacity"

let numMessages = (await driver.getMessagesCount()).valueOr:
return err("failed to get messages count: " & error)

@ -64,6 +64,6 @@ method execute*(
(await driver.deleteOldestMessagesNotWithinLimit(limit = p.capacity + p.deleteWindow)).isOkOr:
return err("deleting oldest messages failed: " & error)

debug "end executing message retention policy - capacity"
info "end executing message retention policy - capacity"

return ok()

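One plausible reading of the call shown above: `deleteWindow` is batching headroom on top of `capacity`, so deletion happens in chunks rather than on every insert. Illustrative numbers only; the trigger condition is an assumption, since only the delete limit is visible in this hunk:

let capacity = 100_000     # illustrative target
let deleteWindow = 10_000  # batching headroom (assumed semantics)
var numMessages = 112_345

if numMessages > capacity + deleteWindow: # assumed trigger
  # keep at most capacity + deleteWindow messages, as in the call above
  numMessages = capacity + deleteWindow

echo numMessages # 110000
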
@ -18,10 +18,10 @@ proc new*(T: type SizeRetentionPolicy, size = DefaultRetentionSize): T =
method execute*(
p: SizeRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
debug "beginning of executing message retention policy - size"
info "beginning of executing message retention policy - size"

(await driver.decreaseDatabaseSize(p.sizeLimit)).isOkOr:
return err("decreaseDatabaseSize failed: " & $error)

debug "end of executing message retention policy - size"
info "end of executing message retention policy - size"
return ok()

@ -18,7 +18,7 @@ method execute*(
p: TimeRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
## Delete messages that exceed the retention time by 10% and more (batch delete for efficiency)
debug "beginning of executing message retention policy - time"
info "beginning of executing message retention policy - time"

let omtRes = await driver.getOldestMessageTimestamp()
if omtRes.isErr():

@ -35,5 +35,5 @@ method execute*(
if res.isErr():
return err("failed to delete oldest messages: " & res.error)

debug "end of executing message retention policy - time"
info "end of executing message retention policy - time"
return ok()

@ -122,7 +122,7 @@ proc handleMessage*(
let insertDuration = getTime().toUnixFloat() - insertStartTime
waku_legacy_archive_insert_duration_seconds.observe(insertDuration)

debug "message archived",
info "message archived",
msg_hash = msgHashHex,
pubsubTopic = pubsubTopic,
contentTopic = msg.contentTopic,

@ -62,7 +62,7 @@ proc new*(
return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)

let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
debug "sqlite database page stats",
info "sqlite database page stats",
pageSize = pageSize, pages = pageCount, freePages = freelistCount

if vacuum and (pageCount > 0 and freelistCount > 0):

@ -76,7 +76,7 @@ proc new*(
if migrateRes.isErr():
return err("error in migrate sqlite: " & $migrateRes.error)

debug "setting up sqlite waku archive driver"
info "setting up sqlite waku archive driver"
let res = SqliteDriver.new(db)
if res.isErr():
return err("failed to init sqlite archive driver: " & res.error)

@ -99,6 +99,6 @@ proc new*(
"Postgres has been configured but not been compiled. Check compiler definitions."
)
else:
debug "setting up in-memory waku archive driver"
info "setting up in-memory waku archive driver"
let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
return ok(driver)

@ -892,7 +892,7 @@ method decreaseDatabaseSize*(
# if totalSizeOfDB <= targetSizeInBytes:
# return ok()

# debug "start reducing database size",
# info "start reducing database size",
# targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB

# while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition():

@ -911,7 +911,7 @@ method decreaseDatabaseSize*(

# totalSizeOfDB = newCurrentSize

# debug "reducing database size",
# info "reducing database size",
# targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB

return ok()

@ -55,7 +55,7 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
## If not `targetVersion` is provided, it defaults to `SchemaVersion`.
##
## NOTE: Down migration it is not currently supported
debug "starting message store's sqlite database migration"
info "starting message store's sqlite database migration"

let userVersion = ?db.getUserVersion()
let isSchemaVersion7 = ?db.isSchemaVersion7()

@ -70,5 +70,5 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
if migrationRes.isErr():
return err("failed to execute migration scripts: " & migrationRes.error)

debug "finished message store's sqlite database migration"
info "finished message store's sqlite database migration"
return ok()

@ -199,7 +199,7 @@ proc relayShardingIndicesList*(record: TypedRecord): Option[RelayShards] =
return none(RelayShards)

let indexList = fromIndicesList(field).valueOr:
debug "invalid shards list", error = error
info "invalid shards list", error = error
return none(RelayShards)

some(indexList)

@ -209,7 +209,7 @@ proc relayShardingBitVector*(record: TypedRecord): Option[RelayShards] =
return none(RelayShards)

let bitVector = fromBitVector(field).valueOr:
debug "invalid shards bit vector", error = error
info "invalid shards bit vector", error = error
return none(RelayShards)

some(bitVector)

@ -241,7 +241,7 @@ proc containsShard*(r: Record, shard: RelayShard): bool =
proc containsShard*(r: Record, topic: PubsubTopic): bool =
let parseRes = RelayShard.parse(topic)
if parseRes.isErr():
debug "invalid static sharding topic", topic = topic, error = parseRes.error
info "invalid static sharding topic", topic = topic, error = parseRes.error
return false

containsShard(r, parseRes.value)

@ -110,7 +110,7 @@ proc sendSubscribeRequest(
proc ping*(
wfc: WakuFilterClient, servicePeer: RemotePeerInfo
): Future[FilterSubscribeResult] {.async.} =
debug "sending ping", servicePeer = shortLog($servicePeer)
info "sending ping", servicePeer = shortLog($servicePeer)
let requestId = generateRequestId(wfc.rng)
let filterSubscribeRequest = FilterSubscribeRequest.ping(requestId)

@ -194,7 +194,7 @@ proc initProtocolHandler(wfc: WakuFilterClient) =
let msg_hash =
computeMessageHash(msgPush.pubsubTopic, msgPush.wakuMessage).to0xHex()

debug "Received message push",
info "Received message push",
peerId = conn.peerId,
msg_hash,
payload = shortLog(msgPush.wakuMessage.payload),

@ -31,7 +31,7 @@ type WakuFilter* = ref object of LPProtocol
peerConnections: Table[PeerId, Connection]

proc pingSubscriber(wf: WakuFilter, peerId: PeerID): FilterSubscribeResult =
debug "pinging subscriber", peerId = peerId
info "pinging subscriber", peerId = peerId

if not wf.subscriptions.isSubscribed(peerId):
error "pinging peer has no subscriptions", peerId = peerId

@ -67,13 +67,13 @@ proc subscribe(

let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it)))

debug "subscribing peer to filter criteria",
info "subscribing peer to filter criteria",
peerId = peerId, filterCriteria = filterCriteria

(await wf.subscriptions.addSubscription(peerId, filterCriteria)).isOkOr:
return err(FilterSubscribeError.serviceUnavailable(error))

debug "correct subscription", peerId = peerId
info "correct subscription", peerId = peerId

ok()

@ -99,7 +99,7 @@ proc unsubscribe(

let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it)))

debug "unsubscribing peer from filter criteria",
info "unsubscribing peer from filter criteria",
peerId = peerId, filterCriteria = filterCriteria

wf.subscriptions.removeSubscription(peerId, filterCriteria).isOkOr:

@ -108,7 +108,7 @@ proc unsubscribe(

## Note: do not remove from peerRequestRateLimiter to prevent trick with subscribe/unsubscribe loop
## We remove only if peerManager removes the peer
debug "correct unsubscription", peerId = peerId
info "correct unsubscription", peerId = peerId

ok()

@ -116,10 +116,10 @@ proc unsubscribeAll(
wf: WakuFilter, peerId: PeerID
): Future[FilterSubscribeResult] {.async.} =
if not wf.subscriptions.isSubscribed(peerId):
debug "unsubscribing peer has no subscriptions", peerId = peerId
info "unsubscribing peer has no subscriptions", peerId = peerId
return err(FilterSubscribeError.notFound())

debug "removing peer subscription", peerId = peerId
info "removing peer subscription", peerId = peerId
await wf.subscriptions.removePeer(peerId)
wf.subscriptions.cleanUp()

@ -170,7 +170,7 @@ proc handleSubscribeRequest*(
proc pushToPeer(
wf: WakuFilter, peerId: PeerId, buffer: seq[byte]
): Future[Result[void, string]] {.async.} =
debug "pushing message to subscribed peer", peerId = shortLog(peerId)
info "pushing message to subscribed peer", peerId = shortLog(peerId)

let stream = (
await wf.peerManager.getStreamByPeerIdAndProtocol(peerId, WakuFilterPushCodec)

@ -180,7 +180,7 @@ proc pushToPeer(

await stream.writeLp(buffer)

debug "published successful", peerId = shortLog(peerId), stream
info "published successful", peerId = shortLog(peerId), stream
waku_service_network_bytes.inc(
amount = buffer.len().int64, labelValues = [WakuFilterPushCodec, "out"]
)

@ -220,13 +220,13 @@ proc pushToPeers(
await allFutures(pushFuts)

proc maintainSubscriptions*(wf: WakuFilter) {.async.} =
debug "maintaining subscriptions"
info "maintaining subscriptions"

## Remove subscriptions for peers that have been removed from peer store
var peersToRemove: seq[PeerId]
for peerId in wf.subscriptions.peersSubscribed.keys:
if not wf.peerManager.switch.peerStore.hasPeer(peerId, WakuFilterPushCodec):
debug "peer has been removed from peer store, we will remove subscription",
info "peer has been removed from peer store, we will remove subscription",
peerId = peerId
peersToRemove.add(peerId)

@ -245,7 +245,7 @@ proc handleMessage*(
) {.async.} =
let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()

debug "handling message", pubsubTopic = pubsubTopic, msg_hash = msgHash
info "handling message", pubsubTopic = pubsubTopic, msg_hash = msgHash

let handleMessageStartTime = Moment.now()

@ -288,7 +288,7 @@ proc handleMessage*(

proc initProtocolHandler(wf: WakuFilter) =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
debug "filter subscribe request handler triggered",
info "filter subscribe request handler triggered",
peerId = shortLog(conn.peerId), conn

var response: FilterSubscribeResponse

@ -325,10 +325,10 @@ proc initProtocolHandler(wf: WakuFilter) =
remote_peer_id = conn.peerId, err = getCurrentExceptionMsg()
return

debug "sending filter subscribe response",
info "sending filter subscribe response",
peer_id = shortLog(conn.peerId), response = response
do:
debug "filter request rejected due rate limit exceeded",
info "filter request rejected due rate limit exceeded",
peerId = shortLog(conn.peerId), limit = $wf.peerRequestRateLimiter.setting
response = FilterSubscribeResponse(
requestId: "N/A",

@ -392,17 +392,17 @@ proc new*(

proc periodicSubscriptionsMaintenance(wf: WakuFilter) {.async.} =
const MaintainSubscriptionsInterval = 1.minutes
debug "starting to maintain subscriptions"
info "starting to maintain subscriptions"
while true:
await wf.maintainSubscriptions()
await sleepAsync(MaintainSubscriptionsInterval)

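The maintenance proc above follows the usual chronos pattern for a periodic task: an infinite loop awaiting the work and then an interval sleep, kept as a future so a stop proc can cancel it. A compact runnable sketch of that pattern (names and intervals illustrative):

import chronos

proc maintain() {.async.} =
  echo "maintenance tick"

proc periodicMaintenance(interval: Duration) {.async.} =
  while true:
    await maintain()
    await sleepAsync(interval)

let fut = periodicMaintenance(50.milliseconds)
waitFor sleepAsync(120.milliseconds) # let a few ticks run
waitFor fut.cancelAndWait()          # how stop() shuts the loop down
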
proc start*(wf: WakuFilter) {.async.} =
debug "starting filter protocol"
info "starting filter protocol"
await procCall LPProtocol(wf).start()
wf.subscriptionsManagerFut = wf.periodicSubscriptionsMaintenance()

proc stop*(wf: WakuFilter) {.async.} =
debug "stopping filter protocol"
info "stopping filter protocol"
await wf.subscriptionsManagerFut.cancelAndWait()
await procCall LPProtocol(wf).stop()

@ -85,7 +85,7 @@ proc findSubscribedPeers*(
if s.isSubscribed(peer):
foundPeers.add(peer)

debug "findSubscribedPeers result",
info "findSubscribedPeers result",
filter_criterion = filterCriterion,
subscr_set = s.subscriptions,
found_peers = foundPeers

@ -94,29 +94,29 @@ proc findSubscribedPeers*(

proc removePeer*(s: FilterSubscriptions, peerId: PeerID) {.async.} =
## Remove all subscriptions for a given peer
debug "removePeer",
info "removePeer",
currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), peerId = peerId

s.peersSubscribed.del(peerId)

debug "removePeer after deletion",
info "removePeer after deletion",
currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), peerId = peerId

proc removePeers*(s: FilterSubscriptions, peerIds: seq[PeerID]) {.async.} =
## Remove all subscriptions for a given list of peers
debug "removePeers",
info "removePeers",
currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)),
peerIds = peerIds.mapIt(shortLog(it))

for peer in peerIds:
await s.removePeer(peer)

debug "removePeers after deletion",
info "removePeers after deletion",
currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)),
peerIds = peerIds.mapIt(shortLog(it))

proc cleanUp*(fs: FilterSubscriptions) =
debug "cleanUp", currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it))
info "cleanUp", currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it))

## Remove all subscriptions for peers that have not been seen for a while
let now = Moment.now()

@ -128,7 +128,7 @@ proc cleanUp*(fs: FilterSubscriptions) =

fs.subscriptions.keepItIf(val.len > 0)

debug "after cleanUp",
info "after cleanUp",
currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it))

proc refreshSubscription*(s: var FilterSubscriptions, peerId: PeerID) =

@ -162,7 +162,7 @@ proc addSubscription*(
peersOfSub[].incl(peerId)
peerData.criteriaCount += 1

debug "subscription added correctly",
info "subscription added correctly",
new_peer = shortLog(peerId), subscr_set = s.subscriptions

return ok()

@ -127,7 +127,7 @@ proc initProtocolHandler(wl: WakuLightPush) =
except CatchableError:
error "lightpush failed handleRequest", error = getCurrentExceptionMsg()
do:
debug "lightpush request rejected due rate limit exceeded",
info "lightpush request rejected due rate limit exceeded",
peerId = conn.peerId, limit = $wl.requestRateLimiter.setting

rpc = static(

@ -87,7 +87,7 @@ proc initProtocolHandler(wl: WakuLegacyLightPush) =
except CatchableError:
error "lightpush legacy handleRequest failed", error = getCurrentExceptionMsg()
do:
debug "lightpush request rejected due rate limit exceeded",
info "lightpush request rejected due rate limit exceeded",
peerId = conn.peerId, limit = $wl.requestRateLimiter.setting

rpc = static(

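Both lightpush variants gate requests with a request rate limiter and log the rejection in the `do:` branch. The limiter's internals are not shown in this diff; a minimal token-bucket sketch of the general technique, entirely illustrative and not the nwaku implementation:

import std/times

type TokenBucket = object
  capacity: float
  tokens: float
  refillPerSec: float
  lastRefill: float

proc tryConsume(tb: var TokenBucket): bool =
  let now = epochTime()
  tb.tokens = min(tb.capacity, tb.tokens + (now - tb.lastRefill) * tb.refillPerSec)
  tb.lastRefill = now
  if tb.tokens >= 1.0:
    tb.tokens -= 1.0
    return true
  false # caller rejects the request, as in the do: branches above

var limiter = TokenBucket(capacity: 5, tokens: 5, refillPerSec: 1, lastRefill: epochTime())
echo limiter.tryConsume() # true until the bucket is drained
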
@ -86,7 +86,7 @@ proc initProtocolHandler(m: WakuMetadata) =
error "Response decoding error", error = error
return

debug "Received WakuMetadata request",
info "Received WakuMetadata request",
remoteClusterId = response.clusterId,
remoteShards = response.shards,
localClusterId = m.clusterId,

@ -95,7 +95,7 @@ proc encryptWithAd*(
# Otherwise we return the input plaintext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object
else:
ciphertext = @plaintext
debug "encryptWithAd called with no encryption key set. Returning plaintext."
info "encryptWithAd called with no encryption key set. Returning plaintext."

return ciphertext

@ -133,7 +133,7 @@ proc decryptWithAd*(

# We check if the input authorization tag matches the decryption authorization tag
if inputAuthorizationTag != authorizationTag:
debug "decryptWithAd failed",
info "decryptWithAd failed",
plaintext = plaintext,
ciphertext = ciphertext,
inputAuthorizationTag = inputAuthorizationTag,

@ -155,7 +155,7 @@ proc decryptWithAd*(
# Otherwise we return the input ciphertext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object
else:
plaintext = @ciphertext
debug "decryptWithAd called with no encryption key set. Returning ciphertext."
info "decryptWithAd called with no encryption key set. Returning ciphertext."

return plaintext

@ -359,6 +359,6 @@ proc decrypt*(
trace "decrypt", tagIn = tagIn, tagOut = tagOut, nonce = state.nonce
# We check if the authorization tag computed while decrypting is the same as the input tag
if tagIn != tagOut:
debug "decrypt failed", plaintext = shortLog(plaintext)
info "decrypt failed", plaintext = shortLog(plaintext)
raise newException(NoiseDecryptTagError, "decrypt tag authentication failed.")
return plaintext

@ -497,7 +497,7 @@ proc stepHandshake*(
# If there are no more message patterns left for processing
# we return an empty HandshakeStepResult
if hs.msgPatternIdx > uint8(hs.handshakePattern.messagePatterns.len - 1):
debug "stepHandshake called more times than the number of message patterns present in handshake"
info "stepHandshake called more times than the number of message patterns present in handshake"
return ok(hsStepResult)

# We process the next handshake message pattern

@ -663,7 +663,7 @@ proc readMessage*(
# The message successfully decrypted, we can delete the first element of the inbound Message Nametag Buffer
delete(inboundMessageNametagBuffer, 1)
except NoiseDecryptTagError:
debug "A read message failed decryption. Returning empty message as plaintext."
info "A read message failed decryption. Returning empty message as plaintext."
message = @[]

return ok(message)

@ -249,7 +249,7 @@ proc initNametagsBuffer*(mntb: var MessageNametagBuffer) =
mntb.counter += 1
else:
# We warn users if no secret is set
debug "The message nametags buffer has not a secret set"
info "The message nametags buffer has not a secret set"

# Deletes the first n elements in buffer and appends n new ones
proc delete*(mntb: var MessageNametagBuffer, n: int) =

@ -272,7 +272,7 @@ proc delete*(mntb: var MessageNametagBuffer, n: int) =
mntb.counter += 1
else:
# We warn users that no secret is set
debug "The message nametags buffer has no secret set"
info "The message nametags buffer has no secret set"

# Checks if the input messageNametag is contained in the input MessageNametagBuffer
proc checkNametag*(

@ -486,7 +486,7 @@ proc serializePayloadV2*(self: PayloadV2): Result[seq[byte], cstring] =
serializedHandshakeMessage.add serializedPk
# If we are processing more than 256 byte, we return an error
if serializedHandshakeMessageLen > uint8.high.int:
debug "PayloadV2 malformed: too many public keys contained in the handshake message"
info "PayloadV2 malformed: too many public keys contained in the handshake message"
return err("Too many public keys in handshake message")

# We get the transport message byte length

@ -542,7 +542,7 @@ proc deserializePayloadV2*(
# We read the Handshake Message lenght (1 byte)
var handshakeMessageLen = payload[i].uint64
if handshakeMessageLen > uint8.high.uint64:
debug "Payload malformed: too many public keys contained in the handshake message"
info "Payload malformed: too many public keys contained in the handshake message"
return err("Too many public keys in handshake message")

i += 1

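Both length checks above exist because the wire format stores the handshake-message length in a single byte, so anything longer than uint8.high (255) bytes cannot be encoded. The same check in isolation:

let serializedHandshakeMessageLen = 300 # illustrative

if serializedHandshakeMessageLen > uint8.high.int:
  echo "too long for the 1-byte length field" # this branch is taken
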
@ -83,7 +83,7 @@ proc getEnrsFromCache(
wpx: WakuPeerExchange, numPeers: uint64
): seq[enr.Record] {.gcsafe.} =
if wpx.enrCache.len() == 0:
debug "peer exchange ENR cache is empty"
info "peer exchange ENR cache is empty"
return @[]

# copy and shuffle

@ -100,11 +100,11 @@ proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool =
return false

if peer.enr.isNone():
debug "peer has no ENR", peer = $peer
info "peer has no ENR", peer = $peer
return false

if cluster.isSome() and peer.enr.get().isClusterMismatched(cluster.get()):
debug "peer has mismatching cluster", peer = $peer
info "peer has mismatching cluster", peer = $peer
return false

return true

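poolFilter is a plain predicate: a missing ENR or a cluster mismatch disqualifies the peer. The same shape over a simplified, hypothetical peer type (MiniPeer stands in for RemotePeerInfo):

import std/[options, sequtils]

type MiniPeer = object      # hypothetical stand-in for RemotePeerInfo
  enrCluster: Option[uint16] # none() models "peer has no ENR"

proc poolFilter(cluster: Option[uint16], peer: MiniPeer): bool =
  if peer.enrCluster.isNone():
    return false # peer has no ENR
  if cluster.isSome() and peer.enrCluster.get() != cluster.get():
    return false # peer has mismatching cluster
  true

let peers = @[MiniPeer(enrCluster: some(1'u16)), MiniPeer()]
echo peers.filterIt(poolFilter(some(1'u16), it)).len # 1
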
@ -176,7 +176,7 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
return

let enrs = wpx.getEnrsFromCache(decBuf.get().request.numPeers)
debug "peer exchange request received"
info "peer exchange request received"
trace "px enrs to respond", enrs = $enrs
try:
(await wpx.respond(enrs, conn)).isErrOr:

@ -175,7 +175,7 @@ proc initProtocolHandler(w: WakuRelay) =
## main protocol handler that gets triggered on every
## connection for a protocol string
## e.g. ``/wakusub/0.0.1``, etc...
debug "Incoming WakuRelay connection", connection = conn, protocol = proto
info "Incoming WakuRelay connection", connection = conn, protocol = proto

try:
await w.handleConn(conn, proto)

@ -204,7 +204,7 @@ proc logMessageInfo*(
let payloadSize = float64(msg.payload.len)

if onRecv:
notice "received relay message",
debug "received relay message",
my_peer_id = w.switch.peerInfo.peerId,
msg_hash = msg_hash,
msg_id = msg_id_short,

@ -213,7 +213,7 @@ proc logMessageInfo*(
receivedTime = getNowInNanosecondTime(),
payloadSizeBytes = payloadSize
else:
notice "sent relay message",
debug "sent relay message",
my_peer_id = w.switch.peerInfo.peerId,
msg_hash = msg_hash,
msg_id = msg_id_short,

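This is the second half of the commit: the per-message relay logs above move from notice down to debug, while rare lifecycle events elsewhere move up to info. The rule of thumb, sketched with chronicles (proc names illustrative):

import chronicles

proc onRelayMessage(msgHash: string) =
  # hot path, fires for every gossiped message: keep it quiet
  debug "received relay message", msg_hash = msgHash

proc onNodeStarted() =
  # rare lifecycle event: visible at the default level
  info "relay started"

onNodeStarted()
onRelayMessage("0xdeadbeef")
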
@ -380,7 +380,7 @@ proc getPubSubPeersInMesh*(
return ok(allPeers)

if not w.mesh.hasKey(pubsubTopic):
debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic",
info "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic",
pubsubTopic = pubsubTopic
return ok(initHashSet[PubSubPeer]())

@ -461,12 +461,12 @@ proc topicsHealthLoop(wakuRelay: WakuRelay) {.async.} =
await sleepAsync(10.seconds)

method start*(w: WakuRelay) {.async, base.} =
debug "start"
info "start"
await procCall GossipSub(w).start()
w.topicHealthLoopHandle = w.topicsHealthLoop()

method stop*(w: WakuRelay) {.async, base.} =
debug "stop"
info "stop"
await procCall GossipSub(w).stop()
if not w.topicHealthLoopHandle.isNil():
await w.topicHealthLoopHandle.cancelAndWait()

@ -538,7 +538,7 @@ proc validateMessage*(
return ok()

proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler) =
debug "subscribe", pubsubTopic = pubsubTopic
info "subscribe", pubsubTopic = pubsubTopic

# We need to wrap the handler since gossipsub doesnt understand WakuMessage
let topicHandler = proc(

@ -580,7 +580,7 @@ proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandle
proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) =
## Unsubscribe all handlers on this pubsub topic

debug "unsubscribe all", pubsubTopic = pubsubTopic
info "unsubscribe all", pubsubTopic = pubsubTopic

procCall GossipSub(w).unsubscribeAll(pubsubTopic)
w.topicValidator.del(pubsubTopic)

@ -604,7 +604,7 @@ proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) =
error "exception in unsubscribe", pubsubTopic, error = getCurrentExceptionMsg()
return

debug "unsubscribe", pubsubTopic
info "unsubscribe", pubsubTopic
procCall GossipSub(w).unsubscribe(pubsubTopic, topicHandler)
procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator)

@ -139,7 +139,7 @@ proc batchRequest*(
proc advertiseAll(
self: WakuRendezVous
): Future[Result[void, string]] {.async: (raises: []).} =
debug "waku rendezvous advertisements started"
info "waku rendezvous advertisements started"

let shards = self.getShards()

@ -171,14 +171,14 @@ proc advertiseAll(
if fut.failed():
warn "a rendezvous advertisement failed", cause = fut.error.msg

debug "waku rendezvous advertisements finished"
info "waku rendezvous advertisements finished"

return ok()

proc initialRequestAll*(
self: WakuRendezVous
): Future[Result[void, string]] {.async: (raises: []).} =
debug "waku rendezvous initial requests started"
info "waku rendezvous initial requests started"

let shards = self.getShards()

@ -218,12 +218,12 @@ proc initialRequestAll*(
rendezvousPeerFoundTotal.inc()
self.peerManager.addPeer(record)

debug "waku rendezvous initial request finished"
info "waku rendezvous initial request finished"

return ok()

proc periodicRegistration(self: WakuRendezVous) {.async.} =
debug "waku rendezvous periodic registration started",
info "waku rendezvous periodic registration started",
interval = self.registrationInterval

# infinite loop

@ -231,7 +231,7 @@ proc periodicRegistration(self: WakuRendezVous) {.async.} =
await sleepAsync(self.registrationInterval)

(await self.advertiseAll()).isOkOr:
debug "waku rendezvous advertisements failed", error = error
info "waku rendezvous advertisements failed", error = error

if self.registrationInterval > MaxRegistrationInterval:
self.registrationInterval = MaxRegistrationInterval

@ -242,7 +242,7 @@ proc periodicRegistration(self: WakuRendezVous) {.async.} =
self.registrationInterval = DefaultRegistrationInterval

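The failure branch above caps registrationInterval at MaxRegistrationInterval and the success path resets it to the default; any growth step between the two sits outside this hunk. A sketch of that cap-and-reset shape with plain integer seconds (the doubling is an assumption, not shown in the diff):

const
  defaultIntervalSec = 60 # illustrative values
  maxIntervalSec = 600

var registrationIntervalSec = defaultIntervalSec

proc onAdvertiseFailure() =
  registrationIntervalSec = registrationIntervalSec * 2 # assumed growth step
  if registrationIntervalSec > maxIntervalSec:
    registrationIntervalSec = maxIntervalSec            # the cap shown above

proc onAdvertiseSuccess() =
  registrationIntervalSec = defaultIntervalSec          # the reset shown above

onAdvertiseFailure()
echo registrationIntervalSec # 120
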
proc periodicRequests(self: WakuRendezVous) {.async.} =
|
||||
debug "waku rendezvous periodic requests started", interval = self.requestInterval
|
||||
info "waku rendezvous periodic requests started", interval = self.requestInterval
|
||||
|
||||
# infinite loop
|
||||
while true:
|
||||
@ -288,7 +288,7 @@ proc new*(
|
||||
wrv.registrationInterval = DefaultRegistrationInterval
|
||||
wrv.requestInterval = DefaultRequestsInterval
|
||||
|
||||
debug "waku rendezvous initialized",
|
||||
info "waku rendezvous initialized",
|
||||
clusterId = clusterId, shards = getShards(), capabilities = getCapabilities()
|
||||
|
||||
return ok(wrv)
|
||||
@ -299,7 +299,7 @@ proc start*(self: WakuRendezVous) {.async: (raises: []).} =
|
||||
|
||||
self.periodicRequestFut = self.periodicRequests()
|
||||
|
||||
debug "waku rendezvous discovery started"
|
||||
info "waku rendezvous discovery started"
|
||||
|
||||
proc stopWait*(self: WakuRendezVous) {.async: (raises: []).} =
|
||||
if not self.periodicRegistrationFut.isNil():
|
||||
@ -308,4 +308,4 @@ proc stopWait*(self: WakuRendezVous) {.async: (raises: []).} =
|
||||
if not self.periodicRequestFut.isNil():
|
||||
await self.periodicRequestFut.cancelAndWait()
|
||||
|
||||
debug "waku rendezvous discovery stopped"
|
||||
info "waku rendezvous discovery stopped"
|
||||
|
@ -233,7 +233,7 @@ method register*(
let leaf = rateCommitment.toLeaf().get()
if g.registerCb.isSome():
let idx = g.latestIndex
debug "registering member via callback", rateCommitment = leaf, index = idx
info "registering member via callback", rateCommitment = leaf, index = idx
await g.registerCb.get()(@[Membership(rateCommitment: leaf, index: idx)])
g.latestIndex.inc()
except CatchableError:

@ -253,10 +253,10 @@ method register*(
g.retryWrapper(gasPrice, "Failed to get gas price"):
int(await ethRpc.provider.eth_gasPrice()) * 2
let idCommitmentHex = identityCredential.idCommitment.inHex()
debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
info "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
let idCommitment = identityCredential.idCommitment.toUInt256()
let idCommitmentsToErase: seq[UInt256] = @[]
debug "registering the member",
info "registering the member",
idCommitment = idCommitment,
userMessageLimit = userMessageLimit,
idCommitmentsToErase = idCommitmentsToErase

@ -270,11 +270,11 @@ method register*(
var tsReceipt: ReceiptObject
g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
await ethRpc.getMinedTransactionReceipt(txHash)
debug "registration transaction mined", txHash = txHash
info "registration transaction mined", txHash = txHash
g.registrationTxHash = some(txHash)
# the receipt topic holds the hash of signature of the raised events
# TODO: make this robust. search within the event list for the event
debug "ts receipt", receipt = tsReceipt[]
info "ts receipt", receipt = tsReceipt[]

if tsReceipt.status.isNone():
raise newException(ValueError, "Transaction failed: status is None")

@ -285,7 +285,7 @@ method register*(

## Extract MembershipRegistered event from transaction logs (third event)
let thirdTopic = tsReceipt.logs[2].topics[0]
debug "third topic", thirdTopic = thirdTopic
info "third topic", thirdTopic = thirdTopic
if thirdTopic !=
cast[FixedBytes[32]](keccak.keccak256.digest(
"MembershipRegistered(uint256,uint256,uint32)"

@ -294,7 +294,7 @@ method register*(

## Parse MembershipRegistered event data: rateCommitment(256) || membershipRateLimit(256) || index(32)
let arguments = tsReceipt.logs[2].data
debug "tx log data", arguments = arguments
info "tx log data", arguments = arguments
let
## Extract membership index from transaction log data (big endian)
membershipIndex = UInt256.fromBytesBE(arguments[64 .. 95])
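The parsing hunk reads the third 32-byte ABI word of the MembershipRegistered log data and interprets it big-endian, following the layout comment (rateCommitment || membershipRateLimit || index). A dependency-free sketch of that extraction; nwaku itself goes through UInt256.fromBytesBE over bytes 64..95, and the proc name here is hypothetical:

# Each ABI-encoded argument occupies one 32-byte, big-endian,
# right-aligned word; the membership index lives in word 2.
proc readMembershipIndexSketch(data: openArray[byte]): uint64 =
  doAssert data.len >= 96, "expected at least three 32-byte words"
  # the low 8 bytes of word 2 (bytes 88..95) suffice for a uint64 index
  for i in 88 ..< 96:
    result = (result shl 8) or uint64(data[i])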
@ -450,7 +450,7 @@ method generateProof*(
nullifier: nullifier,
)

debug "Proof generated successfully", proof = output
info "Proof generated successfully", proof = output

waku_rln_remaining_proofs_per_epoch.dec()
waku_rln_total_generated_proofs.inc()

@ -486,7 +486,7 @@ method verifyProof*(
if not ffiOk:
return err("could not verify the proof")
else:
debug "Proof verified successfully"
info "Proof verified successfully"

return ok(validProof)
@ -583,13 +583,13 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
let idCommitmentBytes = keystoreCred.identityCredential.idCommitment
let idCommitmentUInt256 = keystoreCred.identityCredential.idCommitment.toUInt256()
let idCommitmentHex = idCommitmentBytes.inHex()
debug "Keystore idCommitment in bytes", idCommitmentBytes = idCommitmentBytes
debug "Keystore idCommitment in UInt256 ", idCommitmentUInt256 = idCommitmentUInt256
debug "Keystore idCommitment in hex ", idCommitmentHex = idCommitmentHex
info "Keystore idCommitment in bytes", idCommitmentBytes = idCommitmentBytes
info "Keystore idCommitment in UInt256 ", idCommitmentUInt256 = idCommitmentUInt256
info "Keystore idCommitment in hex ", idCommitmentHex = idCommitmentHex
let idCommitment = keystoreCred.identityCredential.idCommitment
let membershipExists = (await g.fetchMembershipStatus(idCommitment)).valueOr:
return err("the commitment does not have a membership: " & error)
debug "membershipExists", membershipExists = membershipExists
info "membershipExists", membershipExists = membershipExists

g.idCredentials = some(keystoreCred.identityCredential)
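The three log lines above print one credential in its three working representations: raw bytes, UInt256, and hex. A small sketch of the hex rendering using only std/strutils; nwaku's own inHex comes from its RLN utilities, so this proc name is a stand-in:

import std/strutils

proc inHexSketch(bytes: openArray[byte]): string =
  ## lowercase hex dump, e.g. [0xde, 0xad] -> "dead"
  for b in bytes:
    result.add(toHex(int(b), 2).toLowerAscii())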
@ -108,7 +108,7 @@ proc createRLNInstanceLocal(d = MerkleTreeDepth): RLNResult =
let res = new_circuit(merkleDepth, addr configBuffer, addr rlnInstance)
# check whether the circuit parameters are generated successfully
if (res == false):
debug "error in parameters generation"
info "error in parameters generation"
return err("error in parameters generation")
return ok(rlnInstance)
@ -193,7 +193,7 @@ proc validateMessage*(

let timeDiff = uint64(abs(currentTime - messageTime))

debug "time info",
info "time info",
currentTime = currentTime, messageTime = messageTime, msgHash = msg.hash

if timeDiff > rlnPeer.rlnMaxTimestampGap:
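validateMessage compares the message's epoch-derived time against local time and rejects anything farther apart than rlnMaxTimestampGap. A sketch of the check; the gap value is illustrative (nwaku reads it from configuration), and the absolute difference is computed branch-wise so unsigned inputs cannot underflow:

const maxTimestampGapSecs = 20'u64  # illustrative, not nwaku's default

proc timestampOkSketch(currentTime, messageTime: uint64): bool =
  let timeDiff =
    if currentTime >= messageTime: currentTime - messageTime
    else: messageTime - currentTime
  timeDiff <= maxTimestampGapSecs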
@ -123,7 +123,7 @@ proc initProtocolHandler(self: WakuStore) =
waku_store_time_seconds.set(queryDuration, ["query-db-time"])
successfulQuery = true
do:
debug "store query request rejected due rate limit exceeded",
info "store query request rejected due rate limit exceeded",
peerId = conn.peerId, limit = $self.requestRateLimiter.setting
resBuf = (rejectReposnseBuffer, "rejected")

@ -139,7 +139,7 @@ proc initProtocolHandler(self: WakuStore) =
if successfulQuery:
let writeDuration = getTime().toUnixFloat() - writeRespStartTime
waku_store_time_seconds.set(writeDuration, ["send-store-resp-time"])
debug "after sending response",
info "after sending response",
requestId = resBuf.requestId,
queryDurationSecs = queryDuration,
writeStreamDurationSecs = writeDuration
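Both store handlers guard the query path with requestRateLimiter and now log the rejection branch at info. A minimal token-bucket sketch of that kind of limiter; the real one in nwaku is more elaborate and configurable, and every name below is made up for illustration:

import std/times

type TokenBucketSketch = object
  capacity: float      # burst size
  tokens: float        # currently available requests
  refillPerSec: float  # sustained rate
  lastRefill: float    # unix time of the previous check

proc tryConsume(b: var TokenBucketSketch): bool =
  let now = epochTime()
  # top up proportionally to elapsed time, never beyond capacity
  b.tokens = min(b.capacity, b.tokens + (now - b.lastRefill) * b.refillPerSec)
  b.lastRefill = now
  if b.tokens >= 1.0:
    b.tokens -= 1.0
    true   # serve the request
  else:
    false  # reject: the "rate limit exceeded" branch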
@ -201,7 +201,7 @@ when defined(waku_exp_store_resume):
lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0))
now = getNanosecondTime(getTime().toUnixFloat())

debug "resuming with offline time window",
info "resuming with offline time window",
lastSeenTime = lastSeenTime, currentTime = now

let

@ -218,10 +218,10 @@ when defined(waku_exp_store_resume):

var res: WakuStoreResult[seq[WakuMessage]]
if peerList.isSome():
debug "trying the candidate list to fetch the history"
info "trying the candidate list to fetch the history"
res = await w.queryLoop(req, peerList.get())
else:
debug "no candidate list is provided, selecting a random peer"
info "no candidate list is provided, selecting a random peer"
# if no peerList is set then query from one of the peers stored in the peer manager
let peerOpt = w.peerManager.selectPeer(WakuLegacyStoreCodec)
if peerOpt.isNone():

@ -229,11 +229,11 @@ when defined(waku_exp_store_resume):
waku_legacy_store_errors.inc(labelValues = [peerNotFoundFailure])
return err("no suitable remote peers")

debug "a peer is selected from peer manager"
info "a peer is selected from peer manager"
res = await w.queryAll(req, peerOpt.get())

if res.isErr():
debug "failed to resume the history"
info "failed to resume the history"
return err("failed to resume the history")

# Save the retrieved messages in the store
@ -142,7 +142,7 @@ proc initProtocolHandler(ws: WakuStore) =
waku_legacy_store_time_seconds.set(queryDuration, ["query-db-time"])
successfulQuery = true
do:
debug "Legacy store query request rejected due rate limit exceeded",
info "Legacy store query request rejected due rate limit exceeded",
peerId = conn.peerId, limit = $ws.requestRateLimiter.setting
resBuf = (rejectResponseBuf, "rejected")

@ -157,7 +157,7 @@ proc initProtocolHandler(ws: WakuStore) =
if successfulQuery:
let writeDuration = getTime().toUnixFloat() - writeRespStartTime
waku_legacy_store_time_seconds.set(writeDuration, ["send-store-resp-time"])
debug "after sending response",
info "after sending response",
requestId = resBuf.requestId,
queryDurationSecs = queryDuration,
writeStreamDurationSecs = writeDuration
@ -318,7 +318,7 @@ proc storeSynchronization*(
let conn: Connection = connOpt.valueOr:
return err("fail to dial remote " & $peer.peerId)

debug "sync session initialized",
info "sync session initialized",
local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId

(

@ -331,7 +331,7 @@ proc storeSynchronization*(

return err("sync request error: " & error)

debug "sync session ended gracefully",
info "sync session ended gracefully",
local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId

return ok()

@ -354,7 +354,7 @@ proc initFillStorage(
direction: PagingDirection.FORWARD,
)

debug "initial storage filling started"
info "initial storage filling started"

var storage = SeqStorage.new(DefaultStorageCap)

@ -376,7 +376,7 @@ proc initFillStorage(

query.cursor = response.cursor

debug "initial storage filling done", elements = storage.length()
info "initial storage filling done", elements = storage.length()

return ok(storage)

@ -430,21 +430,21 @@ proc new*(
return ok(sync)
proc periodicSync(self: SyncReconciliation) {.async.} =
debug "periodic sync initialized", interval = $self.syncInterval
info "periodic sync initialized", interval = $self.syncInterval

while true: # infinite loop
await sleepAsync(self.syncInterval)

debug "periodic sync started"
info "periodic sync started"

(await self.storeSynchronization()).isOkOr:
error "periodic sync failed", err = error
continue

debug "periodic sync done"
info "periodic sync done"

proc periodicPrune(self: SyncReconciliation) {.async.} =
debug "periodic prune initialized", interval = $self.syncInterval
info "periodic prune initialized", interval = $self.syncInterval

# preventing sync and prune loops of happening at the same time.
await sleepAsync((self.syncInterval div 2))

@ -452,7 +452,7 @@ proc periodicPrune(self: SyncReconciliation) {.async.} =
while true: # infinite loop
await sleepAsync(self.syncInterval)

debug "periodic prune started"
info "periodic prune started"

let time = getNowInNanosecondTime() - self.syncRange.nanos

@ -460,7 +460,7 @@ proc periodicPrune(self: SyncReconciliation) {.async.} =

total_messages_cached.set(self.storage.length())

debug "periodic prune done", elements_pruned = count
info "periodic prune done", elements_pruned = count

proc idsReceiverLoop(self: SyncReconciliation) {.async.} =
while true: # infinite loop
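periodicPrune sleeps for half the interval once before entering its loop, so its wake-ups interleave with periodicSync's instead of coinciding. The trick in isolation, assuming chronos; the prune work itself is elided and the proc name is a stand-in:

import chronos

proc periodicPruneSketch(interval: Duration) {.async.} =
  # one-time half-interval delay: prune now ticks exactly between syncs
  await sleepAsync(interval div 2)
  while true:
    await sleepAsync(interval)
    discard # prune work would run here

Both loops then sleep for the same full interval, so the half-interval phase offset persists indefinitely.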
@ -74,7 +74,7 @@ proc openConnection(
let conn: Connection = connOpt.valueOr:
return err("fail to dial remote " & $peerId)

debug "transfer session initialized",
info "transfer session initialized",
local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId

return ok(conn)
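The dial sites above all use the `valueOr` early-return idiom from nim-results: unwrap on success, or run a block in which `error` is injected. A self-contained sketch; both procs are hypothetical:

import results

proc dialSketch(peerId: string): Result[string, string] =
  if peerId.len == 0:
    err("empty peer id")
  else:
    ok("connection to " & peerId)

proc openSessionSketch(peerId: string): Result[string, string] =
  # early return on failure, with `error` injected by valueOr
  let conn = dialSketch(peerId).valueOr:
    return err("fail to dial remote " & peerId)
  ok(conn)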
@ -103,7 +103,7 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
## sanity check, should not be possible
self.outSessions[peerId].isClosedRemotely:
## quite possibly remote end has closed the connection, believing transfer to be done
debug "opening transfer connection to remote peer",
info "opening transfer connection to remote peer",
my_peer_id = self.peerManager.switch.peerInfo.peerId, remote_peer_id = peerId

let connection = (await self.openConnection(peerId)).valueOr:

@ -188,7 +188,7 @@ proc initProtocolHandler(self: SyncTransfer) =

await conn.close()

debug "transfer session ended",
info "transfer session ended",
local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId

return