Mirror of https://github.com/logos-messaging/logos-messaging-nim.git (synced 2026-01-04 06:53:12 +00:00)
chore(refactoring): replace some isErr usage with better alternatives (#3615)

* Closes: apply isOkOr || valueOr approach (#1969)

parent: 36bc01ac0d
commit: 7b580dbf39
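The rewrites in the hunks below all follow the same combinator pattern from the results library used by this codebase: `valueOr` binds the ok-value and runs its block (with `error` injected) only on failure, while `isOkOr` keeps just the failure path when the value itself is unused. The following is an illustrative sketch only, assuming the `results` package; `findUser` is a hypothetical proc, and `errorOr` (the mirror image of `valueOr`, used in some hunks to bind the error and treat success as the early exit) is assumed to come from the results version in use or a project-local template.

import results

proc findUser(id: int): Result[string, string] =
  # Hypothetical example proc, not part of this commit.
  if id == 1:
    ok("alice")
  else:
    err("no such user")

# Before: explicit isErr()/get() bookkeeping around the Result.
let res = findUser(1)
if res.isErr():
  echo "lookup failed: ", res.error
  quit(QuitFailure)
let name = res.get()

# After: valueOr binds the value directly; the block runs only on
# failure, sees the injected `error`, and must leave scope
# (return, raise, quit, continue, ...).
let name2 = findUser(1).valueOr:
  echo "lookup failed: ", error
  quit(QuitFailure)

# isOkOr: when only the error path matters and no value is bound.
findUser(1).isOkOr:
  echo "lookup failed: ", error
  quit(QuitFailure)

echo name, " ", name2

The gain is that the happy path stays at the top indentation level and there is no window in which get() can be called on an error result.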
@@ -317,27 +317,19 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
   if conf.logLevel != LogLevel.NONE:
     setLogLevel(conf.logLevel)
 
-  let natRes = setupNat(
+  let (extIp, extTcpPort, extUdpPort) = setupNat(
     conf.nat,
     clientId,
     Port(uint16(conf.tcpPort) + conf.portsShift),
     Port(uint16(conf.udpPort) + conf.portsShift),
-  )
-
-  if natRes.isErr():
-    raise newException(ValueError, "setupNat error " & natRes.error)
-
-  let (extIp, extTcpPort, extUdpPort) = natRes.get()
+  ).valueOr:
+    raise newException(ValueError, "setupNat error " & error)
 
   var enrBuilder = EnrBuilder.init(nodeKey)
 
-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)
 
   let node = block:
     var builder = WakuNodeBuilder.init()
@@ -126,23 +126,20 @@ proc toMatterbridge(
 
   assert chat2Msg.isOk
 
-  let postRes = cmb.mbClient.postMessage(
-    text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick
-  )
-
-  if postRes.isErr() or (postRes[] == false):
+  if not cmb.mbClient
+  .postMessage(text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick)
+  .containsValue(true):
     chat2_mb_dropped.inc(labelValues = ["duplicate"])
     error "Matterbridge host unreachable. Dropping message."
 
 proc pollMatterbridge(cmb: Chat2MatterBridge, handler: MbMessageHandler) {.async.} =
   while cmb.running:
-    if (let getRes = cmb.mbClient.getMessages(); getRes.isOk()):
-      for jsonNode in getRes[]:
-        await handler(jsonNode)
-    else:
+    let msg = cmb.mbClient.getMessages().valueOr:
       error "Matterbridge host unreachable. Sleeping before retrying."
       await sleepAsync(chronos.seconds(10))
+      continue
+
+    for jsonNode in msg:
+      await handler(jsonNode)
 
     await sleepAsync(cmb.pollPeriod)
 
 ##############
@@ -252,25 +249,21 @@ when isMainModule:
   if conf.logLevel != LogLevel.NONE:
     setLogLevel(conf.logLevel)
 
-  let natRes = setupNat(
+  let (nodev2ExtIp, nodev2ExtPort, _) = setupNat(
     conf.nat,
     clientId,
     Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
     Port(uint16(conf.udpPort) + conf.portsShift),
-  )
-  if natRes.isErr():
-    error "Error in setupNat", error = natRes.error
+  ).valueOr:
+    raise newException(ValueError, "setupNat error " & error)
 
-  # Load address configuration
-  let
-    (nodev2ExtIp, nodev2ExtPort, _) = natRes.get()
-    ## The following heuristic assumes that, in absence of manual
-    ## config, the external port is the same as the bind port.
-    extPort =
-      if nodev2ExtIp.isSome() and nodev2ExtPort.isNone():
-        some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift))
-      else:
-        nodev2ExtPort
+  ## The following heuristic assumes that, in absence of manual
+  ## config, the external port is the same as the bind port.
+  let extPort =
+    if nodev2ExtIp.isSome() and nodev2ExtPort.isNone():
+      some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift))
+    else:
+      nodev2ExtPort
 
   let bridge = Chat2Matterbridge.new(
     mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)),
@@ -175,18 +175,16 @@ proc startMetricsServer(
 ): Result[MetricsHttpServerRef, string] =
   info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort
 
-  let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort)
-  if metricsServerRes.isErr():
-    return err("metrics HTTP server start failed: " & $metricsServerRes.error)
-
-  let server = metricsServerRes.value
+  let server = MetricsHttpServerRef.new($serverIp, serverPort).valueOr:
+    return err("metrics HTTP server start failed: " & $error)
 
   try:
     waitFor server.start()
   except CatchableError:
     return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())
 
   info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort
-  ok(metricsServerRes.value)
+  ok(server)
 
 proc publish(c: Chat, line: string) {.async.} =
   # First create a Chat2Message protobuf with this line of text
@@ -333,57 +331,56 @@ proc maintainSubscription(
   const maxFailedServiceNodeSwitches = 10
   var noFailedSubscribes = 0
   var noFailedServiceNodeSwitches = 0
+  const RetryWaitMs = 2.seconds # Quick retry interval
+  const SubscriptionMaintenanceMs = 30.seconds # Subscription maintenance interval
   while true:
     info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer)
     # First use filter-ping to check if we have an active subscription
-    let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer)
-    if pingRes.isErr():
-      # No subscription found. Let's subscribe.
-      error "ping failed.", err = pingRes.error
-      trace "no subscription found. Sending subscribe request"
+    let pingErr = (await wakuNode.wakuFilterClient.ping(actualFilterPeer)).errorOr:
+      await sleepAsync(SubscriptionMaintenanceMs)
+      info "subscription is live."
+      continue
 
-      let subscribeRes = await wakuNode.filterSubscribe(
+    # No subscription found. Let's subscribe.
+    error "ping failed.", error = pingErr
+    trace "no subscription found. Sending subscribe request"
+
+    let subscribeErr = (
+      await wakuNode.filterSubscribe(
         some(filterPubsubTopic), filterContentTopic, actualFilterPeer
       )
+    ).errorOr:
+      await sleepAsync(SubscriptionMaintenanceMs)
+      if noFailedSubscribes > 0:
+        noFailedSubscribes -= 1
+      notice "subscribe request successful."
+      continue
 
-      if subscribeRes.isErr():
-        noFailedSubscribes += 1
-        error "Subscribe request failed.",
-          err = subscribeRes.error,
-          peer = actualFilterPeer,
-          failCount = noFailedSubscribes
+    noFailedSubscribes += 1
+    error "Subscribe request failed.",
+      error = subscribeErr, peer = actualFilterPeer, failCount = noFailedSubscribes
 
-        # TODO: disconnet from failed actualFilterPeer
-        # asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
-        # wakunode.peerManager.peerStore.delete(actualFilterPeer)
+    # TODO: disconnet from failed actualFilterPeer
+    # asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
+    # wakunode.peerManager.peerStore.delete(actualFilterPeer)
 
-        if noFailedSubscribes < maxFailedSubscribes:
-          await sleepAsync(2000) # Wait a bit before retrying
-          continue
-        elif not preventPeerSwitch:
-          let peerOpt = selectRandomServicePeer(
-            wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
-          )
-          peerOpt.isOkOr:
-            error "Failed to find new service peer. Exiting."
-            noFailedServiceNodeSwitches += 1
-            break
+    if noFailedSubscribes < maxFailedSubscribes:
+      await sleepAsync(RetryWaitMs) # Wait a bit before retrying
+    elif not preventPeerSwitch:
+      # try again with new peer without delay
+      let actualFilterPeer = selectRandomServicePeer(
+        wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
+      ).valueOr:
+        error "Failed to find new service peer. Exiting."
+        noFailedServiceNodeSwitches += 1
+        break
 
-          actualFilterPeer = peerOpt.get()
-          info "Found new peer for codec",
-            codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
+      info "Found new peer for codec",
+        codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
 
-          noFailedSubscribes = 0
-          continue # try again with new peer without delay
-      else:
-        if noFailedSubscribes > 0:
-          noFailedSubscribes -= 1
-
-        notice "subscribe request successful."
-    else:
-      info "subscription is live."
-
-    await sleepAsync(30000) # Subscription maintenance interval
+      noFailedSubscribes = 0
+    else:
+      await sleepAsync(SubscriptionMaintenanceMs)
 
 {.pop.}
 # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
@@ -401,17 +398,13 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
   if conf.logLevel != LogLevel.NONE:
     setLogLevel(conf.logLevel)
 
-  let natRes = setupNat(
+  let (extIp, extTcpPort, extUdpPort) = setupNat(
     conf.nat,
     clientId,
     Port(uint16(conf.tcpPort) + conf.portsShift),
     Port(uint16(conf.udpPort) + conf.portsShift),
-  )
-
-  if natRes.isErr():
-    raise newException(ValueError, "setupNat error " & natRes.error)
-
-  let (extIp, extTcpPort, extUdpPort) = natRes.get()
+  ).valueOr:
+    raise newException(ValueError, "setupNat error " & error)
 
   var enrBuilder = EnrBuilder.init(nodeKey)
 
@@ -421,13 +414,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
     error "failed to add sharded topics to ENR", error = error
     quit(QuitFailure)
 
-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)
 
   let node = block:
     var builder = WakuNodeBuilder.init()
@@ -59,7 +59,4 @@ proc logSelfPeers*(pm: PeerManager) =
     {allPeers(pm)}
   *------------------------------------------------------------------------------------------*""".fmt()
 
-  if printable.isErr():
-    echo "Error while printing statistics: " & printable.error().msg
-  else:
-    echo printable.get()
+  echo printable.valueOr("Error while printing statistics: " & error.msg)
@@ -49,13 +49,10 @@ when isMainModule:
 
   const versionString = "version / git commit hash: " & waku_factory.git_version
 
-  let confRes = LiteProtocolTesterConf.load(version = versionString)
-  if confRes.isErr():
-    error "failure while loading the configuration", error = confRes.error
+  let conf = LiteProtocolTesterConf.load(version = versionString).valueOr:
+    error "failure while loading the configuration", error = error
     quit(QuitFailure)
 
-  var conf = confRes.get()
-
   ## Logging setup
   logging.setupLog(conf.logLevel, conf.logFormat)
 
@@ -187,7 +184,7 @@ when isMainModule:
       error "Service node not found in time via PX"
       quit(QuitFailure)
 
-    if futForServiceNode.read().isErr():
+    futForServiceNode.read().isOkOr:
       error "Service node for test not found via PX"
       quit(QuitFailure)
 
@@ -89,10 +89,7 @@ proc reportSentMessages() =
     |{numMessagesToSend+failedToSendCount:>11} |{messagesSent:>11} |{failedToSendCount:>11} |
   *----------------------------------------*""".fmt()
 
-  if report.isErr:
-    echo "Error while printing statistics"
-  else:
-    echo report.get()
+  echo report.valueOr("Error while printing statistics")
 
   echo "*--------------------------------------------------------------------------------------------------*"
   echo "| Failure cause | count |"
@@ -54,64 +54,65 @@ proc maintainSubscription(
   var noFailedSubscribes = 0
   var noFailedServiceNodeSwitches = 0
   var isFirstPingOnNewPeer = true
+  const RetryWaitMs = 2.seconds # Quick retry interval
+  const SubscriptionMaintenanceMs = 30.seconds # Subscription maintenance interval
   while true:
     info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer)
     # First use filter-ping to check if we have an active subscription
-    let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer)
-    if pingRes.isErr():
-      if isFirstPingOnNewPeer == false:
-        # Very first ping expected to fail as we have not yet subscribed at all
-        lpt_receiver_lost_subscription_count.inc()
-      isFirstPingOnNewPeer = false
-      # No subscription found. Let's subscribe.
-      error "ping failed.", err = pingRes.error
-      trace "no subscription found. Sending subscribe request"
+    let pingErr = (await wakuNode.wakuFilterClient.ping(actualFilterPeer)).errorOr:
+      await sleepAsync(SubscriptionMaintenanceMs)
+      info "subscription is live."
+      continue
 
-      let subscribeRes = await wakuNode.filterSubscribe(
+    if isFirstPingOnNewPeer == false:
+      # Very first ping expected to fail as we have not yet subscribed at all
+      lpt_receiver_lost_subscription_count.inc()
+    isFirstPingOnNewPeer = false
+    # No subscription found. Let's subscribe.
+    error "ping failed.", error = pingErr
+    trace "no subscription found. Sending subscribe request"
+
+    let subscribeErr = (
+      await wakuNode.filterSubscribe(
         some(filterPubsubTopic), filterContentTopic, actualFilterPeer
       )
+    ).errorOr:
+      await sleepAsync(SubscriptionMaintenanceMs)
+      if noFailedSubscribes > 0:
+        noFailedSubscribes -= 1
+      notice "subscribe request successful."
+      continue
 
-      if subscribeRes.isErr():
-        noFailedSubscribes += 1
-        lpt_service_peer_failure_count.inc(
-          labelValues = ["receiver", actualFilterPeer.getAgent()]
-        )
-        error "Subscribe request failed.",
-          err = subscribeRes.error,
-          peer = actualFilterPeer,
-          failCount = noFailedSubscribes
+    noFailedSubscribes += 1
+    lpt_service_peer_failure_count.inc(
+      labelValues = ["receiver", actualFilterPeer.getAgent()]
+    )
+    error "Subscribe request failed.",
+      err = subscribeErr, peer = actualFilterPeer, failCount = noFailedSubscribes
 
-        # TODO: disconnet from failed actualFilterPeer
-        # asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
-        # wakunode.peerManager.peerStore.delete(actualFilterPeer)
+    # TODO: disconnet from failed actualFilterPeer
+    # asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
+    # wakunode.peerManager.peerStore.delete(actualFilterPeer)
 
-        if noFailedSubscribes < maxFailedSubscribes:
-          await sleepAsync(2.seconds) # Wait a bit before retrying
-          continue
-        elif not preventPeerSwitch:
-          actualFilterPeer = selectRandomServicePeer(
-            wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
-          ).valueOr:
-            error "Failed to find new service peer. Exiting."
-            noFailedServiceNodeSwitches += 1
-            break
+    if noFailedSubscribes < maxFailedSubscribes:
+      await sleepAsync(RetryWaitMs) # Wait a bit before retrying
+    elif not preventPeerSwitch:
+      # try again with new peer without delay
+      actualFilterPeer = selectRandomServicePeer(
+        wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
+      ).valueOr:
+        error "Failed to find new service peer. Exiting."
+        noFailedServiceNodeSwitches += 1
+        break
 
-          info "Found new peer for codec",
-            codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
+      info "Found new peer for codec",
+        codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
 
-          noFailedSubscribes = 0
-          lpt_change_service_peer_count.inc(labelValues = ["receiver"])
-          isFirstPingOnNewPeer = true
-          continue # try again with new peer without delay
-      else:
-        if noFailedSubscribes > 0:
-          noFailedSubscribes -= 1
-
-        notice "subscribe request successful."
-    else:
-      info "subscription is live."
-
-    await sleepAsync(30.seconds) # Subscription maintenance interval
+      noFailedSubscribes = 0
+      lpt_change_service_peer_count.inc(labelValues = ["receiver"])
+      isFirstPingOnNewPeer = true
+    else:
+      await sleepAsync(SubscriptionMaintenanceMs)
 
 proc setupAndListen*(
   wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
@@ -181,7 +181,7 @@ proc pxLookupServiceNode*(
     if not await futPeers.withTimeout(30.seconds):
       notice "Cannot get peers from PX", round = 5 - trialCount
     else:
-      if futPeers.value().isErr():
+      futPeers.value().isOkOr:
         info "PeerExchange reported error", error = futPeers.read().error
         return err()
 
@@ -114,12 +114,7 @@ proc addMessage*(
   if not self.contains(peerId):
     self[peerId] = Statistics.init()
 
-  let shortSenderId = block:
-    let senderPeer = PeerId.init(msg.sender)
-    if senderPeer.isErr():
-      msg.sender
-    else:
-      senderPeer.get().shortLog()
+  let shortSenderId = PeerId.init(msg.sender).map(p => p.shortLog()).valueOr(msg.sender)
 
   discard catch:
     self[peerId].addMessage(shortSenderId, msg, msgHash)
@@ -220,10 +215,7 @@ proc echoStat*(self: Statistics, peerId: string) =
     | {self.missingIndices()} |
   *------------------------------------------------------------------------------------------*""".fmt()
 
-  if printable.isErr():
-    echo "Error while printing statistics: " & printable.error().msg
-  else:
-    echo printable.get()
+  echo printable.valueOr("Error while printing statistics: " & error.msg)
 
 proc jsonStat*(self: Statistics): string =
   let minL, maxL, avgL = self.calcLatency()
@@ -243,20 +235,18 @@ proc jsonStat*(self: Statistics): string =
     }},
     "lostIndices": {self.missingIndices()}
   }}""".fmt()
-  if json.isErr:
-    return "{\"result:\": \"" & json.error.msg & "\"}"
-
-  return json.get()
+  return json.valueOr("{\"result:\": \"" & error.msg & "\"}")
 
 proc echoStats*(self: var PerPeerStatistics) =
   for peerId, stats in self.pairs:
     let peerLine = catch:
       "Receiver statistics from peer {peerId}".fmt()
-    if peerLine.isErr:
+    peerLine.isOkOr:
       echo "Error while printing statistics"
-    else:
-      echo peerLine.get()
+      continue
+    echo peerLine.get()
     stats.echoStat(peerId)
 
 proc jsonStats*(self: PerPeerStatistics): string =
   try:
@@ -443,12 +443,8 @@ proc initAndStartApp(
     error "failed to add sharded topics to ENR", error = error
     return err("failed to add sharded topics to ENR: " & $error)
 
-  let recordRes = builder.build()
-  let record =
-    if recordRes.isErr():
-      return err("cannot build record: " & $recordRes.error)
-    else:
-      recordRes.get()
+  let record = builder.build().valueOr:
+    return err("cannot build record: " & $error)
 
   var nodeBuilder = WakuNodeBuilder.init()
 
@@ -461,21 +457,15 @@ proc initAndStartApp(
     relayServiceRatio = "13.33:86.67",
     shardAware = true,
   )
-  let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort)
-  if res.isErr():
-    return err("node building error" & $res.error)
+  nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort).isOkOr:
+    return err("node building error" & $error)
 
-  let nodeRes = nodeBuilder.build()
-  let node =
-    if nodeRes.isErr():
-      return err("node building error" & $res.error)
-    else:
-      nodeRes.get()
+  let node = nodeBuilder.build().valueOr:
+    return err("node building error" & $error)
 
-  var discv5BootstrapEnrsRes = await getBootstrapFromDiscDns(conf)
-  if discv5BootstrapEnrsRes.isErr():
-    error("failed discovering peers from DNS")
-  var discv5BootstrapEnrs = discv5BootstrapEnrsRes.get()
+  var discv5BootstrapEnrs = (await getBootstrapFromDiscDns(conf)).valueOr:
+    error("failed discovering peers from DNS")
+    quit(QuitFailure)
 
   # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
   for enrUri in conf.bootstrapNodes:
@@ -553,12 +543,10 @@ proc subscribeAndHandleMessages(
 when isMainModule:
   # known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
   {.pop.}
-  let confRes = NetworkMonitorConf.loadConfig()
-  if confRes.isErr():
-    error "could not load cli variables", err = confRes.error
-    quit(1)
-
-  var conf = confRes.get()
+  var conf = NetworkMonitorConf.loadConfig().valueOr:
+    error "could not load cli variables", error = error
+    quit(QuitFailure)
 
   info "cli flags", conf = conf
 
   if conf.clusterId == 1:
@@ -586,37 +574,30 @@ when isMainModule:
 
   # start metrics server
   if conf.metricsServer:
-    let res =
-      startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort))
-    if res.isErr():
-      error "could not start metrics server", err = res.error
-      quit(1)
+    startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort)).isOkOr:
+      error "could not start metrics server", error = error
+      quit(QuitFailure)
 
   # start rest server for custom metrics
-  let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic)
-  if res.isErr():
-    error "could not start rest api server", err = res.error
-    quit(1)
+  startRestApiServer(conf, allPeersInfo, msgPerContentTopic).isOkOr:
+    error "could not start rest api server", error = error
+    quit(QuitFailure)
 
   # create a rest client
-  let clientRest =
-    RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2))
-  if clientRest.isErr():
-    error "could not start rest api client", err = res.error
-    quit(1)
-  let restClient = clientRest.get()
+  let restClient = RestClientRef.new(
+    url = "http://ip-api.com", connectTimeout = ctime.seconds(2)
+  ).valueOr:
+    error "could not start rest api client", error = error
+    quit(QuitFailure)
 
   # start waku node
-  let nodeRes = waitFor initAndStartApp(conf)
-  if nodeRes.isErr():
-    error "could not start node"
-    quit 1
-
-  let (node, discv5) = nodeRes.get()
+  let (node, discv5) = (waitFor initAndStartApp(conf)).valueOr:
+    error "could not start node", error = error
+    quit(QuitFailure)
 
   (waitFor node.mountRelay()).isOkOr:
-    error "failed to mount waku relay protocol: ", err = error
-    quit 1
+    error "failed to mount waku relay protocol: ", error = error
+    quit(QuitFailure)
 
   waitFor node.mountLibp2pPing()
 
@@ -640,12 +621,12 @@ when isMainModule:
     try:
       waitFor node.mountRlnRelay(rlnConf)
     except CatchableError:
-      error "failed to setup RLN", err = getCurrentExceptionMsg()
-      quit 1
+      error "failed to setup RLN", error = getCurrentExceptionMsg()
+      quit(QuitFailure)
 
   node.mountMetadata(conf.clusterId, conf.shards).isOkOr:
-    error "failed to mount waku metadata protocol: ", err = error
-    quit 1
+    error "failed to mount waku metadata protocol: ", error = error
+    quit(QuitFailure)
 
   for shard in conf.shards:
     # Subscribe the node to the shards, to count messages
@@ -181,13 +181,10 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
     protocols = conf.protocols,
     logLevel = conf.logLevel
 
-  let peerRes = parsePeerInfo(conf.address)
-  if peerRes.isErr():
-    error "Couldn't parse 'conf.address'", error = peerRes.error
+  let peer = parsePeerInfo(conf.address).valueOr:
+    error "Couldn't parse 'conf.address'", error = error
     quit(QuitFailure)
 
-  let peer = peerRes.value
-
   let
     nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
     bindIp = parseIpAddress("0.0.0.0")
@@ -225,13 +222,9 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
     error "could not initialize ENR with shards", error
     quit(QuitFailure)
 
-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)
 
   if isWss and
       (conf.websocketSecureKeyPath.len == 0 or conf.websocketSecureCertPath.len == 0):
@@ -62,13 +62,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
       "Building ENR with relay sharding failed"
     )
 
-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)
 
   var builder = WakuNodeBuilder.init()
   builder.withNodeKey(nodeKey)
@@ -92,20 +88,18 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
   while true:
     notice "maintaining subscription"
     # First use filter-ping to check if we have an active subscription
-    let pingRes = await node.wakuFilterClient.ping(filterPeer)
-    if pingRes.isErr():
+    if (await node.wakuFilterClient.ping(filterPeer)).isErr():
       # No subscription found. Let's subscribe.
       notice "no subscription found. Sending subscribe request"
 
-      let subscribeRes = await node.wakuFilterClient.subscribe(
-        filterPeer, FilterPubsubTopic, @[FilterContentTopic]
-      )
-
-      if subscribeRes.isErr():
-        notice "subscribe request failed. Quitting.", err = subscribeRes.error
+      (
+        await node.wakuFilterClient.subscribe(
+          filterPeer, FilterPubsubTopic, @[FilterContentTopic]
+        )
+      ).isOkOr:
+        notice "subscribe request failed. Quitting.", error = error
         break
-      else:
-        notice "subscribe request successful."
+      notice "subscribe request successful."
     else:
       notice "subscription found."
 
@@ -54,13 +54,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
       "Building ENR with relay sharding failed"
     )
 
-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)
 
   var builder = WakuNodeBuilder.init()
   builder.withNodeKey(nodeKey)
@@ -49,13 +49,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
 
   var enrBuilder = EnrBuilder.init(nodeKey)
 
-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)
 
   var builder = WakuNodeBuilder.init()
   builder.withNodeKey(nodeKey)
@@ -47,13 +47,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
 
   var enrBuilder = EnrBuilder.init(nodeKey)
 
-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create enr record", error = recordRes.error
-      quit(QuitFailure)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    quit(QuitFailure)
 
   var builder = WakuNodeBuilder.init()
   builder.withNodeKey(nodeKey)
@@ -18,13 +18,10 @@ proc setup*(): Waku =
   const versionString = "version / git commit hash: " & waku.git_version
   let rng = crypto.newRng()
 
-  let confRes = WakuNodeConf.load(version = versionString)
-  if confRes.isErr():
-    error "failure while loading the configuration", error = $confRes.error
+  let conf = WakuNodeConf.load(version = versionString).valueOr:
+    error "failure while loading the configuration", error = $error
     quit(QuitFailure)
 
-  var conf = confRes.get()
-
   let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
   if len(conf.shards) != 0:
     conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16])
@@ -95,61 +95,54 @@ proc sendResponse*(
 type SCPHandler* = proc(msg: WakuMessage): Future[void] {.async.}
 
 proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler =
   let handler = proc(msg: WakuMessage): Future[void] {.async.} =
-    let decodedRes = WakuStealthCommitmentMsg.decode(msg.payload)
-    if decodedRes.isErr():
-      error "could not decode scp message"
-      quit(QuitFailure)
-    let decoded = decodedRes.get()
+    let decoded = WakuStealthCommitmentMsg.decode(msg.payload).valueOr:
+      error "could not decode scp message", error = error
+      quit(QuitFailure)
     if decoded.request == false:
       # check if the generated stealth commitment belongs to the receiver
       # if not, continue
-      let ephemeralPubKeyRes =
-        deserialize(StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get())
-      if ephemeralPubKeyRes.isErr():
-        error "could not deserialize ephemeral public key: ",
-          err = ephemeralPubKeyRes.error()
-      let ephemeralPubKey = ephemeralPubKeyRes.get()
-      let stealthCommitmentPrivateKeyRes = StealthCommitmentFFI.generateStealthPrivateKey(
+      let ephemeralPubKey = deserialize(
+        StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get()
+      ).valueOr:
+        error "could not deserialize ephemeral public key: ", error = error
+        quit(QuitFailure)
+      let stealthCommitmentPrivateKey = StealthCommitmentFFI.generateStealthPrivateKey(
         ephemeralPubKey,
         self.spendingKeyPair.privateKey,
         self.viewingKeyPair.privateKey,
         decoded.viewTag.get(),
-      )
-      if stealthCommitmentPrivateKeyRes.isErr():
-        info "received stealth commitment does not belong to the receiver: ",
-          err = stealthCommitmentPrivateKeyRes.error()
-
-      let stealthCommitmentPrivateKey = stealthCommitmentPrivateKeyRes.get()
+      ).valueOr:
+        error "received stealth commitment does not belong to the receiver: ",
+          error = error
+        quit(QuitFailure)
+
       info "received stealth commitment belongs to the receiver: ",
         stealthCommitmentPrivateKey,
         stealthCommitmentPubKey = decoded.stealthCommitment.get()
       return
     # send response
     # deseralize the keys
-    let spendingKeyRes =
-      deserialize(StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get())
-    if spendingKeyRes.isErr():
-      error "could not deserialize spending key: ", err = spendingKeyRes.error()
-    let spendingKey = spendingKeyRes.get()
-    let viewingKeyRes =
-      (deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get()))
-    if viewingKeyRes.isErr():
-      error "could not deserialize viewing key: ", err = viewingKeyRes.error()
-    let viewingKey = viewingKeyRes.get()
+    let spendingKey = deserialize(
+      StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get()
+    ).valueOr:
+      error "could not deserialize spending key: ", error = error
+      quit(QuitFailure)
+    let viewingKey = (
+      deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get())
+    ).valueOr:
+      error "could not deserialize viewing key: ", error = error
+      quit(QuitFailure)
 
     info "received spending key", spendingKey
     info "received viewing key", viewingKey
-    let ephemeralKeyPairRes = StealthCommitmentFFI.generateKeyPair()
-    if ephemeralKeyPairRes.isErr():
-      error "could not generate ephemeral key pair: ", err = ephemeralKeyPairRes.error()
-    let ephemeralKeyPair = ephemeralKeyPairRes.get()
+    let ephemeralKeyPair = StealthCommitmentFFI.generateKeyPair().valueOr:
+      error "could not generate ephemeral key pair: ", error = error
+      quit(QuitFailure)
 
-    let stealthCommitmentRes = StealthCommitmentFFI.generateStealthCommitment(
+    let stealthCommitment = StealthCommitmentFFI.generateStealthCommitment(
       spendingKey, viewingKey, ephemeralKeyPair.privateKey
-    )
-    if stealthCommitmentRes.isErr():
-      error "could not generate stealth commitment: ",
-        err = stealthCommitmentRes.error()
-    let stealthCommitment = stealthCommitmentRes.get()
+    ).valueOr:
+      error "could not generate stealth commitment: ", error = error
+      quit(QuitFailure)
 
     (
       await self.sendResponse(
@@ -157,7 +150,7 @@ proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler =
         stealthCommitment.viewTag,
       )
     ).isOkOr:
-      error "could not send response: ", err = $error
+      error "could not send response: ", error = $error
 
   return handler
 
@@ -96,18 +96,16 @@ proc sendRequestToWakuThread*(
     deallocShared(req)
     return err("Couldn't send a request to the waku thread: " & $req[])
 
-  let fireSyncRes = ctx.reqSignal.fireSync()
-  if fireSyncRes.isErr():
+  let fireSync = ctx.reqSignal.fireSync().valueOr:
     deallocShared(req)
-    return err("failed fireSync: " & $fireSyncRes.error)
+    return err("failed fireSync: " & $error)
 
-  if fireSyncRes.get() == false:
+  if not fireSync:
     deallocShared(req)
     return err("Couldn't fireSync in time")
 
   ## wait until the Waku Thread properly received the request
-  let res = ctx.reqReceivedSignal.waitSync(timeout)
-  if res.isErr():
+  ctx.reqReceivedSignal.waitSync(timeout).isOkOr:
     deallocShared(req)
     return err("Couldn't receive reqReceivedSignal signal")
 
@@ -176,9 +174,8 @@ proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} =
       ## Handle the request
       asyncSpawn WakuThreadRequest.process(request, addr waku)
 
-      let fireRes = ctx.reqReceivedSignal.fireSync()
-      if fireRes.isErr():
-        error "could not fireSync back to requester thread", error = fireRes.error
+      ctx.reqReceivedSignal.fireSync().isOkOr:
+        error "could not fireSync back to requester thread", error = error
 
   waitFor wakuRun(ctx)
 
@@ -44,13 +44,11 @@ proc process*(
   let pingFuture = ping()
   let pingRTT: Duration =
     if self[].timeout == chronos.milliseconds(0): # No timeout expected
-      (await pingFuture).valueOr:
-        return err(error)
+      ?(await pingFuture)
     else:
       let timedOut = not (await pingFuture.withTimeout(self[].timeout))
       if timedOut:
         return err("ping timed out")
-      pingFuture.read().valueOr:
-        return err(error)
+      ?(pingFuture.read())
 
   ok($(pingRTT.nanos))
@@ -485,7 +485,7 @@ procSuite "Waku Rest API - Store v3":
       $response.contentType == $MIMETYPE_TEXT
       response.data.messages.len == 0
       response.data.statusDesc ==
-        "Failed parsing remote peer info [MultiAddress.init [multiaddress: Invalid MultiAddress, must start with `/`]]"
+        "Failed parsing remote peer info: MultiAddress.init [multiaddress: Invalid MultiAddress, must start with `/`]"
 
     await restServer.stop()
     await restServer.closeWait()
@@ -725,12 +725,11 @@ proc parseCmdArg*(T: type ProtectedShard, p: string): T =
     raise newException(
       ValueError, "Invalid format for protected shard expected shard:publickey"
     )
-  let publicKey = secp256k1.SkPublicKey.fromHex(elements[1])
-  if publicKey.isErr:
+  let publicKey = secp256k1.SkPublicKey.fromHex(elements[1]).valueOr:
     raise newException(ValueError, "Invalid public key")
 
   if isNumber(elements[0]):
-    return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), key: publicKey.get())
+    return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), key: publicKey)
 
   # TODO: Remove when removing protected-topic configuration
   let shard = RelayShard.parse(elements[0]).valueOr:
@@ -738,7 +737,7 @@ proc parseCmdArg*(T: type ProtectedShard, p: string): T =
       ValueError,
       "Invalid pubsub topic. Pubsub topics must be in the format /waku/2/rs/<cluster-id>/<shard-id>",
     )
-  return ProtectedShard(shard: shard.shardId, key: publicKey.get())
+  return ProtectedShard(shard: shard.shardId, key: publicKey)
 
 proc completeCmdArg*(T: type ProtectedShard, val: string): seq[string] =
   return @[]
@@ -31,12 +31,10 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
   trace "configuration", conf = $conf
 
   # 2. generate credentials
-  let credentialRes = membershipKeyGen()
-  if credentialRes.isErr():
-    error "failure while generating credentials", error = credentialRes.error
-    quit(1)
-
-  let credential = credentialRes.get()
+  let credential = membershipKeyGen().valueOr:
+    error "failure while generating credentials", error = error
+    quit(QuitFailure)
 
   info "credentials",
     idTrapdoor = credential.idTrapdoor.inHex(),
     idNullifier = credential.idNullifier.inHex(),
@@ -45,7 +43,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
 
   if not conf.execute:
     info "not executing, exiting"
-    quit(0)
+    quit(QuitSuccess)
 
   var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
     ## Action to be taken when an internal error occurs during the node run.
@@ -66,12 +64,12 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
   try:
     (waitFor groupManager.init()).isOkOr:
       error "failure while initializing OnchainGroupManager", error = $error
-      quit(1)
+      quit(QuitFailure)
   # handling the exception is required since waitFor raises an exception
   except Exception, CatchableError:
     error "failure while initializing OnchainGroupManager",
       error = getCurrentExceptionMsg()
-    quit(1)
+    quit(QuitFailure)
 
   # 4. register on-chain
   try:
@@ -79,7 +77,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
   except Exception, CatchableError:
     error "failure while registering credentials on-chain",
       error = getCurrentExceptionMsg()
-    quit(1)
+    quit(QuitFailure)
 
   info "Transaction hash", txHash = groupManager.registrationTxHash.get()
 
@@ -99,11 +97,9 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
     userMessageLimit: conf.userMessageLimit,
   )
 
-  let persistRes =
-    addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo)
-  if persistRes.isErr():
-    error "failed to persist credentials", error = persistRes.error
-    quit(1)
+  addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo).isOkOr:
+    error "failed to persist credentials", error = error
+    quit(QuitFailure)
 
   info "credentials persisted", path = conf.credPath
 
@@ -111,5 +107,5 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
     waitFor groupManager.stop()
   except CatchableError:
     error "failure while stopping OnchainGroupManager", error = getCurrentExceptionMsg()
-    quit(0) # 0 because we already registered on-chain
-  quit(0)
+    quit(QuitSuccess) # 0 because we already registered on-chain
+  quit(QuitSuccess)
@@ -63,9 +63,8 @@ proc openDbConn(connString: string): Result[DbConn, string] =
     return err("exception opening new connection: " & getCurrentExceptionMsg())
 
   if conn.status != CONNECTION_OK:
-    let checkRes = conn.check()
-    if checkRes.isErr():
-      return err("failed to connect to database: " & checkRes.error)
+    conn.check().isOkOr:
+      return err("failed to connect to database: " & error)
 
     return err("unknown reason")
 
@@ -174,8 +174,8 @@ proc runStmt*(
       let len = paramValues.len
       discard dbConnWrapper.getDbConn().prepare(stmtName, sql(stmtDefinition), len)
 
-    if res.isErr():
-      return err("failed prepare in runStmt: " & res.error.msg)
+    res.isOkOr:
+      return err("failed prepare in runStmt: " & error.msg)
 
     pool.conns[connIndex].inclPreparedStmt(stmtName)
 
@@ -265,8 +265,7 @@ proc getPageSize*(db: SqliteDatabase): DatabaseResult[int64] =
   proc handler(s: RawStmtPtr) =
     size = sqlite3_column_int64(s, 0)
 
-  let res = db.query("PRAGMA page_size;", handler)
-  if res.isErr():
+  db.query("PRAGMA page_size;", handler).isOkOr:
     return err("failed to get page_size")
 
   return ok(size)
@@ -277,8 +276,7 @@ proc getFreelistCount*(db: SqliteDatabase): DatabaseResult[int64] =
   proc handler(s: RawStmtPtr) =
     count = sqlite3_column_int64(s, 0)
 
-  let res = db.query("PRAGMA freelist_count;", handler)
-  if res.isErr():
+  db.query("PRAGMA freelist_count;", handler).isOkOr:
     return err("failed to get freelist_count")
 
   return ok(count)
@@ -289,8 +287,7 @@ proc getPageCount*(db: SqliteDatabase): DatabaseResult[int64] =
   proc handler(s: RawStmtPtr) =
     count = sqlite3_column_int64(s, 0)
 
-  let res = db.query("PRAGMA page_count;", handler)
-  if res.isErr():
+  db.query("PRAGMA page_count;", handler).isOkOr:
     return err("failed to get page_count")
 
   return ok(count)
@@ -319,8 +316,7 @@ proc gatherSqlitePageStats*(db: SqliteDatabase): DatabaseResult[(int64, int64, i
 
 proc vacuum*(db: SqliteDatabase): DatabaseResult[void] =
   ## The VACUUM command rebuilds the database file, repacking it into a minimal amount of disk space.
-  let res = db.query("VACUUM;", NoopRowHandler)
-  if res.isErr():
+  db.query("VACUUM;", NoopRowHandler).isOkOr:
     return err("vacuum failed")
 
   return ok()
@@ -339,8 +335,7 @@ proc getUserVersion*(database: SqliteDatabase): DatabaseResult[int64] =
   proc handler(s: ptr sqlite3_stmt) =
     version = sqlite3_column_int64(s, 0)
 
-  let res = database.query("PRAGMA user_version;", handler)
-  if res.isErr():
+  database.query("PRAGMA user_version;", handler).isOkOr:
     return err("failed to get user_version")
 
   ok(version)
@@ -354,8 +349,7 @@ proc setUserVersion*(database: SqliteDatabase, version: int64): DatabaseResult[v
   ##
   ## For more info check: https://www.sqlite.org/pragma.html#pragma_user_version
   let query = "PRAGMA user_version=" & $version & ";"
-  let res = database.query(query, NoopRowHandler)
-  if res.isErr():
+  database.query(query, NoopRowHandler).isOkOr:
     return err("failed to set user_version")
 
   ok()
@ -400,11 +394,9 @@ proc filterMigrationScripts(
|
|||||||
if direction != "" and not script.toLower().endsWith("." & direction & ".sql"):
|
if direction != "" and not script.toLower().endsWith("." & direction & ".sql"):
|
||||||
return false
|
return false
|
||||||
|
|
||||||
let scriptVersionRes = getMigrationScriptVersion(script)
|
let scriptVersion = getMigrationScriptVersion(script).valueOr:
|
||||||
if scriptVersionRes.isErr():
|
|
||||||
return false
|
return false
|
||||||
|
|
||||||
let scriptVersion = scriptVersionRes.value
|
|
||||||
return lowVersion < scriptVersion and scriptVersion <= highVersion
|
return lowVersion < scriptVersion and scriptVersion <= highVersion
|
||||||
|
|
||||||
paths.filter(filterPredicate)
|
paths.filter(filterPredicate)
|
||||||
@ -476,10 +468,9 @@ proc migrate*(
|
|||||||
for statement in script.breakIntoStatements():
|
for statement in script.breakIntoStatements():
|
||||||
info "executing migration statement", statement = statement
|
info "executing migration statement", statement = statement
|
||||||
|
|
||||||
let execRes = db.query(statement, NoopRowHandler)
|
db.query(statement, NoopRowHandler).isOkOr:
|
||||||
if execRes.isErr():
|
|
||||||
error "failed to execute migration statement",
|
error "failed to execute migration statement",
|
||||||
statement = statement, error = execRes.error
|
statement = statement, error = error
|
||||||
return err("failed to execute migration statement")
|
return err("failed to execute migration statement")
|
||||||
|
|
||||||
info "migration statement executed succesfully", statement = statement
|
info "migration statement executed succesfully", statement = statement
|
||||||
@ -497,9 +488,8 @@ proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] =
|
|||||||
|
|
||||||
info "starting sqlite database vacuuming"
|
info "starting sqlite database vacuuming"
|
||||||
|
|
||||||
let resVacuum = db.vacuum()
|
db.vacuum().isOkOr:
|
||||||
if resVacuum.isErr():
|
return err("failed to execute vacuum: " & error)
|
||||||
return err("failed to execute vacuum: " & resVacuum.error)
|
|
||||||
|
|
||||||
info "finished sqlite database vacuuming"
|
info "finished sqlite database vacuuming"
|
||||||
ok()
|
ok()
|
||||||
|
|||||||
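Note: where the ok value is actually needed, the commit uses `valueOr` instead of `isOkOr`: it yields the ok value, or runs its block with `error` in scope, and the block must either produce a fallback value or exit. A minimal sketch with a hypothetical `parseVersion` helper (not from this repository):

    import std/strutils, results

    proc parseVersion(s: string): Result[int, string] =
      try:
        ok(parseInt(s))
      except ValueError:
        err("not a number: " & s)

    proc isSupported(s: string): bool =
      # On error the block exits the proc, so `version` is always valid below.
      let version = parseVersion(s).valueOr:
        return false
      version >= 2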
@@ -65,11 +65,10 @@ func id*(record: TypedRecord): Option[RecordId] =
   if fieldOpt.isNone():
     return none(RecordId)

-  let fieldRes = toRecordId(fieldOpt.get())
-  if fieldRes.isErr():
+  let field = toRecordId(fieldOpt.get()).valueOr:
     return none(RecordId)

-  some(fieldRes.value)
+  return some(field)

 func secp256k1*(record: TypedRecord): Option[array[33, byte]] =
   record.tryGet("secp256k1", array[33, byte])
@@ -393,12 +393,11 @@ proc addBootstrapNode*(bootstrapAddr: string, bootstrapEnrs: var seq[enr.Record]
   if bootstrapAddr.len == 0 or bootstrapAddr[0] == '#':
     return

-  let enrRes = parseBootstrapAddress(bootstrapAddr)
-  if enrRes.isErr():
-    info "ignoring invalid bootstrap address", reason = enrRes.error
+  let enr = parseBootstrapAddress(bootstrapAddr).valueOr:
+    info "ignoring invalid bootstrap address", reason = error
     return

-  bootstrapEnrs.add(enrRes.value)
+  bootstrapEnrs.add(enr)

 proc setupDiscoveryV5*(
     myENR: enr.Record,
@@ -29,13 +29,9 @@ proc enrConfiguration*(
   ).isOkOr:
     return err("could not initialize ENR with shards")

-  let recordRes = enrBuilder.build()
-  let record =
-    if recordRes.isErr():
-      error "failed to create record", error = recordRes.error
-      return err($recordRes.error)
-    else:
-      recordRes.get()
+  let record = enrBuilder.build().valueOr:
+    error "failed to create enr record", error = error
+    return err($error)

   return ok(record)

@@ -70,16 +66,13 @@ proc networkConfiguration*(
 ): Future[NetConfigResult] {.async.} =
   ## `udpPort` is only supplied to satisfy underlying APIs but is not
   ## actually a supported transport for libp2p traffic.
-  let natRes = setupNat(
+  var (extIp, extTcpPort, _) = setupNat(
     conf.natStrategy.string,
     clientId,
     Port(uint16(conf.p2pTcpPort) + portsShift),
     Port(uint16(conf.p2pTcpPort) + portsShift),
-  )
-  if natRes.isErr():
-    return err("failed to setup NAT: " & $natRes.error)
-
-  var (extIp, extTcpPort, _) = natRes.get()
+  ).valueOr:
+    return err("failed to setup NAT: " & $error)

   let
     discv5UdpPort =
@@ -101,12 +94,10 @@ proc networkConfiguration*(
   # Resolve and use DNS domain IP
   if conf.dns4DomainName.isSome() and extIp.isNone():
     try:
-      let dnsRes = await dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers)
-
-      if dnsRes.isErr():
-        return err($dnsRes.error) # Pass error down the stack
-
-      extIp = some(parseIpAddress(dnsRes.get()))
+      let dns = (await dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers)).valueOr:
+        return err($error) # Pass error down the stack
+
+      extIp = some(parseIpAddress(dns))
     except CatchableError:
       return
         err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg())
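Note: the `networkConfiguration` hunks above apply `valueOr` to awaited calls. The `await` has to be parenthesised first, so that `valueOr` binds to the inner Result rather than the Future. A sketch under that assumption, with made-up `fetchAddr`/`resolve` procs on top of chronos:

    import chronos, results

    proc fetchAddr(): Future[Result[string, string]] {.async.} =
      return ok("203.0.113.7") # placeholder value, illustration only

    proc resolve(): Future[Result[string, string]] {.async.} =
      # Parenthesise the await so valueOr applies to the Result, not the Future.
      let ip = (await fetchAddr()).valueOr:
        return err("resolve failed: " & error)
      return ok("resolved " & ip)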
@@ -47,11 +47,10 @@ proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] =

   ?peer_store_sqlite_migrations.migrate(db)

-  let res = WakuPeerStorage.new(db)
-  if res.isErr():
-    return err("failed to init peer store" & res.error)
+  let res = WakuPeerStorage.new(db).valueOr:
+    return err("failed to init peer store" & error)

-  ok(some(res.value))
+  return ok(some(res))

 ## Init waku node instance

@@ -167,16 +166,17 @@ proc setupProtocols(
   if conf.storeServiceConf.isSome():
     let storeServiceConf = conf.storeServiceConf.get()
     if storeServiceConf.supportV2:
-      let archiveDriverRes = await legacy_driver.ArchiveDriver.new(
-        storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration,
-        storeServiceConf.maxNumDbConnections, onFatalErrorAction,
-      )
-      if archiveDriverRes.isErr():
-        return err("failed to setup legacy archive driver: " & archiveDriverRes.error)
+      let archiveDriver = (
+        await legacy_driver.ArchiveDriver.new(
+          storeServiceConf.dbUrl, storeServiceConf.dbVacuum,
+          storeServiceConf.dbMigration, storeServiceConf.maxNumDbConnections,
+          onFatalErrorAction,
+        )
+      ).valueOr:
+        return err("failed to setup legacy archive driver: " & error)

-      let mountArcRes = node.mountLegacyArchive(archiveDriverRes.get())
-      if mountArcRes.isErr():
-        return err("failed to mount waku legacy archive protocol: " & mountArcRes.error)
+      node.mountLegacyArchive(archiveDriver).isOkOr:
+        return err("failed to mount waku legacy archive protocol: " & error)

     ## For now we always mount the future archive driver but if the legacy one is mounted,
     ## then the legacy will be in charge of performing the archiving.
@@ -189,11 +189,8 @@ proc setupProtocols(
     ## So for now, we need to make sure that when legacy store is enabled and we use sqlite
     ## that we migrate our db according to legacy store's schema to have the extra field

-    let engineRes = dburl.getDbEngine(storeServiceConf.dbUrl)
-    if engineRes.isErr():
-      return err("error getting db engine in setupProtocols: " & engineRes.error)
-
-    let engine = engineRes.get()
+    let engine = dburl.getDbEngine(storeServiceConf.dbUrl).valueOr:
+      return err("error getting db engine in setupProtocols: " & error)

     let migrate =
       if engine == "sqlite" and storeServiceConf.supportV2:
@@ -201,20 +198,19 @@ proc setupProtocols(
       else:
         storeServiceConf.dbMigration

-    let archiveDriverRes = await driver.ArchiveDriver.new(
-      storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate,
-      storeServiceConf.maxNumDbConnections, onFatalErrorAction,
-    )
-    if archiveDriverRes.isErr():
-      return err("failed to setup archive driver: " & archiveDriverRes.error)
+    let archiveDriver = (
+      await driver.ArchiveDriver.new(
+        storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate,
+        storeServiceConf.maxNumDbConnections, onFatalErrorAction,
+      )
+    ).valueOr:
+      return err("failed to setup archive driver: " & error)

-    let retPolicyRes = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy)
-    if retPolicyRes.isErr():
-      return err("failed to create retention policy: " & retPolicyRes.error)
+    let retPolicy = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy).valueOr:
+      return err("failed to create retention policy: " & error)

-    let mountArcRes = node.mountArchive(archiveDriverRes.get(), retPolicyRes.get())
-    if mountArcRes.isErr():
-      return err("failed to mount waku archive protocol: " & mountArcRes.error)
+    node.mountArchive(archiveDriver, retPolicy).isOkOr:
+      return err("failed to mount waku archive protocol: " & error)

     if storeServiceConf.supportV2:
       # Store legacy setup
@@ -205,13 +205,11 @@ proc new*(
     if wakuConf.remoteStoreNode.isNone():
       return err("A storenode should be set when reliability mode is on")

-    let deliveryMonitorRes = DeliveryMonitor.new(
+    let deliveryMonitor = DeliveryMonitor.new(
       node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient,
       node.wakuFilterClient,
-    )
-    if deliveryMonitorRes.isErr():
-      return err("could not create delivery monitor: " & $deliveryMonitorRes.error)
-    deliveryMonitor = deliveryMonitorRes.get()
+    ).valueOr:
+      return err("could not create delivery monitor: " & $error)

   var waku = Waku(
     version: git_version,
@@ -328,16 +326,14 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
     await sleepAsync(30.seconds)
     if waku.conf.dnsDiscoveryConf.isSome():
       let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get()
-      let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
-        dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
-      )
-      if dynamicBootstrapNodesRes.isErr():
-        error "Retrieving dynamic bootstrap nodes failed",
-          error = dynamicBootstrapNodesRes.error
+      waku[].dynamicBootstrapNodes = (
+        await waku_dnsdisc.retrieveDynamicBootstrapNodes(
+          dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
+        )
+      ).valueOr:
+        error "Retrieving dynamic bootstrap nodes failed", error = error
         continue

-      waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
-
       if not waku[].wakuDiscv5.isNil():
         let dynamicBootstrapEnrs = waku[].dynamicBootstrapNodes
           .filterIt(it.hasUdpPort())
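Note: in `startDnsDiscoveryRetryLoop` above, the `valueOr` block ends with `continue` instead of `return`. Because `valueOr` expands inline, any control-flow exit works; the sketch below (with an invented `poll` proc) logs and retries on error:

    import results

    proc poll(attempt: int): Result[string, string] =
      if attempt mod 2 == 0:
        err("transient failure")
      else:
        ok("payload " & $attempt)

    proc run() =
      for attempt in 1 .. 4:
        # On error, log the reason and move to the next attempt.
        let payload = poll(attempt).valueOr:
          echo "retrying, reason: ", error
          continue
        echo "got ", payload

    run()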
@@ -42,10 +42,9 @@ proc getTxAndTxReceipt(
   let receiptFuture = eligibilityManager.getMinedTransactionReceipt(txHash)
   await allFutures(txFuture, receiptFuture)
   let tx = txFuture.read()
-  let txReceipt = receiptFuture.read()
-  if txReceipt.isErr():
-    return err("Cannot get tx receipt: " & txReceipt.error)
-  return ok((tx, txReceipt.get()))
+  let txReceipt = receiptFuture.read().valueOr:
+    return err("Cannot get tx receipt: " & error)
+  return ok((tx, txReceipt))

 proc isEligibleTxId*(
     eligibilityManager: EligibilityManager,
@@ -108,13 +108,10 @@ proc filterSubscribe*(
       error = "waku filter client is not set up"
     return err(FilterSubscribeError.serviceUnavailable())

-  let remotePeerRes = parsePeerInfo(peer)
-  if remotePeerRes.isErr():
-    error "Couldn't parse the peer info properly", error = remotePeerRes.error
+  let remotePeer = parsePeerInfo(peer).valueOr:
+    error "Couldn't parse the peer info properly", error = error
     return err(FilterSubscribeError.serviceUnavailable("No peers available"))

-  let remotePeer = remotePeerRes.value
-
   if pubsubTopic.isSome():
     info "registering filter subscription to content",
       pubsubTopic = pubsubTopic.get(),
@@ -143,15 +140,11 @@ proc filterSubscribe*(
     else:
       # No pubsub topic, autosharding is used to deduce it
       # but content topics must be well-formed for this
-      let topicMapRes =
-        node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics)
-
-      let topicMap =
-        if topicMapRes.isErr():
-          error "can't get shard", error = topicMapRes.error
-          return err(FilterSubscribeError.badResponse("can't get shard"))
-        else:
-          topicMapRes.get()
+      let topicMap = node.wakuAutoSharding
+      .get()
+      .getShardsFromContentTopics(contentTopics).valueOr:
+        error "can't get shard", error = error
+        return err(FilterSubscribeError.badResponse("can't get shard"))

       var futures = collect(newSeq):
         for shard, topics in topicMap.pairs:
@@ -195,13 +188,10 @@ proc filterUnsubscribe*(
 ): Future[FilterSubscribeResult] {.async: (raises: []).} =
   ## Unsubscribe from a content filter V2".

-  let remotePeerRes = parsePeerInfo(peer)
-  if remotePeerRes.isErr():
-    error "couldn't parse remotePeerInfo", error = remotePeerRes.error
+  let remotePeer = parsePeerInfo(peer).valueOr:
+    error "couldn't parse remotePeerInfo", error = error
     return err(FilterSubscribeError.serviceUnavailable("No peers available"))

-  let remotePeer = remotePeerRes.value
-
   if pubsubTopic.isSome():
     info "deregistering filter subscription to content",
       pubsubTopic = pubsubTopic.get(),
@@ -226,15 +216,11 @@ proc filterUnsubscribe*(
       error "Failed filter un-subscription, pubsub topic must be specified with static sharding"
       waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])
   else: # pubsubTopic.isNone
-    let topicMapRes =
-      node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics)
-
-    let topicMap =
-      if topicMapRes.isErr():
-        error "can't get shard", error = topicMapRes.error
-        return err(FilterSubscribeError.badResponse("can't get shard"))
-      else:
-        topicMapRes.get()
+    let topicMap = node.wakuAutoSharding
+    .get()
+    .getShardsFromContentTopics(contentTopics).valueOr:
+      error "can't get shard", error = error
+      return err(FilterSubscribeError.badResponse("can't get shard"))

     var futures = collect(newSeq):
       for shard, topics in topicMap.pairs:
@@ -275,13 +261,10 @@ proc filterUnsubscribeAll*(
 ): Future[FilterSubscribeResult] {.async: (raises: []).} =
   ## Unsubscribe from a content filter V2".

-  let remotePeerRes = parsePeerInfo(peer)
-  if remotePeerRes.isErr():
-    error "couldn't parse remotePeerInfo", error = remotePeerRes.error
+  let remotePeer = parsePeerInfo(peer).valueOr:
+    error "couldn't parse remotePeerInfo", error = error
     return err(FilterSubscribeError.serviceUnavailable("No peers available"))

-  let remotePeer = remotePeerRes.value
-
   info "deregistering all filter subscription to content", peer = remotePeer.peerId

   let unsubRes = await node.wakuFilterClient.unsubscribeAll(remotePeer)
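Note: the filter hunks above also use the `valueOr` block to translate a low-level error into the protocol's own error type before returning. A sketch with hypothetical types (`SubscribeError` and `parsePeer` are illustrations, not the repository's definitions):

    import std/strutils, results

    type SubscribeError = object
      code: int
      msg: string

    proc parsePeer(s: string): Result[string, string] =
      if s.startsWith("/"): ok(s) else: err("malformed multiaddr")

    proc subscribe(peer: string): Result[string, SubscribeError] =
      # Map the parse error onto the handler's error type while exiting early.
      let remotePeer = parsePeer(peer).valueOr:
        return err(SubscribeError(code: 503, msg: "no peers available: " & error))
      ok("subscribed to " & remotePeer)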
@@ -114,14 +114,8 @@ proc legacyLightpushPublish*(

   if node.wakuAutoSharding.isNone():
     return err("Pubsub topic must be specified when static sharding is enabled")
-  let topicMapRes =
-    node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic)
-
   let topicMap =
-    if topicMapRes.isErr():
-      return err(topicMapRes.error)
-    else:
-      topicMapRes.get()
+    ?node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic)

   for pubsub, _ in topicMap.pairs: # There's only one pair anyway
     return await internalPublish(node, $pubsub, message, peer)
@@ -111,10 +111,9 @@ proc setPeerExchangePeer*(

   info "Set peer-exchange peer", peer = peer

-  let remotePeerRes = parsePeerInfo(peer)
-  if remotePeerRes.isErr():
-    error "could not parse peer info", error = remotePeerRes.error
+  let remotePeer = parsePeerInfo(peer).valueOr:
+    error "could not parse peer info", error = error
     return

-  node.peerManager.addPeer(remotePeerRes.value, PeerExchange)
+  node.peerManager.addPeer(remotePeer, PeerExchange)
   waku_px_peers.inc()
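Note: `legacyLightpushPublish` above switches to the `?` operator, which unwraps the ok value or returns the unchanged error from the enclosing proc; it fits when no extra context needs to be added to the error. A minimal sketch (hypothetical `parsePort`/`buildEndpoint`):

    import std/strutils, results

    proc parsePort(s: string): Result[int, string] =
      try:
        ok(parseInt(s))
      except ValueError:
        err("invalid port: " & s)

    proc buildEndpoint(host, port: string): Result[string, string] =
      # `?` propagates the error as-is, so both procs must share the error type.
      let p = ?parsePort(port)
      ok(host & ":" & $p)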
@@ -240,11 +240,8 @@ proc mountRlnRelay*(
       CatchableError, "WakuRelay protocol is not mounted, cannot mount WakuRlnRelay"
     )

-  let rlnRelayRes = await WakuRlnRelay.new(rlnConf, registrationHandler)
-  if rlnRelayRes.isErr():
-    raise
-      newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error)
-  let rlnRelay = rlnRelayRes.get()
+  let rlnRelay = (await WakuRlnRelay.new(rlnConf, registrationHandler)).valueOr:
+    raise newException(CatchableError, "failed to mount WakuRlnRelay: " & error)
   if (rlnConf.userMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit):
     error "rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract"
   let validator = generateRlnValidator(rlnRelay, spamHandler)
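Note: `mountRlnRelay` runs in a code path that signals failure by exception, so its `valueOr` block raises rather than returning. A sketch with invented `newRelay`/`mount` procs:

    import results

    proc newRelay(limit: int): Result[string, string] =
      if limit <= 0:
        err("limit must be positive")
      else:
        ok("relay with limit " & $limit)

    proc mount(limit: int): string =
      # The valueOr block may raise; on success the ok value becomes the result.
      newRelay(limit).valueOr:
        raise newException(CatchableError, "failed to mount relay: " & error)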
@@ -87,30 +87,27 @@ proc toArchiveQuery(
 proc toHistoryResult*(
     res: waku_archive_legacy.ArchiveResult
 ): legacy_store_common.HistoryResult =
-  if res.isErr():
-    let error = res.error
-    case res.error.kind
-    of waku_archive_legacy.ArchiveErrorKind.DRIVER_ERROR,
-        waku_archive_legacy.ArchiveErrorKind.INVALID_QUERY:
-      err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: res.error.cause))
-    else:
-      err(HistoryError(kind: HistoryErrorKind.UNKNOWN))
-  else:
-    let response = res.get()
-    ok(
-      HistoryResponse(
-        messages: response.messages,
-        cursor: response.cursor.map(
-          proc(cursor: waku_archive_legacy.ArchiveCursor): HistoryCursor =
-            HistoryCursor(
-              pubsubTopic: cursor.pubsubTopic,
-              senderTime: cursor.senderTime,
-              storeTime: cursor.storeTime,
-              digest: cursor.digest,
-            )
-        ),
-      )
-    )
+  let response = res.valueOr:
+    case error.kind
+    of waku_archive_legacy.ArchiveErrorKind.DRIVER_ERROR,
+        waku_archive_legacy.ArchiveErrorKind.INVALID_QUERY:
+      return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: error.cause))
+    else:
+      return err(HistoryError(kind: HistoryErrorKind.UNKNOWN))
+  return ok(
+    HistoryResponse(
+      messages: response.messages,
+      cursor: response.cursor.map(
+        proc(cursor: waku_archive_legacy.ArchiveCursor): HistoryCursor =
+          HistoryCursor(
+            pubsubTopic: cursor.pubsubTopic,
+            senderTime: cursor.senderTime,
+            storeTime: cursor.storeTime,
+            digest: cursor.digest,
+          )
+      ),
+    )
+  )

 proc mountLegacyStore*(
     node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
@@ -126,8 +123,7 @@ proc mountLegacyStore*(
       request: HistoryQuery
   ): Future[legacy_store_common.HistoryResult] {.async.} =
     if request.cursor.isSome():
-      request.cursor.get().checkHistCursor().isOkOr:
-        return err(error)
+      ?request.cursor.get().checkHistCursor()

     let request = request.toArchiveQuery()
     let response = await node.wakuLegacyArchive.findMessagesV2(request)
@@ -160,11 +156,8 @@ proc query*(
   if node.wakuLegacyStoreClient.isNil():
     return err("waku legacy store client is nil")

-  let queryRes = await node.wakuLegacyStoreClient.query(query, peer)
-  if queryRes.isErr():
-    return err("legacy store client query error: " & $queryRes.error)
-
-  let response = queryRes.get()
+  let response = (await node.wakuLegacyStoreClient.query(query, peer)).valueOr:
+    return err("legacy store client query error: " & $error)

   return ok(response)

@@ -201,9 +194,8 @@ when defined(waku_exp_store_resume):
     if node.wakuLegacyStoreClient.isNil():
      return

-    let retrievedMessages = await node.wakuLegacyStoreClient.resume(peerList)
-    if retrievedMessages.isErr():
-      error "failed to resume store", error = retrievedMessages.error
+    let retrievedMessages = (await node.wakuLegacyStoreClient.resume(peerList)).valueOr:
+      error "failed to resume store", error = error
       return

     info "the number of retrieved messages since the last online time: ",
@@ -17,10 +17,8 @@ const PeerStoreMigrationPath: string = projectRoot / "migrations" / "sent_msgs"
 proc migrate*(db: SqliteDatabase): DatabaseResult[void] =
   info "starting peer store's sqlite database migration for sent messages"

-  let migrationRes =
-    migrate(db, TargetSchemaVersion, migrationsScriptsDir = PeerStoreMigrationPath)
-  if migrationRes.isErr():
-    return err("failed to execute migration scripts: " & migrationRes.error)
+  migrate(db, TargetSchemaVersion, migrationsScriptsDir = PeerStoreMigrationPath).isOkOr:
+    return err("failed to execute migration scripts: " & error)

   info "finished peer store's sqlite database migration for sent messages"
   ok()
@@ -18,16 +18,14 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
   ## it runs migration scripts if the `user_version` is outdated. The `migrationScriptsDir` path
   ## points to the directory holding the migrations scripts once the db is updated, it sets the
   ## `user_version` to the `tragetVersion`.
   ##
   ## If not `targetVersion` is provided, it defaults to `SchemaVersion`.
   ##
   ## NOTE: Down migration it is not currently supported
   info "starting peer store's sqlite database migration"

-  let migrationRes =
-    migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath)
-  if migrationRes.isErr():
-    return err("failed to execute migration scripts: " & migrationRes.error)
+  migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath).isOkOr:
+    return err("failed to execute migration scripts: " & error)

   info "finished peer store's sqlite database migration"
   ok()
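Note: `toHistoryResult` above shows that the `valueOr` block can be a full `case` over the error before exiting, which keeps the error mapping next to the binding it guards. A self-contained sketch with stand-in types (none of them from this repository):

    import results

    type
      ArchiveErrorKind = enum
        DriverError, InvalidQuery, Unknown
      ArchiveError = object
        kind: ArchiveErrorKind
        cause: string

    proc queryArchive(q: string): Result[seq[string], ArchiveError] =
      if q.len == 0:
        return err(ArchiveError(kind: InvalidQuery, cause: "empty query"))
      ok(@["msg1", "msg2"])

    proc history(q: string): Result[seq[string], string] =
      # Inspect the injected `error` field by field before returning.
      let response = queryArchive(q).valueOr:
        case error.kind
        of DriverError, InvalidQuery:
          return err("bad request: " & error.cause)
        of Unknown:
          return err("unknown archive error")
      ok(response)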
@@ -67,7 +67,7 @@ proc encode*(remotePeerInfo: RemotePeerInfo): PeerStorageResult[ProtoBuffer] =

   let catchRes = catch:
     pb.write(4, remotePeerInfo.publicKey)
-  if catchRes.isErr():
+  catchRes.isOkOr:
     return err("Enncoding public key failed: " & catchRes.error.msg)

   pb.write(5, uint32(ord(remotePeerInfo.connectedness)))
@@ -154,14 +154,11 @@ method getAll*(
   let catchRes = catch:
     db.database.query("SELECT peerId, storedInfo FROM Peer", peer)

-  let queryRes =
-    if catchRes.isErr():
-      return err("failed to extract peer from query result: " & catchRes.error.msg)
-    else:
-      catchRes.get()
+  let queryRes = catchRes.valueOr:
+    return err("failed to extract peer from query result: " & catchRes.error.msg)

-  if queryRes.isErr():
-    return err("peer storage query failed: " & queryRes.error)
+  queryRes.isOkOr:
+    return err("peer storage query failed: " & error)

   return ok()

@@ -225,8 +225,8 @@ proc mountMetadata*(

   let catchRes = catch:
     node.switch.mount(node.wakuMetadata, protocolMatcher(WakuMetadataCodec))
-  if catchRes.isErr():
-    return err(catchRes.error.msg)
+  catchRes.isOkOr:
+    return err(error.msg)

   return ok()

@@ -266,8 +266,8 @@ proc mountMix*(
   node.wakuMix.registerDestReadBehavior(WakuLightPushCodec, readLp(int(-1)))
   let catchRes = catch:
     node.switch.mount(node.wakuMix)
-  if catchRes.isErr():
-    return err(catchRes.error.msg)
+  catchRes.isOkOr:
+    return err(error.msg)
   return ok()

 ## Waku Sync
@@ -300,8 +300,8 @@ proc mountStoreSync*(
     node.switch.mount(
       node.wakuStoreReconciliation, protocolMatcher(WakuReconciliationCodec)
     )
-  if reconMountRes.isErr():
-    return err(reconMountRes.error.msg)
+  reconMountRes.isOkOr:
+    return err(error.msg)

   let transfer = SyncTransfer.new(
     node.peerManager, node.wakuArchive, idsChannel, wantsChannel, needsChannel
@@ -311,8 +311,8 @@ proc mountStoreSync*(

   let transMountRes = catch:
     node.switch.mount(node.wakuStoreTransfer, protocolMatcher(WakuTransferCodec))
-  if transMountRes.isErr():
-    return err(transMountRes.error.msg)
+  transMountRes.isOkOr:
+    return err(error.msg)

   return ok()
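Note: the mount hunks pair `catch` (which, as used throughout this diff, turns a raising expression into a Result whose error is a `ref CatchableError`) with `isOkOr`, so exception handling and Result handling read the same way. A minimal sketch with a hypothetical `mountProtocol`, assuming the `catch` template from the same results library:

    import results

    proc mountProtocol(name: string): Result[void, string] =
      let catchRes = catch:
        if name.len == 0:
          raise newException(ValueError, "protocol name must not be empty")
      # `error` here is the caught exception reference, hence `.msg`.
      catchRes.isOkOr:
        return err(error.msg)
      ok()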
@@ -426,14 +426,13 @@ proc installAdminV1GetFilterSubsHandler(router: var RestRouter, node: WakuNode)
         FilterSubscription(peerId: $peerId, filterCriteria: filterCriteria)
       )

-    let resp = RestApiResponse.jsonResponse(subscriptions, status = Http200)
-    if resp.isErr():
-      error "An error ocurred while building the json respose: ", error = resp.error
+    let resp = RestApiResponse.jsonResponse(subscriptions, status = Http200).valueOr:
+      error "An error ocurred while building the json respose", error = error
       return RestApiResponse.internalServerError(
-        fmt("An error ocurred while building the json respose: {resp.error}")
+        fmt("An error ocurred while building the json respose: {error}")
       )

-    return resp.get()
+    return resp

 proc installAdminV1PostLogLevelHandler(router: var RestRouter, node: WakuNode) =
   router.api(MethodPost, ROUTE_ADMIN_V1_POST_LOG_LEVEL) do(
@@ -15,12 +15,11 @@ const ROUTE_DEBUG_INFOV1 = "/debug/v1/info"
 proc installDebugInfoV1Handler(router: var RestRouter, node: WakuNode) =
   let getInfo = proc(): RestApiResponse =
     let info = node.info().toDebugWakuInfo()
-    let resp = RestApiResponse.jsonResponse(info, status = Http200)
-    if resp.isErr():
-      info "An error occurred while building the json respose", error = resp.error
+    let resp = RestApiResponse.jsonResponse(info, status = Http200).valueOr:
+      info "An error occurred while building the json respose", error = error
       return RestApiResponse.internalServerError()

-    return resp.get()
+    return resp

   # /debug route is deprecated, will be removed
   router.api(MethodGet, ROUTE_DEBUG_INFOV1) do() -> RestApiResponse:
@@ -49,15 +49,12 @@ func decodeRequestBody[T](

   let reqBodyData = contentBody.get().data

-  let requestResult = decodeFromJsonBytes(T, reqBodyData)
-  if requestResult.isErr():
+  let requestResult = decodeFromJsonBytes(T, reqBodyData).valueOr:
     return err(
-      RestApiResponse.badRequest(
-        "Invalid content body, could not decode. " & $requestResult.error
-      )
+      RestApiResponse.badRequest("Invalid content body, could not decode. " & $error)
     )

-  return ok(requestResult.get())
+  return ok(requestResult)

 proc getStatusDesc(
     protocolClientRes: filter_protocol_type.FilterSubscribeResult
@@ -129,16 +126,15 @@ proc makeRestResponse(
     httpStatus = convertErrorKindToHttpStatus(protocolClientRes.error().kind)
     # TODO: convert status codes!

-  let resp =
-    RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus)
-  if resp.isErr():
-    error "An error ocurred while building the json respose: ", error = resp.error
+  let resp = RestApiResponse.jsonResponse(
+    filterSubscriptionResponse, status = httpStatus
+  ).valueOr:
+    error "An error ocurred while building the json respose: ", error = error
     return RestApiResponse.internalServerError(
-      fmt("An error ocurred while building the json respose: {resp.error}")
+      fmt("An error ocurred while building the json respose: {error}")
     )

-  return resp.get()
+  return resp

 proc makeRestResponse(
     requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeError
@@ -149,16 +145,15 @@ proc makeRestResponse(
   let httpStatus = convertErrorKindToHttpStatus(protocolClientRes.kind)
   # TODO: convert status codes!

-  let resp =
-    RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus)
-  if resp.isErr():
-    error "An error ocurred while building the json respose: ", error = resp.error
+  let resp = RestApiResponse.jsonResponse(
+    filterSubscriptionResponse, status = httpStatus
+  ).valueOr:
+    error "An error ocurred while building the json respose: ", error = error
     return RestApiResponse.internalServerError(
-      fmt("An error ocurred while building the json respose: {resp.error}")
+      fmt("An error ocurred while building the json respose: {error}")
     )

-  return resp.get()
+  return resp

 const NoPeerNoDiscoError = FilterSubscribeError.serviceUnavailable(
   "No suitable service peer & no discovery method"
@@ -175,18 +170,14 @@ proc filterPostPutSubscriptionRequestHandler(
   ): Future[RestApiResponse] {.async.} =
     ## handles any filter subscription requests, adds or modifies.

-    let decodedBody = decodeRequestBody[FilterSubscribeRequest](contentBody)
-
-    if decodedBody.isErr():
+    let req: FilterSubscribeRequest = decodeRequestBody[FilterSubscribeRequest](
+      contentBody
+    ).valueOr:
       return makeRestResponse(
         "unknown",
-        FilterSubscribeError.badRequest(
-          fmt("Failed to decode request: {decodedBody.error}")
-        ),
+        FilterSubscribeError.badRequest(fmt("Failed to decode request: {error}")),
       )

-    let req: FilterSubscribeRequest = decodedBody.value()
-
     let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
       let handler = discHandler.valueOr:
         return makeRestResponse(req.requestId, NoPeerNoDiscoError)
@@ -256,18 +247,14 @@ proc installFilterDeleteSubscriptionsHandler(
     ## Subscribes a node to a list of contentTopics of a PubSub topic
     info "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody

-    let decodedBody = decodeRequestBody[FilterUnsubscribeRequest](contentBody)
-
-    if decodedBody.isErr():
+    let req: FilterUnsubscribeRequest = decodeRequestBody[FilterUnsubscribeRequest](
+      contentBody
+    ).valueOr:
       return makeRestResponse(
         "unknown",
-        FilterSubscribeError.badRequest(
-          fmt("Failed to decode request: {decodedBody.error}")
-        ),
+        FilterSubscribeError.badRequest(fmt("Failed to decode request: {error}")),
       )

-    let req: FilterUnsubscribeRequest = decodedBody.value()
-
     let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
       let handler = discHandler.valueOr:
         return makeRestResponse(req.requestId, NoPeerNoDiscoError)
@@ -308,18 +295,14 @@ proc installFilterDeleteAllSubscriptionsHandler(
     ## Subscribes a node to a list of contentTopics of a PubSub topic
     info "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody

-    let decodedBody = decodeRequestBody[FilterUnsubscribeAllRequest](contentBody)
-
-    if decodedBody.isErr():
+    let req: FilterUnsubscribeAllRequest = decodeRequestBody[
+      FilterUnsubscribeAllRequest
+    ](contentBody).valueOr:
       return makeRestResponse(
         "unknown",
-        FilterSubscribeError.badRequest(
-          fmt("Failed to decode request: {decodedBody.error}")
-        ),
+        FilterSubscribeError.badRequest(fmt("Failed to decode request: {error}")),
       )

-    let req: FilterUnsubscribeAllRequest = decodedBody.value()
-
     let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
       let handler = discHandler.valueOr:
         return makeRestResponse(req.requestId, NoPeerNoDiscoError)
@@ -399,24 +382,20 @@ proc installFilterGetMessagesHandler(
     ## TODO: ability to specify a return message limit, maybe use cursor to control paging response.
     info "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic

-    if contentTopic.isErr():
+    let contentTopic = contentTopic.valueOr:
       return RestApiResponse.badRequest("Missing contentTopic")

-    let contentTopic = contentTopic.get()
-
-    let msgRes = cache.getAutoMessages(contentTopic, clear = true)
-    if msgRes.isErr():
+    let msg = cache.getAutoMessages(contentTopic, clear = true).valueOr:
       return RestApiResponse.badRequest("Not subscribed to topic: " & contentTopic)

-    let data = FilterGetMessagesResponse(msgRes.get().map(toFilterWakuMessage))
-    let resp = RestApiResponse.jsonResponse(data, status = Http200)
-    if resp.isErr():
-      error "An error ocurred while building the json respose: ", error = resp.error
+    let data = FilterGetMessagesResponse(msg.map(toFilterWakuMessage))
+    let resp = RestApiResponse.jsonResponse(data, status = Http200).valueOr:
+      error "An error ocurred while building the json respose: ", error = error
       return RestApiResponse.internalServerError(
         "An error ocurred while building the json respose"
       )

-    return resp.get()
+    return resp

 proc installFilterRestApiHandlers*(
     router: var RestRouter,
@@ -50,12 +50,8 @@ proc installLightPushRequestHandler*(
     ## Send a request to push a waku message
     info "post", ROUTE_LIGHTPUSH, contentBody

-    let decodedBody = decodeRequestBody[PushRequest](contentBody)
-
-    if decodedBody.isErr():
-      return decodedBody.error()
-
-    let req: PushRequest = decodedBody.value()
+    let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr:
+      return error

     let msg = req.message.toWakuMessage().valueOr:
       return RestApiResponse.badRequest("Invalid message: " & $error)
@@ -80,12 +76,12 @@ proc installLightPushRequestHandler*(
       error "Failed to request a message push due to timeout!"
       return RestApiResponse.serviceUnavailable("Push request timed out")

-    if subFut.value().isErr():
-      if subFut.value().error == TooManyRequestsMessage:
+    subFut.value().isOkOr:
+      if error == TooManyRequestsMessage:
         return RestApiResponse.tooManyRequests("Request rate limmit reached")

       return RestApiResponse.serviceUnavailable(
-        fmt("Failed to request a message push: {subFut.value().error}")
+        fmt("Failed to request a message push: {error}")
       )

     return RestApiResponse.ok()
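Note: in the REST handlers above, the `valueOr` block does not propagate the error at all; it exits with a fallback HTTP response. A sketch with stand-in types (`ApiResponse` and `jsonResponse` here are illustrations, not the presto API):

    import results

    type ApiResponse = object
      status: int
      body: string

    proc jsonResponse(data: string): Result[ApiResponse, string] =
      if data.len > 1024:
        return err("payload too large")
      ok(ApiResponse(status: 200, body: data))

    proc handle(data: string): ApiResponse =
      # On failure, answer with a 500 instead of bubbling the error up.
      let resp = jsonResponse(data).valueOr:
        return ApiResponse(status: 500, body: "error: " & error)
      resp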
@@ -1,6 +1,7 @@
 {.push raises: [].}

-import std/strformat, results, chronicles, uri, json_serialization, presto/route
+import
+  std/[strformat, sugar], results, chronicles, uri, json_serialization, presto/route
 import
   ../../../waku_core,
   ../../../waku_store_legacy/common,
@@ -34,20 +35,17 @@ proc performHistoryQuery(
     error msg
     return RestApiResponse.internalServerError(msg)

-  let res = queryFut.read()
-  if res.isErr():
-    const msg = "Error occurred in queryFut.read()"
-    error msg, error = res.error
-    return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]"))
-
-  let storeResp = res.value.toStoreResponseRest()
-  let resp = RestApiResponse.jsonResponse(storeResp, status = Http200)
-  if resp.isErr():
+  let storeResp = queryFut.read().map(res => res.toStoreResponseRest()).valueOr:
+    const msg = "Error occurred in queryFut.read()"
+    error msg, error = error
+    return RestApiResponse.internalServerError(fmt("{msg} [{error}]"))
+
+  let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr:
     const msg = "Error building the json respose"
-    error msg, error = resp.error
-    return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]"))
+    error msg, error = error
+    return RestApiResponse.internalServerError(fmt("{msg} [{error}]"))

-  return resp.get()
+  return resp

 # Converts a string time representation into an Option[Timestamp].
 # Only positive time is considered a valid Timestamp in the request
@@ -70,16 +68,13 @@ proc parseCursor(
     digest: Option[string],
 ): Result[Option[HistoryCursor], string] =
   # Parse sender time
-  let parsedSenderTime = parseTime(senderTime).valueOr:
-    return err(error)
+  let parsedSenderTime = ?parseTime(senderTime)

   # Parse store time
-  let parsedStoreTime = parseTime(storeTime).valueOr:
-    return err(error)
+  let parsedStoreTime = ?parseTime(storeTime)

   # Parse message digest
-  let parsedMsgDigest = parseMsgDigest(digest).valueOr:
-    return err(error)
+  let parsedMsgDigest = ?parseMsgDigest(digest)

   # Parse cursor information
   if parsedPubsubTopic.isSome() and parsedSenderTime.isSome() and
@@ -60,8 +60,7 @@ proc parseMsgDigest*(
     return ok(none(waku_store_common.MessageDigest))

   let decodedUrl = decodeUrl(input.get())
-  let base64DecodedArr = base64.decode(Base64String(decodedUrl)).valueOr:
-    return err(error)
+  let base64DecodedArr = ?base64.decode(Base64String(decodedUrl))

   var messageDigest = waku_store_common.MessageDigest()

@@ -74,13 +74,12 @@ proc originMiddlewareProc(
     reqfence: RequestFence,
     nextHandler: HttpProcessCallback2,
 ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =
-  if reqfence.isErr():
+  let request = reqfence.valueOr:
     # Ignore request errors that detected before our middleware.
     # Let final handler deal with it.
     return await nextHandler(reqfence)

   let self = OriginHandlerMiddlewareRef(middleware)
-  let request = reqfence.get()
   var reqHeaders = request.headers
   var response = request.getResponse()

@@ -126,29 +126,25 @@ proc installRelayApiHandlers*(
     # ## TODO: ability to specify a return message limit
     # info "get_waku_v2_relay_v1_messages", topic=topic

-    if pubsubTopic.isErr():
+    let pubSubTopic = pubsubTopic.valueOr:
       return RestApiResponse.badRequest()
-    let pubSubTopic = pubsubTopic.get()

-    let messages = cache.getMessages(pubSubTopic, clear = true)
-    if messages.isErr():
+    let messages = cache.getMessages(pubSubTopic, clear = true).valueOr:
       info "Not subscribed to topic", topic = pubSubTopic
       return RestApiResponse.notFound()

-    let data = RelayGetMessagesResponse(messages.get().map(toRelayWakuMessage))
-    let resp = RestApiResponse.jsonResponse(data, status = Http200)
-    if resp.isErr():
-      info "An error ocurred while building the json respose", error = resp.error
+    let data = RelayGetMessagesResponse(messages.map(toRelayWakuMessage))
+    let resp = RestApiResponse.jsonResponse(data, status = Http200).valueOr:
+      info "An error ocurred while building the json respose", error = error
       return RestApiResponse.internalServerError()

-    return resp.get()
+    return resp

   router.api(MethodPost, ROUTE_RELAY_MESSAGESV1) do(
     pubsubTopic: string, contentBody: Option[ContentBody]
   ) -> RestApiResponse:
-    if pubsubTopic.isErr():
+    let pubSubTopic = pubsubTopic.valueOr:
       return RestApiResponse.badRequest()
-    let pubSubTopic = pubsubTopic.get()

     # ensure the node is subscribed to the topic. otherwise it risks publishing
     # to a topic with no connected peers
@@ -318,9 +314,7 @@ proc installRelayApiHandlers*(
     if not await publishFut.withTimeout(futTimeout):
       return RestApiResponse.internalServerError("Failed to publish: timedout")

-    var res = publishFut.read()
-
-    if res.isErr():
-      return RestApiResponse.badRequest("Failed to publish. " & res.error)
+    publishFut.read().isOkOr:
+      return RestApiResponse.badRequest("Failed to publish: " & error)

     return RestApiResponse.ok()
@@ -45,15 +45,12 @@ func decodeRequestBody*[T](

   let reqBodyData = contentBody.get().data

-  let requestResult = decodeFromJsonBytes(T, reqBodyData)
-  if requestResult.isErr():
+  let requestResult = decodeFromJsonBytes(T, reqBodyData).valueOr:
     return err(
-      RestApiResponse.badRequest(
-        "Invalid content body, could not decode. " & $requestResult.error
-      )
+      RestApiResponse.badRequest("Invalid content body, could not decode: " & $error)
     )

-  return ok(requestResult.get())
+  return ok(requestResult)

 proc decodeBytes*(
     t: typedesc[string], value: openarray[byte], contentType: Opt[ContentTypeData]
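Note: `performHistoryQuery` above chains `map` before `valueOr`, so the ok value is transformed first and any error still falls through untouched; the `res => ...` shorthand is why `std/sugar` joins the imports. A minimal sketch (hypothetical `readCount`/`describe`):

    import std/sugar, results

    proc readCount(raw: string): Result[int, string] =
      if raw.len == 0: err("empty input") else: ok(raw.len)

    proc describe(raw: string): string =
      # map transforms the ok value; valueOr then unwraps it or falls back.
      let text = readCount(raw).map(x => "count=" & $x).valueOr:
        return "failed: " & error
      text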
@@ -117,8 +117,4 @@ proc encodeString*(value: SomeUnsignedInt): SerdesResult[string] =
   ok(Base10.toString(value))

 proc decodeString*(T: typedesc[SomeUnsignedInt], value: string): SerdesResult[T] =
-  let v = Base10.decode(T, value)
-  if v.isErr():
-    return err(v.error())
-  else:
-    return ok(v.get())
+  return Base10.decode(T, value)
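Note: `decodeString` above is the degenerate case: when the callee's Result type already matches the caller's, the cleanest fix is to return the Result directly instead of unwrapping and re-wrapping it. A tiny sketch (hypothetical procs):

    import results

    proc decodeByte(s: string): Result[int, string] =
      if s.len == 1: ok(int(s[0])) else: err("expected exactly one character")

    proc decodeFirst(s: string): Result[int, string] =
      # Same ok/err types on both sides, so just forward the Result.
      return decodeByte(s)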
@@ -91,23 +91,23 @@ proc new*(
 ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =
     discard

-  server.httpServer = HttpServerRef.new(
-    address,
-    defaultProcessCallback,
-    serverFlags,
-    socketFlags,
-    serverUri,
-    serverIdent,
-    maxConnections,
-    bufferSize,
-    backlogSize,
-    httpHeadersTimeout,
-    maxHeadersSize,
-    maxRequestBodySize,
-    dualstack = dualstack,
-    middlewares = middlewares,
-  ).valueOr:
-    return err(error)
+  server.httpServer =
+    ?HttpServerRef.new(
+      address,
+      defaultProcessCallback,
+      serverFlags,
+      socketFlags,
+      serverUri,
+      serverIdent,
+      maxConnections,
+      bufferSize,
+      backlogSize,
+      httpHeadersTimeout,
+      maxHeadersSize,
+      maxRequestBodySize,
+      dualstack = dualstack,
+      middlewares = middlewares,
+    )

   return ok(server)

 proc getRouter(): RestRouter =
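Here the refactor goes one step further than `valueOr`: when the error branch would only do `return err(error)`, the `?` operator expresses the whole thing, unwrapping on success and propagating the error to the caller otherwise. A hedged sketch under the same nim-results assumption (`loadConfig` and `openStore` are illustrative names):

  import results

  type Config = object
    path: string

  proc loadConfig(path: string): Result[Config, string] =
    if path.len == 0:
      return err("empty path")
    ok(Config(path: path))

  proc openStore(path: string): Result[string, string] =
    # `?` unwraps on success; on error it returns err(error) from the
    # enclosing proc, whose error type must be compatible.
    let cfg = ?loadConfig(path)
    ok("store at " & cfg.path)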
@@ -1,6 +1,7 @@
 {.push raises: [].}

-import std/strformat, results, chronicles, uri, json_serialization, presto/route
+import
+  std/[strformat, sugar], results, chronicles, uri, json_serialization, presto/route
 import
   ../../../waku_core,
   ../../../waku_store/common,
@@ -35,14 +36,10 @@ proc performStoreQuery(
     error msg
     return RestApiResponse.internalServerError(msg)

-  let futRes = queryFut.read()
-
-  if futRes.isErr():
-    const msg = "Error occurred in queryFut.read()"
-    error msg, error = futRes.error
-    return RestApiResponse.internalServerError(fmt("{msg} [{futRes.error}]"))
-
-  let res = futRes.get().toHex()
+  let res = queryFut.read().map(val => val.toHex()).valueOr:
+    const msg = "Error occurred in queryFut.read()"
+    error msg, error = error
+    return RestApiResponse.internalServerError(fmt("{msg} [{error}]"))

   if res.statusCode == uint32(ErrorCode.TOO_MANY_REQUESTS):
     info "Request rate limit reached on peer ", storePeer
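`std/sugar` joins the imports for its `=>` lambda shorthand, which lets `Result.map` transform the success value before `valueOr` deals with the failure, all in one expression. A small sketch under the same assumptions (nim-results plus std/sugar; `fetchCount` is illustrative):

  import results, std/sugar

  proc fetchCount(): Result[int, string] =
    ok(42)

  proc describeCount(): Result[string, string] =
    # map only touches the Ok value and passes errors through, so the
    # transform-then-unwrap chain needs no intermediate variable.
    let description = fetchCount().map(n => "count=" & $n).valueOr:
      return err("fetch failed: " & error)
    ok(description)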
@@ -32,71 +32,54 @@ proc new*(
   ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres)
   ## onFatalErrorAction - called if, e.g., the connection with db got lost

-  let dbUrlValidationRes = dburl.validateDbUrl(url)
-  if dbUrlValidationRes.isErr():
-    return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error)
+  dburl.validateDbUrl(url).isOkOr:
+    return err("DbUrl failure in ArchiveDriver.new: " & error)

-  let engineRes = dburl.getDbEngine(url)
-  if engineRes.isErr():
-    return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error)
-
-  let engine = engineRes.get()
+  let engine = dburl.getDbEngine(url).valueOr:
+    return err("error getting db engine in setupWakuArchiveDriver: " & error)

   case engine
   of "sqlite":
-    let pathRes = dburl.getDbPath(url)
-    if pathRes.isErr():
-      return err("error get path in setupWakuArchiveDriver: " & pathRes.error)
+    let path = dburl.getDbPath(url).valueOr:
+      return err("error get path in setupWakuArchiveDriver: " & error)

-    let dbRes = SqliteDatabase.new(pathRes.get())
-    if dbRes.isErr():
-      return err("error in setupWakuArchiveDriver: " & dbRes.error)
-
-    let db = dbRes.get()
+    let db = SqliteDatabase.new(path).valueOr:
+      return err("error in setupWakuArchiveDriver: " & error)

     # SQLite vacuum
-    let sqliteStatsRes = db.gatherSqlitePageStats()
-    if sqliteStatsRes.isErr():
-      return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)
-
-    let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
+    let (pageSize, pageCount, freelistCount) = db.gatherSqlitePageStats().valueOr:
+      return err("error while gathering sqlite stats: " & $error)

     info "sqlite database page stats",
       pageSize = pageSize, pages = pageCount, freePages = freelistCount

     if vacuum and (pageCount > 0 and freelistCount > 0):
-      let vacuumRes = db.performSqliteVacuum()
-      if vacuumRes.isErr():
-        return err("error in vacuum sqlite: " & $vacuumRes.error)
+      db.performSqliteVacuum().isOkOr:
+        return err("error in vacuum sqlite: " & $error)

     # Database migration
     if migrate:
-      let migrateRes = archive_driver_sqlite_migrations.migrate(db)
-      if migrateRes.isErr():
-        return err("error in migrate sqlite: " & $migrateRes.error)
+      archive_driver_sqlite_migrations.migrate(db).isOkOr:
+        return err("error in migrate sqlite: " & $error)

     info "setting up sqlite waku archive driver"
-    let res = SqliteDriver.new(db)
-    if res.isErr():
-      return err("failed to init sqlite archive driver: " & res.error)
-
-    return ok(res.get())
+    let res = SqliteDriver.new(db).valueOr:
+      return err("failed to init sqlite archive driver: " & error)
+
+    return ok(res)
   of "postgres":
     when defined(postgres):
-      let res = PostgresDriver.new(
+      let driver = PostgresDriver.new(
         dbUrl = url,
         maxConnections = maxNumConn,
         onFatalErrorAction = onFatalErrorAction,
-      )
-      if res.isErr():
-        return err("failed to init postgres archive driver: " & res.error)
-
-      let driver = res.get()
+      ).valueOr:
+        return err("failed to init postgres archive driver: " & error)

       # Database migration
       if migrate:
-        let migrateRes = await archive_postgres_driver_migrations.migrate(driver)
-        if migrateRes.isErr():
-          return err("ArchiveDriver build failed in migration: " & $migrateRes.error)
+        (await archive_postgres_driver_migrations.migrate(driver)).isOkOr:
+          return err("ArchiveDriver build failed in migration: " & $error)

         ## This should be started once we make sure the 'messages' table exists
         ## Hence, this should be run after the migration is completed.
@@ -186,11 +186,11 @@ proc timeCursorCallbackImpl(pqResult: ptr PGresult, timeCursor: var Option[Times
   let catchable = catch:
     parseBiggestInt(rawTimestamp)

-  if catchable.isErr():
-    error "could not parse correctly", error = catchable.error.msg
+  let time = catchable.valueOr:
+    error "could not parse correctly", error = error.msg
     return

-  timeCursor = some(catchable.get())
+  timeCursor = some(time)

 proc hashCallbackImpl(
     pqResult: ptr PGresult, rows: var seq[(WakuMessageHash, PubsubTopic, WakuMessage)]
@@ -214,11 +214,10 @@ proc hashCallbackImpl(
   let catchable = catch:
     parseHexStr(rawHash)

-  if catchable.isErr():
-    error "could not parse correctly", error = catchable.error.msg
+  let hashHex = catchable.valueOr:
+    error "could not parse correctly", error = error.msg
     return

-  let hashHex = catchable.get()
   let msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31))

   rows.add((msgHash, "", WakuMessage()))
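Both callbacks above lean on `catch`, the results helper that evaluates its body and turns a raised `CatchableError` into an error value, which is why the failure branch reads `error.msg`: the payload is the exception itself. A hedged sketch of the combination (`parseOrZero` is an illustrative name):

  import results, std/strutils

  proc parseOrZero(raw: string): int =
    # catch: run the body; a raised CatchableError becomes
    # Result[int, ref CatchableError] instead of unwinding the stack.
    let catchable = catch:
      parseInt(raw)

    # The injected `error` is the exception reference, hence `.msg`.
    let value = catchable.valueOr:
      echo "could not parse: ", error.msg
      return 0

    value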
@@ -953,11 +952,10 @@ method getDatabaseSize*(
 method getMessagesCount*(
     s: PostgresDriver
 ): Future[ArchiveDriverResult[int64]] {.async.} =
-  let intRes = await s.getInt("SELECT COUNT(1) FROM messages")
-
-  if intRes.isErr():
-    return err("error in getMessagesCount: " & intRes.error)
+  let intRes = (await s.getInt("SELECT COUNT(1) FROM messages")).valueOr:
+    return err("error in getMessagesCount: " & error)

-  return ok(intRes.get())
+  return ok(intRes)

 method getOldestMessageTimestamp*(
     s: PostgresDriver
@@ -970,47 +968,44 @@ method getOldestMessageTimestamp*(

   let oldestPartitionTimeNanoSec = oldestPartition.getPartitionStartTimeInNanosec()

-  let intRes = await s.getInt("SELECT MIN(timestamp) FROM messages")
-  if intRes.isErr():
+  let intRes = (await s.getInt("SELECT MIN(timestamp) FROM messages")).valueOr:
     ## Just return the oldest partition time considering the partitions set
     return ok(Timestamp(oldestPartitionTimeNanoSec))

-  return ok(Timestamp(min(intRes.get(), oldestPartitionTimeNanoSec)))
+  return ok(Timestamp(min(intRes, oldestPartitionTimeNanoSec)))

 method getNewestMessageTimestamp*(
     s: PostgresDriver
 ): Future[ArchiveDriverResult[Timestamp]] {.async.} =
-  let intRes = await s.getInt("SELECT MAX(timestamp) FROM messages")
-
-  if intRes.isErr():
-    return err("error in getNewestMessageTimestamp: " & intRes.error)
+  let intRes = (await s.getInt("SELECT MAX(timestamp) FROM messages")).valueOr:
+    return err("error in getNewestMessageTimestamp: " & error)

-  return ok(Timestamp(intRes.get()))
+  return ok(Timestamp(intRes))

 method deleteOldestMessagesNotWithinLimit*(
     s: PostgresDriver, limit: int
 ): Future[ArchiveDriverResult[void]] {.async.} =
-  var execRes = await s.writeConnPool.pgQuery(
-    """DELETE FROM messages WHERE messageHash NOT IN
-      (
-        SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ?
-      );""",
-    @[$limit],
-  )
-  if execRes.isErr():
-    return err("error in deleteOldestMessagesNotWithinLimit: " & execRes.error)
-
-  execRes = await s.writeConnPool.pgQuery(
-    """DELETE FROM messages_lookup WHERE messageHash NOT IN
-      (
-        SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ?
-      );""",
-    @[$limit],
-  )
-  if execRes.isErr():
-    return err(
-      "error in deleteOldestMessagesNotWithinLimit messages_lookup: " & execRes.error
-    )
+  (
+    await s.writeConnPool.pgQuery(
+      """DELETE FROM messages WHERE messageHash NOT IN
+      (
+        SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ?
+      );""",
+      @[$limit],
+    )
+  ).isOkOr:
+    return err("error in deleteOldestMessagesNotWithinLimit: " & error)
+
+  (
+    await s.writeConnPool.pgQuery(
+      """DELETE FROM messages_lookup WHERE messageHash NOT IN
+      (
+        SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ?
+      );""",
+      @[$limit],
+    )
+  ).isOkOr:
+    return err("error in deleteOldestMessagesNotWithinLimit messages_lookup: " & error)

   return ok()
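Two details in the async hunks above are easy to miss. `await` binds more loosely than a method call, so the awaited expression must be parenthesised before chaining `.valueOr` or `.isOkOr`; and a `valueOr` block is free to recover with a success value, as `getOldestMessageTimestamp` does by falling back to the partition time instead of propagating the error. A hedged sketch assuming chronos and nim-results (`queryMin` and `oldestOr` are illustrative):

  import chronos, results

  proc queryMin(): Future[Result[int64, string]] {.async.} =
    return err("table empty")

  proc oldestOr(fallback: int64): Future[Result[int64, string]] {.async.} =
    # Parentheses first: (await fut).valueOr awaits the Future, then
    # unwraps the Result it resolved to.
    let v = (await queryMin()).valueOr:
      # The error branch may recover with ok(...) rather than propagate.
      return ok(fallback)
    return ok(v)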
@@ -97,8 +97,7 @@ proc getPage(

   # Find starting entry
   if cursor.isSome():
-    let cursorEntry = w.walkToCursor(cursor.get(), forward)
-    if cursorEntry.isErr():
+    w.walkToCursor(cursor.get(), forward).isOkOr:
       return err(QueueDriverErrorKind.INVALID_CURSOR)

     # Advance walker once more
@@ -177,7 +176,7 @@ proc first*(driver: QueueDriver): ArchiveDriverResult[Index] =
     res = w.first()
   w.destroy()

-  if res.isErr():
+  res.isOkOr:
     return err("Not found")

   return ok(res.value.key)
@@ -188,7 +187,7 @@ proc last*(driver: QueueDriver): ArchiveDriverResult[Index] =
     res = w.last()
   w.destroy()

-  if res.isErr():
+  res.isOkOr:
     return err("Not found")

   return ok(res.value.key)
@@ -285,14 +284,11 @@ method getMessages*(
   let catchable = catch:
     driver.getPage(maxPageSize, ascendingOrder, index, matchesQuery)

-  let pageRes: QueueDriverGetPageResult =
-    if catchable.isErr():
-      return err(catchable.error.msg)
-    else:
-      catchable.get()
+  let pageRes: QueueDriverGetPageResult = catchable.valueOr:
+    return err(catchable.error.msg)

-  if pageRes.isErr():
-    return err($pageRes.error)
+  pageRes.isOkOr:
+    return err($error)

   return ok(pageRes.value)
@@ -36,9 +36,8 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] =

   let query =
     """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;"""
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
-    return err("failed to determine the current SchemaVersion: " & $res.error)
+  db.query(query, queryRowCallback).isOkOr:
+    return err("failed to determine the current SchemaVersion: " & $error)

   if pkColumns == @["pubsubTopic", "id", "storedAt"]:
     return ok(true)
@@ -65,10 +64,8 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
   ## Force the correct schema version
   ?db.setUserVersion(7)

-  let migrationRes =
-    migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath)
-  if migrationRes.isErr():
-    return err("failed to execute migration scripts: " & migrationRes.error)
+  migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath).isOkOr:
+    return err("failed to execute migration scripts: " & error)

   info "finished message store's sqlite database migration"
   return ok()
@@ -129,8 +129,7 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
     count = sqlite3_column_int64(s, 0)

   let query = countMessagesQuery(DbTable)
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
+  db.query(query, queryRowCallback).isOkOr:
     return err("failed to count number of messages in the database")

   return ok(count)
@@ -146,8 +145,7 @@ proc selectOldestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inl
     timestamp = queryRowTimestampCallback(s, 0)

   let query = selectOldestMessageTimestampQuery(DbTable)
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
+  db.query(query, queryRowCallback).isOkOr:
     return err("failed to get the oldest receiver timestamp from the database")

   return ok(timestamp)
@@ -163,8 +161,7 @@ proc selectNewestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inl
     timestamp = queryRowTimestampCallback(s, 0)

   let query = selectNewestMessageTimestampQuery(DbTable)
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
+  db.query(query, queryRowCallback).isOkOr:
     return err("failed to get the newest receiver timestamp from the database")

   return ok(timestamp)
@@ -20,14 +20,12 @@ proc init(db: SqliteDatabase): ArchiveDriverResult[void] =
     return err("db not initialized")

   # Create table, if doesn't exist
-  let resCreate = createTable(db)
-  if resCreate.isErr():
-    return err("failed to create table: " & resCreate.error())
+  createTable(db).isOkOr:
+    return err("failed to create table: " & error)

   # Create indices, if don't exist
-  let resRtIndex = createOldestMessageTimestampIndex(db)
-  if resRtIndex.isErr():
-    return err("failed to create i_ts index: " & resRtIndex.error())
+  createOldestMessageTimestampIndex(db).isOkOr:
+    return err("failed to create i_ts index: " & error)

   return ok()

@@ -37,9 +35,7 @@ type SqliteDriver* = ref object of ArchiveDriver

 proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] =
   # Database initialization
-  let resInit = init(db)
-  if resInit.isErr():
-    return err(resInit.error())
+  ?init(db)

   # General initialization
   let insertStmt = db.prepareInsertMessageStmt()
@@ -20,20 +20,18 @@ method execute*(
   ## Delete messages that exceed the retention time by 10% and more (batch delete for efficiency)
   info "beginning of executing message retention policy - time"

-  let omtRes = await driver.getOldestMessageTimestamp()
-  if omtRes.isErr():
-    return err("failed to get oldest message timestamp: " & omtRes.error)
+  let omt = (await driver.getOldestMessageTimestamp()).valueOr:
+    return err("failed to get oldest message timestamp: " & error)

   let now = getNanosecondTime(getTime().toUnixFloat())
   let retentionTimestamp = now - p.retentionTime.nanoseconds
   let thresholdTimestamp = retentionTimestamp - p.retentionTime.nanoseconds div 10

-  if thresholdTimestamp <= omtRes.value:
+  if thresholdTimestamp <= omt:
     return ok()

-  let res = await driver.deleteMessagesOlderThanTimestamp(ts = retentionTimestamp)
-  if res.isErr():
-    return err("failed to delete oldest messages: " & res.error)
+  (await driver.deleteMessagesOlderThanTimestamp(ts = retentionTimestamp)).isOkOr:
+    return err("failed to delete oldest messages: " & error)

   info "end of executing message retention policy - time"
   return ok()
@@ -34,65 +34,50 @@ proc new*(
   ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres)
   ## onFatalErrorAction - called if, e.g., the connection with db got lost

-  let dbUrlValidationRes = dburl.validateDbUrl(url)
-  if dbUrlValidationRes.isErr():
-    return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error)
+  dburl.validateDbUrl(url).isOkOr:
+    return err("DbUrl failure in ArchiveDriver.new: " & error)

-  let engineRes = dburl.getDbEngine(url)
-  if engineRes.isErr():
-    return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error)
-
-  let engine = engineRes.get()
+  let engine = dburl.getDbEngine(url).valueOr:
+    return err("error getting db engine in setupWakuArchiveDriver: " & error)

   case engine
   of "sqlite":
-    let pathRes = dburl.getDbPath(url)
-    if pathRes.isErr():
-      return err("error get path in setupWakuArchiveDriver: " & pathRes.error)
+    let path = dburl.getDbPath(url).valueOr:
+      return err("error get path in setupWakuArchiveDriver: " & error)

-    let dbRes = SqliteDatabase.new(pathRes.get())
-    if dbRes.isErr():
-      return err("error in setupWakuArchiveDriver: " & dbRes.error)
-
-    let db = dbRes.get()
+    let db = SqliteDatabase.new(path).valueOr:
+      return err("error in setupWakuArchiveDriver: " & error)

     # SQLite vacuum
-    let sqliteStatsRes = db.gatherSqlitePageStats()
-    if sqliteStatsRes.isErr():
-      return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)
-
-    let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
+    let (pageSize, pageCount, freelistCount) = db.gatherSqlitePageStats().valueOr:
+      return err("error while gathering sqlite stats: " & $error)

     info "sqlite database page stats",
       pageSize = pageSize, pages = pageCount, freePages = freelistCount

     if vacuum and (pageCount > 0 and freelistCount > 0):
-      let vacuumRes = db.performSqliteVacuum()
-      if vacuumRes.isErr():
-        return err("error in vacuum sqlite: " & $vacuumRes.error)
+      db.performSqliteVacuum().isOkOr:
+        return err("error in vacuum sqlite: " & $error)

     # Database migration
     if migrate:
-      let migrateRes = archive_driver_sqlite_migrations.migrate(db)
-      if migrateRes.isErr():
-        return err("error in migrate sqlite: " & $migrateRes.error)
+      archive_driver_sqlite_migrations.migrate(db).isOkOr:
+        return err("error in migrate sqlite: " & $error)

     info "setting up sqlite waku archive driver"
-    let res = SqliteDriver.new(db)
-    if res.isErr():
-      return err("failed to init sqlite archive driver: " & res.error)
-
-    return ok(res.get())
+    let res = SqliteDriver.new(db).valueOr:
+      return err("failed to init sqlite archive driver: " & error)
+
+    return ok(res)
   of "postgres":
     when defined(postgres):
-      let res = PostgresDriver.new(
+      let driver = PostgresDriver.new(
         dbUrl = url,
         maxConnections = maxNumConn,
         onFatalErrorAction = onFatalErrorAction,
-      )
-      if res.isErr():
-        return err("failed to init postgres archive driver: " & res.error)
-
-      let driver = res.get()
+      ).valueOr:
+        return err("failed to init postgres archive driver: " & error)

       return ok(driver)
     else:
       return err(
@@ -798,11 +798,10 @@ method getDatabaseSize*(
 method getMessagesCount*(
     s: PostgresDriver
 ): Future[ArchiveDriverResult[int64]] {.async.} =
-  let intRes = await s.getInt("SELECT COUNT(1) FROM messages")
-
-  if intRes.isErr():
-    return err("error in getMessagesCount: " & intRes.error)
+  let intRes = (await s.getInt("SELECT COUNT(1) FROM messages")).valueOr:
+    return err("error in getMessagesCount: " & error)

-  return ok(intRes.get())
+  return ok(intRes)

 method getOldestMessageTimestamp*(
     s: PostgresDriver
@@ -812,11 +811,10 @@ method getOldestMessageTimestamp*(
 method getNewestMessageTimestamp*(
     s: PostgresDriver
 ): Future[ArchiveDriverResult[Timestamp]] {.async.} =
-  let intRes = await s.getInt("SELECT MAX(timestamp) FROM messages")
-
-  if intRes.isErr():
-    return err("error in getNewestMessageTimestamp: " & intRes.error)
+  let intRes = (await s.getInt("SELECT MAX(timestamp) FROM messages")).valueOr:
+    return err("error in getNewestMessageTimestamp: " & error)

-  return ok(Timestamp(intRes.get()))
+  return ok(Timestamp(intRes))

 method deleteOldestMessagesNotWithinLimit*(
     s: PostgresDriver, limit: int
@@ -100,8 +100,7 @@ proc getPage(

   # Find starting entry
   if cursor.isSome():
-    let cursorEntry = w.walkToCursor(cursor.get(), forward)
-    if cursorEntry.isErr():
+    w.walkToCursor(cursor.get(), forward).isOkOr:
       return err(QueueDriverErrorKind.INVALID_CURSOR)

     # Advance walker once more
@@ -182,7 +181,7 @@ proc first*(driver: QueueDriver): ArchiveDriverResult[Index] =
     res = w.first()
   w.destroy()

-  if res.isErr():
+  res.isOkOr:
     return err("Not found")

   return ok(res.value.key)
@@ -193,7 +192,7 @@ proc last*(driver: QueueDriver): ArchiveDriverResult[Index] =
     res = w.last()
   w.destroy()

-  if res.isErr():
+  res.isOkOr:
     return err("Not found")

   return ok(res.value.key)
@@ -297,8 +296,8 @@ method getMessages*(
   except CatchableError, Exception:
     return err(getCurrentExceptionMsg())

-  if pageRes.isErr():
-    return err($pageRes.error)
+  pageRes.isOkOr:
+    return err($error)

   return ok(pageRes.value)
@@ -36,9 +36,8 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] =

   let query =
     """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;"""
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
-    return err("failed to determine the current SchemaVersion: " & $res.error)
+  db.query(query, queryRowCallback).isOkOr:
+    return err("failed to determine the current SchemaVersion: " & $error)

   if pkColumns == @["pubsubTopic", "id", "storedAt"]:
     return ok(true)
@@ -65,10 +64,8 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
   ## Force the correct schema version
   ?db.setUserVersion(7)

-  let migrationRes =
-    migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath)
-  if migrationRes.isErr():
-    return err("failed to execute migration scripts: " & migrationRes.error)
+  migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath).isOkOr:
+    return err("failed to execute migration scripts: " & error)

   info "finished message store's sqlite database migration"
   return ok()
@@ -166,8 +166,7 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
     count = sqlite3_column_int64(s, 0)

   let query = countMessagesQuery(DbTable)
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
+  db.query(query, queryRowCallback).isOkOr:
     return err("failed to count number of messages in the database")

   return ok(count)
@@ -185,8 +184,7 @@ proc selectOldestReceiverTimestamp*(
     timestamp = queryRowReceiverTimestampCallback(s, 0)

   let query = selectOldestMessageTimestampQuery(DbTable)
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
+  db.query(query, queryRowCallback).isOkOr:
     return err("failed to get the oldest receiver timestamp from the database")

   return ok(timestamp)
@@ -204,8 +202,7 @@ proc selectNewestReceiverTimestamp*(
     timestamp = queryRowReceiverTimestampCallback(s, 0)

   let query = selectNewestMessageTimestampQuery(DbTable)
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
+  db.query(query, queryRowCallback).isOkOr:
     return err("failed to get the newest receiver timestamp from the database")

   return ok(timestamp)
@@ -280,9 +277,7 @@ proc selectAllMessages*(
     rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash))

   let query = selectAllMessagesQuery(DbTable)
-  let res = db.query(query, queryRowCallback)
-  if res.isErr():
-    return err(res.error())
+  discard ?db.query(query, queryRowCallback)

   return ok(rows)

@@ -498,7 +493,7 @@ proc execSelectMessageByHash(
   except Exception, CatchableError:
     # release implicit transaction
     discard sqlite3_reset(s) # same return information as step
     discard sqlite3_clear_bindings(s) # no errors possible

 proc selectMessageByHashQuery(): SqlQueryStr =
   var query: string
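`discard ?db.query(...)` above composes two steps: `?` propagates a failed query to the caller, and `discard` drops the success value, which this callback-driven query never uses. A hedged sketch of the shape (nim-results; `countRows` and `refreshCache` are illustrative names):

  import results

  proc countRows(): Result[int, string] =
    ok(3)

  proc refreshCache(): Result[void, string] =
    # Propagate errors with `?`, but throw away the Ok value: the side
    # effects of the call are all we need here.
    discard ?countRows()
    ok()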
@@ -24,18 +24,15 @@ proc init(db: SqliteDatabase): ArchiveDriverResult[void] =
     return err("db not initialized")

   # Create table, if doesn't exist
-  let resCreate = createTable(db)
-  if resCreate.isErr():
-    return err("failed to create table: " & resCreate.error())
+  createTable(db).isOkOr:
+    return err("failed to create table: " & error)

   # Create indices, if don't exist
-  let resRtIndex = createOldestMessageTimestampIndex(db)
-  if resRtIndex.isErr():
-    return err("failed to create i_rt index: " & resRtIndex.error())
+  createOldestMessageTimestampIndex(db).isOkOr:
+    return err("failed to create i_rt index: " & error)

-  let resMsgIndex = createHistoryQueryIndex(db)
-  if resMsgIndex.isErr():
-    return err("failed to create i_query index: " & resMsgIndex.error())
+  createHistoryQueryIndex(db).isOkOr:
+    return err("failed to create i_query index: " & error)

   return ok()

@@ -45,9 +42,7 @@ type SqliteDriver* = ref object of ArchiveDriver

 proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] =
   # Database initialization
-  let resInit = init(db)
-  if resInit.isErr():
-    return err(resInit.error())
+  ?init(db)

   # General initialization
   let insertStmt = db.prepareInsertMessageStmt()
@@ -249,11 +249,10 @@ proc parseUrlPeerAddr*(
     return ok(none(RemotePeerInfo))

   let parsedAddr = decodeUrl(peerAddr.get())
-  let parsedPeerInfo = parsePeerInfo(parsedAddr)
-  if parsedPeerInfo.isErr():
-    return err("Failed parsing remote peer info [" & parsedPeerInfo.error & "]")
+  let parsedPeerInfo = parsePeerInfo(parsedAddr).valueOr:
+    return err("Failed parsing remote peer info: " & error)

-  return ok(some(parsedPeerInfo.value))
+  return ok(some(parsedPeerInfo))

 proc toRemotePeerInfo*(enrRec: enr.Record): Result[RemotePeerInfo, cstring] =
   ## Converts an ENR to dialable RemotePeerInfo
@@ -339,11 +338,10 @@ proc hasProtocol*(ma: MultiAddress, proto: string): bool =
   ## Returns ``true`` if ``ma`` contains protocol ``proto``.
   let proto = MultiCodec.codec(proto)

-  let protos = ma.protocols()
-  if protos.isErr():
+  let protos = ma.protocols().valueOr:
     return false

-  return protos.get().anyIt(it == proto)
+  return protos.anyIt(it == proto)

 func hasUdpPort*(peer: RemotePeerInfo): bool =
   if peer.enr.isNone():
@@ -127,11 +127,10 @@ proc parse*(
 ): ParsingResult[seq[NsContentTopic]] =
   var res: seq[NsContentTopic] = @[]
   for contentTopic in topics:
-    let parseRes = NsContentTopic.parse(contentTopic)
-    if parseRes.isErr():
-      let error: ParsingError = parseRes.error
-      return ParsingResult[seq[NsContentTopic]].err(error)
-    res.add(parseRes.value)
+    let parseRes = NsContentTopic.parse(contentTopic).valueOr:
+      let pError: ParsingError = error
+      return ParsingResult[seq[NsContentTopic]].err(pError)
+    res.add(parseRes)

   return ParsingResult[seq[NsContentTopic]].ok(res)

 # Content topic compatibility
@@ -59,12 +59,8 @@ proc getShardsFromContentTopics*(
     else:
       @[contentTopics]

-  let parseRes = NsContentTopic.parse(topics)
-  let nsContentTopics =
-    if parseRes.isErr():
-      return err("Cannot parse content topic: " & $parseRes.error)
-    else:
-      parseRes.get()
+  let nsContentTopics = NsContentTopic.parse(topics).valueOr:
+    return err("Cannot parse content topic: " & $error)

   var topicMap = initTable[RelayShard, seq[NsContentTopic]]()
   for content in nsContentTopics:
@@ -94,11 +94,10 @@ func waku2*(record: TypedRecord): Option[CapabilitiesBitfield] =
   some(CapabilitiesBitfield(field.get()[0]))

 proc supportsCapability*(r: Record, cap: Capabilities): bool =
-  let recordRes = r.toTyped()
-  if recordRes.isErr():
+  let recordRes = r.toTyped().valueOr:
     return false

-  let bitfieldOpt = recordRes.value.waku2
+  let bitfieldOpt = recordRes.waku2
   if bitfieldOpt.isNone():
     return false

@@ -106,11 +105,10 @@ proc supportsCapability*(r: Record, cap: Capabilities): bool =
   bitfield.supportsCapability(cap)

 proc getCapabilities*(r: Record): seq[Capabilities] =
-  let recordRes = r.toTyped()
-  if recordRes.isErr():
+  let recordRes = r.toTyped().valueOr:
     return @[]

-  let bitfieldOpt = recordRes.value.waku2
+  let bitfieldOpt = recordRes.waku2
   if bitfieldOpt.isNone():
     return @[]

@@ -88,8 +88,7 @@ func multiaddrs*(record: TypedRecord): Option[seq[MultiAddress]] =
   if field.isNone():
     return none(seq[MultiAddress])

-  let decodeRes = decodeMultiaddrs(field.get())
-  if decodeRes.isErr():
+  let decodeRes = decodeMultiaddrs(field.get()).valueOr:
     return none(seq[MultiAddress])

-  some(decodeRes.value)
+  some(decodeRes)
@@ -64,8 +64,8 @@ func topicsToRelayShards*(topics: seq[string]): Result[Option[RelayShards], stri
   let parsedTopicsRes = topics.mapIt(RelayShard.parse(it))

   for res in parsedTopicsRes:
-    if res.isErr():
-      return err("failed to parse topic: " & $res.error)
+    res.isOkOr:
+      return err("failed to parse topic: " & $error)

   if parsedTopicsRes.anyIt(it.get().clusterId != parsedTopicsRes[0].get().clusterId):
     return err("use shards with the same cluster Id.")
@@ -84,11 +84,10 @@ func contains*(rs: RelayShards, shard: RelayShard): bool =
   return rs.contains(shard.clusterId, shard.shardId)

 func contains*(rs: RelayShards, topic: PubsubTopic): bool =
-  let parseRes = RelayShard.parse(topic)
-  if parseRes.isErr():
+  let parseRes = RelayShard.parse(topic).valueOr:
     return false

-  rs.contains(parseRes.value)
+  rs.contains(parseRes)

 # ENR builder extension

@@ -239,12 +238,11 @@ proc containsShard*(r: Record, shard: RelayShard): bool =
   return containsShard(r, shard.clusterId, shard.shardId)

 proc containsShard*(r: Record, topic: PubsubTopic): bool =
-  let parseRes = RelayShard.parse(topic)
-  if parseRes.isErr():
-    info "invalid static sharding topic", topic = topic, error = parseRes.error
+  let parseRes = RelayShard.parse(topic).valueOr:
+    info "invalid static sharding topic", topic = topic, error = error
     return false

-  containsShard(r, parseRes.value)
+  containsShard(r, parseRes)

 proc isClusterMismatched*(record: Record, clusterId: uint16): bool =
   ## Check the ENR sharding info for matching cluster id
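In `contains` and `containsShard` above, the `valueOr` block does not build a `Result` at all: a `bool`-returning proc simply bails out with its own neutral value. A hedged sketch of the pattern (nim-results; `parseShard` and `isKnownShard` are illustrative names):

  import results

  proc parseShard(topic: string): Result[int, string] =
    if topic.len == 0:
      return err("empty topic")
    ok(topic.len)

  proc isKnownShard(topic: string): bool =
    # The valueOr block only has to exit this proc; returning the
    # caller-appropriate default is enough, no err(...) required.
    let shard = parseShard(topic).valueOr:
      return false
    shard > 0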
@@ -80,14 +80,11 @@ proc sendSubscribeRequest(
     waku_filter_errors.inc(labelValues = [errMsg])
     return err(FilterSubscribeError.badResponse(errMsg))

-  let respDecodeRes = FilterSubscribeResponse.decode(respBuf)
-  if respDecodeRes.isErr():
+  let response = FilterSubscribeResponse.decode(respBuf).valueOr:
     trace "Failed to decode filter subscribe response", servicePeer
     waku_filter_errors.inc(labelValues = [decodeRpcFailure])
     return err(FilterSubscribeError.badResponse(decodeRpcFailure))

-  let response = respDecodeRes.get()
-
   # DOS protection rate limit checks does not know about request id
   if response.statusCode != FilterSubscribeErrorKind.TOO_MANY_REQUESTS.uint32 and
       response.requestId != filterSubscribeRequest.requestId:
@@ -157,15 +157,14 @@ proc handleSubscribeRequest*(
     requestDurationSec, labelValues = [$request.filterSubscribeType]
   )

-  if subscribeResult.isErr():
+  subscribeResult.isOkOr:
     error "subscription request error", peerId = shortLog(peerId), request = request
     return FilterSubscribeResponse(
       requestId: request.requestId,
-      statusCode: subscribeResult.error.kind.uint32,
-      statusDesc: some($subscribeResult.error),
+      statusCode: error.kind.uint32,
+      statusDesc: some($error),
     )
-  else:
-    return FilterSubscribeResponse.ok(request.requestId)
+
+  return FilterSubscribeResponse.ok(request.requestId)

 proc pushToPeer(
     wf: WakuFilter, peerId: PeerId, buffer: seq[byte]
@@ -309,15 +308,12 @@ proc initProtocolHandler(wf: WakuFilter) =
       amount = buf.len().int64, labelValues = [WakuFilterSubscribeCodec, "in"]
     )

-    let decodeRes = FilterSubscribeRequest.decode(buf)
-    if decodeRes.isErr():
+    let request = FilterSubscribeRequest.decode(buf).valueOr:
       error "failed to decode filter subscribe request",
-        peer_id = conn.peerId, err = decodeRes.error
+        peer_id = conn.peerId, err = error
       waku_filter_errors.inc(labelValues = [decodeRpcFailure])
       return

-    let request = decodeRes.value #TODO: toAPI() split here
-
     try:
       response = await wf.handleSubscribeRequest(conn.peerId, request)
     except CatchableError:
@@ -1,4 +1,4 @@
 # This implementation is originally taken from nim-eth keyfile module https://github.com/status-im/nim-eth/blob/master/eth/keyfile and adapted to
 # - create keyfiles for arbitrary-long input byte data (rather than fixed-size private keys)
 # - allow storage of multiple keyfiles (encrypted with different passwords) in same file and iteration among successful decryptions
 # - enable/disable at compilation time the keyfile id and version fields
@@ -517,26 +517,15 @@ func decryptSecret(crypto: Crypto, dkey: DKey): KfResult[seq[byte]] =
 proc decodeKeyFileJson*(j: JsonNode, password: string): KfResult[seq[byte]] =
   ## Decode secret from keyfile json object ``j`` using
   ## password string ``password``.
-  let res = decodeCrypto(j)
-  if res.isErr:
-    return err(res.error)
-
-  let crypto = res.get()
+  let crypto = ?decodeCrypto(j)

   case crypto.kind
   of PBKDF2:
-    let res = decodePbkdf2Params(crypto.kdfParams)
-    if res.isErr:
-      return err(res.error)
-
-    let params = res.get()
+    let params = ?decodePbkdf2Params(crypto.kdfParams)
     let dkey = ?deriveKey(password, params.salt, PBKDF2, params.prf, params.c)
     return decryptSecret(crypto, dkey)
   of SCRYPT:
-    let res = decodeScryptParams(crypto.kdfParams)
-    if res.isErr:
-      return err(res.error)
-
-    let params = res.get()
+    let params = ?decodeScryptParams(crypto.kdfParams)
     let dkey = ?deriveKey(password, params.salt, params.n, params.r, params.p)
     return decryptSecret(crypto, dkey)
@@ -50,9 +50,7 @@ proc loadAppKeystore*(

   # If no keystore exists at path we create a new empty one with passed keystore parameters
   if fileExists(path) == false:
-    let newKeystoreRes = createAppKeystore(path, appInfo, separator)
-    if newKeystoreRes.isErr():
-      return err(newKeystoreRes.error)
+    ?createAppKeystore(path, appInfo, separator)

   try:
     # We read all the file contents
@@ -175,13 +173,9 @@ proc addMembershipCredentials*(
 ): KeystoreResult[void] =
   # We load the keystore corresponding to the desired parameters
   # This call ensures that JSON has all required fields
-  let jsonKeystoreRes = loadAppKeystore(path, appInfo, separator)
-
-  if jsonKeystoreRes.isErr():
-    return err(jsonKeystoreRes.error)
-
   # We load the JSON node corresponding to the app keystore
-  var jsonKeystore = jsonKeystoreRes.get()
+  let jsonKeystore = ?loadAppKeystore(path, appInfo, separator)

   try:
     if jsonKeystore.hasKey("credentials"):
@@ -193,21 +187,16 @@ proc addMembershipCredentials*(
         return ok()

     let encodedMembershipCredential = membership.encode()
-    let keyfileRes = createKeyFileJson(encodedMembershipCredential, password)
-    if keyfileRes.isErr():
-      return err(
-        AppKeystoreError(kind: KeystoreCreateKeyfileError, msg: $keyfileRes.error)
-      )

     # We add it to the credentials field of the keystore
-    jsonKeystore["credentials"][key] = keyfileRes.get()
+    jsonKeystore["credentials"][key] = createKeyFileJson(
+      encodedMembershipCredential, password
+    ).valueOr:
+      return err(AppKeystoreError(kind: KeystoreCreateKeyfileError, msg: $error))
   except CatchableError:
     return err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg()))

   # We save to disk the (updated) keystore.
-  let saveRes = save(jsonKeystore, path, separator)
-  if saveRes.isErr():
-    return err(saveRes.error)
+  ?save(jsonKeystore, path, separator)

   return ok()

@@ -218,13 +207,9 @@ proc getMembershipCredentials*(
 ): KeystoreResult[KeystoreMembership] =
   # We load the keystore corresponding to the desired parameters
   # This call ensures that JSON has all required fields
-  let jsonKeystoreRes = loadAppKeystore(path, appInfo)
-
-  if jsonKeystoreRes.isErr():
-    return err(jsonKeystoreRes.error)
-
   # We load the JSON node corresponding to the app keystore
-  var jsonKeystore = jsonKeystoreRes.get()
+  let jsonKeystore = ?loadAppKeystore(path, appInfo)

   try:
     if jsonKeystore.hasKey("credentials"):
@@ -254,15 +239,10 @@ proc getMembershipCredentials*(
       )
       keystoreCredential = keystoreCredentials[key]

-      let decodedKeyfileRes = decodeKeyFileJson(keystoreCredential, password)
-      if decodedKeyfileRes.isErr():
-        return err(
-          AppKeystoreError(
-            kind: KeystoreReadKeyfileError, msg: $decodedKeyfileRes.error
-          )
-        )
+      let decodedKeyfile = decodeKeyFileJson(keystoreCredential, password).valueOr:
+        return err(AppKeystoreError(kind: KeystoreReadKeyfileError, msg: $error))
       # we parse the json decrypted keystoreCredential
-      let decodedCredentialRes = decode(decodedKeyfileRes.get())
+      let decodedCredentialRes = decode(decodedKeyfile)
       let keyfileMembershipCredential = decodedCredentialRes.get()
       return ok(keyfileMembershipCredential)
   except CatchableError:
@@ -26,8 +26,7 @@ proc checkAndGenerateRLNProof*(
       time = getTime().toUnix()
       senderEpochTime = float64(time)
     var msgWithProof = message
-    rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime).isOkOr:
-      return err(error)
+    ?(rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime))
     return ok(msgWithProof)

 proc getNilPushHandler*(): PushMessageHandler =
@@ -49,12 +48,10 @@ proc getRelayPushHandler*(
     (await wakuRelay.validateMessage(pubSubTopic, msgWithProof)).isOkOr:
       return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, $error)

-    let publishedResult = await wakuRelay.publish(pubsubTopic, msgWithProof)
-
-    if publishedResult.isErr():
-      let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
-      notice "Lightpush request has not been published to any peers",
-        msg_hash = msgHash, reason = $publishedResult.error
-      return mapPubishingErrorToPushResult(publishedResult.error)
-
-    return lightpushSuccessResult(publishedResult.get().uint32)
+    let publishedResult = (await wakuRelay.publish(pubsubTopic, msgWithProof)).valueOr:
+      let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
+      notice "Lightpush request has not been published to any peers",
+        msg_hash = msgHash, reason = $error
+      return mapPubishingErrorToPushResult(error)
+
+    return lightpushSuccessResult(publishedResult.uint32)
@@ -25,8 +25,7 @@ proc checkAndGenerateRLNProof*(
       time = getTime().toUnix()
       senderEpochTime = float64(time)
     var msgWithProof = message
-    rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime).isOkOr:
-      return err(error)
+    ?(rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime))
     return ok(msgWithProof)

 proc getNilPushHandler*(): PushMessageHandler =
@ -42,19 +41,15 @@ proc getRelayPushHandler*(
|
|||||||
peer: PeerId, pubsubTopic: string, message: WakuMessage
|
peer: PeerId, pubsubTopic: string, message: WakuMessage
|
||||||
): Future[WakuLightPushResult[void]] {.async.} =
|
): Future[WakuLightPushResult[void]] {.async.} =
|
||||||
# append RLN proof
|
# append RLN proof
|
||||||
let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message)
|
let msgWithProof = ?checkAndGenerateRLNProof(rlnPeer, message)
|
||||||
if msgWithProof.isErr():
|
|
||||||
return err(msgWithProof.error)
|
|
||||||
|
|
||||||
(await wakuRelay.validateMessage(pubSubTopic, msgWithProof.value)).isOkOr:
|
?(await wakuRelay.validateMessage(pubSubTopic, msgWithProof))
|
||||||
return err(error)
|
|
||||||
|
|
||||||
let publishResult = await wakuRelay.publish(pubsubTopic, msgWithProof.value)
|
(await wakuRelay.publish(pubsubTopic, msgWithProof)).isOkOr:
|
||||||
if publishResult.isErr():
|
|
||||||
## Agreed change expected to the lightpush protocol to better handle such case. https://github.com/waku-org/pm/issues/93
|
## Agreed change expected to the lightpush protocol to better handle such case. https://github.com/waku-org/pm/issues/93
|
||||||
let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
|
let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
|
||||||
notice "Lightpush request has not been published to any peers",
|
notice "Lightpush request has not been published to any peers",
|
||||||
msg_hash = msgHash, reason = $publishResult.error
|
msg_hash = msgHash, reason = $error
|
||||||
# for legacy lightpush we do not detail the reason towards clients. All error during publish result in not-published-to-any-peer
|
# for legacy lightpush we do not detail the reason towards clients. All error during publish result in not-published-to-any-peer
|
||||||
# this let client of the legacy protocol to react as they did so far.
|
# this let client of the legacy protocol to react as they did so far.
|
||||||
return err(protocol_metrics.notPublishedAnyPeer)
|
return err(protocol_metrics.notPublishedAnyPeer)
|
||||||
|
|||||||
@ -52,13 +52,11 @@ proc sendPushRequest(
|
|||||||
except LPStreamRemoteClosedError:
|
except LPStreamRemoteClosedError:
|
||||||
return err("Exception reading: " & getCurrentExceptionMsg())
|
return err("Exception reading: " & getCurrentExceptionMsg())
|
||||||
|
|
||||||
let decodeRespRes = PushRPC.decode(buffer)
|
let pushResponseRes = PushRPC.decode(buffer).valueOr:
|
||||||
if decodeRespRes.isErr():
|
|
||||||
error "failed to decode response"
|
error "failed to decode response"
|
||||||
waku_lightpush_errors.inc(labelValues = [decodeRpcFailure])
|
waku_lightpush_errors.inc(labelValues = [decodeRpcFailure])
|
||||||
return err(decodeRpcFailure)
|
return err(decodeRpcFailure)
|
||||||
|
|
||||||
let pushResponseRes = decodeRespRes.get()
|
|
||||||
if pushResponseRes.response.isNone():
|
if pushResponseRes.response.isNone():
|
||||||
waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure])
|
waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure])
|
||||||
return err(emptyResponseBodyFailure)
|
return err(emptyResponseBodyFailure)
|
||||||
|
|||||||
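This hunk keeps valueOr because the decoded response is used afterwards, while many of the surrounding hunks switch to isOkOr, which fits when only the failure branch matters. A small sketch of that distinction (the counter and both procs are made up for illustration):

import results

var dropped = 0 # stand-in for a metrics counter

# Hypothetical persistence call with nothing to return on success.
proc store(msg: string): Result[void, string] =
  if msg.len > 10: err("too long") else: ok()

proc ingest(msg: string) =
  # isOkOr fits when only the failure branch matters: there is no
  # value to bind, just log, count and fall through or bail out.
  store(msg).isOkOr:
    dropped.inc()
    echo "dropped: ", error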
@@ -33,8 +33,8 @@ proc respond(

   let res = catch:
     await conn.writeLP(response.encode().buffer)
-  if res.isErr():
-    return err(res.error.msg)
+  res.isOkOr:
+    return err(error.msg)

   return ok()

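`respond` pairs isOkOr with the `catch` template, which evaluates a raising expression and packs any CatchableError into a Result so the usual combinators apply; `error` is then a ref CatchableError, hence the error.msg accesses throughout this commit. A sketch assuming `catch` as shipped with recent nim-results (`flakyWrite`/`safeWrite` are hypothetical):

import results

# Hypothetical raising call.
proc flakyWrite(): int =
  raise newException(IOError, "stream closed")

proc safeWrite(): Result[int, string] =
  # `catch` evaluates the body and packs a raised CatchableError into
  # a Result, so isOkOr/valueOr apply and `error` is a ref CatchableError.
  let res = catch:
    flakyWrite()
  res.isOkOr:
    return err("write failed: " & error.msg)
  ok(res.get())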
@@ -53,17 +53,14 @@ proc request*(
   # close no matter what
   let closeRes = catch:
     await conn.closeWithEof()
-  if closeRes.isErr():
-    return err("close failed: " & closeRes.error.msg)
+  closeRes.isOkOr:
+    return err("close failed: " & error.msg)

-  if writeRes.isErr():
-    return err("write failed: " & writeRes.error.msg)
+  writeRes.isOkOr:
+    return err("write failed: " & error.msg)

-  let buffer =
-    if readRes.isErr():
-      return err("read failed: " & readRes.error.msg)
-    else:
-      readRes.get()
+  let buffer = readRes.valueOr:
+    return err("read failed: " & error.msg)

   let response = WakuMetadataResponse.decode(buffer).valueOr:
     return err("decode failed: " & $error)
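The `let buffer =` rewrite shows valueOr subsuming the older `if isErr ... else: get()` assignment expression in one step. Sketch with hypothetical `readFrame`/`nextBuffer`:

import results

# Hypothetical framed read.
proc readFrame(): Result[seq[byte], string] =
  ok(@[0x08'u8, 0x01])

proc nextBuffer(): Result[seq[byte], string] =
  let readRes = readFrame()
  # Before: let buffer = (if readRes.isErr(): return ... else: readRes.get())
  # After: valueOr states "the value, or bail" in one step.
  let buffer = readRes.valueOr:
    return err("read failed: " & error)
  ok(buffer)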
@@ -157,15 +157,14 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
         error "Failed to respond with BAD_REQUEST:", error = $error
       return

-    let decBuf = PeerExchangeRpc.decode(buffer)
-    if decBuf.isErr():
+    let decBuf = PeerExchangeRpc.decode(buffer).valueOr:
       waku_px_errors.inc(labelValues = [decodeRpcFailure])
-      error "Failed to decode PeerExchange request", error = $decBuf.error
+      error "Failed to decode PeerExchange request", error = $error

       (
         try:
           await wpx.respondError(
-            PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn
+            PeerExchangeResponseStatusCode.BAD_REQUEST, some($error), conn
           )
         except CatchableError:
           error "could not send error response decode",

@@ -175,7 +174,7 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
         error "Failed to respond with BAD_REQUEST:", error = $error
       return

-    let enrs = wpx.getEnrsFromCache(decBuf.get().request.numPeers)
+    let enrs = wpx.getEnrsFromCache(decBuf.request.numPeers)
     info "peer exchange request received"
     trace "px enrs to respond", enrs = $enrs
     try:

@@ -399,8 +399,7 @@ proc getPeersInMesh*(
 ): Result[seq[PeerId], string] =
   ## Returns the list of peerIds in a mesh defined by the passed pubsub topic.
   ## The 'mesh' atribute is defined in the GossipSub ref object.
-  let pubSubPeers = w.getPubSubPeersInMesh(pubsubTopic).valueOr:
-    return err(error)
+  let pubSubPeers = ?w.getPubSubPeersInMesh(pubsubTopic)
   let peerIds = toSeq(pubSubPeers).mapIt(it.peerId)

   return ok(peerIds)
@@ -544,22 +543,20 @@ proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandle
   let topicHandler = proc(
       pubsubTopic: string, data: seq[byte]
   ): Future[void] {.gcsafe, raises: [].} =
-    let decMsg = WakuMessage.decode(data)
-    if decMsg.isErr():
+    let decMsg = WakuMessage.decode(data).valueOr:
       # fine if triggerSelf enabled, since validators are bypassed
       error "failed to decode WakuMessage, validator passed a wrong message",
-        pubsubTopic = pubsubTopic, error = decMsg.error
+        pubsubTopic = pubsubTopic, error = error
       let fut = newFuture[void]()
       fut.complete()
       return fut
-    else:
-      # this subscription handler is called once for every validated message
-      # that will be relayed, hence this is the place we can count net incoming traffic
-      waku_relay_network_bytes.inc(
-        data.len.int64 + pubsubTopic.len.int64, labelValues = [pubsubTopic, "net", "in"]
-      )
+    # this subscription handler is called once for every validated message
+    # that will be relayed, hence this is the place we can count net incoming traffic
+    waku_relay_network_bytes.inc(
+      data.len.int64 + pubsubTopic.len.int64, labelValues = [pubsubTopic, "net", "in"]
+    )

-      return handler(pubsubTopic, decMsg.get())
+    return handler(pubsubTopic, decMsg)

   # Add the ordered validator to the topic
   # This assumes that if `w.validatorInserted.hasKey(pubSubTopic) is true`, it contains the ordered validator.
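In `subscribe` the enclosing handler returns a Future[void] rather than a Result, so its valueOr block exits by handing back a completed future. In general the failure block only has to leave the scope with whatever the enclosing proc returns; a sketch using an enum return type (all names hypothetical):

import results

type ValidationResult = enum
  Accept
  Reject

# Hypothetical decoder.
proc decodeMsg(data: seq[byte]): Result[string, string] =
  if data.len == 0: err("empty") else: ok("msg")

proc validate(data: seq[byte]): ValidationResult =
  # The enclosing proc returns an enum, not a Result: the valueOr
  # block exits with the enum value that signals failure.
  let msg = decodeMsg(data).valueOr:
    return Reject
  if msg.len > 0: Accept else: Reject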
@@ -670,8 +667,7 @@ proc getConnectedPeers*(
   ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic.
   ## The 'gossipsub' atribute is defined in the GossipSub ref object.

-  let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr:
-    return err(error)
+  let peers = ?w.getConnectedPubSubPeers(pubsubTopic)

   let peerIds = toSeq(peers).mapIt(it.peerId)
   return ok(peerIds)

@@ -55,18 +55,16 @@ proc batchAdvertise*(
   let dialCatch = catch:
     await allFinished(futs)

-  if dialCatch.isErr():
-    return err("batchAdvertise: " & dialCatch.error.msg)
-
-  futs = dialCatch.get()
+  futs = dialCatch.valueOr:
+    return err("batchAdvertise: " & error.msg)

   let conns = collect(newSeq):
     for fut in futs:
       let catchable = catch:
         fut.read()

-      if catchable.isErr():
-        warn "a rendezvous dial failed", cause = catchable.error.msg
+      catchable.isOkOr:
+        warn "a rendezvous dial failed", cause = error.msg
         continue

       let connOpt = catchable.get()
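`batchAdvertise` also uses valueOr on the right-hand side of a plain reassignment, folding the old isErr check plus the separate `futs = dialCatch.get()` line into one statement. A sketch (`gather`/`run` are illustrative):

import results

proc gather(): Result[seq[int], string] =
  ok(@[1, 2, 3])

proc run(): Result[void, string] =
  var items = @[0]
  # valueOr also works for plain reassignment, replacing the
  # isErr check plus the separate `items = res.get()` line.
  items = gather().valueOr:
    return err("gather failed: " & error)
  doAssert items == @[1, 2, 3]
  ok()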
@@ -82,8 +80,8 @@ proc batchAdvertise*(
     for conn in conns:
       await conn.close()

-  if advertCatch.isErr():
-    return err("batchAdvertise: " & advertCatch.error.msg)
+  advertCatch.isOkOr:
+    return err("batchAdvertise: " & error.msg)

   return ok()

@@ -104,18 +102,16 @@ proc batchRequest*(
   let dialCatch = catch:
     await allFinished(futs)

-  if dialCatch.isErr():
-    return err("batchRequest: " & dialCatch.error.msg)
-
-  futs = dialCatch.get()
+  futs = dialCatch.valueOr:
+    return err("batchRequest: " & error.msg)

   let conns = collect(newSeq):
     for fut in futs:
       let catchable = catch:
         fut.read()

-      if catchable.isErr():
-        warn "a rendezvous dial failed", cause = catchable.error.msg
+      catchable.isOkOr:
+        warn "a rendezvous dial failed", cause = error.msg
         continue

       let connOpt = catchable.get()

@@ -131,8 +127,8 @@ proc batchRequest*(
     for conn in conns:
       await conn.close()

-  if reqCatch.isErr():
-    return err("batchRequest: " & reqCatch.error.msg)
+  reqCatch.isOkOr:
+    return err("batchRequest: " & error.msg)

   return ok(reqCatch.get())

@@ -164,8 +160,8 @@ proc advertiseAll(
   let catchable = catch:
     await allFinished(futs)

-  if catchable.isErr():
-    return err(catchable.error.msg)
+  catchable.isOkOr:
+    return err(error.msg)

   for fut in catchable.get():
     if fut.failed():

@@ -201,8 +197,8 @@ proc initialRequestAll*(
   let catchable = catch:
     await allFinished(futs)

-  if catchable.isErr():
-    return err(catchable.error.msg)
+  catchable.isOkOr:
+    return err(error.msg)

   for fut in catchable.get():
     if fut.failed():

@@ -211,7 +207,7 @@ proc initialRequestAll*(
       let res = fut.value()

       let records = res.valueOr:
-        warn "a rendezvous request failed", cause = $res.error
+        warn "a rendezvous request failed", cause = $error
         continue

       for record in records:

@@ -268,16 +264,14 @@ proc new*(
   let rvCatchable = catch:
     RendezVous.new(switch = switch, minDuration = DefaultRegistrationTTL)

-  if rvCatchable.isErr():
-    return err(rvCatchable.error.msg)
-
-  let rv = rvCatchable.get()
+  let rv = rvCatchable.valueOr:
+    return err(error.msg)

   let mountCatchable = catch:
     switch.mount(rv)

-  if mountCatchable.isErr():
-    return err(mountCatchable.error.msg)
+  mountCatchable.isOkOr:
+    return err(error.msg)

   var wrv = WakuRendezVous()
   wrv.rendezvous = rv
@@ -111,17 +111,17 @@ proc fetchMembershipStatus*(
 ): Future[Result[bool, string]] {.async.} =
   try:
     let params = idCommitment.reversed()
-    let resultBytes = await sendEthCallWithParams(
-      ethRpc = g.ethRpc.get(),
-      functionSignature = "isInMembershipSet(uint256)",
-      params = params,
-      fromAddress = g.ethRpc.get().defaultAccount,
-      toAddress = fromHex(Address, g.ethContractAddress),
-      chainId = g.chainId,
-    )
-    if resultBytes.isErr():
-      return err("Failed to check membership: " & resultBytes.error)
-    let responseBytes = resultBytes.get()
+    let responseBytes = (
+      await sendEthCallWithParams(
+        ethRpc = g.ethRpc.get(),
+        functionSignature = "isInMembershipSet(uint256)",
+        params = params,
+        fromAddress = g.ethRpc.get().defaultAccount,
+        toAddress = fromHex(Address, g.ethContractAddress),
+        chainId = g.chainId,
+      )
+    ).valueOr:
+      return err("Failed to check membership: " & error)

     return ok(responseBytes.len == 32 and responseBytes[^1] == 1'u8)
   except CatchableError:
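`fetchMembershipStatus` wraps a multi-line awaited call in parentheses so that .valueOr applies to the whole expression. A synchronous sketch of the shape (`sendCall`/`isInSet` are hypothetical stand-ins for the eth_call helper):

import results

# Hypothetical stand-in for an eth_call round-trip.
proc sendCall(functionSignature: string, param: uint64): Result[seq[byte], string] =
  var bytes = newSeq[byte](32)
  bytes[^1] = 1'u8
  ok(bytes)

proc isInSet(id: uint64): Result[bool, string] =
  # Parenthesizing the whole call lets .valueOr apply to the full
  # multi-line expression; in the diff an `await` sits inside the parens.
  let response = (
    sendCall(
      functionSignature = "isInMembershipSet(uint256)",
      param = id,
    )
  ).valueOr:
    return err("Failed to check membership: " & error)
  ok(response.len == 32 and response[^1] == 1'u8)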
@@ -155,11 +155,10 @@ template retryWrapper(
     body

 proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} =
-  let rootRes = await g.fetchMerkleRoot()
-  if rootRes.isErr():
+  let rootRes = (await g.fetchMerkleRoot()).valueOr:
     return false

-  let merkleRoot = UInt256ToField(rootRes.get())
+  let merkleRoot = UInt256ToField(rootRes)

   if g.validRoots.len == 0:
     g.validRoots.addLast(merkleRoot)

@@ -193,14 +192,12 @@ proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError
     else:
       g.merkleProofCache = proofResult.get()

-    let nextFreeIndex = await g.fetchNextFreeIndex()
-    if nextFreeIndex.isErr():
-      error "Failed to fetch next free index", error = nextFreeIndex.error
-      raise newException(
-        CatchableError, "Failed to fetch next free index: " & nextFreeIndex.error
-      )
+    let nextFreeIndex = (await g.fetchNextFreeIndex()).valueOr:
+      error "Failed to fetch next free index", error = error
+      raise
+        newException(CatchableError, "Failed to fetch next free index: " & error)

-    let memberCount = cast[int64](nextFreeIndex.get())
+    let memberCount = cast[int64](nextFreeIndex)
     waku_rln_number_registered_memberships.set(float64(memberCount))
   except CatchableError:
     error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg()
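`trackRootChanges` runs under an exception-based contract, so its valueOr block raises instead of returning. Sketch (`fetchNextFreeIndex` here is a failing stand-in, not the real fetcher):

import results

# Hypothetical fetcher that fails.
proc fetchNextFreeIndex(): Result[uint64, string] =
  err("rpc timeout")

proc trackOnce() =
  # Inside procs whose contract is "raise on failure", the valueOr
  # block can raise instead of returning a Result.
  let nextFreeIndex = fetchNextFreeIndex().valueOr:
    raise newException(CatchableError, "Failed to fetch next free index: " & error)
  echo "registered memberships: ", nextFreeIndex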
@@ -315,11 +312,9 @@ proc getRootFromProofAndIndex(
   # it's currently not used anywhere, but can be used to verify the root from the proof and index
   # Compute leaf hash from idCommitment and messageLimit
   let messageLimitField = uint64ToField(g.userMessageLimit.get())
-  let leafHashRes = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField])
-  if leafHashRes.isErr():
-    return err("Failed to compute leaf hash: " & leafHashRes.error)
+  var hash = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField]).valueOr:
+    return err("Failed to compute leaf hash: " & error)

-  var hash = leafHashRes.get()
   for i in 0 ..< bits.len:
     let sibling = elements[i * 32 .. (i + 1) * 32 - 1]

@@ -331,7 +326,6 @@ proc getRootFromProofAndIndex(

     hash = hashRes.valueOr:
       return err("Failed to compute poseidon hash: " & error)
-    hash = hashRes.get()

   return ok(hash)

@@ -178,12 +178,9 @@ proc validateMessage*(
   ## `timeOption` indicates Unix epoch time (fractional part holds sub-seconds)
   ## if `timeOption` is supplied, then the current epoch is calculated based on that

-  let decodeRes = RateLimitProof.init(msg.proof)
-  if decodeRes.isErr():
+  let proof = RateLimitProof.init(msg.proof).valueOr:
     return MessageValidationResult.Invalid

-  let proof = decodeRes.get()
-
   # track message count for metrics
   waku_rln_messages_total.inc()

@@ -228,7 +225,7 @@ proc validateMessage*(
   let proofVerificationRes =
     rlnPeer.groupManager.verifyProof(msg.toRLNSignal(), proof)

-  if proofVerificationRes.isErr():
+  proofVerificationRes.isOkOr:
     waku_rln_errors_total.inc(labelValues = ["proof_verification"])
     warn "invalid message: proof verification failed", payloadLen = msg.payload.len
     return MessageValidationResult.Invalid

@@ -240,13 +237,12 @@ proc validateMessage*(
     return MessageValidationResult.Invalid

   # check if double messaging has happened
-  let proofMetadataRes = proof.extractMetadata()
-  if proofMetadataRes.isErr():
+  let proofMetadata = proof.extractMetadata().valueOr:
     waku_rln_errors_total.inc(labelValues = ["proof_metadata_extraction"])
     return MessageValidationResult.Invalid

   let msgEpoch = proof.epoch
-  let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadataRes.get())
+  let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadata)
   if hasDup.isErr():
     waku_rln_errors_total.inc(labelValues = ["duplicate_check"])
   elif hasDup.value == true:

@@ -266,20 +262,16 @@ proc validateMessageAndUpdateLog*(

   let isValidMessage = rlnPeer.validateMessage(msg)

-  let decodeRes = RateLimitProof.init(msg.proof)
-  if decodeRes.isErr():
+  let msgProof = RateLimitProof.init(msg.proof).valueOr:
     return MessageValidationResult.Invalid

-  let msgProof = decodeRes.get()
-  let proofMetadataRes = msgProof.extractMetadata()
-
-  if proofMetadataRes.isErr():
+  let proofMetadata = msgProof.extractMetadata().valueOr:
     return MessageValidationResult.Invalid

   # insert the message to the log (never errors) only if the
   # message is valid.
   if isValidMessage == MessageValidationResult.Valid:
-    discard rlnPeer.updateLog(msgProof.epoch, proofMetadataRes.get())
+    discard rlnPeer.updateLog(msgProof.epoch, proofMetadata)

   return isValidMessage

@@ -333,14 +325,10 @@ proc generateRlnValidator*(
       trace "rln-relay topic validator is called"
       wakuRlnRelay.clearNullifierLog()

-      let decodeRes = RateLimitProof.init(message.proof)
-
-      if decodeRes.isErr():
-        trace "generateRlnValidator reject", error = decodeRes.error
+      let msgProof = RateLimitProof.init(message.proof).valueOr:
+        trace "generateRlnValidator reject", error = error
         return pubsub.ValidationResult.Reject

-      let msgProof = decodeRes.get()
-
       # validate the message and update log
       let validationRes = wakuRlnRelay.validateMessageAndUpdateLog(message)

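The RLN validation hunks chain two dependent decodes, each with its own valueOr, which removes the intermediate `*Res` temporaries and get() calls entirely. A sketch with hypothetical proof types:

import results

# Hypothetical proof types, for illustration only.
type
  Proof = object
    epoch: uint64

  Meta = object
    nullifier: uint64

proc initProof(raw: seq[byte]): Result[Proof, string] =
  if raw.len == 0: err("empty proof") else: ok(Proof(epoch: 1))

proc extractMeta(p: Proof): Result[Meta, string] =
  ok(Meta(nullifier: 7))

proc accept(raw: seq[byte]): bool =
  # Two dependent decodes chain without intermediate *Res temporaries:
  # each valueOr handles its own failure and yields the plain value.
  let proof = initProof(raw).valueOr:
    return false
  let meta = proof.extractMeta().valueOr:
    return false
  proof.epoch == 1 and meta.nullifier == 7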
@@ -132,8 +132,8 @@ proc initProtocolHandler(self: WakuStore) =
     let writeRes = catch:
       await conn.writeLp(resBuf.resp)

-    if writeRes.isErr():
-      error "Connection write error", error = writeRes.error.msg
+    writeRes.isOkOr:
+      error "Connection write error", error = error.msg
       return

     if successfulQuery:

@@ -92,8 +92,8 @@ proc initTransferHandler(
       let catchable = catch:
         await wakuStoreClient.query(req, peer)

-      if catchable.isErr():
-        return err("store client error: " & catchable.error.msg)
+      catchable.isOkOr:
+        return err("store client error: " & error.msg)

       let res = catchable.get()
       let response = res.valueOr:

@@ -105,8 +105,8 @@ proc initTransferHandler(
         let handleRes = catch:
           await wakuArchive.handleMessage(kv.pubsubTopic.get(), kv.message.get())

-        if handleRes.isErr():
-          error "message transfer failed", error = handleRes.error.msg
+        handleRes.isOkOr:
+          error "message transfer failed", error = error.msg
           continue

       if req.paginationCursor.isNone():

@@ -25,11 +25,8 @@ proc handleSelfStoreRequest*(
   let handlerResult = catch:
     await self.requestHandler(req)

-  let resResult =
-    if handlerResult.isErr():
-      return err("exception in handleSelfStoreRequest: " & handlerResult.error.msg)
-    else:
-      handlerResult.get()
+  let resResult = handlerResult.valueOr:
+    return err("exception in handleSelfStoreRequest: " & error.msg)

   let res = resResult.valueOr:
     return err("error in handleSelfStoreRequest: " & $error)

@@ -58,14 +58,11 @@ proc sendHistoryQueryRPC(
   #TODO: I see a challenge here, if storeNode uses a different MaxRPCSize this read will fail.
   # Need to find a workaround for this.
   let buf = await connection.readLp(DefaultMaxRpcSize.int)
-  let respDecodeRes = HistoryRPC.decode(buf)
-  if respDecodeRes.isErr():
+  let respRpc = HistoryRPC.decode(buf).valueOr:
     waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
     return
       err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure))

-  let respRpc = respDecodeRes.get()
-
   # Disabled ,for now, since the default response is a possible case (no messages, pagesize = 0, error = NONE(0))
   # TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK))
   # and rework the protobuf parsing to return Option[T] when empty values are received

@@ -112,11 +109,8 @@ when defined(waku_exp_store_resume):
       var messageList: seq[WakuMessage] = @[]

       while true:
-        let queryRes = await w.query(req, peer)
-        if queryRes.isErr():
-          return err($queryRes.error)
-
-        let response = queryRes.get()
+        let response = (await w.query(req, peer)).valueOr:
+          return err($error)

         messageList.add(response.messages)

@@ -232,15 +226,14 @@ when defined(waku_exp_store_resume):
         info "a peer is selected from peer manager"
         res = await w.queryAll(req, peerOpt.get())

-      if res.isErr():
+      res.isOkOr:
         info "failed to resume the history"
         return err("failed to resume the history")

       # Save the retrieved messages in the store
       var added: uint = 0
       for msg in res.get():
-        let putStoreRes = w.store.put(pubsubTopic, msg)
-        if putStoreRes.isErr():
+        w.store.put(pubsubTopic, msg).isOkOr:
           continue

         added.inc()

@@ -42,14 +42,11 @@ type StoreResp = tuple[resp: seq[byte], requestId: string]
 proc handleLegacyQueryRequest(
     self: WakuStore, requestor: PeerId, raw_request: seq[byte]
 ): Future[StoreResp] {.async.} =
-  let decodeRes = HistoryRPC.decode(raw_request)
-  if decodeRes.isErr():
-    error "failed to decode rpc", peerId = requestor, error = $decodeRes.error
+  let reqRpc = HistoryRPC.decode(raw_request).valueOr:
+    error "failed to decode rpc", peerId = requestor, error = $error
     waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
     return (newSeq[byte](), "failed to decode rpc")

-  let reqRpc = decodeRes.value
-
   if reqRpc.query.isNone():
     error "empty query rpc", peerId = requestor, requestId = reqRpc.requestId
     waku_legacy_store_errors.inc(labelValues = [emptyRpcQueryFailure])

@@ -77,9 +74,9 @@ proc handleLegacyQueryRequest(
       requestId,
     )

-  if responseRes.isErr():
+  responseRes.isOkOr:
     error "history query failed",
-      peerId = requestor, requestId = requestId, error = responseRes.error
+      peerId = requestor, requestId = requestId, error = error

   let response = responseRes.toRPC()
   return (

@@ -150,8 +147,8 @@ proc initProtocolHandler(ws: WakuStore) =
     let writeRes = catch:
       await conn.writeLp(resBuf.resp)

-    if writeRes.isErr():
-      error "Connection write error", error = writeRes.error.msg
+    writeRes.isOkOr:
+      error "Connection write error", error = error.msg
       return

     if successfulQuery:

@@ -187,25 +187,20 @@ proc toAPI*(err: HistoryResponseErrorRPC): HistoryError =
     HistoryError(kind: HistoryErrorKind.UNKNOWN)

 proc toRPC*(res: HistoryResult): HistoryResponseRPC =
-  if res.isErr():
-    let error = res.error.toRPC()
-
-    HistoryResponseRPC(error: error)
-  else:
-    let resp = res.get()
-
-    let
-      messages = resp.messages
-
-      pagingInfo = block:
-        if resp.cursor.isNone():
-          none(PagingInfoRPC)
-        else:
-          some(PagingInfoRPC(cursor: resp.cursor.map(toRPC)))
-
-      error = HistoryResponseErrorRPC.NONE
-
-    HistoryResponseRPC(messages: messages, pagingInfo: pagingInfo, error: error)
+  let resp = res.valueOr:
+    return HistoryResponseRPC(error: error.toRPC())
+
+  let
+    messages = resp.messages
+
+    pagingInfo = block:
+      if resp.cursor.isNone():
+        none(PagingInfoRPC)
+      else:
+        some(PagingInfoRPC(cursor: resp.cursor.map(toRPC)))
+
+    error = HistoryResponseErrorRPC.NONE
+
+  HistoryResponseRPC(messages: messages, pagingInfo: pagingInfo, error: error)

 proc toAPI*(rpc: HistoryResponseRPC): HistoryResult =
   if rpc.error != HistoryResponseErrorRPC.NONE:
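In `toRPC` the valueOr block returns a minimal error object immediately, which also flattens the happy path out of its old `else:` indent. A sketch with an invented response type:

import results

type RespRPC = object
  messages: seq[string]
  errorCode: int

proc toRPCLike(res: Result[seq[string], int]): RespRPC =
  # The failure branch returns a minimal error object at once; the
  # happy path then builds the full response with no else nesting.
  let msgs = res.valueOr:
    return RespRPC(errorCode: error)
  RespRPC(messages: msgs, errorCode: 0)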
@@ -230,10 +230,9 @@ proc processRequest(
     let writeRes = catch:
       await conn.writeLP(rawPayload)

-    if writeRes.isErr():
+    writeRes.isOkOr:
       await conn.close()
-      return
-        err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
+      return err("remote " & $conn.peerId & " connection write error: " & error.msg)

     trace "sync payload sent",
       local = self.peerManager.switch.peerInfo.peerId,

@@ -286,11 +285,9 @@ proc initiate(
   let writeRes = catch:
     await connection.writeLP(sendPayload)

-  if writeRes.isErr():
+  writeRes.isOkOr:
     await connection.close()
-    return err(
-      "remote " & $connection.peerId & " connection write error: " & writeRes.error.msg
-    )
+    return err("remote " & $connection.peerId & " connection write error: " & error.msg)

   trace "sync payload sent",
     local = self.peerManager.switch.peerInfo.peerId,

@@ -58,9 +58,8 @@ proc sendMessage(
   let writeRes = catch:
     await conn.writeLP(rawPayload)

-  if writeRes.isErr():
-    return
-      err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
+  writeRes.isOkOr:
+    return err("remote [" & $conn.peerId & "] connection write error: " & error.msg)

   total_transfer_messages_exchanged.inc(labelValues = [Sending])

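One last detail worth noting across these hunks: the injected `error` has whatever error type the Result declares, so `catch`-produced Results use error.msg while string-errored Results are concatenated directly or via `$`. A closing sketch (`decodeFrame`/`demo` are hypothetical):

import results

# Hypothetical decoder with a non-string error type.
proc decodeFrame(buf: seq[byte]): Result[string, cstring] =
  err(cstring("bad varint"))

proc demo(buf: seq[byte]): Result[string, string] =
  # `error` has whatever error type the Result declares: a
  # ref CatchableError from `catch` (hence `error.msg` above),
  # or anything else, stringified here with `$`.
  let msg = decodeFrame(buf).valueOr:
    return err("decode failed: " & $error)
  ok(msg)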