Mirror of https://github.com/logos-messaging/logos-messaging-nim.git (synced 2026-01-02 14:03:06 +00:00)
Generic re-style with nph 0.5.1 (#2396)
This commit is contained in: parent dde94d4b52, commit cf6298ca1f
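Note on this change: nph is Nim's opinionated source formatter, so the hunks below are mechanical re-formatting, not behavioural edits. As a rough, hand-written illustration of the recurring patterns in this diff (a sketch with a stand-in type, not code from the commit):

# Sketch of the nph 0.5.1 restyle patterns seen throughout this diff.
type Message = object
  payload: seq[byte]
  contentTopic: string
  version: uint32

# Old style: hanging indent, no trailing comma.
let oldStyle = Message(payload: @[1'u8, 2, 3],
                       contentTopic: "/toy-chat/2/huilong/proto", version: 0)

# nph style: a call or constructor that does not fit on one line is exploded
# one argument per line, with a trailing comma and the closing paren on its
# own line; if/else expressions and pragma blocks get the same treatment.
let newStyle = Message(
  payload: @[1'u8, 2, 3],
  contentTopic: "/toy-chat/2/huilong/proto",
  version: 0,
)

assert oldStyle == newStyle # same value; only the source layout differs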
@@ -1,7 +1,7 @@
## chat2 is an example of usage of Waku v2. For suggested usage options, please
## see dingpu tutorial in docs folder.

when not(compileOption("threads")):
when not (compileOption("threads")):
  {.fatal: "Please, compile this program with the --threads:on option!".}

when (NimMajor, NimMinor) < (1, 4):
@@ -10,18 +10,29 @@ else:
  {.push raises: [].}

import std/[strformat, strutils, times, options, random]
import confutils, chronicles, chronos, stew/shims/net as stewNet,
       eth/keys, bearssl, stew/[byteutils, results],
       metrics,
       metrics/chronos_httpserver
import libp2p/[switch, # manage transports, a single entry point for dialing and listening
               crypto/crypto, # cryptographic functions
               stream/connection, # create and close stream read / write connections
               multiaddress, # encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP
               peerinfo, # manage the information of a peer, such as peer ID and public / private key
               peerid, # Implement how peers interact
               protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs
               nameresolving/dnsresolver]# define DNS resolution
import
  confutils,
  chronicles,
  chronos,
  stew/shims/net as stewNet,
  eth/keys,
  bearssl,
  stew/[byteutils, results],
  metrics,
  metrics/chronos_httpserver
import
  libp2p/[
    switch, # manage transports, a single entry point for dialing and listening
    crypto/crypto, # cryptographic functions
    stream/connection, # create and close stream read / write connections
    multiaddress,
      # encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP
    peerinfo,
      # manage the information of a peer, such as peer ID and public / private key
    peerid, # Implement how peers interact
    protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs
    nameresolving/dnsresolver,
  ] # define DNS resolution
import
  ../../waku/waku_core,
  ../../waku/waku_lightpush/common,
@@ -37,13 +48,11 @@ import
  ../../waku/common/utils/nat,
  ./config_chat2

import
  libp2p/protocols/pubsub/rpc/messages,
  libp2p/protocols/pubsub/pubsub
import
  ../../waku/waku_rln_relay
import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub
import ../../waku/waku_rln_relay

const Help = """
const Help =
  """
Commands: /[?|help|connect|nick|exit]
  help: Prints this help
  connect: dials a remote peer
@@ -55,14 +64,14 @@ const Help = """
# Could poll connection pool or something here, I suppose
# TODO Ensure connected turns true on incoming connections, or get rid of it
type Chat = ref object
  node: WakuNode # waku node for publishing, subscribing, etc
  transp: StreamTransport # transport streams between read & write file descriptor
  subscribed: bool # indicates if a node is subscribed or not to a topic
  connected: bool # if the node is connected to another peer
  started: bool # if the node has started
  nick: string # nickname for this chat session
  prompt: bool # chat prompt is showing
  contentTopic: string # default content topic for chat messages
  node: WakuNode # waku node for publishing, subscribing, etc
  transp: StreamTransport # transport streams between read & write file descriptor
  subscribed: bool # indicates if a node is subscribed or not to a topic
  connected: bool # if the node is connected to another peer
  started: bool # if the node has started
  nick: string # nickname for this chat session
  prompt: bool # chat prompt is showing
  contentTopic: string # default content topic for chat messages

type
  PrivateKey* = crypto.PrivateKey
@@ -85,11 +94,11 @@ proc init*(T: type Chat2Message, buffer: seq[byte]): ProtoResult[T] =
  let pb = initProtoBuffer(buffer)

  var timestamp: uint64
  discard ? pb.getField(1, timestamp)
  discard ?pb.getField(1, timestamp)
  msg.timestamp = int64(timestamp)

  discard ? pb.getField(2, msg.nick)
  discard ? pb.getField(3, msg.payload)
  discard ?pb.getField(2, msg.nick)
  discard ?pb.getField(3, msg.payload)

  ok(msg)

@@ -124,19 +133,25 @@ proc showChatPrompt(c: Chat) =
  except IOError:
    discard

proc getChatLine(c: Chat, msg:WakuMessage): Result[string, string]=
proc getChatLine(c: Chat, msg: WakuMessage): Result[string, string] =
  # No payload encoding/encryption from Waku
  let
    pb = Chat2Message.init(msg.payload)
    chatLine = if pb.isOk: pb[].toString()
               else: string.fromBytes(msg.payload)
    chatLine =
      if pb.isOk:
        pb[].toString()
      else:
        string.fromBytes(msg.payload)
  return ok(chatline)

proc printReceivedMessage(c: Chat, msg: WakuMessage) =
  let
    pb = Chat2Message.init(msg.payload)
    chatLine = if pb.isOk: pb[].toString()
               else: string.fromBytes(msg.payload)
    chatLine =
      if pb.isOk:
        pb[].toString()
      else:
        string.fromBytes(msg.payload)
  try:
    echo &"{chatLine}"
  except ValueError:
@@ -145,8 +160,8 @@ proc printReceivedMessage(c: Chat, msg: WakuMessage) =

  c.prompt = false
  showChatPrompt(c)
  trace "Printing message", topic=DefaultPubsubTopic, chatLine,
    contentTopic = msg.contentTopic
  trace "Printing message",
    topic = DefaultPubsubTopic, chatLine, contentTopic = msg.contentTopic

proc readNick(transp: StreamTransport): Future[string] {.async.} =
  # Chat prompt
@@ -154,9 +169,10 @@ proc readNick(transp: StreamTransport): Future[string] {.async.} =
  stdout.flushFile()
  return await transp.readLine()


proc startMetricsServer(serverIp: IpAddress, serverPort: Port): Result[MetricsHttpServerRef, string] =
  info "Starting metrics HTTP server", serverIp= $serverIp, serverPort= $serverPort
proc startMetricsServer(
    serverIp: IpAddress, serverPort: Port
): Result[MetricsHttpServerRef, string] =
  info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort

  let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort)
  if metricsServerRes.isErr():
@@ -168,23 +184,25 @@ proc startMetricsServer(serverIp: IpAddress, serverPort: Port): Result[MetricsHt
  except CatchableError:
    return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())

  info "Metrics HTTP server started", serverIp= $serverIp, serverPort= $serverPort
  info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort
  ok(metricsServerRes.value)


proc publish(c: Chat, line: string) =
  # First create a Chat2Message protobuf with this line of text
  let time = getTime().toUnix()
  let chat2pb = Chat2Message(timestamp: time,
                             nick: c.nick,
                             payload: line.toBytes()).encode()
  let chat2pb =
    Chat2Message(timestamp: time, nick: c.nick, payload: line.toBytes()).encode()

  ## @TODO: error handling on failure
  proc handler(response: PushResponse) {.gcsafe, closure.} =
    trace "lightpush response received", response=response
    trace "lightpush response received", response = response

  var message = WakuMessage(payload: chat2pb.buffer,
    contentTopic: c.contentTopic, version: 0, timestamp: getNanosecondTime(time))
  var message = WakuMessage(
    payload: chat2pb.buffer,
    contentTopic: c.contentTopic,
    version: 0,
    timestamp: getNanosecondTime(time),
  )
  if not isNil(c.node.wakuRlnRelay):
    # for future version when we support more than one rln protected content topic,
    # we should check the message content topic as well
@@ -201,7 +219,8 @@ proc publish(c: Chat, line: string) =
      # TODO move it to log after dogfooding
      let msgEpoch = fromEpoch(proof.epoch)
      if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == msgEpoch:
        echo "--rln epoch: ", msgEpoch, " ⚠️ message rate violation! you are spamming the network!"
        echo "--rln epoch: ",
          msgEpoch, " ⚠️ message rate violation! you are spamming the network!"
      else:
        echo "--rln epoch: ", msgEpoch
      # update the last epoch
@@ -216,25 +235,25 @@ proc publish(c: Chat, line: string) =
    (waitFor c.node.publish(some(DefaultPubsubTopic), message)).isOkOr:
      error "failed to publish message", error = error
  except CatchableError:
      error "caught error publishing message: ", error = getCurrentExceptionMsg()
    error "caught error publishing message: ", error = getCurrentExceptionMsg()

# TODO This should read or be subscribe handler subscribe
proc readAndPrint(c: Chat) {.async.} =
  while true:
    # while p.connected:
    #   # TODO: echo &"{p.id} -> "
    #
    #   echo cast[string](await p.conn.readLp(1024))
    # while p.connected:
    #   # TODO: echo &"{p.id} -> "
    #
    #   echo cast[string](await p.conn.readLp(1024))
    #echo "readAndPrint subscribe NYI"
    await sleepAsync(100.millis)

# TODO Implement
proc writeAndPrint(c: Chat) {.async.} =
  while true:
    # Connect state not updated on incoming WakuRelay connections
    # if not c.connected:
    #   echo "type an address or wait for a connection:"
    #   echo "type /[help|?] for help"
    # Connect state not updated on incoming WakuRelay connections
    # if not c.connected:
    #   echo "type an address or wait for a connection:"
    #   echo "type /[help|?] for help"

    # Chat prompt
    showChatPrompt(c)
@@ -244,11 +263,11 @@ proc writeAndPrint(c: Chat) {.async.} =
      echo Help
      continue

    # if line.startsWith("/disconnect"):
    #   echo "Ending current session"
    #   if p.connected and p.conn.closed.not:
    #     await p.conn.close()
    #   p.connected = false
    # if line.startsWith("/disconnect"):
    #   echo "Ending current session"
    #   if p.connected and p.conn.closed.not:
    #     await p.conn.close()
    #   p.connected = false
    elif line.startsWith("/connect"):
      # TODO Should be able to connect to multiple peers for Waku chat
      if c.connected:
@@ -259,19 +278,21 @@ proc writeAndPrint(c: Chat) {.async.} =
      let address = await c.transp.readLine()
      if address.len > 0:
        await c.connectToNodes(@[address])

    elif line.startsWith("/nick"):
      # Set a new nickname
      c.nick = await readNick(c.transp)
      echo "You are now known as " & c.nick

    elif line.startsWith("/exit"):
      if not c.node.wakuFilterLegacy.isNil():
        echo "unsubscribing from content filters..."

        let peerOpt = c.node.peerManager.selectPeer(WakuLegacyFilterCodec)
        if peerOpt.isSome():
          await c.node.legacyFilterUnsubscribe(pubsubTopic=some(DefaultPubsubTopic), contentTopics=c.contentTopic, peer=peerOpt.get())
          await c.node.legacyFilterUnsubscribe(
            pubsubTopic = some(DefaultPubsubTopic),
            contentTopics = c.contentTopic,
            peer = peerOpt.get(),
          )

      echo "quitting..."

@@ -307,21 +328,28 @@ proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} =
    let line = stdin.readLine()
    discard waitFor transp.write(line & "\r\n")

{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
{.pop.}
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
  let
    transp = fromPipe(rfd)
    conf = Chat2Conf.load()
    nodekey = if conf.nodekey.isSome(): conf.nodekey.get()
              else: PrivateKey.random(Secp256k1, rng[]).tryGet()
    nodekey =
      if conf.nodekey.isSome():
        conf.nodekey.get()
      else:
        PrivateKey.random(Secp256k1, rng[]).tryGet()

  # set log level
  if conf.logLevel != LogLevel.NONE:
    setLogLevel(conf.logLevel)

  let natRes = setupNat(conf.nat, clientId,
    Port(uint16(conf.tcpPort) + conf.portsShift),
    Port(uint16(conf.udpPort) + conf.portsShift))
  let natRes = setupNat(
    conf.nat,
    clientId,
    Port(uint16(conf.tcpPort) + conf.portsShift),
    Port(uint16(conf.udpPort) + conf.portsShift),
  )

  if natRes.isErr():
    raise newException(ValueError, "setupNat error " & natRes.error)
@@ -333,20 +361,28 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
  let recordRes = enrBuilder.build()
  let record =
    if recordRes.isErr():
      error "failed to create enr record", error=recordRes.error
      error "failed to create enr record", error = recordRes.error
      quit(QuitFailure)
    else: recordRes.get()
    else:
      recordRes.get()

  let node = block:
    var builder = WakuNodeBuilder.init()
    builder.withNodeKey(nodeKey)
    builder.withRecord(record)
    builder.withNetworkConfigurationDetails(conf.listenAddress, Port(uint16(conf.tcpPort) + conf.portsShift),
      extIp, extTcpPort,
      wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
      wsEnabled = conf.websocketSupport,
      wssEnabled = conf.websocketSecureSupport).tryGet()
    builder.build().tryGet()
    var builder = WakuNodeBuilder.init()
    builder.withNodeKey(nodeKey)
    builder.withRecord(record)

    builder
    .withNetworkConfigurationDetails(
      conf.listenAddress,
      Port(uint16(conf.tcpPort) + conf.portsShift),
      extIp,
      extTcpPort,
      wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
      wsEnabled = conf.websocketSupport,
      wssEnabled = conf.websocketSecureSupport,
    )
    .tryGet()
    builder.build().tryGet()

  await node.start()

@@ -361,14 +397,16 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
  let nick = await readNick(transp)
  echo "Welcome, " & nick & "!"

  var chat = Chat(node: node,
                  transp: transp,
                  subscribed: true,
                  connected: false,
                  started: true,
                  nick: nick,
                  prompt: false,
                  contentTopic: conf.contentTopic)
  var chat = Chat(
    node: node,
    transp: transp,
    subscribed: true,
    connected: false,
    started: true,
    nick: nick,
    prompt: false,
    contentTopic: conf.contentTopic,
  )

  if conf.staticnodes.len > 0:
    echo "Connecting to static peers..."
@@ -381,14 +419,17 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
    echo "Connecting to " & $conf.fleet & " fleet using DNS discovery..."

    if conf.fleet == Fleet.test:
      dnsDiscoveryUrl = some("enrtree://AO47IDOLBKH72HIZZOXQP6NMRESAN7CHYWIBNXDXWRJRZWLODKII6@test.wakuv2.nodes.status.im")
      dnsDiscoveryUrl = some(
        "enrtree://AO47IDOLBKH72HIZZOXQP6NMRESAN7CHYWIBNXDXWRJRZWLODKII6@test.wakuv2.nodes.status.im"
      )
    else:
      # Connect to prod by default
      dnsDiscoveryUrl = some("enrtree://ANEDLO25QVUGJOUTQFRYKWX6P4Z4GKVESBMHML7DZ6YK4LGS5FC5O@prod.wakuv2.nodes.status.im")

      dnsDiscoveryUrl = some(
        "enrtree://ANEDLO25QVUGJOUTQFRYKWX6P4Z4GKVESBMHML7DZ6YK4LGS5FC5O@prod.wakuv2.nodes.status.im"
      )
  elif conf.dnsDiscovery and conf.dnsDiscoveryUrl != "":
    # No pre-selected fleet. Discover nodes via DNS using user config
    debug "Discovering nodes using Waku DNS discovery", url=conf.dnsDiscoveryUrl
    debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
    dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)

  var discoveredNodes: seq[RemotePeerInfo]
@@ -401,12 +442,11 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
    let dnsResolver = DnsResolver.new(nameServers)

    proc resolver(domain: string): Future[string] {.async, gcsafe.} =
      trace "resolving", domain=domain
      trace "resolving", domain = domain
      let resolved = await dnsResolver.resolveTxt(domain)
      return resolved[0] # Use only first answer

    var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(),
                                                 resolver)
    var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver)
    if wakuDnsDiscovery.isOk:
      let discoveredPeers = wakuDnsDiscovery.get().findPeers()
      if discoveredPeers.isOk:
@@ -432,10 +472,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
      storenode = some(peerInfo.value)
    else:
      error "Incorrect conf.storenode", error = peerInfo.error

  elif discoveredNodes.len > 0:
    echo "Store enabled, but no store nodes configured. Choosing one at random from discovered peers"
    storenode = some(discoveredNodes[rand(0..len(discoveredNodes) - 1)])
    storenode = some(discoveredNodes[rand(0 .. len(discoveredNodes) - 1)])

  if storenode.isSome():
    # We have a viable storenode. Let's query it for historical messages.
@@ -448,8 +487,11 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
      for msg in response.messages:
        let
          pb = Chat2Message.init(msg.payload)
          chatLine = if pb.isOk: pb[].toString()
                     else: string.fromBytes(msg.payload)
          chatLine =
            if pb.isOk:
              pb[].toString()
            else:
              string.fromBytes(msg.payload)
        echo &"{chatLine}"
      info "Hit store handler"

@@ -466,7 +508,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
      node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec)
    else:
      error "LightPush not mounted. Couldn't parse conf.lightpushnode",
            error = peerInfo.error
        error = peerInfo.error

  if conf.filternode != "":
    let peerInfo = parsePeerInfo(conf.filternode)
@@ -476,19 +518,22 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
      await node.mountFilterClient()
      node.peerManager.addServicePeer(peerInfo.value, WakuLegacyFilterCodec)

      proc filterHandler(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async, gcsafe, closure.} =
        trace "Hit filter handler", contentTopic=msg.contentTopic
      proc filterHandler(
          pubsubTopic: PubsubTopic, msg: WakuMessage
      ) {.async, gcsafe, closure.} =
        trace "Hit filter handler", contentTopic = msg.contentTopic
        chat.printReceivedMessage(msg)

      await node.legacyFilterSubscribe(pubsubTopic=some(DefaultPubsubTopic),
                                       contentTopics=chat.contentTopic,
                                       filterHandler,
                                       peerInfo.value)
      await node.legacyFilterSubscribe(
        pubsubTopic = some(DefaultPubsubTopic),
        contentTopics = chat.contentTopic,
        filterHandler,
        peerInfo.value,
      )
      # TODO: Here to support FilterV2 relevant subscription, but still
      # Legacy Filter is concurrent to V2 until legacy filter will be removed
    else:
      error "Filter not mounted. Couldn't parse conf.filternode",
        error = peerInfo.error
      error "Filter not mounted. Couldn't parse conf.filternode", error = peerInfo.error

  # Subscribe to a topic, if relay is mounted
  if conf.relay:
@@ -524,7 +569,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
        rlnRelayCredPath: conf.rlnRelayCredPath,
        rlnRelayCredPassword: conf.rlnRelayCredPassword,
        rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
        rlnEpochSizeSec: conf.rlnEpochSizeSec
        rlnEpochSizeSec: conf.rlnEpochSizeSec,
      )
    else:
      let rlnConf = WakuRlnConfig(
@@ -534,16 +579,16 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
        rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
        rlnRelayCredPath: conf.rlnRelayCredPath,
        rlnRelayCredPassword: conf.rlnRelayCredPassword,
        rlnEpochSizeSec: conf.rlnEpochSizeSec
        rlnEpochSizeSec: conf.rlnEpochSizeSec,
      )

      waitFor node.mountRlnRelay(rlnConf,
                                 spamHandler=some(spamHandler))
      waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))

      let membershipIndex = node.wakuRlnRelay.groupManager.membershipIndex.get()
      let identityCredential = node.wakuRlnRelay.groupManager.idCredentials.get()
      echo "your membership index is: ", membershipIndex
      echo "your rln identity commitment key is: ", identityCredential.idCommitment.inHex()
      echo "your rln identity commitment key is: ",
        identityCredential.idCommitment.inHex()
  else:
    info "WakuRLNRelay is disabled"
    echo "WakuRLNRelay is disabled, please enable it by passing in the --rln-relay flag"
@@ -552,11 +597,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =

  if conf.metricsServer:
    let metricsServer = startMetricsServer(
      conf.metricsServerAddress,
      Port(conf.metricsServerPort + conf.portsShift)
      conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift)
    )


  await chat.readWriteLoop()

  if conf.keepAlive:
@@ -578,7 +621,6 @@ proc main(rng: ref HmacDrbgContext) {.async.} =
  except ConfigurationError as e:
    raise e


when isMainModule: # isMainModule = true when the module is compiled as the main file
  let rng = crypto.newRng()
  try:

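The next file, the chat2 CLI config, is almost entirely confutils option declarations, and every hunk in it follows one pragma-layout change. A minimal sketch of that change (MyConf and myPort are hypothetical names; it assumes the confutils desc/defaultValue/name pragmas used throughout this diff):

import confutils, confutils/defs

type MyConf* = object
  # Old layout: one pragma option per line, no commas, `}` glued to the end:
  #   myPort* {.
  #     desc: "TCP listening port."
  #     defaultValue: 60000
  #     name: "my-port" }: int
  # nph layout: options comma-separated, `.}` dedented onto its own line:
  myPort* {.
    desc: "TCP listening port.", defaultValue: 60000, name: "my-port"
  .}: int

when isMainModule:
  let conf = MyConf.load()
  echo conf.myPort # prints 60000 when no --my-port argument is given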
@@ -1,254 +1,268 @@
import
  chronicles, chronos,
  confutils, confutils/defs, confutils/std/net,
  chronicles,
  chronos,
  confutils,
  confutils/defs,
  confutils/std/net,
  eth/keys,
  libp2p/crypto/crypto,
  libp2p/crypto/secp,
  nimcrypto/utils,
  std/strutils,
  regex
import
  ../../../waku/waku_core
import ../../../waku/waku_core

type
  Fleet* = enum
  Fleet* = enum
    none
    prod
    test

  EthRpcUrl = distinct string

  Chat2Conf* = object
    ## General node config

  Chat2Conf* = object ## General node config
    logLevel* {.
      desc: "Sets the log level."
      defaultValue: LogLevel.INFO
      name: "log-level" }: LogLevel
      desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level"
    .}: LogLevel

    nodekey* {.
      desc: "P2P node private key as 64 char hex string.",
      name: "nodekey" }: Option[crypto.PrivateKey]
    nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}:
      Option[crypto.PrivateKey]

    listenAddress* {.
      defaultValue: defaultListenAddress(config)
      desc: "Listening address for the LibP2P traffic."
      name: "listen-address"}: IpAddress
      defaultValue: defaultListenAddress(config),
      desc: "Listening address for the LibP2P traffic.",
      name: "listen-address"
    .}: IpAddress

    tcpPort* {.
      desc: "TCP listening port."
      defaultValue: 60000
      name: "tcp-port" }: Port
    tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}:
      Port

    udpPort* {.
      desc: "UDP listening port."
      defaultValue: 60000
      name: "udp-port" }: Port
    udpPort* {.desc: "UDP listening port.", defaultValue: 60000, name: "udp-port".}:
      Port

    portsShift* {.
      desc: "Add a shift to all port numbers."
      defaultValue: 0
      name: "ports-shift" }: uint16
      desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift"
    .}: uint16

    nat* {.
      desc: "Specify method to use for determining public address. " &
        "Must be one of: any, none, upnp, pmp, extip:<IP>."
      defaultValue: "any" }: string
      desc:
        "Specify method to use for determining public address. " &
        "Must be one of: any, none, upnp, pmp, extip:<IP>.",
      defaultValue: "any"
    .}: string

    ## Persistence config

    dbPath* {.
      desc: "The database path for persistent storage",
      defaultValue: ""
      name: "db-path" }: string
      desc: "The database path for persistent storage", defaultValue: "", name: "db-path"
    .}: string

    persistPeers* {.
      desc: "Enable peer persistence: true|false",
      defaultValue: false
      name: "persist-peers" }: bool
      defaultValue: false,
      name: "persist-peers"
    .}: bool

    persistMessages* {.
      desc: "Enable message persistence: true|false",
      defaultValue: false
      name: "persist-messages" }: bool
      defaultValue: false,
      name: "persist-messages"
    .}: bool

    ## Relay config

    relay* {.
      desc: "Enable relay protocol: true|false",
      defaultValue: true
      name: "relay" }: bool
      desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay"
    .}: bool

    staticnodes* {.
      desc: "Peer multiaddr to directly connect with. Argument may be repeated."
      name: "staticnode" }: seq[string]
      desc: "Peer multiaddr to directly connect with. Argument may be repeated.",
      name: "staticnode"
    .}: seq[string]

    keepAlive* {.
      desc: "Enable keep-alive for idle connections: true|false",
      defaultValue: false
      name: "keep-alive" }: bool
      defaultValue: false,
      name: "keep-alive"
    .}: bool

    topics* {.
      desc: "Default topics to subscribe to (space separated list)."
      defaultValue: "/waku/2/default-waku/proto"
      name: "topics" .}: string
      desc: "Default topics to subscribe to (space separated list).",
      defaultValue: "/waku/2/default-waku/proto",
      name: "topics"
    .}: string

    ## Store config

    store* {.
      desc: "Enable store protocol: true|false",
      defaultValue: true
      name: "store" }: bool
      desc: "Enable store protocol: true|false", defaultValue: true, name: "store"
    .}: bool

    storenode* {.
      desc: "Peer multiaddr to query for storage.",
      defaultValue: ""
      name: "storenode" }: string
      desc: "Peer multiaddr to query for storage.", defaultValue: "", name: "storenode"
    .}: string

    ## Filter config

    filter* {.
      desc: "Enable filter protocol: true|false",
      defaultValue: false
      name: "filter" }: bool
      desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter"
    .}: bool

    filternode* {.
      desc: "Peer multiaddr to request content filtering of messages.",
      defaultValue: ""
      name: "filternode" }: string
      defaultValue: "",
      name: "filternode"
    .}: string

    ## Lightpush config

    lightpush* {.
      desc: "Enable lightpush protocol: true|false",
      defaultValue: false
      name: "lightpush" }: bool
      defaultValue: false,
      name: "lightpush"
    .}: bool

    lightpushnode* {.
      desc: "Peer multiaddr to request lightpush of published messages.",
      defaultValue: ""
      name: "lightpushnode" }: string
      defaultValue: "",
      name: "lightpushnode"
    .}: string

    ## Metrics config

    metricsServer* {.
      desc: "Enable the metrics server: true|false"
      defaultValue: false
      name: "metrics-server" }: bool
      desc: "Enable the metrics server: true|false",
      defaultValue: false,
      name: "metrics-server"
    .}: bool

    metricsServerAddress* {.
      desc: "Listening address of the metrics server."
      defaultValue: parseIpAddress("127.0.0.1")
      name: "metrics-server-address" }: IpAddress
      desc: "Listening address of the metrics server.",
      defaultValue: parseIpAddress("127.0.0.1"),
      name: "metrics-server-address"
    .}: IpAddress

    metricsServerPort* {.
      desc: "Listening HTTP port of the metrics server."
      defaultValue: 8008
      name: "metrics-server-port" }: uint16
      desc: "Listening HTTP port of the metrics server.",
      defaultValue: 8008,
      name: "metrics-server-port"
    .}: uint16

    metricsLogging* {.
      desc: "Enable metrics logging: true|false"
      defaultValue: true
      name: "metrics-logging" }: bool
      desc: "Enable metrics logging: true|false",
      defaultValue: true,
      name: "metrics-logging"
    .}: bool

    ## DNS discovery config

    dnsDiscovery* {.
      desc: "Enable discovering nodes via DNS"
      defaultValue: false
      name: "dns-discovery" }: bool
      desc: "Enable discovering nodes via DNS",
      defaultValue: false,
      name: "dns-discovery"
    .}: bool

    dnsDiscoveryUrl* {.
      desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
      defaultValue: ""
      name: "dns-discovery-url" }: string
      defaultValue: "",
      name: "dns-discovery-url"
    .}: string

    dnsDiscoveryNameServers* {.
      desc: "DNS name server IPs to query. Argument may be repeated."
      defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
      name: "dns-discovery-name-server" }: seq[IpAddress]
      desc: "DNS name server IPs to query. Argument may be repeated.",
      defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
      name: "dns-discovery-name-server"
    .}: seq[IpAddress]

    ## Chat2 configuration

    fleet* {.
      desc: "Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet."
      defaultValue: Fleet.prod
      name: "fleet" }: Fleet
      desc:
        "Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.",
      defaultValue: Fleet.prod,
      name: "fleet"
    .}: Fleet

    contentTopic* {.
      desc: "Content topic for chat messages."
      defaultValue: "/toy-chat/2/huilong/proto"
      name: "content-topic" }: string
      desc: "Content topic for chat messages.",
      defaultValue: "/toy-chat/2/huilong/proto",
      name: "content-topic"
    .}: string

    ## Websocket Configuration
    websocketSupport* {.
      desc: "Enable websocket: true|false",
      defaultValue: false
      name: "websocket-support"}: bool
      defaultValue: false,
      name: "websocket-support"
    .}: bool

    websocketPort* {.
      desc: "WebSocket listening port."
      defaultValue: 8000
      name: "websocket-port" }: Port
      desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port"
    .}: Port

    websocketSecureSupport* {.
      desc: "WebSocket Secure Support."
      defaultValue: false
      name: "websocket-secure-support" }: bool
      desc: "WebSocket Secure Support.",
      defaultValue: false,
      name: "websocket-secure-support"
    .}: bool

    ## rln-relay configuration

    rlnRelay* {.
      desc: "Enable spam protection through rln-relay: true|false",
      defaultValue: false
      name: "rln-relay" }: bool
      defaultValue: false,
      name: "rln-relay"
    .}: bool

    rlnRelayCredPath* {.
      desc: "The path for persisting rln-relay credential",
      defaultValue: ""
      name: "rln-relay-cred-path" }: string
      defaultValue: "",
      name: "rln-relay-cred-path"
    .}: string

    rlnRelayCredIndex* {.
      desc: "the index of the onchain commitment to use",
      name: "rln-relay-cred-index" }: Option[uint]
      desc: "the index of the onchain commitment to use", name: "rln-relay-cred-index"
    .}: Option[uint]

    rlnRelayDynamic* {.
      desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false",
      defaultValue: false
      name: "rln-relay-dynamic" }: bool
      defaultValue: false,
      name: "rln-relay-dynamic"
    .}: bool

    rlnRelayIdKey* {.
      desc: "Rln relay identity secret key as a Hex string",
      defaultValue: ""
      name: "rln-relay-id-key" }: string
      defaultValue: "",
      name: "rln-relay-id-key"
    .}: string

    rlnRelayIdCommitmentKey* {.
      desc: "Rln relay identity commitment key as a Hex string",
      defaultValue: ""
      name: "rln-relay-id-commitment-key" }: string
      defaultValue: "",
      name: "rln-relay-id-commitment-key"
    .}: string

    rlnRelayEthClientAddress* {.
      desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
      defaultValue: "http://localhost:8540/"
      name: "rln-relay-eth-client-address" }: EthRpcUrl
      defaultValue: "http://localhost:8540/",
      name: "rln-relay-eth-client-address"
    .}: EthRpcUrl

    rlnRelayEthContractAddress* {.
      desc: "Address of membership contract on an Ethereum testnet",
      defaultValue: ""
      name: "rln-relay-eth-contract-address" }: string
      defaultValue: "",
      name: "rln-relay-eth-contract-address"
    .}: string

    rlnRelayCredPassword* {.
      desc: "Password for encrypting RLN credentials",
      defaultValue: ""
      name: "rln-relay-cred-password" }: string
      defaultValue: "",
      name: "rln-relay-cred-password"
    .}: string

    rlnRelayUserMessageLimit* {.
      desc: "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.",
      desc:
        "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.",
      defaultValue: 1,
      name: "rln-relay-user-message-limit" .}: uint64
      name: "rln-relay-user-message-limit"
    .}: uint64

    rlnEpochSizeSec* {.
      desc: "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.",
      defaultValue: 1
      name: "rln-relay-epoch-sec" .}: uint64
      desc:
        "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.",
      defaultValue: 1,
      name: "rln-relay-epoch-sec"
    .}: uint64

# NOTE: Keys are different in nim-libp2p
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
@@ -300,10 +314,14 @@ proc parseCmdArg*(T: type EthRpcUrl, s: string): T =
  ## https://url:port/path?query
  ## disallowed patterns:
  ## any valid/invalid ws or wss url
  var httpPattern = re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  var wsPattern = re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  var httpPattern =
    re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  var wsPattern =
    re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  if regex.match(s, wsPattern):
    raise newException(ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL")
    raise newException(
      ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL"
    )
  if not regex.match(s, httpPattern):
    raise newException(ValueError, "Invalid HTTP RPC URL")
  return EthRpcUrl(s)

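For context on the EthRpcUrl hunk just above: parseCmdArg accepts http(s) endpoints and raises for anything websocket-shaped before falling back to a generic "Invalid HTTP RPC URL" error. A small sketch of the same check with the nim-regex API (the host pattern is abbreviated here for readability; the production regexes above are authoritative):

import regex

let httpPattern =
  re2"^(https?):\/\/((localhost)|([\w_-]+(?:\.[\w_-]+)+))(:[0-9]{1,5})?(\/.*)?"
assert regex.match("http://localhost:8540/", httpPattern) # accepted
assert not regex.match("ws://localhost:8540", httpPattern)
  # a ws:// URL instead triggers the "Websocket RPC URL is not supported" ValueError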
@@ -5,8 +5,13 @@ else:

import
  std/[tables, times, strutils, hashes, sequtils],
  chronos, confutils, chronicles, chronicles/topics_registry, chronos/streams/tlsstream,
  metrics, metrics/chronos_httpserver,
  chronos,
  confutils,
  chronicles,
  chronicles/topics_registry,
  chronos/streams/tlsstream,
  metrics,
  metrics/chronos_httpserver,
  stew/byteutils,
  eth/net/nat,
  json_rpc/rpcserver,
@@ -27,7 +32,8 @@ import
  # Common cli config
  ./config_chat2bridge

declarePublicCounter chat2_mb_transfers, "Number of messages transferred between chat2 and Matterbridge", ["type"]
declarePublicCounter chat2_mb_transfers,
  "Number of messages transferred between chat2 and Matterbridge", ["type"]
declarePublicCounter chat2_mb_dropped, "Number of messages dropped", ["reason"]

logScope:
@@ -37,8 +43,7 @@ logScope:
# Default values #
##################

const
  DeduplQSize = 20 # Maximum number of seen messages to keep in deduplication queue
const DeduplQSize = 20 # Maximum number of seen messages to keep in deduplication queue

#########
# Types #
@@ -53,7 +58,7 @@ type
    seen: seq[Hash] #FIFO queue
    contentTopic: string

  MbMessageHandler = proc (jsonNode: JsonNode) {.async.}
  MbMessageHandler = proc(jsonNode: JsonNode) {.async.}

####################
# Helper functions #
@@ -65,25 +70,27 @@ proc containsOrAdd(sequence: var seq[Hash], hash: Hash): bool =

  if sequence.len >= DeduplQSize:
    trace "Deduplication queue full. Removing oldest item."
    sequence.delete 0, 0 # Remove first item in queue
    sequence.delete 0, 0 # Remove first item in queue

  sequence.add(hash)

  return false

proc toWakuMessage(cmb: Chat2MatterBridge, jsonNode: JsonNode): WakuMessage {.raises: [Defect, KeyError]} =
proc toWakuMessage(
    cmb: Chat2MatterBridge, jsonNode: JsonNode
): WakuMessage {.raises: [Defect, KeyError].} =
  # Translates a Matterbridge API JSON response to a Waku v2 message
  let msgFields = jsonNode.getFields()

  # @TODO error handling here - verify expected fields

  let chat2pb = Chat2Message(timestamp: getTime().toUnix(), # @TODO use provided timestamp
                             nick: msgFields["username"].getStr(),
                             payload: msgFields["text"].getStr().toBytes()).encode()
  let chat2pb = Chat2Message(
    timestamp: getTime().toUnix(), # @TODO use provided timestamp
    nick: msgFields["username"].getStr(),
    payload: msgFields["text"].getStr().toBytes(),
  ).encode()

  WakuMessage(payload: chat2pb.buffer,
              contentTopic: cmb.contentTopic,
              version: 0)
  WakuMessage(payload: chat2pb.buffer, contentTopic: cmb.contentTopic, version: 0)

proc toChat2(cmb: Chat2MatterBridge, jsonNode: JsonNode) {.async.} =
  let msg = cmb.toWakuMessage(jsonNode)
@@ -100,7 +107,9 @@ proc toChat2(cmb: Chat2MatterBridge, jsonNode: JsonNode) {.async.} =
  (await cmb.nodev2.publish(some(DefaultPubsubTopic), msg)).isOkOr:
    error "failed to publish message", error = error

proc toMatterbridge(cmb: Chat2MatterBridge, msg: WakuMessage) {.gcsafe, raises: [Exception].} =
proc toMatterbridge(
    cmb: Chat2MatterBridge, msg: WakuMessage
) {.gcsafe, raises: [Exception].} =
  if cmb.seen.containsOrAdd(msg.payload.hash()):
    # This is a duplicate message. Return.
    chat2_mb_dropped.inc(labelValues = ["duplicate"])
@@ -119,8 +128,9 @@ proc toMatterbridge(cmb: Chat2MatterBridge, msg: WakuMessage) {.gcsafe, raises:

  assert chat2Msg.isOk

  let postRes = cmb.mbClient.postMessage(text = string.fromBytes(chat2Msg[].payload),
                                         username = chat2Msg[].nick)
  let postRes = cmb.mbClient.postMessage(
    text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick
  )

  if postRes.isErr() or (postRes[] == false):
    chat2_mb_dropped.inc(labelValues = ["duplicate"])
@@ -142,41 +152,50 @@ proc pollMatterbridge(cmb: Chat2MatterBridge, handler: MbMessageHandler) {.async
##############
# Public API #
##############
proc new*(T: type Chat2MatterBridge,
          # Matterbridge initialisation
          mbHostUri: string,
          mbGateway: string,
          # NodeV2 initialisation
          nodev2Key: crypto.PrivateKey,
          nodev2BindIp: IpAddress, nodev2BindPort: Port,
          nodev2ExtIp = none[IpAddress](), nodev2ExtPort = none[Port](),
          contentTopic: string): T
          {.raises: [Defect, ValueError, KeyError, TLSStreamProtocolError, IOError, LPError].} =

proc new*(
    T: type Chat2MatterBridge,
    # Matterbridge initialisation
    mbHostUri: string,
    mbGateway: string,
    # NodeV2 initialisation
    nodev2Key: crypto.PrivateKey,
    nodev2BindIp: IpAddress,
    nodev2BindPort: Port,
    nodev2ExtIp = none[IpAddress](),
    nodev2ExtPort = none[Port](),
    contentTopic: string,
): T {.
    raises: [Defect, ValueError, KeyError, TLSStreamProtocolError, IOError, LPError]
.} =
  # Setup Matterbridge
  let
    mbClient = MatterbridgeClient.new(mbHostUri, mbGateway)
  let mbClient = MatterbridgeClient.new(mbHostUri, mbGateway)

  # Let's verify the Matterbridge configuration before continuing
  let clientHealth = mbClient.isHealthy()

  if clientHealth.isOk() and clientHealth[]:
    info "Reached Matterbridge host", host=mbClient.host
    info "Reached Matterbridge host", host = mbClient.host
  else:
    raise newException(ValueError, "Matterbridge client not reachable/healthy")

  # Setup Waku v2 node
  let nodev2 = block:
    var builder = WakuNodeBuilder.init()
    builder.withNodeKey(nodev2Key)
    builder.withNetworkConfigurationDetails(nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort).tryGet()
    builder.build().tryGet()
    var builder = WakuNodeBuilder.init()
    builder.withNodeKey(nodev2Key)
    builder
    .withNetworkConfigurationDetails(
      nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort
    )
    .tryGet()
    builder.build().tryGet()

  return Chat2MatterBridge(mbClient: mbClient,
                           nodev2: nodev2,
                           running: false,
                           pollPeriod: chronos.seconds(1),
                           contentTopic: contentTopic)
  return Chat2MatterBridge(
    mbClient: mbClient,
    nodev2: nodev2,
    running: false,
    pollPeriod: chronos.seconds(1),
    contentTopic: contentTopic,
  )

proc start*(cmb: Chat2MatterBridge) {.async.} =
  info "Starting Chat2MatterBridge"
@@ -187,7 +206,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =

  # Start Matterbridge polling (@TODO: use streaming interface)
  proc mbHandler(jsonNode: JsonNode) {.async.} =
    trace "Bridging message from Matterbridge to chat2", jsonNode=jsonNode
    trace "Bridging message from Matterbridge to chat2", jsonNode = jsonNode
    waitFor cmb.toChat2(jsonNode)

  asyncSpawn cmb.pollMatterbridge(mbHandler)
@@ -203,8 +222,10 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =

  # Bridging
  # Handle messages on Waku v2 and bridge to Matterbridge
  proc relayHandler(pubsubTopic: PubsubTopic, msg: WakuMessage): Future[void] {.async.} =
    trace "Bridging message from Chat2 to Matterbridge", msg=msg
  proc relayHandler(
      pubsubTopic: PubsubTopic, msg: WakuMessage
  ): Future[void] {.async.} =
    trace "Bridging message from Chat2 to Matterbridge", msg = msg
    try:
      cmb.toMatterbridge(msg)
    except:
@@ -219,11 +240,10 @@ proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} =

  await cmb.nodev2.stop()

{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
{.pop.}
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
when isMainModule:
  import
    ../../../waku/common/utils/nat,
    ../../waku/waku_api/message_cache
  import ../../../waku/common/utils/nat, ../../waku/waku_api/message_cache

  let
    rng = newRng()
@@ -232,9 +252,12 @@ when isMainModule:
  if conf.logLevel != LogLevel.NONE:
    setLogLevel(conf.logLevel)

  let natRes = setupNat(conf.nat, clientId,
    Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
    Port(uint16(conf.udpPort) + conf.portsShift))
  let natRes = setupNat(
    conf.nat,
    clientId,
    Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
    Port(uint16(conf.udpPort) + conf.portsShift),
  )
  if natRes.isErr():
    error "Error in setupNat", error = natRes.error

@@ -243,19 +266,22 @@ when isMainModule:
    (nodev2ExtIp, nodev2ExtPort, _) = natRes.get()
    ## The following heuristic assumes that, in absence of manual
    ## config, the external port is the same as the bind port.
    extPort = if nodev2ExtIp.isSome() and nodev2ExtPort.isNone():
                some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift))
              else:
                nodev2ExtPort
    extPort =
      if nodev2ExtIp.isSome() and nodev2ExtPort.isNone():
        some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift))
      else:
        nodev2ExtPort

  let
    bridge = Chat2Matterbridge.new(
      mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)),
      mbGateway = conf.mbGateway,
      nodev2Key = conf.nodekey,
      nodev2BindIp = conf.listenAddress, nodev2BindPort = Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
      nodev2ExtIp = nodev2ExtIp, nodev2ExtPort = extPort,
      contentTopic = conf.contentTopic)
  let bridge = Chat2Matterbridge.new(
    mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)),
    mbGateway = conf.mbGateway,
    nodev2Key = conf.nodekey,
    nodev2BindIp = conf.listenAddress,
    nodev2BindPort = Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
    nodev2ExtIp = nodev2ExtIp,
    nodev2ExtPort = extPort,
    contentTopic = conf.contentTopic,
  )

  waitFor bridge.start()

@@ -284,7 +310,9 @@ when isMainModule:
    let filterPeer = parsePeerInfo(conf.filternode)
    if filterPeer.isOk():
      bridge.nodev2.peerManager.addServicePeer(filterPeer.value, WakuLegacyFilterCodec)
      bridge.nodev2.peerManager.addServicePeer(filterPeer.value, WakuFilterSubscribeCodec)
      bridge.nodev2.peerManager.addServicePeer(
        filterPeer.value, WakuFilterSubscribeCodec
      )
    else:
      error "Error parsing conf.filternode", error = filterPeer.error


@@ -1,118 +1,125 @@
import
  confutils, confutils/defs, confutils/std/net, chronicles, chronos,
  confutils,
  confutils/defs,
  confutils/std/net,
  chronicles,
  chronos,
  libp2p/crypto/[crypto, secp],
  eth/keys

type
  Chat2MatterbridgeConf* = object
    logLevel* {.
      desc: "Sets the log level"
      defaultValue: LogLevel.INFO
      name: "log-level" .}: LogLevel
type Chat2MatterbridgeConf* = object
  logLevel* {.
    desc: "Sets the log level", defaultValue: LogLevel.INFO, name: "log-level"
  .}: LogLevel

    listenAddress* {.
      defaultValue: defaultListenAddress(config)
      desc: "Listening address for the LibP2P traffic"
      name: "listen-address"}: IpAddress
  listenAddress* {.
    defaultValue: defaultListenAddress(config),
    desc: "Listening address for the LibP2P traffic",
    name: "listen-address"
  .}: IpAddress

    libp2pTcpPort* {.
      desc: "Libp2p TCP listening port (for Waku v2)"
      defaultValue: 9000
      name: "libp2p-tcp-port" .}: uint16
  libp2pTcpPort* {.
    desc: "Libp2p TCP listening port (for Waku v2)",
    defaultValue: 9000,
    name: "libp2p-tcp-port"
  .}: uint16

    udpPort* {.
      desc: "UDP listening port"
      defaultValue: 9000
      name: "udp-port" .}: uint16
  udpPort* {.desc: "UDP listening port", defaultValue: 9000, name: "udp-port".}: uint16

    portsShift* {.
      desc: "Add a shift to all default port numbers"
      defaultValue: 0
      name: "ports-shift" .}: uint16
  portsShift* {.
    desc: "Add a shift to all default port numbers",
    defaultValue: 0,
    name: "ports-shift"
  .}: uint16

    nat* {.
      desc: "Specify method to use for determining public address. " &
        "Must be one of: any, none, upnp, pmp, extip:<IP>"
      defaultValue: "any" .}: string
  nat* {.
    desc:
      "Specify method to use for determining public address. " &
      "Must be one of: any, none, upnp, pmp, extip:<IP>",
    defaultValue: "any"
  .}: string

    metricsServer* {.
      desc: "Enable the metrics server"
      defaultValue: false
      name: "metrics-server" .}: bool
  metricsServer* {.
    desc: "Enable the metrics server", defaultValue: false, name: "metrics-server"
  .}: bool

    metricsServerAddress* {.
      desc: "Listening address of the metrics server"
      defaultValue: parseIpAddress("127.0.0.1")
      name: "metrics-server-address" }: IpAddress
  metricsServerAddress* {.
    desc: "Listening address of the metrics server",
    defaultValue: parseIpAddress("127.0.0.1"),
    name: "metrics-server-address"
  .}: IpAddress

    metricsServerPort* {.
      desc: "Listening HTTP port of the metrics server"
      defaultValue: 8008
      name: "metrics-server-port" .}: uint16
  metricsServerPort* {.
    desc: "Listening HTTP port of the metrics server",
    defaultValue: 8008,
    name: "metrics-server-port"
  .}: uint16

    ### Waku v2 options

    staticnodes* {.
      desc: "Multiaddr of peer to directly connect with. Argument may be repeated"
      name: "staticnode" }: seq[string]
  ### Waku v2 options
  staticnodes* {.
    desc: "Multiaddr of peer to directly connect with. Argument may be repeated",
    name: "staticnode"
  .}: seq[string]

    nodekey* {.
      desc: "P2P node private key as hex"
      defaultValue: crypto.PrivateKey.random(Secp256k1, newRng()[]).tryGet()
      name: "nodekey" }: crypto.PrivateKey
  nodekey* {.
    desc: "P2P node private key as hex",
    defaultValue: crypto.PrivateKey.random(Secp256k1, newRng()[]).tryGet(),
    name: "nodekey"
  .}: crypto.PrivateKey

    topics* {.
      desc: "Default topics to subscribe to (space separated list)"
      defaultValue: "/waku/2/default-waku/proto"
      name: "topics" .}: string
  topics* {.
    desc: "Default topics to subscribe to (space separated list)",
    defaultValue: "/waku/2/default-waku/proto",
    name: "topics"
  .}: string

    store* {.
      desc: "Flag whether to start store protocol",
      defaultValue: true
      name: "store" }: bool
  store* {.
    desc: "Flag whether to start store protocol", defaultValue: true, name: "store"
  .}: bool

    filter* {.
      desc: "Flag whether to start filter protocol",
      defaultValue: false
      name: "filter" }: bool
  filter* {.
    desc: "Flag whether to start filter protocol", defaultValue: false, name: "filter"
  .}: bool

    relay* {.
      desc: "Flag whether to start relay protocol",
      defaultValue: true
      name: "relay" }: bool
  relay* {.
    desc: "Flag whether to start relay protocol", defaultValue: true, name: "relay"
  .}: bool

    storenode* {.
      desc: "Multiaddr of peer to connect with for waku store protocol"
      defaultValue: ""
      name: "storenode" }: string
  storenode* {.
    desc: "Multiaddr of peer to connect with for waku store protocol",
    defaultValue: "",
    name: "storenode"
  .}: string

    filternode* {.
      desc: "Multiaddr of peer to connect with for waku filter protocol"
      defaultValue: ""
      name: "filternode" }: string

    # Matterbridge options
    mbHostAddress* {.
      desc: "Listening address of the Matterbridge host",
      defaultValue: parseIpAddress("127.0.0.1")
      name: "mb-host-address" }: IpAddress
  filternode* {.
    desc: "Multiaddr of peer to connect with for waku filter protocol",
    defaultValue: "",
    name: "filternode"
  .}: string

    mbHostPort* {.
      desc: "Listening port of the Matterbridge host",
      defaultValue: 4242
      name: "mb-host-port" }: uint16

    mbGateway* {.
      desc: "Matterbridge gateway"
      defaultValue: "gateway1"
      name: "mb-gateway" }: string
  # Matterbridge options
  mbHostAddress* {.
    desc: "Listening address of the Matterbridge host",
    defaultValue: parseIpAddress("127.0.0.1"),
    name: "mb-host-address"
  .}: IpAddress

    ## Chat2 options
  mbHostPort* {.
    desc: "Listening port of the Matterbridge host",
    defaultValue: 4242,
    name: "mb-host-port"
  .}: uint16

    contentTopic* {.
      desc: "Content topic to bridge chat messages to."
      defaultValue: "/toy-chat/2/huilong/proto"
      name: "content-topic" }: string
  mbGateway* {.
    desc: "Matterbridge gateway", defaultValue: "gateway1", name: "mb-gateway"
  .}: string

  ## Chat2 options
  contentTopic* {.
    desc: "Content topic to bridge chat messages to.",
    defaultValue: "/toy-chat/2/huilong/proto",
    name: "content-topic"
  .}: string

proc parseCmdArg*(T: type keys.KeyPair, p: string): T =
  try:

@@ -4,7 +4,7 @@ else:
  {.push raises: [].}

import
  std/[tables,strutils,times,sequtils],
  std/[tables, strutils, times, sequtils],
  stew/results,
  stew/shims/net,
  chronicles,
@@ -44,19 +44,22 @@ const AvgPingWindow = 10.0

const git_version* {.strdefine.} = "n/a"

proc setDiscoveredPeersCapabilities(
  routingTableNodes: seq[Node]) =
proc setDiscoveredPeersCapabilities(routingTableNodes: seq[Node]) =
  for capability in @[Relay, Store, Filter, Lightpush]:
    let nOfNodesWithCapability = routingTableNodes.countIt(it.record.supportsCapability(capability))
    info "capabilities as per ENR waku flag", capability=capability, amount=nOfNodesWithCapability
    networkmonitor_peer_type_as_per_enr.set(int64(nOfNodesWithCapability), labelValues = [$capability])
    let nOfNodesWithCapability =
      routingTableNodes.countIt(it.record.supportsCapability(capability))
    info "capabilities as per ENR waku flag",
      capability = capability, amount = nOfNodesWithCapability
    networkmonitor_peer_type_as_per_enr.set(
      int64(nOfNodesWithCapability), labelValues = [$capability]
    )

proc analyzePeer(
    customPeerInfo: CustomPeerInfoRef,
    peerInfo: RemotePeerInfo,
    node: WakuNode,
    timeout: chronos.Duration
  ): Future[Result[string, string]] {.async.} =
    customPeerInfo: CustomPeerInfoRef,
    peerInfo: RemotePeerInfo,
    node: WakuNode,
    timeout: chronos.Duration,
): Future[Result[string, string]] {.async.} =
  var pingDelay: chronos.Duration

  proc ping(): Future[Result[void, string]] {.async, gcsafe.} =
@ -64,12 +67,11 @@ proc analyzePeer(
|
||||
let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
|
||||
pingDelay = await node.libp2pPing.ping(conn)
|
||||
return ok()
|
||||
|
||||
except CatchableError:
|
||||
var msg = getCurrentExceptionMsg()
|
||||
if msg == "Future operation cancelled!":
|
||||
msg = "timedout"
|
||||
warn "failed to ping the peer", peer=peerInfo, err=msg
|
||||
warn "failed to ping the peer", peer = peerInfo, err = msg
|
||||
|
||||
customPeerInfo.connError = msg
|
||||
return err("could not ping peer: " & msg)
|
||||
@ -81,36 +83,44 @@ proc analyzePeer(
|
||||
return err(customPeerInfo.connError)
|
||||
|
||||
customPeerInfo.connError = ""
|
||||
info "successfully pinged peer", peer=peerInfo, duration=pingDelay.millis
|
||||
info "successfully pinged peer", peer = peerInfo, duration = pingDelay.millis
|
||||
networkmonitor_peer_ping.observe(pingDelay.millis)
|
||||
|
||||
if customPeerInfo.avgPingDuration == 0.millis:
|
||||
customPeerInfo.avgPingDuration = pingDelay
|
||||
|
||||
# TODO: check why the calculation ends up losing precision
|
||||
customPeerInfo.avgPingDuration = int64((float64(customPeerInfo.avgPingDuration.millis) * (AvgPingWindow - 1.0) + float64(pingDelay.millis)) / AvgPingWindow).millis
|
||||
customPeerInfo.avgPingDuration = int64(
|
||||
(
|
||||
float64(customPeerInfo.avgPingDuration.millis) * (AvgPingWindow - 1.0) +
|
||||
float64(pingDelay.millis)
|
||||
) / AvgPingWindow
|
||||
).millis
|
||||
customPeerInfo.lastPingDuration = pingDelay
|
||||
|
||||
return ok(customPeerInfo.peerId)
|
||||
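
For reference, the expression reflowed above is a sliding-window average over roughly the last AvgPingWindow pings (AvgPingWindow = 10.0 per the hunk context). A standalone sketch of the same update rule, including the int64/float64 round-trip that the TODO suspects of losing precision:

const AvgPingWindow = 10.0

# Exponential-style moving average: each new sample contributes 1/AvgPingWindow.
proc updateAvg(avgMillis, sampleMillis: int64): int64 =
  # The truncation back to int64 drops sub-millisecond remainders on every
  # update, which is the precision loss the TODO in this hunk refers to.
  int64(
    (float64(avgMillis) * (AvgPingWindow - 1.0) + float64(sampleMillis)) /
      AvgPingWindow
  )

when isMainModule:
  var avg = 100'i64
  for sample in [120'i64, 90, 110]:
    avg = updateAvg(avg, sample)
  echo avg # trends toward the recent samples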

proc shouldReconnect(customPeerInfo: CustomPeerInfoRef): bool =
  let reconnetIntervalCheck = getTime().toUnix() >= customPeerInfo.lastTimeConnected + ReconnectTime
  let reconnetIntervalCheck =
    getTime().toUnix() >= customPeerInfo.lastTimeConnected + ReconnectTime
  var retriesCheck = customPeerInfo.retries < MaxConnectionRetries

  if not retriesCheck and getTime().toUnix() >= customPeerInfo.lastTimeConnected + ResetRetriesAfter:
  if not retriesCheck and
      getTime().toUnix() >= customPeerInfo.lastTimeConnected + ResetRetriesAfter:
    customPeerInfo.retries = 0
    retriesCheck = true
    info "resetting retries counter", peerId=customPeerInfo.peerId
    info "resetting retries counter", peerId = customPeerInfo.peerId

  return reconnetIntervalCheck and retriesCheck

# TODO: Split in discover, connect
proc setConnectedPeersMetrics(discoveredNodes: seq[Node],
                              node: WakuNode,
                              timeout: chronos.Duration,
                              restClient: RestClientRef,
                              allPeers: CustomPeersTableRef) {.async.} =

proc setConnectedPeersMetrics(
    discoveredNodes: seq[Node],
    node: WakuNode,
    timeout: chronos.Duration,
    restClient: RestClientRef,
    allPeers: CustomPeersTableRef,
) {.async.} =
  let currentTime = getTime().toUnix()

  var newPeers = 0
@ -122,18 +132,18 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node],
  for discNode in discoveredNodes:
    let typedRecord = discNode.record.toTypedRecord()
    if not typedRecord.isOk():
      warn "could not convert record to typed record", record=discNode.record
      warn "could not convert record to typed record", record = discNode.record
      continue

    let secp256k1 = typedRecord.get().secp256k1
    if not secp256k1.isSome():
      warn "could not get secp256k1 key", typedRecord=typedRecord.get()
      warn "could not get secp256k1 key", typedRecord = typedRecord.get()
      continue

    let peerRes = toRemotePeerInfo(discNode.record)

    let peerInfo = peerRes.valueOr():
      warn "error converting record to remote peer info", record=discNode.record
    let peerInfo = peerRes.valueOr:
      warn "error converting record to remote peer info", record = discNode.record
      continue

    # create new entry if new peerId found
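
nph also normalises `valueOr()` to the paren-free `valueOr:` shown above. For reference, a minimal sketch of the `Result.valueOr` pattern from stew/results that these hunks rely on, using toy values rather than the node's types:

import std/strutils
import stew/results

proc parsePort(s: string): Result[int, string] =
  try:
    ok(parseInt(s))
  except ValueError:
    err("not a number: " & s)

# valueOr unwraps the ok value; on err it runs the block with `error`
# bound to the error value, and the block must leave scope
# (return/continue/quit), exactly as the `continue` above does.
proc demo(): int =
  let port = parsePort("9000").valueOr:
    echo "falling back: ", error
    return -1
  port

echo demo() # 9000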

@ -143,7 +153,7 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node],
        allPeers[peerId] = CustomPeerInfoRef(peerId: peerId)
        newPeers += 1
      else:
        info "already seen", peerId=peerId
        info "already seen", peerId = peerId

      let customPeerInfo = allPeers[peerId]

@ -153,7 +163,7 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node],
      customPeerInfo.discovered += 1

      if not typedRecord.get().ip.isSome():
        warn "ip field is not set", record=typedRecord.get()
        warn "ip field is not set", record = typedRecord.get()
        continue

      let ip = $typedRecord.get().ip.get().join(".")
@ -162,7 +172,8 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node],
      # try to ping the peer
      if shouldReconnect(customPeerInfo):
        if customPeerInfo.retries > 0:
          warn "trying to dial failed peer again", peerId=peerId, retry=customPeerInfo.retries
          warn "trying to dial failed peer again",
            peerId = peerId, retry = customPeerInfo.retries
        analyzeFuts.add(analyzePeer(customPeerInfo, peerInfo, node, timeout))

  # Wait for all connection attempts to finish
@ -170,16 +181,16 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node],

  for peerIdFut in analyzedPeers:
    let peerIdRes = await peerIdFut
    let peerIdStr = peerIdRes.valueOr():
    let peerIdStr = peerIdRes.valueOr:
      continue

    successfulConnections += 1
    let peerId = PeerId.init(peerIdStr).valueOr():
      warn "failed to parse peerId", peerId=peerIdStr
    let peerId = PeerId.init(peerIdStr).valueOr:
      warn "failed to parse peerId", peerId = peerIdStr
      continue
    var customPeerInfo = allPeers[peerIdStr]

    debug "connected to peer", peer=customPeerInfo[]
    debug "connected to peer", peer = customPeerInfo[]

    # after connection, get supported protocols
    let lp2pPeerStore = node.switch.peerStore
@ -191,9 +202,9 @@ proc setConnectedPeersMetrics(discoveredNodes: seq[Node],
    let nodeUserAgent = lp2pPeerStore[AgentBook][peerId]
    customPeerInfo.userAgent = nodeUserAgent

  info "number of newly discovered peers", amount=newPeers
  info "number of newly discovered peers", amount = newPeers
  # inform the total connections that we did in this round
  info "number of successful connections", amount=successfulConnections
  info "number of successful connections", amount = successfulConnections

proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} =
  var allProtocols: Table[string, int]
@ -207,8 +218,9 @@ proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} =
    for protocol in peerInfo.supportedProtocols:
      allProtocols[protocol] = allProtocols.mgetOrPut(protocol, 0) + 1

      # store available user-agents in the network
      allAgentStrings[peerInfo.userAgent] = allAgentStrings.mgetOrPut(peerInfo.userAgent, 0) + 1
    # store available user-agents in the network
    allAgentStrings[peerInfo.userAgent] =
      allAgentStrings.mgetOrPut(peerInfo.userAgent, 0) + 1

    if peerInfo.country != "":
      countries[peerInfo.country] = countries.mgetOrPut(peerInfo.country, 0) + 1
@ -219,25 +231,32 @@ proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} =

  networkmonitor_peer_count.set(int64(connectedPeers), labelValues = ["true"])
  networkmonitor_peer_count.set(int64(failedPeers), labelValues = ["false"])
  # update count on each protocol
  # update count on each protocol
  for protocol in allProtocols.keys():
    let countOfProtocols = allProtocols.mgetOrPut(protocol, 0)
    networkmonitor_peer_type_as_per_protocol.set(int64(countOfProtocols), labelValues = [protocol])
    info "supported protocols in the network", protocol=protocol, count=countOfProtocols
    networkmonitor_peer_type_as_per_protocol.set(
      int64(countOfProtocols), labelValues = [protocol]
    )
    info "supported protocols in the network",
      protocol = protocol, count = countOfProtocols

  # update count on each user-agent
  for userAgent in allAgentStrings.keys():
    let countOfUserAgent = allAgentStrings.mgetOrPut(userAgent, 0)
    networkmonitor_peer_user_agents.set(int64(countOfUserAgent), labelValues = [userAgent])
    info "user agents participating in the network", userAgent=userAgent, count=countOfUserAgent
    networkmonitor_peer_user_agents.set(
      int64(countOfUserAgent), labelValues = [userAgent]
    )
    info "user agents participating in the network",
      userAgent = userAgent, count = countOfUserAgent

  for country in countries.keys():
    let peerCount = countries.mgetOrPut(country, 0)
    networkmonitor_peer_country_count.set(int64(peerCount), labelValues = [country])
    info "number of peers per country", country=country, count=peerCount
    info "number of peers per country", country = country, count = peerCount
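
The tallies above are plain `mgetOrPut` histogramming over a Table before the totals are pushed into labelled Prometheus gauges. A self-contained sketch of that pattern with toy data (the gauge call is left as a comment since it needs the metrics registry):

import std/tables

var countries: Table[string, int]

# mgetOrPut returns the existing value or inserts the default, so the
# read-modify-write tally stays a single lookup per key.
for c in ["DE", "US", "DE", "FR", "DE"]:
  countries[c] = countries.mgetOrPut(c, 0) + 1

for country, peerCount in countries.pairs():
  echo country, ": ", peerCount
  # in networkmonitor this is where the labelled gauge is set, e.g.
  # networkmonitor_peer_country_count.set(int64(peerCount), labelValues = [country])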

proc populateInfoFromIp(allPeersRef: CustomPeersTableRef,
                        restClient: RestClientRef) {.async.} =
proc populateInfoFromIp(
    allPeersRef: CustomPeersTableRef, restClient: RestClientRef
) {.async.} =
  for peer in allPeersRef.keys():
    if allPeersRef[peer].country != "" and allPeersRef[peer].city != "":
      continue
@ -252,7 +271,7 @@ proc populateInfoFromIp(allPeersRef: CustomPeersTableRef,
      let response = await restClient.ipToLocation(allPeersRef[peer].ip)
      location = response.data
    except CatchableError:
      warn "could not get location", ip=allPeersRef[peer].ip
      warn "could not get location", ip = allPeersRef[peer].ip
      continue
    allPeersRef[peer].country = location.country
    allPeersRef[peer].city = location.city
@ -260,12 +279,13 @@ proc populateInfoFromIp(allPeersRef: CustomPeersTableRef,
# TODO: Split in discovery, connections, and ip2location
# crawls the network discovering peers and trying to connect to them
# metrics are processed and exposed
proc crawlNetwork(node: WakuNode,
                  wakuDiscv5: WakuDiscoveryV5,
                  restClient: RestClientRef,
                  conf: NetworkMonitorConf,
                  allPeersRef: CustomPeersTableRef) {.async.} =

proc crawlNetwork(
    node: WakuNode,
    wakuDiscv5: WakuDiscoveryV5,
    restClient: RestClientRef,
    conf: NetworkMonitorConf,
    allPeersRef: CustomPeersTableRef,
) {.async.} =
  let crawlInterval = conf.refreshInterval * 1000
  while true:
    let startTime = Moment.now()
@ -281,7 +301,9 @@ proc crawlNetwork(node: WakuNode,
    # tries to connect to all newly discovered nodes
    # and populates metrics related to peers we could connect
    # note random discovered nodes can be already known
    await setConnectedPeersMetrics(discoveredNodes, node, conf.timeout, restClient, allPeersRef)
    await setConnectedPeersMetrics(
      discoveredNodes, node, conf.timeout, restClient, allPeersRef
    )

    updateMetrics(allPeersRef)

@ -291,7 +313,7 @@ proc crawlNetwork(node: WakuNode,
    let totalNodes = flatNodes.len
    let seenNodes = flatNodes.countIt(it.seen)

    info "discovered nodes: ", total=totalNodes, seen=seenNodes
    info "discovered nodes: ", total = totalNodes, seen = seenNodes

    # Notes:
    # we dont run ipMajorityLoop
@ -299,14 +321,16 @@ proc crawlNetwork(node: WakuNode,
    let endTime = Moment.now()
    let elapsed = (endTime - startTime).nanos

    info "crawl duration", time=elapsed.millis
    info "crawl duration", time = elapsed.millis

    await sleepAsync(crawlInterval.millis - elapsed.millis)

proc retrieveDynamicBootstrapNodes(dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress]): Result[seq[RemotePeerInfo], string] =
proc retrieveDynamicBootstrapNodes(
    dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress]
): Result[seq[RemotePeerInfo], string] =
  if dnsDiscovery and dnsDiscoveryUrl != "":
    # DNS discovery
    debug "Discovering nodes using Waku DNS discovery", url=dnsDiscoveryUrl
    debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl

    var nameServers: seq[TransportAddress]
    for ip in dnsDiscoveryNameServers:
@ -315,24 +339,29 @@ proc retrieveDynamicBootstrapNodes(dnsDiscovery: bool, dnsDiscoveryUrl: string,
    let dnsResolver = DnsResolver.new(nameServers)

    proc resolver(domain: string): Future[string] {.async, gcsafe.} =
      trace "resolving", domain=domain
      trace "resolving", domain = domain
      let resolved = await dnsResolver.resolveTxt(domain)
      return resolved[0] # Use only first answer

    var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver)
    if wakuDnsDiscovery.isOk():
      return wakuDnsDiscovery.get().findPeers()
        .mapErr(proc (e: cstring): string = $e)
      return wakuDnsDiscovery.get().findPeers().mapErr(
        proc(e: cstring): string =
          $e
      )
    else:
      warn "Failed to init Waku DNS discovery"

  debug "No method for retrieving dynamic bootstrap nodes specified."
  ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default
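
Context for the hunk above: the TXT resolver closure is handed to WakuDnsDiscovery, which walks the `enrtree` and yields RemotePeerInfo values. A trimmed sketch of just the resolver wiring; the name-server addresses are illustrative (wakucanary, later in this commit, hardcodes the same pair):

import chronos, stew/shims/net
import libp2p/nameresolving/dnsresolver

# Illustrative name servers; the hunk above builds this list from
# dnsDiscoveryNameServers instead of hardcoding it.
let nameServers =
  @[
    initTAddress(parseIpAddress("1.1.1.1"), Port(53)),
    initTAddress(parseIpAddress("1.0.0.1"), Port(53)),
  ]
let dnsResolver = DnsResolver.new(nameServers)

proc resolver(domain: string): Future[string] {.async, gcsafe.} =
  # WakuDnsDiscovery only needs the first TXT answer of each enrtree node
  let resolved = await dnsResolver.resolveTxt(domain)
  return resolved[0]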

proc getBootstrapFromDiscDns(conf: NetworkMonitorConf): Result[seq[enr.Record], string] =
proc getBootstrapFromDiscDns(
    conf: NetworkMonitorConf
): Result[seq[enr.Record], string] =
  try:
    let dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
    let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes(true, conf.dnsDiscoveryUrl, dnsNameServers)
    let dynamicBootstrapNodesRes =
      retrieveDynamicBootstrapNodes(true, conf.dnsDiscoveryUrl, dnsNameServers)
    if not dynamicBootstrapNodesRes.isOk():
      error("failed discovering peers from DNS")
    let dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
@ -345,22 +374,28 @@ proc getBootstrapFromDiscDns(conf: NetworkMonitorConf): Result[seq[enr.Record],
      let
        enr = n.enr.get()
        tenrRes = enr.toTypedRecord()
      if tenrRes.isOk() and (tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome()):
      if tenrRes.isOk() and (
        tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome()
      ):
        discv5BootstrapEnrs.add(enr)
    return ok(discv5BootstrapEnrs)
  except CatchableError:
    error("failed discovering peers from DNS")

proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV5), string] =
  let bindIp = try:
    parseIpAddress("0.0.0.0")
  except CatchableError:
    return err("could not start node: " & getCurrentExceptionMsg())
proc initAndStartApp(
    conf: NetworkMonitorConf
): Result[(WakuNode, WakuDiscoveryV5), string] =
  let bindIp =
    try:
      parseIpAddress("0.0.0.0")
    except CatchableError:
      return err("could not start node: " & getCurrentExceptionMsg())

  let extIp = try:
    parseIpAddress("127.0.0.1")
  except CatchableError:
    return err("could not start node: " & getCurrentExceptionMsg())
  let extIp =
    try:
      parseIpAddress("127.0.0.1")
    except CatchableError:
      return err("could not start node: " & getCurrentExceptionMsg())

  let
    # some hardcoded parameters
@ -368,34 +403,33 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV
    key = crypto.PrivateKey.random(Secp256k1, rng[])[]
    nodeTcpPort = Port(60000)
    nodeUdpPort = Port(9000)
    flags = CapabilitiesBitfield.init(lightpush = false, filter = false, store = false, relay = true)
    flags = CapabilitiesBitfield.init(
      lightpush = false, filter = false, store = false, relay = true
    )

  var builder = EnrBuilder.init(key)

  builder.withIpAddressAndPorts(
    ipAddr = some(extIp),
    tcpPort = some(nodeTcpPort),
    udpPort = some(nodeUdpPort),
    ipAddr = some(extIp), tcpPort = some(nodeTcpPort), udpPort = some(nodeUdpPort)
  )
  builder.withWakuCapabilities(flags)
  let addShardedTopics = builder.withShardedTopics(conf.pubsubTopics)
  if addShardedTopics.isErr():
    error "failed to add sharded topics to ENR", error=addShardedTopics.error
    error "failed to add sharded topics to ENR", error = addShardedTopics.error
    return err($addShardedTopics.error)

  let recordRes = builder.build()
  let record =
    if recordRes.isErr():
      return err("cannot build record: " & $recordRes.error)
    else: recordRes.get()
    else:
      recordRes.get()

  var nodeBuilder = WakuNodeBuilder.init()

  nodeBuilder.withNodeKey(key)
  nodeBuilder.withRecord(record)
  nodeBuilder.withPeerManagerConfig(
    maxRelayPeers = none(int),
    shardAware = true)
  nodeBuilder.withPeerManagerConfig(maxRelayPeers = none(int), shardAware = true)
  let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort)
  if res.isErr():
    return err("node building error" & $res.error)
@ -404,7 +438,8 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV
  let node =
    if nodeRes.isErr():
      return err("node building error" & $res.error)
    else: nodeRes.get()
    else:
      nodeRes.get()

  var discv5BootstrapEnrsRes = getBootstrapFromDiscDns(conf)
  if discv5BootstrapEnrsRes.isErr():
@ -422,7 +457,7 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV
    port: nodeUdpPort,
    privateKey: keys.PrivateKey(key.skkey),
    bootstrapRecords: discv5BootstrapEnrs,
    autoupdateRecord: false
    autoupdateRecord: false,
  )

  let wakuDiscv5 = WakuDiscoveryV5.new(node.rng, discv5Conf, some(record))
@ -434,15 +469,17 @@ proc initAndStartApp(conf: NetworkMonitorConf): Result[(WakuNode, WakuDiscoveryV

  ok((node, wakuDiscv5))

proc startRestApiServer(conf: NetworkMonitorConf,
                        allPeersInfo: CustomPeersTableRef,
                        numMessagesPerContentTopic: ContentTopicMessageTableRef
                        ): Result[void, string] =
proc startRestApiServer(
    conf: NetworkMonitorConf,
    allPeersInfo: CustomPeersTableRef,
    numMessagesPerContentTopic: ContentTopicMessageTableRef,
): Result[void, string] =
  try:
    let serverAddress = initTAddress(conf.metricsRestAddress & ":" & $conf.metricsRestPort)
    let serverAddress =
      initTAddress(conf.metricsRestAddress & ":" & $conf.metricsRestPort)
    proc validate(pattern: string, value: string): int =
      if pattern.startsWith("{") and pattern.endsWith("}"): 0
      else: 1
      if pattern.startsWith("{") and pattern.endsWith("}"): 0 else: 1

    var router = RestRouter.init(validate)
    router.installHandler(allPeersInfo, numMessagesPerContentTopic)
    var sres = RestServerRef.new(router, serverAddress)
@ -454,13 +491,16 @@ proc startRestApiServer(conf: NetworkMonitorConf,

# handles rx of messages over a topic (see subscribe)
# counts the number of messages per content topic
proc subscribeAndHandleMessages(node: WakuNode,
                                pubsubTopic: PubsubTopic,
                                msgPerContentTopic: ContentTopicMessageTableRef) =

proc subscribeAndHandleMessages(
    node: WakuNode,
    pubsubTopic: PubsubTopic,
    msgPerContentTopic: ContentTopicMessageTableRef,
) =
  # handle function
  proc handler(pubsubTopic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
    trace "rx message", pubsubTopic=pubsubTopic, contentTopic=msg.contentTopic
  proc handler(
      pubsubTopic: PubsubTopic, msg: WakuMessage
  ): Future[void] {.async, gcsafe.} =
    trace "rx message", pubsubTopic = pubsubTopic, contentTopic = msg.contentTopic

    # If we reach a table limit size, remove c topics with the least messages.
    let tableSize = 100
@ -482,11 +522,11 @@ when isMainModule:
  {.pop.}
  let confRes = NetworkMonitorConf.loadConfig()
  if confRes.isErr():
    error "could not load cli variables", err=confRes.error
    error "could not load cli variables", err = confRes.error
    quit(1)

  var conf = confRes.get()
  info "cli flags", conf=conf
  info "cli flags", conf = conf

  if conf.clusterId == 1:
    let twnClusterConf = ClusterConf.TheWakuNetworkConf()
@ -509,22 +549,23 @@ when isMainModule:

  # start metrics server
  if conf.metricsServer:
    let res = startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort))
    let res =
      startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort))
    if res.isErr():
      error "could not start metrics server", err=res.error
      error "could not start metrics server", err = res.error
      quit(1)

  # start rest server for custom metrics
  let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic)
  if res.isErr():
    error "could not start rest api server", err=res.error
    error "could not start rest api server", err = res.error
    quit(1)

  # create a rest client
  let clientRest = RestClientRef.new(url="http://ip-api.com",
                                     connectTimeout=ctime.seconds(2))
  let clientRest =
    RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2))
  if clientRest.isErr():
    error "could not start rest api client", err=res.error
    error "could not start rest api client", err = res.error
    quit(1)
  let restClient = clientRest.get()

@ -540,7 +581,6 @@ when isMainModule:
  waitFor node.mountLibp2pPing()

  if conf.rlnRelayEthContractAddress != "":

    let rlnConf = WakuRlnConfig(
      rlnRelayDynamic: conf.rlnRelayDynamic,
      rlnRelayCredIndex: some(uint(0)),
@ -549,17 +589,17 @@ when isMainModule:
      rlnRelayCredPath: "",
      rlnRelayCredPassword: "",
      rlnRelayTreePath: conf.rlnRelayTreePath,
      rlnEpochSizeSec: conf.rlnEpochSizeSec
      rlnEpochSizeSec: conf.rlnEpochSizeSec,
    )

    try:
      waitFor node.mountRlnRelay(rlnConf)
    except CatchableError:
      error "failed to setup RLN", err=getCurrentExceptionMsg()
      error "failed to setup RLN", err = getCurrentExceptionMsg()
      quit 1

  node.mountMetadata(conf.clusterId).isOkOr:
    error "failed to mount waku metadata protocol: ", err=error
    error "failed to mount waku metadata protocol: ", err = error
    quit 1

  for pubsubTopic in conf.pubsubTopics:

@ -10,106 +10,127 @@ import

type EthRpcUrl = distinct string

type
  NetworkMonitorConf* = object
    logLevel* {.
      desc: "Sets the log level",
      defaultValue: LogLevel.INFO,
      name: "log-level",
      abbr: "l" .}: LogLevel
type NetworkMonitorConf* = object
  logLevel* {.
    desc: "Sets the log level",
    defaultValue: LogLevel.INFO,
    name: "log-level",
    abbr: "l"
  .}: LogLevel

    timeout* {.
      desc: "Timeout to consider that the connection failed",
      defaultValue: chronos.seconds(10),
      name: "timeout",
      abbr: "t" }: chronos.Duration
  timeout* {.
    desc: "Timeout to consider that the connection failed",
    defaultValue: chronos.seconds(10),
    name: "timeout",
    abbr: "t"
  .}: chronos.Duration

    bootstrapNodes* {.
      desc: "Bootstrap ENR node. Argument may be repeated.",
      defaultValue: @[""],
      name: "bootstrap-node",
      abbr: "b" }: seq[string]
  bootstrapNodes* {.
    desc: "Bootstrap ENR node. Argument may be repeated.",
    defaultValue: @[""],
    name: "bootstrap-node",
    abbr: "b"
  .}: seq[string]

    dnsDiscoveryUrl* {.
      desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
      defaultValue: ""
      name: "dns-discovery-url" }: string
  dnsDiscoveryUrl* {.
    desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
    defaultValue: "",
    name: "dns-discovery-url"
  .}: string

    pubsubTopics* {.
      desc: "Default pubsub topic to subscribe to. Argument may be repeated."
      name: "pubsub-topic" .}: seq[string]
  pubsubTopics* {.
    desc: "Default pubsub topic to subscribe to. Argument may be repeated.",
    name: "pubsub-topic"
  .}: seq[string]

    refreshInterval* {.
      desc: "How often new peers are discovered and connected to (in seconds)",
      defaultValue: 5,
      name: "refresh-interval",
      abbr: "r" }: int
  refreshInterval* {.
    desc: "How often new peers are discovered and connected to (in seconds)",
    defaultValue: 5,
    name: "refresh-interval",
    abbr: "r"
  .}: int

    clusterId* {.
      desc: "Cluster id that the node is running in. Node in a different cluster id is disconnected."
      defaultValue: 1
      name: "cluster-id" }: uint32
  clusterId* {.
    desc:
      "Cluster id that the node is running in. Node in a different cluster id is disconnected.",
    defaultValue: 1,
    name: "cluster-id"
  .}: uint32

    rlnRelay* {.
      desc: "Enable spam protection through rln-relay: true|false",
      defaultValue: true
      name: "rln-relay" }: bool
  rlnRelay* {.
    desc: "Enable spam protection through rln-relay: true|false",
    defaultValue: true,
    name: "rln-relay"
  .}: bool

    rlnRelayDynamic* {.
      desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false",
      defaultValue: true
      name: "rln-relay-dynamic" }: bool
  rlnRelayDynamic* {.
    desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false",
    defaultValue: true,
    name: "rln-relay-dynamic"
  .}: bool

    rlnRelayTreePath* {.
      desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
      defaultValue: ""
      name: "rln-relay-tree-path" }: string
  rlnRelayTreePath* {.
    desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
    defaultValue: "",
    name: "rln-relay-tree-path"
  .}: string

    rlnRelayEthClientAddress* {.
      desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
      defaultValue: "http://localhost:8540/",
      name: "rln-relay-eth-client-address" }: EthRpcUrl
  rlnRelayEthClientAddress* {.
    desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
    defaultValue: "http://localhost:8540/",
    name: "rln-relay-eth-client-address"
  .}: EthRpcUrl

    rlnRelayEthContractAddress* {.
      desc: "Address of membership contract on an Ethereum testnet",
      defaultValue: "",
      name: "rln-relay-eth-contract-address" }: string
  rlnRelayEthContractAddress* {.
    desc: "Address of membership contract on an Ethereum testnet",
    defaultValue: "",
    name: "rln-relay-eth-contract-address"
  .}: string

    rlnEpochSizeSec* {.
      desc: "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.",
      defaultValue: 1
      name: "rln-relay-epoch-sec" .}: uint64
  rlnEpochSizeSec* {.
    desc:
      "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.",
    defaultValue: 1,
    name: "rln-relay-epoch-sec"
  .}: uint64

    rlnRelayUserMessageLimit* {.
      desc: "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.",
      defaultValue: 1,
      name: "rln-relay-user-message-limit" .}: uint64
  rlnRelayUserMessageLimit* {.
    desc:
      "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.",
    defaultValue: 1,
    name: "rln-relay-user-message-limit"
  .}: uint64

    ## Prometheus metrics config
    metricsServer* {.
      desc: "Enable the metrics server: true|false"
      defaultValue: true
      name: "metrics-server" }: bool
  ## Prometheus metrics config
  metricsServer* {.
    desc: "Enable the metrics server: true|false",
    defaultValue: true,
    name: "metrics-server"
  .}: bool

    metricsServerAddress* {.
      desc: "Listening address of the metrics server."
      defaultValue: parseIpAddress("127.0.0.1")
      name: "metrics-server-address" }: IpAddress
  metricsServerAddress* {.
    desc: "Listening address of the metrics server.",
    defaultValue: parseIpAddress("127.0.0.1"),
    name: "metrics-server-address"
  .}: IpAddress

    metricsServerPort* {.
      desc: "Listening HTTP port of the metrics server."
      defaultValue: 8008
      name: "metrics-server-port" }: uint16
  metricsServerPort* {.
    desc: "Listening HTTP port of the metrics server.",
    defaultValue: 8008,
    name: "metrics-server-port"
  .}: uint16

    ## Custom metrics rest server
    metricsRestAddress* {.
      desc: "Listening address of the metrics rest server.",
      defaultValue: "127.0.0.1",
      name: "metrics-rest-address" }: string
    metricsRestPort* {.
      desc: "Listening HTTP port of the metrics rest server.",
      defaultValue: 8009,
      name: "metrics-rest-port" }: uint16
  ## Custom metrics rest server
  metricsRestAddress* {.
    desc: "Listening address of the metrics rest server.",
    defaultValue: "127.0.0.1",
    name: "metrics-rest-address"
  .}: string
  metricsRestPort* {.
    desc: "Listening HTTP port of the metrics rest server.",
    defaultValue: 8009,
    name: "metrics-rest-port"
  .}: uint16

proc parseCmdArg*(T: type IpAddress, p: string): T =
  try:
@ -143,18 +164,22 @@ proc parseCmdArg*(T: type EthRpcUrl, s: string): T =
  ## https://url:port/path?query
  ## disallowed patterns:
  ## any valid/invalid ws or wss url
  var httpPattern = re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  var wsPattern = re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  var httpPattern =
    re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  var wsPattern =
    re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  if regex.match(s, wsPattern):
    echo "here"
    raise newException(ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL")
    raise newException(
      ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL"
    )
  if not regex.match(s, httpPattern):
    raise newException(ValueError, "Invalid HTTP RPC URL")
  return EthRpcUrl(s)
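
`parseCmdArg` above is the confutils hook for custom CLI types, and `re2` comes from nim-regex. A reduced sketch of the same validate-then-wrap pattern, with deliberately simplified patterns so the long URL regexes stay readable above (nim-regex's `match` must cover the whole string, hence the trailing `.*`):

import regex

type EthRpcUrl = distinct string

proc parseCmdArg*(T: type EthRpcUrl, s: string): T =
  # reject ws/wss early with a targeted message, then require http/https;
  # anything surviving both checks is wrapped into the distinct type
  let wsPattern = re2"^(wss?):\/\/.*"
  let httpPattern = re2"^(https?):\/\/.*"
  if regex.match(s, wsPattern):
    raise newException(
      ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL"
    )
  if not regex.match(s, httpPattern):
    raise newException(ValueError, "Invalid HTTP RPC URL")
  return EthRpcUrl(s)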

proc loadConfig*(T: type NetworkMonitorConf): Result[T, string] =
  try:
    let conf = NetworkMonitorConf.load(version=git_version)
    let conf = NetworkMonitorConf.load(version = git_version)
    ok(conf)
  except CatchableError:
    err(getCurrentExceptionMsg())

@ -4,7 +4,7 @@ else:
  {.push raises: [].}

import
  std/[json,tables,sequtils],
  std/[json, tables, sequtils],
  chronicles,
  chronicles/topics_registry,
  chronos,
@ -26,32 +26,29 @@ logScope:
#discovery_message_requests_outgoing_total{response="no_response"}

declarePublicGauge networkmonitor_peer_type_as_per_enr,
                   "Number of peers supporting each capability according to the ENR",
                   labels = ["capability"]
  "Number of peers supporting each capability according to the ENR",
  labels = ["capability"]

declarePublicGauge networkmonitor_peer_type_as_per_protocol,
                   "Number of peers supporting each protocol, after a successful connection) ",
                   labels = ["protocols"]
  "Number of peers supporting each protocol, after a successful connection) ",
  labels = ["protocols"]

declarePublicGauge networkmonitor_peer_user_agents,
                   "Number of peers with each user agent",
                   labels = ["user_agent"]
  "Number of peers with each user agent", labels = ["user_agent"]

declarePublicHistogram networkmonitor_peer_ping,
                       "Histogram tracking ping durations for discovered peers",
                       buckets = [100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 2000.0, Inf]
  "Histogram tracking ping durations for discovered peers",
  buckets =
    [100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 2000.0, Inf]

declarePublicGauge networkmonitor_peer_count,
                   "Number of discovered peers",
                   labels = ["connected"]
  "Number of discovered peers", labels = ["connected"]

declarePublicGauge networkmonitor_peer_country_count,
                   "Number of peers per country",
                   labels = ["country"]
  "Number of peers per country", labels = ["country"]

type
  CustomPeerInfo* = object
    # populated after discovery
  CustomPeerInfo* = object # populated after discovery
    lastTimeDiscovered*: int64
    discovered*: int64
    peerId*: string
@ -80,23 +77,32 @@ type
  # stores the content topic and the count of rx messages
  ContentTopicMessageTableRef* = TableRef[string, int]

proc installHandler*(router: var RestRouter,
                     allPeers: CustomPeersTableRef,
                     numMessagesPerContentTopic: ContentTopicMessageTableRef) =
  router.api(MethodGet, "/allpeersinfo") do () -> RestApiResponse:
proc installHandler*(
    router: var RestRouter,
    allPeers: CustomPeersTableRef,
    numMessagesPerContentTopic: ContentTopicMessageTableRef,
) =
  router.api(MethodGet, "/allpeersinfo") do() -> RestApiResponse:
    let values = toSeq(allPeers.values())
    return RestApiResponse.response(values.toJson(), contentType="application/json")
  router.api(MethodGet, "/contenttopics") do () -> RestApiResponse:
    return RestApiResponse.response(values.toJson(), contentType = "application/json")
  router.api(MethodGet, "/contenttopics") do() -> RestApiResponse:
    # TODO: toJson() includes the hash
    return RestApiResponse.response($(%numMessagesPerContentTopic), contentType="application/json")
    return RestApiResponse.response(
      $(%numMessagesPerContentTopic), contentType = "application/json"
    )

proc startMetricsServer*(serverIp: IpAddress, serverPort: Port): Result[void, string] =
  info "Starting metrics HTTP server", serverIp, serverPort
  info "Starting metrics HTTP server", serverIp, serverPort

  try:
    startMetricsHttpServer($serverIp, serverPort)
  except Exception as e:
    error("Failed to start metrics HTTP server", serverIp=serverIp, serverPort=serverPort, msg=e.msg)
  try:
    startMetricsHttpServer($serverIp, serverPort)
  except Exception as e:
    error(
      "Failed to start metrics HTTP server",
      serverIp = serverIp,
      serverPort = serverPort,
      msg = e.msg,
    )

  info "Metrics HTTP server started", serverIp, serverPort
  ok()
  info "Metrics HTTP server started", serverIp, serverPort
  ok()

@ -10,15 +10,14 @@ import
  chronicles,
  chronicles/topics_registry,
  chronos,
  presto/[client,common]
  presto/[client, common]

type
  NodeLocation* = object
    country*: string
    city*: string
    lat*: string
    long*: string
    isp*: string
type NodeLocation* = object
  country*: string
  city*: string
  lat*: string
  long*: string
  isp*: string

proc flatten*[T](a: seq[seq[T]]): seq[T] =
  var aFlat = newSeq[T](0)
@ -26,8 +25,9 @@ proc flatten*[T](a: seq[seq[T]]): seq[T] =
    aFlat &= subseq
  return aFlat

proc decodeBytes*(t: typedesc[NodeLocation], value: openArray[byte],
                  contentType: Opt[ContentTypeData]): RestResult[NodeLocation] =
proc decodeBytes*(
    t: typedesc[NodeLocation], value: openArray[byte], contentType: Opt[ContentTypeData]
): RestResult[NodeLocation] =
  var res: string
  if len(value) > 0:
    res = newString(len(value))
@ -35,19 +35,23 @@ proc decodeBytes*(t: typedesc[NodeLocation], value: openArray[byte],
  try:
    let jsonContent = parseJson(res)
    if $jsonContent["status"].getStr() != "success":
      error "query failed", result=jsonContent
      error "query failed", result = jsonContent
      return err("query failed")
    return ok(NodeLocation(
      country: jsonContent["country"].getStr(),
      city: jsonContent["city"].getStr(),
      lat: $jsonContent["lat"].getFloat(),
      long: $jsonContent["lon"].getFloat(),
      isp: jsonContent["isp"].getStr()
    ))
    return ok(
      NodeLocation(
        country: jsonContent["country"].getStr(),
        city: jsonContent["city"].getStr(),
        lat: $jsonContent["lat"].getFloat(),
        long: $jsonContent["lon"].getFloat(),
        isp: jsonContent["isp"].getStr(),
      )
    )
  except Exception:
    return err("failed to get the location: " & getCurrentExceptionMsg())

proc encodeString*(value: string): RestResult[string] =
  ok(value)

proc ipToLocation*(ip: string): RestResponse[NodeLocation] {.rest, endpoint: "json/{ip}", meth: MethodGet.}
proc ipToLocation*(
  ip: string
): RestResponse[NodeLocation] {.rest, endpoint: "json/{ip}", meth: MethodGet.}
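
The `{.rest.}` pragma above is presto's client-side codegen: the declaration becomes an HTTP GET whose `{ip}` segment is filled from the argument and whose body is decoded via the `decodeBytes` overload. A hedged usage sketch; `RestClientRef.new` is called as in networkmonitor.nim earlier in this commit, minus the connect timeout:

import chronos
import presto/client

# assumes the NodeLocation type and the decodeBytes/encodeString/ipToLocation
# declarations from the file above are in scope
proc locate(ip: string) {.async.} =
  let restClient = RestClientRef.new(url = "http://ip-api.com").get()
  # expands to GET http://ip-api.com/json/<ip>; data is the decoded NodeLocation
  let response = await restClient.ipToLocation(ip)
  echo response.data.country, " / ", response.data.city

waitFor locate("8.8.8.8")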

@ -1,8 +1,4 @@
import
  osproc,
  os,
  httpclient,
  strutils
import osproc, os, httpclient, strutils

proc getPublicIP(): string =
  let client = newHttpClient()
@ -14,29 +10,28 @@ proc getPublicIP(): string =
  return "127.0.0.1"

# Function to generate a self-signed certificate
proc generateSelfSignedCertificate*(certPath: string, keyPath: string) : int =

proc generateSelfSignedCertificate*(certPath: string, keyPath: string): int =
  # Ensure the OpenSSL is installed
  if findExe("openssl") == "":
    echo "OpenSSL is not installed or not in the PATH."
    return 1

  let publicIP = getPublicIP()


  if publicIP != "127.0.0.1":
    echo "Your public IP address is: ", publicIP

  echo "Your public IP address is: ", publicIP

  # Command to generate private key and cert
  let
    cmd = "openssl req -x509 -newkey rsa:4096 -keyout " & keyPath & " -out " & certPath &
          " -sha256 -days 3650 -nodes -subj '/C=XX/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=" &
          publicIP & "'"
  let
    cmd =
      "openssl req -x509 -newkey rsa:4096 -keyout " & keyPath & " -out " & certPath &
      " -sha256 -days 3650 -nodes -subj '/C=XX/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=" &
      publicIP & "'"
    res = execCmd(cmd)

  if res == 0:
    echo "Successfully generated self-signed certificate and key."
  else:
    echo "Failed to generate certificate and key."


  return res
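
A quick usage sketch for the helper above. The paths are illustrative; wakucanary (next hunk) derives them from its `--websocket-secure-*` flags and its CertsDirectory constant instead:

import std/os

# assumes generateSelfSignedCertificate from the module above
let certsDir = "./certs" # illustrative; wakucanary uses CertsDirectory
if not dirExists(certsDir):
  createDir(certsDir)

# shells out to `openssl req -x509 ...` with CN set to the public IP
let rc = generateSelfSignedCertificate(certsDir / "cert.pem", certsDir / "key.pem")
if rc != 0:
  quit("certificate generation failed", rc)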

@ -30,52 +30,58 @@ const WebSocketPortOffset = 1000
const CertsDirectory = "./certs"

# cli flags
type
  WakuCanaryConf* = object
    address* {.
      desc: "Multiaddress of the peer node to attempt to dial",
      defaultValue: "",
      name: "address",
      abbr: "a".}: string
type WakuCanaryConf* = object
  address* {.
    desc: "Multiaddress of the peer node to attempt to dial",
    defaultValue: "",
    name: "address",
    abbr: "a"
  .}: string

    timeout* {.
      desc: "Timeout to consider that the connection failed",
      defaultValue: chronos.seconds(10),
      name: "timeout",
      abbr: "t".}: chronos.Duration
  timeout* {.
    desc: "Timeout to consider that the connection failed",
    defaultValue: chronos.seconds(10),
    name: "timeout",
    abbr: "t"
  .}: chronos.Duration

    protocols* {.
      desc: "Protocol required to be supported: store,relay,lightpush,filter (can be used multiple times)",
      name: "protocol",
      abbr: "p".}: seq[string]
  protocols* {.
    desc:
      "Protocol required to be supported: store,relay,lightpush,filter (can be used multiple times)",
    name: "protocol",
    abbr: "p"
  .}: seq[string]

    logLevel* {.
      desc: "Sets the log level",
      defaultValue: LogLevel.INFO,
      name: "log-level",
      abbr: "l".}: LogLevel
  logLevel* {.
    desc: "Sets the log level",
    defaultValue: LogLevel.INFO,
    name: "log-level",
    abbr: "l"
  .}: LogLevel

    nodePort* {.
      desc: "Listening port for waku node",
      defaultValue: 60000,
      name: "node-port",
      abbr: "np".}: uint16
  nodePort* {.
    desc: "Listening port for waku node",
    defaultValue: 60000,
    name: "node-port",
    abbr: "np"
  .}: uint16

    ## websocket secure config
    websocketSecureKeyPath* {.
      desc: "Secure websocket key path: '/path/to/key.txt' ",
      defaultValue: ""
      name: "websocket-secure-key-path".}: string
  ## websocket secure config
  websocketSecureKeyPath* {.
    desc: "Secure websocket key path: '/path/to/key.txt' ",
    defaultValue: "",
    name: "websocket-secure-key-path"
  .}: string

    websocketSecureCertPath* {.
      desc: "Secure websocket Certificate path: '/path/to/cert.txt' ",
      defaultValue: ""
      name: "websocket-secure-cert-path".}: string
  websocketSecureCertPath* {.
    desc: "Secure websocket Certificate path: '/path/to/cert.txt' ",
    defaultValue: "",
    name: "websocket-secure-cert-path"
  .}: string

    ping* {.
      desc: "Ping the peer node to measure latency",
      defaultValue: true,
      name: "ping" .}: bool
  ping* {.
    desc: "Ping the peer node to measure latency", defaultValue: true, name: "ping"
  .}: bool

proc parseCmdArg*(T: type chronos.Duration, p: string): T =
  try:
@ -88,17 +94,15 @@ proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] =

# checks if rawProtocols (skipping version) are supported in nodeProtocols
proc areProtocolsSupported(
    rawProtocols: seq[string],
    nodeProtocols: seq[string]): bool =

    rawProtocols: seq[string], nodeProtocols: seq[string]
): bool =
  var numOfSupportedProt: int = 0

  for nodeProtocol in nodeProtocols:
    for rawProtocol in rawProtocols:
      let protocolTag = ProtocolsTable[rawProtocol]
      if nodeProtocol.startsWith(protocolTag):
        info "Supported protocol ok", expected = protocolTag,
          supported = nodeProtocol
        info "Supported protocol ok", expected = protocolTag, supported = nodeProtocol
        numOfSupportedProt += 1
        break

@ -107,26 +111,29 @@ proc areProtocolsSupported(

  return false
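
The check above counts matches by codec prefix via ProtocolsTable, so protocol versions are ignored. A toy, self-contained variant; the table values only approximate the real Waku codecs, and the final comparison is a guess at the proc's elided tail:

import std/[tables, strutils]

# stand-in for wakucanary's ProtocolsTable; values approximate the real codecs
let ProtocolsTable = {"store": "/vac/waku/store/", "relay": "/vac/waku/relay/"}.toTable()

proc areProtocolsSupported(rawProtocols, nodeProtocols: seq[string]): bool =
  # every requested protocol must appear, by prefix, among the node's codecs
  var numOfSupportedProt = 0
  for nodeProtocol in nodeProtocols:
    for rawProtocol in rawProtocols:
      if nodeProtocol.startsWith(ProtocolsTable[rawProtocol]):
        numOfSupportedProt += 1
        break
  numOfSupportedProt == rawProtocols.len

echo areProtocolsSupported(
  @["store"], @["/vac/waku/store/2.0.0-beta4", "/vac/waku/relay/2.0.0"]
) # true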

proc pingNode(node: WakuNode, peerInfo: RemotePeerInfo): Future[void] {.async, gcsafe.} =
proc pingNode(
    node: WakuNode, peerInfo: RemotePeerInfo
): Future[void] {.async, gcsafe.} =
  try:
    let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
    let pingDelay = await node.libp2pPing.ping(conn)
    info "Peer response time (ms)", peerId = peerInfo.peerId, ping=pingDelay.millis

    info "Peer response time (ms)", peerId = peerInfo.peerId, ping = pingDelay.millis
  except CatchableError:
    var msg = getCurrentExceptionMsg()
    if msg == "Future operation cancelled!":
      msg = "timedout"
    error "Failed to ping the peer", peer=peerInfo, err=msg
    error "Failed to ping the peer", peer = peerInfo, err = msg

proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
  let conf: WakuCanaryConf = WakuCanaryConf.load()

  # create dns resolver
  let
    nameServers = @[
      initTAddress(parseIpAddress("1.1.1.1"), Port(53)),
      initTAddress(parseIpAddress("1.0.0.1"), Port(53))]
    nameServers =
      @[
        initTAddress(parseIpAddress("1.1.1.1"), Port(53)),
        initTAddress(parseIpAddress("1.0.0.1"), Port(53)),
      ]
    resolver: DnsResolver = DnsResolver.new(nameServers)

  if conf.logLevel != LogLevel.NONE:
@ -158,14 +165,16 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
    nodeTcpPort = Port(conf.nodePort)
    isWs = peer.addrs[0].contains(multiCodec("ws")).get()
    isWss = peer.addrs[0].contains(multiCodec("wss")).get()
    keyPath = if conf.websocketSecureKeyPath.len > 0:
      conf.websocketSecureKeyPath
    else:
      CertsDirectory & "/key.pem"
    certPath = if conf.websocketSecureCertPath.len > 0:
      conf.websocketSecureCertPath
    else:
      CertsDirectory & "/cert.pem"
    keyPath =
      if conf.websocketSecureKeyPath.len > 0:
        conf.websocketSecureKeyPath
      else:
        CertsDirectory & "/key.pem"
    certPath =
      if conf.websocketSecureCertPath.len > 0:
        conf.websocketSecureCertPath
      else:
        CertsDirectory & "/cert.pem"

  var builder = WakuNodeBuilder.init()
  builder.withNodeKey(nodeKey)
@ -183,12 +192,13 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
  let recordRes = enrBuilder.build()
  let record =
    if recordRes.isErr():
      error "failed to create enr record", error=recordRes.error
      error "failed to create enr record", error = recordRes.error
      quit(QuitFailure)
    else: recordRes.get()
    else:
      recordRes.get()

  if isWss and (conf.websocketSecureKeyPath.len == 0 or
                conf.websocketSecureCertPath.len == 0):
  if isWss and
      (conf.websocketSecureKeyPath.len == 0 or conf.websocketSecureCertPath.len == 0):
    info "WebSocket Secure requires key and certificate. Generating them"
    if not dirExists(CertsDirectory):
      createDir(CertsDirectory)
@ -199,9 +209,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
  builder.withRecord(record)
  builder.withNetworkConfiguration(netConfig.tryGet())
  builder.withSwitchConfiguration(
    secureKey = some(keyPath),
    secureCert = some(certPath),
    nameResolver = resolver,
    secureKey = some(keyPath), secureCert = some(certPath), nameResolver = resolver
  )

  let node = builder.build().tryGet()
@ -215,7 +223,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =

  await node.start()

  var pingFut:Future[bool]
  var pingFut: Future[bool]
  if conf.ping:
    pingFut = pingNode(node, peer).withTimeout(conf.timeout)

@ -233,8 +241,8 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
  if conStatus in [Connected, CanConnect]:
    let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId]
    if not areProtocolsSupported(conf.protocols, nodeProtocols):
      error "Not all protocols are supported", expected = conf.protocols,
        supported = nodeProtocols
      error "Not all protocols are supported",
        expected = conf.protocols, supported = nodeProtocols
      return 1
  elif conStatus == CannotConnect:
    error "Could not connect", peerId = peer.peerId

@ -57,7 +57,6 @@ import
logScope:
  topics = "wakunode app"


# Git version in git describe format (defined at compile time)
const git_version* {.strdefine.} = "n/a"

@ -78,7 +77,6 @@ type

  AppResult*[T] = Result[T, string]


func node*(app: App): WakuNode =
  app.node

@ -87,14 +85,12 @@ func version*(app: App): string =

## Retrieve dynamic bootstrap nodes (DNS discovery)

proc retrieveDynamicBootstrapNodes*(dnsDiscovery: bool,
                                    dnsDiscoveryUrl: string,
                                    dnsDiscoveryNameServers: seq[IpAddress]):
                                    Result[seq[RemotePeerInfo], string] =

proc retrieveDynamicBootstrapNodes*(
    dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress]
): Result[seq[RemotePeerInfo], string] =
  if dnsDiscovery and dnsDiscoveryUrl != "":
    # DNS discovery
    debug "Discovering nodes using Waku DNS discovery", url=dnsDiscoveryUrl
    debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl

    var nameServers: seq[TransportAddress]
    for ip in dnsDiscoveryNameServers:
@ -103,14 +99,16 @@ proc retrieveDynamicBootstrapNodes*(dnsDiscovery: bool,
    let dnsResolver = DnsResolver.new(nameServers)

    proc resolver(domain: string): Future[string] {.async, gcsafe.} =
      trace "resolving", domain=domain
      trace "resolving", domain = domain
      let resolved = await dnsResolver.resolveTxt(domain)
      return resolved[0] # Use only first answer

    var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver)
    if wakuDnsDiscovery.isOk():
      return wakuDnsDiscovery.get().findPeers()
        .mapErr(proc (e: cstring): string = $e)
      return wakuDnsDiscovery.get().findPeers().mapErr(
        proc(e: cstring): string =
          $e
      )
    else:
      warn "Failed to init Waku DNS discovery"

@ -120,47 +118,48 @@ proc retrieveDynamicBootstrapNodes*(dnsDiscovery: bool,
## Initialisation

proc init*(T: type App, conf: WakuNodeConf): Result[App, string] =

  var confCopy = conf
  let rng = crypto.newRng()

  if not confCopy.nodekey.isSome():
    let keyRes = crypto.PrivateKey.random(Secp256k1, rng[])
    if keyRes.isErr():
    if keyRes.isErr():
      error "Failed to generate key", error = $keyRes.error
      return err("Failed to generate key: " & $keyRes.error)
    confCopy.nodekey = some(keyRes.get())

  debug "Retrieve dynamic bootstrap nodes"
  let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes(confCopy.dnsDiscovery,
                                                               confCopy.dnsDiscoveryUrl,
                                                               confCopy.dnsDiscoveryNameServers)
  let dynamicBootstrapNodesRes = retrieveDynamicBootstrapNodes(
    confCopy.dnsDiscovery, confCopy.dnsDiscoveryUrl, confCopy.dnsDiscoveryNameServers
  )
  if dynamicBootstrapNodesRes.isErr():
    error "Retrieving dynamic bootstrap nodes failed", error = dynamicBootstrapNodesRes.error
    return err("Retrieving dynamic bootstrap nodes failed: " & dynamicBootstrapNodesRes.error)
    error "Retrieving dynamic bootstrap nodes failed",
      error = dynamicBootstrapNodesRes.error
    return err(
      "Retrieving dynamic bootstrap nodes failed: " & dynamicBootstrapNodesRes.error
    )

  let nodeRes = setupNode(confCopy, some(rng))
  if nodeRes.isErr():
    error "Failed setting up node", error=nodeRes.error
  if nodeRes.isErr():
    error "Failed setting up node", error = nodeRes.error
    return err("Failed setting up node: " & nodeRes.error)

  var app = App(
    version: git_version,
    conf: confCopy,
    rng: rng,
    key: confCopy.nodekey.get(),
    node: nodeRes.get(),
    dynamicBootstrapNodes: dynamicBootstrapNodesRes.get()
  )
    version: git_version,
    conf: confCopy,
    rng: rng,
    key: confCopy.nodekey.get(),
    node: nodeRes.get(),
    dynamicBootstrapNodes: dynamicBootstrapNodesRes.get(),
  )

  ok(app)

## Setup DiscoveryV5

proc setupDiscoveryV5*(app: App): WakuDiscoveryV5 =
  let dynamicBootstrapEnrs = app.dynamicBootstrapNodes
    .filterIt(it.hasUdpPort())
    .mapIt(it.enr.get())
  let dynamicBootstrapEnrs =
    app.dynamicBootstrapNodes.filterIt(it.hasUdpPort()).mapIt(it.enr.get())

  var discv5BootstrapEnrs: seq[enr.Record]

@ -170,9 +169,9 @@ proc setupDiscoveryV5*(app: App): WakuDiscoveryV5 =

  discv5BootstrapEnrs.add(dynamicBootstrapEnrs)

  let discv5Config = DiscoveryConfig.init(app.conf.discv5TableIpLimit,
                                          app.conf.discv5BucketIpLimit,
                                          app.conf.discv5BitsPerHop)
  let discv5Config = DiscoveryConfig.init(
    app.conf.discv5TableIpLimit, app.conf.discv5BucketIpLimit, app.conf.discv5BitsPerHop
  )

  let discv5UdpPort = Port(uint16(app.conf.discv5UdpPort) + app.conf.portsShift)

@ -193,9 +192,9 @@ proc setupDiscoveryV5*(app: App): WakuDiscoveryV5 =
    app.node.topicSubscriptionQueue,
  )

proc getPorts(listenAddrs: seq[MultiAddress]):
  AppResult[tuple[tcpPort, websocketPort: Option[Port]]] =

proc getPorts(
    listenAddrs: seq[MultiAddress]
): AppResult[tuple[tcpPort, websocketPort: Option[Port]]] =
  var tcpPort, websocketPort = none(Port)

  for a in listenAddrs:
@ -212,7 +211,6 @@ proc getPorts(listenAddrs: seq[MultiAddress]):
  return ok((tcpPort: tcpPort, websocketPort: websocketPort))

proc getRunningNetConfig(app: App): AppResult[NetConfig] =

  var conf = app.conf
  let (tcpPort, websocketPort) = getPorts(app.node.switch.peerInfo.listenAddrs).valueOr:
    return err("Could not retrieve ports " & error)
@ -230,7 +228,6 @@ proc getRunningNetConfig(app: App): AppResult[NetConfig] =
  return ok(netConf)

proc updateEnr(app: var App, netConf: NetConfig): AppResult[void] =

  let record = enrConfiguration(app.conf, netConf, app.key).valueOr:
    return err("ENR setup failed: " & error)

@ -242,9 +239,7 @@ proc updateEnr(app: var App, netConf: NetConfig): AppResult[void] =
  return ok()

proc updateApp(app: var App): AppResult[void] =

  if app.conf.tcpPort == Port(0) or app.conf.websocketPort == Port(0):

    let netConf = getRunningNetConfig(app).valueOr:
      return err("error calling updateNetConfig: " & $error)

@ -258,8 +253,8 @@ proc updateApp(app: var App): AppResult[void] =
  return ok()

proc startApp*(app: var App): AppResult[void] =

  let nodeRes = catch: (waitFor startNode(app.node, app.conf, app.dynamicBootstrapNodes))
  let nodeRes = catch:
    (waitFor startNode(app.node, app.conf, app.dynamicBootstrapNodes))
  if nodeRes.isErr():
    return err("exception starting node: " & nodeRes.error.msg)
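
The `catch:` expression being reflowed here is nim-results' exception bridge (re-exported through stew/results): it evaluates the block and returns a Result whose error is the raised `ref CatchableError`. A minimal sketch of that pattern:

import stew/results

proc mayThrow(n: int): int =
  if n < 0:
    raise newException(ValueError, "negative")
  n * 2

# catch: turns a raised exception into err(ref CatchableError)
let r = catch: mayThrow(-1)
if r.isErr():
  echo "exception starting node: ", r.error.msg # mirrors the hunk's handling
else:
  echo r.get()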

@ -273,10 +268,11 @@ proc startApp*(app: var App): AppResult[void] =
  ## Discv5
  if app.conf.discv5Discovery:
    app.wakuDiscV5 = some(app.setupDiscoveryV5())


  if app.wakuDiscv5.isSome():
    let wakuDiscv5 = app.wakuDiscv5.get()
    let catchRes = catch: (waitFor wakuDiscv5.start())
    let catchRes = catch:
      (waitFor wakuDiscv5.start())
    let startRes = catchRes.valueOr:
      return err("failed to start waku discovery v5: " & catchRes.error.msg)

@ -285,38 +281,37 @@ proc startApp*(app: var App): AppResult[void] =

  return ok()



## Monitoring and external interfaces

proc startRestServer(app: App,
                     address: IpAddress,
                     port: Port,
                     conf: WakuNodeConf):
                     AppResult[WakuRestServerRef] =

proc startRestServer(
    app: App, address: IpAddress, port: Port, conf: WakuNodeConf
): AppResult[WakuRestServerRef] =
  # Used to register api endpoints that are not currently installed as keys,
  # values are holding error messages to be returned to the client
  var notInstalledTab: Table[string, string] = initTable[string, string]()

  let requestErrorHandler : RestRequestErrorHandler = proc (error: RestRequestError,
                                                            request: HttpRequestRef):
                                                            Future[HttpResponseRef]
                                                            {.async: (raises: [CancelledError]).} =
  let requestErrorHandler: RestRequestErrorHandler = proc(
      error: RestRequestError, request: HttpRequestRef
  ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =
    try:
      case error
      of RestRequestError.Invalid:
        return await request.respond(Http400, "Invalid request", HttpTable.init())
      of RestRequestError.NotFound:
        let paths = request.rawPath.split("/")
        let rootPath = if len(paths) > 1:
          paths[1]
        else:
          ""
        let rootPath =
          if len(paths) > 1:
            paths[1]
          else:
            ""
        notInstalledTab.withValue(rootPath, errMsg):
          return await request.respond(Http404, errMsg[], HttpTable.init())
        do:
          return await request.respond(Http400, "Bad request initiated. Invalid path or method used.", HttpTable.init())
          return await request.respond(
            Http400,
            "Bad request initiated. Invalid path or method used.",
            HttpTable.init(),
          )
      of RestRequestError.InvalidContentBody:
        return await request.respond(Http400, "Invalid content body", HttpTable.init())
      of RestRequestError.InvalidContentType:
@ -329,14 +324,19 @@ proc startRestServer(app: App,

    return defaultResponse()

  let allowedOrigin = if len(conf.restAllowOrigin) > 0 :
    some(conf.restAllowOrigin.join(","))
  else:
    none(string)
  let allowedOrigin =
    if len(conf.restAllowOrigin) > 0:
      some(conf.restAllowOrigin.join(","))
    else:
      none(string)

  let server = ? newRestHttpServer(address, port,
                                   allowedOrigin = allowedOrigin,
                                   requestErrorHandler = requestErrorHandler)
  let server =
    ?newRestHttpServer(
      address,
      port,
      allowedOrigin = allowedOrigin,
      requestErrorHandler = requestErrorHandler,
    )

  ## Admin REST API
  if conf.restAdmin:
@ -364,59 +364,65 @@ proc startRestServer(app: App,

    installRelayApiHandlers(server.router, app.node, cache)
  else:
    notInstalledTab["relay"] = "/relay endpoints are not available. Please check your configuration: --relay"
    notInstalledTab["relay"] =
      "/relay endpoints are not available. Please check your configuration: --relay"

  ## Filter REST API
  if conf.filternode != "" and
     app.node.wakuFilterClient != nil and
     app.node.wakuFilterClientLegacy != nil:

  if conf.filternode != "" and app.node.wakuFilterClient != nil and
      app.node.wakuFilterClientLegacy != nil:
    let legacyFilterCache = MessageCache.init()
    rest_legacy_filter_api.installLegacyFilterRestApiHandlers(server.router, app.node, legacyFilterCache)
|
||||
rest_legacy_filter_api.installLegacyFilterRestApiHandlers(
|
||||
server.router, app.node, legacyFilterCache
|
||||
)
|
||||
|
||||
let filterCache = MessageCache.init()
|
||||
|
||||
let filterDiscoHandler =
|
||||
if app.wakuDiscv5.isSome():
|
||||
some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Filter))
|
||||
else: none(DiscoveryHandler)
|
||||
else:
|
||||
none(DiscoveryHandler)
|
||||
|
||||
rest_filter_api.installFilterRestApiHandlers(
|
||||
server.router,
|
||||
app.node,
|
||||
filterCache,
|
||||
filterDiscoHandler,
|
||||
server.router, app.node, filterCache, filterDiscoHandler
|
||||
)
|
||||
else:
|
||||
notInstalledTab["filter"] = "/filter endpoints are not available. Please check your configuration: --filternode"
|
||||
notInstalledTab["filter"] =
|
||||
"/filter endpoints are not available. Please check your configuration: --filternode"
|
||||
|
||||
## Store REST API
|
||||
let storeDiscoHandler =
|
||||
if app.wakuDiscv5.isSome():
|
||||
some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Store))
|
||||
else: none(DiscoveryHandler)
|
||||
else:
|
||||
none(DiscoveryHandler)
|
||||
|
||||
installStoreApiHandlers(server.router, app.node, storeDiscoHandler)
|
||||
|
||||
## Light push API
|
||||
if conf.lightpushnode != "" and
|
||||
app.node.wakuLightpushClient != nil:
|
||||
if conf.lightpushnode != "" and app.node.wakuLightpushClient != nil:
|
||||
let lightDiscoHandler =
|
||||
if app.wakuDiscv5.isSome():
|
||||
some(defaultDiscoveryHandler(app.wakuDiscv5.get(), Lightpush))
|
||||
else: none(DiscoveryHandler)
|
||||
else:
|
||||
none(DiscoveryHandler)
|
||||
|
||||
rest_lightpush_api.installLightPushRequestHandler(server.router, app.node, lightDiscoHandler)
|
||||
rest_lightpush_api.installLightPushRequestHandler(
|
||||
server.router, app.node, lightDiscoHandler
|
||||
)
|
||||
else:
|
||||
notInstalledTab["lightpush"] = "/lightpush endpoints are not available. Please check your configuration: --lightpushnode"
|
||||
notInstalledTab["lightpush"] =
|
||||
"/lightpush endpoints are not available. Please check your configuration: --lightpushnode"
|
||||
|
||||
server.start()
|
||||
info "Starting REST HTTP server", url = "http://" & $address & ":" & $port & "/"
|
||||
|
||||
ok(server)
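
notInstalledTab relies on withValue from std/tables, which exposes a pointer to the stored value when the key exists and falls through to the do: branch otherwise. A small runnable sketch:

import std/tables

var notInstalled = initTable[string, string]()
notInstalled["relay"] = "/relay endpoints are not available"

notInstalled.withValue("relay", errMsg):
  echo errMsg[]            # key found: errMsg is a pointer to the stored value
do:
  echo "unknown root path" # key missing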

proc startMetricsServer(serverIp: IpAddress, serverPort: Port): AppResult[MetricsHttpServerRef] =
  info "Starting metrics HTTP server", serverIp= $serverIp, serverPort= $serverPort
proc startMetricsServer(
    serverIp: IpAddress, serverPort: Port
): AppResult[MetricsHttpServerRef] =
  info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort

  let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort)
  if metricsServerRes.isErr():
@ -428,7 +434,7 @@ proc startMetricsServer(serverIp: IpAddress, serverPort: Port): AppResult[Metric
  except CatchableError:
    return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())

  info "Metrics HTTP server started", serverIp= $serverIp, serverPort= $serverPort
  info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort
  ok(server)

proc startMetricsLogging(): AppResult[void] =
@ -437,28 +443,34 @@ proc startMetricsLogging(): AppResult[void] =

proc setupMonitoringAndExternalInterfaces*(app: var App): AppResult[void] =
  if app.conf.rest:
    let startRestServerRes = startRestServer(app, app.conf.restAddress, Port(app.conf.restPort + app.conf.portsShift), app.conf)
    let startRestServerRes = startRestServer(
      app, app.conf.restAddress, Port(app.conf.restPort + app.conf.portsShift), app.conf
    )
    if startRestServerRes.isErr():
      error "Starting REST server failed. Continuing in current state.", error=startRestServerRes.error
      error "Starting REST server failed. Continuing in current state.",
        error = startRestServerRes.error
    else:
      app.restServer = some(startRestServerRes.value)

  if app.conf.metricsServer:
    let startMetricsServerRes = startMetricsServer(app.conf.metricsServerAddress, Port(app.conf.metricsServerPort + app.conf.portsShift))
    let startMetricsServerRes = startMetricsServer(
      app.conf.metricsServerAddress,
      Port(app.conf.metricsServerPort + app.conf.portsShift),
    )
    if startMetricsServerRes.isErr():
      error "Starting metrics server failed. Continuing in current state.", error=startMetricsServerRes.error
      error "Starting metrics server failed. Continuing in current state.",
        error = startMetricsServerRes.error
    else:
      app.metricsServer = some(startMetricsServerRes.value)

  if app.conf.metricsLogging:
    let startMetricsLoggingRes = startMetricsLogging()
    if startMetricsLoggingRes.isErr():
      error "Starting metrics console logging failed. Continuing in current state.", error=startMetricsLoggingRes.error
      error "Starting metrics console logging failed. Continuing in current state.",
        error = startMetricsLoggingRes.error

  ok()

# App shutdown

proc stop*(app: App): Future[void] {.async: (raises: [Exception]).} =

@ -33,13 +33,13 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
    pubsubTopics:
      @[
        "/waku/2/rs/1/0", "/waku/2/rs/1/1", "/waku/2/rs/1/2", "/waku/2/rs/1/3",
        "/waku/2/rs/1/4", "/waku/2/rs/1/5", "/waku/2/rs/1/6", "/waku/2/rs/1/7"
        "/waku/2/rs/1/4", "/waku/2/rs/1/5", "/waku/2/rs/1/6", "/waku/2/rs/1/7",
      ],
    discv5Discovery: true,
    discv5BootstrapNodes:
      @[
        "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Ugl_r25UHQJ3f1rIRrpzxJXSMaJe4yk1XFSAYJpZIJ2NIJpcISygI2rim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJATXRSRSUyTw_QLB6H_U3oziVQgNRgrXpK7wp2AMyNxYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
        "enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9uAYJpZIJ2NIJpcIQiEAFDim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQMIJwesBVgUiBCi8yiXGx7RWylBQkYm1U9dvEy-neLG2YN0Y3CCdl-DdWRwgiMohXdha3UyDw",
        "enr:-QEkuEDzQyIAhs-CgBHIrJqtBv3EY1uP1Psrc-y8yJKsmxW7dh3DNcq2ergMUWSFVcJNlfcgBeVsFPkgd_QopRIiCV2pAYJpZIJ2NIJpcIQI2ttrim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJIN4qwz3v4r2Q8Bv8zZD0eqBcKw6bdLvdkV7-JLjqIj4N0Y3CCdl-DdWRwgiMohXdha3UyDw"
        "enr:-QEkuEDzQyIAhs-CgBHIrJqtBv3EY1uP1Psrc-y8yJKsmxW7dh3DNcq2ergMUWSFVcJNlfcgBeVsFPkgd_QopRIiCV2pAYJpZIJ2NIJpcIQI2ttrim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJIN4qwz3v4r2Q8Bv8zZD0eqBcKw6bdLvdkV7-JLjqIj4N0Y3CCdl-DdWRwgiMohXdha3UyDw",
      ],
)
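
Note the only content change in the topic and bootstrap lists above: nph appends a trailing comma to the last element of a multi-line collection, so adding an element later touches a single line. Illustratively:

let shards = @[
  "/waku/2/rs/1/0",
  "/waku/2/rs/1/1", # trailing comma: a later append is a one-line diff
]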

@ -32,12 +32,10 @@ proc logConfig(conf: WakuNodeConf) =
    lightpush = conf.lightpush,
    peerExchange = conf.peerExchange

  info "Configuration. Network",
    cluster = conf.clusterId,
    maxPeers = conf.maxRelayPeers
  info "Configuration. Network", cluster = conf.clusterId, maxPeers = conf.maxRelayPeers

  for shard in conf.pubsubTopics:
    info "Configuration. Shards", shard=shard
    info "Configuration. Shards", shard = shard

  for i in conf.discv5BootstrapNodes:
    info "Configuration. Bootstrap nodes", node = i
@ -123,7 +121,7 @@ when isMainModule:

  wakunode2.setupMonitoringAndExternalInterfaces().isOkOr:
    error "Starting monitoring and external interfaces failed", error = error
  quit(QuitFailure)
    quit(QuitFailure)

  debug "Setting up shutdown hooks"
  ## Setup shutdown hooks for this process.

@ -1,11 +1,7 @@
## Example showing how a resource-restricted client may
## subscribe to messages without relay

import
  chronicles,
  chronos,
  stew/byteutils,
  stew/results
import chronicles, chronos, stew/byteutils, stew/results
import
  ../../../waku/common/logging,
  ../../../waku/node/peer_manager,
@ -13,34 +9,42 @@ import
  ../../../waku/waku_filter_v2/client

const
  FilterPeer = "/ip4/104.154.239.128/tcp/30303/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS" # node-01.gc-us-central1-a.wakuv2.test.statusim.net on wakuv2.test
  FilterPeer =
    "/ip4/104.154.239.128/tcp/30303/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS"
    # node-01.gc-us-central1-a.wakuv2.test.statusim.net on wakuv2.test
  FilterPubsubTopic = PubsubTopic("/waku/2/default-waku/proto")
  FilterContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto")

proc unsubscribe(wfc: WakuFilterClient,
                 filterPeer: RemotePeerInfo,
                 filterPubsubTopic: PubsubTopic,
                 filterContentTopic: ContentTopic) {.async.} =
proc unsubscribe(
    wfc: WakuFilterClient,
    filterPeer: RemotePeerInfo,
    filterPubsubTopic: PubsubTopic,
    filterContentTopic: ContentTopic,
) {.async.} =
  notice "unsubscribing from filter"
  let unsubscribeRes = await wfc.unsubscribe(filterPeer, filterPubsubTopic, @[filterContentTopic])
  let unsubscribeRes =
    await wfc.unsubscribe(filterPeer, filterPubsubTopic, @[filterContentTopic])
  if unsubscribeRes.isErr:
    notice "unsubscribe request failed", err=unsubscribeRes.error
    notice "unsubscribe request failed", err = unsubscribeRes.error
  else:
    notice "unsubscribe request successful"

proc messagePushHandler(pubsubTopic: PubsubTopic, message: WakuMessage)
    {.async, gcsafe.} =
proc messagePushHandler(
    pubsubTopic: PubsubTopic, message: WakuMessage
) {.async, gcsafe.} =
  let payloadStr = string.fromBytes(message.payload)
  notice "message received", payload=payloadStr,
    pubsubTopic=pubsubTopic,
    contentTopic=message.contentTopic,
    timestamp=message.timestamp
  notice "message received",
    payload = payloadStr,
    pubsubTopic = pubsubTopic,
    contentTopic = message.contentTopic,
    timestamp = message.timestamp

proc maintainSubscription(wfc: WakuFilterClient,
                          filterPeer: RemotePeerInfo,
                          filterPubsubTopic: PubsubTopic,
                          filterContentTopic: ContentTopic) {.async.} =
proc maintainSubscription(
    wfc: WakuFilterClient,
    filterPeer: RemotePeerInfo,
    filterPubsubTopic: PubsubTopic,
    filterContentTopic: ContentTopic,
) {.async.} =
  while true:
    notice "maintaining subscription"
    # First use filter-ping to check if we have an active subscription
@ -49,10 +53,11 @@ proc maintainSubscription(wfc: WakuFilterClient,
      # No subscription found. Let's subscribe.
      notice "no subscription found. Sending subscribe request"

      let subscribeRes = await wfc.subscribe(filterPeer, filterPubsubTopic, @[filterContentTopic])
      let subscribeRes =
        await wfc.subscribe(filterPeer, filterPubsubTopic, @[filterContentTopic])

      if subscribeRes.isErr():
        notice "subscribe request failed. Quitting.", err=subscribeRes.error
        notice "subscribe request failed. Quitting.", err = subscribeRes.error
        break
      else:
        notice "subscribe request successful."
@ -78,7 +83,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) =
  wfc.registerPushHandler(messagePushHandler)

  # Start maintaining subscription
  asyncSpawn maintainSubscription(wfc, filterPeer, FilterPubsubTopic, FilterContentTopic)
  asyncSpawn maintainSubscription(
    wfc, filterPeer, FilterPubsubTopic, FilterContentTopic
)
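
Taken together, the example's life cycle is: register a push handler, subscribe, keep the subscription alive, and eventually unsubscribe. A condensed sketch reusing the procs and constants defined in this file:

proc demoFilterLifecycle(wfc: WakuFilterClient, peer: RemotePeerInfo) {.async.} =
  wfc.registerPushHandler(messagePushHandler) # deliver pushed messages
  asyncSpawn maintainSubscription(wfc, peer, FilterPubsubTopic, FilterContentTopic)
  await sleepAsync(60_000) # stay subscribed for a minute
  await unsubscribe(wfc, peer, FilterPubsubTopic, FilterContentTopic)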

when isMainModule:
  let rng = newRng()

@ -1,11 +1,7 @@
## Example showing how a resource-restricted client may
## use lightpush to publish messages without relay

import
  chronicles,
  chronos,
  stew/byteutils,
  stew/results
import chronicles, chronos, stew/byteutils, stew/results
import
  ../../../waku/common/logging,
  ../../../waku/node/peer_manager,
@ -13,27 +9,33 @@ import
  ../../../waku/waku_lightpush/client

const
  LightpushPeer = "/ip4/134.209.139.210/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ" # node-01.do-ams3.wakuv2.test.statusim.net on wakuv2.test
  LightpushPeer =
    "/ip4/134.209.139.210/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ"
    # node-01.do-ams3.wakuv2.test.statusim.net on wakuv2.test
  LightpushPubsubTopic = PubsubTopic("/waku/2/default-waku/proto")
  LightpushContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto")

proc publishMessages(wlc: WakuLightpushClient,
                     lightpushPeer: RemotePeerInfo,
                     lightpushPubsubTopic: PubsubTopic,
                     lightpushContentTopic: ContentTopic) {.async.} =
proc publishMessages(
    wlc: WakuLightpushClient,
    lightpushPeer: RemotePeerInfo,
    lightpushPubsubTopic: PubsubTopic,
    lightpushContentTopic: ContentTopic,
) {.async.} =
  while true:
    let text = "hi there i'm a lightpush publisher"
    let message = WakuMessage(payload: toBytes(text), # content of the message
      contentTopic: lightpushContentTopic, # content topic to publish to
      ephemeral: true, # tell store nodes to not store it
      timestamp: getNowInNanosecondTime()) # current timestamp
    let message = WakuMessage(
      payload: toBytes(text), # content of the message
      contentTopic: lightpushContentTopic, # content topic to publish to
      ephemeral: true, # tell store nodes to not store it
      timestamp: getNowInNanosecondTime(),
    ) # current timestamp

    let wlpRes = await wlc.publish(lightpushPubsubTopic, message, lightpushPeer)

    if wlpRes.isOk():
      notice "published message using lightpush", message=message
      notice "published message using lightpush", message = message
    else:
      notice "failed to publish message using lightpush", err=wlpRes.error()
      notice "failed to publish message using lightpush", err = wlpRes.error()

    await sleepAsync(5000) # Publish every 5 seconds

@ -49,7 +51,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) =
  wlc = WakuLightpushClient.new(pm, rng)

  # Start publishing messages
  asyncSpawn publishMessages(wlc, lightpushPeer, LightpushPubsubTopic, LightpushContentTopic)
  asyncSpawn publishMessages(
    wlc, lightpushPeer, LightpushPubsubTopic, LightpushContentTopic
)
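
For a one-shot publish (rather than the 5-second loop above), the same client call applies; a sketch reusing this file's constants:

proc publishOnce(wlc: WakuLightpushClient, peer: RemotePeerInfo) {.async.} =
  let msg = WakuMessage(
    payload: toBytes("one-shot lightpush"),
    contentTopic: LightpushContentTopic,
    ephemeral: true, # ask store nodes not to persist it
    timestamp: getNowInNanosecondTime(),
  )
  (await wlc.publish(LightpushPubsubTopic, msg, peer)).isOkOr:
    notice "one-shot publish failed", err = error
    return
  notice "one-shot publish succeeded"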

when isMainModule:
  let rng = newRng()

@ -1,5 +1,5 @@
import
  std/[tables,times,sequtils],
  std/[tables, times, sequtils],
  stew/byteutils,
  stew/shims/net,
  chronicles,
@ -23,103 +23,113 @@ proc now*(): Timestamp =

# An accessible bootstrap node. See wakuv2.prod fleets.status.im

const bootstrapNode = "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9D" &
                      "OGnZlK0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgn" &
                      "Y0gmlwhAjS3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY" &
                      "24taG9uZ2tvbmctYy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQG" &
                      "H0DeA4lzZWNwMjU2azGhAo0C-VvfgHiXrxZi3umDiooXMGY9FvY" &
                      "j5_d1Q4EeS7eyg3RjcIJ2X4N1ZHCCIyiFd2FrdTIP"
const bootstrapNode =
  "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9D" &
  "OGnZlK0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgn" &
  "Y0gmlwhAjS3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY" &
  "24taG9uZ2tvbmctYy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQG" &
  "H0DeA4lzZWNwMjU2azGhAo0C-VvfgHiXrxZi3umDiooXMGY9FvY" &
  "j5_d1Q4EeS7eyg3RjcIJ2X4N1ZHCCIyiFd2FrdTIP"

# careful if running pub and sub in the same machine
const wakuPort = 60000
const discv5Port = 9000

proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
  # use notice to filter all waku messaging
  setupLogLevel(logging.LogLevel.NOTICE)
  notice "starting publisher", wakuPort=wakuPort, discv5Port=discv5Port
  let
    nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get()
    ip = parseIpAddress("0.0.0.0")
    flags = CapabilitiesBitfield.init(lightpush = false, filter = false, store = false, relay = true)

  var enrBuilder = EnrBuilder.init(nodeKey)

  let recordRes = enrBuilder.build()
  let record =
    if recordRes.isErr():
      error "failed to create enr record", error=recordRes.error
      quit(QuitFailure)
    else: recordRes.get()

  var builder = WakuNodeBuilder.init()
  builder.withNodeKey(nodeKey)
  builder.withRecord(record)
  builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet()
  let node = builder.build().tryGet()

  var bootstrapNodeEnr: enr.Record
  discard bootstrapNodeEnr.fromURI(bootstrapNode)

  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: none(DiscoveryConfig),
    address: ip,
    port: Port(discv5Port),
    privateKey: keys.PrivateKey(nodeKey.skkey),
    bootstrapRecords: @[bootstrapNodeEnr],
    autoupdateRecord: true,
  # use notice to filter all waku messaging
  setupLogLevel(logging.LogLevel.NOTICE)
  notice "starting publisher", wakuPort = wakuPort, discv5Port = discv5Port
  let
    nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get()
    ip = parseIpAddress("0.0.0.0")
    flags = CapabilitiesBitfield.init(
      lightpush = false, filter = false, store = false, relay = true
    )

  # assumes we are behind a firewall, so we do not care about being discoverable
  let wakuDiscv5 = WakuDiscoveryV5.new(
    node.rng,
    discv5Conf,
    some(node.enr),
    some(node.peerManager),
    node.topicSubscriptionQueue,
  )
  var enrBuilder = EnrBuilder.init(nodeKey)

  await node.start()
  await node.mountRelay()
  node.peerManager.start()
  let recordRes = enrBuilder.build()
  let record =
    if recordRes.isErr():
      error "failed to create enr record", error = recordRes.error
      quit(QuitFailure)
    else:
      recordRes.get()

  (await wakuDiscv5.start()).isOkOr:
    error "failed to start discv5", error = error
    quit(1)
  var builder = WakuNodeBuilder.init()
  builder.withNodeKey(nodeKey)
  builder.withRecord(record)
  builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet()
  let node = builder.build().tryGet()

  # wait for a minimum of peers to be connected, otherwise messages won't be gossiped
  while true:
    let numConnectedPeers = node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected)
    if numConnectedPeers >= 6:
      notice "publisher is ready", connectedPeers=numConnectedPeers, required=6
      break
    notice "waiting to be ready", connectedPeers=numConnectedPeers, required=6
    await sleepAsync(5000)
  var bootstrapNodeEnr: enr.Record
  discard bootstrapNodeEnr.fromURI(bootstrapNode)

  # Make sure it matches the publisher. Use default value
  # see spec: https://rfc.vac.dev/spec/23/
  let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto")
  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: none(DiscoveryConfig),
    address: ip,
    port: Port(discv5Port),
    privateKey: keys.PrivateKey(nodeKey.skkey),
    bootstrapRecords: @[bootstrapNodeEnr],
    autoupdateRecord: true,
  )

  # any content topic can be chosen
  let contentTopic = ContentTopic("/examples/1/pubsub-example/proto")
  # assumes we are behind a firewall, so we do not care about being discoverable
  let wakuDiscv5 = WakuDiscoveryV5.new(
    node.rng,
    discv5Conf,
    some(node.enr),
    some(node.peerManager),
    node.topicSubscriptionQueue,
  )

  notice "publisher service started"
  while true:
    let text = "hi there i'm a publisher"
    let message = WakuMessage(payload: toBytes(text), # content of the message
      contentTopic: contentTopic, # content topic to publish to
      ephemeral: true, # tell store nodes to not store it
      timestamp: now()) # current timestamp

    let res = await node.publish(some(pubSubTopic), message)

    if res.isOk:
      notice "published message", text = text, timestamp = message.timestamp, psTopic = pubSubTopic, contentTopic = contentTopic
    else:
      error "failed to publish message", error = res.error

    await sleepAsync(5000)
  await node.start()
  await node.mountRelay()
  node.peerManager.start()

  (await wakuDiscv5.start()).isOkOr:
    error "failed to start discv5", error = error
    quit(1)

  # wait for a minimum of peers to be connected, otherwise messages won't be gossiped
  while true:
    let numConnectedPeers =
      node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected)
    if numConnectedPeers >= 6:
      notice "publisher is ready", connectedPeers = numConnectedPeers, required = 6
      break
    notice "waiting to be ready", connectedPeers = numConnectedPeers, required = 6
    await sleepAsync(5000)

  # Make sure it matches the publisher. Use default value
  # see spec: https://rfc.vac.dev/spec/23/
  let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto")

  # any content topic can be chosen
  let contentTopic = ContentTopic("/examples/1/pubsub-example/proto")

  notice "publisher service started"
  while true:
    let text = "hi there i'm a publisher"
    let message = WakuMessage(
      payload: toBytes(text), # content of the message
      contentTopic: contentTopic, # content topic to publish to
      ephemeral: true, # tell store nodes to not store it
      timestamp: now(),
    ) # current timestamp

    let res = await node.publish(some(pubSubTopic), message)

    if res.isOk:
      notice "published message",
        text = text,
        timestamp = message.timestamp,
        psTopic = pubSubTopic,
        contentTopic = contentTopic
    else:
      error "failed to publish message", error = res.error

    await sleepAsync(5000)

when isMainModule:
let rng = crypto.newRng()
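
The readiness gate above counts connected peers before publishing, since relay messages are only gossiped once the mesh has peers; the counting idiom is std/sequtils' countIt. A tiny sketch with stand-in states (hypothetical values):

import std/sequtils

type ConnState = enum Disconnected, Connected
let book = @[Connected, Disconnected, Connected]
assert book.countIt(it == Connected) == 2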

@ -19,94 +19,100 @@ import
  ../../../waku/factory/builder

# An accessible bootstrap node. See wakuv2.prod fleets.status.im
const bootstrapNode = "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9DOGnZl" &
                      "K0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgnY0gmlwhAjS" &
                      "3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY24taG9uZ2tvbmct" &
                      "Yy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQGH0DeA4lzZWNwMjU2azGh" &
                      "Ao0C-VvfgHiXrxZi3umDiooXMGY9FvYj5_d1Q4EeS7eyg3RjcIJ2X4N1" &
                      "ZHCCIyiFd2FrdTIP"
const bootstrapNode =
  "enr:-Nm4QOdTOKZJKTUUZ4O_W932CXIET-M9NamewDnL78P5u9DOGnZl" &
  "K0JFZ4k0inkfe6iY-0JAaJVovZXc575VV3njeiABgmlkgnY0gmlwhAjS" &
  "3ueKbXVsdGlhZGRyc7g6ADg2MW5vZGUtMDEuYWMtY24taG9uZ2tvbmct" &
  "Yy53YWt1djIucHJvZC5zdGF0dXNpbS5uZXQGH0DeA4lzZWNwMjU2azGh" &
  "Ao0C-VvfgHiXrxZi3umDiooXMGY9FvYj5_d1Q4EeS7eyg3RjcIJ2X4N1" & "ZHCCIyiFd2FrdTIP"

# careful if running pub and sub in the same machine
const wakuPort = 50000
const discv5Port = 8000

proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
  # use notice to filter all waku messaging
  setupLogLevel(logging.LogLevel.NOTICE)
  notice "starting subscriber", wakuPort=wakuPort, discv5Port=discv5Port
  let
    nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
    ip = parseIpAddress("0.0.0.0")
    flags = CapabilitiesBitfield.init(lightpush = false, filter = false, store = false, relay = true)

  var enrBuilder = EnrBuilder.init(nodeKey)

  let recordRes = enrBuilder.build()
  let record =
    if recordRes.isErr():
      error "failed to create enr record", error=recordRes.error
      quit(QuitFailure)
    else: recordRes.get()

  var builder = WakuNodeBuilder.init()
  builder.withNodeKey(nodeKey)
  builder.withRecord(record)
  builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet()
  let node = builder.build().tryGet()

  var bootstrapNodeEnr: enr.Record
  discard bootstrapNodeEnr.fromURI(bootstrapNode)

  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: none(DiscoveryConfig),
    address: ip,
    port: Port(discv5Port),
    privateKey: keys.PrivateKey(nodeKey.skkey),
    bootstrapRecords: @[bootstrapNodeEnr],
    autoupdateRecord: true,
  # use notice to filter all waku messaging
  setupLogLevel(logging.LogLevel.NOTICE)
  notice "starting subscriber", wakuPort = wakuPort, discv5Port = discv5Port
  let
    nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
    ip = parseIpAddress("0.0.0.0")
    flags = CapabilitiesBitfield.init(
      lightpush = false, filter = false, store = false, relay = true
    )

  # assumes we are behind a firewall, so we do not care about being discoverable
  let wakuDiscv5 = WakuDiscoveryV5.new(
    node.rng,
    discv5Conf,
    some(node.enr),
    some(node.peerManager),
    node.topicSubscriptionQueue,
  )
  var enrBuilder = EnrBuilder.init(nodeKey)

  await node.start()
  await node.mountRelay()
  node.peerManager.start()
  let recordRes = enrBuilder.build()
  let record =
    if recordRes.isErr():
      error "failed to create enr record", error = recordRes.error
      quit(QuitFailure)
    else:
      recordRes.get()

  (await wakuDiscv5.start()).isOkOr:
    error "failed to start discv5", error = error
    quit(1)
  var builder = WakuNodeBuilder.init()
  builder.withNodeKey(nodeKey)
  builder.withRecord(record)
  builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet()
  let node = builder.build().tryGet()

  # wait for a minimum of peers to be connected, otherwise messages won't be gossiped
  while true:
    let numConnectedPeers = node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected)
    if numConnectedPeers >= 6:
      notice "subscriber is ready", connectedPeers=numConnectedPeers, required=6
      break
    notice "waiting to be ready", connectedPeers=numConnectedPeers, required=6
    await sleepAsync(5000)
  var bootstrapNodeEnr: enr.Record
  discard bootstrapNodeEnr.fromURI(bootstrapNode)

  # Make sure it matches the publisher. Use default value
  # see spec: https://rfc.vac.dev/spec/23/
  let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto")
  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: none(DiscoveryConfig),
    address: ip,
    port: Port(discv5Port),
    privateKey: keys.PrivateKey(nodeKey.skkey),
    bootstrapRecords: @[bootstrapNodeEnr],
    autoupdateRecord: true,
  )

  # any content topic can be chosen. make sure it matches the publisher
  let contentTopic = ContentTopic("/examples/1/pubsub-example/proto")
  # assumes we are behind a firewall, so we do not care about being discoverable
  let wakuDiscv5 = WakuDiscoveryV5.new(
    node.rng,
    discv5Conf,
    some(node.enr),
    some(node.peerManager),
    node.topicSubscriptionQueue,
  )

  proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
    let payloadStr = string.fromBytes(msg.payload)
    if msg.contentTopic == contentTopic:
      notice "message received", payload=payloadStr,
        pubsubTopic=pubsubTopic,
        contentTopic=msg.contentTopic,
        timestamp=msg.timestamp
  node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(handler))
  await node.start()
  await node.mountRelay()
  node.peerManager.start()

  (await wakuDiscv5.start()).isOkOr:
    error "failed to start discv5", error = error
    quit(1)

  # wait for a minimum of peers to be connected, otherwise messages won't be gossiped
  while true:
    let numConnectedPeers =
      node.peerManager.peerStore[ConnectionBook].book.values().countIt(it == Connected)
    if numConnectedPeers >= 6:
      notice "subscriber is ready", connectedPeers = numConnectedPeers, required = 6
      break
    notice "waiting to be ready", connectedPeers = numConnectedPeers, required = 6
    await sleepAsync(5000)

  # Make sure it matches the publisher. Use default value
  # see spec: https://rfc.vac.dev/spec/23/
  let pubSubTopic = PubsubTopic("/waku/2/default-waku/proto")

  # any content topic can be chosen. make sure it matches the publisher
  let contentTopic = ContentTopic("/examples/1/pubsub-example/proto")

  proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
    let payloadStr = string.fromBytes(msg.payload)
    if msg.contentTopic == contentTopic:
      notice "message received",
        payload = payloadStr,
        pubsubTopic = pubsubTopic,
        contentTopic = msg.contentTopic,
        timestamp = msg.timestamp

  node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(handler))

when isMainModule:
  let rng = crypto.newRng()

@ -30,32 +30,47 @@ type CKeyPair* = object
  private_key: CFr
  public_key: CG1Projective

proc drop_ffi_derive_public_key*(ptrx: ptr CReturn[CG1Projective]) {.importc: "drop_ffi_derive_public_key".}
proc drop_ffi_derive_public_key*(
  ptrx: ptr CReturn[CG1Projective]
) {.importc: "drop_ffi_derive_public_key".}

proc drop_ffi_generate_random_fr*(ptrx: ptr CReturn[CFr]) {.importc: "drop_ffi_generate_random_fr".}
proc drop_ffi_generate_random_fr*(
  ptrx: ptr CReturn[CFr]
) {.importc: "drop_ffi_generate_random_fr".}

proc drop_ffi_generate_stealth_commitment*(ptrx: ptr CReturn[CStealthCommitment]) {.importc: "drop_ffi_generate_stealth_commitment".}
proc drop_ffi_generate_stealth_commitment*(
  ptrx: ptr CReturn[CStealthCommitment]
) {.importc: "drop_ffi_generate_stealth_commitment".}

proc drop_ffi_generate_stealth_private_key*(ptrx: ptr CReturn[CFr]) {.importc: "drop_ffi_generate_stealth_private_key".}
proc drop_ffi_generate_stealth_private_key*(
  ptrx: ptr CReturn[CFr]
) {.importc: "drop_ffi_generate_stealth_private_key".}

proc drop_ffi_random_keypair*(ptrx: ptr CReturn[CKeyPair]) {.importc: "drop_ffi_random_keypair".}
proc drop_ffi_random_keypair*(
  ptrx: ptr CReturn[CKeyPair]
) {.importc: "drop_ffi_random_keypair".}

proc ffi_derive_public_key*(private_key: ptr CFr): (ptr CReturn[CG1Projective]) {.importc: "ffi_derive_public_key".}
proc ffi_derive_public_key*(
  private_key: ptr CFr
): (ptr CReturn[CG1Projective]) {.importc: "ffi_derive_public_key".}

proc ffi_generate_random_fr*(): (ptr CReturn[CFr]) {.importc: "ffi_generate_random_fr".}

proc ffi_generate_stealth_commitment*(viewing_public_key: ptr CG1Projective,
                                      spending_public_key: ptr CG1Projective,
                                      ephemeral_private_key: ptr CFr): (ptr CReturn[CStealthCommitment]) {.importc: "ffi_generate_stealth_commitment".}
proc ffi_generate_stealth_commitment*(
  viewing_public_key: ptr CG1Projective,
  spending_public_key: ptr CG1Projective,
  ephemeral_private_key: ptr CFr,
): (ptr CReturn[CStealthCommitment]) {.importc: "ffi_generate_stealth_commitment".}

proc ffi_generate_stealth_private_key*(ephemeral_public_key: ptr CG1Projective,
                                       spending_key: ptr CFr,
                                       viewing_key: ptr CFr,
                                       view_tag: ptr uint64): (ptr CReturn[CFr]) {.importc: "ffi_generate_stealth_private_key".}
proc ffi_generate_stealth_private_key*(
  ephemeral_public_key: ptr CG1Projective,
  spending_key: ptr CFr,
  viewing_key: ptr CFr,
  view_tag: ptr uint64,
): (ptr CReturn[CFr]) {.importc: "ffi_generate_stealth_private_key".}

proc ffi_random_keypair*(): (ptr CReturn[CKeyPair]) {.importc: "ffi_random_keypair".}

## Nim wrappers and types for the ERC-5564-BN254 module

type FFIResult[T] = Result[T, string]
@ -64,9 +79,11 @@ type G1Projective = array[32, uint8]
type KeyPair* = object
  private_key*: Fr
  public_key*: G1Projective

type StealthCommitment* = object
  stealth_commitment*: G1Projective
  view_tag*: uint64

type PrivateKey* = Fr
type PublicKey* = G1Projective

@ -88,13 +105,18 @@ proc generateKeypair*(): FFIResult[KeyPair] =
    drop_ffi_random_keypair(res_ptr)
    return err("Error generating random keypair: " & $res_value.err_code)

  let ret = KeyPair(private_key: res_value.value.private_key.x0, public_key: res_value.value.public_key.x0)
  let ret = KeyPair(
    private_key: res_value.value.private_key.x0,
    public_key: res_value.value.public_key.x0,
  )
  drop_ffi_random_keypair(res_ptr)
  return ok(ret)

proc generateStealthCommitment*(viewing_public_key: G1Projective,
                                spending_public_key: G1Projective,
                                ephemeral_private_key: Fr): FFIResult[StealthCommitment] =
proc generateStealthCommitment*(
  viewing_public_key: G1Projective,
  spending_public_key: G1Projective,
  ephemeral_private_key: Fr,
): FFIResult[StealthCommitment] =
  let viewing_public_key = CG1Projective(x0: viewing_public_key)
  let viewing_public_key_ptr = unsafeAddr(viewing_public_key)
  let spending_public_key = CG1Projective(x0: spending_public_key)
@ -102,20 +124,29 @@ proc generateStealthCommitment*(viewing_public_key: G1Projective,
  let ephemeral_private_key = CFr(x0: ephemeral_private_key)
  let ephemeral_private_key_ptr = unsafeAddr(ephemeral_private_key)

  let res_ptr = (ffi_generate_stealth_commitment(viewing_public_key_ptr, spending_public_key_ptr, ephemeral_private_key_ptr))
  let res_ptr = (
    ffi_generate_stealth_commitment(
      viewing_public_key_ptr, spending_public_key_ptr, ephemeral_private_key_ptr
    )
  )
  let res_value = res_ptr[]
  if res_value.err_code != 0:
    drop_ffi_generate_stealth_commitment(res_ptr)
    return err("Error generating stealth commitment: " & $res_value.err_code)

  let ret = StealthCommitment(stealth_commitment: res_value.value.stealth_commitment.x0, view_tag: res_value.value.view_tag)
  let ret = StealthCommitment(
    stealth_commitment: res_value.value.stealth_commitment.x0,
    view_tag: res_value.value.view_tag,
  )
  drop_ffi_generate_stealth_commitment(res_ptr)
  return ok(ret)

proc generateStealthPrivateKey*(ephemeral_public_key: G1Projective,
                                spending_key: Fr,
                                viewing_key: Fr,
                                view_tag: uint64): FFIResult[Fr] =
proc generateStealthPrivateKey*(
  ephemeral_public_key: G1Projective,
  spending_key: Fr,
  viewing_key: Fr,
  view_tag: uint64,
): FFIResult[Fr] =
  let ephemeral_public_key = CG1Projective(x0: ephemeral_public_key)
  let ephemeral_public_key_ptr = unsafeAddr(ephemeral_public_key)
  let spending_key = CFr(x0: spending_key)
@ -124,7 +155,11 @@ proc generateStealthPrivateKey*(ephemeral_public_key: G1Projective,
  let viewing_key_ptr = unsafeAddr(viewing_key)
  let view_tag_ptr = unsafeAddr(view_tag)

  let res_ptr = (ffi_generate_stealth_private_key(ephemeral_public_key_ptr, spending_key_ptr, viewing_key_ptr, view_tag_ptr))
  let res_ptr = (
    ffi_generate_stealth_private_key(
      ephemeral_public_key_ptr, spending_key_ptr, viewing_key_ptr, view_tag_ptr
    )
  )
  let res_value = res_ptr[]
  if res_value.err_code != 0:
drop_ffi_generate_stealth_private_key(res_ptr)
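
Putting the wrappers together, the full stealth-commitment exchange looks like this (a sketch; it assumes the Rust FFI library is linked and uses the wrapper names declared in this module):

let spending = generateKeypair().valueOr:
  quit("spending keygen failed: " & error)
let viewing = generateKeypair().valueOr:
  quit("viewing keygen failed: " & error)
let ephemeral = generateKeypair().valueOr:
  quit("ephemeral keygen failed: " & error)

# The sender derives a commitment for the recipient's public keys...
let sc = generateStealthCommitment(
  viewing.public_key, spending.public_key, ephemeral.private_key
).valueOr:
  quit("commitment failed: " & error)

# ...and the recipient recovers the matching private key.
let stealthPriv = generateStealthPrivateKey(
  ephemeral.public_key, spending.private_key, viewing.private_key, sc.view_tag
).valueOr:
  quit("not addressed to us: " & error)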

@ -15,19 +15,8 @@ import
  libp2p/crypto/crypto

export
  networks_config,
  app,
  logging,
  options,
  strutils,
  os,
  sequtils,
  stewNet,
  chronicles,
  chronos,
  metrics,
  libbacktrace,
  crypto
  networks_config, app, logging, options, strutils, os, sequtils, stewNet, chronicles,
  chronos, metrics, libbacktrace, crypto

proc setup*(): App =
  const versionString = "version / git commit hash: " & app.git_version

@ -12,9 +12,7 @@ import
  ./node_spec,
  ./wire_spec

export
  wire_spec,
  logging
export wire_spec, logging

type StealthCommitmentProtocol* = object
  wakuApp: App
@ -22,28 +20,36 @@ type StealthCommitmentProtocol* = object
  spendingKeyPair: StealthCommitmentFFI.KeyPair
  viewingKeyPair: StealthCommitmentFFI.KeyPair

proc deserialize(T: type StealthCommitmentFFI.PublicKey, v: SerializedKey): Result[T, string] =
proc deserialize(
    T: type StealthCommitmentFFI.PublicKey, v: SerializedKey
): Result[T, string] =
  # deserialize seq[byte] into array[32, uint8]
  if v.len != 32:
    return err("invalid key length")
  var buf: array[32, uint8]
  for i in 0..<v.len:
  for i in 0 ..< v.len:
    buf[i] = v[i]
  return ok(buf)

proc serialize(v: StealthCommitmentFFI.PublicKey | StealthCommitmentFFI.PrivateKey): SerializedKey =
proc serialize(
    v: StealthCommitmentFFI.PublicKey | StealthCommitmentFFI.PrivateKey
): SerializedKey =
  # serialize array[32, uint8] into seq[byte]
  var buf = newSeq[byte](v.len)
  for i in 0..<v.len:
  for i in 0 ..< v.len:
    buf[i] = v[i]
  return buf

proc sendThruWaku*(self: StealthCommitmentProtocol, msg: seq[byte]): Future[Result[void, string]] {.async.} =
proc sendThruWaku*(
    self: StealthCommitmentProtocol, msg: seq[byte]
): Future[Result[void, string]] {.async.} =
  let time = getTime().toUnix()
  var message = WakuMessage(payload: msg,
    contentTopic: self.contentTopic,
    version: 0,
    timestamp: getNanosecondTime(time))
  var message = WakuMessage(
    payload: msg,
    contentTopic: self.contentTopic,
    version: 0,
    timestamp: getNanosecondTime(time),
  )

  (self.wakuApp.node.wakuRlnRelay.appendRLNProof(message, float64(time))).isOkOr:
    return err("could not append rate limit proof to the message: " & $error)
@ -52,31 +58,47 @@ proc sendThruWaku*(self: StealthCommitmentProtocol, msg: seq[byte]): Future[Resu
    return err("failed to publish message: " & $error)

  debug "rate limit proof is appended to the message"

  return ok()

proc sendRequest*(self: StealthCommitmentProtocol): Future[Result[void, string]] {.async.} =
  let request = constructRequest(serialize(self.spendingKeyPair.publicKey), serialize(self.viewingKeyPair.publicKey)).encode()
proc sendRequest*(
    self: StealthCommitmentProtocol
): Future[Result[void, string]] {.async.} =
  let request = constructRequest(
    serialize(self.spendingKeyPair.publicKey),
    serialize(self.viewingKeyPair.publicKey),
  )
  .encode()
  try:
    (await self.sendThruWaku(request.buffer)).isOkOr:
      return err("Could not send stealth commitment payload thru waku: " & $error)
  except CatchableError:
    return err("Could not send stealth commitment payload thru waku: " & getCurrentExceptionMsg())
    return err(
      "Could not send stealth commitment payload thru waku: " & getCurrentExceptionMsg()
    )
  return ok()

proc sendResponse*(self: StealthCommitmentProtocol, stealthCommitment: StealthCommitmentFFI.PublicKey, ephemeralPubKey: StealthCommitmentFFI.PublicKey, viewTag: uint64): Future[Result[void, string]] {.async.} =
  let response = constructResponse(serialize(stealthCommitment), serialize(ephemeralPubKey), viewTag).encode()
proc sendResponse*(
    self: StealthCommitmentProtocol,
    stealthCommitment: StealthCommitmentFFI.PublicKey,
    ephemeralPubKey: StealthCommitmentFFI.PublicKey,
    viewTag: uint64,
): Future[Result[void, string]] {.async.} =
  let response = constructResponse(
    serialize(stealthCommitment), serialize(ephemeralPubKey), viewTag
  )
  .encode()
  try:
    (await self.sendThruWaku(response.buffer)).isOkOr:
      return err("Could not send stealth commitment payload thru waku: " & $error)
  except CatchableError:
    return err("Could not send stealth commitment payload thru waku: " & getCurrentExceptionMsg())
    return err(
      "Could not send stealth commitment payload thru waku: " & getCurrentExceptionMsg()
    )
  return ok()

type SCPHandler* = proc (msg: WakuMessage): Future[void] {.async.}
type SCPHandler* = proc(msg: WakuMessage): Future[void] {.async.}
proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler =
  let handler = proc(msg: WakuMessage): Future[void] {.async.} =
    let decodedRes = WakuStealthCommitmentMsg.decode(msg.payload)
    if decodedRes.isErr():
@ -85,49 +107,68 @@ proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler =
    if decoded.request == false:
      # check if the generated stealth commitment belongs to the receiver
      # if not, continue
      let ephemeralPubKeyRes = deserialize(StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get())
      let ephemeralPubKeyRes =
        deserialize(StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get())
      if ephemeralPubKeyRes.isErr():
        error "could not deserialize ephemeral public key: ", err = ephemeralPubKeyRes.error()
        error "could not deserialize ephemeral public key: ",
          err = ephemeralPubKeyRes.error()
      let ephemeralPubKey = ephemeralPubKeyRes.get()
      let stealthCommitmentPrivateKeyRes = StealthCommitmentFFI.generateStealthPrivateKey(ephemeralPubKey,
        self.spendingKeyPair.privateKey,
        self.viewingKeyPair.privateKey,
        decoded.viewTag.get())
      let stealthCommitmentPrivateKeyRes = StealthCommitmentFFI.generateStealthPrivateKey(
        ephemeralPubKey,
        self.spendingKeyPair.privateKey,
        self.viewingKeyPair.privateKey,
        decoded.viewTag.get(),
      )
      if stealthCommitmentPrivateKeyRes.isErr():
        info "received stealth commitment does not belong to the receiver: ", err = stealthCommitmentPrivateKeyRes.error()
        info "received stealth commitment does not belong to the receiver: ",
          err = stealthCommitmentPrivateKeyRes.error()

      let stealthCommitmentPrivateKey = stealthCommitmentPrivateKeyRes.get()
      info "received stealth commitment belongs to the receiver: ", stealthCommitmentPrivateKey, stealthCommitmentPubKey = decoded.stealthCommitment.get()
      info "received stealth commitment belongs to the receiver: ",
        stealthCommitmentPrivateKey,
        stealthCommitmentPubKey = decoded.stealthCommitment.get()
      return
    # send response
    # deserialize the keys
    let spendingKeyRes = deserialize(StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get())
    let spendingKeyRes =
      deserialize(StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get())
    if spendingKeyRes.isErr():
      error "could not deserialize spending key: ", err = spendingKeyRes.error()
    let spendingKey = spendingKeyRes.get()
    let viewingKeyRes = (deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get()))
    let viewingKeyRes =
      (deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get()))
    if viewingKeyRes.isErr():
      error "could not deserialize viewing key: ", err = viewingKeyRes.error()
    let viewingKey = viewingKeyRes.get()

    info "received spending key", spendingKey
    info "received viewing key", viewingKey
    info "received spending key", spendingKey
    info "received viewing key", viewingKey
    let ephemeralKeyPairRes = StealthCommitmentFFI.generateKeyPair()
    if ephemeralKeyPairRes.isErr():
      error "could not generate ephemeral key pair: ", err = ephemeralKeyPairRes.error()
    let ephemeralKeyPair = ephemeralKeyPairRes.get()

    let stealthCommitmentRes = StealthCommitmentFFI.generateStealthCommitment(spendingKey, viewingKey, ephemeralKeyPair.privateKey)
    let stealthCommitmentRes = StealthCommitmentFFI.generateStealthCommitment(
      spendingKey, viewingKey, ephemeralKeyPair.privateKey
    )
    if stealthCommitmentRes.isErr():
      error "could not generate stealth commitment: ", err = stealthCommitmentRes.error()
      error "could not generate stealth commitment: ",
        err = stealthCommitmentRes.error()
    let stealthCommitment = stealthCommitmentRes.get()

    (await self.sendResponse(stealthCommitment.stealthCommitment, ephemeralKeyPair.publicKey, stealthCommitment.viewTag)).isOkOr:
    (
      await self.sendResponse(
        stealthCommitment.stealthCommitment, ephemeralKeyPair.publicKey,
        stealthCommitment.viewTag,
      )
    ).isOkOr:
      error "could not send response: ", err = $error

  return handler

proc new*(wakuApp: App, contentTopic = ContentTopic("/wakustealthcommitments/1/app/proto")): Result[StealthCommitmentProtocol, string] =
proc new*(
    wakuApp: App, contentTopic = ContentTopic("/wakustealthcommitments/1/app/proto")
): Result[StealthCommitmentProtocol, string] =
  let spendingKeyPair = StealthCommitmentFFI.generateKeyPair().valueOr:
    return err("could not generate spending key pair: " & $error)
  let viewingKeyPair = StealthCommitmentFFI.generateKeyPair().valueOr:
@ -136,11 +177,12 @@ proc new*(wakuApp: App, contentTopic = ContentTopic("/wakustealthcommitments/1/a
  info "spending public key", publicKey = spendingKeyPair.publicKey
  info "viewing public key", publicKey = viewingKeyPair.publicKey

  let SCP = StealthCommitmentProtocol(wakuApp: wakuApp,
    contentTopic: contentTopic,
    spendingKeyPair: spendingKeyPair,
    viewingKeyPair: viewingKeyPair)
  let SCP = StealthCommitmentProtocol(
    wakuApp: wakuApp,
    contentTopic: contentTopic,
    spendingKeyPair: spendingKeyPair,
    viewingKeyPair: viewingKeyPair,
  )

  proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
    let scpHandler = getSCPHandler(SCP)

@ -4,10 +4,7 @@ else:
  {.push raises: [].}

import
  stew/results,
  chronicles,
  ./node_spec as Waku,
  ./stealth_commitment_protocol as SCP
  stew/results, chronicles, ./node_spec as Waku, ./stealth_commitment_protocol as SCP

logScope:
  topics = "waku stealthcommitments"
@ -24,7 +21,7 @@ when isMainModule:

  logging.setupLogLevel(logging.LogLevel.INFO)
  logging.setupLogFormat(logging.LogFormat.TEXT, color)

  info "Starting Waku Stealth Commitment Protocol"
  info "Starting Waku Node"
  let node = Waku.setup()
@ -32,7 +29,7 @@ when isMainModule:
  let scp = SCP.new(node).valueOr:
    error "Could not start Stealth Commitment Protocol", error = $error
    quit(1)

  try:
    info "Sending stealth commitment request"
    (waitFor scp.sendRequest()).isOkOr:

@ -1,52 +1,45 @@
import std/[times, options]
import
  confutils,
  chronicles,
  chronos,
  stew/results
import confutils, chronicles, chronos, stew/results

import
  ../../waku/waku_core,
  ../../waku/common/protobuf
import libp2p/protobuf/minprotobuf
import ../../waku/waku_core, ../../waku/common/protobuf
import libp2p/protobuf/minprotobuf

export
  times,
  options,
  confutils,
  chronicles,
  chronos,
  results,
  waku_core,
  protobuf,
  times, options, confutils, chronicles, chronos, results, waku_core, protobuf,
  minprotobuf

type SerializedKey* = seq[byte]

type
  WakuStealthCommitmentMsg* = object
    request*: bool
    spendingPubKey*: Option[SerializedKey]
    viewingPubKey*: Option[SerializedKey]
    ephemeralPubKey*: Option[SerializedKey]
    stealthCommitment*: Option[SerializedKey]
    viewTag*: Option[uint64]
type WakuStealthCommitmentMsg* = object
  request*: bool
  spendingPubKey*: Option[SerializedKey]
  viewingPubKey*: Option[SerializedKey]
  ephemeralPubKey*: Option[SerializedKey]
  stealthCommitment*: Option[SerializedKey]
  viewTag*: Option[uint64]

proc decode*(T: type WakuStealthCommitmentMsg, buffer: seq[byte]): ProtoResult[T] =
  var msg = WakuStealthCommitmentMsg()
  let pb = initProtoBuffer(buffer)

  var request: uint64
  discard ? pb.getField(1, request)
  discard ?pb.getField(1, request)
  msg.request = request == 1
  var spendingPubKey = newSeq[byte]()
  discard ? pb.getField(2, spendingPubKey)
  msg.spendingPubKey = if spendingPubKey.len > 0: some(spendingPubKey) else: none(SerializedKey)
  discard ?pb.getField(2, spendingPubKey)
  msg.spendingPubKey =
    if spendingPubKey.len > 0:
      some(spendingPubKey)
    else:
      none(SerializedKey)
  var viewingPubKey = newSeq[byte]()
  discard ? pb.getField(3, viewingPubKey)
  msg.viewingPubKey = if viewingPubKey.len > 0: some(viewingPubKey) else: none(SerializedKey)

  discard ?pb.getField(3, viewingPubKey)
  msg.viewingPubKey =
    if viewingPubKey.len > 0:
      some(viewingPubKey)
    else:
      none(SerializedKey)

  if msg.spendingPubKey.isSome() and msg.viewingPubKey.isSome():
    msg.stealthCommitment = none(SerializedKey)
    msg.viewTag = none(uint64)
@ -58,20 +51,32 @@ proc decode*(T: type WakuStealthCommitmentMsg, buffer: seq[byte]): ProtoResult[T
  if msg.request == true and msg.spendingPubKey.isNone() and msg.viewingPubKey.isNone():
    return err(ProtoError.RequiredFieldMissing)

  var stealthCommitment = newSeq[byte]()
  discard ? pb.getField(4, stealthCommitment)
  msg.stealthCommitment = if stealthCommitment.len > 0: some(stealthCommitment) else: none(SerializedKey)
  discard ?pb.getField(4, stealthCommitment)
  msg.stealthCommitment =
    if stealthCommitment.len > 0:
      some(stealthCommitment)
    else:
      none(SerializedKey)

  var ephemeralPubKey = newSeq[byte]()
  discard ? pb.getField(5, ephemeralPubKey)
  msg.ephemeralPubKey = if ephemeralPubKey.len > 0: some(ephemeralPubKey) else: none(SerializedKey)
  discard ?pb.getField(5, ephemeralPubKey)
  msg.ephemeralPubKey =
    if ephemeralPubKey.len > 0:
      some(ephemeralPubKey)
    else:
      none(SerializedKey)

  var viewTag: uint64
  discard ? pb.getField(6, viewTag)
  msg.viewTag = if viewTag != 0: some(viewTag) else: none(uint64)
  discard ?pb.getField(6, viewTag)
  msg.viewTag =
    if viewTag != 0:
      some(viewTag)
    else:
      none(uint64)

  if msg.stealthCommitment.isNone() and msg.viewTag.isNone() and msg.ephemeralPubKey.isNone():
  if msg.stealthCommitment.isNone() and msg.viewTag.isNone() and
      msg.ephemeralPubKey.isNone():
    return err(ProtoError.RequiredFieldMissing)

  if msg.stealthCommitment.isSome() and msg.viewTag.isNone():
@ -90,7 +95,7 @@ proc encode*(msg: WakuStealthCommitmentMsg): ProtoBuffer =
  var serialised = initProtoBuffer()

  serialised.write(1, uint64(msg.request))

  if msg.spendingPubKey.isSome():
    serialised.write(2, msg.spendingPubKey.get())
  if msg.viewingPubKey.isSome():
@ -108,8 +113,21 @@ func toByteSeq*(str: string): seq[byte] {.inline.} =
  ## Converts a string to the corresponding byte sequence.
  @(str.toOpenArrayByte(0, str.high))

proc constructRequest*(spendingPubKey: SerializedKey, viewingPubKey: SerializedKey): WakuStealthCommitmentMsg =
  WakuStealthCommitmentMsg(request: true, spendingPubKey: some(spendingPubKey), viewingPubKey: some(viewingPubKey))
proc constructRequest*(
    spendingPubKey: SerializedKey, viewingPubKey: SerializedKey
): WakuStealthCommitmentMsg =
  WakuStealthCommitmentMsg(
    request: true,
    spendingPubKey: some(spendingPubKey),
    viewingPubKey: some(viewingPubKey),
  )

proc constructResponse*(stealthCommitment: SerializedKey, ephemeralPubKey: SerializedKey, viewTag: uint64): WakuStealthCommitmentMsg =
  WakuStealthCommitmentMsg(request: false, stealthCommitment: some(stealthCommitment), ephemeralPubKey: some(ephemeralPubKey), viewTag: some(viewTag))
proc constructResponse*(
    stealthCommitment: SerializedKey, ephemeralPubKey: SerializedKey, viewTag: uint64
): WakuStealthCommitmentMsg =
  WakuStealthCommitmentMsg(
    request: false,
    stealthCommitment: some(stealthCommitment),
    ephemeralPubKey: some(ephemeralPubKey),
    viewTag: some(viewTag),
)
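
For orientation, a minimal round-trip sketch of the codec restyled above (illustrative only: the key bytes are placeholders, not real curve points, and it assumes minprotobuf's ProtoBuffer exposes its underlying byte buffer):

# Hypothetical usage of constructRequest/encode/decode from this module.
let req = constructRequest(
  spendingPubKey = @[1'u8, 2, 3], # placeholder bytes, not a real public key
  viewingPubKey = @[4'u8, 5, 6],
)
let pb = req.encode()
let decoded = WakuStealthCommitmentMsg.decode(pb.buffer)
assert decoded.isOk() and decoded.get().request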

@ -13,7 +13,7 @@ proc alloc*(str: string): cstring =
  ## There should be the corresponding manual deallocation with deallocShared !
  var ret = cast[cstring](allocShared(str.len + 1))
  let s = cast[seq[char]](str)
  for i in 0..<str.len:
  for i in 0 ..< str.len:
    ret[i] = s[i]
  ret[str.len] = '\0'
  return ret

@ -32,6 +32,6 @@ proc toSeq*[T](s: SharedSeq[T]): seq[T] =
  ## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required
  ## as seq[T] is a GC managed type.
  var ret = newSeq[T]()
  for i in 0..<s.len:
  for i in 0 ..< s.len:
    ret.add(s.data[i])
  return ret
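
As the doc comments above stress, these helpers deal with manually managed shared memory; a small usage sketch (illustrative only):

let cstr = alloc("waku") # copies the string onto the shared heap, NUL-terminated
# ... hand cstr across a thread boundary ...
deallocShared(cstr) # the mandatory matching manual deallocation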

@ -1,6 +1,3 @@

type
  WakuCallBack* = proc(callerRet: cint,
                       msg: ptr cchar,
                       len: csize_t,
                       userData: pointer) {.cdecl, gcsafe, raises: [].}
type WakuCallBack* = proc(
  callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].}
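
As an illustration, a handler matching this signature could look as follows (a sketch; the msg buffer is only borrowed for the duration of the call, so it is copied before use):

proc exampleCallback(
    callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].} =
  # Copy the borrowed buffer into a Nim string before the library reclaims it.
  var s = newString(int(len))
  if len > 0:
    copyMem(addr s[0], msg, int(len))
  echo "ret=", callerRet, " msg=", s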

@ -1,8 +1,6 @@

type JsonEvent* = ref object of RootObj
  # https://rfc.vac.dev/spec/36/#jsonsignal-type
type JsonEvent* = ref object of RootObj # https://rfc.vac.dev/spec/36/#jsonsignal-type
  eventType* {.requiresInit.}: string

method `$`*(jsonEvent: JsonEvent): string {.base.} = discard
method `$`*(jsonEvent: JsonEvent): string {.base.} =
  discard
  # All events should implement this

@ -1,25 +1,19 @@
import system, std/[json, sequtils]
import stew/[byteutils, results]
import
  ../../waku/common/base64,
  ../../waku/waku_core/message,
  ../../waku/waku_core/message/message,
  ./json_base_event

import
  system,
  std/[json,sequtils]
import
  stew/[byteutils,results]
import
  ../../waku/common/base64,
  ../../waku/waku_core/message,
  ../../waku/waku_core/message/message,
  ./json_base_event

type
  JsonMessage* = ref object
    # https://rfc.vac.dev/spec/36/#jsonmessage-type
    payload*: Base64String
    contentTopic*: string
    version*: uint
    timestamp*: int64
    ephemeral*: bool
    meta*: Base64String
    proof*: Base64String
type JsonMessage* = ref object # https://rfc.vac.dev/spec/36/#jsonmessage-type
  payload*: Base64String
  contentTopic*: string
  version*: uint
  timestamp*: int64
  ephemeral*: bool
  meta*: Base64String
  proof*: Base64String

func fromJsonNode*(T: type JsonMessage, jsonContent: JsonNode): JsonMessage =
  # Visit https://rfc.vac.dev/spec/14/ for further details
@ -30,7 +24,7 @@ func fromJsonNode*(T: type JsonMessage, jsonContent: JsonNode): JsonMessage =
    timestamp: int64(jsonContent{"timestamp"}.getBiggestInt()),
    ephemeral: jsonContent{"ephemeral"}.getBool(),
    meta: Base64String(jsonContent{"meta"}.getStr()),
    proof: Base64String(jsonContent{"proof"}.getStr())
    proof: Base64String(jsonContent{"proof"}.getStr()),
  )

proc toWakuMessage*(self: JsonMessage): Result[WakuMessage, string] =
@ -43,15 +37,17 @@ proc toWakuMessage*(self: JsonMessage): Result[WakuMessage, string] =
  let proof = base64.decode(self.proof).valueOr:
    return err("invalid proof format: " & error)

  ok(WakuMessage(
    payload: payload,
    meta: meta,
    contentTopic: self.contentTopic,
    version: uint32(self.version),
    timestamp: self.timestamp,
    ephemeral: self.ephemeral,
    proof: proof,
  ))
  ok(
    WakuMessage(
      payload: payload,
      meta: meta,
      contentTopic: self.contentTopic,
      version: uint32(self.version),
      timestamp: self.timestamp,
      ephemeral: self.ephemeral,
      proof: proof,
    )
  )

proc `%`*(value: Base64String): JsonNode =
  %(value.string)
@ -60,13 +56,11 @@ proc `%`*(value: WakuMessageHash): JsonNode =
  %(to0xHex(value))

type JsonMessageEvent* = ref object of JsonEvent
    pubsubTopic*: string
    messageHash*: WakuMessageHash
    wakuMessage*: JsonMessage
  pubsubTopic*: string
  messageHash*: WakuMessageHash
  wakuMessage*: JsonMessage

proc new*(T: type JsonMessageEvent,
          pubSubTopic: string,
          msg: WakuMessage): T =
proc new*(T: type JsonMessageEvent, pubSubTopic: string, msg: WakuMessage): T =
  # Returns a WakuMessage event as indicated in
  # https://rfc.vac.dev/spec/36/#jsonmessageevent-type

@ -89,15 +83,15 @@ proc new*(T: type JsonMessageEvent,
    pubSubTopic: pubSubTopic,
    messageHash: msgHash,
    wakuMessage: JsonMessage(
        payload: base64.encode(payload),
        contentTopic: msg.contentTopic,
        version: msg.version,
        timestamp: int64(msg.timestamp),
        ephemeral: msg.ephemeral,
        meta: base64.encode(meta),
        proof: base64.encode(proof),
    )
      payload: base64.encode(payload),
      contentTopic: msg.contentTopic,
      version: msg.version,
      timestamp: int64(msg.timestamp),
      ephemeral: msg.ephemeral,
      meta: base64.encode(meta),
      proof: base64.encode(proof),
    ),
  )

method `$`*(jsonMessage: JsonMessageEvent): string =
  $( %* jsonMessage )
  $(%*jsonMessage)
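
A hedged sketch of the conversion path above, going from a JSON document to a WakuMessage (field values are placeholders; "aGVsbG8=" is base64 for "hello"):

# Hypothetical input; meta and proof are optional thanks to the `{}` accessors.
let jsonContent = parseJson("""{"payload": "aGVsbG8=", "contentTopic": "/example/1/chat/proto", "version": 0, "timestamp": 0, "ephemeral": false}""")
let jsonMsg = JsonMessage.fromJsonNode(jsonContent)
let wakuMsgRes = jsonMsg.toWakuMessage()
if wakuMsgRes.isErr():
  echo "conversion failed: ", wakuMsgRes.error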

@ -1,13 +1,9 @@

{.pragma: exported, exportc, cdecl, raises: [].}
{.pragma: callback, cdecl, raises: [], gcsafe.}
{.passc: "-fPIC".}

import
  std/[json,sequtils,times,strformat,options,atomics,strutils]
import
  chronicles,
  chronos
import std/[json, sequtils, times, strformat, options, atomics, strutils]
import chronicles, chronos
import
  ../../waku/common/base64,
  ../../waku/waku_core/message/message,
@ -44,7 +40,9 @@ const RET_MISSING_CALLBACK: cint = 2
### Not-exported components

proc relayEventCallback(ctx: ptr Context): WakuRelayHandler =
  return proc (pubsubTopic: PubsubTopic, msg: WakuMessage): Future[system.void]{.async.} =
  return proc(
      pubsubTopic: PubsubTopic, msg: WakuMessage
  ): Future[system.void] {.async.} =
    # Callback that handles the Waku Relay events, i.e. messages or errors.
    if isNil(ctx[].eventCallback):
      error "eventCallback is nil"
@ -56,12 +54,14 @@ proc relayEventCallback(ctx: ptr Context): WakuRelayHandler =

    try:
      let event = $JsonMessageEvent.new(pubsubTopic, msg)
      cast[WakuCallBack](ctx[].eventCallback)(RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData)
    except Exception,CatchableError:
      let msg = "Exception when calling 'eventCallBack': " &
                getCurrentExceptionMsg()
      cast[WakuCallBack](ctx[].eventCallback)(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData)

      cast[WakuCallBack](ctx[].eventCallback)(
        RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
      )
    except Exception, CatchableError:
      let msg = "Exception when calling 'eventCallBack': " & getCurrentExceptionMsg()
      cast[WakuCallBack](ctx[].eventCallback)(
        RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
      )

### End of not-exported components
################################################################################
@ -69,10 +69,9 @@ proc relayEventCallback(ctx: ptr Context): WakuRelayHandler =
################################################################################
### Exported procs

proc waku_new(configJson: cstring,
              callback: WakuCallback,
              userData: pointer): pointer
              {.dynlib, exportc, cdecl.} =
proc waku_new(
    configJson: cstring, callback: WakuCallback, userData: pointer
): pointer {.dynlib, exportc, cdecl.} =
  ## Creates a new instance of the WakuNode.

  if isNil(callback):
@ -88,11 +87,10 @@ proc waku_new(configJson: cstring,
  ctx.userData = userData

  let sendReqRes = waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.LIFECYCLE,
                      NodeLifecycleRequest.createShared(
                        NodeLifecycleMsgType.CREATE_NODE,
                        configJson))
    ctx,
    RequestType.LIFECYCLE,
    NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE_NODE, configJson),
  )
  if sendReqRes.isErr():
    let msg = $sendReqRes.error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
@ -100,10 +98,9 @@ proc waku_new(configJson: cstring,

  return ctx

proc waku_destroy(ctx: ptr Context,
                  callback: WakuCallBack,
                  userData: pointer): cint {.dynlib, exportc.} =

proc waku_destroy(
    ctx: ptr Context, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  if isNil(callback):
    return RET_MISSING_CALLBACK

@ -114,33 +111,38 @@ proc waku_destroy(ctx: ptr Context,

  return RET_OK

proc waku_version(ctx: ptr Context,
                  callback: WakuCallBack,
                  userData: pointer): cint {.dynlib, exportc.} =

proc waku_version(
    ctx: ptr Context, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  ctx[].userData = userData

  if isNil(callback):
    return RET_MISSING_CALLBACK

  callback(RET_OK, cast[ptr cchar](WakuNodeVersionString),
           cast[csize_t](len(WakuNodeVersionString)), userData)
  callback(
    RET_OK,
    cast[ptr cchar](WakuNodeVersionString),
    cast[csize_t](len(WakuNodeVersionString)),
    userData,
  )

  return RET_OK

proc waku_set_event_callback(ctx: ptr Context,
                             callback: WakuCallBack,
                             userData: pointer) {.dynlib, exportc.} =
proc waku_set_event_callback(
    ctx: ptr Context, callback: WakuCallBack, userData: pointer
) {.dynlib, exportc.} =
  ctx[].eventCallback = cast[pointer](callback)
  ctx[].eventUserData = userData

proc waku_content_topic(ctx: ptr Context,
                        appName: cstring,
                        appVersion: cuint,
                        contentTopicName: cstring,
                        encoding: cstring,
                        callback: WakuCallBack,
                        userData: pointer): cint {.dynlib, exportc.} =
proc waku_content_topic(
    ctx: ptr Context,
    appName: cstring,
    appVersion: cuint,
    contentTopicName: cstring,
    encoding: cstring,
    callback: WakuCallBack,
    userData: pointer,
): cint {.dynlib, exportc.} =
  # https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding

  ctx[].userData = userData
@ -153,7 +155,9 @@ proc waku_content_topic(ctx: ptr Context,
  let encodingStr = encoding.alloc()

  let contentTopic = fmt"/{$appStr}/{appVersion}/{$ctnStr}/{$encodingStr}"
  callback(RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData)
  callback(
    RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData
  )

  deallocShared(appStr)
  deallocShared(ctnStr)
@ -161,10 +165,9 @@ proc waku_content_topic(ctx: ptr Context,

  return RET_OK

proc waku_pubsub_topic(ctx: ptr Context,
                       topicName: cstring,
                       callback: WakuCallBack,
                       userData: pointer): cint {.dynlib, exportc, cdecl.} =
proc waku_pubsub_topic(
    ctx: ptr Context, topicName: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc, cdecl.} =
  # https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding

  ctx[].userData = userData
@ -175,15 +178,17 @@ proc waku_pubsub_topic(ctx: ptr Context,
  let topicNameStr = topicName.alloc()

  let outPubsubTopic = fmt"/waku/2/{$topicNameStr}"
  callback(RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData)
  callback(
    RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData
  )

  deallocShared(topicNameStr)

  return RET_OK

proc waku_default_pubsub_topic(ctx: ptr Context,
                               callback: WakuCallBack,
                               userData: pointer): cint {.dynlib, exportc.} =
proc waku_default_pubsub_topic(
    ctx: ptr Context, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  # https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic

  ctx[].userData = userData
@ -191,18 +196,23 @@ proc waku_default_pubsub_topic(ctx: ptr Context,
  if isNil(callback):
    return RET_MISSING_CALLBACK

  callback(RET_OK, cast[ptr cchar](DefaultPubsubTopic), cast[csize_t](len(DefaultPubsubTopic)), userData)
  callback(
    RET_OK,
    cast[ptr cchar](DefaultPubsubTopic),
    cast[csize_t](len(DefaultPubsubTopic)),
    userData,
  )

  return RET_OK

proc waku_relay_publish(ctx: ptr Context,
                        pubSubTopic: cstring,
                        jsonWakuMessage: cstring,
                        timeoutMs: cuint,
                        callback: WakuCallBack,
                        userData: pointer): cint

                        {.dynlib, exportc, cdecl.} =
proc waku_relay_publish(
    ctx: ptr Context,
    pubSubTopic: cstring,
    jsonWakuMessage: cstring,
    timeoutMs: cuint,
    callback: WakuCallBack,
    userData: pointer,
): cint {.dynlib, exportc, cdecl.} =
  # https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms

  ctx[].userData = userData
@ -211,7 +221,7 @@ proc waku_relay_publish(ctx: ptr Context,
    return RET_MISSING_CALLBACK

  let jwm = jsonWakuMessage.alloc()
  var jsonMessage:JsonMessage
  var jsonMessage: JsonMessage
  try:
    let jsonContent = parseJson($jwm)
    jsonMessage = JsonMessage.fromJsonNode(jsonContent)
@ -230,18 +240,22 @@ proc waku_relay_publish(ctx: ptr Context,

  let pst = pubSubTopic.alloc()

  let targetPubSubTopic = if len(pst) == 0:
                            DefaultPubsubTopic
                          else:
                            $pst
  let targetPubSubTopic =
    if len(pst) == 0:
      DefaultPubsubTopic
    else:
      $pst

  let sendReqRes = waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.RELAY,
                      RelayRequest.createShared(RelayMsgType.PUBLISH,
                                                PubsubTopic($pst),
                                                WakuRelayHandler(relayEventCallback(ctx)),
                                                wakuMessage))
    ctx,
    RequestType.RELAY,
    RelayRequest.createShared(
      RelayMsgType.PUBLISH,
      PubsubTopic($pst),
      WakuRelayHandler(relayEventCallback(ctx)),
      wakuMessage,
    ),
  )
  deallocShared(pst)

  if sendReqRes.isErr():
@ -253,46 +267,42 @@ proc waku_relay_publish(ctx: ptr Context,
  callback(RET_OK, unsafeAddr msgHash[0], cast[csize_t](len(msgHash)), userData)
  return RET_OK
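
For reference, a sketch of the jsonWakuMessage payload this proc parses via JsonMessage.fromJsonNode (values are placeholders):

const exampleWakuMessageJson = """
  {
    "payload": "aGVsbG8=",
    "contentTopic": "/example/1/chat/proto",
    "version": 0,
    "timestamp": 0,
    "ephemeral": false
  }
"""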

proc waku_start(ctx: ptr Context,
                callback: WakuCallBack,
                userData: pointer): cint {.dynlib, exportc.} =

proc waku_start(
    ctx: ptr Context, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  ctx[].userData = userData
  ## TODO: handle the error
  discard waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.LIFECYCLE,
                      NodeLifecycleRequest.createShared(
                        NodeLifecycleMsgType.START_NODE))
    ctx,
    RequestType.LIFECYCLE,
    NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE),
  )

proc waku_stop(ctx: ptr Context,
               callback: WakuCallBack,
               userData: pointer): cint {.dynlib, exportc.} =
proc waku_stop(
    ctx: ptr Context, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  ctx[].userData = userData
  ## TODO: handle the error
  discard waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.LIFECYCLE,
                      NodeLifecycleRequest.createShared(
                        NodeLifecycleMsgType.STOP_NODE))
    ctx,
    RequestType.LIFECYCLE,
    NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP_NODE),
  )

proc waku_relay_subscribe(
  ctx: ptr Context,
  pubSubTopic: cstring,
  callback: WakuCallBack,
  userData: pointer): cint
  {.dynlib, exportc.} =

    ctx: ptr Context, pubSubTopic: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  ctx[].userData = userData

  let pst = pubSubTopic.alloc()
  var cb = relayEventCallback(ctx)
  let sendReqRes = waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.RELAY,
                      RelayRequest.createShared(RelayMsgType.SUBSCRIBE,
                                                PubsubTopic($pst),
                                                WakuRelayHandler(cb)))
    ctx,
    RequestType.RELAY,
    RelayRequest.createShared(
      RelayMsgType.SUBSCRIBE, PubsubTopic($pst), WakuRelayHandler(cb)
    ),
  )
  deallocShared(pst)

  if sendReqRes.isErr():
@ -303,22 +313,21 @@ proc waku_relay_subscribe(
  return RET_OK

proc waku_relay_unsubscribe(
  ctx: ptr Context,
  pubSubTopic: cstring,
  callback: WakuCallBack,
  userData: pointer): cint
  {.dynlib, exportc.} =

    ctx: ptr Context, pubSubTopic: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  ctx[].userData = userData

  let pst = pubSubTopic.alloc()

  let sendReqRes = waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.RELAY,
                      RelayRequest.createShared(RelayMsgType.SUBSCRIBE,
                                                PubsubTopic($pst),
                                                WakuRelayHandler(relayEventCallback(ctx))))
    ctx,
    RequestType.RELAY,
    RelayRequest.createShared(
      RelayMsgType.SUBSCRIBE,
      PubsubTopic($pst),
      WakuRelayHandler(relayEventCallback(ctx)),
    ),
  )
  deallocShared(pst)

  if sendReqRes.isErr():
@ -328,22 +337,22 @@ proc waku_relay_unsubscribe(

  return RET_OK

proc waku_connect(ctx: ptr Context,
                  peerMultiAddr: cstring,
                  timeoutMs: cuint,
                  callback: WakuCallBack,
                  userData: pointer): cint
                  {.dynlib, exportc.} =

proc waku_connect(
    ctx: ptr Context,
    peerMultiAddr: cstring,
    timeoutMs: cuint,
    callback: WakuCallBack,
    userData: pointer,
): cint {.dynlib, exportc.} =
  ctx[].userData = userData

  let connRes = waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.PEER_MANAGER,
                      PeerManagementRequest.createShared(
                        PeerManagementMsgType.CONNECT_TO,
                        $peerMultiAddr,
                        chronos.milliseconds(timeoutMs)))
    ctx,
    RequestType.PEER_MANAGER,
    PeerManagementRequest.createShared(
      PeerManagementMsgType.CONNECT_TO, $peerMultiAddr, chronos.milliseconds(timeoutMs)
    ),
  )
  if connRes.isErr():
    let msg = $connRes.error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
@ -351,14 +360,14 @@ proc waku_connect(ctx: ptr Context,

  return RET_OK

proc waku_store_query(ctx: ptr Context,
                      queryJson: cstring,
                      peerId: cstring,
                      timeoutMs: cint,
                      callback: WakuCallBack,
                      userData: pointer): cint
                      {.dynlib, exportc.} =

proc waku_store_query(
    ctx: ptr Context,
    queryJson: cstring,
    peerId: cstring,
    timeoutMs: cint,
    callback: WakuCallBack,
    userData: pointer,
): cint {.dynlib, exportc.} =
  ctx[].userData = userData

  ## TODO: implement the logic that makes the "self" node act as a Store client
@ -370,18 +379,16 @@ proc waku_store_query(ctx: ptr Context,

  return RET_OK

proc waku_listen_addresses(ctx: ptr Context,
                           callback: WakuCallBack,
                           userData: pointer): cint
                           {.dynlib, exportc.} =

proc waku_listen_addresses(
    ctx: ptr Context, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  ctx[].userData = userData

  let connRes = waku_thread.sendRequestToWakuThread(
                      ctx,
                      RequestType.DEBUG,
                      DebugNodeRequest.createShared(
                        DebugNodeMsgType.RETRIEVE_LISTENING_ADDRESSES))
    ctx,
    RequestType.DEBUG,
    DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_LISTENING_ADDRESSES),
  )
  if connRes.isErr():
    let msg = $connRes.error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)

@ -1,7 +1,4 @@

import
  std/[json,strformat,options]
import std/[json, strformat, options]
import
  libp2p/crypto/crypto,
  libp2p/crypto/secp,
@ -12,10 +9,9 @@ import
  ../../waku/node/config,
  ../events/json_base_event

proc parsePrivateKey(jsonNode: JsonNode,
                     privateKey: var PrivateKey,
                     errorResp: var string): bool =

proc parsePrivateKey(
    jsonNode: JsonNode, privateKey: var PrivateKey, errorResp: var string
): bool =
  if not jsonNode.contains("key") or jsonNode["key"].kind == JsonNodeKind.JNull:
    privateKey = PrivateKey.random(Secp256k1, newRng()[]).tryGet()
    return true
@ -36,10 +32,9 @@ proc parsePrivateKey(jsonNode: JsonNode,

  return true

proc parseListenAddr(jsonNode: JsonNode,
                     listenAddr: var IpAddress,
                     errorResp: var string): bool =

proc parseListenAddr(
    jsonNode: JsonNode, listenAddr: var IpAddress, errorResp: var string
): bool =
  if not jsonNode.contains("host"):
    errorResp = "host attribute is required"
    return false
@ -59,10 +54,7 @@ proc parseListenAddr(jsonNode: JsonNode,

  return true

proc parsePort(jsonNode: JsonNode,
               port: var int,
               errorResp: var string): bool =

proc parsePort(jsonNode: JsonNode, port: var int, errorResp: var string): bool =
  if not jsonNode.contains("port"):
    errorResp = "port attribute is required"
    return false
@ -75,10 +67,7 @@ proc parsePort(jsonNode: JsonNode,

  return true

proc parseRelay(jsonNode: JsonNode,
                relay: var bool,
                errorResp: var string): bool =

proc parseRelay(jsonNode: JsonNode, relay: var bool, errorResp: var string): bool =
  if not jsonNode.contains("relay"):
    errorResp = "relay attribute is required"
    return false
@ -91,16 +80,17 @@ proc parseRelay(jsonNode: JsonNode,

  return true

proc parseStore(jsonNode: JsonNode,
                store: var bool,
                storeNode: var string,
                storeRetentionPolicy: var string,
                storeDbUrl: var string,
                storeVacuum: var bool,
                storeDbMigration: var bool,
                storeMaxNumDbConnections: var int,
                errorResp: var string): bool =

proc parseStore(
    jsonNode: JsonNode,
    store: var bool,
    storeNode: var string,
    storeRetentionPolicy: var string,
    storeDbUrl: var string,
    storeVacuum: var bool,
    storeDbMigration: var bool,
    storeMaxNumDbConnections: var int,
    errorResp: var string,
): bool =
  if not jsonNode.contains("store"):
    ## the store parameter is not required. By default it is disabled
    store = false
@ -163,20 +153,21 @@ proc parseTopics(jsonNode: JsonNode, topics: var seq[string]) =
  else:
    topics = @["/waku/2/default-waku/proto"]

proc parseConfig*(configNodeJson: string,
                  privateKey: var PrivateKey,
                  netConfig: var NetConfig,
                  relay: var bool,
                  topics: var seq[string],
                  store: var bool,
                  storeNode: var string,
                  storeRetentionPolicy: var string,
                  storeDbUrl: var string,
                  storeVacuum: var bool,
                  storeDbMigration: var bool,
                  storeMaxNumDbConnections: var int,
                  errorResp: var string): bool {.raises: [].} =

proc parseConfig*(
    configNodeJson: string,
    privateKey: var PrivateKey,
    netConfig: var NetConfig,
    relay: var bool,
    topics: var seq[string],
    store: var bool,
    storeNode: var string,
    storeRetentionPolicy: var string,
    storeDbUrl: var string,
    storeVacuum: var bool,
    storeDbMigration: var bool,
    storeMaxNumDbConnections: var int,
    errorResp: var string,
): bool {.raises: [].} =
  if configNodeJson.len == 0:
    errorResp = "The configNodeJson is empty"
    return false
@ -215,19 +206,18 @@ proc parseConfig*(configNodeJson: string,
    errorResp = "Exception calling parsePort: " & getCurrentExceptionMsg()
    return false

  let natRes = setupNat("any", clientId,
                        Port(uint16(port)),
                        Port(uint16(port)))
  let natRes = setupNat("any", clientId, Port(uint16(port)), Port(uint16(port)))
  if natRes.isErr():
    errorResp = "failed to setup NAT: " & $natRes.error
    return false

  let (extIp, extTcpPort, _) = natRes.get()

  let extPort = if extIp.isSome() and extTcpPort.isNone():
                  some(Port(uint16(port)))
                else:
                  extTcpPort
  let extPort =
    if extIp.isSome() and extTcpPort.isNone():
      some(Port(uint16(port)))
    else:
      extTcpPort

  # relay
  try:
@ -246,26 +236,26 @@ proc parseConfig*(configNodeJson: string,

  # store
  try:
    if not parseStore(jsonNode, store, storeNode, storeRetentionPolicy, storeDbUrl,
                      storeVacuum, storeDbMigration, storeMaxNumDbConnections, errorResp):
    if not parseStore(
      jsonNode, store, storeNode, storeRetentionPolicy, storeDbUrl, storeVacuum,
      storeDbMigration, storeMaxNumDbConnections, errorResp,
    ):
      return false
  except Exception, KeyError:
    errorResp = "Exception calling parseStore: " & getCurrentExceptionMsg()
    return false

  let wakuFlags = CapabilitiesBitfield.init(
        lightpush = false,
        filter = false,
        store = false,
        relay = relay
      )
    lightpush = false, filter = false, store = false, relay = relay
  )

  let netConfigRes = NetConfig.init(
      bindIp = listenAddr,
      bindPort = Port(uint16(port)),
      extIp = extIp,
      extPort = extPort,
      wakuFlags = some(wakuFlags))
    bindIp = listenAddr,
    bindPort = Port(uint16(port)),
    extIp = extIp,
    extPort = extPort,
    wakuFlags = some(wakuFlags),
  )

  if netConfigRes.isErr():
    errorResp = "Error creating NetConfig: " & $netConfigRes.error

@ -1,26 +1,14 @@
import std/[options, sequtils, strutils, json]
import chronicles, chronos, stew/results, stew/shims/net
import ../../../../waku/node/waku_node, ../../../alloc

import
  std/[options,sequtils,strutils,json]
import
  chronicles,
  chronos,
  stew/results,
  stew/shims/net
import
  ../../../../waku/node/waku_node,
  ../../../alloc
type DebugNodeMsgType* = enum
  RETRIEVE_LISTENING_ADDRESSES

type
  DebugNodeMsgType* = enum
    RETRIEVE_LISTENING_ADDRESSES

type
  DebugNodeRequest* = object
    operation: DebugNodeMsgType

proc createShared*(T: type DebugNodeRequest,
                   op: DebugNodeMsgType): ptr type T =
type DebugNodeRequest* = object
  operation: DebugNodeMsgType

proc createShared*(T: type DebugNodeRequest, op: DebugNodeMsgType): ptr type T =
  var ret = createShared(T)
  ret[].operation = op
  return ret
@ -31,14 +19,14 @@ proc destroyShared(self: ptr DebugNodeRequest) =
proc getMultiaddresses(node: WakuNode): seq[string] =
  return node.info().listenAddresses

proc process*(self: ptr DebugNodeRequest,
              node: WakuNode): Future[Result[string, string]] {.async.} =
proc process*(
    self: ptr DebugNodeRequest, node: WakuNode
): Future[Result[string, string]] {.async.} =
  defer:
    destroyShared(self)

  defer: destroyShared(self)

  case self.operation:
  of RETRIEVE_LISTENING_ADDRESSES:
    return ok($( %* node.getMultiaddresses()))
  case self.operation
  of RETRIEVE_LISTENING_ADDRESSES:
    return ok($(%*node.getMultiaddresses()))

  return err("unsupported operation in DebugNodeRequest")

@ -1,11 +1,5 @@

import
  std/options
import
  chronos,
  chronicles,
  stew/results,
  stew/shims/net
import std/options
import chronos, chronicles, stew/results, stew/shims/net
import
  ../../../../waku/common/enr/builder,
  ../../../../waku/waku_enr/capabilities,
@ -24,25 +18,22 @@ import
  ../../../../waku/waku_relay/protocol,
  ../../../../waku/waku_store,
  ../../../../waku/factory/builder,
  ../../../events/[json_message_event,json_base_event],
  ../../../events/[json_message_event, json_base_event],
  ../../../alloc,
  ../../config

type
  NodeLifecycleMsgType* = enum
    CREATE_NODE
    START_NODE
    STOP_NODE
type NodeLifecycleMsgType* = enum
  CREATE_NODE
  START_NODE
  STOP_NODE

type
  NodeLifecycleRequest* = object
    operation: NodeLifecycleMsgType
    configJson: cstring ## Only used in 'CREATE_NODE' operation

proc createShared*(T: type NodeLifecycleRequest,
                   op: NodeLifecycleMsgType,
                   configJson: cstring = ""): ptr type T =
type NodeLifecycleRequest* = object
  operation: NodeLifecycleMsgType
  configJson: cstring ## Only used in 'CREATE_NODE' operation

proc createShared*(
    T: type NodeLifecycleRequest, op: NodeLifecycleMsgType, configJson: cstring = ""
): ptr type T =
  var ret = createShared(T)
  ret[].operation = op
  ret[].configJson = configJson.alloc()
@ -52,14 +43,15 @@ proc destroyShared(self: ptr NodeLifecycleRequest) =
  deallocShared(self[].configJson)
  deallocShared(self)

proc configureStore(node: WakuNode,
                    storeNode: string,
                    storeRetentionPolicy: string,
                    storeDbUrl: string,
                    storeVacuum: bool,
                    storeDbMigration: bool,
                    storeMaxNumDbConnections: int):
                    Future[Result[void, string]] {.async.} =
proc configureStore(
    node: WakuNode,
    storeNode: string,
    storeRetentionPolicy: string,
    storeDbUrl: string,
    storeVacuum: bool,
    storeDbMigration: bool,
    storeMaxNumDbConnections: int,
): Future[Result[void, string]] {.async.} =
  ## This snippet is extracted/duplicated from the app.nim file

  var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
@ -70,11 +62,10 @@ proc configureStore(node: WakuNode,
    discard

  # Archive setup
  let archiveDriverRes = await ArchiveDriver.new(storeDbUrl,
                                                 storeVacuum,
                                                 storeDbMigration,
                                                 storeMaxNumDbConnections,
                                                 onFatalErrorAction)
  let archiveDriverRes = await ArchiveDriver.new(
    storeDbUrl, storeVacuum, storeDbMigration, storeMaxNumDbConnections,
    onFatalErrorAction,
  )
  if archiveDriverRes.isErr():
    return err("failed to setup archive driver: " & archiveDriverRes.error)

@ -82,8 +73,7 @@ proc configureStore(node: WakuNode,
  if retPolicyRes.isErr():
    return err("failed to create retention policy: " & retPolicyRes.error)

  let mountArcRes = node.mountArchive(archiveDriverRes.get(),
                                      retPolicyRes.get())
  let mountArcRes = node.mountArchive(archiveDriverRes.get(), retPolicyRes.get())
  if mountArcRes.isErr():
    return err("failed to mount waku archive protocol: " & mountArcRes.error)

@ -103,12 +93,9 @@ proc configureStore(node: WakuNode,

  return ok()

proc createNode(configJson: cstring):
                Future[Result[WakuNode, string]] {.async.} =

proc createNode(configJson: cstring): Future[Result[WakuNode, string]] {.async.} =
  var privateKey: PrivateKey
  var netConfig = NetConfig.init(parseIpAddress("127.0.0.1"),
                                 Port(60000'u16)).value
  var netConfig = NetConfig.init(parseIpAddress("127.0.0.1"), Port(60000'u16)).value
  ## relay
  var relay: bool
  var topics = @[""]
@ -125,19 +112,21 @@ proc createNode(configJson: cstring):
  var errorResp: string

  try:
    if not parseConfig($configJson,
                       privateKey,
                       netConfig,
                       relay,
                       topics,
                       store,
                       storeNode,
                       storeRetentionPolicy,
                       storeDbUrl,
                       storeVacuum,
                       storeDbMigration,
                       storeMaxNumDbConnections,
                       errorResp):
    if not parseConfig(
      $configJson,
      privateKey,
      netConfig,
      relay,
      topics,
      store,
      storeNode,
      storeRetentionPolicy,
      storeDbUrl,
      storeVacuum,
      storeDbMigration,
      storeMaxNumDbConnections,
      errorResp,
    ):
      return err(errorResp)
  except Exception:
    return err("exception calling parseConfig: " & getCurrentExceptionMsg())
@ -145,9 +134,7 @@ proc createNode(configJson: cstring):
  var enrBuilder = EnrBuilder.init(privateKey)

  enrBuilder.withIpAddressAndPorts(
    netConfig.enrIp,
    netConfig.enrPort,
    netConfig.discv5UdpPort
    netConfig.enrIp, netConfig.enrPort, netConfig.discv5UdpPort
  )

  if netConfig.wakuFlags.isSome():
@ -165,8 +152,8 @@ proc createNode(configJson: cstring):
      if recordRes.isErr():
        let msg = "Error building enr record: " & $recordRes.error
        return err(msg)

      else: recordRes.get()
      else:
        recordRes.get()

  ## TODO: make the next const configurable from 'configJson'.
  const MAX_CONNECTIONS = 50.int
@ -176,9 +163,7 @@ proc createNode(configJson: cstring):
  builder.withNodeKey(privateKey)
  builder.withRecord(record)
  builder.withNetworkConfiguration(netConfig)
  builder.withSwitchConfiguration(
    maxConnections = some(MAX_CONNECTIONS)
  )
  builder.withSwitchConfiguration(maxConnections = some(MAX_CONNECTIONS))

  let wakuNodeRes = builder.build()
  if wakuNodeRes.isErr():
@ -192,36 +177,35 @@ proc createNode(configJson: cstring):
  newNode.peerManager.start()

  if store:
    (await newNode.configureStore(storeNode,
                                  storeRetentionPolicy,
                                  storeDbUrl,
                                  storeVacuum,
                                  storeDbMigration,
                                  storeMaxNumDbConnections)).isOkOr:
    (
      await newNode.configureStore(
        storeNode, storeRetentionPolicy, storeDbUrl, storeVacuum, storeDbMigration,
        storeMaxNumDbConnections,
      )
    ).isOkOr:
      return err("error configuring store: " & $error)

  return ok(newNode)

proc process*(self: ptr NodeLifecycleRequest,
              node: ptr WakuNode): Future[Result[string, string]] {.async.} =
proc process*(
    self: ptr NodeLifecycleRequest, node: ptr WakuNode
): Future[Result[string, string]] {.async.} =
  defer:
    destroyShared(self)

  defer: destroyShared(self)
  case self.operation
  of CREATE_NODE:
    let newNodeRes = await createNode(self.configJson)
    if newNodeRes.isErr():
      return err(newNodeRes.error)

  case self.operation:
    of CREATE_NODE:
      let newNodeRes = await createNode(self.configJson)
      if newNodeRes.isErr():
        return err(newNodeRes.error)

      node[] = newNodeRes.get()

    of START_NODE:
      await node[].start()

    of STOP_NODE:
      try:
        await node[].stop()
      except Exception:
        return err("exception stopping node: " & getCurrentExceptionMsg())
    node[] = newNodeRes.get()
  of START_NODE:
    await node[].start()
  of STOP_NODE:
    try:
      await node[].stop()
    except Exception:
      return err("exception stopping node: " & getCurrentExceptionMsg())

  return ok("")

@ -1,30 +1,21 @@
import std/[options, sequtils, strutils]
import chronicles, chronos, stew/results, stew/shims/net
import ../../../../waku/node/waku_node, ../../../alloc

import
  std/[options,sequtils,strutils]
import
  chronicles,
  chronos,
  stew/results,
  stew/shims/net
import
  ../../../../waku/node/waku_node,
  ../../../alloc
type PeerManagementMsgType* = enum
  CONNECT_TO

type
  PeerManagementMsgType* = enum
    CONNECT_TO

type
  PeerManagementRequest* = object
    operation: PeerManagementMsgType
    peerMultiAddr: cstring
    dialTimeout: Duration

proc createShared*(T: type PeerManagementRequest,
                   op: PeerManagementMsgType,
                   peerMultiAddr: string,
                   dialTimeout: Duration): ptr type T =
type PeerManagementRequest* = object
  operation: PeerManagementMsgType
  peerMultiAddr: cstring
  dialTimeout: Duration

proc createShared*(
    T: type PeerManagementRequest,
    op: PeerManagementMsgType,
    peerMultiAddr: string,
    dialTimeout: Duration,
): ptr type T =
  var ret = createShared(T)
  ret[].operation = op
  ret[].peerMultiAddr = peerMultiAddr.alloc()
@ -35,14 +26,13 @@ proc destroyShared(self: ptr PeerManagementRequest) =
  deallocShared(self[].peerMultiAddr)
  deallocShared(self)

proc connectTo(node: WakuNode,
               peerMultiAddr: string,
               dialTimeout: Duration): Result[void, string] =

proc connectTo(
    node: WakuNode, peerMultiAddr: string, dialTimeout: Duration
): Result[void, string] =
  let peers = (peerMultiAddr).split(",").mapIt(strip(it))

  # TODO: the dialTimeout is not being used at all!
  let connectFut = node.connectToNodes(peers, source="static")
  let connectFut = node.connectToNodes(peers, source = "static")
  while not connectFut.finished():
    poll()

@ -52,16 +42,16 @@ proc connectTo(node: WakuNode,

  return ok()

proc process*(self: ptr PeerManagementRequest,
              node: WakuNode): Future[Result[string, string]] {.async.} =
proc process*(
    self: ptr PeerManagementRequest, node: WakuNode
): Future[Result[string, string]] {.async.} =
  defer:
    destroyShared(self)

  defer: destroyShared(self)

  case self.operation:

    of CONNECT_TO:
      let ret = node.connectTo($self[].peerMultiAddr, self[].dialTimeout)
      if ret.isErr():
        return err(ret.error)
  case self.operation
  of CONNECT_TO:
    let ret = node.connectTo($self[].peerMultiAddr, self[].dialTimeout)
    if ret.isErr():
      return err(ret.error)

  return ok("")

@ -1,12 +1,5 @@

import
  std/[options,sequtils,strutils]
import
  chronicles,
  chronos,
  stew/byteutils,
  stew/results,
  stew/shims/net
import std/[options, sequtils, strutils]
import chronicles, chronos, stew/byteutils, stew/results, stew/shims/net
import
  ../../../../../waku/waku_core/message/message,
  ../../../../../waku/node/waku_node,
@ -16,48 +9,46 @@ import
  ../../../../../waku/waku_relay/protocol,
  ../../../../alloc

type
  RelayMsgType* = enum
    SUBSCRIBE
    UNSUBSCRIBE
    PUBLISH
type RelayMsgType* = enum
  SUBSCRIBE
  UNSUBSCRIBE
  PUBLISH

type
  ThreadSafeWakuMessage* = object
    payload: SharedSeq[byte]
    contentTopic: cstring
    meta: SharedSeq[byte]
    version: uint32
    timestamp: Timestamp
    ephemeral: bool
    when defined(rln):
      proof: SharedSeq[byte]
type ThreadSafeWakuMessage* = object
  payload: SharedSeq[byte]
  contentTopic: cstring
  meta: SharedSeq[byte]
  version: uint32
  timestamp: Timestamp
  ephemeral: bool
  when defined(rln):
    proof: SharedSeq[byte]

type
  RelayRequest* = object
    operation: RelayMsgType
    pubsubTopic: cstring
    relayEventCallback: WakuRelayHandler # not used in 'PUBLISH' requests
    message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests

proc createShared*(T: type RelayRequest,
                   op: RelayMsgType,
                   pubsubTopic: PubsubTopic,
                   relayEventCallback: WakuRelayHandler = nil,
                   m = WakuMessage()): ptr type T =
type RelayRequest* = object
  operation: RelayMsgType
  pubsubTopic: cstring
  relayEventCallback: WakuRelayHandler # not used in 'PUBLISH' requests
  message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests

proc createShared*(
    T: type RelayRequest,
    op: RelayMsgType,
    pubsubTopic: PubsubTopic,
    relayEventCallback: WakuRelayHandler = nil,
    m = WakuMessage(),
): ptr type T =
  var ret = createShared(T)
  ret[].operation = op
  ret[].pubsubTopic = pubsubTopic.alloc()
  ret[].relayEventCallback = relayEventCallback
  ret[].message = ThreadSafeWakuMessage(
      payload: allocSharedSeq(m.payload),
      contentTopic: m.contentTopic.alloc(),
      meta: allocSharedSeq(m.meta),
      version: m.version,
      timestamp: m.timestamp,
      ephemeral: m.ephemeral,
    )
    payload: allocSharedSeq(m.payload),
    contentTopic: m.contentTopic.alloc(),
    meta: allocSharedSeq(m.meta),
    version: m.version,
    timestamp: m.timestamp,
    ephemeral: m.ephemeral,
  )
  when defined(rln):
    ret[].message.proof = allocSharedSeq(m.proof)

@ -87,35 +78,31 @@ proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage =

  return wakuMessage

proc process*(self: ptr RelayRequest,
              node: ptr WakuNode): Future[Result[string, string]] {.async.} =

  defer: destroyShared(self)
proc process*(
    self: ptr RelayRequest, node: ptr WakuNode
): Future[Result[string, string]] {.async.} =
  defer:
    destroyShared(self)

  if node.wakuRelay.isNil():
    return err("Operation not supported without Waku Relay enabled.")

  case self.operation:
  case self.operation
  of SUBSCRIBE:
    # TO DO: properly perform 'subscribe'
    discard node.wakuRelay.subscribe($self.pubsubTopic, self.relayEventCallback)
  of UNSUBSCRIBE:
    # TODO: properly perform 'unsubscribe'
    node.wakuRelay.unsubscribeAll($self.pubsubTopic)
  of PUBLISH:
    let msg = self.message.toWakuMessage()
    let pubsubTopic = $self.pubsubTopic

    of SUBSCRIBE:
      # TO DO: properly perform 'subscribe'
      discard node.wakuRelay.subscribe($self.pubsubTopic, self.relayEventCallback)

    of UNSUBSCRIBE:
      # TODO: properly perform 'unsubscribe'
      node.wakuRelay.unsubscribeAll($self.pubsubTopic)

    of PUBLISH:
      let msg = self.message.toWakuMessage()
      let pubsubTopic = $self.pubsubTopic

      let numPeers = await node.wakuRelay.publish(pubsubTopic,
                                                  msg)
      if numPeers == 0:
        return err("Message not sent because no peers found.")

      elif numPeers > 0:
        let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex
        return ok(msgHash)
    let numPeers = await node.wakuRelay.publish(pubsubTopic, msg)
    if numPeers == 0:
      return err("Message not sent because no peers found.")
    elif numPeers > 0:
      let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex
      return ok(msgHash)

  return ok("")

@ -1,10 +1,5 @@

import
  std/[options,sequtils,strutils]
import
  chronos,
  stew/results,
  stew/shims/net
import std/[options, sequtils, strutils]
import chronos, stew/results, stew/shims/net
import
  ../../../../../waku/node/waku_node,
  ../../../../../waku/waku_archive/driver/builder,
@ -14,36 +9,34 @@ import
  ../../../../alloc,
  ../../../../callback

type
  StoreReqType* = enum
    REMOTE_QUERY ## to perform a query to another Store node
    LOCAL_QUERY ## to retrieve the data from 'self' node
type StoreReqType* = enum
  REMOTE_QUERY ## to perform a query to another Store node
  LOCAL_QUERY ## to retrieve the data from 'self' node

type
  StoreQueryRequest* = object
    queryJson: cstring
    peerAddr: cstring
    timeoutMs: cint
    storeCallback: WakuCallBack
type StoreQueryRequest* = object
  queryJson: cstring
  peerAddr: cstring
  timeoutMs: cint
  storeCallback: WakuCallBack

type
  StoreRequest* = object
    operation: StoreReqType
    storeReq: pointer
type StoreRequest* = object
  operation: StoreReqType
  storeReq: pointer

proc createShared*(T: type StoreRequest,
                   operation: StoreReqType,
                   request: pointer): ptr type T =
proc createShared*(
    T: type StoreRequest, operation: StoreReqType, request: pointer
): ptr type T =
  var ret = createShared(T)
  ret[].request = request
  return ret

proc createShared*(T: type StoreQueryRequest,
                   queryJson: cstring,
                   peerAddr: cstring,
                   timeoutMs: cint,
                   storeCallback: WakuCallBack = nil): ptr type T =

proc createShared*(
    T: type StoreQueryRequest,
    queryJson: cstring,
    peerAddr: cstring,
    timeoutMs: cint,
    storeCallback: WakuCallBack = nil,
): ptr type T =
  var ret = createShared(T)
  ret[].timeoutMs = timeoutMs
  ret[].queryJson = queryJson.alloc()
@ -56,20 +49,23 @@ proc destroyShared(self: ptr StoreQueryRequest) =
  deallocShared(self[].peerAddr)
  deallocShared(self)

proc process(self: ptr StoreQueryRequest,
             node: ptr WakuNode): Future[Result[string, string]] {.async.} =
  defer: destroyShared(self)
proc process(
    self: ptr StoreQueryRequest, node: ptr WakuNode
): Future[Result[string, string]] {.async.} =
  defer:
    destroyShared(self)

proc process*(self: ptr StoreRequest,
              node: ptr WakuNode): Future[Result[string, string]] {.async.} =
proc process*(
    self: ptr StoreRequest, node: ptr WakuNode
): Future[Result[string, string]] {.async.} =
  defer:
    deallocShared(self)

  defer: deallocShared(self)

  case self.operation:
    of REMOTE_QUERY:
      return await cast[ptr StoreQueryRequest](self[].storeReq).process(node)
    of LOCAL_QUERY:
      discard
      # cast[ptr StoreQueryRequest](request[].reqContent).process(node)
  case self.operation
  of REMOTE_QUERY:
    return await cast[ptr StoreQueryRequest](self[].storeReq).process(node)
  of LOCAL_QUERY:
    discard
    # cast[ptr StoreQueryRequest](request[].reqContent).process(node)

  return ok("")

@ -1,13 +1,9 @@

## This file contains the base message request type that will be handled.
## The requests are created by the main thread and processed by
## the Waku Thread.

import
  std/json,
  stew/results
import
  chronos
import std/json, stew/results
import chronos
import
  ../../../waku/node/waku_node,
  ./requests/node_lifecycle_request,
@ -16,50 +12,48 @@ import
  ./requests/protocols/store_request,
  ./requests/debug_node_request

type
  RequestType* {.pure.} = enum
    LIFECYCLE,
    PEER_MANAGER,
    RELAY,
    STORE,
    DEBUG,
type RequestType* {.pure.} = enum
  LIFECYCLE
  PEER_MANAGER
  RELAY
  STORE
  DEBUG

type
  InterThreadRequest* = object
    reqType: RequestType
    reqContent: pointer
type InterThreadRequest* = object
  reqType: RequestType
  reqContent: pointer

proc createShared*(T: type InterThreadRequest,
                   reqType: RequestType,
                   reqContent: pointer): ptr type T =
proc createShared*(
    T: type InterThreadRequest, reqType: RequestType, reqContent: pointer
): ptr type T =
  var ret = createShared(T)
  ret[].reqType = reqType
  ret[].reqContent = reqContent
  return ret

proc process*(T: type InterThreadRequest,
              request: ptr InterThreadRequest,
              node: ptr WakuNode):
              Future[Result[string, string]] {.async.} =
proc process*(
    T: type InterThreadRequest, request: ptr InterThreadRequest, node: ptr WakuNode
): Future[Result[string, string]] {.async.} =
  ## Processes the request and deallocates its memory
  defer: deallocShared(request)
  defer:
    deallocShared(request)

  echo "Request received: " & $request[].reqType

  let retFut =
    case request[].reqType
      of LIFECYCLE:
        cast[ptr NodeLifecycleRequest](request[].reqContent).process(node)
      of PEER_MANAGER:
        cast[ptr PeerManagementRequest](request[].reqContent).process(node[])
      of RELAY:
        cast[ptr RelayRequest](request[].reqContent).process(node)
      of STORE:
        cast[ptr StoreRequest](request[].reqContent).process(node)
      of DEBUG:
        cast[ptr DebugNodeRequest](request[].reqContent).process(node[])
    of LIFECYCLE:
      cast[ptr NodeLifecycleRequest](request[].reqContent).process(node)
    of PEER_MANAGER:
      cast[ptr PeerManagementRequest](request[].reqContent).process(node[])
    of RELAY:
      cast[ptr RelayRequest](request[].reqContent).process(node)
    of STORE:
      cast[ptr StoreRequest](request[].reqContent).process(node)
    of DEBUG:
      cast[ptr DebugNodeRequest](request[].reqContent).process(node[])

  return await retFut

proc `$`*(self: InterThreadRequest): string =
  return $self.reqType
  return $self.reqType
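
Putting the request and response halves together, a hedged sketch of the inter-thread round trip (mirroring what sendRequestToWakuThread and the worker loop do; `node` stands for the worker thread's WakuNode instance):

# Main thread: wrap a concrete request for transfer to the Waku thread.
let req = InterThreadRequest.createShared(
  RequestType.LIFECYCLE,
  NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE),
)
# Waku thread: process dispatches on reqType and deallocates `req`.
let res = waitFor InterThreadRequest.process(req, addr node)
# The result is converted into a transferable response and decoded back
# into a Result[string, string] on the main thread.
let resp = InterThreadResponse.createShared(res)
let finalRes = InterThreadResponse.process(resp)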

@ -1,26 +1,21 @@

## This file contains the base message response type that will be handled.
## The response will be created from the Waku Thread and processed in
## the main thread.

import
  std/json,
  stew/results
import
  ../../alloc
import std/json, stew/results
import ../../alloc

type
  ResponseType {.pure.} = enum
    OK,
    ERR,
type ResponseType {.pure.} = enum
  OK
  ERR

type
  InterThreadResponse* = object
    respType: ResponseType
    content: cstring
type InterThreadResponse* = object
  respType: ResponseType
  content: cstring

proc createShared*(T: type InterThreadResponse,
                   res: Result[string, string]): ptr type T =
proc createShared*(
    T: type InterThreadResponse, res: Result[string, string]
): ptr type T =
  ## Converts a `Result[string, string]` into a `ptr InterThreadResponse`
  ## so that it can be transferred to another thread in a safe way.

@ -35,9 +30,9 @@ proc createShared*(T: type InterThreadResponse,
    ret[].content = res.error.alloc()
    return ret

proc process*(T: type InterThreadResponse,
              resp: ptr InterThreadResponse):
              Result[string, string] =
proc process*(
    T: type InterThreadResponse, resp: ptr InterThreadResponse
): Result[string, string] =
  ## Converts the received `ptr InterThreadResponse` into a
  ## `Result[string, string]`. Notice that the response is expected to be
  ## allocated from the Waku Thread and deallocated by the main thread.
@ -47,7 +42,7 @@ proc process*(T: type InterThreadResponse,
  deallocShared(resp)

  case resp[].respType
    of OK:
      return ok($resp[].content)
    of ERR:
      return err($resp[].content)
  of OK:
    return ok($resp[].content)
  of ERR:
    return err($resp[].content)

@ -1,10 +1,8 @@

{.pragma: exported, exportc, cdecl, raises: [].}
{.pragma: callback, cdecl, raises: [], gcsafe.}
{.passc: "-fPIC".}

import
  std/[json,sequtils,times,strformat,options,atomics,strutils,os]
import std/[json, sequtils, times, strformat, options, atomics, strutils, os]
import
  chronicles,
  chronos,
@ -14,20 +12,19 @@ import
  stew/shims/net
import
  ../../../waku/node/waku_node,
  ../events/[json_message_event,json_base_event],
  ../events/[json_message_event, json_base_event],
  ./inter_thread_communication/waku_thread_request,
  ./inter_thread_communication/waku_thread_response

type
  Context* = object
    thread: Thread[(ptr Context)]
    reqChannel: ChannelSPSCSingle[ptr InterThreadRequest]
    reqSignal: ThreadSignalPtr
    respChannel: ChannelSPSCSingle[ptr InterThreadResponse]
    respSignal: ThreadSignalPtr
    userData*: pointer
    eventCallback*: pointer
    eventUserdata*: pointer
type Context* = object
  thread: Thread[(ptr Context)]
  reqChannel: ChannelSPSCSingle[ptr InterThreadRequest]
  reqSignal: ThreadSignalPtr
  respChannel: ChannelSPSCSingle[ptr InterThreadResponse]
  respSignal: ThreadSignalPtr
  userData*: pointer
  eventCallback*: pointer
  eventUserdata*: pointer

# To control when the thread is running
var running: Atomic[bool]
@ -40,7 +37,8 @@ var initialized: Atomic[bool]
proc waku_init() =
  if not initialized.exchange(true):
    NimMain() # Every Nim library needs to call `NimMain` exactly once
    when declared(setupForeignThreadGc): setupForeignThreadGc()
    when declared(setupForeignThreadGc):
      setupForeignThreadGc()
    when declared(nimGC_setStackBottom):
      var locals {.volatile, noinit.}: pointer
      locals = addr(locals)
@ -59,8 +57,7 @@ proc run(ctx: ptr Context) {.thread.} =
    waitFor ctx.reqSignal.wait()
    let recvOk = ctx.reqChannel.tryRecv(request)
    if recvOk == true:
      let resultResponse =
        waitFor InterThreadRequest.process(request, addr node)
      let resultResponse = waitFor InterThreadRequest.process(request, addr node)

      ## Converting a `Result` into a thread-safe transferable response type
      let threadSafeResp = InterThreadResponse.createShared(resultResponse)
@ -106,10 +103,9 @@ proc stopWakuThread*(ctx: ptr Context): Result[void, string] =
  freeShared(ctx)
  return ok()

proc sendRequestToWakuThread*(ctx: ptr Context,
                              reqType: RequestType,
                              reqContent: pointer): Result[string, string] =

proc sendRequestToWakuThread*(
    ctx: ptr Context, reqType: RequestType, reqContent: pointer
): Result[string, string] =
  let req = InterThreadRequest.createShared(reqType, reqContent)

  ## Sending the request
|
||||
|
||||
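For orientation, the consumer loop that `run` implements above can be summarised in one sketch; `trySend` on the response channel and `fireSync` on the signal are assumptions, as only `wait`/`tryRecv` appear in the diff itself:

  while running.load == true:
    waitFor ctx.reqSignal.wait()
    var request: ptr InterThreadRequest
    if ctx.reqChannel.tryRecv(request):
      let resp = InterThreadResponse.createShared(
        waitFor InterThreadRequest.process(request, addr node)
      )
      discard ctx.respChannel.trySend(resp)  # hand the response back...
      discard ctx.respSignal.fireSync()      # ...and wake the main thread
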
@@ -1,4 +1,5 @@
const ContentScriptVersion_1* = """
const ContentScriptVersion_1* =
  """
CREATE TABLE IF NOT EXISTS messages (
    pubsubTopic VARCHAR NOT NULL,
    contentTopic VARCHAR NOT NULL,

@@ -1,4 +1,5 @@
const ContentScriptVersion_2* = """
const ContentScriptVersion_2* =
  """
ALTER TABLE IF EXISTS messages_backup RENAME TO messages;
ALTER TABLE messages RENAME TO messages_backup;
ALTER TABLE messages_backup DROP CONSTRAINT messageIndex;

@@ -1,37 +1,22 @@
import content_script_version_1, content_script_version_2

import
  content_script_version_1,
  content_script_version_2
type MigrationScript* = object
  version*: int
  scriptContent*: string

type
  MigrationScript* = object
    version*: int
    scriptContent*: string
proc init*(T: type MigrationScript, targetVersion: int, scriptContent: string): T =
  return MigrationScript(targetVersion: targetVersion, scriptContent: scriptContent)

proc init*(T: type MigrationScript,
           targetVersion: int,
           scriptContent: string): T =
const PgMigrationScripts* =
  @[
    MigrationScript(version: 1, scriptContent: ContentScriptVersion_1),
    MigrationScript(version: 2, scriptContent: ContentScriptVersion_2),
  ]

  return MigrationScript(
    targetVersion: targetVersion,
    scriptContent: scriptContent)

const PgMigrationScripts* = @[
  MigrationScript(
    version: 1,
    scriptContent: ContentScriptVersion_1),
  MigrationScript(
    version: 2,
    scriptContent: ContentScriptVersion_2)
]

proc getMigrationScripts*(currentVersion: int64,
                          targetVersion: int64): seq[string] =
proc getMigrationScripts*(currentVersion: int64, targetVersion: int64): seq[string] =
  var ret = newSeq[string]()
  var v = currentVersion
  while v < targetVersion:
    ret.add(PgMigrationScripts[v].scriptContent)
    v.inc()
  return ret

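A quick illustration of `getMigrationScripts` as defined above: migrating a fresh database (version 0) to version 2 walks `PgMigrationScripts` in order, so

  let scripts = getMigrationScripts(currentVersion = 0, targetVersion = 2)
  assert scripts == @[ContentScriptVersion_1, ContentScriptVersion_2]
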
@@ -21,11 +21,10 @@ import

const os* {.strdefine.} = ""
when os == "Linux" and
    # GitHub only supports container actions on Linux
    # and we need to start a postgres database in a docker container
    defined(postgres):
  import
    ./waku_archive/test_driver_postgres_query, ./waku_archive/test_driver_postgres
    # GitHub only supports container actions on Linux
    # and we need to start a postgres database in a docker container
    defined(postgres):
  import ./waku_archive/test_driver_postgres_query, ./waku_archive/test_driver_postgres

# Waku store test suite
import

@@ -1,37 +1,31 @@

{.used.}

import
  std/strutils,
  stew/[results, byteutils],
  testutils/unittests
import
  ../../waku/common/base64

import std/strutils, stew/[results, byteutils], testutils/unittests
import ../../waku/common/base64

suite "Waku Common - stew base64 wrapper":
  const TestData = @[
    # Test vectors from RFC 4648
    # See: https://datatracker.ietf.org/doc/html/rfc4648#section-10
    ("", Base64String("")),
    ("f", Base64String("Zg==")),
    ("fo", Base64String("Zm8=")),
    ("foo", Base64String("Zm9v")),
    ("foob", Base64String("Zm9vYg==")),
    ("fooba", Base64String("Zm9vYmE=")),
    ("foobar", Base64String("Zm9vYmFy")),
  const TestData =
    @[
      # Test vectors from RFC 4648
      # See: https://datatracker.ietf.org/doc/html/rfc4648#section-10
      ("", Base64String("")),
      ("f", Base64String("Zg==")),
      ("fo", Base64String("Zm8=")),
      ("foo", Base64String("Zm9v")),
      ("foob", Base64String("Zm9vYg==")),
      ("fooba", Base64String("Zm9vYmE=")),
      ("foobar", Base64String("Zm9vYmFy")),

    # Custom test vectors
    ("\x01", Base64String("AQ==")),
    ("\x13", Base64String("Ew==")),
    ("\x01\x02\x03\x04", Base64String("AQIDBA=="))
  ]
      # Custom test vectors
      ("\x01", Base64String("AQ==")),
      ("\x13", Base64String("Ew==")),
      ("\x01\x02\x03\x04", Base64String("AQIDBA==")),
    ]

  for (plaintext, encoded) in TestData:

    test "encode into base64 (" & escape(plaintext) & " -> \"" & string(encoded) & "\")":
      ## Given
      let data = plaintext
      let data = plaintext

      ## When
      let encodedData = base64.encode(data)
@@ -40,7 +34,6 @@ suite "Waku Common - stew base64 wrapper":
      check:
        encodedData == encoded

    test "decode from base64 (\"" & string(encoded) & "\" -> " & escape(plaintext) & ")":
      ## Given
      let data = encoded
@@ -55,4 +48,3 @@ suite "Waku Common - stew base64 wrapper":
      let decoded = decodedRes.tryGet()
      check:
        decoded == toBytes(plaintext)

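The wrapper this suite exercises can be summarised with the RFC 4648 "foobar" vector from `TestData`; the exact return type of `decode` is inferred from the assertions above:

  let encoded = base64.encode("foobar")
  assert encoded == Base64String("Zm9vYmFy")
  let decodedRes = base64.decode(encoded)
  assert decodedRes.tryGet() == toBytes("foobar")
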
@@ -15,31 +15,28 @@ import
type ConfResult[T] = Result[T, string]

type TestConf = object
  configFile* {.
    desc: "Configuration file path"
    name: "config-file" }: Option[InputFile]

  testFile* {.
    desc: "Configuration test file path"
    name: "test-file" }: Option[InputFile]
  configFile* {.desc: "Configuration file path", name: "config-file".}:
    Option[InputFile]

  listenAddress* {.
    defaultValue: parseIpAddress("127.0.0.1"),
    desc: "Listening address",
    name: "listen-address"}: IpAddress
  testFile* {.desc: "Configuration test file path", name: "test-file".}:
    Option[InputFile]

  tcpPort* {.
    desc: "TCP listening port",
    defaultValue: 60000,
    name: "tcp-port" }: Port
  listenAddress* {.
    defaultValue: parseIpAddress("127.0.0.1"),
    desc: "Listening address",
    name: "listen-address"
  .}: IpAddress

  tcpPort* {.desc: "TCP listening port", defaultValue: 60000, name: "tcp-port".}: Port

{.push warning[ProveInit]: off.}

proc load*(T: type TestConf, prefix: string): ConfResult[T] =
  try:
    let conf = TestConf.load(
      secondarySources = proc (conf: TestConf, sources: auto)
                         {.gcsafe, raises: [ConfigurationError].} =
      secondarySources = proc(
          conf: TestConf, sources: auto
      ) {.gcsafe, raises: [ConfigurationError].} =
        sources.addConfigFile(Envvar, InputFile(prefix))
    )
    ok(conf)
@@ -52,7 +49,7 @@ suite "nim-confutils - envvar":
  test "load configuration from environment variables":
    ## Given
    let prefix = "test-prefix"

    let
      listenAddress = "1.1.1.1"
      tcpPort = "8080"
@@ -62,7 +59,7 @@ suite "nim-confutils - envvar":
    os.putEnv("TEST_PREFIX_CONFIG_FILE", configFile)
    os.putEnv("TEST_PREFIX_LISTEN_ADDRESS", listenAddress)
    os.putEnv("TEST_PREFIX_TCP_PORT", tcpPort)

    let confLoadRes = TestConf.load(prefix)

    ## Then
@@ -72,7 +69,7 @@ suite "nim-confutils - envvar":
    check:
      conf.listenAddress == parseIpAddress(listenAddress)
      conf.tcpPort == Port(8080)

      conf.configFile.isSome()
      conf.configFile.get().string == configFile

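Condensed, the env-var loading path tested above reduces to this sketch (prefix handling exactly as asserted in the test; nothing beyond the shown API is assumed):

  os.putEnv("TEST_PREFIX_TCP_PORT", "8080")
  let conf = TestConf.load("test-prefix").tryGet()
  assert conf.tcpPort == Port(8080)
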
@@ -1,17 +1,9 @@
{.used.}

import
  std/options,
  stew/results,
  stew/shims/net,
  testutils/unittests
import
  ../../waku/common/enr,
  ../testlib/wakucore

import std/options, stew/results, stew/shims/net, testutils/unittests
import ../../waku/common/enr, ../testlib/wakucore

suite "nim-eth ENR - builder and typed record":

  test "Non-supported private key (ECDSA)":
    ## Given
    let privateKey = generateEcdsaKey()
@@ -45,28 +37,28 @@ suite "nim-eth ENR - builder and typed record":
      publicKey.isSome()
      @(publicKey.get()) == expectedPubKey

suite "nim-eth ENR - Ext: IP address and TCP/UDP ports":

  test "EIP-778 test vector":
    ## Given
    # Test vector from EIP-778
    # See: https://eips.ethereum.org/EIPS/eip-778#test-vectors
    let expectedEnr = "-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04j" &
                      "RzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJ" &
                      "c2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0x" &
                      "OIN1ZHCCdl8"
    let expectedEnr =
      "-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04j" &
      "RzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJ" &
      "c2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0x" & "OIN1ZHCCdl8"

    let
      seqNum = 1u64
      privateKey = ethSecp256k1Key("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
      privateKey = ethSecp256k1Key(
        "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
      )

      enrIpAddr = parseIpAddress("127.0.0.1")
      enrUdpPort = Port(30303)

    ## When
    var builder = EnrBuilder.init(privateKey, seqNum)
    builder.withIpAddressAndPorts(ipAddr=some(enrIpAddr), udpPort=some(enrUdpPort))
    builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), udpPort = some(enrUdpPort))

    let enrRes = builder.build()

@@ -89,10 +81,7 @@ suite "nim-eth ENR - Ext: IP address and TCP/UDP ports":

    ## When
    var builder = EnrBuilder.init(privateKey, seqNum)
    builder.withIpAddressAndPorts(
      ipAddr=some(enrIpAddr),
      tcpPort=some(enrTcpPort),
    )
    builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), tcpPort = some(enrTcpPort))

    let enrRes = builder.build()

@@ -119,10 +108,7 @@ suite "nim-eth ENR - Ext: IP address and TCP/UDP ports":

    ## When
    var builder = EnrBuilder.init(privateKey, seqNum)
    builder.withIpAddressAndPorts(
      ipAddr=some(enrIpAddr),
      udpPort=some(enrUdpPort),
    )
    builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), udpPort = some(enrUdpPort))

    let enrRes = builder.build()

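The builder flow repeated in the three tests above reduces to the sketch below, restating the EIP-778 vector; `build()` returning a Result is inferred from how `enrRes` is used:

  var builder = EnrBuilder.init(privateKey, 1u64)
  builder.withIpAddressAndPorts(
    ipAddr = some(parseIpAddress("127.0.0.1")), udpPort = some(Port(30303))
  )
  let record = builder.build().tryGet()
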
@@ -1,20 +1,17 @@
{.used.}

import
  testutils/unittests
import
  ../../waku/common/envvar_serialization/utils

import testutils/unittests
import ../../waku/common/envvar_serialization/utils

suite "nim-envvar-serialization - utils":
  test "construct env var key":
    ## Given
    let prefix = "some-prefix"
    let name = @["db-url"]

    ## When
    let key = constructKey(prefix, name)

    ## Then
    check:
      key == "SOME_PREFIX_DB_URL"
      key == "SOME_PREFIX_DB_URL"

@@ -1,10 +1,7 @@
{.used.}

import
  testutils/unittests,
  stew/results
import
  ../../waku/common/utils/parse_size_units
import testutils/unittests, stew/results
import ../../waku/common/utils/parse_size_units

suite "Size serialization test":
  test "parse normal sizes":
@@ -104,4 +101,4 @@ suite "Size serialization test":
    assert sizeInBytesRes.isErr(), "The size should be considered incorrect"

    sizeInBytesRes = parseMsgSize("15..0 KiB")
    assert sizeInBytesRes.isErr(), "The size should be considered incorrect"
    assert sizeInBytesRes.isErr(), "The size should be considered incorrect"

@@ -1,18 +1,14 @@

{.used.}

import
  testutils/unittests
import
  ../../waku/common/protobuf

import testutils/unittests
import ../../waku/common/protobuf

## Fixtures

const MaxTestRpcFieldLen = 5

type TestRpc = object
  testField*: string
  testField*: string

proc init(T: type TestRpc, field: string): T =
  T(testField: field)
@@ -40,11 +36,9 @@ proc decode(T: type TestRpc, buf: seq[byte]): ProtobufResult[T] =

  ok(TestRpc.init(field))

## Tests

suite "Waku Common - libp2p minprotobuf wrapper":

  test "serialize and deserialize - valid length field":
    ## Given
    let field = "12345"
@@ -82,10 +76,9 @@ suite "Waku Common - libp2p minprotobuf wrapper":
      error.kind == ProtobufErrorKind.MissingRequiredField
      error.field == "test_field"

  test "serialize and deserialize - invalid length field":
    ## Given
    let field = "123456" # field.len = MaxTestRpcFieldLen + 1
    let field = "123456" # field.len = MaxTestRpcFieldLen + 1

    let rpc = TestRpc.init(field)

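For context, the round trip the two tests above exercise can be sketched at the `TestRpc` level; `encode(rpc).buffer` is an assumption mirroring the `decode` signature shown in the hunk header:

  let rpc = TestRpc.init("12345")  # within MaxTestRpcFieldLen
  let decodedRes = TestRpc.decode(encode(rpc).buffer)
  assert decodedRes.isOk() and decodedRes.get().testField == "12345"
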
@@ -1,19 +1,12 @@
{.used.}

import
  std/[strutils, os],
  stew/results,
  testutils/unittests
import
  ../../waku/common/databases/db_sqlite {.all.},
  ../waku_archive/archive_utils

template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
import std/[strutils, os], stew/results, testutils/unittests
import ../../waku/common/databases/db_sqlite {.all.}, ../waku_archive/archive_utils

template sourceDir(): string =
  currentSourcePath.rsplit(DirSep, 1)[0]

suite "SQLite - migrations":

  test "set and get user version":
    ## Given
    let database = newSqliteDatabase()
@@ -36,16 +29,17 @@ suite "SQLite - migrations":

  test "filter and order migration script file paths":
    ## Given
    let paths = @[
      sourceDir / "00001_valid.up.sql",
      sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL",
      sourceDir / "00007_unorderedValid.up.sql",
      sourceDir / "00003_validRepeated.up.sql",
      sourceDir / "00003_validRepeated.up.sql",
      sourceDir / "00666_noMigrationScript.bmp",
      sourceDir / "00X00_invalidVersion.down.sql",
      sourceDir / "00008_notWithinVersionRange.up.sql",
    ]
    let paths =
      @[
        sourceDir / "00001_valid.up.sql",
        sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL",
        sourceDir / "00007_unorderedValid.up.sql",
        sourceDir / "00003_validRepeated.up.sql",
        sourceDir / "00003_validRepeated.up.sql",
        sourceDir / "00666_noMigrationScript.bmp",
        sourceDir / "00X00_invalidVersion.down.sql",
        sourceDir / "00008_notWithinVersionRange.up.sql",
      ]

    let
      lowerVersion = 0
@@ -53,29 +47,33 @@

    ## When
    var migrationSciptPaths: seq[string]
    migrationSciptPaths = filterMigrationScripts(paths, lowerVersion, highVersion, direction="up")
    migrationSciptPaths =
      filterMigrationScripts(paths, lowerVersion, highVersion, direction = "up")
    migrationSciptPaths = sortMigrationScripts(migrationSciptPaths)

    ## Then
    check:
      migrationSciptPaths == @[
        sourceDir / "00001_valid.up.sql",
        sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL",
        sourceDir / "00003_validRepeated.up.sql",
        sourceDir / "00003_validRepeated.up.sql",
        sourceDir / "00007_unorderedValid.up.sql"
      ]
      migrationSciptPaths ==
        @[
          sourceDir / "00001_valid.up.sql",
          sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL",
          sourceDir / "00003_validRepeated.up.sql",
          sourceDir / "00003_validRepeated.up.sql",
          sourceDir / "00007_unorderedValid.up.sql",
        ]

  test "break migration scripts into queries":
    ## Given
    let statement1 = """CREATE TABLE contacts1 (
    let statement1 =
      """CREATE TABLE contacts1 (
      contact_id INTEGER PRIMARY KEY,
      first_name TEXT NOT NULL,
      last_name TEXT NOT NULL,
      email TEXT NOT NULL UNIQUE,
      phone TEXT NOT NULL UNIQUE
    );"""
    let statement2 = """CREATE TABLE contacts2 (
    let statement2 =
      """CREATE TABLE contacts2 (
      contact_id INTEGER PRIMARY KEY,
      first_name TEXT NOT NULL,
      last_name TEXT NOT NULL,
@@ -89,18 +87,20 @@ suite "SQLite - migrations":

    ## Then
    check:
      statements == @[statement1, statement2]
      statements == @[statement1, statement2]

  test "break statements script into queries - empty statements":
    ## Given
    let statement1 = """CREATE TABLE contacts1 (
    let statement1 =
      """CREATE TABLE contacts1 (
      contact_id INTEGER PRIMARY KEY,
      first_name TEXT NOT NULL,
      last_name TEXT NOT NULL,
      email TEXT NOT NULL UNIQUE,
      phone TEXT NOT NULL UNIQUE
    );"""
    let statement2 = """CREATE TABLE contacts2 (
    let statement2 =
      """CREATE TABLE contacts2 (
      contact_id INTEGER PRIMARY KEY,
      first_name TEXT NOT NULL,
      last_name TEXT NOT NULL,

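The filter-and-sort behaviour asserted above, in one sketch: only `*.up.sql` paths whose version parses and falls within range survive the filter, and sorting then orders them by version:

  var ps = filterMigrationScripts(paths, lowerVersion, highVersion, direction = "up")
  ps = sortMigrationScripts(ps)
  # ps: 00001, 00002, 00003 (twice), 00007 - the .bmp, invalid-version and
  # out-of-range entries are gone
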
@@ -1,16 +1,11 @@
{.used.}

import
  testutils/unittests,
  chronos
import testutils/unittests, chronos

import
  ../testlib/wakunode,
  ../../waku/factory/node_factory,
  ../../waku/waku_node
import ../testlib/wakunode, ../../waku/factory/node_factory, ../../waku/waku_node

suite "Node Factory":
  test "Set up a node based on default configurations":
  test "Set up a node based on default configurations":
    let conf = defaultTestWakuNodeConf()

    let node = setupNode(conf).valueOr:
@@ -24,7 +19,7 @@ suite "Node Factory":
      not node.wakuStoreClient.isNil()
      not node.rendezvous.isNil()

  test "Set up a node with Store enabled":
  test "Set up a node with Store enabled":
    var conf = defaultTestWakuNodeConf()
    conf.store = true

@@ -36,32 +31,33 @@ suite "Node Factory":
      not node.wakuStore.isNil()
      not node.wakuArchive.isNil()

  test "Set up a node with Filter enabled":
    var conf = defaultTestWakuNodeConf()
    conf.filter = true
  test "Set up a node with Filter enabled":
    var conf = defaultTestWakuNodeConf()
    conf.filter = true

    let node = setupNode(conf).valueOr:
      raiseAssert error
    let node = setupNode(conf).valueOr:
      raiseAssert error

    check:
      not node.isNil()
      not node.wakuFilter.isNil()
    check:
      not node.isNil()
      not node.wakuFilter.isNil()

  test "Start a node based on default configurations":
    let conf = defaultTestWakuNodeConf()
  test "Start a node based on default configurations":
    let conf = defaultTestWakuNodeConf()

    let node = setupNode(conf).valueOr:
      raiseAssert error
    let node = setupNode(conf).valueOr:
      raiseAssert error

    assert not node.isNil(), "Node can't be nil"
    assert not node.isNil(), "Node can't be nil"

    let startRes = catch: (waitFor startNode(node, conf))
    let startRes = catch:
      (waitFor startNode(node, conf))

    assert not startRes.isErr(), "Exception starting node"
    assert startRes.get().isOk(), "Error starting node " & startRes.get().error
    assert not startRes.isErr(), "Exception starting node"
    assert startRes.get().isOk(), "Error starting node " & startRes.get().error

    check:
      node.started == true
    check:
      node.started == true

    ## Cleanup
    waitFor node.stop()
    ## Cleanup
    waitFor node.stop()

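The lifecycle the suite above walks through, reduced to its essentials (all calls taken verbatim from the tests):

  var conf = defaultTestWakuNodeConf()
  conf.store = true
  let node = setupNode(conf).valueOr:
    raiseAssert error
  let startRes = waitFor startNode(node, conf)
  assert startRes.isOk(), "Error starting node"
  waitFor node.stop()
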
@@ -17,9 +17,7 @@ suite "PeerStorage":

  suite "getAll":
    test "unimplemented":
      let
        emptyClosure =
          proc(remotePeerInfo: RemotePeerInfo) =
            discard
      let emptyClosure = proc(remotePeerInfo: RemotePeerInfo) =
        discard
      check:
        peerStorage.getAll(emptyClosure) == PeerStorageResult[void].err("Unimplemented")

@@ -13,23 +13,22 @@ import
  ../../../../waku/node/peer_manager/peer_store/waku_peer_storage

proc `==`(a, b: RemotePeerInfo): bool =
  let
    comparisons =
      @[
        a.peerId == b.peerId,
        a.addrs == b.addrs,
        a.enr == b.enr,
        a.protocols == b.protocols,
        a.agent == b.agent,
        a.protoVersion == b.protoVersion,
        a.publicKey == b.publicKey,
        a.connectedness == b.connectedness,
        a.disconnectTime == b.disconnectTime,
        a.origin == b.origin,
        a.direction == b.direction,
        a.lastFailedConn == b.lastFailedConn,
        a.numberFailedConn == b.numberFailedConn
      ]
  let comparisons =
    @[
      a.peerId == b.peerId,
      a.addrs == b.addrs,
      a.enr == b.enr,
      a.protocols == b.protocols,
      a.agent == b.agent,
      a.protoVersion == b.protoVersion,
      a.publicKey == b.publicKey,
      a.connectedness == b.connectedness,
      a.disconnectTime == b.disconnectTime,
      a.origin == b.origin,
      a.direction == b.direction,
      a.lastFailedConn == b.lastFailedConn,
      a.numberFailedConn == b.numberFailedConn,
    ]

  allIt(comparisons, it == true)

@@ -65,19 +64,18 @@ suite "Protobuf Serialisation":
  suite "encode":
    test "simple":
      # Given the expected bytes representation of a valid RemotePeerInfo
      let
        expectedBuffer: seq[byte] =
          @[
            10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40,
            145, 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232,
            170, 74, 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144,
            34, 95, 8, 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6,
            8, 42, 134, 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106,
            224, 232, 245, 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22,
            42, 75, 201, 1, 216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37,
            231, 29, 104, 81, 81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206,
            192, 52, 233, 247, 124, 64, 158, 98, 40, 0, 48, 0
          ]
      let expectedBuffer: seq[byte] =
        @[
          10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145,
          217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74,
          141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8,
          3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134,
          72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245,
          213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1,
          216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81,
          81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247,
          124, 64, 158, 98, 40, 0, 48, 0,
        ]

      # When converting a valid RemotePeerInfo to a ProtoBuffer
      let encodedRemotePeerInfo = encode(remotePeerInfo).get()
@@ -92,19 +90,18 @@ suite "Protobuf Serialisation":
  suite "decode":
    test "simple":
      # Given the bytes representation of a valid RemotePeerInfo
      let
        buffer: seq[byte] =
          @[
            10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40,
            145, 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232,
            170, 74, 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144,
            34, 95, 8, 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6,
            8, 42, 134, 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106,
            224, 232, 245, 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22,
            42, 75, 201, 1, 216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37,
            231, 29, 104, 81, 81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206,
            192, 52, 233, 247, 124, 64, 158, 98, 40, 0, 48, 0
          ]
      let buffer: seq[byte] =
        @[
          10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145,
          217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74,
          141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8,
          3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134,
          72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245,
          213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1,
          216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81,
          81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247,
          124, 64, 158, 98, 40, 0, 48, 0,
        ]

      # When converting a valid buffer to RemotePeerInfo
      let decodedRemotePeerInfo = RemotePeerInfo.decode(buffer).get()

|
||||
node/waku_node,
|
||||
waku_filter_v2,
|
||||
waku_filter_v2/client,
|
||||
waku_filter_v2/subscriptions
|
||||
waku_filter_v2/subscriptions,
|
||||
],
|
||||
../testlib/[common, wakucore, wakunode, testasync, futures, testutils]
|
||||
|
||||
@ -34,11 +34,10 @@ suite "Waku Filter - End to End":
|
||||
|
||||
asyncSetup:
|
||||
pushHandlerFuture = newFuture[(string, WakuMessage)]()
|
||||
messagePushHandler =
|
||||
proc(pubsubTopic: PubsubTopic, message: WakuMessage): Future[void] {.
|
||||
async, closure, gcsafe
|
||||
.} =
|
||||
pushHandlerFuture.complete((pubsubTopic, message))
|
||||
messagePushHandler = proc(
|
||||
pubsubTopic: PubsubTopic, message: WakuMessage
|
||||
): Future[void] {.async, closure, gcsafe.} =
|
||||
pushHandlerFuture.complete((pubsubTopic, message))
|
||||
|
||||
pubsubTopic = DefaultPubsubTopic
|
||||
contentTopic = DefaultContentTopic
|
||||
@ -72,11 +71,9 @@ suite "Waku Filter - End to End":
|
||||
|
||||
asyncTest "Client Node receives Push from Server Node, via Filter":
|
||||
# When a client node subscribes to a filter node
|
||||
let
|
||||
subscribeResponse =
|
||||
await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
let subscribeResponse = await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
|
||||
# Then the subscription is successful
|
||||
check:
|
||||
@ -96,11 +93,9 @@ suite "Waku Filter - End to End":
|
||||
pushedMsg1 == msg1
|
||||
|
||||
# When unsubscribing from the subscription
|
||||
let
|
||||
unsubscribeResponse =
|
||||
await client.filterUnsubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
let unsubscribeResponse = await client.filterUnsubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
|
||||
# Then the unsubscription is successful
|
||||
check:
|
||||
@ -121,11 +116,9 @@ suite "Waku Filter - End to End":
|
||||
await server.mountRelay()
|
||||
|
||||
# And valid filter subscription
|
||||
let
|
||||
subscribeResponse =
|
||||
await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
let subscribeResponse = await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
require:
|
||||
subscribeResponse.isOk()
|
||||
server.wakuFilter.subscriptions.subscribedPeerCount() == 1
|
||||
@ -149,22 +142,18 @@ suite "Waku Filter - End to End":
|
||||
let serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
|
||||
|
||||
# When a client node subscribes to the server node
|
||||
let
|
||||
subscribeResponse =
|
||||
await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
let subscribeResponse = await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
|
||||
# Then the subscription is successful
|
||||
check (not subscribeResponse.isOk())
|
||||
|
||||
asyncTest "Filter Client Node can receive messages after subscribing and restarting, via Filter":
|
||||
# Given a valid filter subscription
|
||||
let
|
||||
subscribeResponse =
|
||||
await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
let subscribeResponse = await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
require:
|
||||
subscribeResponse.isOk()
|
||||
server.wakuFilter.subscriptions.subscribedPeerCount() == 1
|
||||
@ -188,11 +177,9 @@ suite "Waku Filter - End to End":
|
||||
await server.mountRelay()
|
||||
|
||||
# Given a valid filter subscription
|
||||
let
|
||||
subscribeResponse =
|
||||
await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
let subscribeResponse = await client.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
require:
|
||||
subscribeResponse.isOk()
|
||||
server.wakuFilter.subscriptions.subscribedPeerCount() == 1
|
||||
@ -209,11 +196,9 @@ suite "Waku Filter - End to End":
|
||||
check (not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT))
|
||||
|
||||
# Given the client refreshes the subscription
|
||||
let
|
||||
subscribeResponse2 =
|
||||
await clientClone.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
let subscribeResponse2 = await clientClone.filterSubscribe(
|
||||
some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
|
||||
)
|
||||
check:
|
||||
subscribeResponse2.isOk()
|
||||
server.wakuFilter.subscriptions.subscribedPeerCount() == 1
|
||||
|
||||
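Every test in this suite drives the same two calls, sketched here once (arguments verbatim from the diff):

  let sub = await client.filterSubscribe(
    some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
  )
  assert sub.isOk()
  let unsub = await client.filterUnsubscribe(
    some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo
  )
  assert unsub.isOk()
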
@@ -21,7 +21,7 @@
      waku_lightpush/common,
      waku_lightpush/client,
      waku_lightpush/protocol_metrics,
      waku_lightpush/rpc
      waku_lightpush/rpc,
    ],
  ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils]

@@ -40,12 +40,11 @@ suite "Waku Lightpush - End To End":

  asyncSetup:
    handlerFuture = newPushHandlerFuture()
    handler =
      proc(
    handler = proc(
        peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
      ): Future[WakuLightPushResult[void]] {.async.} =
        handlerFuture.complete((pubsubTopic, message))
        return ok()
    ): Future[WakuLightPushResult[void]] {.async.} =
      handlerFuture.complete((pubsubTopic, message))
      return ok()

    let
      serverKey = generateSecp256k1Key()
@@ -72,19 +71,14 @@
  suite "Assessment of Message Relaying Mechanisms":
    asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
      # Given a light lightpush client
      let
        lightpushClient =
          newTestWakuNode(
            generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)
          )
      let lightpushClient =
        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      lightpushClient.mountLightpushClient()

      # When the client publishes a message
      let
        publishResponse =
          await lightpushClient.lightpushPublish(
            some(pubsubTopic), message, serverRemotePeerInfo
          )
      let publishResponse = await lightpushClient.lightpushPublish(
        some(pubsubTopic), message, serverRemotePeerInfo
      )

      if not publishResponse.isOk():
        echo "Publish failed: ", publishResponse.error()

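The publish path under test, reduced to a sketch (all names as in the diff):

  lightpushClient.mountLightpushClient()
  let publishResponse = await lightpushClient.lightpushPublish(
    some(pubsubTopic), message, serverRemotePeerInfo
  )
  assert publishResponse.isOk(), publishResponse.error()
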
@@ -19,7 +19,7 @@
      waku_peer_exchange,
      node/peer_manager,
      waku_relay/protocol,
      waku_core
      waku_core,
    ],
  ../waku_peer_exchange/utils,
  ../testlib/[wakucore, wakunode, testasync]
@@ -187,82 +187,72 @@ suite "Waku Peer Exchange with discv5":
    ## Given (copied from test_waku_discv5.nim)
    let
      # todo: px flag
      flags =
        CapabilitiesBitfield.init(
          lightpush = false, filter = false, store = false, relay = true
        )
      flags = CapabilitiesBitfield.init(
        lightpush = false, filter = false, store = false, relay = true
      )
      bindIp = parseIpAddress("0.0.0.0")
      extIp = parseIpAddress("127.0.0.1")

      nodeKey1 = generateSecp256k1Key()
      nodeTcpPort1 = Port(64010)
      nodeUdpPort1 = Port(9000)
      node1 =
        newTestWakuNode(
          nodeKey1,
          bindIp,
          nodeTcpPort1,
          some(extIp),
          wakuFlags = some(flags),
          discv5UdpPort = some(nodeUdpPort1),
        )
      node1 = newTestWakuNode(
        nodeKey1,
        bindIp,
        nodeTcpPort1,
        some(extIp),
        wakuFlags = some(flags),
        discv5UdpPort = some(nodeUdpPort1),
      )

      nodeKey2 = generateSecp256k1Key()
      nodeTcpPort2 = Port(64012)
      nodeUdpPort2 = Port(9002)
      node2 =
        newTestWakuNode(
          nodeKey2,
          bindIp,
          nodeTcpPort2,
          some(extIp),
          wakuFlags = some(flags),
          discv5UdpPort = some(nodeUdpPort2),
        )
      node2 = newTestWakuNode(
        nodeKey2,
        bindIp,
        nodeTcpPort2,
        some(extIp),
        wakuFlags = some(flags),
        discv5UdpPort = some(nodeUdpPort2),
      )

      nodeKey3 = generateSecp256k1Key()
      nodeTcpPort3 = Port(64014)
      nodeUdpPort3 = Port(9004)
      node3 =
        newTestWakuNode(
          nodeKey3,
          bindIp,
          nodeTcpPort3,
          some(extIp),
          wakuFlags = some(flags),
          discv5UdpPort = some(nodeUdpPort3),
        )
      node3 = newTestWakuNode(
        nodeKey3,
        bindIp,
        nodeTcpPort3,
        some(extIp),
        wakuFlags = some(flags),
        discv5UdpPort = some(nodeUdpPort3),
      )

    # discv5
    let
      conf1 =
        WakuDiscoveryV5Config(
          discv5Config: none(DiscoveryConfig),
          address: bindIp,
          port: nodeUdpPort1,
          privateKey: keys.PrivateKey(nodeKey1.skkey),
          bootstrapRecords: @[],
          autoupdateRecord: true,
        )
    let conf1 = WakuDiscoveryV5Config(
      discv5Config: none(DiscoveryConfig),
      address: bindIp,
      port: nodeUdpPort1,
      privateKey: keys.PrivateKey(nodeKey1.skkey),
      bootstrapRecords: @[],
      autoupdateRecord: true,
    )

    let
      disc1 =
        WakuDiscoveryV5.new(node1.rng, conf1, some(node1.enr), some(node1.peerManager))
    let disc1 =
      WakuDiscoveryV5.new(node1.rng, conf1, some(node1.enr), some(node1.peerManager))

    let
      conf2 =
        WakuDiscoveryV5Config(
          discv5Config: none(DiscoveryConfig),
          address: bindIp,
          port: nodeUdpPort2,
          privateKey: keys.PrivateKey(nodeKey2.skkey),
          bootstrapRecords: @[disc1.protocol.getRecord()],
          autoupdateRecord: true,
        )
    let conf2 = WakuDiscoveryV5Config(
      discv5Config: none(DiscoveryConfig),
      address: bindIp,
      port: nodeUdpPort2,
      privateKey: keys.PrivateKey(nodeKey2.skkey),
      bootstrapRecords: @[disc1.protocol.getRecord()],
      autoupdateRecord: true,
    )

    let
      disc2 =
        WakuDiscoveryV5.new(node2.rng, conf2, some(node2.enr), some(node2.peerManager))
    let disc2 =
      WakuDiscoveryV5.new(node2.rng, conf2, some(node2.enr), some(node2.peerManager))

    await allFutures(node1.start(), node2.start(), node3.start())
    let resultDisc1StartRes = await disc1.start()
@@ -286,9 +276,8 @@
    await node1.mountPeerExchange()
    await node3.mountPeerExchange()

    let
      dialResponse =
        await node3.dialForPeerExchange(node1.switch.peerInfo.toRemotePeerInfo())
    let dialResponse =
      await node3.dialForPeerExchange(node1.switch.peerInfo.toRemotePeerInfo())

    check dialResponse.isOk

@@ -12,13 +12,8 @@
from std/times import epochTime

import
  ../../../waku/[
    node/waku_node,
    node/peer_manager,
    waku_core,
    waku_node,
    waku_rln_relay,
  ],
  ../../../waku/
    [node/waku_node, node/peer_manager, waku_core, waku_node, waku_rln_relay],
  ../waku_store/store_utils,
  ../waku_archive/archive_utils,
  ../testlib/[wakucore, wakunode, testasync, futures],
@@ -30,7 +25,7 @@ proc setupRln(node: WakuNode, identifier: uint) {.async.} =
      rlnRelayDynamic: false,
      rlnRelayCredIndex: some(identifier),
      rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier),
      rlnEpochSizeSec: 1
      rlnEpochSizeSec: 1,
    )
  )

@@ -73,12 +68,11 @@ proc sendRlnMessageWithInvalidProof(
): Future[bool] {.async.} =
  let
    extraBytes: seq[byte] = @[byte(1), 2, 3]
    rateLimitProofRes =
      client.wakuRlnRelay.groupManager.generateProof(
        concat(payload, extraBytes),
        # we add extra bytes to invalidate proof verification against original payload
        client.wakuRlnRelay.getCurrentEpoch()
      )
    rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof(
      concat(payload, extraBytes),
      # we add extra bytes to invalidate proof verification against original payload
      client.wakuRlnRelay.getCurrentEpoch(),
    )
    rateLimitProof = rateLimitProofRes.get().encode().buffer
    message =
      WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof)
@@ -127,10 +121,8 @@ suite "Waku RlnRelay - End to End":
      server.wakuRlnRelay == nil

    # When RlnRelay is mounted
    let
      catchRes =
        catch:
          await server.setupRln(1)
    let catchRes = catch:
      await server.setupRln(1)

    # Then Relay and RLN are not mounted, and the process fails
    check:
@@ -156,9 +148,8 @@
    var completionFuture = subscribeCompletionHandler(server, pubsubTopic)

    # When the client sends a valid RLN message
    let
      isCompleted1 =
        await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)
    let isCompleted1 =
      await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)

    # Then the valid RLN message is relayed
    check:
@@ -167,11 +158,9 @@

    # When the client sends an invalid RLN message
    completionFuture = newBoolFuture()
    let
      isCompleted2 =
        await sendRlnMessageWithInvalidProof(
          client, pubsubTopic, contentTopic, completionFuture
        )
    let isCompleted2 = await sendRlnMessageWithInvalidProof(
      client, pubsubTopic, contentTopic, completionFuture
    )

    # Then the invalid RLN message is not relayed
    check:
@@ -191,9 +180,8 @@

    await sleepAsync(FUTURE_TIMEOUT)
    # When the client sends a valid RLN message
    let
      isCompleted1 =
        await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)
    let isCompleted1 =
      await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture)

    # Then the valid RLN message is relayed
    check:
@@ -202,11 +190,9 @@

    # When the client sends an invalid RLN message
    completionFuture = newBoolFuture()
    let
      isCompleted2 =
        await sendRlnMessageWithInvalidProof(
          client, pubsubTopic, contentTopic, completionFuture
        )
    let isCompleted2 = await sendRlnMessageWithInvalidProof(
      client, pubsubTopic, contentTopic, completionFuture
    )

    # Then the invalid RLN message is not relayed
    check:
@@ -250,18 +236,26 @@
      WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic)

    doAssert(
      client.wakuRlnRelay.appendRLNProof(message1b, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 0).isOk()
      client.wakuRlnRelay
      .appendRLNProof(message1b, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 0)
      .isOk()
    )
    doAssert(
      client.wakuRlnRelay.appendRLNProof(message1kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 1).isOk()
      client.wakuRlnRelay
      .appendRLNProof(message1kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 1)
      .isOk()
    )
    doAssert(
      client.wakuRlnRelay.appendRLNProof(message150kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 2).isOk()
      client.wakuRlnRelay
      .appendRLNProof(message150kib, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 2)
      .isOk()
    )
    doAssert(
      client.wakuRlnRelay.appendRLNProof(
      client.wakuRlnRelay
      .appendRLNProof(
        message151kibPlus, epoch + client.wakuRlnRelay.rlnEpochSizeSec * 3
      ).isOk()
    )
      .isOk()
    )

    # When sending the 1B message
@@ -319,9 +313,8 @@
      overhead: uint64 = 419
      payload150kibPlus = getByteSequence((150 * 1024) - overhead + 1)

    var
      message151kibPlus =
        WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic)
    var message151kibPlus =
      WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic)

    doAssert(
      client.wakuRlnRelay.appendRLNProof(

@ -17,7 +17,7 @@ import
|
||||
waku_store/client,
|
||||
waku_archive,
|
||||
waku_archive/driver/sqlite_driver,
|
||||
common/databases/db_sqlite
|
||||
common/databases/db_sqlite,
|
||||
],
|
||||
../waku_store/store_utils,
|
||||
../waku_archive/archive_utils,
|
||||
@ -55,16 +55,15 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
|
||||
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
|
||||
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
|
||||
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin))
|
||||
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
|
||||
]
|
||||
|
||||
historyQuery =
|
||||
HistoryQuery(
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.Forward,
|
||||
pageSize: 5,
|
||||
)
|
||||
historyQuery = HistoryQuery(
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.Forward,
|
||||
pageSize: 5,
|
||||
)
|
||||
|
||||
let
|
||||
serverKey = generateSecp256k1Key()
|
||||
@ -95,26 +94,24 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse.get().messages == archiveMessages[0..<5]
|
||||
queryResponse.get().messages == archiveMessages[0 ..< 5]
|
||||
|
||||
# Given the next query
|
||||
var
|
||||
otherHistoryQuery =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 5,
|
||||
)
|
||||
var otherHistoryQuery = HistoryQuery(
|
||||
cursor: queryResponse.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 5,
|
||||
)
|
||||
|
||||
# When making the next history query
|
||||
let
|
||||
otherQueryResponse = await client.query(otherHistoryQuery, serverRemotePeerInfo)
|
||||
let otherQueryResponse =
|
||||
await client.query(otherHistoryQuery, serverRemotePeerInfo)
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
otherQueryResponse.get().messages == archiveMessages[5..<10]
|
||||
otherQueryResponse.get().messages == archiveMessages[5 ..< 10]
|
||||
|
||||
asyncTest "Backward Pagination":
|
||||
# Given the history query is backward
|
||||
@ -125,26 +122,24 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse.get().messages == archiveMessages[5..<10]
|
||||
queryResponse.get().messages == archiveMessages[5 ..< 10]
|
||||
|
||||
# Given the next query
|
||||
var
|
||||
nextHistoryQuery =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.BACKWARD,
|
||||
pageSize: 5,
|
||||
)
|
||||
var nextHistoryQuery = HistoryQuery(
|
||||
cursor: queryResponse.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.BACKWARD,
|
||||
pageSize: 5,
|
||||
)
|
||||
|
||||
# When making the next history query
|
||||
let
|
||||
otherQueryResponse = await client.query(nextHistoryQuery, serverRemotePeerInfo)
|
||||
let otherQueryResponse =
|
||||
await client.query(nextHistoryQuery, serverRemotePeerInfo)
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
otherQueryResponse.get().messages == archiveMessages[0..<5]
|
||||
otherQueryResponse.get().messages == archiveMessages[0 ..< 5]
|
||||
|
||||
suite "Pagination with Differente Page Sizes":
|
||||
asyncTest "Pagination with Small Page Size":
|
||||
@ -156,79 +151,71 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse1.get().messages == archiveMessages[0..<2]
|
||||
queryResponse1.get().messages == archiveMessages[0 ..< 2]
|
||||
|
||||
# Given the next query (2/5)
|
||||
let
|
||||
historyQuery2 =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse1.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
let historyQuery2 = HistoryQuery(
|
||||
cursor: queryResponse1.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
|
||||
# When making the next history query
|
||||
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse2.get().messages == archiveMessages[2..<4]
|
||||
queryResponse2.get().messages == archiveMessages[2 ..< 4]
|
||||
|
||||
# Given the next query (3/5)
|
||||
let
|
||||
historyQuery3 =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse2.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
let historyQuery3 = HistoryQuery(
|
||||
cursor: queryResponse2.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
|
||||
# When making the next history query
|
||||
let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo)
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse3.get().messages == archiveMessages[4..<6]
|
||||
queryResponse3.get().messages == archiveMessages[4 ..< 6]
|
||||
|
||||
# Given the next query (4/5)
|
||||
let
|
||||
historyQuery4 =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse3.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
let historyQuery4 = HistoryQuery(
|
||||
cursor: queryResponse3.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
|
||||
# When making the next history query
|
||||
let queryResponse4 = await client.query(historyQuery4, serverRemotePeerInfo)
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse4.get().messages == archiveMessages[6..<8]
|
||||
queryResponse4.get().messages == archiveMessages[6 ..< 8]
|
||||
|
||||
# Given the next query (5/5)
|
||||
let
|
||||
historyQuery5 =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse4.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
let historyQuery5 = HistoryQuery(
|
||||
cursor: queryResponse4.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 2,
|
||||
)
|
||||
|
||||
# When making the next history query
|
||||
let queryResponse5 = await client.query(historyQuery5, serverRemotePeerInfo)
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse5.get().messages == archiveMessages[8..<10]
|
||||
queryResponse5.get().messages == archiveMessages[8 ..< 10]
|
||||
|
||||
asyncTest "Pagination with Large Page Size":
|
||||
# Given the first query (1/2)
|
||||
@ -239,25 +226,23 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse1.get().messages == archiveMessages[0..<8]
|
||||
queryResponse1.get().messages == archiveMessages[0 ..< 8]
|
||||
|
||||
# Given the next query (2/2)
|
||||
let
|
||||
historyQuery2 =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse1.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 8,
|
||||
)
|
||||
let historyQuery2 = HistoryQuery(
|
||||
cursor: queryResponse1.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 8,
|
||||
)
|
||||
|
||||
# When making the next history query
|
||||
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse2.get().messages == archiveMessages[8..<10]
|
||||
queryResponse2.get().messages == archiveMessages[8 ..< 10]
|
||||
|
||||
asyncTest "Pagination with Excessive Page Size":
|
||||
# Given the first query (1/1)
|
||||
@ -268,7 +253,7 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse1.get().messages == archiveMessages[0..<10]
|
||||
queryResponse1.get().messages == archiveMessages[0 ..< 10]
|
||||
|
||||
asyncTest "Pagination with Mixed Page Size":
|
||||
# Given the first query (1/3)
|
||||
@ -279,43 +264,39 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
|
||||
# Then the response contains the messages
|
||||
check:
|
||||
queryResponse1.get().messages == archiveMessages[0..<2]
|
||||
queryResponse1.get().messages == archiveMessages[0 ..< 2]
|
||||
|
||||
# Given the next query (2/3)
|
||||
let
|
||||
historyQuery2 =
|
||||
HistoryQuery(
|
||||
cursor: queryResponse1.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
|
||||
direction: PagingDirection.FORWARD,
|
||||
pageSize: 4,
|
||||
)
|
||||
let historyQuery2 = HistoryQuery(
|
||||
cursor: queryResponse1.get().cursor,
|
||||
pubsubTopic: some(pubsubTopic),
|
||||
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 4,
)

# When making the next history query
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)

# Then the response contains the messages
check:
queryResponse2.get().messages == archiveMessages[2..<6]
queryResponse2.get().messages == archiveMessages[2 ..< 6]

# Given the next query (3/3)
let
historyQuery3 =
HistoryQuery(
cursor: queryResponse2.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 6,
)
let historyQuery3 = HistoryQuery(
cursor: queryResponse2.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 6,
)

# When making the next history query
let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo)

# Then the response contains the messages
check:
queryResponse3.get().messages == archiveMessages[6..<10]
queryResponse3.get().messages == archiveMessages[6 ..< 10]

asyncTest "Pagination with Zero Page Size (Behaves as DefaultPageSize)":
# Given a message list of size higher than the default page size
@ -326,7 +307,7 @@ suite "Waku Store - End to End - Sorted Archive":

let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp
var extraMessages: seq[WakuMessage] = @[]
for i in 0..<missingMessagesAmount:
for i in 0 ..< missingMessagesAmount:
let
timestampOffset = 10 * int(i + 1)
# + 1 to avoid collision with existing messages
@ -345,18 +326,16 @@ suite "Waku Store - End to End - Sorted Archive":

# Then the response contains the archive.DefaultPageSize messages
check:
queryResponse1.get().messages == totalMessages[0..<archive.DefaultPageSize]
queryResponse1.get().messages == totalMessages[0 ..< archive.DefaultPageSize]

# Given the next query (2/2)
let
historyQuery2 =
HistoryQuery(
cursor: queryResponse1.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 0,
)
let historyQuery2 = HistoryQuery(
cursor: queryResponse1.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 0,
)

# When making the next history query
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
@ -364,7 +343,7 @@ suite "Waku Store - End to End - Sorted Archive":
# Then the response contains the remaining messages
check:
queryResponse2.get().messages ==
totalMessages[archive.DefaultPageSize..<archive.DefaultPageSize + 5]
totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5]

asyncTest "Pagination with Default Page Size":
# Given a message list of size higher than the default page size
@ -375,7 +354,7 @@ suite "Waku Store - End to End - Sorted Archive":

let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp
var extraMessages: seq[WakuMessage] = @[]
for i in 0..<missingMessagesAmount:
for i in 0 ..< missingMessagesAmount:
let
timestampOffset = 10 * int(i + 1)
# + 1 to avoid collision with existing messages
@ -387,29 +366,26 @@ suite "Waku Store - End to End - Sorted Archive":
let totalMessages = archiveMessages & extraMessages

# Given a query with default page size (1/2)
historyQuery =
HistoryQuery(
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
)
historyQuery = HistoryQuery(
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
)

# When making a history query
let queryResponse = await client.query(historyQuery, serverRemotePeerInfo)

# Then the response contains the messages
check:
queryResponse.get().messages == totalMessages[0..<archive.DefaultPageSize]
queryResponse.get().messages == totalMessages[0 ..< archive.DefaultPageSize]

# Given the next query (2/2)
let
historyQuery2 =
HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
)
let historyQuery2 = HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
)

# When making the next history query
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
@ -417,7 +393,7 @@ suite "Waku Store - End to End - Sorted Archive":
# Then the response contains the messages
check:
queryResponse2.get().messages ==
totalMessages[archive.DefaultPageSize..<archive.DefaultPageSize + 5]
totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5]

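The tests above all drive the same cursor protocol: when a page is truncated, the response carries a cursor, and feeding that cursor into the next HistoryQuery resumes where the previous page ended. The following is a minimal sketch of the full drain loop, assuming the HistoryQuery/query API and the fixtures (client, serverRemotePeerInfo, pubsubTopic, contentTopicSeq) these tests construct; it is illustrative, not part of the commit:

proc drainStore(): Future[seq[WakuMessage]] {.async.} =
  # Page through the archive until the server stops returning a cursor.
  var query = HistoryQuery(
    pubsubTopic: some(pubsubTopic),
    contentTopics: contentTopicSeq,
    direction: PagingDirection.FORWARD,
    pageSize: 5,
  )
  var collected: seq[WakuMessage]
  while true:
    let res = await client.query(query, serverRemotePeerInfo)
    if res.isErr():
      break # a real client would log or retry here
    collected.add(res.get().messages)
    if res.get().cursor.isNone():
      break # no cursor means the last page was served
    query.cursor = res.get().cursor # resume from the end of this page
  return collected

As the zero-page-size test above shows, omitting pageSize (or passing 0) makes the server fall back to archive.DefaultPageSize.
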
suite "Pagination with Different Cursors":
asyncTest "Starting Cursor":
@ -431,7 +407,7 @@ suite "Waku Store - End to End - Sorted Archive":

# Then the response contains the message
check:
queryResponse.get().messages == archiveMessages[1..<2]
queryResponse.get().messages == archiveMessages[1 ..< 2]

asyncTest "Middle Cursor":
# Given a cursor pointing to the middle message1
@ -444,7 +420,7 @@ suite "Waku Store - End to End - Sorted Archive":

# Then the response contains the message
check:
queryResponse.get().messages == archiveMessages[6..<7]
queryResponse.get().messages == archiveMessages[6 ..< 7]

asyncTest "Ending Cursor":
# Given a cursor pointing to the last message
@ -482,28 +458,25 @@ suite "Waku Store - End to End - Sorted Archive":

# Then the response contains the messages
check:
queryResponse.get().messages == archiveMessages[0..<5]
queryResponse.get().messages == archiveMessages[0 ..< 5]

# Given the cursor from the first query
let cursor = queryResponse.get().cursor

# When making a history query to the second server node
let
otherHistoryQuery =
HistoryQuery(
cursor: cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)
let
otherQueryResponse =
await client.query(otherHistoryQuery, otherServerRemotePeerInfo)
let otherHistoryQuery = HistoryQuery(
cursor: cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)
let otherQueryResponse =
await client.query(otherHistoryQuery, otherServerRemotePeerInfo)

# Then the response contains the remaining messages
check:
otherQueryResponse.get().messages == archiveMessages[5..<10]
otherQueryResponse.get().messages == archiveMessages[5 ..< 10]

# Cleanup
waitFor otherServer.stop()
@ -526,13 +499,12 @@ suite "Waku Store - End to End - Unsorted Archive":
contentTopic = DefaultContentTopic
contentTopicSeq = @[contentTopic]

historyQuery =
HistoryQuery(
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)
historyQuery = HistoryQuery(
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)

let timeOrigin = now()
unsortedArchiveMessages =
@ -546,7 +518,7 @@ suite "Waku Store - End to End - Unsorted Archive":
fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)), # 6
fakeWakuMessage(@[byte 01], ts = ts(20, timeOrigin)), # 9
fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)), # 7
fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)) # 8
fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)), # 8
]

let
@ -586,19 +558,17 @@ suite "Waku Store - End to End - Unsorted Archive":
unsortedArchiveMessages[0],
unsortedArchiveMessages[1],
unsortedArchiveMessages[4],
unsortedArchiveMessages[3]
unsortedArchiveMessages[3],
]

# Given the next query
var
historyQuery2 =
HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)
var historyQuery2 = HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)

# When making the next history query
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
@ -611,7 +581,7 @@ suite "Waku Store - End to End - Unsorted Archive":
unsortedArchiveMessages[6],
unsortedArchiveMessages[8],
unsortedArchiveMessages[9],
unsortedArchiveMessages[7]
unsortedArchiveMessages[7],
]

asyncTest "Backward pagination with Ascending Sorting":
@ -629,7 +599,7 @@ suite "Waku Store - End to End - Unsorted Archive":
@[
unsortedArchiveMessages[2],
unsortedArchiveMessages[0],
unsortedArchiveMessages[1]
unsortedArchiveMessages[1],
]

asyncTest "Forward Pagination with Ascending Sorting":
@ -649,7 +619,7 @@ suite "Waku Store - End to End - Unsorted Archive":
unsortedArchiveMessages[5],
unsortedArchiveMessages[6],
unsortedArchiveMessages[8],
unsortedArchiveMessages[9]
unsortedArchiveMessages[9],
]

suite "Waku Store - End to End - Archive with Multiple Topics":
@ -680,18 +650,16 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
contentTopicSeq =
@[contentTopic, contentTopicB, contentTopicC, contentTopicSpecials]

historyQuery =
HistoryQuery(
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)
historyQuery = HistoryQuery(
pubsubTopic: some(pubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)

let timeOrigin = now()
originTs =
proc(offset = 0): Timestamp {.gcsafe, raises: [].} =
ts(offset, timeOrigin)
originTs = proc(offset = 0): Timestamp {.gcsafe, raises: [].} =
ts(offset, timeOrigin)

archiveMessages =
@[
@ -706,7 +674,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC),
fakeWakuMessage(
@[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials
)
),
]

let
@ -716,11 +684,9 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))

let
archiveDriver =
newSqliteArchiveDriver().put(pubsubTopic, archiveMessages[0..<6]).put(
pubsubTopicB, archiveMessages[6..<10]
)
let archiveDriver = newSqliteArchiveDriver()
.put(pubsubTopic, archiveMessages[0 ..< 6])
.put(pubsubTopicB, archiveMessages[6 ..< 10])
let mountSortedArchiveResult = server.mountArchive(archiveDriver)

assert mountSortedArchiveResult.isOk()
@ -761,7 +727,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
archiveMessages[0],
archiveMessages[1],
archiveMessages[3],
archiveMessages[4]
archiveMessages[4],
]

asyncTest "Empty Content Filtering":
@ -773,25 +739,23 @@ suite "Waku Store - End to End - Archive with Multiple Topics":

# Then the response contains the messages
check:
queryResponse.get().messages == archiveMessages[0..<5]
queryResponse.get().messages == archiveMessages[0 ..< 5]

# Given the next query
let
historyQuery2 =
HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: none(PubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)
let historyQuery2 = HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: none(PubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)

# When making the next history query
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)

# Then the response contains the messages
check:
queryResponse2.get().messages == archiveMessages[5..<10]
queryResponse2.get().messages == archiveMessages[5 ..< 10]

asyncTest "Non-Existent Content Topic":
# Given a history query with non-existent content filtering
@ -830,7 +794,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
archiveMessages[6],
archiveMessages[7],
archiveMessages[8],
archiveMessages[9]
archiveMessages[9],
]

asyncTest "PubsubTopic Left Empty":
@ -842,25 +806,23 @@ suite "Waku Store - End to End - Archive with Multiple Topics":

# Then the response contains the messages
check:
queryResponse.get().messages == archiveMessages[0..<5]
queryResponse.get().messages == archiveMessages[0 ..< 5]

# Given the next query
let
historyQuery2 =
HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: none(PubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)
let historyQuery2 = HistoryQuery(
cursor: queryResponse.get().cursor,
pubsubTopic: none(PubsubTopic),
contentTopics: contentTopicSeq,
direction: PagingDirection.FORWARD,
pageSize: 5,
)

# When making the next history query
let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)

# Then the response contains the messages
check:
queryResponse2.get().messages == archiveMessages[5..<10]
queryResponse2.get().messages == archiveMessages[5 ..< 10]

suite "Validation of Time-based Filtering":
asyncTest "Basic Time Filtering":
@ -891,7 +853,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
archiveMessages[2],
archiveMessages[3],
archiveMessages[4],
archiveMessages[5]
archiveMessages[5],
]

asyncTest "Only End Time Specified":
@ -910,7 +872,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
archiveMessages[1],
archiveMessages[2],
archiveMessages[3],
archiveMessages[4]
archiveMessages[4],
]

asyncTest "Invalid Time Range":
@ -959,7 +921,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
@[
fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true),
fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true),
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true)
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true),
]
ephemeralArchiveDriver =
newSqliteArchiveDriver().put(pubsubTopic, ephemeralMessages)
@ -978,8 +940,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let ephemeralServerRemotePeerInfo = ephemeralServer.peerInfo.toRemotePeerInfo()

# When making a history query to the server with only ephemeral messages
let
queryResponse = await client.query(historyQuery, ephemeralServerRemotePeerInfo)
let queryResponse =
await client.query(historyQuery, ephemeralServerRemotePeerInfo)

# Then the response contains no messages
check:
@ -995,18 +957,17 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
@[
fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true),
fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true),
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true)
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true),
]
nonEphemeralMessages =
@[
fakeWakuMessage(@[byte 03], ts = ts(30), ephemeral = false),
fakeWakuMessage(@[byte 04], ts = ts(40), ephemeral = false),
fakeWakuMessage(@[byte 05], ts = ts(50), ephemeral = false)
fakeWakuMessage(@[byte 05], ts = ts(50), ephemeral = false),
]
mixedArchiveDriver =
newSqliteArchiveDriver().put(pubsubTopic, ephemeralMessages).put(
pubsubTopic, nonEphemeralMessages
)
mixedArchiveDriver = newSqliteArchiveDriver()
.put(pubsubTopic, ephemeralMessages)
.put(pubsubTopic, nonEphemeralMessages)

# And a server node with the mixed archive
let
@ -1060,12 +1021,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
asyncTest "Voluminous Message Store":
# Given a voluminous archive (1M+ messages)
var voluminousArchiveMessages: seq[WakuMessage] = @[]
for i in 0..<100000:
for i in 0 ..< 100000:
let topic = "topic" & $i
voluminousArchiveMessages.add(fakeWakuMessage(@[byte i], contentTopic = topic))
let
voluminousArchiveDriverWithMessages =
newArchiveDriverWithMessages(pubsubTopic, voluminousArchiveMessages)
let voluminousArchiveDriverWithMessages =
newArchiveDriverWithMessages(pubsubTopic, voluminousArchiveMessages)

# And a server node with the voluminous archive
let
@ -1085,8 +1045,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
@["topic10000", "topic30000", "topic50000", "topic70000", "topic90000"]

# When making a history query to the server with a voluminous archive
let
queryResponse = await client.query(historyQuery, voluminousServerRemotePeerInfo)
let queryResponse =
await client.query(historyQuery, voluminousServerRemotePeerInfo)

# Then the response contains the messages
check:
@ -1096,7 +1056,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
voluminousArchiveMessages[30000],
voluminousArchiveMessages[50000],
voluminousArchiveMessages[70000],
voluminousArchiveMessages[90000]
voluminousArchiveMessages[90000],
]

# Cleanup
@ -1105,7 +1065,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
asyncTest "Large contentFilters Array":
# Given a history query with the max contentFilters len, 10
historyQuery.contentTopics = @[contentTopic]
for i in 0..<9:
for i in 0 ..< 9:
let topic = "topic" & $i
historyQuery.contentTopics.add(topic)


@ -1,7 +1,11 @@
proc getContentTopic*(applicationName: string, applicationVersion: int, contentTopicName: string, encoding: string): string =
proc getContentTopic*(
applicationName: string,
applicationVersion: int,
contentTopicName: string,
encoding: string,
): string =
return "/$applicationName/$applicationVersion/$contentTopicName/$encoding"


const
CURRENT* = getContentTopic("application", 1, "content-topic", "proto")
TESTNET* = getContentTopic("toychat", 2, "huilong", "proto")

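Note that the proc body above is a plain Nim string literal: the $-prefixed names are returned verbatim, not substituted with the parameters. An interpolating variant needs std/strformat; the sketch below is an illustrative alternative, not the code in this commit:

import std/strformat

# Hypothetical interpolating variant of getContentTopic above.
proc buildContentTopic(
    applicationName: string,
    applicationVersion: int,
    contentTopicName: string,
    encoding: string,
): string =
  fmt"/{applicationName}/{applicationVersion}/{contentTopicName}/{encoding}"

assert buildContentTopic("toychat", 2, "huilong", "proto") == "/toychat/2/huilong/proto"
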
@ -1,13 +1,15 @@
import
std/json
import std/json

const
ALPHABETIC* = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
ALPHANUMERIC* = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
ALPHANUMERIC_SPECIAL* = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_+-=[]{}|;':\\\",./<>?`~"
EMOJI* = "😀 😃 😄 😁 😆 😅 🤣 😂 🙂 🙃 😉 😊 😇 🥰 😍 🤩 😘 😗 😚 😙"
ALPHANUMERIC_SPECIAL* =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_+-=[]{}|;':\\\",./<>?`~"
EMOJI* =
"😀 😃 😄 😁 😆 😅 🤣 😂 🙂 🙃 😉 😊 😇 🥰 😍 🤩 😘 😗 😚 😙"
CODE* = "def main():\n\tprint('Hello, world!')"
QUERY* = """
QUERY* =
"""
SELECT
u.id,
u.name,
@ -28,7 +30,8 @@ const
u.id = 1
"""
TEXT_SMALL* = "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
TEXT_LARGE* = """
TEXT_LARGE* =
"""
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras gravida vulputate semper. Proin
eleifend varius cursus. Morbi lacinia posuere quam sit amet pretium. Sed non metus fermentum,
venenatis nisl id, vestibulum eros. Quisque non lorem sit amet lectus faucibus elementum eu
@ -40,35 +43,12 @@ const

proc getSampleJsonDictionary*(): JsonNode =
%*{
"shapes": [
{
"type": "circle",
"radius": 10
},
{
"type": "square",
"side": 10
}
],
"colours": [
"red",
"green",
"blue"
]
"shapes": [{"type": "circle", "radius": 10}, {"type": "square", "side": 10}],
"colours": ["red", "green", "blue"],
}

proc getSampleJsonList*(): JsonNode =
%*[
{
"type": "cat",
"name": "Salem"
},
{
"type": "dog",
"name": "Oberon"
},
]

proc getSampleJsonList*(): JsonNode =
%*[{"type": "cat", "name": "Salem"}, {"type": "dog", "name": "Oberon"}]

proc getByteSequence*(bytesNumber: uint64): seq[byte] =
result = newSeq[byte](bytesNumber)

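These helpers build sample payloads with std/json's %* constructor, and getByteSequence returns a zero-initialised buffer of the requested length. A small usage sketch; the assertions are illustrative and match the literals above:

import std/json

let dict = getSampleJsonDictionary()
assert dict["shapes"][1]["side"].getInt() == 10
assert dict["colours"][0].getStr() == "red"

let pets = getSampleJsonList()
assert pets[1]["name"].getStr() == "Oberon"

assert getByteSequence(4) == @[byte 0, 0, 0, 0]
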
@ -3,7 +3,6 @@ import std/strformat
proc getPubsubTopic*(pubsubTopicName: string): string =
return fmt"/waku/2/{pubsubTopicName}"


const
CURRENT* = getPubsubTopic("test")
CURRENT_NESTED* = getPubsubTopic("test/nested")

@ -1,6 +1,3 @@
{.used.}

import
./all_tests_common,
./all_tests_waku,
./all_tests_wakunode2
import ./all_tests_common, ./all_tests_waku, ./all_tests_wakunode2

@ -1,6 +1,4 @@
import
chronos, bearssl/rand,
eth/[keys, p2p]
import chronos, bearssl/rand, eth/[keys, p2p]

import libp2p/crypto/crypto

@ -8,20 +6,23 @@ var nextPort = 30303

proc localAddress*(port: int): Address =
let port = Port(port)
result = Address(udpPort: port, tcpPort: port,
ip: parseIpAddress("127.0.0.1"))
result = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("127.0.0.1"))

proc setupTestNode*(
rng: ref HmacDrbgContext,
capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode =
rng: ref HmacDrbgContext, capabilities: varargs[ProtocolInfo, `protocolInfo`]
): EthereumNode =
let
keys1 = keys.KeyPair.random(rng[])
address = localAddress(nextPort)
result = newEthereumNode(keys1, address, NetworkId(1),
addAllCapabilities = false,
bindUdpPort = address.udpPort, # Assume same as external
bindTcpPort = address.tcpPort, # Assume same as external
rng = rng)
result = newEthereumNode(
keys1,
address,
NetworkId(1),
addAllCapabilities = false,
bindUdpPort = address.udpPort, # Assume same as external
bindTcpPort = address.tcpPort, # Assume same as external
rng = rng,
)
nextPort.inc
for capability in capabilities:
result.addCapability capability

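A brief usage sketch for the helpers above (hypothetical test body; newRng comes from eth/keys). Each setupTestNode call consumes the current nextPort value, which starts at 30303, so consecutive nodes bind distinct localhost ports:

let rng = keys.newRng()
var nodeA = setupTestNode(rng) # binds 127.0.0.1:30303
var nodeB = setupTestNode(rng) # nextPort was incremented, so 127.0.0.1:30304
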
@ -1,13 +1,7 @@
{.used.}

import
std/sets,
stew/[results, byteutils],
testutils/unittests
import
../../waku/waku_core,
../../waku/waku_api/message_cache,
./testlib/wakucore
import std/sets, stew/[results, byteutils], testutils/unittests
import ../../waku/waku_core, ../../waku/waku_api/message_cache, ./testlib/wakucore

suite "MessageCache":
setup:
@ -77,7 +71,7 @@ suite "MessageCache":
cache.addMessage(testPubsubTopic, testMessage)

## When
var res = cache.getMessages(testPubsubTopic, clear=true)
var res = cache.getMessages(testPubsubTopic, clear = true)
require(res.isOk())

res = cache.getMessages(testPubsubTopic)
@ -121,15 +115,15 @@ suite "MessageCache":
## Then
let res = cache.getMessages(testPubsubTopic)
check:
res.isErr()
res.error() == "not subscribed to any pubsub topics"
res.isErr()
res.error() == "not subscribed to any pubsub topics"

test "add messages beyond the capacity":
## Given
var testMessages = @[fakeWakuMessage(toBytes("MSG-1"))]

# Prevent duplicate messages timestamp
for i in 0..<5:
for i in 0 ..< 5:
var msg = fakeWakuMessage(toBytes("MSG-1"))

while msg.timestamp <= testMessages[i].timestamp:
@ -166,7 +160,7 @@ suite "MessageCache":
check:
getRes.isOk
getRes.get() == @[fakeMessage]


test "add same message twice":
cache.pubsubSubscribe(testPubsubTopic)

@ -214,4 +208,4 @@ suite "MessageCache":
# msg0 was deleted because no refs

check:
cache.messagesCount() == 2
cache.messagesCount() == 2

@ -37,20 +37,28 @@ import
procSuite "Peer Manager":
asyncTest "connectRelay() works":
# Create 2 nodes
let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
let nodes = toSeq(0 ..< 2).mapIt(
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))

let connOk = await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())
let connOk =
await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())
await sleepAsync(chronos.milliseconds(500))

check:
connOk == true
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].peerInfo.peerId)
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == Connectedness.Connected
nodes[0].peerManager.peerStore.peers().anyIt(
it.peerId == nodes[1].peerInfo.peerId
)
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connectedness.Connected

asyncTest "dialPeer() works":
# Create 2 nodes
let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
let nodes = toSeq(0 ..< 2).mapIt(
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
)

await allFutures(nodes.mapIt(it.start()))
await allFutures(nodes.mapIt(it.mountRelay()))
@ -58,7 +66,9 @@ procSuite "Peer Manager":
await allFutures(nodes.mapIt(it.mountLegacyFilter()))

# Dial node2 from node1
let conn = await nodes[0].peerManager.dialPeer(nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec)
let conn = await nodes[0].peerManager.dialPeer(
nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec
)
await sleepAsync(chronos.milliseconds(500))

# Check connection
@ -69,32 +79,42 @ procSuite "Peer Manager":

# Check that node2 is being managed in node1
check:
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].peerInfo.peerId)
nodes[0].peerManager.peerStore.peers().anyIt(
it.peerId == nodes[1].peerInfo.peerId
)

# Check connectedness
check:
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == Connectedness.Connected
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connectedness.Connected

await allFutures(nodes.mapIt(it.stop()))

asyncTest "dialPeer() fails gracefully":
# Create 2 nodes and start them
let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
let nodes = toSeq(0 ..< 2).mapIt(
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))
await allFutures(nodes.mapIt(it.mountRelay()))

let nonExistentPeerRes = parsePeerInfo("/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e")
let nonExistentPeerRes = parsePeerInfo(
"/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e"
)
require nonExistentPeerRes.isOk()

let nonExistentPeer = nonExistentPeerRes.value

# Dial non-existent peer from node1
let conn1 = await nodes[0].peerManager.dialPeer(nonExistentPeer, WakuLegacyFilterCodec)
let conn1 =
await nodes[0].peerManager.dialPeer(nonExistentPeer, WakuLegacyFilterCodec)
check:
conn1.isNone()

# Dial peer not supporting given protocol
let conn2 = await nodes[0].peerManager.dialPeer(nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec)
let conn2 = await nodes[0].peerManager.dialPeer(
nodes[1].peerInfo.toRemotePeerInfo(), WakuLegacyFilterCodec
)
check:
conn2.isNone()

@ -102,7 +122,8 @@ procSuite "Peer Manager":

asyncTest "Adding, selecting and filtering peers work":
let
node = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
node =
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))

# Create filter peer
filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
@ -117,23 +138,29 @@ procSuite "Peer Manager":
node.mountStoreClient()

node.peerManager.addServicePeer(storePeer.toRemotePeerInfo(), WakuStoreCodec)
node.peerManager.addServicePeer(filterPeer.toRemotePeerInfo(), WakuLegacyFilterCodec)
node.peerManager.addServicePeer(
filterPeer.toRemotePeerInfo(), WakuLegacyFilterCodec
)

# Check peers were successfully added to peer manager
check:
node.peerManager.peerStore.peers().len == 2
node.peerManager.peerStore.peers(WakuLegacyFilterCodec).allIt(it.peerId == filterPeer.peerId and
it.addrs.contains(filterLoc) and
it.protocols.contains(WakuLegacyFilterCodec))
node.peerManager.peerStore.peers(WakuStoreCodec).allIt(it.peerId == storePeer.peerId and
it.addrs.contains(storeLoc) and
it.protocols.contains(WakuStoreCodec))
node.peerManager.peerStore.peers(WakuLegacyFilterCodec).allIt(
it.peerId == filterPeer.peerId and it.addrs.contains(filterLoc) and
it.protocols.contains(WakuLegacyFilterCodec)
)
node.peerManager.peerStore.peers(WakuStoreCodec).allIt(
it.peerId == storePeer.peerId and it.addrs.contains(storeLoc) and
it.protocols.contains(WakuStoreCodec)
)

await node.stop()

asyncTest "Peer manager keeps track of connections":
# Create 2 nodes
let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
let nodes = toSeq(0 ..< 2).mapIt(
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
)

await allFutures(nodes.mapIt(it.start()))
await allFutures(nodes.mapIt(it.mountRelay()))
@ -142,10 +169,13 @@ procSuite "Peer Manager":
nodes[0].peerManager.addPeer(nodes[1].peerInfo.toRemotePeerInfo())
check:
# No information about node2's connectedness
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == NotConnected
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
NotConnected

# Failed connection
let nonExistentPeerRes = parsePeerInfo("/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e")
let nonExistentPeerRes = parsePeerInfo(
"/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e"
)
require:
nonExistentPeerRes.isOk()

@ -156,11 +186,13 @@ procSuite "Peer Manager":

check:
# Cannot connect to node2
nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) == CannotConnect
nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) ==
CannotConnect

# Successful connection
require:
(await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())) == true
(await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())) ==
true
await sleepAsync(chronos.milliseconds(500))

check:
@ -171,18 +203,23 @@ procSuite "Peer Manager":
await nodes[0].stop()
check:
# Not currently connected to node2, but had recent, successful connection.
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) == CanConnect
nodes[0].peerManager.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
CanConnect

await nodes[1].stop()

asyncTest "Peer manager updates failed peers correctly":
# Create 2 nodes
let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
let nodes = toSeq(0 ..< 2).mapIt(
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
)

await allFutures(nodes.mapIt(it.start()))
await allFutures(nodes.mapIt(it.mountRelay()))

let nonExistentPeerRes = parsePeerInfo("/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e")
let nonExistentPeerRes = parsePeerInfo(
"/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e"
)
require nonExistentPeerRes.isOk()

let nonExistentPeer = nonExistentPeerRes.value
@ -197,8 +234,10 @@ procSuite "Peer Manager":
let conn1Ok = await nodes[0].peerManager.connectRelay(nonExistentPeer)
check:
# Cannot connect to node2
nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) == CannotConnect
nodes[0].peerManager.peerStore[ConnectionBook][nonExistentPeer.peerId] == CannotConnect
nodes[0].peerManager.peerStore.connectedness(nonExistentPeer.peerId) ==
CannotConnect
nodes[0].peerManager.peerStore[ConnectionBook][nonExistentPeer.peerId] ==
CannotConnect
nodes[0].peerManager.peerStore[NumberFailedConnBook][nonExistentPeer.peerId] == 1

# Connection attempt failed
@ -216,7 +255,8 @@ procSuite "Peer Manager":

# After a successful connection, the number of failed connections is reset
nodes[0].peerManager.peerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] = 4
let conn2Ok = await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())
let conn2Ok =
await nodes[0].peerManager.connectRelay(nodes[1].peerInfo.toRemotePeerInfo())
check:
conn2Ok == true
nodes[0].peerManager.peerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] == 0
@ -231,10 +271,12 @@ procSuite "Peer Manager":
generateSecp256k1Key(),
ValidIpAddress.init("127.0.0.1"),
Port(44048),
peerStorage = storage
)
node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023))

peerStorage = storage,
)
node2 = newTestWakuNode(
generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023)
)

node1.mountMetadata(0).expect("Mounted Waku Metadata")
node2.mountMetadata(0).expect("Mounted Waku Metadata")

@ -243,7 +285,7 @@ procSuite "Peer Manager":

await node1.mountRelay()
await node2.mountRelay()


let peerInfo2 = node2.switch.peerInfo
var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
remotePeerInfo2.enr = some(node2.enr)
@ -252,7 +294,8 @@ procSuite "Peer Manager":
assert is12Connected == true, "Node 1 and 2 not connected"

check:
node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] == remotePeerInfo2.addrs
node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] ==
remotePeerInfo2.addrs

# wait for the peer store update
|
||||
await sleepAsync(chronos.milliseconds(500))
|
||||
@ -268,8 +311,8 @@ procSuite "Peer Manager":
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("127.0.0.1"),
|
||||
Port(56037),
|
||||
peerStorage = storage
|
||||
)
|
||||
peerStorage = storage,
|
||||
)
|
||||
|
||||
node3.mountMetadata(0).expect("Mounted Waku Metadata")
|
||||
|
||||
@ -303,10 +346,12 @@ procSuite "Peer Manager":
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("127.0.0.1"),
|
||||
Port(44048),
|
||||
peerStorage = storage
|
||||
)
|
||||
node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023))
|
||||
|
||||
peerStorage = storage,
|
||||
)
|
||||
node2 = newTestWakuNode(
|
||||
generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(34023)
|
||||
)
|
||||
|
||||
node1.mountMetadata(0).expect("Mounted Waku Metadata")
|
||||
node2.mountMetadata(0).expect("Mounted Waku Metadata")
|
||||
|
||||
@ -315,7 +360,7 @@ procSuite "Peer Manager":
|
||||
|
||||
await node1.mountRelay()
|
||||
await node2.mountRelay()
|
||||
|
||||
|
||||
let peerInfo2 = node2.switch.peerInfo
|
||||
var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
|
||||
remotePeerInfo2.enr = some(node2.enr)
|
||||
@ -324,7 +369,8 @@ procSuite "Peer Manager":
|
||||
assert is12Connected == true, "Node 1 and 2 not connected"
|
||||
|
||||
check:
|
||||
node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] == remotePeerInfo2.addrs
|
||||
node1.peerManager.peerStore[AddressBook][remotePeerInfo2.peerId] ==
|
||||
remotePeerInfo2.addrs
|
||||
|
||||
# wait for the peer store update
|
||||
await sleepAsync(chronos.milliseconds(500))
|
||||
@ -340,8 +386,8 @@ procSuite "Peer Manager":
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("127.0.0.1"),
|
||||
Port(56037),
|
||||
peerStorage = storage
|
||||
)
|
||||
peerStorage = storage,
|
||||
)
|
||||
|
||||
node3.mountMetadata(0).expect("Mounted Waku Metadata")
|
||||
|
||||
@ -405,13 +451,19 @@ procSuite "Peer Manager":
|
||||
await allFutures([node1.start(), node2.start(), node3.start()])
|
||||
|
||||
# 1->2 (fails)
|
||||
let conn1 = await node1.peerManager.dialPeer(node2.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
|
||||
let conn1 = await node1.peerManager.dialPeer(
|
||||
node2.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
|
||||
)
|
||||
|
||||
# 1->3 (fails)
|
||||
let conn2 = await node1.peerManager.dialPeer(node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
|
||||
let conn2 = await node1.peerManager.dialPeer(
|
||||
node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
|
||||
)
|
||||
|
||||
# 2->3 (succeeds)
|
||||
let conn3 = await node2.peerManager.dialPeer(node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
|
||||
let conn3 = await node2.peerManager.dialPeer(
|
||||
node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
|
||||
)
|
||||
|
||||
check:
|
||||
conn1.isNone
|
||||
@ -423,8 +475,14 @@ procSuite "Peer Manager":
|
||||
let
|
||||
database = SqliteDatabase.new(":memory:")[]
|
||||
storage = WakuPeerStorage.new(database)[]
|
||||
node1 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0), peerStorage = storage)
|
||||
node2 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
node1 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("0.0.0.0"),
|
||||
Port(0),
|
||||
peerStorage = storage,
|
||||
)
|
||||
node2 =
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
peerInfo2 = node2.switch.peerInfo
|
||||
betaCodec = "/vac/waku/relay/2.0.0-beta2"
|
||||
stableCodec = "/vac/waku/relay/2.0.0"
|
||||
@ -443,12 +501,18 @@ procSuite "Peer Manager":
|
||||
# Currently connected to node2
|
||||
node1.peerManager.peerStore.peers().len == 1
|
||||
node1.peerManager.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
|
||||
node1.peerManager.peerStore.peers().anyIt(it.protocols.contains(node2.wakuRelay.codec))
|
||||
node1.peerManager.peerStore.peers().anyIt(
|
||||
it.protocols.contains(node2.wakuRelay.codec)
|
||||
)
|
||||
node1.peerManager.peerStore.connectedness(peerInfo2.peerId) == Connected
|
||||
|
||||
# Simulate restart by initialising a new node using the same storage
|
||||
let
|
||||
node3 = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0), peerStorage = storage)
|
||||
let node3 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("0.0.0.0"),
|
||||
Port(0),
|
||||
peerStorage = storage,
|
||||
)
|
||||
|
||||
await node3.mountRelay()
|
||||
node3.wakuRelay.codec = stableCodec
|
||||
@ -476,16 +540,14 @@ procSuite "Peer Manager":
|
||||
|
||||
asyncTest "Peer manager connects to all peers supporting a given protocol":
|
||||
# Create 4 nodes
|
||||
let nodes =
|
||||
toSeq(0..<4)
|
||||
.mapIt(
|
||||
newTestWakuNode(
|
||||
nodeKey = generateSecp256k1Key(),
|
||||
bindIp = ValidIpAddress.init("0.0.0.0"),
|
||||
bindPort = Port(0),
|
||||
wakuFlags = some(CapabilitiesBitfield.init(@[Relay]))
|
||||
)
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(
|
||||
nodeKey = generateSecp256k1Key(),
|
||||
bindIp = ValidIpAddress.init("0.0.0.0"),
|
||||
bindPort = Port(0),
|
||||
wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
|
||||
)
|
||||
)
|
||||
|
||||
# Start them
|
||||
discard nodes.mapIt(it.mountMetadata(0))
|
||||
@ -494,7 +556,7 @@ procSuite "Peer Manager":
|
||||
|
||||
# Get all peer infos
|
||||
let peerInfos = collect:
|
||||
for i in 0..nodes.high:
|
||||
for i in 0 .. nodes.high:
|
||||
let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo()
|
||||
peerInfo.enr = some(nodes[i].enr)
|
||||
peerInfo
|
||||
@ -512,34 +574,47 @@ procSuite "Peer Manager":
|
||||
nodes[0].peerManager.peerStore.peers().len == 3
|
||||
|
||||
# All peer ids are correct
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[2].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[3].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[1].switch.peerInfo.peerId
|
||||
)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[2].switch.peerInfo.peerId
|
||||
)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[3].switch.peerInfo.peerId
|
||||
)
|
||||
|
||||
# All peers support the relay protocol
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
|
||||
# All peers are connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
|
||||
await allFutures(nodes.mapIt(it.stop()))
|
||||
|
||||
asyncTest "Sharded peer manager connects to all peers supporting a given protocol":
|
||||
# Create 4 nodes
|
||||
let nodes =
|
||||
toSeq(0..<4)
|
||||
.mapIt(
|
||||
newTestWakuNode(
|
||||
nodeKey = generateSecp256k1Key(),
|
||||
bindIp = ValidIpAddress.init("0.0.0.0"),
|
||||
bindPort = Port(0),
|
||||
wakuFlags = some(CapabilitiesBitfield.init(@[Relay]))
|
||||
)
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(
|
||||
nodeKey = generateSecp256k1Key(),
|
||||
bindIp = ValidIpAddress.init("0.0.0.0"),
|
||||
bindPort = Port(0),
|
||||
wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
|
||||
)
|
||||
)
|
||||
|
||||
# Start them
|
||||
discard nodes.mapIt(it.mountMetadata(0))
|
||||
@ -548,7 +623,7 @@ procSuite "Peer Manager":
|
||||
|
||||
# Get all peer infos
|
||||
let peerInfos = collect:
|
||||
for i in 0..nodes.high:
|
||||
for i in 0 .. nodes.high:
|
||||
let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo()
|
||||
peerInfo.enr = some(nodes[i].enr)
|
||||
peerInfo
|
||||
@ -566,25 +641,42 @@ procSuite "Peer Manager":
|
||||
nodes[0].peerManager.peerStore.peers().len == 3
|
||||
|
||||
# All peer ids are correct
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[2].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[3].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[1].switch.peerInfo.peerId
|
||||
)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[2].switch.peerInfo.peerId
|
||||
)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[3].switch.peerInfo.peerId
|
||||
)
|
||||
|
||||
# All peers support the relay protocol
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
|
||||
# All peers are connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
|
||||
await allFutures(nodes.mapIt(it.stop()))
|
||||
|
||||
asyncTest "Peer store keeps track of incoming connections":
|
||||
# Create 4 nodes
|
||||
let nodes = toSeq(0..<4).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
# Start them
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
@ -616,29 +708,50 @@ procSuite "Peer Manager":
|
||||
nodes[3].peerManager.peerStore.getPeersByDirection(Outbound).len == 1
|
||||
|
||||
# All peer ids are correct
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[1].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[2].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(it.peerId == nodes[3].switch.peerInfo.peerId)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[1].switch.peerInfo.peerId
|
||||
)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[2].switch.peerInfo.peerId
|
||||
)
|
||||
nodes[0].peerManager.peerStore.peers().anyIt(
|
||||
it.peerId == nodes[3].switch.peerInfo.peerId
|
||||
)
|
||||
|
||||
# All peers support the relay protocol
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(WakuRelayCodec)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
nodes[0].peerManager.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
|
||||
WakuRelayCodec
|
||||
)
|
||||
|
||||
# All peers are connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] == Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[1].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[2].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
nodes[0].peerManager.peerStore[ConnectionBook][nodes[3].switch.peerInfo.peerId] ==
|
||||
Connected
|
||||
|
||||
# All peers are Inbound in peer 0
|
||||
nodes[0].peerManager.peerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] == Inbound
|
||||
nodes[0].peerManager.peerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] == Inbound
|
||||
nodes[0].peerManager.peerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] == Inbound
|
||||
nodes[0].peerManager.peerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] ==
|
||||
Inbound
|
||||
nodes[0].peerManager.peerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] ==
|
||||
Inbound
|
||||
nodes[0].peerManager.peerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] ==
|
||||
Inbound
|
||||
|
||||
# All peers have an Outbound connection with peer 0
|
||||
nodes[1].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == Outbound
|
||||
nodes[2].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == Outbound
|
||||
nodes[3].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == Outbound
|
||||
nodes[1].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
|
||||
Outbound
|
||||
nodes[2].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
|
||||
Outbound
|
||||
nodes[3].peerManager.peerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
|
||||
Outbound
|
||||
|
||||
await allFutures(nodes.mapIt(it.stop()))
|
||||
|
||||
@ -647,13 +760,12 @@ procSuite "Peer Manager":
|
||||
let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D"
|
||||
|
||||
let
|
||||
node = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
peers = toSeq(1..5)
|
||||
.mapIt(
|
||||
parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it)
|
||||
)
|
||||
.filterIt(it.isOk())
|
||||
.mapIt(it.value)
|
||||
node =
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
peers = toSeq(1 .. 5)
|
||||
.mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
|
||||
.filterIt(it.isOk())
|
||||
.mapIt(it.value)
|
||||
|
||||
require:
|
||||
peers.len == 5
|
||||
@ -689,7 +801,9 @@ procSuite "Peer Manager":
|
||||
|
||||
asyncTest "connectedPeers() returns expected number of connections per protocol":
|
||||
# Create 4 nodes
|
||||
let nodes = toSeq(0..<4).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
# Start them with relay + filter
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
@ -706,12 +820,14 @@ procSuite "Peer Manager":
|
||||
(await nodes[0].peerManager.connectRelay(pInfos[2])) == true
|
||||
(await nodes[1].peerManager.connectRelay(pInfos[2])) == true
|
||||
|
||||
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
|
||||
(await nodes[0].peerManager.dialPeer(pInfos[2], WakuLegacyFilterCodec)).isSome() == true
|
||||
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
|
||||
true
|
||||
(await nodes[0].peerManager.dialPeer(pInfos[2], WakuLegacyFilterCodec)).isSome() ==
|
||||
true
|
||||
|
||||
# isolated dial creates a relay conn under the hood (libp2p behaviour)
|
||||
(await nodes[2].peerManager.dialPeer(pInfos[3], WakuLegacyFilterCodec)).isSome() == true
|
||||
|
||||
(await nodes[2].peerManager.dialPeer(pInfos[3], WakuLegacyFilterCodec)).isSome() ==
|
||||
true
|
||||
|
||||
# assert physical connections
|
||||
check:
|
||||
@ -741,7 +857,9 @@ procSuite "Peer Manager":
|
||||
|
||||
asyncTest "getNumStreams() returns expected number of connections per protocol":
|
||||
# Create 2 nodes
|
||||
let nodes = toSeq(0..<2).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
|
||||
let nodes = toSeq(0 ..< 2).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
# Start them with relay + filter
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
@ -754,10 +872,14 @@ procSuite "Peer Manager":
|
||||
require:
|
||||
# multiple streams are multiplexed over a single connection.
|
||||
# note that a relay connection is created under the hood when dialing a peer (libp2p behaviour)
|
||||
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
|
||||
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() == true
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
true
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
true
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
true
(await nodes[0].peerManager.dialPeer(pInfos[1], WakuLegacyFilterCodec)).isSome() ==
true

check:
nodes[0].peerManager.getNumStreams(WakuRelayCodec) == (1, 1)
@ -773,19 +895,21 @@ procSuite "Peer Manager":
# Create peer manager
let pm = PeerManager.new(
switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise().build(),
storage = nil)
storage = nil,
)

# Create 3 peer infos
let peers = toSeq(1..3)
.mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
.filterIt(it.isOk())
.mapIt(it.value)
let peers = toSeq(1 .. 3)
.mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
.filterIt(it.isOk())
.mapIt(it.value)
require:
peers.len == 3

# Add a peer[0] to the peerstore
pm.peerStore[AddressBook][peers[0].peerId] = peers[0].addrs
pm.peerStore[ProtoBook][peers[0].peerId] = @[WakuRelayCodec, WakuStoreCodec, WakuLegacyFilterCodec]
pm.peerStore[ProtoBook][peers[0].peerId] =
@[WakuRelayCodec, WakuStoreCodec, WakuLegacyFilterCodec]

# When no service peers, we get one from the peerstore
let selectedPeer1 = pm.selectPeer(WakuStoreCodec)
@ -826,28 +950,38 @@ procSuite "Peer Manager":

expect(Defect):
let pm = PeerManager.new(
switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise()
.withPeerStore(peerStoreSize)
.withMaxConnections(maxConnections)
.build(),
storage = nil)
switch = SwitchBuilder
.new()
.withRng(rng)
.withMplex()
.withNoise()
.withPeerStore(peerStoreSize)
.withMaxConnections(maxConnections)
.build(),
storage = nil,
)

test "prunePeerStore() correctly removes peers to match max quota":
# Create peer manager
let pm = PeerManager.new(
switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
switch = SwitchBuilder
.new()
.withRng(rng)
.withMplex()
.withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
maxFailedAttempts = 1,
maxRelayPeers = some(5),
storage = nil)
storage = nil,
)

# Create 15 peers and add them to the peerstore
let peers = toSeq(1..15)
.mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/0/p2p/" & $PeerId.random().get()))
.filterIt(it.isOk())
.mapIt(it.value)
let peers = toSeq(1 .. 15)
.mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/0/p2p/" & $PeerId.random().get()))
.filterIt(it.isOk())
.mapIt(it.value)
for p in peers:
pm.addPeer(p)

@ -886,19 +1020,24 @@ procSuite "Peer Manager":

asyncTest "canBeConnected() returns correct value":
let pm = PeerManager.new(
switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
initialBackoffInSec = 1, # with InitialBackoffInSec = 1 backoffs are: 1, 2, 4, 8secs.
switch = SwitchBuilder
.new()
.withRng(rng)
.withMplex()
.withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
initialBackoffInSec = 1,
# with InitialBackoffInSec = 1 backoffs are: 1, 2, 4, 8secs.
backoffFactor = 2,
maxFailedAttempts = 10,
maxRelayPeers = some(5),
storage = nil)
storage = nil,
)
var p1: PeerId
require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW" & "1")


# new peer with no errors can be connected
check:
pm.canBeConnected(p1) == true
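(A worked check of the backoff arithmetic in the comment above: each failed attempt multiplies the wait by backoffFactor, i.e. wait = initialBackoffInSec * backoffFactor^(attempts - 1). A minimal self-contained Nim sketch; `backoffSeconds` is a hypothetical helper for illustration, not nwaku API.)

import std/math

# Hypothetical helper illustrating the exponential backoff arithmetic only.
proc backoffSeconds(initialBackoffInSec, backoffFactor, failedAttempts: int): int =
  initialBackoffInSec * backoffFactor ^ (failedAttempts - 1)

# With initialBackoffInSec = 1 and backoffFactor = 2 the waits are 1, 2, 4, 8 seconds:
assert backoffSeconds(1, 2, 1) == 1
assert backoffSeconds(1, 2, 4) == 8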
@ -938,37 +1077,54 @@ procSuite "Peer Manager":
# Should result in overflow exception
expect(Defect):
let pm = PeerManager.new(
switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
switch = SwitchBuilder
.new()
.withRng(rng)
.withMplex()
.withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
maxRelayPeers = some(5),
maxFailedAttempts = 150,
storage = nil)
storage = nil,
)

# Should result in backoff > 1 week
expect(Defect):
let pm = PeerManager.new(
switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise()
switch = SwitchBuilder
.new()
.withRng(rng)
.withMplex()
.withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
maxFailedAttempts = 10,
maxRelayPeers = some(5),
storage = nil,
)

let pm = PeerManager.new(
switch = SwitchBuilder
.new()
.withRng(rng)
.withMplex()
.withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
maxFailedAttempts = 10,
maxRelayPeers = some(5),
storage = nil)

let pm = PeerManager.new(
switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise()
.withPeerStore(10)
.withMaxConnections(5)
.build(),
maxFailedAttempts = 5,
maxRelayPeers = some(5),
storage = nil)
storage = nil,
)

asyncTest "colocationLimit is enforced by pruneConnsByIp()":
# Create 5 nodes
let nodes = toSeq(0..<5).mapIt(newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)))
let nodes = toSeq(0 ..< 5).mapIt(
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
)

# Start them with relay + filter
await allFutures(nodes.mapIt(it.start()))

@ -1,10 +1,6 @@
{.used.}

import
std/options,
testutils/unittests,
eth/p2p/discoveryv5/enr,
libp2p/crypto/crypto
import std/options, testutils/unittests, eth/p2p/discoveryv5/enr, libp2p/crypto/crypto
import
../../waku/common/databases/db_sqlite,
../../waku/node/peer_manager/peer_manager,
@ -12,9 +8,7 @@ import
../../waku/waku_enr,
./testlib/wakucore


suite "Peer Storage":

test "Store, replace and retrieve from persistent peer storage":
let
database = SqliteDatabase.new(":memory:").tryGet()
@ -28,22 +22,24 @@ suite "Peer Storage":
connectedness = Connectedness.CanConnect
disconn = 999999
topics = @["/waku/2/rs/2/0", "/waku/2/rs/2/1"]


# Create ENR
var enrBuilder = EnrBuilder.init(peerKey)
enrBuilder.withShardedTopics(topics).expect("Valid topics")
let record = enrBuilder.build().expect("Valid record")

let stored = RemotePeerInfo(
peerId: peer.peerId,
addrs: @[peerLoc],
enr: some(record),
protocols: @[peerProto],
publicKey: peerKey.getPublicKey().tryGet(),
connectedness: connectedness,
disconnectTime: disconn)
peerId: peer.peerId,
addrs: @[peerLoc],
enr: some(record),
protocols: @[peerProto],
publicKey: peerKey.getPublicKey().tryGet(),
connectedness: connectedness,
disconnectTime: disconn,
)

defer: storage.close()
defer:
storage.close()

# Test insert and retrieve

@ -69,9 +65,10 @@ suite "Peer Storage":
resStoredInfo.publicKey == peerKey.getPublicKey().tryGet()
resStoredInfo.connectedness == connectedness
resStoredInfo.disconnectTime == disconn


assert resStoredInfo.enr.isSome(), "The ENR info wasn't properly stored"
check: resStoredInfo.enr.get() == record
check:
resStoredInfo.enr.get() == record

# Test replace and retrieve (update an existing entry)
stored.connectedness = CannotConnect

@ -14,7 +14,6 @@ import
../../waku/waku_node,
./testlib/wakucore


suite "Extended nim-libp2p Peer Store":
# Valid peerId missing the last digit. Useful for creating new peerIds
# basePeerId & "1"
@ -64,7 +63,8 @@ suite "Extended nim-libp2p Peer Store":

# Peer3: Connected
peerStore[AddressBook][p3] = @[MultiAddress.init("/ip4/127.0.0.1/tcp/3").tryGet()]
peerStore[ProtoBook][p3] = @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
peerStore[ProtoBook][p3] =
@["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
peerStore[KeyBook][p3] = generateEcdsaKeyPair().pubkey
peerStore[AgentBook][p3] = "gowaku"
peerStore[ProtoVersionBook][p3] = "protoVersion3"
@ -180,7 +180,8 @@ suite "Extended nim-libp2p Peer Store":
# Only p3 supports that protocol
lpPeers.len == 1
lpPeers.anyIt(it.peerId == p3)
lpPeers[0].protocols == @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
lpPeers[0].protocols ==
@["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]

test "peers() returns all StoredInfo matching a given protocolMatcher":
# When
@ -197,15 +198,20 @@ suite "Extended nim-libp2p Peer Store":
pMatcherStorePeers.anyIt(it.peerId == p5)

check:
pMatcherStorePeers.filterIt(it.peerId == p1)[0].protocols == @["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"]
pMatcherStorePeers.filterIt(it.peerId == p2)[0].protocols == @["/vac/waku/relay/2.0.0", "/vac/waku/store/2.0.0"]
pMatcherStorePeers.filterIt(it.peerId == p3)[0].protocols == @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
pMatcherStorePeers.filterIt(it.peerId == p5)[0].protocols == @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]
pMatcherStorePeers.filterIt(it.peerId == p1)[0].protocols ==
@["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"]
pMatcherStorePeers.filterIt(it.peerId == p2)[0].protocols ==
@["/vac/waku/relay/2.0.0", "/vac/waku/store/2.0.0"]
pMatcherStorePeers.filterIt(it.peerId == p3)[0].protocols ==
@["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"]
pMatcherStorePeers.filterIt(it.peerId == p5)[0].protocols ==
@["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]

check:
pMatcherSwapPeers.len == 1
pMatcherSwapPeers.anyIt(it.peerId == p5)
pMatcherSwapPeers[0].protocols == @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]
pMatcherSwapPeers[0].protocols ==
@["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"]

test "toRemotePeerInfo() converts a StoredInfo to a RemotePeerInfo":
# Given

@ -11,10 +11,7 @@ import
libp2p/protocols/pubsub/gossipsub

import
../../waku/waku_core,
../../waku/waku_node,
./testlib/wakucore,
./testlib/wakunode
../../waku/waku_core, ../../waku/waku_node, ./testlib/wakucore, ./testlib/wakunode

procSuite "Relay (GossipSub) Peer Exchange":
asyncTest "Mount relay without peer exchange handler":

@ -1,7 +1,6 @@
{.used.}

import
testutils/unittests
import testutils/unittests
import
stew/results,
../../waku/waku_core/message,
@ -9,7 +8,6 @@ import
./testlib/common

suite "Waku Payload":

test "Encode/Decode waku message with timestamp":
## Test encoding and decoding of the timestamp field of a WakuMessage

@ -21,7 +19,7 @@ suite "Waku Payload":
msg = WakuMessage(payload: payload, version: version, timestamp: timestamp)

## When
let pb = msg.encode()
let pb = msg.encode()
let msgDecoded = WakuMessage.decode(pb.buffer)

## Then
@ -42,7 +40,7 @@ suite "Waku Payload":
msg = WakuMessage(payload: payload, version: version)

## When
let pb = msg.encode()
let pb = msg.encode()
let msgDecoded = WakuMessage.decode(pb.buffer)

## Then

@ -42,9 +42,12 @@ suite "Waku DNS Discovery":
await allFutures([node1.start(), node2.start(), node3.start()])

# Build and sign tree
var tree = buildTree(1, # Seq no
@[enr1, enr2, enr3], # ENR entries
@[]).get() # No link entries
var tree = buildTree(
1, # Seq no
@[enr1, enr2, enr3], # ENR entries
@[],
)
.get() # No link entries

let treeKeys = keys.KeyPair.random(rng[])

@ -57,7 +60,8 @@ suite "Waku DNS Discovery":
domain = "testnodes.aq"
zoneTxts = tree.buildTXT(domain).get()
username = Base32.encode(treeKeys.pubkey().toRawCompressed())
location = LinkPrefix & username & "@" & domain # See EIP-1459: https://eips.ethereum.org/EIPS/eip-1459
location = LinkPrefix & username & "@" & domain
# See EIP-1459: https://eips.ethereum.org/EIPS/eip-1459

# Create a resolver for the domain
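(For reference, the location string built above follows EIP-1459's tree URL scheme, enrtree://<base32(compressed-pubkey)>@<domain>. A small illustrative sketch; the "enrtree://" literal is assumed from EIP-1459 and stands in for LinkPrefix, and the username value is a made-up example.)

# Hedged sketch of the EIP-1459 location URL assembled above.
let
  username = "AM5FNQLWS3KJ5EXAMPLEONLY"   # Base32 of the tree key's compressed public key (example value)
  domain = "testnodes.aq"
  location = "enrtree://" & username & "@" & domain
# location == "enrtree://AM5FNQLWS3KJ5EXAMPLEONLY@testnodes.aq"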
@ -90,11 +94,20 @@ suite "Waku DNS Discovery":

check:
# We have successfully connected to all discovered nodes
node4.peerManager.peerStore.peers().anyIt(it.peerId == node1.switch.peerInfo.peerId)
node4.peerManager.peerStore.connectedness(node1.switch.peerInfo.peerId) == Connected
node4.peerManager.peerStore.peers().anyIt(it.peerId == node2.switch.peerInfo.peerId)
node4.peerManager.peerStore.connectedness(node2.switch.peerInfo.peerId) == Connected
node4.peerManager.peerStore.peers().anyIt(it.peerId == node3.switch.peerInfo.peerId)
node4.peerManager.peerStore.connectedness(node3.switch.peerInfo.peerId) == Connected
node4.peerManager.peerStore.peers().anyIt(
it.peerId == node1.switch.peerInfo.peerId
)
node4.peerManager.peerStore.connectedness(node1.switch.peerInfo.peerId) ==
Connected
node4.peerManager.peerStore.peers().anyIt(
it.peerId == node2.switch.peerInfo.peerId
)
node4.peerManager.peerStore.connectedness(node2.switch.peerInfo.peerId) ==
Connected
node4.peerManager.peerStore.peers().anyIt(
it.peerId == node3.switch.peerInfo.peerId
)
node4.peerManager.peerStore.connectedness(node3.switch.peerInfo.peerId) ==
Connected

await allFutures([node1.stop(), node2.stop(), node3.stop(), node4.stop()])

@ -1,19 +1,12 @@
{.used.}

import
std/[options, sequtils],
stew/results,
testutils/unittests
import
../../waku/waku_core,
../../waku/waku_enr,
./testlib/wakucore

import std/[options, sequtils], stew/results, testutils/unittests
import ../../waku/waku_core, ../../waku/waku_enr, ./testlib/wakucore

suite "Waku ENR - Capabilities bitfield":
test "check capabilities support":
## Given
let bitfield: CapabilitiesBitfield = 0b0000_1101u8 # Lightpush, Filter, Relay
let bitfield: CapabilitiesBitfield = 0b0000_1101u8 # Lightpush, Filter, Relay

## Then
check:
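(The bitfield literal above packs one capability per bit. Assuming the LSB-first order Relay = bit 0, Store = bit 1, Filter = bit 2, Lightpush = bit 3 — consistent with the "Lightpush, Filter, Relay" comment — 0b0000_1101 sets Relay, Filter and Lightpush and leaves Store clear. A minimal standalone sketch of that check; `hasCap` is illustrative, not the library API.)

# Illustrative bit test under the assumed LSB-first capability order.
proc hasCap(bitfield: uint8, bit: int): bool =
  (bitfield and (1'u8 shl bit)) != 0

let caps = 0b0000_1101'u8
assert hasCap(caps, 0)      # Relay
assert not hasCap(caps, 1)  # Store
assert hasCap(caps, 2)      # Filter
assert hasCap(caps, 3)      # Lightpush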
@ -25,11 +18,8 @@ suite "Waku ENR - Capabilities bitfield":
test "bitfield to capabilities list":
## Given
let bitfield = CapabilitiesBitfield.init(
relay = true,
store = false,
lightpush = true,
filter = true
)
relay = true, store = false, lightpush = true, filter = true
)

## When
let caps = bitfield.toCapabilities()
@ -83,9 +73,10 @@ suite "Waku ENR - Capabilities bitfield":

test "check capabilities on a waku node record":
## Given
let wakuRecord = "-Hy4QC73_E3B_FkZhsOakaD4pHe-U--UoGASdG9N0F3SFFUDY_jdQbud8" &
"EXVyrlOZ5pZ7VYFBDPMRCENwy87Lh74dFIBgmlkgnY0iXNlY3AyNTZrMaECvNt1jIWbWGp" &
"AWWdlLGYm1E1OjlkQk3ONoxDC5sfw8oOFd2FrdTID"
let wakuRecord =
"-Hy4QC73_E3B_FkZhsOakaD4pHe-U--UoGASdG9N0F3SFFUDY_jdQbud8" &
"EXVyrlOZ5pZ7VYFBDPMRCENwy87Lh74dFIBgmlkgnY0iXNlY3AyNTZrMaECvNt1jIWbWGp" &
"AWWdlLGYm1E1OjlkQk3ONoxDC5sfw8oOFd2FrdTID"

## When
var record: Record
@ -109,9 +100,10 @@ suite "Waku ENR - Capabilities bitfield":
test "check capabilities on a non-waku node record":
## Given
# non waku enr, i.e. Ethereum one
let nonWakuEnr = "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2G" &
"xb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNl" &
"Y3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA"
let nonWakuEnr =
"enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2G" &
"xb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNl" &
"Y3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA"

## When
var record: Record
@ -131,26 +123,33 @@ suite "Waku ENR - Capabilities bitfield":
record.supportsCapability(Capabilities.Filter) == false
record.supportsCapability(Capabilities.Lightpush) == false


suite "Waku ENR - Multiaddresses":

test "decode record with multiaddrs field":
## Given
let enrUri = "enr:-QEnuEBEAyErHEfhiQxAVQoWowGTCuEF9fKZtXSd7H_PymHFhGJA3rGAYDVSH" &
"KCyJDGRLBGsloNbS8AZF33IVuefjOO6BIJpZIJ2NIJpcIQS39tkim11bHRpYWRkcn" &
"O4lgAvNihub2RlLTAxLmRvLWFtczMud2FrdXYyLnRlc3Quc3RhdHVzaW0ubmV0BgG" &
"73gMAODcxbm9kZS0wMS5hYy1jbi1ob25na29uZy1jLndha3V2Mi50ZXN0LnN0YXR1" &
"c2ltLm5ldAYBu94DACm9A62t7AQL4Ef5ZYZosRpQTzFVAB8jGjf1TER2wH-0zBOe1" &
"-MDBNLeA4lzZWNwMjU2azGhAzfsxbxyCkgCqq8WwYsVWH7YkpMLnU2Bw5xJSimxKa" &
"v-g3VkcIIjKA"
let enrUri =
"enr:-QEnuEBEAyErHEfhiQxAVQoWowGTCuEF9fKZtXSd7H_PymHFhGJA3rGAYDVSH" &
"KCyJDGRLBGsloNbS8AZF33IVuefjOO6BIJpZIJ2NIJpcIQS39tkim11bHRpYWRkcn" &
"O4lgAvNihub2RlLTAxLmRvLWFtczMud2FrdXYyLnRlc3Quc3RhdHVzaW0ubmV0BgG" &
"73gMAODcxbm9kZS0wMS5hYy1jbi1ob25na29uZy1jLndha3V2Mi50ZXN0LnN0YXR1" &
"c2ltLm5ldAYBu94DACm9A62t7AQL4Ef5ZYZosRpQTzFVAB8jGjf1TER2wH-0zBOe1" &
"-MDBNLeA4lzZWNwMjU2azGhAzfsxbxyCkgCqq8WwYsVWH7YkpMLnU2Bw5xJSimxKa" &
"v-g3VkcIIjKA"

var record: Record
require record.fromURI(enrUri)

let
expectedAddr1 = MultiAddress.init("/dns4/node-01.do-ams3.wakuv2.test.statusim.net/tcp/443/wss").get()
expectedAddr2 = MultiAddress.init("/dns6/node-01.ac-cn-hongkong-c.wakuv2.test.statusim.net/tcp/443/wss").get()
expectedAddr3 = MultiAddress.init("/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234/wss").get()
expectedAddr1 = MultiAddress
.init("/dns4/node-01.do-ams3.wakuv2.test.statusim.net/tcp/443/wss")
.get()
expectedAddr2 = MultiAddress
.init("/dns6/node-01.ac-cn-hongkong-c.wakuv2.test.statusim.net/tcp/443/wss")
.get()
expectedAddr3 = MultiAddress
.init(
"/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234/wss"
)
.get()

## When
let typedRecord = record.toTyped()
@ -225,7 +224,11 @@ suite "Waku ENR - Multiaddresses":
enrPrivKey = generatesecp256k1key()

let
addr1 = MultiAddress.init("/ip4/127.0.0.1/tcp/80/ws/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr31iDQpSN5Qa882BCjjwgrD").get()
addr1 = MultiAddress
.init(
"/ip4/127.0.0.1/tcp/80/ws/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr31iDQpSN5Qa882BCjjwgrD"
)
.get()
addr2 = MultiAddress.init("/ip4/127.0.0.1/tcp/443/wss").get()

let expectedAddr1 = MultiAddress.init("/ip4/127.0.0.1/tcp/80/ws").get()
@ -252,9 +255,7 @@ suite "Waku ENR - Multiaddresses":
multiaddrs.contains(expectedAddr1)
multiaddrs.contains(addr2)


suite "Waku ENR - Relay static sharding":

test "new relay shards object with single invalid shard id":
## Given
let
@ -374,7 +375,8 @@ suite "Waku ENR - Relay static sharding":
enrSeqNum = 1u64
enrPrivKey = generatesecp256k1key()

let shardsTopics = RelayShards.init(33, toSeq(0u16 ..< 64u16)).expect("Valid Shards")
let shardsTopics =
RelayShards.init(33, toSeq(0u16 ..< 64u16)).expect("Valid Shards")

var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum)
require builder.withWakuRelaySharding(shardsTopics).isOk()
@ -402,9 +404,12 @@ suite "Waku ENR - Relay static sharding":
enrPrivKey = generatesecp256k1key()

let
relayShardsIndicesList = RelayShards.init(22, @[1u16, 1u16, 2u16, 3u16, 5u16, 8u16]).expect("Valid Shards")
relayShardsBitVector = RelayShards.init(33, @[13u16, 24u16, 37u16, 61u16, 98u16, 159u16]).expect("Valid Shards")

relayShardsIndicesList = RelayShards
.init(22, @[1u16, 1u16, 2u16, 3u16, 5u16, 8u16])
.expect("Valid Shards")
relayShardsBitVector = RelayShards
.init(33, @[13u16, 24u16, 37u16, 61u16, 98u16, 159u16])
.expect("Valid Shards")

var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum)
require builder.withWakuRelayShardingIndicesList(relayShardsIndicesList).isOk()
@ -1,11 +1,7 @@
{.used.}

import
std/[options, tables],
testutils/unittests,
chronicles,
chronos,
libp2p/crypto/crypto
std/[options, tables], testutils/unittests, chronicles, chronos, libp2p/crypto/crypto
import
../../waku/node/peer_manager,
../../waku/waku_core,
@ -14,8 +10,9 @@ import
./testlib/common,
./testlib/wakucore


proc newTestWakuFilterNode(switch: Switch, timeout: Duration = 2.hours): Future[WakuFilterLegacy] {.async.} =
proc newTestWakuFilterNode(
switch: Switch, timeout: Duration = 2.hours
): Future[WakuFilterLegacy] {.async.} =
let
peerManager = PeerManager.new(switch)
proto = WakuFilterLegacy.new(peerManager, rng, timeout)
@ -35,7 +32,6 @@ proc newTestWakuFilterClient(switch: Switch): Future[WakuFilterClientLegacy] {.a

return proto


# TODO: Extend test coverage
suite "Waku Filter":
asyncTest "should forward messages to client after subscribed":
@ -54,16 +50,20 @@ suite "Waku Filter":
let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

let pushHandlerFuture = newFuture[(string, WakuMessage)]()
proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
proc pushHandler(
pubsubTopic: PubsubTopic, message: WakuMessage
) {.async, gcsafe, closure.} =
pushHandlerFuture.complete((pubsubTopic, message))

let
pubsubTopic = DefaultPubsubTopic
contentTopic = "test-content-topic"
msg = fakeWakuMessage(contentTopic=contentTopic)
msg = fakeWakuMessage(contentTopic = contentTopic)

## When
require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
require (
await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
).isOk()

# WARN: Sleep necessary to avoid a race condition between the subscription and the handle message proc
await sleepAsync(500.milliseconds)
@ -97,16 +97,20 @@ suite "Waku Filter":
let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

var pushHandlerFuture = newFuture[void]()
proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
proc pushHandler(
pubsubTopic: PubsubTopic, message: WakuMessage
) {.async, gcsafe, closure.} =
pushHandlerFuture.complete()

let
pubsubTopic = DefaultPubsubTopic
contentTopic = "test-content-topic"
msg = fakeWakuMessage(contentTopic=contentTopic)
msg = fakeWakuMessage(contentTopic = contentTopic)

## When
require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
require (
await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
).isOk()

# WARN: Sleep necessary to avoid a race condition between the subscription and the handle message proc
await sleepAsync(500.milliseconds)
@ -118,7 +122,7 @@ suite "Waku Filter":
# Reset to test unsubscribe
pushHandlerFuture = newFuture[void]()

require (await client.unsubscribe(pubsubTopic, contentTopic, peer=serverAddr)).isOk()
require (await client.unsubscribe(pubsubTopic, contentTopic, peer = serverAddr)).isOk()

# WARN: Sleep necessary to avoid a race condition between the unsubscription and the handle message proc
await sleepAsync(500.milliseconds)
@ -126,7 +130,8 @@ suite "Waku Filter":
await server.handleMessage(pubsubTopic, msg)

## Then
let handlerWasCalledAfterUnsubscription = await pushHandlerFuture.withTimeout(1.seconds)
let handlerWasCalledAfterUnsubscription =
await pushHandlerFuture.withTimeout(1.seconds)
check:
not handlerWasCalledAfterUnsubscription

@ -142,23 +147,27 @@ suite "Waku Filter":
await allFutures(serverSwitch.start(), clientSwitch.start())

let
server = await newTestWakuFilterNode(serverSwitch, timeout=200.milliseconds)
server = await newTestWakuFilterNode(serverSwitch, timeout = 200.milliseconds)
client = await newTestWakuFilterClient(clientSwitch)

## Given
let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

var pushHandlerFuture = newFuture[void]()
proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
proc pushHandler(
pubsubTopic: PubsubTopic, message: WakuMessage
) {.async, gcsafe, closure.} =
pushHandlerFuture.complete()

let
pubsubTopic = DefaultPubsubTopic
contentTopic = "test-content-topic"
msg = fakeWakuMessage(contentTopic=contentTopic)
msg = fakeWakuMessage(contentTopic = contentTopic)

## When
require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
require (
await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
).isOk()

# WARN: Sleep necessary to avoid a race condition between the unsubscription and the handle message proc
await sleepAsync(500.milliseconds)
@ -207,23 +216,27 @@ suite "Waku Filter":
await allFutures(serverSwitch.start(), clientSwitch.start())

let
server = await newTestWakuFilterNode(serverSwitch, timeout=200.milliseconds)
server = await newTestWakuFilterNode(serverSwitch, timeout = 200.milliseconds)
client = await newTestWakuFilterClient(clientSwitch)

## Given
let serverAddr = serverSwitch.peerInfo.toRemotePeerInfo()

var pushHandlerFuture = newFuture[void]()
proc pushHandler(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} =
proc pushHandler(
pubsubTopic: PubsubTopic, message: WakuMessage
) {.async, gcsafe, closure.} =
pushHandlerFuture.complete()

let
pubsubTopic = DefaultPubsubTopic
contentTopic = "test-content-topic"
msg = fakeWakuMessage(contentTopic=contentTopic)
msg = fakeWakuMessage(contentTopic = contentTopic)

## When
require (await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer=serverAddr)).isOk()
require (
await client.subscribe(pubsubTopic, contentTopic, pushHandler, peer = serverAddr)
).isOk()

# WARN: Sleep necessary to avoid a race condition between the unsubscription and the handle message proc
await sleepAsync(500.milliseconds)
@ -255,8 +268,7 @@ suite "Waku Filter":

# Start switch with same key as before
let clientSwitch2 = newTestSwitch(
some(clientSwitch.peerInfo.privateKey),
some(clientSwitch.peerInfo.addrs[0])
some(clientSwitch.peerInfo.privateKey), some(clientSwitch.peerInfo.addrs[0])
)
await clientSwitch2.start()
await client.start()

@ -12,14 +12,9 @@
libp2p/stream/connection,
libp2p/crypto/crypto
import
../../waku/waku_core,
../../waku/waku_node,
./testlib/wakucore,
./testlib/wakunode

../../waku/waku_core, ../../waku/waku_node, ./testlib/wakucore, ./testlib/wakunode

suite "Waku Keepalive":

asyncTest "handle ping keepalives":
let
nodeKey1 = generateSecp256k1Key()
@ -1,38 +1,30 @@
{.used.}

import
std/[os, json],
chronos,
testutils/unittests
import
../../waku/waku_keystore,
./testlib/common
import std/[os, json], chronos, testutils/unittests
import ../../waku/waku_keystore, ./testlib/common

from ../../waku/waku_noise/noise_utils import randomSeqByte
from ../../waku/waku_noise/noise_utils import randomSeqByte

procSuite "Credentials test suite":

let testAppInfo = AppInfo(application: "test", appIdentifier: "1234", version: "0.1")

test "Create keystore":

let filepath = "./testAppKeystore.txt"
defer: removeFile(filepath)
defer:
removeFile(filepath)

let keystoreRes = createAppKeystore(path = filepath,
appInfo = testAppInfo)
let keystoreRes = createAppKeystore(path = filepath, appInfo = testAppInfo)

check:
keystoreRes.isOk()

test "Load keystore":

let filepath = "./testAppKeystore.txt"
defer: removeFile(filepath)
defer:
removeFile(filepath)

# If no keystore exists at filepath, a new one is created for appInfo and empty credentials
let keystoreRes = loadAppKeystore(path = filepath,
appInfo = testAppInfo)
let keystoreRes = loadAppKeystore(path = filepath, appInfo = testAppInfo)

check:
keystoreRes.isOk()
@ -48,106 +40,134 @@ procSuite "Credentials test suite":
keystore["credentials"].len() == 0

test "Add credentials to keystore":

let filepath = "./testAppKeystore.txt"
defer: removeFile(filepath)
defer:
removeFile(filepath)

# We generate a random identity credential (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
var
idTrapdoor = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idSecretHash = randomSeqByte(rng[], 32)
idCommitment = randomSeqByte(rng[], 32)

var idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
var idCredential = IdentityCredential(
idTrapdoor: idTrapdoor,
idNullifier: idNullifier,
idSecretHash: idSecretHash,
idCommitment: idCommitment,
)

var contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
var contract = MembershipContract(
chainId: "5", address: "0x0123456789012345678901234567890123456789"
)
var index = MembershipIndex(1)

let membershipCredential = KeystoreMembership(membershipContract: contract,
treeIndex: index,
identityCredential: idCredential)
let membershipCredential = KeystoreMembership(
membershipContract: contract, treeIndex: index, identityCredential: idCredential
)
let password = "%m0um0ucoW%"

let keystoreRes = addMembershipCredentials(path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo)
let keystoreRes = addMembershipCredentials(
path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo,
)

check:
keystoreRes.isOk()

test "Add/retrieve credentials in keystore":

let filepath = "./testAppKeystore.txt"
defer: removeFile(filepath)
defer:
removeFile(filepath)

# We generate two random identity credentials (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
var
idTrapdoor = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idSecretHash = randomSeqByte(rng[], 32)
idCommitment = randomSeqByte(rng[], 32)
idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
idCredential = IdentityCredential(
idTrapdoor: idTrapdoor,
idNullifier: idNullifier,
idSecretHash: idSecretHash,
idCommitment: idCommitment,
)

# We generate two distinct membership groups
var contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
var contract = MembershipContract(
chainId: "5", address: "0x0123456789012345678901234567890123456789"
)
var index = MembershipIndex(1)
var membershipCredential = KeystoreMembership(membershipContract: contract,
treeIndex: index,
identityCredential: idCredential)

var membershipCredential = KeystoreMembership(
membershipContract: contract, treeIndex: index, identityCredential: idCredential
)

let password = "%m0um0ucoW%"

# We add credentials to the keystore. Note that only 3 credentials should be effectively added, since rlnMembershipCredentials3 is equal to membershipCredentials2
let keystoreRes = addMembershipCredentials(path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo)
let keystoreRes = addMembershipCredentials(
path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo,
)

check:
keystoreRes.isOk()

# We test retrieval of credentials.
var expectedMembership = membershipCredential
let membershipQuery = KeystoreMembership(membershipContract: contract,
treeIndex: index)
let membershipQuery =
KeystoreMembership(membershipContract: contract, treeIndex: index)

var recoveredCredentialsRes = getMembershipCredentials(path = filepath,
password = password,
query = membershipQuery,
appInfo = testAppInfo)
var recoveredCredentialsRes = getMembershipCredentials(
path = filepath,
password = password,
query = membershipQuery,
appInfo = testAppInfo,
)

check:
recoveredCredentialsRes.isOk()
recoveredCredentialsRes.get() == expectedMembership

test "if the keystore contains only one credential, fetch that irrespective of treeIndex":

let filepath = "./testAppKeystore.txt"
defer: removeFile(filepath)
defer:
removeFile(filepath)

# We generate random identity credentials (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
let
idTrapdoor = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idSecretHash = randomSeqByte(rng[], 32)
idCommitment = randomSeqByte(rng[], 32)
idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
idCredential = IdentityCredential(
idTrapdoor: idTrapdoor,
idNullifier: idNullifier,
idSecretHash: idSecretHash,
idCommitment: idCommitment,
)

let contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
let contract = MembershipContract(
chainId: "5", address: "0x0123456789012345678901234567890123456789"
)
let index = MembershipIndex(1)
let membershipCredential = KeystoreMembership(membershipContract: contract,
treeIndex: index,
identityCredential: idCredential)
let membershipCredential = KeystoreMembership(
membershipContract: contract, treeIndex: index, identityCredential: idCredential
)

let password = "%m0um0ucoW%"

let keystoreRes = addMembershipCredentials(path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo)
let keystoreRes = addMembershipCredentials(
path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo,
)

assert(keystoreRes.isOk(), $keystoreRes.error)

@ -155,56 +175,73 @@ procSuite "Credentials test suite":
let expectedMembership = membershipCredential
let membershipQuery = KeystoreMembership(membershipContract: contract)

let recoveredCredentialsRes = getMembershipCredentials(path = filepath,
password = password,
query = membershipQuery,
appInfo = testAppInfo)
let recoveredCredentialsRes = getMembershipCredentials(
path = filepath,
password = password,
query = membershipQuery,
appInfo = testAppInfo,
)

assert(recoveredCredentialsRes.isOk(), $recoveredCredentialsRes.error)
check: recoveredCredentialsRes.get() == expectedMembership
check:
recoveredCredentialsRes.get() == expectedMembership

test "if the keystore contains multiple credentials, then error out if treeIndex has not been passed in":
let filepath = "./testAppKeystore.txt"
defer: removeFile(filepath)
defer:
removeFile(filepath)

# We generate random identity credentials (inter-value constraints are not enforced, otherwise we need to load e.g. zerokit RLN keygen)
let
idTrapdoor = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idNullifier = randomSeqByte(rng[], 32)
idSecretHash = randomSeqByte(rng[], 32)
idCommitment = randomSeqByte(rng[], 32)
idCredential = IdentityCredential(idTrapdoor: idTrapdoor, idNullifier: idNullifier, idSecretHash: idSecretHash, idCommitment: idCommitment)
idCredential = IdentityCredential(
idTrapdoor: idTrapdoor,
idNullifier: idNullifier,
idSecretHash: idSecretHash,
idCommitment: idCommitment,
)

# We generate two distinct membership groups
let contract = MembershipContract(chainId: "5", address: "0x0123456789012345678901234567890123456789")
let contract = MembershipContract(
chainId: "5", address: "0x0123456789012345678901234567890123456789"
)
let index = MembershipIndex(1)
var membershipCredential = KeystoreMembership(membershipContract: contract,
treeIndex: index,
identityCredential: idCredential)
var membershipCredential = KeystoreMembership(
membershipContract: contract, treeIndex: index, identityCredential: idCredential
)

let password = "%m0um0ucoW%"

let keystoreRes = addMembershipCredentials(path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo)
let keystoreRes = addMembershipCredentials(
path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo,
)

assert(keystoreRes.isOk(), $keystoreRes.error)

membershipCredential.treeIndex = MembershipIndex(2)
let keystoreRes2 = addMembershipCredentials(path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo)
let keystoreRes2 = addMembershipCredentials(
path = filepath,
membership = membershipCredential,
password = password,
appInfo = testAppInfo,
)
assert(keystoreRes2.isOk(), $keystoreRes2.error)

# We test retrieval of credentials.
let membershipQuery = KeystoreMembership(membershipContract: contract)

let recoveredCredentialsRes = getMembershipCredentials(path = filepath,
password = password,
query = membershipQuery,
appInfo = testAppInfo)
let recoveredCredentialsRes = getMembershipCredentials(
path = filepath,
password = password,
query = membershipQuery,
appInfo = testAppInfo,
)

check:
recoveredCredentialsRes.isErr()
@ -1,26 +1,19 @@
{.used.}

import
std/[json, os],
stew/byteutils,
testutils/unittests, chronos,
eth/keys
import
../../waku/waku_keystore,
./testlib/common
import std/[json, os], stew/byteutils, testutils/unittests, chronos, eth/keys
import ../../waku/waku_keystore, ./testlib/common

from ../../waku/waku_noise/noise_utils import randomSeqByte

suite "KeyFile test suite":

test "Create/Save/Load single keyfile":

# The password we use to encrypt our secret
let password = "randompassword"

# The filepath where the keyfile will be stored
let filepath = "./test.keyfile"
defer: removeFile(filepath)
defer:
removeFile(filepath)

# The secret
var secret = randomSeqByte(rng[], 300)
@ -48,7 +41,6 @@ suite "KeyFile test suite":
secret == decodedSecret.get()

test "Create/Save/Load multiple keyfiles in same file":

# We set different passwords for different keyfiles that will be stored in same file
let password1 = string.fromBytes(randomSeqByte(rng[], 20))
let password2 = ""
@ -56,7 +48,8 @@ suite "KeyFile test suite":
var keyfile: KfResult[JsonNode]

let filepath = "./test.keyfile"
defer: removeFile(filepath)
defer:
removeFile(filepath)

# We generate 6 different secrets and we encrypt them using 3 different passwords, and we store the obtained keystore

@ -133,218 +126,218 @@ suite "KeyFile test suite":
secret3 == decodedSecretsPassword3[0].get()
secret4 == decodedSecretsPassword3[1].get()


# The following tests are originally from the nim-eth keyfile tests module https://github.com/status-im/nim-eth/blob/master/tests/keyfile/test_keyfile.nim
# and are slightly adapted to test backwards compatibility with nim-eth implementation of our customized version of the utils/keyfile module
# Note: the original nim-eth "Create/Save/Load test" is redefined and expanded above in "KeyFile test suite"
suite "KeyFile test suite (adapted from nim-eth keyfile tests)":

# Testvectors originally from https://github.com/status-im/nim-eth/blob/fef47331c37ee8abb8608037222658737ff498a6/tests/keyfile/test_keyfile.nim#L22-L168
let TestVectors = [
%*{
"keyfile": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {"iv" : "6087dab2f9fdbbfaddc31a909735c1e6"},
"ciphertext" : "5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf" : "pbkdf2",
"kdfparams" : {
"c" : 262144,
"dklen" : 32,
"prf" : "hmac-sha256",
"salt" : "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd"
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {"iv": "6087dab2f9fdbbfaddc31a909735c1e6"},
"ciphertext":
"5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf": "pbkdf2",
"kdfparams": {
"c": 262144,
"dklen": 32,
"prf": "hmac-sha256",
"salt": "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd",
},
"mac" : "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2"
"mac": "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2",
},
"id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version" : 3
"id": "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version": 3,
},
"name": "test1",
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d",
},
%*{
"keyfile": {
"version": 3,
"crypto": {
"ciphertext": "ee75456c006b1e468133c5d2a916bacd3cf515ced4d9b021b5c59978007d1e87",
"ciphertext":
"ee75456c006b1e468133c5d2a916bacd3cf515ced4d9b021b5c59978007d1e87",
"version": 1,
"kdf": "pbkdf2",
"kdfparams": {
"dklen": 32,
"c": 262144,
"prf": "hmac-sha256",
"salt": "504490577620f64f43d73f29479c2cf0"
"salt": "504490577620f64f43d73f29479c2cf0",
},
"mac": "196815708465de9af7504144a1360d08874fc3c30bb0e648ce88fbc36830d35d",
"cipherparams": {"iv": "514ccc8c4fb3e60e5538e0cf1e27c233"},
"cipher": "aes-128-ctr"
"cipher": "aes-128-ctr",
},
"id": "98d193c7-5174-4c7c-5345-c1daf95477b5"
"id": "98d193c7-5174-4c7c-5345-c1daf95477b5",
},
"name": "python_generated_test_with_odd_iv",
"password": "foo",
"priv": "0101010101010101010101010101010101010101010101010101010101010101"
"priv": "0101010101010101010101010101010101010101010101010101010101010101",
},
%*{
"keyfile": {
"version": 3,
"crypto": {
"ciphertext": "d69313b6470ac1942f75d72ebf8818a0d484ac78478a132ee081cd954d6bd7a9",
"ciphertext":
"d69313b6470ac1942f75d72ebf8818a0d484ac78478a132ee081cd954d6bd7a9",
"cipherparams": {"iv": "ffffffffffffffffffffffffffffffff"},
"kdf": "pbkdf2",
"kdfparams": {
"dklen": 32,
"c": 262144,
"prf": "hmac-sha256",
"salt": "c82ef14476014cbf438081a42709e2ed"
"salt": "c82ef14476014cbf438081a42709e2ed",
},
"mac": "cf6bfbcc77142a22c4a908784b4a16f1023a1d0e2aff404c20158fa4f1587177",
"cipher": "aes-128-ctr",
"version": 1
"version": 1,
},
"id": "abb67040-8dbe-0dad-fc39-2b082ef0ee5f"
"id": "abb67040-8dbe-0dad-fc39-2b082ef0ee5f",
},
"name": "evilnonce",
"password": "bar",
"priv": "0202020202020202020202020202020202020202020202020202020202020202"
"priv": "0202020202020202020202020202020202020202020202020202020202020202",
},
%*{
"keyfile": {
"version" : 3,
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "83dbcc02d8ccb40e466191a123791e0e"
},
"ciphertext" : "d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 262144,
"r" : 1,
"p" : 8,
"salt" : "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19"
},
"mac" : "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097"
"version": 3,
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {"iv": "83dbcc02d8ccb40e466191a123791e0e"},
"ciphertext":
"d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 262144,
"r": 1,
"p": 8,
"salt": "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19",
},
"mac": "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097",
},
"id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6"
"id": "3198bc9c-6672-5ab3-d995-4942343ae5b6",
},
"name" : "test2",
"name": "test2",
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d",
},
%*{
"keyfile": {
"version": 3,
"address": "460121576cc7df020759730751f92bd62fd78dd6",
"crypto": {
"ciphertext": "54ae683c6287fa3d58321f09d56e26d94e58a00d4f90bdd95782ae0e4aab618b",
"cipherparams": {
"iv": "681679cdb125bba9495d068b002816a4"
},
"cipher": "aes-128-ctr",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"salt": "c3407f363fce02a66e3c4bf4a8f6b7da1c1f54266cef66381f0625c251c32785",
"n": 8192,
"r": 8,
"p": 1
},
"mac": "dea6bdf22a2f522166ed82808c22a6311e84c355f4bbe100d4260483ff675a46"
"ciphertext":
"54ae683c6287fa3d58321f09d56e26d94e58a00d4f90bdd95782ae0e4aab618b",
"cipherparams": {"iv": "681679cdb125bba9495d068b002816a4"},
"cipher": "aes-128-ctr",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"salt": "c3407f363fce02a66e3c4bf4a8f6b7da1c1f54266cef66381f0625c251c32785",
"n": 8192,
"r": 8,
"p": 1,
},
"mac": "dea6bdf22a2f522166ed82808c22a6311e84c355f4bbe100d4260483ff675a46",
},
"id": "0eb785e0-340a-4290-9c42-90a11973ee47"
"id": "0eb785e0-340a-4290-9c42-90a11973ee47",
},
"name": "mycrypto",
"password": "foobartest121",
"priv": "05a4d3eb46c742cb8850440145ce70cbc80b59f891cf5f50fd3e9c280b50c4e4"
"priv": "05a4d3eb46c742cb8850440145ce70cbc80b59f891cf5f50fd3e9c280b50c4e4",
},
%*{
"keyfile": {
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {
"iv": "7e7b02d2b4ef45d6c98cb885e75f48d5",
},
"ciphertext": "a7a5743a6c7eb3fa52396bd3fd94043b79075aac3ccbae8e62d3af94db00397c",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 8192,
"p": 1,
"r": 8,
"salt": "247797c7a357b707a3bdbfaa55f4c553756bca09fec20ddc938e7636d21e4a20",
},
"mac": "5a3ba5bebfda2c384586eda5fcda9c8397d37c9b0cc347fea86525cf2ea3a468",
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {"iv": "7e7b02d2b4ef45d6c98cb885e75f48d5"},
"ciphertext":
"a7a5743a6c7eb3fa52396bd3fd94043b79075aac3ccbae8e62d3af94db00397c",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 8192,
"p": 1,
"r": 8,
"salt": "247797c7a357b707a3bdbfaa55f4c553756bca09fec20ddc938e7636d21e4a20",
},
"address": "0b6f2de3dee015a95d3330dcb7baf8e08aa0112d",
"id": "3c8efdd6-d538-47ec-b241-36783d3418b9",
"version": 3
"mac": "5a3ba5bebfda2c384586eda5fcda9c8397d37c9b0cc347fea86525cf2ea3a468",
},
"address": "0b6f2de3dee015a95d3330dcb7baf8e08aa0112d",
"id": "3c8efdd6-d538-47ec-b241-36783d3418b9",
"version": 3,
},
"password": "moomoocow",
"priv": "21eac69b9a52f466bfe9047f0f21c9caf3a5cdaadf84e2750a9b3265d450d481",
"name": "eth-keyfile-conftest"
}
"name": "eth-keyfile-conftest",
},
]

test "Testing nim-eth test vectors":

var secret: KfResult[seq[byte]]
var expectedSecret: seq[byte]

for i in 0..<TestVectors.len:

for i in 0 ..< TestVectors.len:
# Decryption with correct password
expectedSecret = decodeHex(TestVectors[i].getOrDefault("priv").getStr())
secret =
decodeKeyFileJson(TestVectors[i].getOrDefault("keyfile"),
TestVectors[i].getOrDefault("password").getStr())
secret = decodeKeyFileJson(
TestVectors[i].getOrDefault("keyfile"),
TestVectors[i].getOrDefault("password").getStr(),
)
check:
secret.isOk()
secret.get() == expectedSecret

# Decryption with wrong password
secret = decodeKeyFileJson(TestVectors[i].getOrDefault("keyfile"), "wrongpassword")
secret =
decodeKeyFileJson(TestVectors[i].getOrDefault("keyfile"), "wrongpassword")

check:
secret.isErr()
secret.error == KeyFileError.KeyfileIncorrectMac

test "Wrong mac in keyfile":

# This keyfile is the same as the first one in TestVectors,
# but the last byte of mac is changed to 00.
# While ciphertext is the correct encryption of priv under password,
# mac verification should fail and nothing will be decrypted
let keyfileWrongMac = %*{
let keyfileWrongMac =
%*{
"keyfile": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {"iv" : "6087dab2f9fdbbfaddc31a909735c1e6"},
"ciphertext" : "5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf" : "pbkdf2",
"kdfparams" : {
"c" : 262144,
"dklen" : 32,
"prf" : "hmac-sha256",
"salt" : "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd"
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {"iv": "6087dab2f9fdbbfaddc31a909735c1e6"},
"ciphertext":
"5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf": "pbkdf2",
"kdfparams": {
"c": 262144,
"dklen": 32,
"prf": "hmac-sha256",
"salt": "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd",
},
"mac" : "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e900"
"mac": "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e900",
},
"id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version" : 3
"id": "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version": 3,
},
"name": "test1",
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d",
}

# Decryption with correct password
let expectedSecret = decodeHex(keyfileWrongMac.getOrDefault("priv").getStr())
let secret =
decodeKeyFileJson(keyfileWrongMac.getOrDefault("keyfile"),
keyfileWrongMac.getOrDefault("password").getStr())
let secret = decodeKeyFileJson(
keyfileWrongMac.getOrDefault("keyfile"),
keyfileWrongMac.getOrDefault("password").getStr(),
)
check:
secret.isErr()
secret.error == KeyFileError.KeyFileIncorrectMac
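(Why the wrong-mac vector fails before any decryption: a v3 keyfile stores MAC = keccak256(derivedKey[16..31] ++ ciphertext), and the decoder recomputes and compares it before decrypting. A rough standalone sketch of that check under the Web3 Secret Storage v3 spec; the helper name and shapes are illustrative, not nwaku's internal API.)

import nimcrypto/keccak

# Illustrative MAC verification step; derivedKey comes from the KDF
# (pbkdf2/scrypt) and must be at least 32 bytes long.
proc macMatches(derivedKey, ciphertext, storedMac: seq[byte]): bool =
  var ctx: keccak256
  ctx.init()
  ctx.update(derivedKey[16 .. 31])  # second half of the derived key
  ctx.update(ciphertext)
  @(ctx.finish().data) == storedMac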
@ -364,11 +357,10 @@ suite "KeyFile test suite (adapted from nim-eth keyfile tests)":
let secret = decodeKeyFileJson(jsonKeyfile.get(), password)

check:
secret.isOk()
secret.get() == expectedSecret
secret.isOk()
secret.get() == expectedSecret

test "Load non-existent keyfile test":

check:
loadKeyFiles("nonexistant.keyfile", "password").error ==
KeyFileError.KeyfileDoesNotExist

@ -22,13 +22,22 @@ import
./testlib/wakucore,
./testlib/wakunode


procSuite "Waku Metadata Protocol":
asyncTest "request() returns the supported metadata of the peer":
let clusterId = 10.uint32
let
node1 = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), clusterId = clusterId)
node2 = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), clusterId = clusterId)
node1 = newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
)
node2 = newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
)

# Start nodes
await allFutures([node1.start(), node2.start()])
@ -37,7 +46,9 @@ procSuite "Waku Metadata Protocol":
node1.topicSubscriptionQueue.emit((kind: PubsubSub, topic: "/waku/2/rs/10/6"))

# Create connection
let connOpt = await node2.peerManager.dialPeer(node1.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
let connOpt = await node2.peerManager.dialPeer(
node1.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec
)
require:
connOpt.isSome

@ -51,4 +62,3 @@ procSuite "Waku Metadata Protocol":
check:
response1.get().clusterId.get() == clusterId
response1.get().shards == @[uint32(6), uint32(7)]
@ -1,34 +1,22 @@
{.used.}

import
chronos,
confutils/toml/std/net,
libp2p/multiaddress,
testutils/unittests
import chronos, confutils/toml/std/net, libp2p/multiaddress, testutils/unittests

import
./testlib/wakunode,
../../waku/waku_enr/capabilities
import ./testlib/wakunode, ../../waku/waku_enr/capabilities

include
../../waku/node/config
include ../../waku/node/config

proc defaultTestWakuFlags(): CapabilitiesBitfield =
CapabilitiesBitfield.init(
lightpush = false,
filter = false,
store = false,
relay = true
lightpush = false, filter = false, store = false, relay = true
)

suite "Waku NetConfig":

asyncTest "Create NetConfig with default values":

suite "Waku NetConfig":
asyncTest "Create NetConfig with default values":
let conf = defaultTestWakuNodeConf()


let wakuFlags = defaultTestWakuFlags()


let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
@ -40,55 +28,50 @@ suite "Waku NetConfig":
wssEnabled = conf.websocketSecureSupport,
dns4DomainName = none(string),
discv5UdpPort = none(Port),
wakuFlags = some(wakuFlags)
wakuFlags = some(wakuFlags),
)

check:
netConfigRes.isOk()

asyncTest "AnnouncedAddresses contains only bind address when no external addresses are provided":

let conf = defaultTestWakuNodeConf()

let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort
)


let netConfigRes =
NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort)

assert netConfigRes.isOk(), $netConfigRes.error

let netConfig = netConfigRes.get()

check:
netConfig.announcedAddresses.len == 1 # Only bind address should be present
netConfig.announcedAddresses[0] == formatListenAddress(ip4TcpEndPoint(conf.listenAddress, conf.tcpPort))

netConfig.announcedAddresses.len == 1 # Only bind address should be present
netConfig.announcedAddresses[0] ==
formatListenAddress(ip4TcpEndPoint(conf.listenAddress, conf.tcpPort))

asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided":

let
let
conf = defaultTestWakuNodeConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)


let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
extIp = some(extIp),
extPort = some(extPort)
extPort = some(extPort),
)


assert netConfigRes.isOk(), $netConfigRes.error

let netConfig = netConfigRes.get()

check:
netConfig.announcedAddresses.len == 1 # Only external address should be present
netConfig.announcedAddresses.len == 1 # Only external address should be present
netConfig.announcedAddresses[0] == ip4TcpEndPoint(extIp, extPort)

asyncTest "AnnouncedAddresses contains dns4DomainName if provided":

let
let
conf = defaultTestWakuNodeConf()
dns4DomainName = "example.com"
extPort = Port(1234)
@ -97,95 +80,91 @@ suite "Waku NetConfig":
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort)
extPort = some(extPort),
)


assert netConfigRes.isOk(), $netConfigRes.error

let netConfig = netConfigRes.get()

check:
netConfig.announcedAddresses.len == 1 # Only DNS address should be present
netConfig.announcedAddresses.len == 1 # Only DNS address should be present
netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort)

asyncTest "AnnouncedAddresses includes extMultiAddrs when provided":

let
let
conf = defaultTestWakuNodeConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)]


let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
extMultiAddrs = extMultiAddrs
extMultiAddrs = extMultiAddrs,
)


assert netConfigRes.isOk(), $netConfigRes.error

let netConfig = netConfigRes.get()

check:
netConfig.announcedAddresses.len == 2 # Bind address + extAddress
netConfig.announcedAddresses.len == 2 # Bind address + extAddress
netConfig.announcedAddresses[1] == extMultiAddrs[0]


asyncTest "AnnouncedAddresses uses dns4DomainName over extIp when both are provided":

let
let
conf = defaultTestWakuNodeConf()
dns4DomainName = "example.com"
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)


let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
dns4DomainName = some(dns4DomainName),
extIp = some(extIp),
extPort = some(extPort)
extPort = some(extPort),
)


assert netConfigRes.isOk(), $netConfigRes.error
|
||||
let netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 1 # DNS address
|
||||
netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort)
|
||||
netConfig.announcedAddresses.len == 1 # DNS address
|
||||
netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort)
|
||||
|
||||
asyncTest "AnnouncedAddresses includes WebSocket addresses when enabled":
|
||||
|
||||
var
|
||||
var
|
||||
conf = defaultTestWakuNodeConf()
|
||||
wssEnabled = false
|
||||
|
||||
|
||||
var netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
var netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (ip4TcpEndPoint(conf.listenAddress,
|
||||
conf.websocketPort) & wsFlag(wssEnabled))
|
||||
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] ==
|
||||
(ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled))
|
||||
|
||||
## Now try the same for the case of wssEnabled = true
|
||||
|
||||
|
||||
wssEnabled = true
|
||||
|
||||
|
||||
netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
@ -193,71 +172,68 @@ suite "Waku NetConfig":
|
||||
netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (ip4TcpEndPoint(conf.listenAddress,
|
||||
conf.websocketPort) & wsFlag(wssEnabled))
|
||||
|
||||
asyncTest "Announced WebSocket address contains external IP if provided":
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] ==
|
||||
(ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled))
|
||||
|
||||
let
|
||||
asyncTest "Announced WebSocket address contains external IP if provided":
|
||||
let
|
||||
conf = defaultTestWakuNodeConf()
|
||||
extIp = parseIpAddress("1.2.3.4")
|
||||
extPort = Port(1234)
|
||||
wssEnabled = false
|
||||
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
let netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # External address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (ip4TcpEndPoint(extIp,
|
||||
conf.websocketPort) & wsFlag(wssEnabled))
|
||||
netConfig.announcedAddresses.len == 2 # External address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] ==
|
||||
(ip4TcpEndPoint(extIp, conf.websocketPort) & wsFlag(wssEnabled))
|
||||
|
||||
asyncTest "Announced WebSocket address contains dns4DomainName if provided":
|
||||
|
||||
let
|
||||
let
|
||||
conf = defaultTestWakuNodeConf()
|
||||
dns4DomainName = "example.com"
|
||||
extPort = Port(1234)
|
||||
wssEnabled = false
|
||||
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort),
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
let netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (dns4TcpEndPoint(dns4DomainName, conf.websocketPort) &
|
||||
wsFlag(wssEnabled))
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] ==
|
||||
(dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled))
|
||||
|
||||
asyncTest "Announced WebSocket address contains dns4DomainName if provided alongside extIp":
|
||||
|
||||
let
|
||||
let
|
||||
conf = defaultTestWakuNodeConf()
|
||||
dns4DomainName = "example.com"
|
||||
extIp = parseIpAddress("1.2.3.4")
|
||||
extPort = Port(1234)
|
||||
wssEnabled = false
|
||||
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
@ -265,28 +241,25 @@ suite "Waku NetConfig":
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
let netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # DNS address + wsHostAddress
|
||||
netConfig.announcedAddresses.len == 2 # DNS address + wsHostAddress
|
||||
netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort)
|
||||
netConfig.announcedAddresses[1] == (dns4TcpEndPoint(dns4DomainName, conf.websocketPort) &
|
||||
wsFlag(wssEnabled))
|
||||
netConfig.announcedAddresses[1] ==
|
||||
(dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled))
|
||||
|
||||
asyncTest "ENR is set with bindIp/Port if no extIp/Port are provided":
|
||||
|
||||
let conf = defaultTestWakuNodeConf()
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort
|
||||
)
|
||||
|
||||
|
||||
let netConfigRes =
|
||||
NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
let netConfig = netConfigRes.get()
|
||||
@ -296,19 +269,18 @@ suite "Waku NetConfig":
|
||||
netConfig.enrPort.get() == conf.tcpPort
|
||||
|
||||
asyncTest "ENR is set with extIp/Port if provided":
|
||||
|
||||
let
|
||||
let
|
||||
conf = defaultTestWakuNodeConf()
|
||||
extIp = parseIpAddress("1.2.3.4")
|
||||
extPort = Port(1234)
|
||||
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort)
|
||||
extPort = some(extPort),
|
||||
)
|
||||
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
let netConfig = netConfigRes.get()
|
||||
@ -318,8 +290,7 @@ suite "Waku NetConfig":
|
||||
netConfig.enrPort.get() == extPort
|
||||
|
||||
asyncTest "ENR is set with dns4DomainName if provided":
|
||||
|
||||
let
|
||||
let
|
||||
conf = defaultTestWakuNodeConf()
|
||||
dns4DomainName = "example.com"
|
||||
extPort = Port(1234)
|
||||
@ -328,7 +299,7 @@ suite "Waku NetConfig":
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort)
|
||||
extPort = some(extPort),
|
||||
)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
@ -339,39 +310,38 @@ suite "Waku NetConfig":
|
||||
netConfig.enrMultiaddrs.contains(dns4TcpEndPoint(dns4DomainName, extPort))
|
||||
|
||||
asyncTest "wsHostAddress is not announced if a WS/WSS address is provided in extMultiAddrs":
|
||||
|
||||
var
|
||||
var
|
||||
conf = defaultTestWakuNodeConf()
|
||||
extAddIp = parseIpAddress("1.2.3.4")
|
||||
extAddPort = Port(1234)
|
||||
wsEnabled = true
|
||||
wssEnabled = false
|
||||
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
|
||||
|
||||
|
||||
var netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
wsEnabled = wsEnabled
|
||||
wsEnabled = wsEnabled,
|
||||
)
|
||||
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
var netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + extAddress
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + extAddress
|
||||
netConfig.announcedAddresses[1] == extMultiAddrs[0]
|
||||
|
||||
# Now same test for WSS external address
|
||||
wssEnabled = true
|
||||
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
|
||||
|
||||
|
||||
netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
wssEnabled = wssEnabled
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
@ -379,29 +349,27 @@ suite "Waku NetConfig":
|
||||
netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + extAddress
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + extAddress
|
||||
netConfig.announcedAddresses[1] == extMultiAddrs[0]
|
||||
|
||||
asyncTest "Only extMultiAddrs are published when enabling extMultiAddrsOnly flag":
|
||||
|
||||
let
|
||||
let
|
||||
conf = defaultTestWakuNodeConf()
|
||||
extAddIp = parseIpAddress("1.2.3.4")
|
||||
extAddPort = Port(1234)
|
||||
extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)]
|
||||
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.listenAddress,
|
||||
bindPort = conf.tcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
extMultiAddrsOnly = true
|
||||
extMultiAddrsOnly = true,
|
||||
)
|
||||
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
|
||||
let netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 1 # ExtAddress
|
||||
netConfig.announcedAddresses.len == 1 # ExtAddress
|
||||
netConfig.announcedAddresses[0] == extMultiAddrs[0]
|
||||
|
||||
|
||||
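
Taken together, the NetConfig cases above pin down a precedence order for announced addresses: extMultiAddrsOnly wins outright, then dns4DomainName, then extIp, and finally the bind address. A minimal sketch of that rule, reusing the helpers exercised above:

# When both a DNS name and an external IP are given, only the DNS endpoint
# is announced (as the "uses dns4DomainName over extIp" test checks).
let conf = defaultTestWakuNodeConf()
let netConfigRes = NetConfig.init(
  bindIp = conf.listenAddress,
  bindPort = conf.tcpPort,
  dns4DomainName = some("example.com"),
  extIp = some(parseIpAddress("1.2.3.4")),
  extPort = some(Port(1234)),
)
assert netConfigRes.isOk(), $netConfigRes.error
assert netConfigRes.get().announcedAddresses ==
  @[dns4TcpEndPoint("example.com", Port(1234))]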
@ -17,16 +17,13 @@ import
../../waku/waku_core,
./testlib/common

procSuite "Waku Noise":
common.randomize()

test "PKCS#7 Padding/Unpadding":
# We test padding for different message lengths
let maxMessageLength = 3 * NoisePaddingBlockSize
for messageLen in 0..maxMessageLength:
for messageLen in 0 .. maxMessageLength:
let
message = randomSeqByte(rng[], messageLen)
padded = pkcs7_pad(message, NoisePaddingBlockSize)
@ -38,12 +35,11 @@ procSuite "Waku Noise":
message == unpadded

test "ChaChaPoly Encryption/Decryption: random byte sequences":
let cipherState = randomChaChaPolyCipherState(rng[])

# We encrypt/decrypt random byte sequences
let
plaintext: seq[byte] = randomSeqByte(rng[], rand(1..128))
plaintext: seq[byte] = randomSeqByte(rng[], rand(1 .. 128))
ciphertext: ChaChaPolyCiphertext = encrypt(cipherState, plaintext)
decryptedCiphertext: seq[byte] = decrypt(cipherState, ciphertext)

@ -51,12 +47,11 @@ procSuite "Waku Noise":
plaintext == decryptedCiphertext

test "ChaChaPoly Encryption/Decryption: random strings":
let cipherState = randomChaChaPolyCipherState(rng[])

# We encrypt/decrypt random strings
var plaintext: string
for _ in 1..rand(1..128):
for _ in 1 .. rand(1 .. 128):
add(plaintext, char(rand(int('A') .. int('z'))))

let
@ -67,7 +62,6 @@ procSuite "Waku Noise":
plaintext.toBytes() == decryptedCiphertext

test "Noise public keys: encrypt and decrypt a public key":
let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[])

let
@ -79,7 +73,6 @@ procSuite "Waku Noise":
noisePublicKey == decryptedPk

test "Noise public keys: decrypt an unencrypted public key":
let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[])

let
@ -90,7 +83,6 @@ procSuite "Waku Noise":
noisePublicKey == decryptedPk

test "Noise public keys: encrypt an encrypted public key":
let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[])

let
@ -102,7 +94,6 @@ procSuite "Waku Noise":
encryptedPk == encryptedPk2

test "Noise public keys: encrypt, decrypt and decrypt a public key":
let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[])

let
@ -115,32 +106,31 @@ procSuite "Waku Noise":
decryptedPk == decryptedPk2

test "Noise public keys: serialize and deserialize an unencrypted public key":
let
noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[])
serializedNoisePublicKey: seq[byte] = serializeNoisePublicKey(noisePublicKey)
deserializedNoisePublicKey: NoisePublicKey = intoNoisePublicKey(serializedNoisePublicKey)
deserializedNoisePublicKey: NoisePublicKey =
intoNoisePublicKey(serializedNoisePublicKey)

check:
noisePublicKey == deserializedNoisePublicKey

test "Noise public keys: encrypt, serialize, deserialize and decrypt a public key":
let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[])

let
cs: ChaChaPolyCipherState = randomChaChaPolyCipherState(rng[])
encryptedPk: NoisePublicKey = encryptNoisePublicKey(cs, noisePublicKey)
serializedNoisePublicKey: seq[byte] = serializeNoisePublicKey(encryptedPk)
deserializedNoisePublicKey: NoisePublicKey = intoNoisePublicKey(serializedNoisePublicKey)
decryptedPk: NoisePublicKey = decryptNoisePublicKey(cs, deserializedNoisePublicKey)
deserializedNoisePublicKey: NoisePublicKey =
intoNoisePublicKey(serializedNoisePublicKey)
decryptedPk: NoisePublicKey =
decryptNoisePublicKey(cs, deserializedNoisePublicKey)

check:
noisePublicKey == decryptedPk

test "PayloadV2: serialize/deserialize PayloadV2 to byte sequence":
let
payload2: PayloadV2 = randomPayloadV2(rng[])
serializedPayload = serializePayloadV2(payload2)
@ -154,9 +144,7 @@ procSuite "Waku Noise":
deserializedPayload.isOk()
payload2 == deserializedPayload.get()

test "PayloadV2: Encode/Decode a Waku Message (version 2) to a PayloadV2":
# We encode to a WakuMessage a random PayloadV2
let
payload2 = randomPayloadV2(rng[])
@ -181,7 +169,6 @@ procSuite "Waku Noise":
payload2 == decoded.get()

test "Noise State Machine: Diffie-Hellman operation":
#We generate random keypairs
let
aliceKey = genKeyPair(rng[])
@ -197,13 +184,12 @@ procSuite "Waku Noise":
dh1 == dh2

test "Noise State Machine: Cipher State primitives":
# We generate a random Cipher State, associated data ad and plaintext
var
cipherState: CipherState = randomCipherState(rng[])
nonce: uint64 = uint64(rand(0 .. int.high))
ad: seq[byte] = randomSeqByte(rng[], rand(1..128))
plaintext: seq[byte] = randomSeqByte(rng[], rand(1..128))
ad: seq[byte] = randomSeqByte(rng[], rand(1 .. 128))
plaintext: seq[byte] = randomSeqByte(rng[], rand(1 .. 128))

# We set the random nonce generated in the cipher state
setNonce(cipherState, nonce)
@ -230,7 +216,7 @@ procSuite "Waku Noise":
setCipherStateKey(cipherState, EmptyKey)
nonce = getNonce(cipherState)

plaintext = randomSeqByte(rng[], rand(1..128))
plaintext = randomSeqByte(rng[], rand(1 .. 128))
ciphertext = encryptWithAd(cipherState, ad, plaintext)

check:
@ -242,7 +228,7 @@ procSuite "Waku Noise":
nonce = getNonce(cipherState)

# Note that we set ciphertext minimum length to 16 to not trigger checks on authentication tag length
ciphertext = randomSeqByte(rng[], rand(16..128))
ciphertext = randomSeqByte(rng[], rand(16 .. 128))
plaintext = decryptWithAd(cipherState, ad, ciphertext)

check:
@ -255,10 +241,10 @@ procSuite "Waku Noise":
# We generate a test Cipher State with nonce set to MaxNonce
cipherState = randomCipherState(rng[])
setNonce(cipherState, NonceMax)
plaintext = randomSeqByte(rng[], rand(1..128))
plaintext = randomSeqByte(rng[], rand(1 .. 128))

# We test if encryption fails with a NoiseNonceMaxError error. Any subsequent encryption call over the Cipher State should fail similarly and leave the nonce unchanged
for _ in [1..5]:
for _ in [1 .. 5]:
expect NoiseNonceMaxError:
ciphertext = encryptWithAd(cipherState, ad, plaintext)

@ -271,14 +257,14 @@ procSuite "Waku Noise":
# To perform such test, we then need to encrypt a test plaintext using directly ChaChaPoly primitive
cipherState = randomCipherState(rng[])
setNonce(cipherState, NonceMax)
plaintext = randomSeqByte(rng[], rand(1..128))
plaintext = randomSeqByte(rng[], rand(1 .. 128))

# We perform encryption using the Cipher State key, NonceMax and ad
# By Noise specification the nonce is 8 bytes long out of the 12 bytes supported by ChaChaPoly, thus we copy the Little endian conversion of the nonce to a ChaChaPolyNonce
var
encNonce: ChaChaPolyNonce
authorizationTag: ChaChaPolyTag
encNonce[4..<12] = toBytesLE(NonceMax)
encNonce[4 ..< 12] = toBytesLE(NonceMax)
ChaChaPoly.encrypt(getKey(cipherState), encNonce, authorizationTag, plaintext, ad)

# The output ciphertext is stored in the plaintext variable after ChaChaPoly.encrypt is called: we copy it along with the authorization tag.
@ -289,7 +275,7 @@ procSuite "Waku Noise":
# At this point ciphertext is a proper encryption of the original plaintext obtained with nonce equal to NonceMax
# We can now test if decryption fails with a NoiseNonceMaxError error. Any subsequent decryption call over the Cipher State should fail similarly and leave the nonce unchanged
# Note that decryptWithAd doesn't fail in decrypting the ciphertext (otherwise a NoiseDecryptTagError would have been triggered)
for _ in [1..5]:
for _ in [1 .. 5]:
expect NoiseNonceMaxError:
plaintext = decryptWithAd(cipherState, ad, ciphertext)

@ -297,11 +283,10 @@ procSuite "Waku Noise":
getNonce(cipherState) == NonceMax + 1

test "Noise State Machine: Symmetric State primitives":
# We select one supported handshake pattern and we initialize a symmetric state
var
hsPattern = NoiseHandshakePatterns["XX"]
symmetricState: SymmetricState = SymmetricState.init(hsPattern)
symmetricState: SymmetricState = SymmetricState.init(hsPattern)

# We get all the Symmetric State field
# cs : Cipher State
@ -321,7 +306,7 @@ procSuite "Waku Noise":
########################################

# We generate a random byte sequence and execute a mixHash over it
mixHash(symmetricState, randomSeqByte(rng[], rand(1..128)))
mixHash(symmetricState, randomSeqByte(rng[], rand(1 .. 128)))

# mixHash changes only the handshake hash value of the Symmetric state
check:
@ -337,7 +322,7 @@ procSuite "Waku Noise":
########################################

# We generate random input key material and we execute mixKey
var inputKeyMaterial = randomSeqByte(rng[], rand(1..128))
var inputKeyMaterial = randomSeqByte(rng[], rand(1 .. 128))
mixKey(symmetricState, inputKeyMaterial)

# mixKey changes the Symmetric State's chaining key and encryption key of the embedded Cipher State
@ -358,7 +343,7 @@ procSuite "Waku Noise":
########################################

# We generate random input key material and we execute mixKeyAndHash
inputKeyMaterial = randomSeqByte(rng[], rand(1..128))
inputKeyMaterial = randomSeqByte(rng[], rand(1 .. 128))
mixKeyAndHash(symmetricState, inputKeyMaterial)

# mixKeyAndHash executes a mixKey and a mixHash using the input key material
@ -431,12 +416,12 @@ procSuite "Waku Noise":
getKey(cs1) != getKey(cs2)

test "Noise XX Handhshake and message encryption (extended test)":
let hsPattern = NoiseHandshakePatterns["XX"]

# We initialize Alice's and Bob's Handshake State
let aliceStaticKey = genKeyPair(rng[])
var aliceHS = initialize(hsPattern = hsPattern, staticKey = aliceStaticKey, initiator = true)
var aliceHS =
initialize(hsPattern = hsPattern, staticKey = aliceStaticKey, initiator = true)

let bobStaticKey = genKeyPair(rng[])
var bobHS = initialize(hsPattern = hsPattern, staticKey = bobStaticKey)
@ -457,7 +442,8 @@ procSuite "Waku Noise":

# By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message
# and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -489,7 +475,8 @@ procSuite "Waku Noise":
sentTransportMessage = randomSeqByte(rng[], 32)

# Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -504,10 +491,14 @@ procSuite "Waku Noise":
let prevAliceHS = aliceHS
let prevBobHS = bobHS

let bobStep1 = stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage).get()
let aliceStep1 = stepHandshake(rng[], aliceHS, readPayloadV2 = bobStep1.payload2).get()
let aliceStep2 = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
let bobStep2 = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep2.payload2).get()
let bobStep1 =
stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage).get()
let aliceStep1 =
stepHandshake(rng[], aliceHS, readPayloadV2 = bobStep1.payload2).get()
let aliceStep2 =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
let bobStep2 =
stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep2.payload2).get()

check:
aliceStep1 == default(HandshakeStepResult)
@ -534,12 +525,12 @@ procSuite "Waku Noise":
readMessage: seq[byte]
defaultMessageNametagBuffer: MessageNametagBuffer

for _ in 0..10:
for _ in 0 .. 10:
# Alice writes to Bob
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage
@ -547,13 +538,13 @@ procSuite "Waku Noise":
# Bob writes to Alice
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage

test "Noise XXpsk0 Handhshake and message encryption (short test)":
let hsPattern = NoiseHandshakePatterns["XXpsk0"]

# We generate a random psk
@ -561,7 +552,9 @@ procSuite "Waku Noise":

# We initialize Alice's and Bob's Handshake State
let aliceStaticKey = genKeyPair(rng[])
var aliceHS = initialize(hsPattern = hsPattern, staticKey = aliceStaticKey, psk = psk, initiator = true)
var aliceHS = initialize(
hsPattern = hsPattern, staticKey = aliceStaticKey, psk = psk, initiator = true
)

let bobStaticKey = genKeyPair(rng[])
var bobHS = initialize(hsPattern = hsPattern, staticKey = bobStaticKey, psk = psk)
@ -582,7 +575,8 @@ procSuite "Waku Noise":

# By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message
# and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -614,7 +608,8 @@ procSuite "Waku Noise":
sentTransportMessage = randomSeqByte(rng[], 32)

# Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transportMessage alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -641,12 +636,12 @@ procSuite "Waku Noise":
readMessage: seq[byte]
defaultMessageNametagBuffer: MessageNametagBuffer

for _ in 0..10:
for _ in 0 .. 10:
# Alice writes to Bob
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage
@ -654,13 +649,13 @@ procSuite "Waku Noise":
# Bob writes to Alice
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage

test "Noise K1K1 Handhshake and message encryption (short test)":
let hsPattern = NoiseHandshakePatterns["K1K1"]

# We initialize Alice's and Bob's Handshake State
@ -672,10 +667,21 @@ procSuite "Waku Noise":
# <- s
# ...
# So we define accordingly the sequence of the pre-message public keys
let preMessagePKs: seq[NoisePublicKey] = @[toNoisePublicKey(getPublicKey(aliceStaticKey)), toNoisePublicKey(getPublicKey(bobStaticKey))]
let preMessagePKs: seq[NoisePublicKey] =
@[
toNoisePublicKey(getPublicKey(aliceStaticKey)),
toNoisePublicKey(getPublicKey(bobStaticKey)),
]

var aliceHS = initialize(hsPattern = hsPattern, staticKey = aliceStaticKey, preMessagePKs = preMessagePKs, initiator = true)
var bobHS = initialize(hsPattern = hsPattern, staticKey = bobStaticKey, preMessagePKs = preMessagePKs)
var aliceHS = initialize(
hsPattern = hsPattern,
staticKey = aliceStaticKey,
preMessagePKs = preMessagePKs,
initiator = true,
)
var bobHS = initialize(
hsPattern = hsPattern, staticKey = bobStaticKey, preMessagePKs = preMessagePKs
)

var
sentTransportMessage: seq[byte]
@ -693,7 +699,8 @@ procSuite "Waku Noise":

# By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message
# and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -725,7 +732,8 @@ procSuite "Waku Noise":
sentTransportMessage = randomSeqByte(rng[], 32)

# Similarly as in first step, Alice writes a Waku2 payload containing the handshake_message and the (encrypted) transportMessage
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transportMessage alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -752,12 +760,12 @@ procSuite "Waku Noise":
readMessage: seq[byte]
defaultMessageNametagBuffer: MessageNametagBuffer

for _ in 0..10:
for _ in 0 .. 10:
# Alice writes to Bob
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage
@ -765,14 +773,13 @@ procSuite "Waku Noise":
# Bob writes to Alice
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage

test "Noise XK1 Handhshake and message encryption (short test)":
let hsPattern = NoiseHandshakePatterns["XK1"]

# We initialize Alice's and Bob's Handshake State
@ -783,10 +790,18 @@ procSuite "Waku Noise":
# <- s
# ...
# So we define accordingly the sequence of the pre-message public keys
let preMessagePKs: seq[NoisePublicKey] = @[toNoisePublicKey(getPublicKey(bobStaticKey))]
let preMessagePKs: seq[NoisePublicKey] =
@[toNoisePublicKey(getPublicKey(bobStaticKey))]

var aliceHS = initialize(hsPattern = hsPattern, staticKey = aliceStaticKey, preMessagePKs = preMessagePKs, initiator = true)
var bobHS = initialize(hsPattern = hsPattern, staticKey = bobStaticKey, preMessagePKs = preMessagePKs)
var aliceHS = initialize(
hsPattern = hsPattern,
staticKey = aliceStaticKey,
preMessagePKs = preMessagePKs,
initiator = true,
)
var bobHS = initialize(
hsPattern = hsPattern, staticKey = bobStaticKey, preMessagePKs = preMessagePKs
)

var
sentTransportMessage: seq[byte]
@ -804,7 +819,8 @@ procSuite "Waku Noise":

# By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message
# and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -836,7 +852,8 @@ procSuite "Waku Noise":
sentTransportMessage = randomSeqByte(rng[], 32)

# Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()
aliceStep =
stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get()

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
@ -863,12 +880,12 @@ procSuite "Waku Noise":
readMessage: seq[byte]
defaultMessageNametagBuffer: MessageNametagBuffer

for _ in 0..10:
for _ in 0 .. 10:
# Alice writes to Bob
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage
@ -876,7 +893,8 @@ procSuite "Waku Noise":
# Bob writes to Alice
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer)
readMessage = readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()
readMessage =
readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get()

check:
message == readMessage
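
Every handshake test above follows the same stepping discipline: the initiator writes a PayloadV2, the responder reads it, and roles alternate until the pattern completes. A condensed sketch of one round trip, assuming hsPattern, the keypairs, and rng from the tests above are in scope:

# One write/read round of the handshake, as repeated in the tests above.
var aliceHS =
  initialize(hsPattern = hsPattern, staticKey = aliceStaticKey, initiator = true)
var bobHS = initialize(hsPattern = hsPattern, staticKey = bobStaticKey)

let msg = randomSeqByte(rng[], 32)
# The initiator writes a PayloadV2 carrying her handshake message plus the
# encrypted transport message...
let aliceStep = stepHandshake(rng[], aliceHS, transportMessage = msg).get()
# ...and the responder consumes it, recovering the transport message.
let bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get()
assert bobStep.transportMessage == msg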
@ -1,9 +1,6 @@
{.used.}

import
std/tables,
stew/[results, byteutils],
testutils/unittests
import std/tables, stew/[results, byteutils], testutils/unittests
import
../../waku/common/protobuf,
../../waku/utils/noise as waku_message_utils,
@ -19,7 +16,6 @@ procSuite "Waku Noise Sessions":
# This test implements the Device pairing and Secure Transfers with Noise
# detailed in the 43/WAKU2-DEVICE-PAIRING RFC https://rfc.vac.dev/spec/43/
test "Noise Waku Pairing Handhshake and Secure transfer":
#########################
# Pairing Phase
#########################
@ -47,10 +43,19 @@ procSuite "Waku Noise Sessions":
# Out-of-band Communication

# Bob prepares the QR and sends it out-of-band to Alice
let qr = toQr(applicationName, applicationVersion, shardId, getPublicKey(bobEphemeralKey), bobCommittedStaticKey)
let qr = toQr(
applicationName,
applicationVersion,
shardId,
getPublicKey(bobEphemeralKey),
bobCommittedStaticKey,
)

# Alice deserializes the QR code
let (readApplicationName, readApplicationVersion, readShardId, readEphemeralKey, readCommittedStaticKey) = fromQr(qr)
let (
readApplicationName, readApplicationVersion, readShardId, readEphemeralKey,
readCommittedStaticKey,
) = fromQr(qr)

# We check if QR serialization/deserialization works
check:
@ -61,19 +66,35 @@ procSuite "Waku Noise Sessions":
bobCommittedStaticKey == readCommittedStaticKey

# We set the contentTopic from the content topic parameters exchanged in the QR
let contentTopic: ContentTopic = "/" & applicationName & "/" & applicationVersion & "/wakunoise/1/sessions_shard-" & shardId & "/proto"
let contentTopic: ContentTopic =
"/" & applicationName & "/" & applicationVersion & "/wakunoise/1/sessions_shard-" &
shardId & "/proto"

###############
# Pre-handshake message
#
# <- eB {H(sB||r), contentTopicParams, messageNametag}
###############
let preMessagePKs: seq[NoisePublicKey] = @[toNoisePublicKey(getPublicKey(bobEphemeralKey))]
let preMessagePKs: seq[NoisePublicKey] =
@[toNoisePublicKey(getPublicKey(bobEphemeralKey))]

# We initialize the Handshake states.
# Note that we pass the whole qr serialization as prologue information
var aliceHS = initialize(hsPattern = hsPattern, ephemeralKey = aliceEphemeralKey, staticKey = aliceStaticKey, prologue = qr.toBytes, preMessagePKs = preMessagePKs, initiator = true)
var bobHS = initialize(hsPattern = hsPattern, ephemeralKey = bobEphemeralKey, staticKey = bobStaticKey, prologue = qr.toBytes, preMessagePKs = preMessagePKs)
var aliceHS = initialize(
hsPattern = hsPattern,
ephemeralKey = aliceEphemeralKey,
staticKey = aliceStaticKey,
prologue = qr.toBytes,
preMessagePKs = preMessagePKs,
initiator = true,
)
var bobHS = initialize(
hsPattern = hsPattern,
ephemeralKey = bobEphemeralKey,
staticKey = bobStaticKey,
prologue = qr.toBytes,
preMessagePKs = preMessagePKs,
)

###############
# Pairing Handshake
@ -109,7 +130,13 @@ procSuite "Waku Noise Sessions":
# By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message
# and the (encrypted) transport message
# The message is sent with a messageNametag equal to the one received through the QR code
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage, messageNametag = qrMessageNametag).get()
aliceStep = stepHandshake(
rng[],
aliceHS,
transportMessage = sentTransportMessage,
messageNametag = qrMessageNametag,
)
.get()

###############################################
# We prepare a Waku message from Alice's payload2
@ -138,7 +165,10 @@ procSuite "Waku Noise Sessions":

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
# Note that Bob verifies if the received payloadv2 has the expected messageNametag set
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = readPayloadV2, messageNametag = qrMessageNametag).get()
bobStep = stepHandshake(
rng[], bobHS, readPayloadV2 = readPayloadV2, messageNametag = qrMessageNametag
)
.get()

check:
bobStep.transportMessage == sentTransportMessage
@ -166,7 +196,13 @@ procSuite "Waku Noise Sessions":
sentTransportMessage = r

# At this step, Bob writes and returns a payload
bobStep = stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage, messageNametag = bobMessageNametag).get()
bobStep = stepHandshake(
rng[],
bobHS,
transportMessage = sentTransportMessage,
messageNametag = bobMessageNametag,
)
.get()

###############################################
# We prepare a Waku message from Bob's payload2
@ -194,13 +230,20 @@ procSuite "Waku Noise Sessions":
###############################################

# While Alice reads and returns the (decrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, readPayloadV2 = readPayloadV2, messageNametag = aliceMessageNametag).get()
aliceStep = stepHandshake(
rng[],
aliceHS,
readPayloadV2 = readPayloadV2,
messageNametag = aliceMessageNametag,
)
.get()

check:
aliceStep.transportMessage == sentTransportMessage

# Alice further checks if Bob's commitment opens to Bob's static key she just received
let expectedBobCommittedStaticKey = commitPublicKey(aliceHS.rs, aliceStep.transportMessage)
let expectedBobCommittedStaticKey =
commitPublicKey(aliceHS.rs, aliceStep.transportMessage)

check:
expectedBobCommittedStaticKey == bobCommittedStaticKey
@ -219,7 +262,13 @@ procSuite "Waku Noise Sessions":
sentTransportMessage = s

# Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message
aliceStep = stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage, messageNametag = aliceMessageNametag).get()
aliceStep = stepHandshake(
rng[],
aliceHS,
transportMessage = sentTransportMessage,
messageNametag = aliceMessageNametag,
)
.get()

###############################################
# We prepare a Waku message from Bob's payload2
@ -247,13 +296,17 @@ procSuite "Waku Noise Sessions":
###############################################

# Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him
bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = readPayloadV2, messageNametag = bobMessageNametag).get()
bobStep = stepHandshake(
rng[], bobHS, readPayloadV2 = readPayloadV2, messageNametag = bobMessageNametag
)
.get()

check:
bobStep.transportMessage == sentTransportMessage

# Bob further checks if Alice's commitment opens to Alice's static key he just received
let expectedAliceCommittedStaticKey = commitPublicKey(bobHS.rs, bobStep.transportMessage)
let expectedAliceCommittedStaticKey =
commitPublicKey(bobHS.rs, bobStep.transportMessage)

check:
expectedAliceCommittedStaticKey == aliceCommittedStaticKey
@ -277,19 +330,36 @@ procSuite "Waku Noise Sessions":
# We test message exchange
# Note that we exchange more than the number of messages contained in the nametag buffer to test if they are filled correctly as the communication proceeds
for i in 0 .. 10 * MessageNametagBufferSize:
# Alice writes to Bob
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound)
readMessage = readMessage(bobHSResult, payload2, inboundMessageNametagBuffer = bobHSResult.nametagsInbound).get()
payload2 = writeMessage(
aliceHSResult,
message,
outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound,
)
readMessage = readMessage(
bobHSResult,
payload2,
inboundMessageNametagBuffer = bobHSResult.nametagsInbound,
)
.get()

check:
message == readMessage

# Bob writes to Alice
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(bobHSResult, message, outboundMessageNametagBuffer = bobHSResult.nametagsOutbound)
readMessage = readMessage(aliceHSResult, payload2, inboundMessageNametagBuffer = aliceHSResult.nametagsInbound).get()
payload2 = writeMessage(
bobHSResult,
message,
outboundMessageNametagBuffer = bobHSResult.nametagsOutbound,
)
readMessage = readMessage(
aliceHSResult,
payload2,
inboundMessageNametagBuffer = aliceHSResult.nametagsInbound,
)
.get()

check:
message == readMessage
@ -297,25 +367,53 @@ procSuite "Waku Noise Sessions":
# We test how nametag buffers help in detecting lost messages
# Alice writes two messages to Bob, but only the second is received
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound)
payload2 = writeMessage(
aliceHSResult,
message,
outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound,
)
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound)
payload2 = writeMessage(
aliceHSResult,
message,
outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound,
)
expect NoiseSomeMessagesWereLost:
readMessage = readMessage(bobHSResult, payload2, inboundMessageNametagBuffer = bobHSResult.nametagsInbound).get()
readMessage = readMessage(
bobHSResult,
payload2,
inboundMessageNametagBuffer = bobHSResult.nametagsInbound,
)
.get()

# We adjust bob nametag buffer for next test (i.e. the missed message is correctly recovered)
delete(bobHSResult.nametagsInbound, 2)
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(bobHSResult, message, outboundMessageNametagBuffer = bobHSResult.nametagsOutbound)
readMessage = readMessage(aliceHSResult, payload2, inboundMessageNametagBuffer = aliceHSResult.nametagsInbound).get()
payload2 = writeMessage(
bobHSResult, message, outboundMessageNametagBuffer = bobHSResult.nametagsOutbound
)
readMessage = readMessage(
aliceHSResult,
payload2,
inboundMessageNametagBuffer = aliceHSResult.nametagsInbound,
)
.get()

check:
message == readMessage
message == readMessage

# We test if a missing nametag is correctly detected
message = randomSeqByte(rng[], 32)
payload2 = writeMessage(aliceHSResult, message, outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound)
payload2 = writeMessage(
aliceHSResult,
message,
outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound,
)
delete(bobHSResult.nametagsInbound, 1)
expect NoiseMessageNametagError:
readMessage = readMessage(bobHSResult, payload2, inboundMessageNametagBuffer = bobHSResult.nametagsInbound).get()

readMessage = readMessage(
bobHSResult,
payload2,
inboundMessageNametagBuffer = bobHSResult.nametagsInbound,
)
.get()
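
The closing part of the session test encodes the nametag-buffer contract: writeMessage consumes one tag from the writer's outbound buffer, readMessage only accepts a payload whose tag is still pending in the reader's inbound buffer, a skipped payload raises NoiseSomeMessagesWereLost, and a tag removed with delete() raises NoiseMessageNametagError. A compressed restatement with the same identifiers as above:

# Contract sketch: writing twice but delivering only the second payload
# leaves a gap that the reader detects on the next readMessage call.
payload2 = writeMessage(
  aliceHSResult, message, outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound
)
expect NoiseSomeMessagesWereLost:
  discard readMessage(
    bobHSResult, payload2, inboundMessageNametagBuffer = bobHSResult.nametagsInbound
  ).get()
# delete(bobHSResult.nametagsInbound, n) re-syncs the buffer so the
# recovered message can be read normally afterwards.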
@ -1,25 +1,17 @@
{.used.}

import
std/[options, sequtils, tables],
testutils/unittests,
chronos,
chronicles
import std/[options, sequtils, tables], testutils/unittests, chronos, chronicles
import
../../waku/waku_metadata,
../../waku/waku_metadata/rpc,
./testlib/wakucore,
./testlib/wakunode


procSuite "Waku Protobufs":
# TODO: Missing test coverage in many encode/decode protobuf functions

test "WakuMetadataResponse":
let res = WakuMetadataResponse(
clusterId: some(7),
shards: @[10, 23, 33],
)
let res = WakuMetadataResponse(clusterId: some(7), shards: @[10, 23, 33])

let buffer = res.encode()

@ -30,10 +22,7 @@ procSuite "Waku Protobufs":
decodedBuff.get().shards == res.shards

test "WakuMetadataRequest":
let req = WakuMetadataRequest(
clusterId: some(5),
shards: @[100, 2, 0],
)
let req = WakuMetadataRequest(clusterId: some(5), shards: @[100, 2, 0])

let buffer = req.encode()

@ -42,4 +31,3 @@ procSuite "Waku Protobufs":
decodedBuff.isOk()
decodedBuff.get().clusterId.get() == req.clusterId.get()
decodedBuff.get().shards == req.shards
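
Both protobuf tests reduce to the same encode/decode round trip. A minimal sketch; WakuMetadataRequest.decode taking the encoded bytes is an assumption about the decoder's shape, since the diff only shows the resulting decodedBuff checks:

# Round-trip sketch: serialize, deserialize, compare field by field.
let req = WakuMetadataRequest(clusterId: some(5), shards: @[100, 2, 0])
let buffer = req.encode() # protobuf serialization
let decodedBuff = WakuMetadataRequest.decode(buffer.buffer) # assumed decoder entry point
assert decodedBuff.isOk()
assert decodedBuff.get().clusterId.get() == req.clusterId.get()
assert decodedBuff.get().shards == req.shards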
@ -1,25 +1,19 @@
{.used.}

import
chronos,
testutils/unittests,
libp2p,
libp2p/protocols/rendezvous
import chronos, testutils/unittests, libp2p, libp2p/protocols/rendezvous

import
../../waku/node/waku_switch,
./testlib/common,
./testlib/wakucore
import ../../waku/node/waku_switch, ./testlib/common, ./testlib/wakucore

proc newRendezvousClientSwitch(rdv: RendezVous): Switch =
SwitchBuilder.new()
.withRng(rng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withRendezVous(rdv)
.build()
SwitchBuilder
.new()
.withRng(rng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withRendezVous(rdv)
.build()

procSuite "Waku Rendezvous":
asyncTest "Waku Switch uses Rendezvous":
@ -53,7 +47,5 @@ procSuite "Waku Rendezvous":
check:
res1.len == 1
res1[0] == sourceSwitch.peerInfo.signedPeerRecord.data

await allFutures(wakuSwitch.stop(), sourceSwitch.stop(), destSwitch.stop())


await allFutures(wakuSwitch.stop(), sourceSwitch.stop(), destSwitch.stop())
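
The rendezvous test builds its switches from the helper above: one switch acts as the rendezvous point, one client advertises, and one discovers. A sketch of the setup, assuming nim-libp2p's RendezVous.new() constructor (not shown in this diff):

# Two rendezvous clients built with the helper from the diff above.
let
  rdvA = RendezVous.new() # client that will advertise itself
  rdvB = RendezVous.new() # client that will look peers up
  sourceSwitch = newRendezvousClientSwitch(rdvA)
  destSwitch = newRendezvousClientSwitch(rdvB)
# After advertising under a shared namespace, a request from the other
# client should return the advertiser's signed peer record, which is what
# the res1 checks above assert.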
@ -8,23 +8,20 @@ import
libp2p/protocols/connectivity/relay/relay,
libp2p/protocols/connectivity/relay/client,
stew/byteutils
import
../../waku/node/waku_switch,
./testlib/common,
./testlib/wakucore
import ../../waku/node/waku_switch, ./testlib/common, ./testlib/wakucore

proc newCircuitRelayClientSwitch(relayClient: RelayClient): Switch =
SwitchBuilder.new()
.withRng(rng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(relayClient)
.build()
SwitchBuilder
.new()
.withRng(rng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(relayClient)
.build()

suite "Waku Switch":

asyncTest "Waku Switch works with AutoNat":
## Given
let
@ -35,7 +32,9 @@ suite "Waku Switch":

## When
await sourceSwitch.connect(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)
let ma = await AutonatClient.new().dialMe(sourceSwitch, wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)
let ma = await AutonatClient.new().dialMe(
sourceSwitch, wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs
)

## Then
check:
@ -62,8 +61,12 @@ suite "Waku Switch":
## Given
let
# Create a relay address to destSwitch using wakuSwitch as the relay
addrs = MultiAddress.init($wakuSwitch.peerInfo.addrs[0] & "/p2p/" &
$wakuSwitch.peerInfo.peerId & "/p2p-circuit").get()
addrs = MultiAddress
.init(
$wakuSwitch.peerInfo.addrs[0] & "/p2p/" & $wakuSwitch.peerInfo.peerId &
"/p2p-circuit"
)
.get()
msg = "Just one relay away..."

# Create a custom protocol
@ -87,10 +90,12 @@ suite "Waku Switch":
await sourceSwitch.connect(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)

# destClient reserves a slot on the relay.
let rsvp = await destClient.reserve(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)
let rsvp =
await destClient.reserve(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)

# sourceSwitch dial destSwitch using the relay
let conn = await sourceSwitch.dial(destSwitch.peerInfo.peerId, @[addrs], customProtoCodec)
let conn =
await sourceSwitch.dial(destSwitch.peerInfo.peerId, @[addrs], customProtoCodec)

await conn.writeLp(msg)
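
The circuit-relay test above boils down to three steps: connect both ends to the relay, let the destination reserve a slot, then dial through a /p2p-circuit address. Condensed from the test, with the same identifiers:

# 1. The destination reserves a relay slot on wakuSwitch.
let rsvp =
  await destClient.reserve(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)
# 2. The source dials the relayed address built above (".../p2p-circuit").
let conn =
  await sourceSwitch.dial(destSwitch.peerInfo.peerId, @[addrs], customProtoCodec)
# 3. Application data then flows over the relayed connection.
await conn.writeLp(msg)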
@ -1,7 +1,7 @@
{.used.}

import
  std/[sequtils,strutils],
  std/[sequtils, strutils],
  stew/byteutils,
  stew/shims/net as stewNet,
  testutils/unittests,
@ -25,9 +25,7 @@ import
  ./testlib/wakucore,
  ./testlib/wakunode

suite "WakuNode":

  asyncTest "Protocol matcher works as expected":
    let
      nodeKey1 = generateSecp256k1Key()
@ -53,14 +51,16 @@ suite "WakuNode":

    check:
      # Check that mounted codecs are actually different
      node1.wakuRelay.codec == "/vac/waku/relay/2.0.0"
      node1.wakuRelay.codec == "/vac/waku/relay/2.0.0"
      node2.wakuRelay.codec == "/vac/waku/relay/2.0.0-beta2"

    # Now verify that protocol matcher returns `true` and relay works
    await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

    var completionFut = newFuture[bool]()
    proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
    proc relayHandler(
        topic: PubsubTopic, msg: WakuMessage
    ): Future[void] {.async, gcsafe.} =
      check:
        topic == pubSubTopic
        msg.contentTopic == contentTopic
@ -87,7 +87,9 @@ suite "WakuNode":

    let
      nodeKey1 = generateSecp256k1Key()
      node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(61020), nameResolver = resolver)
      node1 = newTestWakuNode(
        nodeKey1, parseIpAddress("0.0.0.0"), Port(61020), nameResolver = resolver
      )
      nodeKey2 = generateSecp256k1Key()
      node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61022))

@ -112,14 +114,16 @@ suite "WakuNode":
    let
      maxConnections = 2
      nodeKey1 = generateSecp256k1Key()
      node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"),
        Port(60010), maxConnections = maxConnections)
      node1 = newTestWakuNode(
        nodeKey1,
        parseIpAddress("0.0.0.0"),
        Port(60010),
        maxConnections = maxConnections,
      )
      nodeKey2 = generateSecp256k1Key()
      node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"),
        Port(60012))
      node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(60012))
      nodeKey3 = generateSecp256k1Key()
      node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"),
        Port(60013))
      node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(60013))

    check:
      # Sanity check, to verify config was applied
@ -137,9 +141,11 @@ suite "WakuNode":
    await node3.start()
    await node3.mountRelay()

    discard await node1.peerManager.connectRelay(node2.switch.peerInfo.toRemotePeerInfo())
    discard
      await node1.peerManager.connectRelay(node2.switch.peerInfo.toRemotePeerInfo())
    await sleepAsync(3.seconds)
    discard await node1.peerManager.connectRelay(node3.switch.peerInfo.toRemotePeerInfo())
    discard
      await node1.peerManager.connectRelay(node3.switch.peerInfo.toRemotePeerInfo())

    check:
      # Verify that only the first connection succeeded
@ -153,11 +159,14 @@ suite "WakuNode":

    expect ResultDefect:
      # gibberish
      discard newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"),
      discard newTestWakuNode(
        nodeKey1,
        parseIpAddress("0.0.0.0"),
        bindPort = Port(61004),
        wsBindPort = Port(8000),
        wssEnabled = true,
        secureKey = "../../waku/node/key_dummy.txt")
        secureKey = "../../waku/node/key_dummy.txt",
      )

  asyncTest "Peer info updates with correct announced addresses":
    let
@ -166,10 +175,7 @@ suite "WakuNode":
      bindPort = Port(61006)
      extIp = some(parseIpAddress("127.0.0.1"))
      extPort = some(Port(61008))
      node = newTestWakuNode(
        nodeKey,
        bindIp, bindPort,
        extIp, extPort)
      node = newTestWakuNode(nodeKey, bindIp, bindPort, extIp, extPort)

    let
      bindEndpoint = MultiAddress.init(bindIp, tcpProtocol, bindPort)
@ -206,12 +212,11 @@ suite "WakuNode":
      extIp = some(parseIpAddress("127.0.0.1"))
      extPort = some(Port(61012))
      domainName = "example.com"
      expectedDns4Addr = MultiAddress.init("/dns4/" & domainName & "/tcp/" & $(extPort.get())).get()
      expectedDns4Addr =
        MultiAddress.init("/dns4/" & domainName & "/tcp/" & $(extPort.get())).get()
      node = newTestWakuNode(
        nodeKey,
        bindIp, bindPort,
        extIp, extPort,
        dns4DomainName = some(domainName))
        nodeKey, bindIp, bindPort, extIp, extPort, dns4DomainName = some(domainName)
      )

    check:
      node.announcedAddresses.len == 1
@ -224,17 +229,15 @@ suite "WakuNode":
      bindPort = Port(0)

      domainName = "status.im"
      node = newTestWakuNode(
        nodeKey,
        bindIp, bindPort,
        dns4DomainName = some(domainName))
      node =
        newTestWakuNode(nodeKey, bindIp, bindPort, dns4DomainName = some(domainName))

    var ipStr = ""
    var enrIp = node.enr.tryGet("ip", array[4, byte])

    if enrIp.isSome():
      ipStr &= $ipv4(enrIp.get())

      ipStr &= $ipv4(enrIp.get())

    # Check that the IP filled is the one received by the DNS lookup
    # As IPs may change, we check that it's not empty, not the 0 IP and not localhost
    check:
@ -251,24 +254,21 @@ suite "WakuNode":
      inexistentDomain = "thisdomain.doesnot.exist"
      invalidDomain = ""
      expectedError = "Could not resolve IP from DNS: empty response"

    var inexistentDomainErr, invalidDomainErr: string = ""

    # Create node with inexistent domain
    try:
      let node = newTestWakuNode(
        nodeKey,
        bindIp, bindPort,
        dns4DomainName = some(inexistentDomain))
        nodeKey, bindIp, bindPort, dns4DomainName = some(inexistentDomain)
      )
    except Exception as e:
      inexistentDomainErr = e.msg

    # Create node with invalid domain
    try:
      let node = newTestWakuNode(
        nodeKey,
        bindIp, bindPort,
        dns4DomainName = some(invalidDomain))
      let node =
        newTestWakuNode(nodeKey, bindIp, bindPort, dns4DomainName = some(invalidDomain))
    except Exception as e:
      invalidDomainErr = e.msg

@ -287,8 +287,12 @@ suite "WakuNode":
    let
      # node with custom agent string
      nodeKey1 = generateSecp256k1Key()
      node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(61014),
        agentString = some(expectedAgentString1))
      node1 = newTestWakuNode(
        nodeKey1,
        parseIpAddress("0.0.0.0"),
        Port(61014),
        agentString = some(expectedAgentString1),
      )

      # node with default agent string from libp2p
      nodeKey2 = generateSecp256k1Key()
@ -303,8 +307,10 @@ suite "WakuNode":
    await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
    await node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()])

    let node1Agent = node2.switch.peerStore[AgentBook][node1.switch.peerInfo.toRemotePeerInfo().peerId]
    let node2Agent = node1.switch.peerStore[AgentBook][node2.switch.peerInfo.toRemotePeerInfo().peerId]
    let node1Agent =
      node2.switch.peerStore[AgentBook][node1.switch.peerInfo.toRemotePeerInfo().peerId]
    let node2Agent =
      node1.switch.peerStore[AgentBook][node2.switch.peerInfo.toRemotePeerInfo().peerId]

    check:
      node1Agent == expectedAgentString1
@ -322,8 +328,12 @@ suite "WakuNode":
    let
      # node with custom multiaddress
      nodeKey1 = generateSecp256k1Key()
      node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(61018),
        extMultiAddrs = @[expectedMultiaddress1])
      node1 = newTestWakuNode(
        nodeKey1,
        parseIpAddress("0.0.0.0"),
        Port(61018),
        extMultiAddrs = @[expectedMultiaddress1],
      )

      # node with default multiaddress
      nodeKey2 = generateSecp256k1Key()
@ -338,7 +348,9 @@ suite "WakuNode":
    await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
    await node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()])

    let node1MultiAddrs = node2.switch.peerStore[AddressBook][node1.switch.peerInfo.toRemotePeerInfo().peerId]
    let node1MultiAddrs = node2.switch.peerStore[AddressBook][
      node1.switch.peerInfo.toRemotePeerInfo().peerId
    ]

    check:
      node1MultiAddrs.contains(expectedMultiaddress1)
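One behavioural detail worth keeping in mind when reading the `maxConnections` test above: the limit is enforced by the peer manager, and with `maxConnections = 2` the suite expects only the first `connectRelay` dial to succeed. Condensed from the code in this diff (ports and keys illustrative):

let node1 = newTestWakuNode(
  generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(60010), maxConnections = 2
)
# after starting all three nodes and mounting relay:
discard await node1.peerManager.connectRelay(node2.switch.peerInfo.toRemotePeerInfo())
discard await node1.peerManager.connectRelay(node3.switch.peerInfo.toRemotePeerInfo())
# the test then verifies that only the first connection succeeded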
@ -15,9 +15,7 @@ import
  ./testlib/wakucore,
  ./testlib/wakunode

suite "WakuNode - Filter":

  asyncTest "subscriber should receive the message handled by the publisher":
    ## Setup
    let
@ -38,14 +36,18 @@ suite "WakuNode - Filter":
    let
      pubSubTopic = DefaultPubsubTopic
      contentTopic = DefaultContentTopic
      message = fakeWakuMessage(contentTopic=contentTopic)
      message = fakeWakuMessage(contentTopic = contentTopic)

    var filterPushHandlerFut = newFuture[(PubsubTopic, WakuMessage)]()
    proc filterPushHandler(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async, gcsafe, closure.} =
    proc filterPushHandler(
        pubsubTopic: PubsubTopic, msg: WakuMessage
    ) {.async, gcsafe, closure.} =
      filterPushHandlerFut.complete((pubsubTopic, msg))

    ## When
    await client.legacyFilterSubscribe(some(pubsubTopic), contentTopic, filterPushHandler, peer=serverPeerInfo)
    await client.legacyFilterSubscribe(
      some(pubsubTopic), contentTopic, filterPushHandler, peer = serverPeerInfo
    )

    # Wait for subscription to take effect
    waitFor sleepAsync(100.millis)
@ -1,10 +1,6 @@
{.used.}

import
  std/options,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos
import std/options, stew/shims/net as stewNet, testutils/unittests, chronos
import
  ../../waku/waku_core,
  ../../waku/waku_lightpush/common,
@ -13,7 +9,6 @@ import
  ./testlib/wakucore,
  ./testlib/wakunode

suite "WakuNode - Lightpush":
  asyncTest "Lightpush message return success":
    ## Setup
@ -32,7 +27,9 @@ suite "WakuNode - Lightpush":
    await bridgeNode.mountLightPush()
    lightNode.mountLightPushClient()

    discard await lightNode.peerManager.dialPeer(bridgeNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec)
    discard await lightNode.peerManager.dialPeer(
      bridgeNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec
    )
    await sleepAsync(100.milliseconds)
    await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()])

@ -40,11 +37,14 @@ suite "WakuNode - Lightpush":
    let message = fakeWakuMessage()

    var completionFutRelay = newFuture[bool]()
    proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
    proc relayHandler(
        topic: PubsubTopic, msg: WakuMessage
    ): Future[void] {.async, gcsafe.} =
      check:
        topic == DefaultPubsubTopic
        msg == message
      completionFutRelay.complete(true)

    destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))

    # Wait for subscription to take effect
@ -1,8 +1,4 @@
import
  std/[times, random],
  bearssl/rand,
  libp2p/crypto/crypto

import std/[times, random], bearssl/rand, libp2p/crypto/crypto

## Randomization

@ -12,12 +8,11 @@ proc randomize*() =
  let now = getTime()
  randomize(now.toUnix() * 1_000_000_000 + now.nanosecond)

## RNG
# Copied from here: https://github.com/status-im/nim-libp2p/blob/d522537b19a532bc4af94fcd146f779c1f23bad0/tests/helpers.nim#L28

type Rng = object
    rng: ref HmacDrbgContext
  rng: ref HmacDrbgContext

# Typically having a module variable is considered bad design. This case should
# be considered as an exception and it should be used only in the tests.
@ -33,5 +28,5 @@ proc getRng(): ref HmacDrbgContext =

  rngVar.rng

template rng*(): ref HmacDrbgContext = getRng()
template rng*(): ref HmacDrbgContext =
  getRng()
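The `rng()` template exists so that call sites share a single lazily initialised `HmacDrbgContext` without threading it through every helper; the restyle only moves its body onto a separate line. Consumers stay one-liners, e.g. (the `PrivateKey.random` signature is assumed from nim-libp2p and is not shown in this diff):

let builder = SwitchBuilder.new().withRng(rng())  # same shared context on every call
let key = libp2p_keys.PrivateKey.random(Secp256k1, rng()[]).tryGet()  # assumed API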
@ -1,7 +1,4 @@

import
  chronicles,
  chronos
import chronicles, chronos
import
  ../../../waku/waku_archive,
  ../../../waku/waku_archive/driver as driver_module,
@ -11,7 +8,6 @@ import
const storeMessageDbUrl = "postgres://postgres:test123@localhost:5432/postgres"

proc newTestPostgresDriver*(): Future[Result[ArchiveDriver, string]] {.async.} =

  proc onErr(errMsg: string) {.gcsafe, closure.} =
    error "error creating ArchiveDriver", error = errMsg
    quit(QuitFailure)
@ -21,13 +17,9 @@ proc newTestPostgresDriver*(): Future[Result[ArchiveDriver, string]] {.async.} =
    migrate = true
    maxNumConn = 50

  let driverRes = await ArchiveDriver.new(storeMessageDbUrl,
                                          vacuum,
                                          migrate,
                                          maxNumConn,
                                          onErr)
  let driverRes =
    await ArchiveDriver.new(storeMessageDbUrl, vacuum, migrate, maxNumConn, onErr)
  if driverRes.isErr():
    onErr("could not create archive driver: " & driverRes.error)

  return ok(driverRes.get())
@ -2,10 +2,9 @@

import posix

type
  Instr {.union.} = object
    bytes: array[8, byte]
    value: uint64
type Instr {.union.} = object
  bytes: array[8, byte]
  value: uint64

proc mockImpl(target, replacement: pointer) =
  # YOLO who needs alignment
@ -13,20 +12,18 @@ proc mockImpl(target, replacement: pointer) =
  var page = cast[pointer](cast[ByteAddress](target) and (not 0xfff))
  doAssert mprotect(page, 4096, PROT_WRITE or PROT_EXEC) == 0
  let rel = cast[ByteAddress](replacement) - cast[ByteAddress](target) - 5
  var
    instr =
      Instr(
        bytes: [
          0xe9.byte,
          (rel shr 0).byte,
          (rel shr 8).byte,
          (rel shr 16).byte,
          (rel shr 24).byte,
          0,
          0,
          0
        ]
      )
  var instr = Instr(
    bytes: [
      0xe9.byte,
      (rel shr 0).byte,
      (rel shr 8).byte,
      (rel shr 16).byte,
      (rel shr 24).byte,
      0,
      0,
      0,
    ]
  )
  cast[ptr uint64](target)[] = instr.value
  doAssert mprotect(page, 4096, PROT_EXEC) == 0
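A note for readers of `mockImpl` above: the byte array it assembles is an x86-64 near jump, `jmp rel32`, encoded as opcode 0xE9 followed by a little-endian 32-bit displacement. The displacement is relative to the address of the next instruction, which is why 5 (the length of the whole jump) is subtracted:

# E9 xx xx xx xx              five bytes in total
# destination = address_after_jmp + rel
#             = target + 5 + rel
# so:     rel = replacement - target - 5
# the remaining three bytes of the 8-byte union stay zero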
@ -1,26 +1,19 @@
import
  std/[
    tables,
    sequtils,
    options
  ]
import std/[tables, sequtils, options]

import
  ../../../waku/waku_core/topics,
  ../testlib/wakucore
import ../../../waku/waku_core/topics, ../testlib/wakucore

proc `==`*(table: Table[pubsub_topic.NsPubsubTopic, seq[NsContentTopic]], other: array[0..0, (string, seq[string])]): bool =
proc `==`*(
    table: Table[pubsub_topic.NsPubsubTopic, seq[NsContentTopic]],
    other: array[0 .. 0, (string, seq[string])],
): bool =
  let otherTyped = other.map(
    proc(item: (string, seq[string])): (NsPubsubTopic, seq[NsContentTopic]) =
      let
    proc(item: (string, seq[string])): (NsPubsubTopic, seq[NsContentTopic]) =
      let
        (pubsubTopic, contentTopics) = item
        nsPubsubTopic = NsPubsubTopic.parse(pubsubTopic).value()
        nsContentTopics = contentTopics.map(
          proc(contentTopic: string): NsContentTopic = NsContentTopic.parse(contentTopic).value()
          proc(contentTopic: string): NsContentTopic =
            NsContentTopic.parse(contentTopic).value()
        )
      return (nsPubsubTopic, nsContentTopics)
  )

@ -3,14 +3,18 @@

template asyncTeardown*(body: untyped): untyped =
  teardown:
    waitFor((
      proc() {.async, gcsafe.} =
        body
    )())
    waitFor(
      (
        proc() {.async, gcsafe.} =
          body
      )()
    )

template asyncSetup*(body: untyped): untyped =
  setup:
    waitFor((
      proc() {.async, gcsafe.} =
        body
    )())
    waitFor(
      (
        proc() {.async, gcsafe.} =
          body
      )()
    )

@ -1,12 +1,16 @@
import
  testutils/unittests
import testutils/unittests

template xsuite*(name: string, body: untyped) = discard
template suitex*(name: string, body: untyped) = discard
template xsuite*(name: string, body: untyped) =
  discard

template xprocSuite*(name: string, body: untyped) = discard
template procSuitex*(name: string, body: untyped) = discard
template suitex*(name: string, body: untyped) =
  discard

template xprocSuite*(name: string, body: untyped) =
  discard

template procSuitex*(name: string, body: untyped) =
  discard

template xtest*(name: string, body: untyped) =
  test name:
@ -7,22 +7,18 @@ import
  libp2p/builders,
  libp2p/crypto/crypto as libp2p_keys,
  eth/keys as eth_keys
import
  ../../../waku/waku_core,
  ./common
import ../../../waku/waku_core, ./common

export switch

# Time

proc now*(): Timestamp =
  getNanosecondTime(getTime().toUnixFloat())

proc ts*(offset=0, origin=now()): Timestamp =
proc ts*(offset = 0, origin = now()): Timestamp =
  origin + getNanosecondTime(int64(offset))

# Switch

proc generateEcdsaKey*(): libp2p_keys.PrivateKey =
@ -37,26 +33,23 @@ proc generateSecp256k1Key*(): libp2p_keys.PrivateKey =
proc ethSecp256k1Key*(hex: string): eth_keys.PrivateKey =
  eth_keys.PrivateKey.fromHex(hex).get()

proc newTestSwitch*(key=none(libp2p_keys.PrivateKey), address=none(MultiAddress)): Switch =
proc newTestSwitch*(
    key = none(libp2p_keys.PrivateKey), address = none(MultiAddress)
): Switch =
  let peerKey = key.get(generateSecp256k1Key())
  let peerAddr = address.get(MultiAddress.init("/ip4/127.0.0.1/tcp/0").get())
  return newStandardSwitch(some(peerKey), addrs=peerAddr)

  return newStandardSwitch(some(peerKey), addrs = peerAddr)

# Waku message

export
  waku_core.DefaultPubsubTopic,
  waku_core.DefaultContentTopic

export waku_core.DefaultPubsubTopic, waku_core.DefaultContentTopic

proc fakeWakuMessage*(
  payload: string|seq[byte] = "TEST-PAYLOAD",
  contentTopic = DefaultContentTopic,
  meta = newSeq[byte](),
  ts = now(),
  ephemeral = false
    payload: string | seq[byte] = "TEST-PAYLOAD",
    contentTopic = DefaultContentTopic,
    meta = newSeq[byte](),
    ts = now(),
    ephemeral = false,
): WakuMessage =
  var payloadBytes: seq[byte]
  when payload is string:
@ -70,5 +63,5 @@ proc fakeWakuMessage*(
    meta: meta,
    version: 2,
    timestamp: ts,
    ephemeral: ephemeral
    ephemeral: ephemeral,
  )
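Since every parameter of `fakeWakuMessage` is defaulted, tests override only the fields they assert on; the restyle merely switches to spaced `name = value` arguments and a trailing comma. Typical calls, all of which appear elsewhere in this diff:

let msg1 = fakeWakuMessage()                                        # all defaults
let msg2 = fakeWakuMessage(contentTopic = DefaultContentTopic, ts = now())
let msg3 = fakeWakuMessage(payload = @[byte 1], ts = Timestamp(1))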
@ -18,7 +18,6 @@ import
  ../../../waku/factory/builder,
  ./common

# Waku node

proc defaultTestWakuNodeConf*(): WakuNodeConf =
@ -36,38 +35,41 @@ proc defaultTestWakuNodeConf*(): WakuNodeConf =
    clusterId: 1.uint32,
    topics: @["/waku/2/rs/1/0"],
    relay: true,
    storeMessageDbUrl: "sqlite://store.sqlite3"
    storeMessageDbUrl: "sqlite://store.sqlite3",
  )

proc newTestWakuNode*(nodeKey: crypto.PrivateKey,
                      bindIp: IpAddress,
                      bindPort: Port,
                      extIp = none(IpAddress),
                      extPort = none(Port),
                      extMultiAddrs = newSeq[MultiAddress](),
                      peerStorage: PeerStorage = nil,
                      maxConnections = builders.MaxConnections,
                      wsBindPort: Port = (Port)8000,
                      wsEnabled: bool = false,
                      wssEnabled: bool = false,
                      secureKey: string = "",
                      secureCert: string = "",
                      wakuFlags = none(CapabilitiesBitfield),
                      nameResolver: NameResolver = nil,
                      sendSignedPeerRecord = false,
                      dns4DomainName = none(string),
                      discv5UdpPort = none(Port),
                      agentString = none(string),
                      clusterId: uint32 = 1.uint32,
                      topics: seq[string] = @["/waku/2/rs/1/0"],
                      peerStoreCapacity = none(int)): WakuNode =

proc newTestWakuNode*(
    nodeKey: crypto.PrivateKey,
    bindIp: IpAddress,
    bindPort: Port,
    extIp = none(IpAddress),
    extPort = none(Port),
    extMultiAddrs = newSeq[MultiAddress](),
    peerStorage: PeerStorage = nil,
    maxConnections = builders.MaxConnections,
    wsBindPort: Port = (Port) 8000,
    wsEnabled: bool = false,
    wssEnabled: bool = false,
    secureKey: string = "",
    secureCert: string = "",
    wakuFlags = none(CapabilitiesBitfield),
    nameResolver: NameResolver = nil,
    sendSignedPeerRecord = false,
    dns4DomainName = none(string),
    discv5UdpPort = none(Port),
    agentString = none(string),
    clusterId: uint32 = 1.uint32,
    topics: seq[string] = @["/waku/2/rs/1/0"],
    peerStoreCapacity = none(int),
): WakuNode =
  var resolvedExtIp = extIp

  # Update extPort to default value if it's missing and there's an extIp or a DNS domain
  let extPort =
    if (extIp.isSome() or dns4DomainName.isSome()) and extPort.isNone(): some(Port(60000))
    else: extPort
    if (extIp.isSome() or dns4DomainName.isSome()) and extPort.isNone():
      some(Port(60000))
    else:
      extPort

  var conf = defaultTestWakuNodeConf()

@ -78,7 +80,7 @@ proc newTestWakuNode*(nodeKey: crypto.PrivateKey,
  # If there's an error resolving the IP, an exception is thrown and test fails
  let dns = (waitFor dnsResolve(dns4DomainName.get(), conf)).valueOr:
    raise newException(Defect, error)

    resolvedExtIp = some(parseIpAddress(dns))

  let netConf = NetConfig.init(
@ -103,9 +105,7 @@ proc newTestWakuNode*(nodeKey: crypto.PrivateKey,
    raise newException(Defect, "Invalid record: " & error)

  enrBuilder.withIpAddressAndPorts(
    ipAddr = netConf.enrIp,
    tcpPort = netConf.enrPort,
    udpPort = netConf.discv5UdpPort,
    ipAddr = netConf.enrIp, tcpPort = netConf.enrPort, udpPort = netConf.discv5UdpPort
  )

  enrBuilder.withMultiaddrs(netConf.enrMultiaddrs)
@ -114,7 +114,7 @@ proc newTestWakuNode*(nodeKey: crypto.PrivateKey,
    enrBuilder.withWakuCapabilities(netConf.wakuFlags.get())

  let record = enrBuilder.build().valueOr:
    raise newException(Defect, "Invalid record: " & $error)
    raise newException(Defect, "Invalid record: " & $error)

  var builder = WakuNodeBuilder.init()
  builder.withRng(rng())
@ -126,10 +126,19 @@ proc newTestWakuNode*(nodeKey: crypto.PrivateKey,
    maxConnections = some(maxConnections),
    nameResolver = nameResolver,
    sendSignedPeerRecord = sendSignedPeerRecord,
    secureKey = if secureKey != "": some(secureKey) else: none(string),
    secureCert = if secureCert != "": some(secureCert) else: none(string),
    secureKey =
      if secureKey != "":
        some(secureKey)
      else:
        none(string)
    ,
    secureCert =
      if secureCert != "":
        some(secureCert)
      else:
        none(string)
    ,
    agentString = agentString,
  )

  return builder.build().get()
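A subtlety in `newTestWakuNode` that the restyle makes easier to spot: if either an external IP or a dns4 domain is supplied while `extPort` is left empty, the external port silently defaults to `Port(60000)`. For instance (a sketch using only the parameters shown above):

let node = newTestWakuNode(
  generateSecp256k1Key(),
  parseIpAddress("0.0.0.0"),
  Port(61000),
  extIp = some(parseIpAddress("127.0.0.1")),
  # extPort omitted => announced external port becomes Port(60000)
)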
@ -9,7 +9,7 @@ import
    waku_archive,
    waku_archive/common,
    waku_archive/driver/sqlite_driver,
    common/databases/db_sqlite
    common/databases/db_sqlite,
  ],
  ../testlib/[wakucore]

@ -42,7 +42,7 @@ proc put*(
      msgDigest = computeDigest(msg)
      msgHash = computeMessageHash(pubsubTopic, msg)
      _ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp)
      # discard crashes
        # discard crashes
  return driver

proc newArchiveDriverWithMessages*(

@ -1,9 +1,6 @@
{.used.}

import
  std/[sequtils,options],
  testutils/unittests,
  chronos
import std/[sequtils, options], testutils/unittests, chronos
import
  ../../../waku/waku_archive,
  ../../../waku/waku_archive/driver/postgres_driver,
@ -13,9 +10,7 @@ import
  ../testlib/testasync,
  ../testlib/postgres

proc computeTestCursor(pubsubTopic: PubsubTopic,
                       message: WakuMessage):
                       ArchiveCursor =
proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
  ArchiveCursor(
    pubsubTopic: pubsubTopic,
    senderTime: message.timestamp,
@ -60,12 +55,14 @@ suite "Postgres driver":
  asyncTest "Insert a message":
    const contentTopic = "test-content-topic"

    let msg = fakeWakuMessage(contentTopic=contentTopic)
    let msg = fakeWakuMessage(contentTopic = contentTopic)

    let computedDigest = computeDigest(msg)
    let computedHash = computeMessageHash(DefaultPubsubTopic, msg)

    let putRes = await driver.put(DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp)
    let putRes = await driver.put(
      DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp
    )
    assert putRes.isOk(), putRes.error

    let storedMsg = (await driver.getAllMessages()).tryGet()
@ -85,14 +82,26 @@ suite "Postgres driver":
    const pubsubTopic1 = "pubsubtopic-1"
    const pubsubTopic2 = "pubsubtopic-2"

    let msg1 = fakeWakuMessage(contentTopic=contentTopic1)
    let msg1 = fakeWakuMessage(contentTopic = contentTopic1)

    var putRes = await driver.put(pubsubTopic1, msg1, computeDigest(msg1), computeMessageHash(pubsubTopic1, msg1), msg1.timestamp)
    var putRes = await driver.put(
      pubsubTopic1,
      msg1,
      computeDigest(msg1),
      computeMessageHash(pubsubTopic1, msg1),
      msg1.timestamp,
    )
    assert putRes.isOk(), putRes.error

    let msg2 = fakeWakuMessage(contentTopic=contentTopic2)
    let msg2 = fakeWakuMessage(contentTopic = contentTopic2)

    putRes = await driver.put(pubsubTopic2, msg2, computeDigest(msg2), computeMessageHash(pubsubTopic2, msg2), msg2.timestamp)
    putRes = await driver.put(
      pubsubTopic2,
      msg2,
      computeDigest(msg2),
      computeMessageHash(pubsubTopic2, msg2),
      msg2.timestamp,
    )
    assert putRes.isOk(), putRes.error

    let countMessagesRes = await driver.getMessagesCount()
@ -106,17 +115,17 @@ suite "Postgres driver":
    assert messagesRes.get().len == 1

    # Get both content topics, check ordering
    messagesRes = await driver.getMessages(contentTopic = @[contentTopic1,
                                                            contentTopic2])
    messagesRes =
      await driver.getMessages(contentTopic = @[contentTopic1, contentTopic2])
    assert messagesRes.isOk(), messagesRes.error

    assert messagesRes.get().len == 2
    assert messagesRes.get()[0][1].contentTopic == contentTopic1

    # Descending order
    messagesRes = await driver.getMessages(contentTopic = @[contentTopic1,
                                                            contentTopic2],
                                           ascendingOrder = false)
    messagesRes = await driver.getMessages(
      contentTopic = @[contentTopic1, contentTopic2], ascendingOrder = false
    )
    assert messagesRes.isOk(), messagesRes.error

    assert messagesRes.get().len == 2
@ -124,28 +133,26 @@ suite "Postgres driver":

    # cursor
    # Get both content topics
    messagesRes =
      await driver.getMessages(contentTopic = @[contentTopic1,
                                                 contentTopic2],
                               cursor = some(
                                 computeTestCursor(pubsubTopic1,
                                                   messagesRes.get()[1][1])))
    messagesRes = await driver.getMessages(
      contentTopic = @[contentTopic1, contentTopic2],
      cursor = some(computeTestCursor(pubsubTopic1, messagesRes.get()[1][1])),
    )
    assert messagesRes.isOk()
    assert messagesRes.get().len == 1

    # Get both content topics but one pubsub topic
    messagesRes = await driver.getMessages(contentTopic = @[contentTopic1,
                                                            contentTopic2],
                                           pubsubTopic = some(pubsubTopic1))
    messagesRes = await driver.getMessages(
      contentTopic = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
    )
    assert messagesRes.isOk(), messagesRes.error

    assert messagesRes.get().len == 1
    assert messagesRes.get()[0][1].contentTopic == contentTopic1

    # Limit
    messagesRes = await driver.getMessages(contentTopic = @[contentTopic1,
                                                            contentTopic2],
                                           maxPageSize = 1)
    messagesRes = await driver.getMessages(
      contentTopic = @[contentTopic1, contentTopic2], maxPageSize = 1
    )
    assert messagesRes.isOk(), messagesRes.error
    assert messagesRes.get().len == 1

@ -157,11 +164,20 @@ suite "Postgres driver":
    let msg1 = fakeWakuMessage(ts = now)
    let msg2 = fakeWakuMessage(ts = now)

    var putRes = await driver.put(DefaultPubsubTopic,
      msg1, computeDigest(msg1), computeMessageHash(DefaultPubsubTopic, msg1), msg1.timestamp)
    var putRes = await driver.put(
      DefaultPubsubTopic,
      msg1,
      computeDigest(msg1),
      computeMessageHash(DefaultPubsubTopic, msg1),
      msg1.timestamp,
    )
    assert putRes.isOk(), putRes.error

    putRes = await driver.put(DefaultPubsubTopic,
      msg2, computeDigest(msg2), computeMessageHash(DefaultPubsubTopic, msg2), msg2.timestamp)
    putRes = await driver.put(
      DefaultPubsubTopic,
      msg2,
      computeDigest(msg2),
      computeMessageHash(DefaultPubsubTopic, msg2),
      msg2.timestamp,
    )
    assert not putRes.isOk()
File diff suppressed because it is too large
@ -1,22 +1,19 @@
{.used.}

import
  std/options,
  stew/results,
  testutils/unittests
import std/options, stew/results, testutils/unittests
import
  ../../../waku/waku_archive,
  ../../../waku/waku_archive/driver/queue_driver/queue_driver {.all.},
  ../../../waku/waku_archive/driver/queue_driver/index,
  ../../../waku/waku_core

# Helper functions

proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
  ## Use i to generate an Index WakuMessage
  var data {.noinit.}: array[32, byte]
  for x in data.mitems: x = i.byte
  for x in data.mitems:
    x = i.byte

  let
    message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
@ -24,7 +21,7 @@ proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
      receiverTime: Timestamp(i),
      senderTime: Timestamp(i),
      digest: MessageDigest(data: data),
      pubsubTopic: "test-pubsub-topic"
      pubsubTopic: "test-pubsub-topic",
    )

  (cursor, message)
@ -38,9 +35,7 @@ proc getPrepopulatedTestQueue(unsortedSet: auto, capacity: int): QueueDriver =

  driver

procSuite "Sorted driver queue":

  test "queue capacity - add a message over the limit":
    ## Given
    let capacity = 5
@ -48,7 +43,7 @@ procSuite "Sorted driver queue":

    ## When
    # Fill up the queue
    for i in 1..capacity:
    for i in 1 .. capacity:
      let (index, message) = genIndexedWakuMessage(i.int8)
      require(driver.add(index, message).isOk())

@ -67,7 +62,7 @@ procSuite "Sorted driver queue":

    ## When
    # Fill up the queue
    for i in 1..capacity:
    for i in 1 .. capacity:
      let (index, message) = genIndexedWakuMessage(i.int8)
      require(driver.add(index, message).isOk())

@ -89,7 +84,7 @@ procSuite "Sorted driver queue":
    ## Given
    let
      capacity = 5
      unsortedSet = [5,1,3,2,4]
      unsortedSet = [5, 1, 3, 2, 4]
    let driver = getPrepopulatedTestQueue(unsortedSet, capacity)

    # Walk forward through the set and verify ascending order
@ -110,7 +105,7 @@ procSuite "Sorted driver queue":
    ## Given
    let
      capacity = 5
      unsortedSet = [5,1,3,2,4]
      unsortedSet = [5, 1, 3, 2, 4]
    let driver = getPrepopulatedTestQueue(unsortedSet, capacity)

    ## When
@ -141,7 +136,7 @@ procSuite "Sorted driver queue":
    ## Given
    let
      capacity = 5
      unsortedSet = [5,1,3,2,4]
      unsortedSet = [5, 1, 3, 2, 4]
    let driver = getPrepopulatedTestQueue(unsortedSet, capacity)

    ## When
@ -172,7 +167,7 @@ procSuite "Sorted driver queue":
    ## Given
    let
      capacity = 5
      unsortedSet = [5,1,3,2,4]
      unsortedSet = [5, 1, 3, 2, 4]
    let driver = getPrepopulatedTestQueue(unsortedSet, capacity)

    let
@ -1,18 +1,11 @@
{.used.}

import
  std/times,
  stew/byteutils,
  testutils/unittests,
  nimcrypto
import
  ../../../waku/waku_core,
  ../../../waku/waku_archive/driver/queue_driver/index

import std/times, stew/byteutils, testutils/unittests, nimcrypto
import ../../../waku/waku_core, ../../../waku/waku_archive/driver/queue_driver/index

## Helpers

proc getTestTimestamp(offset=0): Timestamp =
proc getTestTimestamp(offset = 0): Timestamp =
  let now = getNanosecondTime(epochTime() + float(offset))
  Timestamp(now)

@ -26,52 +19,75 @@ proc hashFromStr(input: string): MDigest[256] =

  return hashed

suite "Queue Driver - index":

  ## Test vars
  let
    smallIndex1 = Index(digest: hashFromStr("1234"),
                        receiverTime: getNanosecondTime(0),
                        senderTime: getNanosecondTime(1000))
    smallIndex2 = Index(digest: hashFromStr("1234567"), # digest is less significant than senderTime
                        receiverTime: getNanosecondTime(0),
                        senderTime: getNanosecondTime(1000))
    largeIndex1 = Index(digest: hashFromStr("1234"),
                        receiverTime: getNanosecondTime(0),
                        senderTime: getNanosecondTime(9000)) # only senderTime differ from smallIndex1
    largeIndex2 = Index(digest: hashFromStr("12345"), # only digest differs from smallIndex1
                        receiverTime: getNanosecondTime(0),
                        senderTime: getNanosecondTime(1000))
    eqIndex1 = Index(digest: hashFromStr("0003"),
                     receiverTime: getNanosecondTime(0),
                     senderTime: getNanosecondTime(54321))
    eqIndex2 = Index(digest: hashFromStr("0003"),
                     receiverTime: getNanosecondTime(0),
                     senderTime: getNanosecondTime(54321))
    eqIndex3 = Index(digest: hashFromStr("0003"),
                     receiverTime: getNanosecondTime(9999), # receiverTime difference should have no effect on comparisons
                     senderTime: getNanosecondTime(54321))
    diffPsTopic = Index(digest: hashFromStr("1234"),
                        receiverTime: getNanosecondTime(0),
                        senderTime: getNanosecondTime(1000),
                        pubsubTopic: "zzzz")
    noSenderTime1 = Index(digest: hashFromStr("1234"),
                          receiverTime: getNanosecondTime(1100),
                          senderTime: getNanosecondTime(0),
                          pubsubTopic: "zzzz")
    noSenderTime2 = Index(digest: hashFromStr("1234"),
                          receiverTime: getNanosecondTime(10000),
                          senderTime: getNanosecondTime(0),
                          pubsubTopic: "zzzz")
    noSenderTime3 = Index(digest: hashFromStr("1234"),
                          receiverTime: getNanosecondTime(1200),
                          senderTime: getNanosecondTime(0),
                          pubsubTopic: "aaaa")
    noSenderTime4 = Index(digest: hashFromStr("0"),
                          receiverTime: getNanosecondTime(1200),
                          senderTime: getNanosecondTime(0),
                          pubsubTopic: "zzzz")
    smallIndex1 = Index(
      digest: hashFromStr("1234"),
      receiverTime: getNanosecondTime(0),
      senderTime: getNanosecondTime(1000),
    )
    smallIndex2 = Index(
      digest: hashFromStr("1234567"), # digest is less significant than senderTime
      receiverTime: getNanosecondTime(0),
      senderTime: getNanosecondTime(1000),
    )
    largeIndex1 = Index(
      digest: hashFromStr("1234"),
      receiverTime: getNanosecondTime(0),
      senderTime: getNanosecondTime(9000),
    ) # only senderTime differ from smallIndex1
    largeIndex2 = Index(
      digest: hashFromStr("12345"), # only digest differs from smallIndex1
      receiverTime: getNanosecondTime(0),
      senderTime: getNanosecondTime(1000),
    )
    eqIndex1 = Index(
      digest: hashFromStr("0003"),
      receiverTime: getNanosecondTime(0),
      senderTime: getNanosecondTime(54321),
    )
    eqIndex2 = Index(
      digest: hashFromStr("0003"),
      receiverTime: getNanosecondTime(0),
      senderTime: getNanosecondTime(54321),
    )
    eqIndex3 = Index(
      digest: hashFromStr("0003"),
      receiverTime: getNanosecondTime(9999),
      # receiverTime difference should have no effect on comparisons
      senderTime: getNanosecondTime(54321),
    )
    diffPsTopic = Index(
      digest: hashFromStr("1234"),
      receiverTime: getNanosecondTime(0),
      senderTime: getNanosecondTime(1000),
      pubsubTopic: "zzzz",
    )
    noSenderTime1 = Index(
      digest: hashFromStr("1234"),
      receiverTime: getNanosecondTime(1100),
      senderTime: getNanosecondTime(0),
      pubsubTopic: "zzzz",
    )
    noSenderTime2 = Index(
      digest: hashFromStr("1234"),
      receiverTime: getNanosecondTime(10000),
      senderTime: getNanosecondTime(0),
      pubsubTopic: "zzzz",
    )
    noSenderTime3 = Index(
      digest: hashFromStr("1234"),
      receiverTime: getNanosecondTime(1200),
      senderTime: getNanosecondTime(0),
      pubsubTopic: "aaaa",
    )
    noSenderTime4 = Index(
      digest: hashFromStr("0"),
      receiverTime: getNanosecondTime(1200),
      senderTime: getNanosecondTime(0),
      pubsubTopic: "zzzz",
    )

  test "Index comparison":
    # Index comparison with senderTime diff
@ -125,9 +141,9 @@ suite "Queue Driver - index":
    # Receiver time plays no role, even without sender time
    check:
      eqIndex1 == eqIndex3
      noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal
      noSenderTime1 != noSenderTime3 # pubsubTopics differ
      noSenderTime1 != noSenderTime4 # digests differ
      noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal
      noSenderTime1 != noSenderTime3 # pubsubTopics differ
      noSenderTime1 != noSenderTime4 # digests differ

    # Unequal sender time
    check:
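Taken together, the fixtures and assertions above pin down the `Index` ordering: `senderTime` dominates, the digest breaks ties, `pubsubTopic` separates otherwise-equal entries, and `receiverTime` never participates. A plausible comparison consistent with those assertions (a sketch only, not the driver's actual implementation):

proc cmpSketch(a, b: Index): int =
  # senderTime first; digest, then pubsubTopic, as tie-breakers;
  # receiverTime deliberately ignored
  result = cmp(a.senderTime, b.senderTime)
  if result == 0:
    result = cmp(a.digest.data, b.digest.data)
  if result == 0:
    result = cmp(a.pubsubTopic, b.pubsubTopic)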
@ -1,9 +1,7 @@
{.used.}

import
  std/[options, sequtils, algorithm],
  testutils/unittests,
  libp2p/protobuf/minprotobuf
  std/[options, sequtils, algorithm], testutils/unittests, libp2p/protobuf/minprotobuf
import
  ../../../waku/waku_archive,
  ../../../waku/waku_archive/driver/queue_driver/queue_driver {.all.},
@ -12,28 +10,26 @@ import
  ../testlib/common,
  ../testlib/wakucore

proc getTestQueueDriver(numMessages: int): QueueDriver =
  let testQueueDriver = QueueDriver.new(numMessages)

  var data {.noinit.}: array[32, byte]
  for x in data.mitems: x = 1
  for x in data.mitems:
    x = 1

  for i in 0..<numMessages:
  for i in 0 ..< numMessages:
    let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))

    let index = Index(
      receiverTime: Timestamp(i),
      senderTime: Timestamp(i),
      digest: MessageDigest(data: data)
      digest: MessageDigest(data: data),
    )

    discard testQueueDriver.add(index, msg)

  return testQueueDriver

procSuite "Queue driver - pagination":
  let driver = getTestQueueDriver(10)
  let
@ -48,13 +44,13 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.len == 2
      data == msgList[4..5]
      data == msgList[4 .. 5]

  test "Forward pagination - initial pagination request with an empty cursor":
    ## Given
@ -64,13 +60,13 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.len == 2
      data == msgList[0..1]
      data == msgList[0 .. 1]

  test "Forward pagination - initial pagination request with an empty cursor to fetch the entire history":
    ## Given
@ -80,13 +76,13 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.len == 10
      data == msgList[0..9]
      data == msgList[0 .. 9]

  test "Forward pagination - empty msgList":
    ## Given
@ -97,7 +93,7 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -112,13 +108,13 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.len == 6
      data == msgList[4..9]
      data == msgList[4 .. 9]

  test "Forward pagination - page size larger than the maximum allowed page size":
    ## Given
@ -128,7 +124,7 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -143,7 +139,7 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -152,12 +148,12 @@ procSuite "Queue driver - pagination":

  test "Forward pagination - invalid cursor":
    ## Given
    let msg = fakeWakuMessage(payload= @[byte 10])
    let msg = fakeWakuMessage(payload = @[byte 10])
    let index = ArchiveCursor(
      pubsubTopic: DefaultPubsubTopic,
      senderTime: msg.timestamp,
      storeTime: msg.timestamp,
      digest: computeDigest(msg)
      digest: computeDigest(msg),
    ).toIndex()

    let
@ -166,7 +162,7 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let error = page.tryError()
@ -182,7 +178,7 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -198,7 +194,7 @@ procSuite "Queue driver - pagination":
      forward: bool = true

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -212,17 +208,19 @@ procSuite "Queue driver - pagination":
      cursor: Option[Index] = none(Index)
      forward = true

    proc onlyEvenTimes(index: Index, msg: WakuMessage): bool = msg.timestamp.int64 mod 2 == 0
    proc onlyEvenTimes(index: Index, msg: WakuMessage): bool =
      msg.timestamp.int64 mod 2 == 0

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor, predicate=onlyEvenTimes)
    let page = driver.getPage(
      pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyEvenTimes
    )

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.mapIt(it.timestamp.int) == @[0, 2, 4]

  test "Backward pagination - normal pagination":
    ## Given
    let
@ -231,12 +229,12 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data == msgList[1..2].reversed
      data == msgList[1 .. 2].reversed

  test "Backward pagination - empty msgList":
    ## Given
@ -247,7 +245,7 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -262,13 +260,13 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.len == 2
      data == msgList[8..9].reversed
      data == msgList[8 .. 9].reversed

  test "Backward pagination - initial pagination request with an empty cursor to fetch the entire history":
    ## Given
@ -278,13 +276,13 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.len == 10
      data == msgList[0..9].reversed
      data == msgList[0 .. 9].reversed

  test "Backward pagination - page size larger than the remaining messages":
    ## Given
@ -294,12 +292,12 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data == msgList[0..2].reversed
      data == msgList[0 .. 2].reversed

  test "Backward pagination - page size larger than the Maximum allowed page size":
    ## Given
@ -309,7 +307,7 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -324,7 +322,7 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -333,12 +331,12 @@ procSuite "Queue driver - pagination":

  test "Backward pagination - invalid cursor":
    ## Given
    let msg = fakeWakuMessage(payload= @[byte 10])
    let msg = fakeWakuMessage(payload = @[byte 10])
    let index = ArchiveCursor(
      pubsubTopic: DefaultPubsubTopic,
      senderTime: msg.timestamp,
      storeTime: msg.timestamp,
      digest: computeDigest(msg)
      digest: computeDigest(msg),
    ).toIndex()

    let
@ -347,7 +345,7 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let error = page.tryError()
@ -363,7 +361,7 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -379,7 +377,7 @@ procSuite "Queue driver - pagination":
      forward: bool = false

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor)
    let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

    ## Then
    let data = page.tryGet().mapIt(it[1])
@ -393,12 +391,15 @@ procSuite "Queue driver - pagination":
      cursor: Option[Index] = none(Index)
      forward = false

    proc onlyOddTimes(index: Index, msg: WakuMessage): bool = msg.timestamp.int64 mod 2 != 0
    proc onlyOddTimes(index: Index, msg: WakuMessage): bool =
      msg.timestamp.int64 mod 2 != 0

    ## When
    let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor, predicate=onlyOddTimes)
    let page = driver.getPage(
      pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyOddTimes
    )

    ## Then
    let data = page.tryGet().mapIt(it[1])
    check:
      data.mapIt(it.timestamp.int) == @[5, 7,9].reversed
      data.mapIt(it.timestamp.int) == @[5, 7, 9].reversed
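All the pagination tests above drive a single entry point; besides `forward` and `cursor`, the only lever is the optional row predicate. A condensed sketch of a filtered backward query, reusing the exact calls from these tests:

proc recentOnly(index: Index, msg: WakuMessage): bool =
  msg.timestamp > Timestamp(4)

let page = driver.getPage(
  pageSize = 3, forward = false, cursor = none(Index), predicate = recentOnly
)
let data = page.tryGet().mapIt(it[1])  # the newest three messages with ts > 4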
File diff suppressed because it is too large
@ -1,9 +1,6 @@
{.used.}

import
  std/sequtils,
  testutils/unittests,
  chronos
import std/sequtils, testutils/unittests, chronos
import
  ../../../waku/common/databases/db_sqlite,
  ../../../waku/waku_archive,
@ -13,9 +10,7 @@ import
  ../testlib/common,
  ../testlib/wakucore

suite "SQLite driver":

  test "init driver and database":
    ## Given
    let database = newSqliteDatabase()
@ -40,11 +35,13 @@ suite "SQLite driver":

    let driver = newSqliteArchiveDriver()

    let msg = fakeWakuMessage(contentTopic=contentTopic)
    let msg = fakeWakuMessage(contentTopic = contentTopic)
    let msgHash = computeMessageHash(DefaultPubsubTopic, msg)

    ## When
    let putRes = waitFor driver.put(DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp)
    let putRes = waitFor driver.put(
      DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp
    )

    ## Then
    check:
@ -53,11 +50,10 @@ suite "SQLite driver":
    let storedMsg = (waitFor driver.getAllMessages()).tryGet()
    check:
      storedMsg.len == 1
      storedMsg.all do (item: auto) -> bool:
      storedMsg.all do(item: auto) -> bool:
        let (pubsubTopic, msg, _, _, hash) = item
        msg.contentTopic == contentTopic and
          pubsubTopic == DefaultPubsubTopic and
          hash == msgHash
        msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and
          hash == msgHash

    ## Cleanup
    (waitFor driver.close()).expect("driver to close")
File diff suppressed because it is too large
@ -1,10 +1,6 @@
{.used.}

import
  std/[sequtils,times],
  stew/results,
  testutils/unittests,
  chronos
import std/[sequtils, times], stew/results, testutils/unittests, chronos
import
  ../../../waku/common/databases/db_sqlite,
  ../../../waku/waku_core,
@ -18,9 +14,7 @@ import
  ../testlib/common,
  ../testlib/wakucore

suite "Waku Archive - Retention policy":

  test "capacity retention policy - windowed message deletion":
    ## Given
    let
@ -29,14 +23,25 @@ suite "Waku Archive - Retention policy":

    let driver = newSqliteArchiveDriver()

    let retentionPolicy: RetentionPolicy = CapacityRetentionPolicy.new(capacity=capacity)
    let retentionPolicy: RetentionPolicy =
      CapacityRetentionPolicy.new(capacity = capacity)
    var putFutures = newSeq[Future[ArchiveDriverResult[void]]]()

    ## When
    for i in 1..capacity+excess:
      let msg = fakeWakuMessage(payload= @[byte i], contentTopic=DefaultContentTopic, ts=Timestamp(i))
      putFutures.add(driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp))

    for i in 1 .. capacity + excess:
      let msg = fakeWakuMessage(
        payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i)
      )
      putFutures.add(
        driver.put(
          DefaultPubsubTopic,
          msg,
          computeDigest(msg),
          computeMessageHash(DefaultPubsubTopic, msg),
          msg.timestamp,
        )
      )

    discard waitFor allFinished(putFutures)

    require (waitFor retentionPolicy.execute(driver)).isOk()
@ -51,17 +56,17 @@ suite "Waku Archive - Retention policy":

    ## Cleanup
    (waitFor driver.close()).expect("driver to close")

  test "size retention policy - windowed message deletion":
    ## Given
    let
      # in bytes
      sizeLimit:int64 = 52428
      sizeLimit: int64 = 52428
      excess = 325

    let driver = newSqliteArchiveDriver()

    let retentionPolicy: RetentionPolicy = SizeRetentionPolicy.new(size=sizeLimit)
    let retentionPolicy: RetentionPolicy = SizeRetentionPolicy.new(size = sizeLimit)
    var putFutures = newSeq[Future[ArchiveDriverResult[void]]]()

    # make sure that the db is empty before the test begins
@ -69,16 +74,26 @@ suite "Waku Archive - Retention policy":
    # if there are messages in db, empty them
    if storedMsg.len > 0:
      let now = getNanosecondTime(getTime().toUnixFloat())
      require (waitFor driver.deleteMessagesOlderThanTimestamp(ts=now)).isOk()
      require (waitFor driver.deleteMessagesOlderThanTimestamp(ts = now)).isOk()
      require (waitFor driver.performVacuum()).isOk()

    ## When
    ##

    # create a number of messages so that the size of the DB overshoots
    for i in 1..excess:
      let msg = fakeWakuMessage(payload= @[byte i], contentTopic=DefaultContentTopic, ts=Timestamp(i))
      putFutures.add(driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp))
    for i in 1 .. excess:
      let msg = fakeWakuMessage(
        payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i)
      )
      putFutures.add(
        driver.put(
          DefaultPubsubTopic,
          msg,
          computeDigest(msg),
          computeMessageHash(DefaultPubsubTopic, msg),
          msg.timestamp,
        )
      )

    # waitFor is used to synchronously wait for the futures to complete.
    discard waitFor allFinished(putFutures)
@ -95,7 +110,7 @@ suite "Waku Archive - Retention policy":
    # execute policy provided the current db size overflows, which results in row deletion
    require (sizeDB >= sizeLimit)
    require (waitFor retentionPolicy.execute(driver)).isOk()

    # get the number of rows from the database
    let rowCountAfterDeletion = (waitFor driver.getMessagesCount()).tryGet()

@ -115,33 +130,40 @@ suite "Waku Archive - Retention policy":

    let
      driver = newSqliteArchiveDriver()
      retentionPolicy: RetentionPolicy = CapacityRetentionPolicy.new(capacity=capacity)
      retentionPolicy: RetentionPolicy =
        CapacityRetentionPolicy.new(capacity = capacity)

    let messages = @[
      fakeWakuMessage(contentTopic=DefaultContentTopic, ts=ts(0)),
      fakeWakuMessage(contentTopic=DefaultContentTopic, ts=ts(1)),

      fakeWakuMessage(contentTopic=contentTopic, ts=ts(2)),
      fakeWakuMessage(contentTopic=contentTopic, ts=ts(3)),
      fakeWakuMessage(contentTopic=contentTopic, ts=ts(4)),
      fakeWakuMessage(contentTopic=contentTopic, ts=ts(5)),
      fakeWakuMessage(contentTopic=contentTopic, ts=ts(6))
    ]
    let messages =
      @[
        fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(0)),
        fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(1)),
        fakeWakuMessage(contentTopic = contentTopic, ts = ts(2)),
        fakeWakuMessage(contentTopic = contentTopic, ts = ts(3)),
        fakeWakuMessage(contentTopic = contentTopic, ts = ts(4)),
        fakeWakuMessage(contentTopic = contentTopic, ts = ts(5)),
        fakeWakuMessage(contentTopic = contentTopic, ts = ts(6)),
      ]

    ## When
    for msg in messages:
      require (waitFor driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)).isOk()
      require (
        waitFor driver.put(
          DefaultPubsubTopic,
          msg,
          computeDigest(msg),
          computeMessageHash(DefaultPubsubTopic, msg),
          msg.timestamp,
        )
      ).isOk()
    require (waitFor retentionPolicy.execute(driver)).isOk()

    ## Then
    let storedMsg = (waitFor driver.getAllMessages()).tryGet()
    check:
      storedMsg.len == capacity
      storedMsg.all do (item: auto) -> bool:
      storedMsg.all do(item: auto) -> bool:
        let (pubsubTopic, msg, _, _, _) = item
        msg.contentTopic == contentTopic and
          pubsubTopic == DefaultPubsubTopic
        msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic

    ## Cleanup
    (waitFor driver.close()).expect("driver to close")
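The retention-policy tests reduce to the same loop each time: insert past the limit, run `execute`, then assert on the row count. A minimal capacity-policy sketch built only from calls visible in this diff:

let driver = newSqliteArchiveDriver()
let policy: RetentionPolicy = CapacityRetentionPolicy.new(capacity = 100)

for i in 1 .. 150:
  let msg = fakeWakuMessage(payload = @[byte i], ts = Timestamp(i))
  discard waitFor driver.put(
    DefaultPubsubTopic,
    msg,
    computeDigest(msg),
    computeMessageHash(DefaultPubsubTopic, msg),
    msg.timestamp,
  )

require (waitFor policy.execute(driver)).isOk()
# at most `capacity` rows remain afterwards
(waitFor driver.close()).expect("driver to close")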