chore: remove Waku v1 and wakubridge code (#1874)

* chore: remove v1 code

* chore: deprecate support for v1 compatibility from JSON-RPC API

* chore: remove v1 tests from JSON-RPC suite

* chore: remove wakubridge code

* chore: remove v1 compatibility functions

* chore: remove support for v1 payloads from chat2 app

* chore: remove v1 from CI processes

* fix: lingering references to private API functions

* fix: remove v1 compat from chat2

* fix: some more lingering references in tests
This commit is contained in:
Hanno Cornelius 2023-08-07 15:11:46 +01:00 committed by GitHub
parent 08ff667227
commit ab344a9dec
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
60 changed files with 69 additions and 7622 deletions

View File

@ -42,19 +42,9 @@ jobs:
- 'tests/all_tests_v2.nim'
- 'tests/v2/**'
legacy:
- 'waku/v1/**'
- 'tests/all_tests_v1.nim'
- 'tests/v1/**'
- 'waku/whisper/**'
- 'tests/whisper/**'
- 'tests/all_tests_whisper.nim'
outputs:
common: ${{ steps.filter.outputs.common }}
v2: ${{ steps.filter.outputs.v2 }}
legacy: ${{ steps.filter.outputs.legacy }}
build-v2:
needs: changes
@ -125,63 +115,3 @@ jobs:
fi
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 test2 testwakunode2
build-legacy:
needs: changes
if: ${{ needs.changes.outputs.legacy == 'true' || needs.changes.outputs.common == 'true' }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
timeout-minutes: 60
name: build-legacy-${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Get submodules hash
id: submodules
run: |
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
- name: Cache submodules
uses: actions/cache@v3
with:
path: |
vendor/
.git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Build binaries
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 v1
test-legacy:
needs: changes
if: ${{ needs.changes.outputs.legacy == 'true' || needs.changes.outputs.common == 'true' }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
timeout-minutes: 60
name: test-legacy-${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Get submodules hash
id: submodules
run: |
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
- name: Cache submodules
uses: actions/cache@v3
with:
path: |
vendor/
.git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Run tests
run: make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 test1

View File

@ -53,7 +53,6 @@ jobs:
OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux")
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false wakunode1
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false wakunode2
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2
tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/

View File

@ -32,17 +32,15 @@ else # "variables.mk" was included. Business as usual until the end of this file
##########
## Main ##
##########
.PHONY: all test update clean v1 v2 test1 test2
.PHONY: all test update clean v2 test2
# default target, because it's the first one that doesn't start with '.'
all: | v1 v2
all: | v2
test: | test1 test2
test1: | testcommon testwhisper testwaku1
test: | test2
test2: | testcommon testwaku2
v1: | wakunode1 example1 sim1
v2: | wakunode2 example2 wakubridge chat2 chat2bridge
v2: | wakunode2 example2 chat2 chat2bridge
waku.nims:
ln -s waku.nimble $@
@ -167,7 +165,7 @@ testcommon: | build deps
#############
## Waku v2 ##
#############
.PHONY: testwaku2 wakunode2 testwakunode2 example2 wakubridge testbridge chat2 chat2bridge
.PHONY: testwaku2 wakunode2 testwakunode2 example2 chat2 chat2bridge
testwaku2: | build deps librln
echo -e $(BUILD_MSG) "build/$@" && \
@ -185,14 +183,6 @@ example2: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim example2 $(NIM_PARAMS) waku.nims
wakubridge: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim bridge $(NIM_PARAMS) waku.nims
testbridge: | build deps librln
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testbridge $(NIM_PARAMS) $(EXPERIMENTAL_PARAMS) waku.nims
chat2: | build deps librln
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim chat2 $(NIM_PARAMS) $(EXPERIMENTAL_PARAMS) waku.nims
@ -218,32 +208,6 @@ networkmonitor: | build deps
$(ENV_SCRIPT) nim networkmonitor $(NIM_PARAMS) waku.nims
#################
## Waku legacy ##
#################
.PHONY: testwhisper testwaku1 wakunode1 example1 sim1
testwhisper: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testwhisper $(NIM_PARAMS) waku.nims
testwaku1: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim test1 $(NIM_PARAMS) waku.nims
wakunode1: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim wakunode1 $(NIM_PARAMS) waku.nims
example1: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim example1 $(NIM_PARAMS) waku.nims
sim1: | build deps wakunode1
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim sim1 $(NIM_PARAMS) waku.nims
###################
## Documentation ##
###################

View File

@ -2,22 +2,18 @@
## Introduction
The nwaku repository implements Waku v1 and v2, and provides tools related to it.
The nwaku repository implements Waku, and provides tools related to it.
- A Nim implementation of the [Waku v1 protocol](https://specs.vac.dev/waku/waku.html).
- A Nim implementation of the [Waku v2 protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
- CLI applications `wakunode` and `wakunode2` that allow you to run a Waku v1 or v2 node.
- Examples of Waku v1 and v2 usage.
- A Nim implementation of the [Waku (v2) protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
- CLI application `wakunode2` that allows you to run a Waku node.
- Examples of Waku usage.
- Various tests of above.
For more details on Waku v1 and v2, see their respective home folders:
- [Waku v1](waku/v1/README.md)
- [Waku v2](waku/v2/README.md)
For more details see the [source code](waku/v2/README.md)
## How to Build & Run
These instructions are generic and apply to both Waku v1 and v2. For more detailed instructions, see Waku v1 and v2 home above.
These instructions are generic. For more detailed instructions, see the Waku source code above.
### Prerequisites
@ -28,10 +24,9 @@ The standard developer tools, including a C compiler, GNU Make, Bash, and Git. M
```bash
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull` in the future to keep those submodules updated.
make wakunode1 wakunode2
make wakunode2
# See available command line options
./build/wakunode --help
./build/wakunode2 --help
```
@ -43,14 +38,14 @@ For more on how to run `wakunode2`, refer to:
### Waku Protocol Test Suite
```bash
# Run all the Waku v1 and v2 tests
# Run all the Waku tests
make test
```
### Examples
Examples can be found in the examples folder. For Waku v2, there is a fully
featured chat example.
Examples can be found in the examples folder.
This includes a fully featured chat example.
### Tools

View File

@ -34,7 +34,6 @@ import
../../waku/v2/waku_node,
../../waku/v2/node/waku_metrics,
../../waku/v2/node/peer_manager,
../../waku/v2/utils/compat,
../../waku/common/utils/nat,
./config_chat2
@ -53,9 +52,6 @@ const Help = """
exit: exits chat session
"""
const
PayloadV1* {.booldefine.} = false
# XXX Connected is a bit annoying, because incoming connections don't trigger state change
# Could poll connection pool or something here, I suppose
# TODO Ensure connected turns true on incoming connections, or get rid of it
@ -68,7 +64,6 @@ type Chat = ref object
nick: string # nickname for this chat session
prompt: bool # chat prompt is showing
contentTopic: string # default content topic for chat messages
symkey: SymKey # SymKey used for v1 payload encryption (if enabled)
type
PrivateKey* = crypto.PrivateKey
@ -116,15 +111,6 @@ proc toString*(message: Chat2Message): string =
#####################
# Similarly as Status public chats now.
proc generateSymKey(contentTopic: ContentTopic): SymKey =
var ctx: HMAC[pbkdf2.sha256]
var symKey: SymKey
if pbkdf2(ctx, contentTopic.toBytes(), "", 65356, symKey) != sizeof(SymKey):
raise (ref Defect)(msg: "Should not occur as array is properly sized")
symKey
proc connectToNodes(c: Chat, nodes: seq[string]) {.async.} =
echo "Connecting to nodes"
await c.node.connectToNodes(nodes)
@ -140,66 +126,28 @@ proc showChatPrompt(c: Chat) =
discard
proc getChatLine(c: Chat, msg:WakuMessage): Result[string, string]=
when PayloadV1:
# Use Waku v1 payload encoding/encryption
let
keyInfo = KeyInfo(kind: Symmetric, symKey: c.symKey)
decodedPayload = decodePayload(decoded.get(), keyInfo)
if decodedPayload.isOK():
let
pb = Chat2Message.init(decodedPayload.get().payload)
chatLine = if pb.isOk: pb[].toString()
else: string.fromBytes(decodedPayload.get().payload)
return ok(chatLine)
else:
debug "Invalid encoded WakuMessage payload",
error = decodedPayload.error
return err("Invalid encoded WakuMessage payload")
else:
# No payload encoding/encryption from Waku
let
pb = Chat2Message.init(msg.payload)
chatLine = if pb.isOk: pb[].toString()
else: string.fromBytes(msg.payload)
return ok(chatline)
# No payload encoding/encryption from Waku
let
pb = Chat2Message.init(msg.payload)
chatLine = if pb.isOk: pb[].toString()
else: string.fromBytes(msg.payload)
return ok(chatline)
proc printReceivedMessage(c: Chat, msg: WakuMessage) =
when PayloadV1:
# Use Waku v1 payload encoding/encryption
let
keyInfo = KeyInfo(kind: Symmetric, symKey: c.symKey)
decodedPayload = decodePayload(decoded.get(), keyInfo)
let
pb = Chat2Message.init(msg.payload)
chatLine = if pb.isOk: pb[].toString()
else: string.fromBytes(msg.payload)
try:
echo &"{chatLine}"
except ValueError:
# Formatting fail. Print chat line in any case.
echo chatLine
if decodedPayload.isOK():
let
pb = Chat2Message.init(decodedPayload.get().payload)
chatLine = if pb.isOk: pb[].toString()
else: string.fromBytes(decodedPayload.get().payload)
echo &"{chatLine}"
c.prompt = false
showChatPrompt(c)
trace "Printing message", topic=DefaultPubsubTopic, chatLine,
contentTopic = msg.contentTopic
else:
debug "Invalid encoded WakuMessage payload",
error = decodedPayload.error
else:
# No payload encoding/encryption from Waku
let
pb = Chat2Message.init(msg.payload)
chatLine = if pb.isOk: pb[].toString()
else: string.fromBytes(msg.payload)
try:
echo &"{chatLine}"
except ValueError:
# Formatting fail. Print chat line in any case.
echo chatLine
c.prompt = false
showChatPrompt(c)
trace "Printing message", topic=DefaultPubsubTopic, chatLine,
contentTopic = msg.contentTopic
c.prompt = false
showChatPrompt(c)
trace "Printing message", topic=DefaultPubsubTopic, chatLine,
contentTopic = msg.contentTopic
proc readNick(transp: StreamTransport): Future[string] {.async.} =
# Chat prompt
@ -236,71 +184,30 @@ proc publish(c: Chat, line: string) =
proc handler(response: PushResponse) {.gcsafe, closure.} =
trace "lightpush response received", response=response
when PayloadV1:
# Use Waku v1 payload encoding/encryption
let
rng = keys.newRng()
payload = Payload(payload: chat2pb.buffer, symKey: some(c.symKey))
version = 1'u32
encodedPayload = payload.encode(version, rng[])
if encodedPayload.isOk():
var message = WakuMessage(payload: encodedPayload.get(),
contentTopic: c.contentTopic, version: version, timestamp: getNanosecondTime(time))
when defined(rln):
if not isNil(c.node.wakuRlnRelay):
# for future version when we support more than one rln protected content topic,
# we should check the message content topic as well
let success = c.node.wakuRlnRelay.appendRLNProof(message, float64(time))
if not success:
debug "could not append rate limit proof to the message", success=success
else:
debug "rate limit proof is appended to the message", success=success
let decodeRes = RateLimitProof.init(message.proof)
if decodeRes.isErr():
error "could not decode RLN proof"
let proof = decodeRes.get()
# TODO move it to log after dogfooding
let msgEpoch = fromEpoch(proof.epoch)
if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == fromEpoch(proof.epoch):
echo "--rln epoch: ", msgEpoch, " ⚠️ message rate violation! you are spamming the network!"
else:
echo "--rln epoch: ", msgEpoch
# update the last epoch
c.node.wakuRlnRelay.lastEpoch = proof.epoch
if not c.node.wakuLightPush.isNil():
# Attempt lightpush
asyncSpawn c.node.lightpushPublish(DefaultPubsubTopic, message)
var message = WakuMessage(payload: chat2pb.buffer,
contentTopic: c.contentTopic, version: 0, timestamp: getNanosecondTime(time))
when defined(rln):
if not isNil(c.node.wakuRlnRelay):
# for future version when we support more than one rln protected content topic,
# we should check the message content topic as well
let success = c.node.wakuRlnRelay.appendRLNProof(message, float64(time))
if not success:
debug "could not append rate limit proof to the message", success=success
else:
asyncSpawn c.node.publish(DefaultPubsubTopic, message, handler)
else:
warn "Payload encoding failed", error = encodedPayload.error
else:
# No payload encoding/encryption from Waku
var message = WakuMessage(payload: chat2pb.buffer,
contentTopic: c.contentTopic, version: 0, timestamp: getNanosecondTime(time))
when defined(rln):
if not isNil(c.node.wakuRlnRelay):
# for future version when we support more than one rln protected content topic,
# we should check the message content topic as well
let success = c.node.wakuRlnRelay.appendRLNProof(message, float64(time))
if not success:
debug "could not append rate limit proof to the message", success=success
else:
debug "rate limit proof is appended to the message", success=success
let decodeRes = RateLimitProof.init(message.proof)
if decodeRes.isErr():
error "could not decode the RLN proof"
debug "rate limit proof is appended to the message", success=success
let decodeRes = RateLimitProof.init(message.proof)
if decodeRes.isErr():
error "could not decode the RLN proof"
let proof = decodeRes.get()
# TODO move it to log after dogfooding
let msgEpoch = fromEpoch(proof.epoch)
if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == msgEpoch:
echo "--rln epoch: ", msgEpoch, " ⚠️ message rate violation! you are spamming the network!"
else:
echo "--rln epoch: ", msgEpoch
# update the last epoch
c.node.wakuRlnRelay.lastEpoch = proof.epoch
let proof = decodeRes.get()
# TODO move it to log after dogfooding
let msgEpoch = fromEpoch(proof.epoch)
if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == msgEpoch:
echo "--rln epoch: ", msgEpoch, " ⚠️ message rate violation! you are spamming the network!"
else:
echo "--rln epoch: ", msgEpoch
# update the last epoch
c.node.wakuRlnRelay.lastEpoch = proof.epoch
if not c.node.wakuLightPush.isNil():
# Attempt lightpush
@ -444,8 +351,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
started: true,
nick: nick,
prompt: false,
contentTopic: conf.contentTopic,
symKey: generateSymKey(conf.contentTopic))
contentTopic: conf.contentTopic)
if conf.staticnodes.len > 0:
echo "Connecting to static peers..."

View File

@ -1,8 +0,0 @@
# Common
This folder contains (a) modules that use both Waku v1 and Waku v2. and (b) utilities that are useful for both Waku v1 and v2.
Examples include:
- Bridge between v1 and v2
- NAT traversal
- interworking with protocols external to Waku (such as Matterbridge)

View File

@ -1,218 +0,0 @@
import
stew/results,
chronos,
confutils,
confutils/defs,
confutils/std/net,
libp2p/crypto/crypto,
libp2p/crypto/secp,
eth/keys
import
../../waku/common/logging
type ConfResult*[T] = Result[T, string]
type
FleetV1* = enum
none
prod
staging
test
WakuBridgeConf* = object
## Log configuration
logLevel* {.
desc: "Sets the log level for process. Supported levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL",
defaultValue: logging.LogLevel.INFO,
name: "log-level" .}: logging.LogLevel
logFormat* {.
desc: "Specifies what kind of logs should be written to stdout. Suported formats: Text, JSON",
defaultValue: logging.LogFormat.Text,
name: "log-format" .}: logging.LogFormat
## General node config
listenAddress* {.
defaultValue: defaultListenAddress()
desc: "Listening address for the LibP2P traffic"
name: "listen-address"}: ValidIpAddress
libp2pTcpPort* {.
desc: "Libp2p TCP listening port (for Waku v2)"
defaultValue: 9000
name: "libp2p-tcp-port" .}: uint16
devp2pTcpPort* {.
desc: "Devp2p TCP listening port (for Waku v1)"
defaultValue: 30303
name: "devp2p-tcp-port" .}: uint16
portsShift* {.
desc: "Add a shift to all default port numbers"
defaultValue: 0
name: "ports-shift" .}: uint16
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>"
defaultValue: "any" .}: string
rpc* {.
desc: "Enable Waku RPC server"
defaultValue: false
name: "rpc" .}: bool
rpcAddress* {.
desc: "Listening address of the RPC server",
defaultValue: ValidIpAddress.init("127.0.0.1")
name: "rpc-address" }: ValidIpAddress
rpcPort* {.
desc: "Listening port of the RPC server"
defaultValue: 8545
name: "rpc-port" .}: uint16
metricsServer* {.
desc: "Enable the metrics server"
defaultValue: false
name: "metrics-server" .}: bool
metricsServerAddress* {.
desc: "Listening address of the metrics server"
defaultValue: ValidIpAddress.init("127.0.0.1")
name: "metrics-server-address" }: ValidIpAddress
metricsServerPort* {.
desc: "Listening HTTP port of the metrics server"
defaultValue: 8008
name: "metrics-server-port" .}: uint16
### Waku v1 options
fleetV1* {.
desc: "Select the Waku v1 fleet to connect to"
defaultValue: FleetV1.none
name: "fleet-v1" .}: FleetV1
staticnodesV1* {.
desc: "Enode URL to directly connect with. Argument may be repeated"
name: "staticnode-v1" .}: seq[string]
nodekeyV1* {.
desc: "DevP2P node private key as hex",
# TODO: can the rng be passed in somehow via Load?
defaultValue: keys.KeyPair.random(keys.newRng()[])
name: "nodekey-v1" .}: keys.KeyPair
wakuV1Pow* {.
desc: "PoW requirement of Waku v1 node.",
defaultValue: 0.002
name: "waku-v1-pow" .}: float64
wakuV1TopicInterest* {.
desc: "Run as Waku v1 node with a topic-interest",
defaultValue: false
name: "waku-v1-topic-interest" .}: bool
### Waku v2 options
staticnodesV2* {.
desc: "Multiaddr of peer to directly connect with. Argument may be repeated"
name: "staticnode-v2" }: seq[string]
nodekeyV2* {.
desc: "P2P node private key as hex"
defaultValue: crypto.PrivateKey.random(Secp256k1, crypto.newRng()[]).tryGet()
name: "nodekey-v2" }: crypto.PrivateKey
store* {.
desc: "Flag whether to start store protocol",
defaultValue: false
name: "store" }: bool
filter* {.
desc: "Flag whether to start filter protocol",
defaultValue: false
name: "filter" }: bool
relay* {.
desc: "Flag whether to start relay protocol",
defaultValue: true
name: "relay" }: bool
storenode* {.
desc: "Multiaddr of peer to connect with for waku store protocol"
defaultValue: ""
name: "storenode" }: string
filternode* {.
desc: "Multiaddr of peer to connect with for waku filter protocol"
defaultValue: ""
name: "filternode" }: string
dnsAddrs* {.
desc: "Enable resolution of `dnsaddr`, `dns4` or `dns6` multiaddrs"
defaultValue: true
name: "dns-addrs" }: bool
dnsAddrsNameServers* {.
desc: "DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated."
defaultValue: @[ValidIpAddress.init("1.1.1.1"), ValidIpAddress.init("1.0.0.1")]
name: "dns-addrs-name-server" }: seq[ValidIpAddress]
### Bridge options
bridgePubsubTopic* {.
desc: "Waku v2 Pubsub topic to bridge to/from"
defaultValue: "/waku/2/default-waku/proto"
name: "bridge-pubsub-topic" }: string
proc parseCmdArg*(T: type keys.KeyPair, p: string): T =
try:
let privkey = keys.PrivateKey.fromHex(p).tryGet()
result = privkey.toKeyPair()
except CatchableError:
raise newException(ConfigurationError, "Invalid private key")
proc completeCmdArg*(T: type keys.KeyPair, val: string): seq[string] =
return @[]
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
let key = SkPrivateKey.init(p)
if key.isOk():
crypto.PrivateKey(scheme: Secp256k1, skkey: key.get())
else:
raise newException(ConfigurationError, "Invalid private key")
proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
return @[]
proc parseCmdArg*(T: type ValidIpAddress, p: string): T =
try:
result = ValidIpAddress.init(p)
except CatchableError:
raise newException(ConfigurationError, "Invalid IP address")
proc completeCmdArg*(T: type ValidIpAddress, val: string): seq[string] =
return @[]
func defaultListenAddress*(): ValidIpAddress =
(static ValidIpAddress.init("0.0.0.0"))
## Load
{.push warning[ProveInit]: off.}
proc load*(T: type WakuBridgeConf, version=""): ConfResult[T] =
try:
let conf = confutils.load(WakuBridgeConf, version=version)
ok(conf)
except CatchableError:
err(getCurrentExceptionMsg())
{.pop.}

View File

@ -1,55 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/options,
stew/[byteutils, results],
libp2p/crypto/crypto
import
../../waku/v1/protocol/waku_protocol,
../../waku/v2/waku_core
const
ContentTopicApplication = "waku"
ContentTopicAppVersion = "1"
proc toV1Topic*(contentTopic: ContentTopic): waku_protocol.Topic {.raises: [ValueError]} =
## Extracts the 4-byte array v1 topic from a content topic
## with format `/waku/1/<v1-topic-bytes-as-hex>/rfc26`
let ns = NsContentTopic.parse(contentTopic)
if ns.isErr():
let err = ns.tryError()
raise newException(ValueError, $err)
let name = ns.value.name
hexToByteArray(hexStr=name, N=4) # Byte array length
proc toV2ContentTopic*(v1Topic: waku_protocol.Topic): ContentTopic =
## Convert a 4-byte array v1 topic to a namespaced content topic
## with format `/waku/1/<v1-topic-bytes-as-hex>/rfc26`
##
## <v1-topic-bytes-as-hex> should be prefixed with `0x`
var namespacedTopic = NsContentTopic()
namespacedTopic.generation = none(int)
namespacedTopic.bias = Unbiased
namespacedTopic.application = ContentTopicApplication
namespacedTopic.version = ContentTopicAppVersion
namespacedTopic.name = v1Topic.to0xHex()
namespacedTopic.encoding = "rfc26"
return ContentTopic($namespacedTopic)
proc isBridgeable*(msg: WakuMessage): bool =
## Determines if a Waku v2 msg is on a bridgeable content topic
let ns = NsContentTopic.parse(msg.contentTopic)
if ns.isErr():
return false
return ns.value.application == ContentTopicApplication and ns.value.version == ContentTopicAppVersion

View File

@ -1,13 +0,0 @@
-d:chronicles_line_numbers
-d:chronicles_runtime_filtering:on
-d:discv5_protocol_id:d5waku
-d:chronicles_line_numbers
-d:discv5_protocol_id="d5waku"
-d:chronicles_runtime_filtering=on
-d:chronicles_sinks="textlines,json"
-d:chronicles_default_output_device=dynamic
# Disabling the following topics from nim-eth and nim-dnsdisc since some types cannot be serialized
-d:chronicles_disabled_topics="eth,dnsdisc.client"
# Results in empty output for some reason
#-d:"chronicles_enabled_topics=GossipSub:TRACE,WakuRelay:TRACE"

View File

@ -1,485 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/[tables, hashes, sequtils],
stew/byteutils,
stew/shims/net as stewNet, json_rpc/rpcserver,
chronicles,
chronos,
chronos/streams/tlsstream,
metrics,
metrics/chronos_httpserver,
libp2p/errors,
libp2p/peerstore,
eth/[keys, p2p],
eth/common/utils,
eth/p2p/[enode, peer_pool],
eth/p2p/discoveryv5/random2
import
# Waku v1 imports
../../waku/v1/protocol/waku_protocol,
# Waku v2 imports
libp2p/crypto/crypto,
libp2p/nameresolving/nameresolver,
../../waku/v2/waku_enr,
../../waku/v2/waku_core,
../../waku/v2/waku_store,
../../waku/v2/waku_filter,
../../waku/v2/node/message_cache,
../../waku/v2/waku_node,
../../waku/v2/node/peer_manager,
../../waku/v2/node/jsonrpc/debug/handlers as debug_api,
../../waku/v2/node/jsonrpc/filter/handlers as filter_api,
../../waku/v2/node/jsonrpc/relay/handlers as relay_api,
../../waku/v2/node/jsonrpc/store/handlers as store_api,
./message_compat,
./config
declarePublicCounter waku_bridge_transfers, "Number of messages transferred between Waku v1 and v2 networks", ["type"]
declarePublicCounter waku_bridge_dropped, "Number of messages dropped", ["type"]
logScope:
topics = "wakubridge"
##################
# Default values #
##################
const
ClientIdV1 = "nim-waku v1 node"
DefaultTTL = 5'u32
DeduplQSize = 20 # Maximum number of seen messages to keep in deduplication queue
MaintenancePeriod = 1.minutes
TargetV1Peers = 4 # Target number of v1 connections to maintain. Could be made configurable in future.
#########
# Types #
#########
type
WakuBridge* = ref object of RootObj
nodev1*: EthereumNode
nodev2*: WakuNode
nodev2PubsubTopic: waku_core.PubsubTopic # Pubsub topic to bridge to/from
seen: seq[hashes.Hash] # FIFO queue of seen WakuMessages. Used for deduplication.
rng: ref HmacDrbgContext
v1Pool: seq[Node] # Pool of v1 nodes for possible connections
targetV1Peers: int # Target number of v1 peers to maintain
started: bool # Indicates that bridge is running
###################
# Helper funtions #
###################
# Deduplication
proc containsOrAdd(sequence: var seq[hashes.Hash], hash: hashes.Hash): bool =
if sequence.contains(hash):
return true
if sequence.len >= DeduplQSize:
trace "Deduplication queue full. Removing oldest item."
sequence.delete 0, 0 # Remove first item in queue
sequence.add(hash)
return false
# Message conversion
func toWakuMessage(env: waku_protocol.Envelope): WakuMessage =
# Translate a Waku v1 envelope to a Waku v2 message
WakuMessage(payload: env.data,
contentTopic: toV2ContentTopic(env.topic),
timestamp: (getNanosecondTime(env.expiry) - getNanosecondTime(env.ttl)),
version: 1)
proc toWakuV2(bridge: WakuBridge, env: waku_protocol.Envelope) {.async.} =
let msg = env.toWakuMessage()
if bridge.seen.containsOrAdd(msg.encode().buffer.hash()):
# This is a duplicate message. Return
trace "Already seen. Dropping.", msg=msg
waku_bridge_dropped.inc(labelValues = ["duplicate"])
return
trace "Sending message to V2", msg=msg
waku_bridge_transfers.inc(labelValues = ["v1_to_v2"])
await bridge.nodev2.publish(bridge.nodev2PubsubTopic, msg)
proc toWakuV1(bridge: WakuBridge, msg: WakuMessage) {.gcsafe, raises: [Defect, LPError, ValueError].} =
if bridge.seen.containsOrAdd(msg.encode().buffer.hash()):
# This is a duplicate message. Return
trace "Already seen. Dropping.", msg=msg
waku_bridge_dropped.inc(labelValues = ["duplicate"])
return
trace "Sending message to V1", msg=msg
waku_bridge_transfers.inc(labelValues = ["v2_to_v1"])
# @TODO: use namespacing to map v2 contentTopics to v1 topics
let v1TopicSeq = msg.contentTopic.toBytes()[0..3]
case msg.version:
of 1:
discard bridge.nodev1.postEncoded(ttl = DefaultTTL,
topic = toV1Topic(msg.contentTopic),
encodedPayload = msg.payload) # The payload is already encoded according to https://rfc.vac.dev/spec/26/
else:
discard bridge.nodev1.postMessage(ttl = DefaultTTL,
topic = toV1Topic(msg.contentTopic),
payload = msg.payload)
proc connectToV1(bridge: WakuBridge, target: int) =
## Uses the initialised peer pool to attempt to connect
## to the set target number of v1 peers at random.
# First filter the peers in the pool that we're not yet connected to
var candidates = bridge.v1Pool.filterIt(it notin bridge.nodev1.peerPool.connectedNodes)
debug "connecting to v1", candidates=candidates.len(), target=target
# Now attempt connection to random peers from candidate list until we reach target
let maxAttempts = min(target, candidates.len())
trace "Attempting to connect to random peers from pool", target=maxAttempts
for i in 1..maxAttempts:
let
randIndex = rand(bridge.rng[], candidates.len() - 1)
randPeer = candidates[randIndex]
debug "Attempting to connect to random peer", randPeer= $randPeer
asyncSpawn bridge.nodev1.peerPool.connectToNode(randPeer)
candidates.delete(randIndex, randIndex)
if candidates.len() == 0:
# Stop when we've exhausted all candidates
break;
proc maintenanceLoop*(bridge: WakuBridge) {.async.} =
while bridge.started:
trace "running maintenance"
let
v1Connections = bridge.nodev1.peerPool.connectedNodes.len()
v2Connections = bridge.nodev2.switch.peerStore[AddressBook].len()
info "Bridge connectivity",
v1Peers=v1Connections,
v2Peers=v2Connections
# Replenish v1 connections if necessary
if v1Connections < bridge.targetV1Peers:
debug "Attempting to replenish v1 connections",
current=v1Connections,
target=bridge.targetV1Peers
bridge.connectToV1(bridge.targetV1Peers - v1Connections)
# TODO: we could do similar maintenance for v2 connections here
await sleepAsync(MaintenancePeriod)
##############
# Public API #
##############
proc new*(T: type WakuBridge,
# NodeV1 initialisation
nodev1Key: keys.KeyPair,
nodev1Address: Address,
powRequirement = 0.002,
rng: ref HmacDrbgContext,
topicInterest = none(seq[waku_protocol.Topic]),
bloom = some(fullBloom()),
# NodeV2 initialisation
nodev2Key: crypto.PrivateKey,
nodev2BindIp: ValidIpAddress, nodev2BindPort: Port,
nodev2ExtIp = none[ValidIpAddress](), nodev2ExtPort = none[Port](),
nameResolver: NameResolver = nil,
# Bridge configuration
nodev2PubsubTopic: waku_core.PubsubTopic,
v1Pool: seq[Node] = @[],
targetV1Peers = 0): T
{.raises: [Defect,IOError, TLSStreamProtocolError, LPError].} =
# Setup Waku v1 node
var
nodev1 = newEthereumNode(keys = nodev1Key, address = nodev1Address,
networkId = NetworkId(1), clientId = ClientIdV1,
addAllCapabilities = false, bindUdpPort = nodev1Address.udpPort, bindTcpPort = nodev1Address.tcpPort, rng = rng)
nodev1.addCapability Waku # Always enable Waku protocol
# Setup the Waku configuration.
# This node is being set up as a bridge. By default it gets configured as a node with
# a full bloom filter so that it will receive and forward all messages.
# It is, however, possible to configure a topic interest to bridge only
# selected messages.
# TODO: What is the PoW setting now?
let wakuConfig = WakuConfig(powRequirement: powRequirement,
bloom: bloom, isLightNode: false,
maxMsgSize: waku_protocol.defaultMaxMsgSize,
topics: topicInterest)
nodev1.configureWaku(wakuConfig)
var builder = EnrBuilder.init(nodev2Key)
builder.withIpAddressAndPorts(nodev2ExtIp, nodev2ExtPort, none(Port))
let record = builder.build().tryGet()
# Setup Waku v2 node
let nodev2 = block:
var builder = WakuNodeBuilder.init()
builder.withNodeKey(nodev2Key)
builder.withRecord(record)
builder.withNetworkConfigurationDetails(nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort).tryGet()
builder.withSwitchConfiguration(nameResolver=nameResolver)
builder.build().tryGet()
return WakuBridge(nodev1: nodev1,
nodev2: nodev2,
nodev2PubsubTopic: nodev2PubsubTopic,
rng: rng,
v1Pool: v1Pool,
targetV1Peers: targetV1Peers)
proc start*(bridge: WakuBridge) {.async.} =
info "Starting WakuBridge"
debug "Start listening on Waku v1"
# Start listening on Waku v1 node
let connectedFut = bridge.nodev1.connectToNetwork(
true, # Always enable listening
false # Disable discovery (only discovery v4 is currently supported)
)
connectedFut.callback = proc(data: pointer) {.gcsafe.} =
{.gcsafe.}:
if connectedFut.failed:
fatal "connectToNetwork failed", msg = connectedFut.readError.msg
quit(1)
# Start Waku v2 node
debug "Start listening on Waku v2"
await bridge.nodev2.start()
# Always mount relay for bridge.
# `triggerSelf` is false on a `bridge` to avoid duplicates
await bridge.nodev2.mountRelay()
bridge.nodev2.wakuRelay.triggerSelf = false
# Bridging
# Handle messages on Waku v1 and bridge to Waku v2
proc handleEnvReceived(envelope: waku_protocol.Envelope) {.gcsafe, raises: [Defect].} =
trace "Bridging envelope from V1 to V2", envelope=envelope
asyncSpawn bridge.toWakuV2(envelope)
bridge.nodev1.registerEnvReceivedHandler(handleEnvReceived)
# Handle messages on Waku v2 and bridge to Waku v1
proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
if msg.isBridgeable():
try:
trace "Bridging message from V2 to V1", msg=msg
bridge.toWakuV1(msg)
except ValueError:
trace "Failed to convert message to Waku v1. Check content-topic format.", msg=msg
waku_bridge_dropped.inc(labelValues = ["value_error"])
bridge.nodev2.subscribe(bridge.nodev2PubsubTopic, relayHandler)
bridge.started = true
asyncSpawn bridge.maintenanceLoop()
proc stop*(bridge: WakuBridge) {.async.} =
bridge.started = false
await bridge.nodev2.stop()
## Installs the Waku v2 JSON-RPC handlers on `rpcServer` according to the
## bridge configuration: the debug API is always installed; relay, filter
## and store APIs only when enabled/configured in `conf`.
proc setupV2Rpc(node: WakuNode, rpcServer: RpcHttpServer, conf: WakuBridgeConf) =
  installDebugApiHandlers(node, rpcServer)

  # Install enabled API handlers:
  if conf.relay:
    # Cache recent relay messages so the RPC polling API can serve them.
    let topicCache = relay_api.MessageCache.init(capacity=30)
    installRelayApiHandlers(node, rpcServer, topicCache)

  if conf.filternode != "":
    let messageCache = filter_api.MessageCache.init(capacity=30)
    installFilterApiHandlers(node, rpcServer, messageCache)

  if conf.storenode != "":
    installStoreApiHandlers(node, rpcServer)
{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
when isMainModule:
  ## Wakubridge executable entry point: loads configuration, sets up
  ## logging and NAT traversal for both the v1 (devp2p) and v2 (libp2p)
  ## listeners, builds and starts the bridge, mounts the configured v2
  ## protocols, optionally starts JSON-RPC and metrics servers, then
  ## runs the event loop forever.
  import
    std/os,
    libp2p/nameresolving/dnsresolver
  import
    ../../waku/common/logging,
    ../../waku/common/utils/nat,
    ../../waku/whisper/whispernodes,
    ../../waku/v1/node/rpc/wakusim,
    ../../waku/v1/node/rpc/waku,
    ../../waku/v1/node/rpc/key_storage

  const versionString = "version / git commit hash: " & git_version

  let rng = keys.newRng()

  # Configuration loading: any parse/validation error aborts the process.
  let confRes = WakuBridgeConf.load(version=versionString)
  if confRes.isErr():
    error "failure while loading the configuration", error=confRes.error
    quit(QuitFailure)

  let conf = confRes.get()

  ## Logging setup
  # Adhere to NO_COLOR initiative: https://no-color.org/
  # (any unparsable value of NO_COLOR falls back to colored output)
  let color = try: not parseBool(os.getEnv("NO_COLOR", "false"))
              except CatchableError: true

  logging.setupLogLevel(conf.logLevel)
  logging.setupLogFormat(conf.logFormat, color)

  ## `udpPort` is only supplied to satisfy underlying APIs but is not
  ## actually a supported transport.
  let udpPort = conf.devp2pTcpPort

  # NAT setup for the Waku v1 (devp2p) listener.
  let natRes = setupNat(conf.nat, ClientIdV1,
                        Port(conf.devp2pTcpPort + conf.portsShift),
                        Port(udpPort + conf.portsShift))
  if natRes.isErr():
    error "failed setupNat", error = natRes.error
    quit(QuitFailure)

  # NAT setup for the Waku v2 (libp2p) listener.
  let natRes2 = setupNat(conf.nat, clientId,
                         Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
                         Port(uint16(udpPort) + conf.portsShift))
  if natRes2.isErr():
    error "failed setupNat", error = natRes2.error
    quit(QuitFailure)

  # Load address configuration
  let
    (nodev1ExtIp, _, _) = natRes.get()
    # TODO: EthereumNode should have a better split of binding address and
    # external address. Also, can't have different ports as it stands now.
    nodev1Address = if nodev1ExtIp.isNone():
                      # No external IP discovered: bind on all interfaces.
                      Address(ip: parseIpAddress("0.0.0.0"),
                              tcpPort: Port(conf.devp2pTcpPort + conf.portsShift),
                              udpPort: Port(udpPort + conf.portsShift))
                    else:
                      Address(ip: nodev1ExtIp.get(),
                              tcpPort: Port(conf.devp2pTcpPort + conf.portsShift),
                              udpPort: Port(udpPort + conf.portsShift))
    (nodev2ExtIp, nodev2ExtPort, _) = natRes2.get()

  # Topic interest and bloom: either an (initially empty) explicit topic
  # interest list, or a full bloom filter (accept everything).
  var topicInterest: Option[seq[waku_protocol.Topic]]
  var bloom: Option[Bloom]
  if conf.wakuV1TopicInterest:
    var topics: seq[waku_protocol.Topic]
    topicInterest = some(topics)
  else:
    bloom = some(fullBloom())

  # DNS resolution
  var dnsReslvr: DnsResolver
  if conf.dnsAddrs:
    # Support for DNS multiaddrs
    var nameServers: seq[TransportAddress]
    for ip in conf.dnsAddrsNameServers:
      nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53

    dnsReslvr = DnsResolver.new(nameServers)

  # Initialise bridge with a candidate pool of v1 nodes to connect to.
  # Precedence: explicit static nodes, then the configured fleet's
  # well-known Whisper node list.
  var v1PoolStrs: seq[string]

  if conf.staticnodesV1.len > 0: v1PoolStrs = conf.staticnodesV1
  elif conf.fleetV1 == prod: v1PoolStrs = @WhisperNodes
  elif conf.fleetV1 == staging: v1PoolStrs = @WhisperNodesStaging
  elif conf.fleetV1 == test: v1PoolStrs = @WhisperNodesTest

  let
    # `expect` is acceptable here: a malformed static node address is a
    # configuration error and should abort startup.
    v1Pool = v1PoolStrs.mapIt(newNode(ENode.fromString(it).expect("correct node addrs")))
    bridge = WakuBridge.new(nodev1Key = conf.nodekeyV1,
                            nodev1Address = nodev1Address,
                            powRequirement = conf.wakuV1Pow,
                            rng = rng,
                            topicInterest = topicInterest,
                            bloom = bloom,
                            nodev2Key = conf.nodekeyV2,
                            nodev2BindIp = conf.listenAddress, nodev2BindPort = Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
                            nodev2ExtIp = nodev2ExtIp, nodev2ExtPort = nodev2ExtPort,
                            nameResolver = dnsReslvr,
                            nodev2PubsubTopic = conf.bridgePubsubTopic,
                            v1Pool = v1Pool,
                            targetV1Peers = min(v1Pool.len(), TargetV1Peers))

  waitFor bridge.start()

  # Now load rest of config

  # Mount configured Waku v2 protocols
  waitFor mountLibp2pPing(bridge.nodev2)

  if conf.store:
    waitFor mountStore(bridge.nodev2) # Bridge does not persist messages

  if conf.filter:
    waitFor mountFilter(bridge.nodev2)

  if conf.staticnodesV2.len > 0:
    waitFor connectToNodes(bridge.nodev2, conf.staticnodesV2)

  # Store/filter client setup: a bad peer address is logged, not fatal.
  if conf.storenode != "":
    mountStoreClient(bridge.nodev2)

    let storeNode = parsePeerInfo(conf.storenode)
    if storeNode.isOk():
      bridge.nodev2.peerManager.addServicePeer(storeNode.value, WakuStoreCodec)
    else:
      error "Couldn't parse conf.storenode", error = storeNode.error

  if conf.filternode != "":
    waitFor mountFilterClient(bridge.nodev2)

    let filterNode = parsePeerInfo(conf.filternode)
    if filterNode.isOk():
      bridge.nodev2.peerManager.addServicePeer(filterNode.value, WakuFilterCodec)
    else:
      error "Couldn't parse conf.filternode", error = filterNode.error

  if conf.rpc:
    let ta = initTAddress(conf.rpcAddress,
                          Port(conf.rpcPort + conf.portsShift))
    var rpcServer = newRpcHttpServer([ta])
    # Waku v1 RPC
    let keys = newKeyStorage()
    setupWakuRPC(bridge.nodev1, keys, rpcServer, rng)
    setupWakuSimRPC(bridge.nodev1, rpcServer)
    # Waku v2 rpc
    setupV2Rpc(bridge.nodev2, rpcServer, conf)

    rpcServer.start()

  if conf.metricsServer:
    let
      address = conf.metricsServerAddress
      port = conf.metricsServerPort + conf.portsShift
    info "Starting metrics HTTP server", address, port

    startMetricsHttpServer($address, Port(port))

  runForever()

View File

@ -594,8 +594,6 @@ proc startRpcServer(app: App, address: ValidIpAddress, port: Port, conf: WakuNod
if conf.relay:
let relayMessageCache = rpc_relay_api.MessageCache.init(capacity=30)
installRelayApiHandlers(app.node, server, relayMessageCache)
if conf.rpcPrivate:
installRelayPrivateApiHandlers(app.node, server, relayMessageCache)
if conf.filternode != "":
let filterMessageCache = rpc_filter_api.MessageCache.init(capacity=30)

View File

@ -55,7 +55,6 @@ pipeline {
stages {
stage('Deps') { steps { script {
/* Avoid checking multiple times. */
v1changed = versionWasChanged('v1')
v2changed = versionWasChanged('v2')
/* TODO: Re-add caching of Nim compiler. */
nix.shell("make ${params.MAKEFLAGS} V=${params.VERBOSITY} update", pure: false)
@ -64,12 +63,6 @@ pipeline {
stage('Binaries') {
parallel {
stage('V1') {
when { expression { v1changed } }
steps { script {
nix.shell("make ${params.MAKEFLAGS} NIMFLAGS=\"${params.NIMFLAGS}\" V=${params.VERBOSITY} v1")
} }
}
stage('V2') {
when { expression { v2changed } }
steps { script {
@ -81,12 +74,6 @@ pipeline {
stage('Run Tests') {
parallel {
stage('V1') {
when { expression { v1changed } }
steps { script {
nix.shell("make ${params.MAKEFLAGS} NIMFLAGS=\"${params.NIMFLAGS}\" V=${params.VERBOSITY} test1")
} }
}
stage('V2') {
when { expression { v2changed } }
steps { script {

View File

@ -1,42 +0,0 @@
# Waku v1 example
## Introduction
This is a basic Waku v1 example to show the Waku v1 API usage.
It can be run as a single node, in which case it will just post and receive its
own messages.
Or multiple nodes can be started and can connect to each other, so that
messages can be passed around.
## How to build
```sh
make example1
```
## How to run
### Single node
```sh
# Launch example node
./build/example
```
Messages will be posted and received.
### Multiple nodes
```sh
# Launch first example node
./build/example
```
Now look for an `INFO` log containing the enode address, e.g.:
`enode://26..5b@0.0.0.0:30303` (but with full address)
Copy the full enode string of the first node and start the second
node with that enode string as the `staticnode` config option:
```sh
# Launch second example node, providing the enode address of the first node
./build/example --staticnode:enode://26..5b@0.0.0.0:30303 --ports-shift:1
```
Now both nodes will also receive messages from each other.

View File

@ -1,65 +0,0 @@
import
confutils/defs, chronicles, chronos, eth/keys
type
  # Available CLI sub-commands for the example node; only the default,
  # command-less invocation exists.
  WakuNodeCmd* = enum
    noCommand

  # Command-line / config options for the Waku v1 example node.
  # Field semantics are carried by the confutils `desc` pragmas below;
  # `name` gives the CLI flag spelling.
  WakuNodeConf* = object
    logLevel* {.
      desc: "Sets the log level."
      defaultValue: LogLevel.INFO
      name: "log-level" .}: LogLevel

    case cmd* {.
      command
      defaultValue: noCommand .}: WakuNodeCmd

    of noCommand:
      tcpPort* {.
        desc: "TCP listening port."
        defaultValue: 30303
        name: "tcp-port" .}: uint16

      udpPort* {.
        desc: "UDP listening port."
        defaultValue: 30303
        name: "udp-port" .}: uint16

      portsShift* {.
        desc: "Add a shift to all port numbers."
        defaultValue: 0
        name: "ports-shift" .}: uint16

      nat* {.
        desc: "Specify method to use for determining public address. " &
              "Must be one of: any, none, upnp, pmp, extip:<IP>."
        defaultValue: "any" .}: string

      staticnodes* {.
        desc: "Enode URL to directly connect with. Argument may be repeated."
        name: "staticnode" .}: seq[string]

      # NOTE: the default generates a fresh random key on every start.
      nodekey* {.
        desc: "P2P node private key as hex.",
        defaultValue: KeyPair.random(keys.newRng()[])
        name: "nodekey" .}: KeyPair
proc parseCmdArg*(T: type KeyPair, p: string): T =
  ## confutils hook: parses a hex-encoded private key from the CLI into a
  ## `KeyPair`. Raises `ConfigurationError` on malformed input, preserving
  ## the underlying parser's message for diagnosis.
  try:
    let privkey = PrivateKey.fromHex(string(p)).tryGet()
    result = privkey.toKeyPair()
  except CatchableError as e:
    # Original discarded `e` (unused-binding hint) and lost the cause;
    # include it so users can tell *why* the key was rejected.
    raise newException(ConfigurationError, "Invalid private key: " & e.msg)
proc completeCmdArg*(T: type KeyPair, val: string): seq[string] =
  ## confutils hook: private keys have no shell-completion candidates.
  @[]
proc parseCmdArg*(T: type IpAddress, p: string): T =
  ## confutils hook: parses a CLI string into an `IpAddress`. Raises
  ## `ConfigurationError` on malformed input, preserving the underlying
  ## parser's message for diagnosis.
  try:
    result = parseIpAddress(p)
  except CatchableError as e:
    # Original discarded `e` (unused-binding hint) and lost the cause;
    # include it so users can tell *why* the address was rejected.
    raise newException(ConfigurationError, "Invalid IP address: " & e.msg)
proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
  ## confutils hook: IP addresses have no shell-completion candidates.
  @[]

View File

@ -1,119 +0,0 @@
import
confutils, chronicles, chronos, stew/byteutils, stew/shims/net as stewNet,
eth/[keys, p2p],
../../waku/v1/protocol/waku_protocol,
../../waku/v1/node/waku_helpers,
../../waku/common/utils/nat,
./config_example
## This is a simple Waku v1 example to show the Waku v1 API usage.
const clientId = "Waku example v1"
## Runs the Waku v1 example node: resolves NAT/addressing, creates an
## EthereumNode with only the Waku capability, connects to the network
## (and optionally to static peers), subscribes a sym-key filter on a
## fixed topic, and posts an encrypted+signed "Hello" payload every
## 5 seconds. Blocks forever.
proc run(config: WakuNodeConf, rng: ref HmacDrbgContext) =
  let natRes = setupNat(config.nat, clientId,
                        Port(config.tcpPort + config.portsShift),
                        Port(config.udpPort + config.portsShift))
  if natRes.isErr():
    fatal "setupNat failed", error = natRes.error
    quit(1)

  # Set up the address according to NAT information.
  let (ipExt, tcpPortExt, udpPortExt) = natRes.get()
  # TODO: EthereumNode should have a better split of binding address and
  # external address. Also, can't have different ports as it stands now.
  let address = if ipExt.isNone():
                  # No external IP discovered: bind on all interfaces.
                  Address(ip: parseIpAddress("0.0.0.0"),
                          tcpPort: Port(config.tcpPort + config.portsShift),
                          udpPort: Port(config.udpPort + config.portsShift))
                else:
                  Address(ip: ipExt.get(),
                          tcpPort: Port(config.tcpPort + config.portsShift),
                          udpPort: Port(config.udpPort + config.portsShift))

  # Create Ethereum Node
  var node = newEthereumNode(config.nodekey, # Node identifier
    address, # Address reachable for incoming requests
    NetworkId(1), # Network Id, only applicable for ETH protocol
    clientId, # Client id string
    addAllCapabilities = false, # Disable default all RLPx capabilities
    bindUdpPort = address.udpPort, # Assume same as external
    bindTcpPort = address.tcpPort, # Assume same as external
    rng = rng)

  node.addCapability Waku # Enable only the Waku protocol.

  # Set up the Waku configuration.
  let wakuConfig = WakuConfig(powRequirement: 0.002,
                              bloom: some(fullBloom()), # Full bloom filter
                              isLightNode: false, # Full node
                              maxMsgSize: waku_protocol.defaultMaxMsgSize,
                              topics: none(seq[waku_protocol.Topic]) # empty topic interest
                              )
  node.configureWaku(wakuConfig)

  # Optionally direct connect to a set of nodes.
  if config.staticnodes.len > 0:
    connectToNodes(node, config.staticnodes)

  # Connect to the network, which will make the node start listening and/or
  # connect to bootnodes, and/or start discovery.
  # This will block until first connection is made, which in this case can only
  # happen if we directly connect to nodes (step above) or if an incoming
  # connection occurs, which is why we use a callback to exit on errors instead of
  # using `await`.
  # TODO: This looks a bit awkward and the API should perhaps be altered here.
  let connectedFut = node.connectToNetwork(
    true, # Enable listening
    false # Disable discovery (only discovery v4 is currently supported)
  )
  connectedFut.callback = proc(data: pointer) {.gcsafe.} =
    {.gcsafe.}:
      if connectedFut.failed:
        fatal "connectToNetwork failed", msg = connectedFut.readError.msg
        quit(1)

  # Using a hardcoded symmetric key for encryption of the payload for the sake of
  # simplicity.
  var symKey: SymKey
  symKey[31] = 1

  # Asymmetric keypair to sign the payload.
  let signKeyPair = KeyPair.random(rng[])

  # Code to be executed on receipt of a message on filter.
  proc handler(msg: ReceivedMessage) =
    if msg.decoded.src.isSome():
      echo "Received message from ", $msg.decoded.src.get(), ": ",
        string.fromBytes(msg.decoded.payload)

  # Create and subscribe filter with above handler.
  let
    topic = [byte 0, 0, 0, 0]
    filter = initFilter(symKey = some(symKey), topics = @[topic])
  discard node.subscribeFilter(filter, handler)

  # Repeat the posting of a message every 5 seconds.
  # The closure reschedules itself via setTimer; the forward declaration
  # works around https://github.com/nim-lang/Nim/issues/17369
  var repeatMessage: proc(udata: pointer) {.gcsafe, raises: [Defect].}
  repeatMessage = proc(udata: pointer) =
    {.gcsafe.}:
      # Post a waku message on the network, encrypted with provided symmetric key,
      # signed with asymmetric key, on topic and with ttl of 30 seconds.
      let posted = node.postMessage(
        symKey = some(symKey), src = some(signKeyPair.seckey),
        ttl = 30, topic = topic, payload = @[byte 0x48, 0x65, 0x6C, 0x6C, 0x6F])

      if posted: echo "Posted message as ", $signKeyPair.pubkey
      else: echo "Posting message failed."

      discard setTimer(Moment.fromNow(5.seconds), repeatMessage)
  # Kick off the first timer tick.
  discard setTimer(Moment.fromNow(5.seconds), repeatMessage)

  runForever()
when isMainModule:
  # Entry point: create the RNG, parse CLI options, then run the node.
  let rng = keys.newRng()
  let conf = WakuNodeConf.load()
  run(conf, rng)

View File

@ -1,2 +0,0 @@
-d:chronicles_line_numbers
-d:chronicles_runtime_filtering:on

View File

@ -1,10 +0,0 @@
import
# Waku - Whisper common whisper_types test
./whisper/test_shh,
# Waku v1 tests
./v1/test_waku_connect,
./v1/test_waku_config,
./v1/test_waku_bridge,
./v1/test_waku_mail,
./v1/test_rpc_waku

View File

@ -70,9 +70,7 @@ import
./v2/test_waku_noise,
./v2/test_waku_noise_sessions,
./v2/test_waku_switch,
./v2/test_waku_rendezvous,
# Utils
./v2/test_utils_compat
./v2/test_waku_rendezvous
# Waku Keystore test suite
import
@ -97,12 +95,6 @@ import
./v2/wakunode_rest/test_rest_store
## Apps
# Wakubridge test suite
import ./all_tests_wakubridge
## Experimental
when defined(rln):

View File

@ -1,6 +0,0 @@
{.used.}
# Wakubridge test suite
import
./wakubridge/test_message_compat,
./wakubridge/test_wakubridge

View File

@ -1,5 +0,0 @@
import
# Whisper tests
./whisper/test_shh,
./whisper/test_shh_config,
./whisper/test_shh_connect

View File

@ -1,237 +0,0 @@
{.used.}
import
std/[options, os, strutils],
testutils/unittests, stew/byteutils, json_rpc/[rpcserver, rpcclient],
eth/common as eth_common, eth/[keys, p2p],
../../waku/v1/protocol/waku_protocol,
../../waku/v1/node/rpc/[hexstrings, rpc_types, waku, key_storage]
template sourceDir*: string = currentSourcePath.rsplit(DirSep, 1)[0]
## Generate client convenience marshalling wrappers from forward declarations
## For testing, ethcallsigs needs to be kept in sync with ../waku/node/v1/rpc/waku
const sigPath = sourceDir / ParDir / ParDir / "waku" / "v1" / "node" / "rpc" / "wakucallsigs.nim"
createRpcSigs(RpcSocketClient, sigPath)
## Creates a test EthereumNode bound to 0.0.0.0:30303 (TCP and UDP) with a
## fresh random keypair, enabling only the given RLPx capabilities.
proc setupNode(capabilities: varargs[ProtocolInfo, `protocolInfo`],
               rng: ref HmacDrbgContext, ): EthereumNode =
  let
    keypair = KeyPair.random(rng[])
    srvAddress = Address(ip: parseIpAddress("0.0.0.0"), tcpPort: Port(30303),
                         udpPort: Port(30303))

  result = newEthereumNode(keypair, srvAddress, NetworkId(1), "waku test rpc",
                           addAllCapabilities = false, bindUdpPort = srvAddress.udpPort, bindTcpPort = srvAddress.tcpPort, rng = rng)
  for capability in capabilities:
    result.addCapability capability
## End-to-end JSON-RPC tests for the Waku v1 API: starts a local Waku node
## plus an RPC socket server/client pair, then exercises version/info,
## key-management (asym and sym), filter and post endpoints over the wire.
proc doTests {.async.} =
  suite "Waku Remote Procedure Calls":
    let
      rng = keys.newRng()
      ethNode = setupNode(Waku, rng)
      rpcPort = 8545
      rpcServer = newRpcSocketServer(["localhost:" & $rpcPort])
      client = newRpcSocketClient()
      keys = newKeyStorage()

    setupWakuRPC(ethNode, keys, rpcServer, rng)
    rpcServer.start()
    await client.connect("localhost", Port(rpcPort))

    asyncTest "waku_version":
      check await(client.waku_version()) == wakuVersionStr
    asyncTest "waku_info":
      let info = await client.waku_info()
      check info.maxMessageSize == defaultMaxMsgSize
    asyncTest "waku_setMaxMessageSize":
      let testValue = 1024'u64
      check await(client.waku_setMaxMessageSize(testValue)) == true
      var info = await client.waku_info()
      check info.maxMessageSize == testValue
      # Values above the protocol maximum must be rejected and leave the
      # previously-set value untouched.
      expect ValueError:
        discard await(client.waku_setMaxMessageSize(defaultMaxMsgSize + 1))
      info = await client.waku_info()
      check info.maxMessageSize == testValue
    asyncTest "waku_setMinPoW":
      let testValue = 0.0001
      check await(client.waku_setMinPoW(testValue)) == true
      let info = await client.waku_info()
      check info.minPow == testValue
    # test "waku_markTrustedPeer":
      # TODO: need to connect a peer to test
    asyncTest "waku asymKey tests":
      # Lifecycle of a generated keypair: create, query, delete, and a
      # second delete must fail.
      let keyID = await client.waku_newKeyPair()
      check:
        await(client.waku_hasKeyPair(keyID)) == true
        await(client.waku_deleteKeyPair(keyID)) == true
        await(client.waku_hasKeyPair(keyID)) == false
      expect ValueError:
        discard await(client.waku_deleteKeyPair(keyID))

      # Lifecycle of an imported private key; the reported public key must
      # match the known pair.
      let privkey = "0x5dc5381cae54ba3174dc0d46040fe11614d0cc94d41185922585198b4fcef9d3"
      let pubkey = "0x04e5fd642a0f630bbb1e4cd7df629d7b8b019457a9a74f983c0484a045cebb176def86a54185b50bbba6bbf97779173695e92835d63109c23471e6da382f922fdb"
      let keyID2 = await client.waku_addPrivateKey(privkey)
      check:
        await(client.waku_getPublicKey(keyID2)) == pubkey.toPublicKey
        await(client.waku_getPrivateKey(keyID2)).toRaw() == privkey.toPrivateKey.toRaw()
        await(client.waku_hasKeyPair(keyID2)) == true
        await(client.waku_deleteKeyPair(keyID2)) == true
        await(client.waku_hasKeyPair(keyID2)) == false
      expect ValueError:
        discard await(client.waku_deleteKeyPair(keyID2))
    asyncTest "waku symKey tests":
      # Generated sym key lifecycle.
      let keyID = await client.waku_newSymKey()
      check:
        await(client.waku_hasSymKey(keyID)) == true
        await(client.waku_deleteSymKey(keyID)) == true
        await(client.waku_hasSymKey(keyID)) == false
      expect ValueError:
        discard await(client.waku_deleteSymKey(keyID))

      # Imported sym key lifecycle; round-trips byte-exact.
      let symKey = "0x0000000000000000000000000000000000000000000000000000000000000001"
      let keyID2 = await client.waku_addSymKey(symKey)
      check:
        await(client.waku_getSymKey(keyID2)) == symKey.toSymKey
        await(client.waku_hasSymKey(keyID2)) == true
        await(client.waku_deleteSymKey(keyID2)) == true
        await(client.waku_hasSymKey(keyID2)) == false
      expect ValueError:
        discard await(client.waku_deleteSymKey(keyID2))

      # Password-derived keys: same password -> same key, different
      # password -> different key.
      let keyID3 = await client.waku_generateSymKeyFromPassword("password")
      let keyID4 = await client.waku_generateSymKeyFromPassword("password")
      let keyID5 = await client.waku_generateSymKeyFromPassword("nimbus!")
      check:
        await(client.waku_getSymKey(keyID3)) ==
          await(client.waku_getSymKey(keyID4))
        await(client.waku_getSymKey(keyID3)) !=
          await(client.waku_getSymKey(keyID5))
        await(client.waku_hasSymKey(keyID3)) == true
        await(client.waku_deleteSymKey(keyID3)) == true
        await(client.waku_hasSymKey(keyID3)) == false
      expect ValueError:
        discard await(client.waku_deleteSymKey(keyID3))

    # Some defaults for the filter & post tests
    let
      ttl = 30'u64
      topicStr = "0x12345678"
      payload = "0x45879632"
      # A very low target and long time so we are sure the test never fails
      # because of this
      powTarget = 0.001
      powTime = 1.0

    asyncTest "waku filter create and delete":
      let
        topic = topicStr.toTopic()
        symKeyID = await client.waku_newSymKey()
        options = WakuFilterOptions(symKeyID: some(symKeyID),
                                    topics: some(@[topic]))
        filterID = await client.waku_newMessageFilter(options)
      check:
        filterID.string.isValidIdentifier
        await(client.waku_deleteMessageFilter(filterID)) == true
      # Deleting an already-deleted filter must fail.
      expect ValueError:
        discard await(client.waku_deleteMessageFilter(filterID))

    asyncTest "waku symKey post and filter loop":
      # Post sym-key-encrypted, unsigned; the matching filter must yield
      # exactly that message.
      let
        topic = topicStr.toTopic()
        symKeyID = await client.waku_newSymKey()
        options = WakuFilterOptions(symKeyID: some(symKeyID),
                                    topics: some(@[topic]))
        filterID = await client.waku_newMessageFilter(options)
        message = WakuPostMessage(symKeyID: some(symKeyID),
                                  ttl: ttl,
                                  topic: some(topic),
                                  payload: payload.HexDataStr,
                                  powTime: powTime,
                                  powTarget: powTarget)
      check:
        await(client.waku_setMinPoW(powTarget)) == true
        await(client.waku_post(message)) == true

      let messages = await client.waku_getFilterMessages(filterID)
      check:
        messages.len == 1
        messages[0].sig.isNone()
        messages[0].recipientPublicKey.isNone()
        messages[0].ttl == ttl
        messages[0].topic == topic
        messages[0].payload == hexToSeqByte(payload)
        messages[0].padding.len > 0
        messages[0].pow >= powTarget

        await(client.waku_deleteMessageFilter(filterID)) == true

    asyncTest "waku asymKey post and filter loop":
      # Post asym-key-encrypted, unsigned; recipient key must be reported.
      let
        topic = topicStr.toTopic()
        privateKeyID = await client.waku_newKeyPair()
        options = WakuFilterOptions(privateKeyID: some(privateKeyID))
        filterID = await client.waku_newMessageFilter(options)
        pubKey = await client.waku_getPublicKey(privateKeyID)
        message = WakuPostMessage(pubKey: some(pubKey),
                                  ttl: ttl,
                                  topic: some(topic),
                                  payload: payload.HexDataStr,
                                  powTime: powTime,
                                  powTarget: powTarget)
      check:
        await(client.waku_setMinPoW(powTarget)) == true
        await(client.waku_post(message)) == true

      let messages = await client.waku_getFilterMessages(filterID)
      check:
        messages.len == 1
        messages[0].sig.isNone()
        messages[0].recipientPublicKey.get() == pubKey
        messages[0].ttl == ttl
        messages[0].topic == topic
        messages[0].payload == hexToSeqByte(payload)
        messages[0].padding.len > 0
        messages[0].pow >= powTarget

        await(client.waku_deleteMessageFilter(filterID)) == true

    asyncTest "waku signature in post and filter loop":
      # Post sym-key-encrypted AND signed; the filter selects on the
      # signing key and the signature must be reported.
      let
        topic = topicStr.toTopic()
        symKeyID = await client.waku_newSymKey()
        privateKeyID = await client.waku_newKeyPair()
        pubKey = await client.waku_getPublicKey(privateKeyID)
        options = WakuFilterOptions(symKeyID: some(symKeyID),
                                    topics: some(@[topic]),
                                    sig: some(pubKey))
        filterID = await client.waku_newMessageFilter(options)
        message = WakuPostMessage(symKeyID: some(symKeyID),
                                  sig: some(privateKeyID),
                                  ttl: ttl,
                                  topic: some(topic),
                                  payload: payload.HexDataStr,
                                  powTime: powTime,
                                  powTarget: powTarget)
      check:
        await(client.waku_setMinPoW(powTarget)) == true
        await(client.waku_post(message)) == true

      let messages = await client.waku_getFilterMessages(filterID)
      check:
        messages.len == 1
        messages[0].sig.get() == pubKey
        messages[0].recipientPublicKey.isNone()
        messages[0].ttl == ttl
        messages[0].topic == topic
        messages[0].payload == hexToSeqByte(payload)
        messages[0].padding.len > 0
        messages[0].pow >= powTarget

        await(client.waku_deleteMessageFilter(filterID)) == true

  rpcServer.stop()
  rpcServer.close()
waitFor doTests()

View File

@ -1,98 +0,0 @@
#
# Waku
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, tables],
chronos, testutils/unittests, eth/p2p, eth/p2p/peer_pool,
../../waku/whisper/whisper_protocol as whisper,
../../waku/v1/protocol/waku_protocol as waku,
../../waku/v1/protocol/waku_bridge,
../test_helpers
# TTL short enough to keep the test quick but safely above expiry checks.
let safeTTL = 5'u32
# One protocol message interval plus margin: how long to wait for delivery.
let waitInterval = waku.messageInterval + 150.milliseconds

## Tests that a node running both Whisper and Waku v1 with a shared message
## queue acts as a bridge between a Whisper-only and a Waku-only peer.
procSuite "Waku - Whisper bridge tests":
  let rng = newRng()
  # Waku Whisper node has both capabilities, listens to Whisper and Waku and
  # relays traffic between the two.
  var
    nodeWakuWhisper = setupTestNode(rng, Whisper, Waku) # This will be the bridge
    nodeWhisper = setupTestNode(rng, Whisper)
    nodeWaku = setupTestNode(rng, Waku)

  nodeWakuWhisper.startListening()
  let bridgeNode = newNode(nodeWakuWhisper.toENode())
  # Share one queue between the bridge's Whisper and Waku protocol states,
  # which is what makes messages cross from one protocol to the other.
  nodeWakuWhisper.shareMessageQueue()

  waitFor nodeWhisper.peerPool.connectToNode(bridgeNode)
  waitFor nodeWaku.peerPool.connectToNode(bridgeNode)

  asyncTest "WakuWhisper and Whisper peers connected":
    check:
      nodeWhisper.peerPool.connectedNodes.len() == 1
      nodeWaku.peerPool.connectedNodes.len() == 1

  asyncTest "Whisper - Waku communcation via bridge":
    # topic whisper node subscribes to, waku node posts to
    let topic1 = [byte 0x12, 0, 0, 0]
    # topic waku node subscribes to, whisper node posts to
    let topic2 = [byte 0x34, 0, 0, 0]
    var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
    var futures = [newFuture[int](), newFuture[int]()]

    proc handler1(msg: whisper.ReceivedMessage) =
      check msg.decoded.payload == payloads[0]
      futures[0].complete(1)
    proc handler2(msg: waku.ReceivedMessage) =
      check msg.decoded.payload == payloads[1]
      futures[1].complete(1)

    var filter1 = whisper.subscribeFilter(nodeWhisper,
      whisper.initFilter(topics = @[topic1]), handler1)
    var filter2 = waku.subscribeFilter(nodeWaku,
      waku.initFilter(topics = @[topic2]), handler2)

    check:
      # Message should also end up in the Whisper node its queue via the bridge
      waku.postMessage(nodeWaku, ttl = safeTTL + 1, topic = topic1,
                       payload = payloads[0]) == true
      # Message should also end up in the Waku node its queue via the bridge
      whisper.postMessage(nodeWhisper, ttl = safeTTL, topic = topic2,
                          payload = payloads[1]) == true
      nodeWhisper.protocolState(Whisper).queue.items.len == 1
      nodeWaku.protocolState(Waku).queue.items.len == 1
      # waitInterval*2 as messages have to pass the bridge also (2 hops)
      await allFutures(futures).withTimeout(waitInterval*2)

      # Relay can receive Whisper & Waku messages
      nodeWakuWhisper.protocolState(Whisper).queue.items.len == 2
      nodeWakuWhisper.protocolState(Waku).queue.items.len == 2

      # Whisper node can receive Waku messages (via bridge)
      nodeWhisper.protocolState(Whisper).queue.items.len == 2
      # Waku node can receive Whisper messages (via bridge)
      nodeWaku.protocolState(Waku).queue.items.len == 2

      whisper.unsubscribeFilter(nodeWhisper, filter1) == true
      waku.unsubscribeFilter(nodeWaku, filter2) == true

    # XXX: This reads a bit weird, but eh
    waku.resetMessageQueue(nodeWaku)
    whisper.resetMessageQueue(nodeWhisper)
    # shared queue so Waku and Whisper should be set to 0
    waku.resetMessageQueue(nodeWakuWhisper)

    check:
      nodeWhisper.protocolState(Whisper).queue.items.len == 0
      nodeWaku.protocolState(Waku).queue.items.len == 0
      nodeWakuWhisper.protocolState(Whisper).queue.items.len == 0
      nodeWakuWhisper.protocolState(Waku).queue.items.len == 0

View File

@ -1,65 +0,0 @@
#
# Waku
# (c) Copyright 2020
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, options, unittest, times],
../../waku/v1/protocol/waku_protocol
## Unit tests for Waku v1 envelope validity (`valid`) and per-node
## acceptance (`allowed`): TTL, expiry, future timestamps and bloom filter.
suite "Waku envelope validation":
  test "should validate and allow envelope according to config":
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]
    # Config whose bloom filter matches the envelope's topic.
    let config = WakuConfig(powRequirement: 0, bloom: some(topic.topicBloom()),
                            isLightNode: false, maxMsgSize: defaultMaxMsgSize)

    let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid()

    let msg = initMessage(env)
    check msg.allowed(config)

  test "should invalidate envelope due to ttl 0":
    let ttl = 0'u32
    let topic = [byte 1, 2, 3, 4]
    let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should invalidate envelope due to expired":
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]
    # Expiry set to "now": already past its ttl window.
    let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should invalidate envelope due to in the future":
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]
    # there is currently a 2 second tolerance, hence the + 3
    let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl,
                       topic: topic, data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should not allow envelope due to bloom filter":
    let topic = [byte 1, 2, 3, 4]
    let wrongTopic = [byte 9, 8, 7, 6]
    # Bloom filter built from a different topic: envelope must be rejected.
    let config = WakuConfig(powRequirement: 0,
                            bloom: some(wrongTopic.topicBloom()),
                            isLightNode: false, maxMsgSize: defaultMaxMsgSize)
    let env = Envelope(expiry:100000 , ttl: 30, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    let msg = initMessage(env)
    check msg.allowed(config) == false

View File

@ -1,560 +0,0 @@
#
# Waku
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, tables],
chronos, testutils/unittests, eth/[keys, p2p], eth/p2p/peer_pool,
../../waku/v1/protocol/waku_protocol,
../test_helpers
const
safeTTL = 5'u32
waitInterval = messageInterval + 150.milliseconds
conditionTimeoutMs = 3000.milliseconds
proc resetMessageQueues(nodes: varargs[EthereumNode]) =
  ## Clears the Waku message queue of every given node.
  for n in nodes:
    resetMessageQueue(n)
# check on a condition until true or return a future containing false
# if timeout expires first
# check on a condition until true or return a future containing false
# if timeout expires first
#
# Polls `condition` every 100 ms; the wrapped future completes with `true`
# once the condition holds, and `withTimeout` yields `false` otherwise.
# NOTE(review): if the timeout fires first, the polling closure keeps
# looping in the background — presumably acceptable for short-lived test
# processes; confirm before reusing elsewhere.
proc eventually(timeout: Duration,
                condition: proc(): bool {.gcsafe, raises: [Defect].}): Future[bool] =
  let wrappedCondition = proc(): Future[bool] {.async.} =
    let f = newFuture[bool]()
    while not condition():
      await sleepAsync(100.milliseconds)
    f.complete(true)
    return await f
  return withTimeout(wrappedCondition(), timeout)
procSuite "Waku connections":
let rng = keys.newRng()
asyncTest "Waku connections":
var
n1 = setupTestNode(rng, Waku)
n2 = setupTestNode(rng, Waku)
n3 = setupTestNode(rng, Waku)
n4 = setupTestNode(rng, Waku)
var topics: seq[waku_protocol.Topic]
n1.protocolState(Waku).config.topics = some(topics)
n2.protocolState(Waku).config.topics = some(topics)
n3.protocolState(Waku).config.topics = none(seq[waku_protocol.Topic])
n4.protocolState(Waku).config.topics = none(seq[waku_protocol.Topic])
n1.startListening()
n3.startListening()
let
p1 = await n2.rlpxConnect(newNode(n1.toENode()))
p2 = await n2.rlpxConnect(newNode(n3.toENode()))
p3 = await n4.rlpxConnect(newNode(n3.toENode()))
check:
p1.isErr() == true
p2.isErr() == false
p3.isErr() == false
asyncTest "Filters with encryption and signing":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let encryptKeyPair = KeyPair.random(rng[])
let signKeyPair = KeyPair.random(rng[])
var symKey: SymKey
let topic = [byte 0x12, 0, 0, 0]
var filters: seq[string] = @[]
var payloads = [repeat(byte 1, 10), repeat(byte 2, 10),
repeat(byte 3, 10), repeat(byte 4, 10)]
var futures = [newFuture[int](), newFuture[int](),
newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[0] or
msg.decoded.payload == payloads[1]
count += 1
if count == 2: futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
proc handler3(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[2] or
msg.decoded.payload == payloads[3]
count += 1
if count == 2: futures[2].complete(1)
proc handler4(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[3]
futures[3].complete(1)
# Filters
# filter for encrypted asym
filters.add(node1.subscribeFilter(initFilter(
privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler1))
# filter for encrypted asym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler2))
# filter for encrypted sym
filters.add(node1.subscribeFilter(initFilter(symKey = some(symKey),
topics = @[topic]), handler3))
# filter for encrypted sym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
symKey = some(symKey), topics = @[topic]), handler4))
# Messages
check:
# encrypted asym
node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL,
topic = topic, payload = payloads[0]) == true
# encrypted asym + signed
node2.postMessage(some(encryptKeyPair.pubkey),
src = some(signKeyPair.seckey), ttl = safeTTL,
topic = topic, payload = payloads[1]) == true
# encrypted sym
node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic,
payload = payloads[2]) == true
# encrypted sym + signed
node2.postMessage(symKey = some(symKey),
src = some(signKeyPair.seckey),
ttl = safeTTL, topic = topic,
payload = payloads[3]) == true
node2.protocolState(Waku).queue.items.len == 4
check:
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Waku).queue.items.len == 4
for filter in filters:
check node1.unsubscribeFilter(filter) == true
asyncTest "Filters with topics":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic1 = [byte 0x12, 0, 0, 0]
let topic2 = [byte 0x34, 0, 0, 0]
var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[0]
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
var filter1 = node1.subscribeFilter(initFilter(topics = @[topic1]), handler1)
var filter2 = node1.subscribeFilter(initFilter(topics = @[topic2]), handler2)
check:
node2.postMessage(ttl = safeTTL + 1, topic = topic1,
payload = payloads[0]) == true
node2.postMessage(ttl = safeTTL, topic = topic2,
payload = payloads[1]) == true
node2.protocolState(Waku).queue.items.len == 2
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Waku).queue.items.len == 2
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
asyncTest "Filters with PoW":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0x12, 0, 0, 0]
var payload = repeat(byte 0, 10)
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[1].complete(1)
var filter1 = node1.subscribeFilter(
initFilter(topics = @[topic], powReq = 0), handler1)
var filter2 = node1.subscribeFilter(
initFilter(topics = @[topic], powReq = 1_000_000), handler2)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
(await futures[0].withTimeout(waitInterval)) == true
(await futures[1].withTimeout(waitInterval)) == false
node1.protocolState(Waku).queue.items.len == 1
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
asyncTest "Filters with queues":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
for i in countdown(10, 1):
check node2.postMessage(ttl = safeTTL, topic = topic,
payload = payload) == true
await sleepAsync(waitInterval)
check:
node1.getFilterMessages(filter).len() == 10
node1.getFilterMessages(filter).len() == 0
node1.unsubscribeFilter(filter) == true
asyncTest "Local filter notify":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
check:
node1.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 4, 10)) == true
node1.getFilterMessages(filter).len() == 1
node1.unsubscribeFilter(filter) == true
asyncTest "Bloomfilter blocking":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let sendTopic1 = [byte 0x12, 0, 0, 0]
let sendTopic2 = [byte 0x34, 0, 0, 0]
let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]]
let payload = repeat(byte 0, 10)
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == payload
f.complete(1)
var filter = node1.subscribeFilter(
initFilter(topics = filterTopics), handler)
await node1.setBloomFilter(node1.filtersToBloom())
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic1,
payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
(await f.withTimeout(waitInterval)) == false
node1.protocolState(Waku).queue.items.len == 0
resetMessageQueues(node1, node2)
f = newFuture[int]()
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic2,
payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Waku).queue.items.len == 1
node1.unsubscribeFilter(filter) == true
await node1.setBloomFilter(fullBloom())
asyncTest "PoW blocking":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
await node1.setPowRequirement(1_000_000)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Waku).queue.items.len == 0
resetMessageQueues(node1, node2)
await node1.setPowRequirement(0.0)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Waku).queue.items.len == 1
asyncTest "Queue pruning":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
# We need a minimum TTL of 2 as when set to 1 there is a small chance that
# it is already expired after messageInterval due to rounding down of float
# to uint32 in postMessage()
let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire
for i in countdown(10, 1):
check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload)
check node2.protocolState(Waku).queue.items.len == 10
await sleepAsync(waitInterval)
check node1.protocolState(Waku).queue.items.len == 10
await sleepAsync(milliseconds((lowerTTL+1)*1000))
check node1.protocolState(Waku).queue.items.len == 0
check node2.protocolState(Waku).queue.items.len == 0
asyncTest "P2P post":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == repeat(byte 4, 10)
f.complete(1)
var filter = node1.subscribeFilter(initFilter(topics = @[topic],
allowP2P = true), handler)
# Need to be sure that node1 is added in the peerpool of node2 as
# postMessage with target will iterate over the peers
require await eventually(conditionTimeoutMs,
proc(): bool = node2.peerPool.len == 1)
check:
node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true
node2.postMessage(ttl = 10, topic = topic,
payload = repeat(byte 4, 10),
targetPeer = some(toNodeId(node1.keys.pubkey))) == true
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Waku).queue.items.len == 0
node2.protocolState(Waku).queue.items.len == 0
node1.unsubscribeFilter(filter) == true
asyncTest "Light node posting":
var ln = setupTestNode(rng, Waku)
await ln.setLightNode(true)
var fn = setupTestNode(rng, Waku)
fn.startListening()
await ln.peerPool.connectToNode(newNode(fn.toENode()))
let topic = [byte 0, 0, 0, 0]
check:
ln.peerPool.connectedNodes.len() == 1
# normal post
ln.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 0, 10)) == true
ln.protocolState(Waku).queue.items.len == 1
# TODO: add test on message relaying
asyncTest "Connect two light nodes":
var ln1 = setupTestNode(rng, Waku)
var ln2 = setupTestNode(rng, Waku)
await ln1.setLightNode(true)
await ln2.setLightNode(true)
ln2.startListening()
let peer = await ln1.rlpxConnect(newNode(ln2.toENode()))
check peer.isErr() == true
asyncTest "Waku set-topic-interest":
var
wakuTopicNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
# Set one topic so we are not considered a full node
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1])
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update topic interest
check:
await setTopicInterest(wakuTopicNode, @[topic1, topic2])
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await sleepAsync(waitInterval)
check:
wakuTopicNode.protocolState(Waku).queue.items.len == 2
asyncTest "Waku set-minimum-pow":
var
wakuPowNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
wakuNode.startListening()
await wakuPowNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update minimum pow
await setPowRequirement(wakuPowNode, 1.0)
await sleepAsync(waitInterval)
check:
wakuNode.peerPool.len == 1
# check powRequirement is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).powRequirement == 1.0
asyncTest "Waku set-light-node":
var
wakuLightNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
wakuNode.startListening()
await wakuLightNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update minimum pow
await setLightNode(wakuLightNode, true)
await sleepAsync(waitInterval)
check:
wakuNode.peerPool.len == 1
# check lightNode is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).isLightNode
asyncTest "Waku set-bloom-filter":
var
wakuBloomNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
bloom = fullBloom()
topics = @[[byte 0xDA, 0xDA, 0xDA, 0xAA]]
# Set topic interest
discard await wakuBloomNode.setTopicInterest(topics)
wakuBloomNode.startListening()
await wakuNode.peerPool.connectToNode(newNode(wakuBloomNode.toENode()))
# Sanity check
check:
wakuNode.peerPool.len == 1
# check bloom filter is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).bloom == bloom
peer.state(Waku).topics == some(topics)
let hasBloomNodeConnectedCondition = proc(): bool =
wakuBloomNode.peerPool.len == 1
# wait for the peer to be connected on the other side
let hasBloomNodeConnected =
await eventually(conditionTimeoutMs, hasBloomNodeConnectedCondition)
# check bloom filter is updated
check:
hasBloomNodeConnected
# disable one bit in the bloom filter
bloom[0] = 0x0
# and set it
await setBloomFilter(wakuBloomNode, bloom)
let bloomFilterUpdatedCondition = proc(): bool =
for peer in wakuNode.peerPool.peers:
return peer.state(Waku).bloom == bloom and
peer.state(Waku).topics == none(seq[waku_protocol.Topic])
let bloomFilterUpdated =
await eventually(conditionTimeoutMs, bloomFilterUpdatedCondition)
# check bloom filter is updated
check:
bloomFilterUpdated
asyncTest "Waku topic-interest":
var
wakuTopicNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2])
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await eventually(conditionTimeoutMs,
proc (): bool = wakuTopicNode.protocolState(Waku).queue.items.len == 2)
asyncTest "Waku topic-interest versus bloom filter":
var
wakuTopicNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
bloomTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
# It was checked that the topics don't trigger false positives on the bloom.
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2])
wakuTopicNode.protocolState(Waku).config.bloom = some(toBloom([bloomTopic]))
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = bloomTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await sleepAsync(waitInterval)
check:
wakuTopicNode.protocolState(Waku).queue.items.len == 2

View File

@ -1,119 +0,0 @@
{.used.}
import
std/[tables, sequtils, times],
chronos, testutils/unittests, eth/[p2p, async_utils], eth/p2p/peer_pool,
../../waku/v1/protocol/[waku_protocol, waku_mail],
../test_helpers
const
transmissionTimeout = chronos.milliseconds(100)
proc waitForConnected(node: EthereumNode) {.async.} =
while node.peerPool.connectedNodes.len == 0:
await sleepAsync(chronos.milliseconds(1))
procSuite "Waku Mail Client":
let rng = newRng()
var client = setupTestNode(rng, Waku)
var simpleServer = setupTestNode(rng, Waku)
simpleServer.startListening()
let simpleServerNode = newNode(simpleServer.toENode())
let clientNode = newNode(client.toENode())
waitFor client.peerPool.connectToNode(simpleServerNode)
require:
waitFor simpleServer.waitForConnected().withTimeout(transmissionTimeout)
asyncTest "Two peers connected":
check:
client.peerPool.connectedNodes.len() == 1
simpleServer.peerPool.connectedNodes.len() == 1
asyncTest "Mail Request and Request Complete":
let
topic = [byte 0, 0, 0, 0]
bloom = toBloom(@[topic])
lower = 0'u32
upper = epochTime().uint32
limit = 100'u32
request = MailRequest(lower: lower, upper: upper, bloom: @bloom,
limit: limit)
var symKey: SymKey
check client.setPeerTrusted(simpleServerNode.id)
var cursorFut = client.requestMail(simpleServerNode.id, request, symKey, 1)
# Simple mailserver part
let peer = simpleServer.peerPool.connectedNodes[clientNode]
var f: Future[Waku.p2pRequest] = peer.nextMsg(Waku.p2pRequest)
require await f.withTimeout(transmissionTimeout)
let response = f.read()
let decoded = decode(response.envelope.data, symKey = some(symKey))
require decoded.isSome()
var rlp = rlpFromBytes(decoded.get().payload)
let output = rlp.read(MailRequest)
check:
output.lower == lower
output.upper == upper
output.bloom == bloom
output.limit == limit
var dummy: Hash
await peer.p2pRequestComplete(dummy, dummy, @[])
check await cursorFut.withTimeout(transmissionTimeout)
asyncTest "Mail Send":
let topic = [byte 0x12, 0x34, 0x56, 0x78]
let payload = repeat(byte 0, 10)
var f = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == payload
f.complete(1)
let filter = subscribeFilter(client,
initFilter(topics = @[topic], allowP2P = true), handler)
check:
client.setPeerTrusted(simpleServerNode.id)
# ttl 0 to show that ttl should be ignored
# TODO: perhaps not the best way to test this, means no PoW calculation
# may be done, and not sure if that is OK?
simpleServer.postMessage(ttl = 0, topic = topic, payload = payload,
targetPeer = some(clientNode.id))
await f.withTimeout(transmissionTimeout)
client.unsubscribeFilter(filter)
asyncTest "Multiple Client Request and Complete":
var count = 5
proc customHandler(peer: Peer, envelope: Envelope)=
var envelopes: seq[Envelope]
traceAsyncErrors peer.p2pMessage(envelopes)
var cursor: seq[byte]
count = count - 1
if count == 0:
cursor = @[]
else:
cursor = @[byte count]
var dummy: Hash
traceAsyncErrors peer.p2pRequestComplete(dummy, dummy, cursor)
simpleServer.registerP2PRequestHandler(customHandler)
check client.setPeerTrusted(simpleServerNode.id)
var request: MailRequest
var symKey: SymKey
let cursor =
await client.requestMail(simpleServerNode.id, request, symKey, 5)
require cursor.isSome()
check:
cursor.get().len == 0
count == 0
# TODO: Also check for received envelopes.

View File

@ -3,115 +3,13 @@
import
testutils/unittests
import
stew/results,
../../waku/v2/waku_core/message,
../../waku/v2/waku_core/time,
../../waku/v2/utils/compat,
./testlib/common
suite "Waku Payload":
test "Encode/Decode without encryption (version 0)":
## This would be the usual way when no encryption is done or when it is done
## on the application layer.
# Encoding
let
version = 0'u32
payload = @[byte 0, 1, 2]
msg = WakuMessage(payload: payload, version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.decode(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind:None)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isOk()
decoded.get().payload == payload
test "Encode/Decode without encryption (version 0) with encodePayload":
## This is a bit silly and only there for completeness
# Encoding
let
version = 0'u32
payload = Payload(payload: @[byte 0, 1, 2])
encodedPayload = payload.encode(version, rng[])
check encodedPayload.isOk()
let
msg = WakuMessage(payload: encodedPayload.get(), version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.decode(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind:None)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isOk()
decoded.get().payload == payload.payload
test "Encode/Decode with encryption (version 1)":
# Encoding
let
privKey = PrivateKey.random(rng[])
version = 1'u32
payload = Payload(payload: @[byte 0, 1, 2],
dst: some(privKey.toPublicKey()))
encodedPayload = payload.encode(version, rng[])
check encodedPayload.isOk()
let
msg = WakuMessage(payload: encodedPayload.get(), version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.decode(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind: Asymmetric, privKey: privKey)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isOk()
decoded.get().payload == payload.payload
test "Encode with unsupported version":
let
version = 2'u32
payload = Payload(payload: @[byte 0, 1, 2])
encodedPayload = payload.encode(version, rng[])
check encodedPayload.isErr()
test "Decode with unsupported version":
# Encoding
let
version = 2'u32
payload = @[byte 0, 1, 2]
msg = WakuMessage(payload: payload, version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.decode(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind:None)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isErr()
test "Encode/Decode waku message with timestamp":
## Test encoding and decoding of the timestamp field of a WakuMessage

View File

@ -17,7 +17,6 @@ import
../../../waku/v2/node/jsonrpc/relay/client as relay_api_client,
../../../waku/v2/waku_core,
../../../waku/v2/waku_relay,
../../../waku/v2/utils/compat,
../testlib/common,
../testlib/wakucore,
../testlib/wakunode
@ -211,186 +210,3 @@ suite "Waku v2 JSON-RPC API - Relay":
await server.stop()
await server.closeWait()
await allFutures(srcNode.stop(), dstNode.stop())
suite "Waku v2 JSON-RPC API - Relay (Private)":
asyncTest "generate symmetric keys and encrypt/decrypt communication":
let
pubSubTopic = "test-relay-pubsub-topic"
contentTopic = "test-relay-content-topic"
let
srcNode = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(0))
relNode = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(0))
dstNode = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(0))
await allFutures(srcNode.start(), relNode.start(), dstNode.start())
await srcNode.mountRelay(@[pubSubTopic])
await relNode.mountRelay(@[pubSubTopic])
await dstNode.mountRelay(@[pubSubTopic])
await srcNode.connectToNodes(@[relNode.peerInfo.toRemotePeerInfo()])
await relNode.connectToNodes(@[dstNode.peerInfo.toRemotePeerInfo()])
# Setup two servers so we can see both sides of encrypted communication
let
srcRpcPort = Port(8554)
srcTa = initTAddress(ValidIpAddress.init("127.0.0.1"), srcRpcPort)
srcServer = newRpcHttpServer([srcTa])
let srcMessageCache = newTestMessageCache()
installRelayApiHandlers(srcNode, srcServer, srcMessageCache)
installRelayPrivateApiHandlers(srcNode, srcServer, srcMessageCache)
srcServer.start()
let
dstRpcPort = Port(8555)
dstTa = initTAddress(ValidIpAddress.init("127.0.0.1"), dstRpcPort)
dstServer = newRpcHttpServer([dstTa])
let dstMessageCache = newTestMessageCache()
installRelayApiHandlers(dstNode, dstServer, dstMessageCache)
installRelayPrivateApiHandlers(dstNode, dstServer, dstMessageCache)
dstServer.start()
let srcClient = newRpcHttpClient()
await srcClient.connect("127.0.0.1", srcRpcPort, false)
let dstClient = newRpcHttpClient()
await dstClient.connect("127.0.0.1", dstRpcPort, false)
## Given
let
payload = @[byte 38]
payloadBase64 = base64.encode(payload)
let message = WakuMessageRPC(
payload: payloadBase64,
contentTopic: some(contentTopic),
timestamp: some(now()),
)
## When
let symkey = await dstClient.get_waku_v2_private_v1_symmetric_key()
let posted = await srcCLient.post_waku_v2_private_v1_symmetric_message(pubSubTopic, message, symkey = (%symkey).getStr())
require:
posted
await sleepAsync(100.millis)
# Let's see if we can receive, and decrypt, this message on dstNode
var messages = await dstClient.get_waku_v2_private_v1_symmetric_messages(pubSubTopic, symkey = (%symkey).getStr())
check:
messages.len == 1
messages[0].payload == payloadBase64
messages[0].contentTopic == message.contentTopic
messages[0].timestamp == message.timestamp
messages[0].version.get() == 1'u32
# Ensure that read messages are cleared from cache
messages = await dstClient.get_waku_v2_private_v1_symmetric_messages(pubSubTopic, symkey = (%symkey).getStr())
check:
messages.len == 0
## Cleanup
await srcServer.stop()
await srcServer.closeWait()
await dstServer.stop()
await dstServer.closeWait()
await allFutures(srcNode.stop(), relNode.stop(), dstNode.stop())
asyncTest "generate asymmetric keys and encrypt/decrypt communication":
let
pubSubTopic = "test-relay-pubsub-topic"
contentTopic = "test-relay-content-topic"
let
srcNode = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(0))
relNode = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(0))
dstNode = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("127.0.0.1"), Port(0))
await allFutures(srcNode.start(), relNode.start(), dstNode.start())
await srcNode.mountRelay(@[pubSubTopic])
await relNode.mountRelay(@[pubSubTopic])
await dstNode.mountRelay(@[pubSubTopic])
await srcNode.connectToNodes(@[relNode.peerInfo.toRemotePeerInfo()])
await relNode.connectToNodes(@[dstNode.peerInfo.toRemotePeerInfo()])
# Setup two servers so we can see both sides of encrypted communication
let
srcRpcPort = Port(8554)
srcTa = initTAddress(ValidIpAddress.init("127.0.0.1"), srcRpcPort)
srcServer = newRpcHttpServer([srcTa])
let srcMessageCache = newTestMessageCache()
installRelayApiHandlers(srcNode, srcServer, srcMessageCache)
installRelayPrivateApiHandlers(srcNode, srcServer, srcMessageCache)
srcServer.start()
let
dstRpcPort = Port(8555)
dstTa = initTAddress(ValidIpAddress.init("127.0.0.1"), dstRpcPort)
dstServer = newRpcHttpServer([dstTa])
let dstMessageCache = newTestMessageCache()
installRelayApiHandlers(dstNode, dstServer, dstMessageCache)
installRelayPrivateApiHandlers(dstNode, dstServer, dstMessageCache)
dstServer.start()
let srcClient = newRpcHttpClient()
await srcClient.connect("127.0.0.1", srcRpcPort, false)
let dstClient = newRpcHttpClient()
await dstClient.connect("127.0.0.1", dstRpcPort, false)
## Given
let
payload = @[byte 38]
payloadBase64 = base64.encode(payload)
let message = WakuMessageRPC(
payload: payloadBase64,
contentTopic: some(contentTopic),
timestamp: some(now()),
)
## When
let keypair = await dstClient.get_waku_v2_private_v1_asymmetric_keypair()
# Now publish and encrypt a message on srcNode using dstNode's public key
let posted = await srcClient.post_waku_v2_private_v1_asymmetric_message(pubSubTopic, message, publicKey = (%keypair.pubkey).getStr())
require:
posted
await sleepAsync(100.millis)
# Let's see if we can receive, and decrypt, this message on dstNode
var messages = await dstClient.get_waku_v2_private_v1_asymmetric_messages(pubSubTopic, privateKey = (%keypair.seckey).getStr())
check:
messages.len == 1
messages[0].payload == payloadBase64
messages[0].contentTopic == message.contentTopic
messages[0].timestamp == message.timestamp
messages[0].version.get() == 1'u32
# Ensure that read messages are cleared from cache
messages = await dstClient.get_waku_v2_private_v1_asymmetric_messages(pubSubTopic, privateKey = (%keypair.seckey).getStr())
check:
messages.len == 0
## Cleanup
await srcServer.stop()
await srcServer.closeWait()
await dstServer.stop()
await dstServer.closeWait()
await allFutures(srcNode.stop(), relNode.stop(), dstNode.stop())

View File

@ -1,58 +0,0 @@
{.used.}
import
stew/shims/net as stewNet,
testutils/unittests,
chronos,
libp2p/switch
import
../../waku/v2/waku_core
import
../../apps/wakubridge/message_compat
suite "WakuBridge - Message compat":
test "Topics are correctly converted between Waku v1 and Waku v2":
# Expected cases
check:
toV1Topic(ContentTopic("/waku/1/0x00000000/rfc26")) == [byte 0x00, byte 0x00, byte 0x00, byte 0x00]
toV2ContentTopic([byte 0x00, byte 0x00, byte 0x00, byte 0x00]) == ContentTopic("/waku/1/0x00000000/rfc26")
toV1Topic(ContentTopic("/waku/1/0xffffffff/rfc26")) == [byte 0xff, byte 0xff, byte 0xff, byte 0xff]
toV2ContentTopic([byte 0xff, byte 0xff, byte 0xff, byte 0xff]) == ContentTopic("/waku/1/0xffffffff/rfc26")
toV1Topic(ContentTopic("/waku/1/0x1a2b3c4d/rfc26")) == [byte 0x1a, byte 0x2b, byte 0x3c, byte 0x4d]
toV2ContentTopic([byte 0x1a, byte 0x2b, byte 0x3c, byte 0x4d]) == ContentTopic("/waku/1/0x1a2b3c4d/rfc26")
# Topic conversion should still work where '0x' prefix is omitted from <v1 topic byte array>
toV1Topic(ContentTopic("/waku/1/1a2b3c4d/rfc26")) == [byte 0x1a, byte 0x2b, byte 0x3c, byte 0x4d]
# Invalid cases
test "Invalid topics conversion between Waku v1 and Waku v2 fails":
expect ValueError:
# Content topic not namespaced
discard toV1Topic(ContentTopic("this-is-my-content"))
expect ValueError:
# Content topic name too short
discard toV1Topic(ContentTopic("/waku/1/0x112233/rfc26"))
expect ValueError:
# Content topic name not hex
discard toV1Topic(ContentTopic("/waku/1/my-content/rfc26"))
test "Verify that WakuMessages are on bridgeable content topics":
let
validCT = ContentTopic("/waku/1/my-content/rfc26")
unnamespacedCT = ContentTopic("just_a_bunch_of_words")
invalidAppCT = ContentTopic("/facebook/1/my-content/rfc26")
invalidVersionCT = ContentTopic("/waku/2/my-content/rfc26")
check:
WakuMessage(contentTopic: validCT).isBridgeable() == true
WakuMessage(contentTopic: unnamespacedCT).isBridgeable() == false
WakuMessage(contentTopic: invalidAppCT).isBridgeable() == false
WakuMessage(contentTopic: invalidVersionCT).isBridgeable() == false

View File

@ -1,217 +0,0 @@
{.used.}
import
std/[sequtils, strutils, tables],
stew/[results, byteutils],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
libp2p/crypto/crypto,
libp2p/crypto/secp,
libp2p/peerid,
libp2p/multiaddress,
libp2p/switch,
libp2p/protocols/pubsub/rpc/messages,
libp2p/protocols/pubsub/pubsub,
eth/p2p,
eth/keys
import
../../waku/v1/protocol/waku_protocol,
../../waku/v2/waku_core,
../../waku/v2/waku_node,
../../waku/v2/waku_enr,
../../waku/v2/utils/compat,
../test_helpers
import
../../apps/wakubridge/wakubridge
procSuite "WakuBridge":
###############
# Suite setup #
###############
const DefaultBridgeTopic = "/waku/2/default-bridge/proto"
let
rng = keys.newRng()
cryptoRng = crypto.newRng()
# Bridge
nodev1Key = keys.KeyPair.random(rng[])
nodev2Key = crypto.PrivateKey.random(Secp256k1, cryptoRng[])[]
bridge = WakuBridge.new(
nodev1Key= nodev1Key,
nodev1Address = localAddress(62200),
powRequirement = 0.002,
rng = rng,
nodev2Key = nodev2Key,
nodev2BindIp = ValidIpAddress.init("0.0.0.0"), nodev2BindPort= Port(62201),
nodev2PubsubTopic = DefaultBridgeTopic)
# Waku v1 node
v1Node = setupTestNode(rng, Waku)
# Waku v2 node
v2NodeKey = crypto.PrivateKey.random(Secp256k1, cryptoRng[])[]
var builder = EnrBuilder.init(v2NodeKey)
builder.withIpAddressAndPorts(none(ValidIpAddress), none(Port), none(Port))
let record = builder.build().tryGet()
let
v2Node = block:
var builder = WakuNodeBuilder.init()
builder.withNodeKey(v2NodeKey)
builder.withRecord(record)
builder.withNetworkConfigurationDetails(ValidIpAddress.init("0.0.0.0"), Port(62203)).tryGet()
builder.build().tryGet()
contentTopic = ContentTopic("/waku/1/0x1a2b3c4d/rfc26")
topic = [byte 0x1a, byte 0x2b, byte 0x3c, byte 0x4d]
payloadV1 = "hello from V1".toBytes()
payloadV2 = "hello from V2".toBytes()
encodedPayloadV2 = Payload(payload: payloadV2, dst: some(nodev1Key.pubKey))
message = WakuMessage(payload: encodedPayloadV2.encode(1, rng[]).get(), contentTopic: contentTopic, version: 1)
########################
# Tests setup/teardown #
########################
# setup:
# # Runs before each test
# teardown:
# # Runs after each test
###############
# Suite tests #
###############
asyncTest "Messages are bridged between Waku v1 and Waku v2":
# Setup test
waitFor bridge.start()
waitFor v2Node.start()
await v2Node.mountRelay(@[DefaultBridgeTopic])
v2Node.wakuRelay.triggerSelf = false
discard waitFor v1Node.rlpxConnect(newNode(bridge.nodev1.toENode()))
waitFor waku_node.connectToNodes(v2Node, @[bridge.nodev2.switch.peerInfo.toRemotePeerInfo()])
var completionFut = newFuture[bool]()
proc relayHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
if msg.version == 1:
check:
# Message fields are as expected
msg.contentTopic == contentTopic # Topic translation worked
string.fromBytes(msg.payload).contains("from V1")
completionFut.complete(true)
v2Node.subscribe(DefaultBridgeTopic, relayHandler)
# Test bridging from V2 to V1
await v2Node.publish(DefaultBridgeTopic, message)
await sleepAsync(1.seconds)
check:
# v1Node received message published by v2Node
v1Node.protocolState(Waku).queue.items.len == 1
let
msg = v1Node.protocolState(Waku).queue.items[0]
decodedPayload = msg.env.data.decode(some(nodev1Key.seckey), none[SymKey]()).get()
check:
# Message fields are as expected
msg.env.topic == topic # Topic translation worked
string.fromBytes(decodedPayload.payload).contains("from V2")
# Test bridging from V1 to V2
check:
v1Node.postMessage(ttl = 5,
topic = topic,
payload = payloadV1) == true
# v2Node received payload published by v1Node
await completionFut.withTimeout(5.seconds)
# Test filtering of WakuMessage duplicates
v1Node.resetMessageQueue()
await v2Node.publish(DefaultBridgeTopic, message)
await sleepAsync(1.seconds)
check:
# v1Node did not receive duplicate of previous message
v1Node.protocolState(Waku).queue.items.len == 0
# Teardown test
bridge.nodeV1.resetMessageQueue()
v1Node.resetMessageQueue()
waitFor allFutures([bridge.stop(), v2Node.stop()])
asyncTest "Bridge manages its v1 connections":
# Given
let
# Waku v1 node
v1NodePool = @[setupTestNode(rng, Waku),
setupTestNode(rng, Waku),
setupTestNode(rng, Waku)]
targetV1Peers = v1NodePool.len() - 1
# Bridge
v1Bridge = WakuBridge.new(
nodev1Key= nodev1Key,
nodev1Address = localAddress(62210),
powRequirement = 0.002,
rng = rng,
nodev2Key = nodev2Key,
nodev2BindIp = ValidIpAddress.init("0.0.0.0"), nodev2BindPort= Port(62211),
nodev2PubsubTopic = DefaultBridgeTopic,
v1Pool = v1NodePool.mapIt(newNode(it.toEnode())),
targetV1Peers = targetV1Peers)
for node in v1NodePool:
node.startListening()
# When
waitFor v1Bridge.start()
await sleepAsync(250.millis) # Give peers some time to connect
# Then
check:
v1Bridge.nodev1.peerPool.connectedNodes.len() == targetV1Peers
# When
let connected = v1Bridge.nodev1.peerPool.connectedNodes
for peer in connected.values():
waitFor peer.disconnect(SubprotocolReason)
# Then
check:
v1Bridge.nodev1.peerPool.connectedNodes.len() == 0
# When
discard v1Bridge.maintenanceLoop() # Forces one more run of the maintenance loop
await sleepAsync(250.millis) # Give peers some time to connect
# Then
check:
v1Bridge.nodev1.peerPool.connectedNodes.len() == targetV1Peers
# Cleanup
v1Bridge.nodev1.resetMessageQueue()
for node in v1NodePool:
node.resetMessageQueue()
waitFor v1Bridge.stop()

View File

@ -1,382 +0,0 @@
#
# Ethereum P2P
# (c) Copyright 2018-2021
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, options, unittest, tables],
nimcrypto/hash,
eth/[keys, rlp],
../../waku/whisper/whisper_types as whisper
let rng = newRng()
suite "Whisper payload":
test "should roundtrip without keys":
let payload = Payload(payload: @[byte 0, 1, 2])
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
decoded.get().src.isNone()
decoded.get().padding.get().len == 251 # 256 -1 -1 -3
test "should roundtrip with symmetric encryption":
var symKey: SymKey
let payload = Payload(symKey: some(symKey), payload: @[byte 0, 1, 2])
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get(), symKey = some(symKey))
check:
decoded.isSome()
payload.payload == decoded.get().payload
decoded.get().src.isNone()
decoded.get().padding.get().len == 251 # 256 -1 -1 -3
test "should roundtrip with signature":
let privKey = PrivateKey.random(rng[])
let payload = Payload(src: some(privKey), payload: @[byte 0, 1, 2])
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
privKey.toPublicKey() == decoded.get().src.get()
decoded.get().padding.get().len == 186 # 256 -1 -1 -3 -65
test "should roundtrip with asymmetric encryption":
let privKey = PrivateKey.random(rng[])
let payload = Payload(dst: some(privKey.toPublicKey()),
payload: @[byte 0, 1, 2])
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get(), dst = some(privKey))
check:
decoded.isSome()
payload.payload == decoded.get().payload
decoded.get().src.isNone()
decoded.get().padding.get().len == 251 # 256 -1 -1 -3
test "should return specified bloom":
# Geth test: https://github.com/ethersphere/go-ethereum/blob/d3441ebb563439bac0837d70591f92e2c6080303/whisper/whisperv6/whisper_test.go#L834
let top0 = [byte 0, 0, 255, 6]
var x: Bloom
x[0] = byte 1
x[32] = byte 1
x[^1] = byte 128
check @(top0.topicBloom) == @x
suite "Whisper payload padding":
test "should do max padding":
let payload = Payload(payload: repeat(byte 1, 254))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
decoded.get().padding.isSome()
decoded.get().padding.get().len == 256 # as dataLen == 256
test "should do max padding with signature":
let privKey = PrivateKey.random(rng[])
let payload = Payload(src: some(privKey), payload: repeat(byte 1, 189))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
privKey.toPublicKey() == decoded.get().src.get()
decoded.get().padding.isSome()
decoded.get().padding.get().len == 256 # as dataLen == 256
test "should do min padding":
let payload = Payload(payload: repeat(byte 1, 253))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
decoded.get().padding.isSome()
decoded.get().padding.get().len == 1 # as dataLen == 255
test "should do min padding with signature":
let privKey = PrivateKey.random(rng[])
let payload = Payload(src: some(privKey), payload: repeat(byte 1, 188))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
privKey.toPublicKey() == decoded.get().src.get()
decoded.get().padding.isSome()
decoded.get().padding.get().len == 1 # as dataLen == 255
test "should roundtrip custom padding":
let payload = Payload(payload: repeat(byte 1, 10),
padding: some(repeat(byte 2, 100)))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
decoded.get().padding.isSome()
payload.padding.get() == decoded.get().padding.get()
test "should roundtrip custom 0 padding":
let padding: seq[byte] = @[]
let payload = Payload(payload: repeat(byte 1, 10),
padding: some(padding))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
decoded.get().padding.isNone()
test "should roundtrip custom padding with signature":
let privKey = PrivateKey.random(rng[])
let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10),
padding: some(repeat(byte 2, 100)))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
privKey.toPublicKey() == decoded.get().src.get()
decoded.get().padding.isSome()
payload.padding.get() == decoded.get().padding.get()
test "should roundtrip custom 0 padding with signature":
let padding: seq[byte] = @[]
let privKey = PrivateKey.random(rng[])
let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10),
padding: some(padding))
let encoded = whisper.encode(rng[], payload)
let decoded = whisper.decode(encoded.get())
check:
decoded.isSome()
payload.payload == decoded.get().payload
privKey.toPublicKey() == decoded.get().src.get()
decoded.get().padding.isNone()
# example from https://github.com/paritytech/parity-ethereum/blob/93e1040d07e385d1219d00af71c46c720b0a1acf/whisper/src/message.rs#L439
# Shared fixtures: three envelopes identical in every field except `nonce`,
# reused by the "Whisper envelope" and "Whisper queue" suites below.
let
  env0 = Envelope(
    expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0],
    data: repeat(byte 9, 256), nonce: 1010101)
  env1 = Envelope(
    expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0],
    data: repeat(byte 9, 256), nonce: 1010102)
  env2 = Envelope(
    expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0],
    data: repeat(byte 9, 256), nonce: 1010103)
suite "Whisper envelope":
  # Returns the envelope's PoW hash as a hex string together with its
  # calculated proof-of-work value. The envelope size fed into `calcPow`
  # deliberately follows the go-ethereum definition (short RLP); the
  # commented-out alternatives document the other known size definitions.
  proc hashAndPow(env: Envelope): (string, float64) =
    # This is the current implementation of go-ethereum
    let size = env.toShortRlp().len().uint32
    # This is our current implementation in `whisper_protocol.nim`
    # let size = env.len().uint32
    # This is the EIP-627 specification
    # let size = env.toRlp().len().uint32
    let hash = env.calcPowHash()
    ($hash, calcPow(size, env.ttl, hash))

  test "PoW calculation leading zeroes tests":
    # Test values from Parity, in message.rs
    let testHashes = [
      # 256 leading zeroes
      "0x0000000000000000000000000000000000000000000000000000000000000000",
      # 255 leading zeroes
      "0x0000000000000000000000000000000000000000000000000000000000000001",
      # no leading zeroes
      "0xff00000000000000000000000000000000000000000000000000000000000000"
    ]
    # Expected PoW halves with each fewer leading zero bit in the hash.
    check:
      calcPow(1, 1, Hash.fromHex(testHashes[0])) ==
        115792089237316200000000000000000000000000000000000000000000000000000000000000.0
      calcPow(1, 1, Hash.fromHex(testHashes[1])) ==
        57896044618658100000000000000000000000000000000000000000000000000000000000000.0
      calcPow(1, 1, Hash.fromHex(testHashes[2])) == 1.0

    # Test values from go-ethereum whisperv6 in envelope_test
    # (NB: `hashAndPoW` resolves to `hashAndPow` above — Nim identifiers are
    # style-insensitive past the first character.)
    var env = Envelope(ttl: 1, data: @[byte 0xde, 0xad, 0xbe, 0xef])
    # PoW calculation with no leading zeroes
    env.nonce = 100000
    check hashAndPoW(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6",
      0.07692307692307693)
    # PoW calculation with 8 leading zeroes
    env.nonce = 276
    check hashAndPoW(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C",
      19.692307692307693)
suite "Whisper queue":
  test "should throw out lower proof-of-work item when full":
    var queue = initQueue(1)
    let msg0 = initMessage(env0)
    let msg1 = initMessage(env1)
    discard queue.add(msg0)
    discard queue.add(msg1)
    check:
      queue.items.len() == 1
      # Only the higher-PoW message survives; which of the two that is
      # depends on the nonce, hence the conditional expected value.
      queue.items[0].env.nonce ==
        (if msg0.pow > msg1.pow: msg0.env.nonce else: msg1.env.nonce)

  test "should not throw out messages as long as there is capacity":
    var queue = initQueue(2)
    check:
      queue.add(initMessage(env0)) == true
      queue.add(initMessage(env1)) == true
      queue.items.len() == 2

  test "check if order of queue is by decreasing PoW":
    var queue = initQueue(3)
    let msg0 = initMessage(env0)
    let msg1 = initMessage(env1)
    let msg2 = initMessage(env2)
    discard queue.add(msg0)
    discard queue.add(msg1)
    discard queue.add(msg2)
    # The queue keeps its items sorted by descending PoW.
    check:
      queue.items.len() == 3
      queue.items[0].pow > queue.items[1].pow and
        queue.items[1].pow > queue.items[2].pow

  test "check field order against expected rlp order":
    # Envelope RLP serialisation must list fields in the order
    # expiry, ttl, topic, data, nonce.
    check rlp.encode(env0) ==
      rlp.encodeList(env0.expiry, env0.ttl, env0.topic, env0.data, env0.nonce)
# To test filters we do not care if the msg is valid or allowed
# Builds a Message carrying payload [0, 1, 2] with the requested encryption
# (asymmetric via `pubKey`, symmetric via `symKey`), optional signing key
# `src`, the given `topic` and optional explicit `padding`. Expiry, ttl and
# nonce are fixed dummy values — sufficient for filter matching tests.
proc prepFilterTestMsg(pubKey = none[PublicKey](), symKey = none[SymKey](),
                       src = none[PrivateKey](), topic: Topic,
                       padding = none[seq[byte]]()): Message =
  let payload = Payload(dst: pubKey, symKey: symKey, src: src,
                        payload: @[byte 0, 1, 2], padding: padding)
  let encoded = whisper.encode(rng[], payload)
  let env = Envelope(expiry: 1, ttl: 1, topic: topic, data: encoded.get(),
                     nonce: 0)
  result = initMessage(env)
suite "Whisper filter":
test "should notify filter on message with symmetric encryption":
var symKey: SymKey
let topic = [byte 0, 0, 0, 0]
let msg = prepFilterTestMsg(symKey = some(symKey), topic = topic)
var filters = initTable[string, Filter]()
let filter = initFilter(symKey = some(symKey), topics = @[topic])
let filterId = subscribeFilter(rng[], filters, filter)
notify(filters, msg)
let messages = filters.getFilterMessages(filterId)
check:
messages.len == 1
messages[0].decoded.src.isNone()
messages[0].dst.isNone()
test "should notify filter on message with asymmetric encryption":
let privKey = PrivateKey.random(rng[])
let topic = [byte 0, 0, 0, 0]
let msg = prepFilterTestMsg(pubKey = some(privKey.toPublicKey()),
topic = topic)
var filters = initTable[string, Filter]()
let filter = initFilter(privateKey = some(privKey), topics = @[topic])
let filterId = subscribeFilter(rng[], filters, filter)
notify(filters, msg)
let messages = filters.getFilterMessages(filterId)
check:
messages.len == 1
messages[0].decoded.src.isNone()
messages[0].dst.isSome()
test "should notify filter on message with signature":
let privKey = PrivateKey.random(rng[])
let topic = [byte 0, 0, 0, 0]
let msg = prepFilterTestMsg(src = some(privKey), topic = topic)
var filters = initTable[string, Filter]()
let filter = initFilter(src = some(privKey.toPublicKey()),
topics = @[topic])
let filterId = subscribeFilter(rng[], filters, filter)
notify(filters, msg)
let messages = filters.getFilterMessages(filterId)
check:
messages.len == 1
messages[0].decoded.src.isSome()
messages[0].dst.isNone()
test "test notify of filter against PoW requirement":
let topic = [byte 0, 0, 0, 0]
let padding = some(repeat(byte 0, 251))
# this message has a PoW of 0.02962962962962963, number should be updated
# in case PoW algorithm changes or contents of padding, payload, topic, etc.
# update: now with NON rlp encoded envelope size the PoW of this message is
# 0.014492753623188406
let msg = prepFilterTestMsg(topic = topic, padding = padding)
var filters = initTable[string, Filter]()
let
filterId1 = subscribeFilter(rng[], filters,
initFilter(topics = @[topic], powReq = 0.014492753623188406))
filterId2 = subscribeFilter(rng[], filters,
initFilter(topics = @[topic], powReq = 0.014492753623188407))
notify(filters, msg)
check:
filters.getFilterMessages(filterId1).len == 1
filters.getFilterMessages(filterId2).len == 0
test "test notify of filter on message with certain topic":
let
topic1 = [byte 0xAB, 0x12, 0xCD, 0x34]
topic2 = [byte 0, 0, 0, 0]
let msg = prepFilterTestMsg(topic = topic1)
var filters = initTable[string, Filter]()
let
filterId1 = subscribeFilter(rng[], filters, initFilter(topics = @[topic1]))
filterId2 = subscribeFilter(rng[], filters, initFilter(topics = @[topic2]))
notify(filters, msg)
check:
filters.getFilterMessages(filterId1).len == 1
filters.getFilterMessages(filterId2).len == 0

View File

@ -1,71 +0,0 @@
#
# Ethereum P2P
# (c) Copyright 2018-2021
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, unittest, times],
../../waku/whisper/whisper_protocol as whisper
suite "Whisper envelope validation":
  test "should validate and allow envelope according to config":
    # Baseline: a fresh, unexpired envelope whose topic is in the bloom
    # filter must pass both `valid()` and `allowed()`.
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]
    let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(),
                               isLightNode: false, maxMsgSize: defaultMaxMsgSize)
    let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid()

    let msg = initMessage(env)
    check msg.allowed(config)

  test "should invalidate envelope due to ttl 0":
    # A zero ttl is never valid.
    let ttl = 0'u32
    let topic = [byte 1, 2, 3, 4]
    let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(),
                               isLightNode: false, maxMsgSize: defaultMaxMsgSize)
    let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should invalidate envelope due to expired":
    # expiry == now means the envelope's lifetime has already elapsed.
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]
    let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(),
                               isLightNode: false, maxMsgSize: defaultMaxMsgSize)
    let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should invalidate envelope due to in the future":
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]
    let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(),
                               isLightNode: false, maxMsgSize: defaultMaxMsgSize)
    # there is currently a 2 second tolerance, hence the + 3
    let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should not allow envelope due to bloom filter":
    # Topic absent from the node's bloom filter -> envelope not allowed,
    # even though it is otherwise well-formed.
    let topic = [byte 1, 2, 3, 4]
    let wrongTopic = [byte 9, 8, 7, 6]
    let config = WhisperConfig(powRequirement: 0, bloom: wrongTopic.topicBloom(),
                               isLightNode: false, maxMsgSize: defaultMaxMsgSize)
    let env = Envelope(expiry:100000 , ttl: 30, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    let msg = initMessage(env)
    check msg.allowed(config) == false

View File

@ -1,329 +0,0 @@
#
# Ethereum P2P
# (c) Copyright 2018-2021
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, options, tables],
chronos, testutils/unittests, bearssl,
eth/[keys, p2p], eth/p2p/peer_pool,
../../waku/whisper/whisper_protocol as whisper,
../test_helpers
# Clears the Whisper message queue of every given node so that each test
# starts from an empty queue.
proc resetMessageQueues(nodes: varargs[EthereumNode]) =
  for node in nodes:
    node.resetMessageQueue()
let safeTTL = 5'u32
let waitInterval = messageInterval + 150.milliseconds
procSuite "Whisper connections":
let rng = newRng()
var node1 = setupTestNode(rng, Whisper)
var node2 = setupTestNode(rng, Whisper)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
asyncTest "Two peers connected":
check:
node1.peerPool.connectedNodes.len() == 1
asyncTest "Filters with encryption and signing":
let encryptKeyPair = KeyPair.random(rng[])
let signKeyPair = KeyPair.random(rng[])
var symKey: SymKey
let topic = [byte 0x12, 0, 0, 0]
var filters: seq[string] = @[]
var payloads = [repeat(byte 1, 10), repeat(byte 2, 10),
repeat(byte 3, 10), repeat(byte 4, 10)]
var futures = [newFuture[int](), newFuture[int](),
newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[0] or msg.decoded.payload == payloads[1]
count += 1
if count == 2: futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
proc handler3(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[2] or msg.decoded.payload == payloads[3]
count += 1
if count == 2: futures[2].complete(1)
proc handler4(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[3]
futures[3].complete(1)
# Filters
# filter for encrypted asym
filters.add(node1.subscribeFilter(initFilter(privateKey = some(encryptKeyPair.seckey),
topics = @[topic]), handler1))
# filter for encrypted asym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
privateKey = some(encryptKeyPair.seckey),
topics = @[topic]), handler2))
# filter for encrypted sym
filters.add(node1.subscribeFilter(initFilter(symKey = some(symKey),
topics = @[topic]), handler3))
# filter for encrypted sym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
symKey = some(symKey),
topics = @[topic]), handler4))
# Messages
check:
# encrypted asym
node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL,
topic = topic, payload = payloads[0]) == true
# encrypted asym + signed
node2.postMessage(some(encryptKeyPair.pubkey),
src = some(signKeyPair.seckey), ttl = safeTTL,
topic = topic, payload = payloads[1]) == true
# encrypted sym
node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic,
payload = payloads[2]) == true
# encrypted sym + signed
node2.postMessage(symKey = some(symKey),
src = some(signKeyPair.seckey),
ttl = safeTTL, topic = topic,
payload = payloads[3]) == true
node2.protocolState(Whisper).queue.items.len == 4
check:
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Whisper).queue.items.len == 4
for filter in filters:
check node1.unsubscribeFilter(filter) == true
resetMessageQueues(node1, node2)
asyncTest "Filters with topics":
let topic1 = [byte 0x12, 0, 0, 0]
let topic2 = [byte 0x34, 0, 0, 0]
var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[0]
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
var filter1 = node1.subscribeFilter(initFilter(topics = @[topic1]), handler1)
var filter2 = node1.subscribeFilter(initFilter(topics = @[topic2]), handler2)
check:
node2.postMessage(ttl = safeTTL + 1, topic = topic1,
payload = payloads[0]) == true
node2.postMessage(ttl = safeTTL, topic = topic2,
payload = payloads[1]) == true
node2.protocolState(Whisper).queue.items.len == 2
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Whisper).queue.items.len == 2
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
resetMessageQueues(node1, node2)
asyncTest "Filters with PoW":
let topic = [byte 0x12, 0, 0, 0]
var payload = repeat(byte 0, 10)
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[1].complete(1)
var filter1 = node1.subscribeFilter(initFilter(topics = @[topic], powReq = 0),
handler1)
var filter2 = node1.subscribeFilter(initFilter(topics = @[topic],
powReq = 1_000_000), handler2)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
(await futures[0].withTimeout(waitInterval)) == true
(await futures[1].withTimeout(waitInterval)) == false
node1.protocolState(Whisper).queue.items.len == 1
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
resetMessageQueues(node1, node2)
asyncTest "Filters with queues":
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
for i in countdown(10, 1):
check node2.postMessage(ttl = safeTTL, topic = topic,
payload = payload) == true
await sleepAsync(waitInterval)
check:
node1.getFilterMessages(filter).len() == 10
node1.getFilterMessages(filter).len() == 0
node1.unsubscribeFilter(filter) == true
resetMessageQueues(node1, node2)
asyncTest "Local filter notify":
let topic = [byte 0, 0, 0, 0]
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
check:
node1.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 4, 10)) == true
node1.getFilterMessages(filter).len() == 1
node1.unsubscribeFilter(filter) == true
await sleepAsync(waitInterval)
resetMessageQueues(node1, node2)
asyncTest "Bloomfilter blocking":
let sendTopic1 = [byte 0x12, 0, 0, 0]
let sendTopic2 = [byte 0x34, 0, 0, 0]
let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]]
let payload = repeat(byte 0, 10)
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == payload
f.complete(1)
var filter = node1.subscribeFilter(initFilter(topics = filterTopics), handler)
await node1.setBloomFilter(node1.filtersToBloom())
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic1,
payload = payload) == true
node2.protocolState(Whisper).queue.items.len == 1
(await f.withTimeout(waitInterval)) == false
node1.protocolState(Whisper).queue.items.len == 0
resetMessageQueues(node1, node2)
f = newFuture[int]()
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic2,
payload = payload) == true
node2.protocolState(Whisper).queue.items.len == 1
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Whisper).queue.items.len == 1
node1.unsubscribeFilter(filter) == true
await node1.setBloomFilter(fullBloom())
resetMessageQueues(node1, node2)
asyncTest "PoW blocking":
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
await node1.setPowRequirement(1_000_000)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Whisper).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Whisper).queue.items.len == 0
resetMessageQueues(node1, node2)
await node1.setPowRequirement(0.0)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Whisper).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Whisper).queue.items.len == 1
resetMessageQueues(node1, node2)
asyncTest "Queue pruning":
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
# We need a minimum TTL of 2 as when set to 1 there is a small chance that
# it is already expired after messageInterval due to rounding down of float
# to uint32 in postMessage()
let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire
for i in countdown(10, 1):
check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) == true
check node2.protocolState(Whisper).queue.items.len == 10
await sleepAsync(waitInterval)
check node1.protocolState(Whisper).queue.items.len == 10
await sleepAsync(milliseconds((lowerTTL+1)*1000))
check node1.protocolState(Whisper).queue.items.len == 0
check node2.protocolState(Whisper).queue.items.len == 0
resetMessageQueues(node1, node2)
asyncTest "P2P post":
let topic = [byte 0, 0, 0, 0]
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == repeat(byte 4, 10)
f.complete(1)
var filter = node1.subscribeFilter(initFilter(topics = @[topic],
allowP2P = true), handler)
check:
node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true
node2.postMessage(ttl = 10, topic = topic,
payload = repeat(byte 4, 10),
targetPeer = some(toNodeId(node1.keys.pubkey))) == true
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Whisper).queue.items.len == 0
node2.protocolState(Whisper).queue.items.len == 0
node1.unsubscribeFilter(filter) == true
asyncTest "Light node posting":
var ln1 = setupTestNode(rng, Whisper)
ln1.setLightNode(true)
await ln1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
check:
# normal post
ln1.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 0, 10)) == false
ln1.protocolState(Whisper).queue.items.len == 0
# P2P post
ln1.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 0, 10),
targetPeer = some(toNodeId(node2.keys.pubkey))) == true
ln1.protocolState(Whisper).queue.items.len == 0
asyncTest "Connect two light nodes":
var ln1 = setupTestNode(rng, Whisper)
var ln2 = setupTestNode(rng, Whisper)
ln1.setLightNode(true)
ln2.setLightNode(true)
ln2.startListening()
let peer = await ln1.rlpxConnect(newNode(ln2.toENode()))
check peer.isErr() == true

View File

@ -62,10 +62,6 @@ task wakunode2, "Build Waku v2 cli node":
let name = "wakunode2"
buildBinary name, "apps/wakunode2/"
task bridge, "Build Waku v1 - v2 bridge":
let name = "wakubridge"
buildBinary name, "apps/wakubridge/"
task wakucanary, "Build waku-canary tool":
let name = "wakucanary"
buildBinary name, "apps/wakucanary/"
@ -80,9 +76,6 @@ task test2, "Build & run Waku v2 tests":
task testwakunode2, "Build & run wakunode2 app tests":
test "all_tests_wakunode2"
task testbridge, "Build & run wakubridge tests":
test "all_tests_wakubridge"
task example2, "Build Waku v2 example":
buildBinary "publisher", "examples/v2/"
buildBinary "subscriber", "examples/v2/"
@ -109,24 +102,3 @@ task libwakuStatic, "Build the cbindings waku node library":
task libwakuDynamic, "Build the cbindings waku node library":
let name = "libwaku"
buildLibrary name, "library/", "-d:chronicles_log_level=ERROR", "dynamic"
### Legacy: Whisper & Waku v1 tasks
task testwhisper, "Build & run Whisper tests":
test "all_tests_whisper", "-d:chronicles_log_level=WARN -d:chronosStrictException"
task wakunode1, "Build Waku v1 cli node":
buildBinary "wakunode1", "waku/v1/node/",
"-d:chronicles_log_level=DEBUG -d:chronosStrictException"
task sim1, "Build Waku v1 simulation tools":
buildBinary "quicksim", "waku/v1/node/",
"-d:chronicles_log_level=INFO -d:chronosStrictException"
buildBinary "start_network", "waku/v1/node/",
"-d:chronicles_log_level=DEBUG -d:chronosStrictException"
task example1, "Build Waku v1 example":
buildBinary "example", "examples/v1/",
"-d:chronicles_log_level=DEBUG -d:chronosStrictException"
task test1, "Build & run Waku v1 tests":
test "all_tests_v1", "-d:chronicles_log_level=WARN -d:chronosStrictException"

View File

@ -1,155 +1,3 @@
# Waku v1
This folder contains code related to Waku v1, both as a node and as a protocol.
## Introduction
This is a Nim implementation of the [Waku v1 protocol](https://rfc.vac.dev/spec/6/) and a cli application `wakunode` that allows you to run a Waku enabled node from command line.
For supported specification details see [here](#spec-support).
Additionally the original Whisper (EIP-627) protocol can also be enabled as can
an experimental Whisper - Waku bridging option.
The underlying transport protocol is [rlpx + devp2p](https://github.com/ethereum/devp2p/blob/master/rlpx.md) and the [nim-eth](https://github.com/status-im/nim-eth) implementation is used.
⚠️ Note that Waku v1 development has been stopped, in favour of libp2p-based protocol Waku v2: [specs](https://rfc.vac.dev/spec/10/), [code](https://github.com/waku-org/nwaku/tree/master/waku/v2).
## How to Build & Run
All of the below commands should be executed at the root level, i.e. `cd ../..`.
### Prerequisites
* GNU Make, Bash and the usual POSIX utilities. Git 2.9.4 or newer.
### Wakunode
```bash
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date.
make wakunode1
# See available command line options
./build/wakunode --help
# Connect the client directly with the Status test fleet
./build/wakunode --log-level:debug --discovery:off --fleet:test --log-metrics
```
### Waku v1 Protocol Test Suite
```bash
# Run all the Waku v1 tests
make test1
```
You can also run a specific test (and alter compile options as you want):
```bash
# Get a shell with the right environment variables set
./env.sh bash
# Run a specific test
nim c -r ./tests/v1/test_waku_connect.nim
```
### Waku v1 Protocol Example
There is a more basic example, more limited in features and configuration than
the `wakunode`, located in `examples/v1/example.nim`.
More information on how to run this example can be found in its
[readme](../../examples/v1/README.md).
### Waku Quick Simulation
One can set up several nodes, get them connected and then instruct them via the
JSON-RPC interface. This can be done via e.g. web3.js, nim-web3 (needs to be
updated) or simply curl your way out.
The JSON-RPC interface is currently the same as the one of Whisper. The only
difference is the addition of broadcasting the topics interest when a filter
with a certain set of topics is subscribed.
The quick simulation uses this approach, `start_network` launches a set of
`wakunode`s, and `quicksim` instructs the nodes through RPC calls.
Example of how to build and run:
```bash
# Build wakunode + quicksim with metrics enabled
make NIMFLAGS="-d:insecure" sim1
# Start the simulation nodes, this currently requires multitail to be installed
./build/start_network --topology:FullMesh --amount:6 --test-node-peers:2
# In another shell run
./build/quicksim
```
The `start_network` tool will also provide a `prometheus.yml` with targets
set to all simulation nodes that are started. This way you can easily start
prometheus with this config, e.g.:
```bash
cd ./metrics/prometheus
prometheus
```
A Grafana dashboard containing the example dashboard for each simulation node
is also generated and can be imported in case you have Grafana running.
This dashboard can be found at `./metrics/waku-sim-all-nodes-grafana-dashboard.json`
To read more details about metrics, see [next](#using-metrics) section.
## Using Metrics
Metrics are available for valid envelopes and dropped envelopes.
To compile in an HTTP endpoint for accessing the metrics we need to provide the
`insecure` flag:
```bash
make NIMFLAGS="-d:insecure" wakunode1
./build/wakunode --metrics-server
```
Ensure your Prometheus config `prometheus.yml` contains the targets you care about, e.g.:
```
scrape_configs:
- job_name: "waku"
static_configs:
- targets: ['localhost:8008', 'localhost:8009', 'localhost:8010']
```
For visualisation, similar steps can be used as is written down for Nimbus
[here](https://github.com/status-im/nimbus#metric-visualisation).
There is a similar example dashboard that includes visualisation of the
envelopes available at `metrics/waku-grafana-dashboard.json`.
## Spec support
*This section last updated April 21, 2020*
This client of Waku is spec compliant with [Waku spec v1.0.0](https://specs.vac.dev/waku/waku.html).
It doesn't yet implement the following recommended features:
- No support for rate limiting
- No support for DNS discovery to find Waku nodes
- It doesn't disconnect a peer if it receives a message before a Status message
- No support for negotiation with peer supporting multiple versions via Devp2p capabilities in `Hello` packet
Additionally it makes the following choices:
- It doesn't send message confirmations
- It has partial support for accounting:
- Accounting of total resource usage and total circulated envelopes is done through metrics, but no accounting is done for individual peers.
## Docker Image
You can create a Docker image using:
```bash
make docker-image
docker run --rm -it statusteam/nim-waku:latest --help
```
The target will be a docker image with `wakunode`, which is the Waku v1 node.
Note that the legacy Waku v1 code has been moved to https://github.com/waku-org/waku-legacy.

View File

@ -1,3 +0,0 @@
# Waku Node v1
This folder contains code related to running a `wakunode` process. The main entrypoint is the `wakunode` file.

View File

@ -1,164 +0,0 @@
import
confutils/defs, chronicles, chronos, eth/keys
type
  # Named fleets the node can bootstrap against.
  Fleet* = enum
    none
    prod
    staging
    test

  # Top-level sub-commands of the wakunode binary.
  WakuNodeCmd* = enum
    noCommand   # default: run the node
    genNodekey  # generate a node key and exit

  # Command-line configuration, parsed by confutils. Each field's pragma
  # carries the CLI flag name, help text and default value; the `case`
  # discriminator selects which options exist per sub-command.
  WakuNodeConf* = object
    logLevel* {.
      desc: "Sets the log level."
      defaultValue: LogLevel.INFO
      name: "log-level" .}: LogLevel

    case cmd* {.
      command
      defaultValue: noCommand .}: WakuNodeCmd

    of noCommand:
      tcpPort* {.
        desc: "TCP listening port."
        defaultValue: 30303
        name: "tcp-port" .}: uint16

      portsShift* {.
        desc: "Add a shift to all port numbers."
        defaultValue: 0
        name: "ports-shift" .}: uint16

      nat* {.
        desc: "Specify method to use for determining public address. " &
              "Must be one of: any, none, upnp, pmp, extip:<IP>."
        defaultValue: "any" .}: string

      discovery* {.
        desc: "Enable/disable discovery v4."
        defaultValue: true
        name: "discovery" .}: bool

      noListen* {.
        desc: "Disable listening for incoming peers."
        defaultValue: false
        name: "no-listen" .}: bool

      fleet* {.
        desc: "Select the fleet to connect to."
        defaultValue: Fleet.none
        name: "fleet" .}: Fleet

      bootnodes* {.
        desc: "Enode URL to bootstrap P2P discovery with. Argument may be repeated."
        name: "bootnode" .}: seq[string]

      staticnodes* {.
        desc: "Enode URL to directly connect with. Argument may be repeated."
        name: "staticnode" .}: seq[string]

      whisper* {.
        desc: "Enable the Whisper protocol."
        defaultValue: false
        name: "whisper" .}: bool

      whisperBridge* {.
        desc: "Enable the Whisper protocol and bridge with Waku protocol."
        defaultValue: false
        name: "whisper-bridge" .}: bool

      lightNode* {.
        desc: "Run as light node (no message relay).",
        defaultValue: false
        name: "light-node" .}: bool

      wakuTopicInterest* {.
        desc: "Run as node with a topic-interest",
        defaultValue: false
        name: "waku-topic-interest" .}: bool

      wakuPow* {.
        desc: "PoW requirement of Waku node.",
        defaultValue: 0.002
        name: "waku-pow" .}: float64

      # Note: the default draws fresh randomness on every config load.
      nodekey* {.
        desc: "P2P node private key as hex.",
        # TODO: can the rng be passed in somehow via Load?
        defaultValue: KeyPair.random(keys.newRng()[])
        name: "nodekey" .}: KeyPair
      # TODO: Add nodekey file option

      bootnodeOnly* {.
        desc: "Run only as discovery bootnode."
        defaultValue: false
        name: "bootnode-only" .}: bool

      rpc* {.
        desc: "Enable Waku RPC server.",
        defaultValue: false
        name: "rpc" .}: bool

      rpcAddress* {.
        desc: "Listening address of the RPC server.",
        defaultValue: parseIpAddress("127.0.0.1")
        name: "rpc-address" .}: IpAddress

      rpcPort* {.
        desc: "Listening port of the RPC server.",
        defaultValue: 8545
        name: "rpc-port" .}: uint16

      metricsServer* {.
        desc: "Enable the metrics server."
        defaultValue: false
        name: "metrics-server" .}: bool

      metricsServerAddress* {.
        desc: "Listening address of the metrics server."
        defaultValue: parseIpAddress("127.0.0.1")
        name: "metrics-server-address" .}: IpAddress

      metricsServerPort* {.
        desc: "Listening HTTP port of the metrics server."
        defaultValue: 8008
        name: "metrics-server-port" .}: uint16

      logMetrics* {.
        desc: "Enable metrics logging."
        defaultValue: false
        name: "log-metrics" .}: bool

      logAccounting* {.
        desc: "Enable peer accounting logging."
        defaultValue: false
        name: "log-accounting" .}: bool

      # TODO:
      # - discv5 + topic register
      # - mailserver functionality

    of genNodekey:
      # No further options for key generation.
      discard
proc parseCmdArg*(T: type KeyPair, p: string): T =
  ## Parses a hex-encoded private key from the command line and expands it
  ## into a key pair; raises ConfigurationError on malformed input.
  var seckey: PrivateKey
  try:
    seckey = PrivateKey.fromHex(string(p)).tryGet()
  except CatchableError:
    raise newException(ConfigurationError, "Invalid private key")
  seckey.toKeyPair()

proc completeCmdArg*(T: type KeyPair, val: string): seq[string] =
  ## Shell-completion hook for KeyPair arguments: nothing to suggest.
  @[]
proc parseCmdArg*(T: type IpAddress, p: string): T =
  ## Parses a textual IPv4/IPv6 literal from the command line;
  ## raises ConfigurationError when `p` is not a valid address.
  var address: IpAddress
  try:
    address = parseIpAddress(p)
  except CatchableError:
    raise newException(ConfigurationError, "Invalid IP address")
  address

proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
  ## Shell-completion hook for IpAddress arguments: nothing to suggest.
  @[]

View File

@ -1,4 +0,0 @@
-d:chronicles_line_numbers
-d:"chronicles_runtime_filtering=on"
-d:nimDebugDlOpen

View File

@ -1,76 +0,0 @@
import
os, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
eth/common as eth_common, eth/keys,
../protocol/waku_protocol, ./rpc/[hexstrings, rpc_types],
options as what # TODO: Huh? Redefinition?
from os import DirSep
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = &"{sourceDir}{DirSep}rpc{DirSep}wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
const topicAmount = 100
let
trafficNode = newRpcHttpClient()
lightNode = newRpcHttpClient()
lightNode2 = newRpcHttpClient()
waitFor lightNode.connect("localhost", Port(8545), false)
waitFor lightNode2.connect("localhost", Port(8546), false)
waitFor trafficNode.connect("localhost", Port(8548), false)
proc generateTopics(amount = topicAmount): seq[waku_protocol.Topic] =
  ## Builds `amount` topics filled with random bytes; raises ValueError
  ## when the system RNG fails to deliver a full 4-byte topic.
  for _ in 0 ..< amount:
    var candidate: waku_protocol.Topic
    if randomBytes(candidate) != 4:
      raise newException(ValueError, "Generation of random topic failed.")
    result.add(candidate)
let
symKey = "0x0000000000000000000000000000000000000000000000000000000000000001"
topics = generateTopics()
symKeyID = waitFor lightNode.waku_addSymKey(symKey)
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(topics))
filterID = waitFor lightNode.waku_newMessageFilter(options)
symKeyID2 = waitFor lightNode2.waku_addSymKey(symKey)
options2 = WakuFilterOptions(symKeyID: some(symKeyID2),
topics: some(topics))
filterID2 = waitFor lightNode2.waku_newMessageFilter(options2)
symkeyID3 = waitFor trafficNode.waku_addSymKey(symKey)
var message = WakuPostMessage(symKeyID: some(symkeyID3),
ttl: 30,
topic: some(topics[0]),
payload: "0x45879632".HexDataStr,
powTime: 1.0,
powTarget: 0.002)
info "Posting envelopes on all subscribed topics"
for i in 0..<topicAmount:
message.topic = some(topics[i])
discard waitFor trafficNode.waku_post(message)
# Check if the subscription for the topics works
waitFor sleepAsync(1000.milliseconds) # This is a bit brittle
let
messages = waitFor lightNode.waku_getFilterMessages(filterID)
messages2 = waitFor lightNode2.waku_getFilterMessages(filterID2)
if messages.len != topicAmount or messages2.len != topicAmount:
error "Light node did not receive envelopes on all subscribed topics",
lightnode1=messages.len, lightnode2=messages2.len
quit 1
info "Received envelopes on all subscribed topics"
# Generate test traffic on node
discard waitFor trafficNode.wakusim_generateRandomTraffic(10_000)
info "Started random traffic generation"

View File

@ -1,222 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## This module implements the Ethereum hexadecimal string formats for JSON
## See: https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
#[
Note:
The following types are converted to hex strings when marshalled to JSON:
* Hash256
* UInt256
* seq[byte]
* openArray[seq]
* PublicKey
* PrivateKey
* SymKey
* Topic
* Bytes
]#
import
stint, stew/byteutils, eth/keys, eth/common/eth_types,
../../protocol/waku_protocol
type
  # Distinct string wrappers so that only validated hex values travel
  # through the RPC layer; construct via the checked helpers below.
  HexDataStr* = distinct string  # "0x"-prefixed, even-length hex payload
  Identifier* = distinct string  # 32 bytes, no 0x prefix!
  HexStrings = HexDataStr | Identifier  # union handled by the generic `%` below
# Hex validation
# True when `value` starts with a "0x"/"0X" prefix.
template hasHexHeader(value: string): bool =
  value.len >= 2 and value[0] == '0' and value[1] in {'x', 'X'}

# True when `c` is a hexadecimal digit of either case.
template isHexChar(c: char): bool =
  c in {'0'..'9', 'a'..'f', 'A'..'F'}

func isValidHexQuantity*(value: string): bool =
  ## Validates an Ethereum hex quantity: "0x" header, at least one digit,
  ## and no leading zeros (the single value "0x0" is allowed).
  if not value.hasHexHeader:
    return false
  # No leading zeros (but allow 0x0)
  if value.len < 3 or (value.len > 3 and value[2] == '0'): return false
  for i in 2 ..< value.len:
    if not value[i].isHexChar:
      return false
  true

func isValidHexData*(value: string, header = true): bool =
  ## Validates hex data: optional "0x" header, an even number of digits,
  ## leading zeros allowed.
  if header and not value.hasHexHeader:
    return false
  # Must be even number of digits
  if value.len mod 2 != 0: return false
  # Fix: the scan previously always started at index 2, so with
  # `header = false` the first two characters were never validated
  # (e.g. "zz34" passed, affecting isValidIdentifier). Only skip the
  # "0x" prefix when a header is actually expected.
  let start = if header: 2 else: 0
  for i in start ..< value.len:
    if not value[i].isHexChar:
      return false
  true

# Length-checked variant used by the fixed-size validators below.
template isValidHexData(value: string, hexLen: int, header = true): bool =
  value.len == hexLen and value.isValidHexData(header)
func isValidIdentifier*(value: string): bool =
  # 32 bytes (64 hex chars) for a key/filter identifier, no 0x prefix
  result = value.isValidHexData(64, false)

func isValidPublicKey*(value: string): bool =
  # 65 bytes of uncompressed public key ("04" tag + 64 bytes) plus the
  # "0x" prefix = 132 hex characters
  result = value.isValidHexData(132)

func isValidPrivateKey*(value: string): bool =
  # 32 bytes for the private key plus the "0x" prefix = 66 hex characters
  result = value.isValidHexData(66)

func isValidSymKey*(value: string): bool =
  # 32 bytes for the symmetric key plus the "0x" prefix = 66 hex characters
  result = value.isValidHexData(66)

func isValidHash256*(value: string): bool =
  # 32 bytes for Hash256 plus the "0x" prefix = 66 hex characters
  result = value.isValidHexData(66)

func isValidTopic*(value: string): bool =
  # 4 bytes for a topic plus the "0x" prefix = 10 hex characters
  result = value.isValidHexData(10)
const
  SInvalidData = "Invalid hex data format for Ethereum"

proc validateHexData*(value: string) {.inline.} =
  ## Raises ValueError unless `value` is well-formed "0x"-prefixed hex data.
  if not value.isValidHexData:
    raise newException(ValueError, SInvalidData & ": " & value)

# Initialisation

proc hexDataStr*(value: string): HexDataStr {.inline.} =
  ## Checked constructor: validates `value` before wrapping it in the
  ## HexDataStr distinct type.
  value.validateHexData
  HexDataStr(value)
# Converters for use in RPC
import json
from json_rpc/rpcserver import expect
proc `%`*(value: HexStrings): JsonNode =
  ## Hex string wrappers serialise as their raw string content.
  result = %(value.string)

# Overloads to support expected representation of hex data

proc `%`*(value: Hash256): JsonNode =
  ## 32-byte hash as lowercase "0x"-prefixed hex.
  #result = %("0x" & $value) # More clean but no lowercase :(
  result = %("0x" & value.data.toHex)

proc `%`*(value: UInt256): JsonNode =
  ## 256-bit unsigned integer as a "0x"-prefixed hex quantity.
  result = %("0x" & value.toString(16))

proc `%`*(value: PublicKey): JsonNode =
  ## Uncompressed public key: "0x04" tag followed by the key bytes.
  result = %("0x04" & $value)

proc `%`*(value: PrivateKey): JsonNode =
  result = %("0x" & $value)

proc `%`*(value: SymKey): JsonNode =
  result = %("0x" & value.toHex)

proc `%`*(value: waku_protocol.Topic): JsonNode =
  result = %("0x" & value.toHex)

proc `%`*(value: seq[byte]): JsonNode =
  ## Byte sequences serialise as "0x"-hex when non-empty.
  # NOTE(review): an empty seq becomes an empty JSON array rather than the
  # string "0x" — presumably deliberate; confirm against API consumers.
  if value.len > 0:
    result = %("0x" & value.toHex)
  else:
    result = newJArray()
# Helpers for the fromJson procs: decode already-validated hex strings.

proc toPublicKey*(key: string): PublicKey {.inline.} =
  ## Decodes a public key from its "0x04"-prefixed hex form.
  PublicKey.fromHex(key[4 .. ^1]).tryGet()

proc toPrivateKey*(key: string): PrivateKey {.inline.} =
  ## Decodes a private key from its "0x"-prefixed hex form.
  PrivateKey.fromHex(key[2 .. ^1]).tryGet()

proc toSymKey*(key: string): SymKey {.inline.} =
  ## Decodes a symmetric key from its "0x"-prefixed hex form.
  hexToByteArray(key[2 .. ^1], result)

proc toTopic*(topic: string): waku_protocol.Topic {.inline.} =
  ## Decodes a 4-byte topic from its "0x"-prefixed hex form.
  hexToByteArray(topic[2 .. ^1], result)
# Marshalling from JSON to Nim types that includes format checking

func invalidMsg(name: string): string = "When marshalling from JSON, parameter \"" & name & "\" is not valid"

proc fromJson*(n: JsonNode, argName: string, result: var HexDataStr) =
  ## Expects a JSON string holding "0x"-prefixed, even-length hex data.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not hexStr.isValidHexData:
    raise newException(ValueError, invalidMsg(argName) & " as Ethereum data \"" & hexStr & "\"")
  result = hexStr.hexDataStr

proc fromJson*(n: JsonNode, argName: string, result: var Identifier) =
  ## Expects a 64-char hex string without a "0x" prefix.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not hexStr.isValidIdentifier:
    raise newException(ValueError, invalidMsg(argName) & " as a identifier \"" & hexStr & "\"")
  result = hexStr.Identifier

proc fromJson*(n: JsonNode, argName: string, result: var UInt256) =
  ## Expects a "0x"-prefixed hex quantity of at most 64 digits.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not (hexStr.len <= 66 and hexStr.isValidHexQuantity):
    raise newException(ValueError, invalidMsg(argName) & " as a UInt256 \"" & hexStr & "\"")
  result = readUintBE[256](hexToPaddedByteArray[32](hexStr))

proc fromJson*(n: JsonNode, argName: string, result: var PublicKey) =
  ## Expects a 132-char "0x04"-prefixed uncompressed public key.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not hexStr.isValidPublicKey:
    raise newException(ValueError, invalidMsg(argName) & " as a public key \"" & hexStr & "\"")
  result = hexStr.toPublicKey

proc fromJson*(n: JsonNode, argName: string, result: var PrivateKey) =
  ## Expects a 66-char "0x"-prefixed private key.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not hexStr.isValidPrivateKey:
    raise newException(ValueError, invalidMsg(argName) & " as a private key \"" & hexStr & "\"")
  result = hexStr.toPrivateKey

proc fromJson*(n: JsonNode, argName: string, result: var SymKey) =
  ## Expects a 66-char "0x"-prefixed symmetric key.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not hexStr.isValidSymKey:
    raise newException(ValueError, invalidMsg(argName) & " as a symmetric key \"" & hexStr & "\"")
  result = toSymKey(hexStr)

proc fromJson*(n: JsonNode, argName: string, result: var waku_protocol.Topic) =
  ## Expects a 10-char "0x"-prefixed 4-byte topic.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not hexStr.isValidTopic:
    raise newException(ValueError, invalidMsg(argName) & " as a topic \"" & hexStr & "\"")
  result = toTopic(hexStr)

# Following procs currently required only for testing, the `createRpcSigs` macro
# requires it as it will convert the JSON results back to the original Nim
# types, but it needs the `fromJson` calls for those specific Nim types to do so

proc fromJson*(n: JsonNode, argName: string, result: var Hash256) =
  ## Expects a 66-char "0x"-prefixed 32-byte hash.
  n.kind.expect(JString, argName)
  let hexStr = n.getStr()
  if not hexStr.isValidHash256:
    raise newException(ValueError, invalidMsg(argName) & " as a Hash256 \"" & hexStr & "\"")
  hexToByteArray(hexStr, result.data)

View File

@ -1,22 +0,0 @@
#
# Nimbus
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import tables, eth/keys, ../../../whisper/whisper_types
type
  # In-memory store for the RPC API's named keys; entries are addressed by
  # the random identifier handed out when the key was added.
  KeyStorage* = ref object
    asymKeys*: Table[string, KeyPair]  # identifier -> asymmetric key pair
    symKeys*: Table[string, SymKey]    # identifier -> symmetric key

  # Raised when the system RNG/KDF fails to produce key material.
  KeyGenerationError* = object of CatchableError
proc newKeyStorage*(): KeyStorage =
  ## Allocates an empty KeyStorage with both key tables initialised.
  KeyStorage(asymKeys: initTable[string, KeyPair](),
             symKeys: initTable[string, SymKey]())

View File

@ -1,58 +0,0 @@
import
hexstrings, options, eth/keys,
../../protocol/waku_protocol
#[
Notes:
* Some of the types suppose 'null' when there is no appropriate value.
To allow for this, you can use Option[T] or use refs so the JSON transform can convert to `JNull`.
* Parameter objects from users must have their data verified so will use EthAddressStr instead of EthAddres, for example
* Objects returned to the user can use native Waku types, where hexstrings provides converters to hex strings.
This is because returned arrays in JSON is
a) not an efficient use of space
b) not the format the user expects (for example addresses are expected to be hex strings prefixed by "0x")
]#
type
  # JSON-RPC parameter and result types for the Waku v1 API. Parameter
  # objects use Option[T] so absent fields marshal to/from JSON `null`.
  WakuInfo* = object
    # Returned to user
    minPow*: float64  # Current minimum PoW requirement.
    # TODO: may be uint32
    maxMessageSize*: uint64  # Current message size limit in bytes.
    memory*: int  # Memory size of the floating messages in bytes.
    messages*: int  # Number of floating messages.

  WakuFilterOptions* = object
    # Parameter from user
    symKeyID*: Option[Identifier]  # ID of symmetric key for message decryption.
    privateKeyID*: Option[Identifier]  # ID of private (asymmetric) key for message decryption.
    sig*: Option[PublicKey]  # (Optional) Public key of the signature.
    minPow*: Option[float64]  # (Optional) Minimal PoW requirement for incoming messages.
    topics*: Option[seq[waku_protocol.Topic]]  # (Optional when asym key): Array of possible topics (or partial topics).
    allowP2P*: Option[bool]  # (Optional) Indicates if this filter allows processing of direct peer-to-peer messages.

  WakuFilterMessage* = object
    # Returned to user
    sig*: Option[PublicKey]  # Public key who signed this message.
    recipientPublicKey*: Option[PublicKey]  # The recipients public key.
    ttl*: uint64  # Time-to-live in seconds.
    timestamp*: uint64  # Unix timestamp of the message generation.
    topic*: waku_protocol.Topic  # 4 Bytes: Message topic.
    payload*: seq[byte]  # Decrypted payload.
    padding*: seq[byte]  # (Optional) Padding (byte array of arbitrary length).
    pow*: float64  # Proof of work value.
    hash*: Hash  # Hash of the enveloped message.

  WakuPostMessage* = object
    # Parameter from user
    symKeyID*: Option[Identifier]  # ID of symmetric key for message encryption.
    pubKey*: Option[PublicKey]  # Public key for message encryption.
    sig*: Option[Identifier]  # (Optional) ID of the signing key.
    ttl*: uint64  # Time-to-live in seconds.
    topic*: Option[waku_protocol.Topic]  # Message topic (mandatory when key is symmetric).
    payload*: HexDataStr  # Payload to be encrypted.
    padding*: Option[HexDataStr]  # (Optional) Padding (byte array of arbitrary length).
    powTime*: float64  # Maximal time in seconds to be spent on proof of work.
    powTarget*: float64  # Minimal PoW target required for this message.
    # TODO: EnodeStr
    targetPeer*: Option[string]  # (Optional) Peer ID (for peer-to-peer message only).

View File

@ -1,365 +0,0 @@
import
json_rpc/rpcserver, tables, options, sequtils,
eth/[common, keys, p2p],
nimcrypto/[sysrand, hmac, sha2, pbkdf2],
rpc_types, hexstrings, key_storage,
../../protocol/waku_protocol
from stew/byteutils import hexToSeqByte, hexToByteArray
# Blatant copy of Whisper RPC but for the Waku protocol
proc setupWakuRPC*(node: EthereumNode, keys: KeyStorage, rpcsrv: RpcServer,
rng: ref HmacDrbgContext) =
rpcsrv.rpc("waku_version") do() -> string:
## Returns string of the current Waku protocol version.
result = wakuVersionStr
rpcsrv.rpc("waku_info") do() -> WakuInfo:
## Returns diagnostic information about the Waku node.
let config = node.protocolState(Waku).config
result = WakuInfo(minPow: config.powRequirement,
maxMessageSize: config.maxMsgSize,
memory: 0,
messages: 0)
# TODO: uint32 instead of uint64 is OK here, but needs to be added in json_rpc
rpcsrv.rpc("waku_setMaxMessageSize") do(size: uint64) -> bool:
## Sets the maximal message size allowed by this node.
## Incoming and outgoing messages with a larger size will be rejected.
## Waku message size can never exceed the limit imposed by the underlying
## P2P protocol (10 Mb).
##
## size: Message size in bytes.
##
## Returns true on success and an error on failure.
result = node.setMaxMessageSize(size.uint32)
if not result:
raise newException(ValueError, "Invalid size")
rpcsrv.rpc("waku_setMinPoW") do(pow: float) -> bool:
## Sets the minimal PoW required by this node.
##
## pow: The new PoW requirement.
##
## Returns true on success and an error on failure.
# Note: `setPowRequirement` does not raise on failures of sending the update
# to the peers. Hence in theory this should not causes errors.
await node.setPowRequirement(pow)
result = true
# TODO: change string in to ENodeStr with extra checks
rpcsrv.rpc("waku_markTrustedPeer") do(enode: string) -> bool:
## Marks specific peer trusted, which will allow it to send historic
## (expired) messages.
## Note: This function is not adding new nodes, the node needs to exists as
## a peer.
##
## enode: Enode of the trusted peer.
##
## Returns true on success and an error on failure.
# TODO: It will now require an enode://pubkey@ip:port uri
# could also accept only the pubkey (like geth)?
let peerNode = newNode(enode)
result = node.setPeerTrusted(peerNode.id)
if not result:
raise newException(ValueError, "Not a peer")
rpcsrv.rpc("waku_newKeyPair") do() -> Identifier:
## Generates a new public and private key pair for message decryption and
## encryption.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
keys.asymKeys[result.string] = KeyPair.random(rng[])
rpcsrv.rpc("waku_addPrivateKey") do(key: PrivateKey) -> Identifier:
## Stores the key pair, and returns its ID.
##
## key: Private key as hex bytes.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
keys.asymKeys[result.string] = key.toKeyPair()
rpcsrv.rpc("waku_deleteKeyPair") do(id: Identifier) -> bool:
## Deletes the specifies key if it exists.
##
## id: Identifier of key pair
##
## Returns true on success and an error on failure.
var unneeded: KeyPair
result = keys.asymKeys.take(id.string, unneeded)
if not result:
raise newException(ValueError, "Invalid key id")
rpcsrv.rpc("waku_hasKeyPair") do(id: Identifier) -> bool:
## Checks if the Waku node has a private key of a key pair matching the
## given ID.
##
## id: Identifier of key pair
##
## Returns (true or false) on success and an error on failure.
result = keys.asymkeys.hasKey(id.string)
rpcsrv.rpc("waku_getPublicKey") do(id: Identifier) -> PublicKey:
## Returns the public key for identity ID.
##
## id: Identifier of key pair
##
## Returns public key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.asymkeys[id.string].pubkey
rpcsrv.rpc("waku_getPrivateKey") do(id: Identifier) -> PrivateKey:
## Returns the private key for identity ID.
##
## id: Identifier of key pair
##
## Returns private key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.asymkeys[id.string].seckey
rpcsrv.rpc("waku_newSymKey") do() -> Identifier:
## Generates a random symmetric key and stores it under an ID, which is then
## returned. Can be used encrypting and decrypting messages where the key is
## known to both parties.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
var key: SymKey
if randomBytes(key) != key.len:
raise newException(KeyGenerationError, "Failed generating key")
keys.symKeys[result.string] = key
rpcsrv.rpc("waku_addSymKey") do(key: SymKey) -> Identifier:
## Stores the key, and returns its ID.
##
## key: The raw key for symmetric encryption as hex bytes.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
keys.symKeys[result.string] = key
rpcsrv.rpc("waku_generateSymKeyFromPassword") do(password: string) -> Identifier:
## Generates the key from password, stores it, and returns its ID.
##
## password: Password.
##
## Returns key identifier on success and an error on failure.
## Warning: an empty string is used as salt because the shh RPC API does not
## allow for passing a salt. A very good password is necessary (calculate
## yourself what that means :))
var ctx: HMAC[sha256]
var symKey: SymKey
if pbkdf2(ctx, password, "", 65356, symKey) != sizeof(SymKey):
raise newException(KeyGenerationError, "Failed generating key")
result = generateRandomID(rng[]).Identifier
keys.symKeys[result.string] = symKey
rpcsrv.rpc("waku_hasSymKey") do(id: Identifier) -> bool:
## Returns true if there is a key associated with the name string.
## Otherwise, returns false.
##
## id: Identifier of key.
##
## Returns (true or false) on success and an error on failure.
result = keys.symkeys.hasKey(id.string)
rpcsrv.rpc("waku_getSymKey") do(id: Identifier) -> SymKey:
## Returns the symmetric key associated with the given ID.
##
## id: Identifier of key.
##
## Returns Raw key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.symkeys[id.string]
rpcsrv.rpc("waku_deleteSymKey") do(id: Identifier) -> bool:
## Deletes the key associated with the name string if it exists.
##
## id: Identifier of key.
##
## Returns (true or false) on success and an error on failure.
var unneeded: SymKey
result = keys.symKeys.take(id.string, unneeded)
if not result:
raise newException(ValueError, "Invalid key id")
rpcsrv.rpc("waku_subscribe") do(id: string,
options: WakuFilterOptions) -> Identifier:
## Creates and registers a new subscription to receive notifications for
## inbound Waku messages. Returns the ID of the newly created
## subscription.
##
## id: identifier of function call. In case of Waku must contain the
## value "messages".
## options: WakuFilterOptions
##
## Returns the subscription ID on success, the error on failure.
# TODO: implement subscriptions, only for WS & IPC?
discard
rpcsrv.rpc("waku_unsubscribe") do(id: Identifier) -> bool:
## Cancels and removes an existing subscription.
##
## id: Subscription identifier
##
## Returns true on success, the error on failure
result = node.unsubscribeFilter(id.string)
if not result:
raise newException(ValueError, "Invalid filter id")
proc validateOptions[T,U,V](asym: Option[T], sym: Option[U], topic: Option[V]) =
if (asym.isSome() and sym.isSome()) or (asym.isNone() and sym.isNone()):
raise newException(ValueError,
"Either privateKeyID/pubKey or symKeyID must be present")
if asym.isNone() and topic.isNone():
raise newException(ValueError, "Topic mandatory with symmetric key")
rpcsrv.rpc("waku_newMessageFilter") do(options: WakuFilterOptions) -> Identifier:
## Create a new filter within the node. This filter can be used to poll for
## new messages that match the set of criteria.
##
## options: WakuFilterOptions
##
## Returns filter identifier on success, error on failure
# Check if either symKeyID or privateKeyID is present, and not both
# Check if there are Topics when symmetric key is used
validateOptions(options.privateKeyID, options.symKeyID, options.topics)
var
src: Option[PublicKey]
privateKey: Option[PrivateKey]
symKey: Option[SymKey]
topics: seq[waku_protocol.Topic]
powReq: float64
allowP2P: bool
src = options.sig
if options.privateKeyID.isSome():
privateKey = some(keys.asymKeys[options.privateKeyID.get().string].seckey)
if options.symKeyID.isSome():
symKey= some(keys.symKeys[options.symKeyID.get().string])
if options.minPow.isSome():
powReq = options.minPow.get()
if options.topics.isSome():
topics = options.topics.get()
if options.allowP2P.isSome():
allowP2P = options.allowP2P.get()
let filter = initFilter(src, privateKey, symKey, topics, powReq, allowP2P)
result = node.subscribeFilter(filter).Identifier
# TODO: Should we do this here "automatically" or separate it in another
# RPC call? Is there a use case for that?
# Same could be said about bloomfilter, except that there is a use case
# there to have a full node no matter what message filters.
# Could also be moved to waku_protocol.nim
let config = node.protocolState(Waku).config
if config.topics.isSome():
try:
# TODO: an addTopics call would probably be more useful
let result = await node.setTopicInterest(config.topics.get().concat(filter.topics))
if not result:
raise newException(ValueError, "Too many topics")
except CatchableError:
trace "setTopics error occured"
elif config.isLightNode:
try:
await node.setBloomFilter(node.filtersToBloom())
except CatchableError:
trace "setBloomFilter error occured"
rpcsrv.rpc("waku_deleteMessageFilter") do(id: Identifier) -> bool:
## Uninstall a message filter in the node.
##
## id: Filter identifier as returned when the filter was created.
##
## Returns true on success, error on failure.
result = node.unsubscribeFilter(id.string)
if not result:
raise newException(ValueError, "Invalid filter id")
rpcsrv.rpc("waku_getFilterMessages") do(id: Identifier) -> seq[WakuFilterMessage]:
## Retrieve messages that match the filter criteria and are received between
## the last time this function was called and now.
##
## id: ID of filter that was created with `waku_newMessageFilter`.
##
## Returns array of messages on success and an error on failure.
let messages = node.getFilterMessages(id.string)
for msg in messages:
result.add WakuFilterMessage(
sig: msg.decoded.src,
recipientPublicKey: msg.dst,
ttl: msg.ttl,
topic: msg.topic,
timestamp: msg.timestamp,
payload: msg.decoded.payload,
# Note: waku_protocol padding is an Option as there is the
# possibility of 0 padding in case of custom padding.
padding: msg.decoded.padding.get(@[]),
pow: msg.pow,
hash: msg.hash)
rpcsrv.rpc("waku_post") do(message: WakuPostMessage) -> bool:
## Creates a Waku message and injects it into the network for
## distribution.
##
## message: Waku message to post.
##
## Returns true on success and an error on failure.
# Check if either symKeyID or pubKey is present, and not both
# Check if there is a Topic when symmetric key is used
validateOptions(message.pubKey, message.symKeyID, message.topic)
var
sigPrivKey: Option[PrivateKey]
symKey: Option[SymKey]
topic: waku_protocol.Topic
padding: Option[seq[byte]]
targetPeer: Option[NodeId]
if message.sig.isSome():
sigPrivKey = some(keys.asymKeys[message.sig.get().string].seckey)
if message.symKeyID.isSome():
symKey = some(keys.symKeys[message.symKeyID.get().string])
# Note: If no topic it will be defaulted to 0x00000000
if message.topic.isSome():
topic = message.topic.get()
if message.padding.isSome():
padding = some(hexToSeqByte(message.padding.get().string))
if message.targetPeer.isSome():
targetPeer = some(newNode(message.targetPeer.get()).id)
result = node.postMessage(message.pubKey,
symKey,
sigPrivKey,
ttl = message.ttl.uint32,
topic = topic,
payload = hexToSeqByte(message.payload.string),
padding = padding,
powTime = message.powTime,
powTarget = message.powTarget,
targetPeer = targetPeer)
if not result:
raise newException(ValueError, "Message could not be posted")

View File

@ -1,27 +0,0 @@
# Signature declarations consumed by json_rpc's `createRpcSigs` macro to
# generate typed client-side wrappers for the Waku v1 RPC API.

# Node info and settings
proc waku_version(): string
proc waku_info(): WakuInfo
proc waku_setMaxMessageSize(size: uint64): bool
proc waku_setMinPoW(pow: float): bool
proc waku_markTrustedPeer(enode: string): bool

# Asymmetric key management
proc waku_newKeyPair(): Identifier
proc waku_addPrivateKey(key: string): Identifier
proc waku_deleteKeyPair(id: Identifier): bool
proc waku_hasKeyPair(id: Identifier): bool
proc waku_getPublicKey(id: Identifier): PublicKey
proc waku_getPrivateKey(id: Identifier): PrivateKey

# Symmetric key management
proc waku_newSymKey(): Identifier
proc waku_addSymKey(key: string): Identifier
proc waku_generateSymKeyFromPassword(password: string): Identifier
proc waku_hasSymKey(id: Identifier): bool
proc waku_getSymKey(id: Identifier): SymKey
proc waku_deleteSymKey(id: Identifier): bool

# Message filters and posting
proc waku_newMessageFilter(options: WakuFilterOptions): Identifier
proc waku_deleteMessageFilter(id: Identifier): bool
proc waku_getFilterMessages(id: Identifier): seq[WakuFilterMessage]
proc waku_post(message: WakuPostMessage): bool

# Simulation helpers
proc wakusim_generateTraffic(amount: int): bool
proc wakusim_generateRandomTraffic(amount: int): bool

View File

@ -1,31 +0,0 @@
import
json_rpc/rpcserver, stew/endians2, nimcrypto/sysrand,
eth/[p2p, async_utils],
../../protocol/waku_protocol
proc generateTraffic(node: EthereumNode, amount = 100) {.async.} =
  ## Posts `amount` messages at roughly 1 ms intervals; message i is sent
  ## on the topic formed by the little-endian bytes of i.
  let body = @[byte 0]
  for n in 0 ..< amount:
    discard waku_protocol.postMessage(node, ttl = 10,
      topic = toBytesLE(n.uint32), payload = body)
    await sleepAsync(1.milliseconds)
proc generateRandomTraffic(node: EthereumNode, amount = 100) {.async.} =
  ## Posts `amount` messages at roughly 1 ms intervals, each on a freshly
  ## randomised 4-byte topic; retries until the RNG yields a full topic.
  let body = @[byte 0]
  for _ in 0 ..< amount:
    var topic: array[4, byte]
    while randomBytes(topic) != 4:
      discard
    discard waku_protocol.postMessage(node, ttl = 10, topic = topic,
      payload = body)
    await sleepAsync(1.milliseconds)
proc setupWakuSimRPC*(node: EthereumNode, rpcsrv: RpcServer) =
  ## Registers the simulation RPC endpoints on `rpcsrv`. Both calls start
  ## traffic generation in the background and return immediately; failures
  ## in the async generators are only traced, not reported to the caller.
  rpcsrv.rpc("wakusim_generateTraffic") do(amount: int) -> bool:
    traceAsyncErrors node.generateTraffic(amount)
    return true

  rpcsrv.rpc("wakusim_generateRandomTraffic") do(amount: int) -> bool:
    traceAsyncErrors node.generateRandomTraffic(amount)
    return true

View File

@ -1,204 +0,0 @@
import
options, strformat, os, osproc, net, confutils, strformat, chronicles, json, strutils,
eth/keys, eth/p2p/enode
const
defaults ="--log-level:DEBUG --log-metrics --metrics-server --rpc"
wakuNodeBin = "build" / "wakunode1"
metricsDir = "metrics"
portOffset = 2
type
NodeType = enum
FullNode = "",
LightNode = "--light-node:on",
Topology = enum
Star,
FullMesh,
DiscoveryBased # Whatever topology the discovery brings
WakuNetworkConf* = object
topology* {.
desc: "Set the network topology."
defaultValue: Star
name: "topology" .}: Topology
amount* {.
desc: "Amount of full nodes to be started."
defaultValue: 4
name: "amount" .}: int
testNodes* {.
desc: "Initialize light test nodes as part of network."
defaultValue: true
name: "test-nodes" .}: bool
testNodePeers* {.
desc: "Amount of peers a test node should connect to."
defaultValue: 1
name: "test-node-peers" .}: int
NodeInfo* = object
cmd: string
master: bool
enode: string
shift: int
label: string
proc initNodeCmd(nodeType: NodeType, shift: int, staticNodes: seq[string] = @[],
discovery = false, bootNodes: seq[string] = @[], topicInterest = false,
master = false, label: string): NodeInfo =
let
rng = keys.newRng()
keypair = KeyPair.random(rng[])
address = Address(ip: parseIpAddress("127.0.0.1"),
udpPort: (30303 + shift).Port, tcpPort: (30303 + shift).Port)
enode = ENode(pubkey: keypair.pubkey, address: address)
result.cmd = wakuNodeBin & " " & defaults & " "
result.cmd &= $nodeType & " "
result.cmd &= "--waku-topic-interest:" & $topicInterest & " "
result.cmd &= "--nodekey:" & $keypair.seckey & " "
result.cmd &= "--ports-shift:" & $shift & " "
if discovery:
result.cmd &= "--discovery:on" & " "
if bootNodes.len > 0:
for bootNode in bootNodes:
result.cmd &= "--bootnode:" & bootNode & " "
else:
result.cmd &= "--discovery:off" & " "
if staticNodes.len > 0:
for staticNode in staticNodes:
result.cmd &= "--staticnode:" & staticNode & " "
result.master = master
result.enode = $enode
result.shift = shift
result.label = label
debug "Node command created.", cmd=result.cmd
proc starNetwork(amount: int): seq[NodeInfo] =
  ## Builds a star topology: one master node plus `amount - 1` full nodes
  ## that each use the master's enode as their only static peer.
  let hub = initNodeCmd(FullNode, portOffset, master = true,
    label = "master node")
  result = @[hub]
  for idx in 1 ..< amount:
    let spoke = initNodeCmd(FullNode, portOffset + idx, @[hub.enode],
      label = "full node")
    result.add(spoke)
proc fullMeshNetwork(amount: int): seq[NodeInfo] =
  ## Builds a full-mesh topology: each new node is statically connected
  ## to every node created before it.
  debug "amount", amount
  for idx in 0 ..< amount:
    var known: seq[string]
    for existing in result:
      known.add(existing.enode)
    result.add(initNodeCmd(FullNode, portOffset + idx, known,
      label = "full node"))
proc discoveryNetwork(amount: int): seq[NodeInfo] =
  ## Builds a discovery-based topology: one boot node, and full nodes
  ## that locate each other via devp2p discovery through it.
  let boot = initNodeCmd(FullNode, portOffset, discovery = true,
    master = true, label = "boot node")
  result = @[boot]
  for idx in 1 ..< amount:
    result.add(initNodeCmd(FullNode, portOffset + idx, label = "full node",
      discovery = true, bootNodes = @[boot.enode]))
proc generatePrometheusConfig(nodes: seq[NodeInfo], outputFile: string) =
  ## Writes a Prometheus scrape configuration with one scrape target per
  ## simulation node (metrics servers listen on 8008 + ports-shift).
  var config = """
  global:
    scrape_interval: 1s

  scrape_configs:
    - job_name: "wakusim"
      static_configs:"""
  var count = 0
  for node in nodes:
    let port = 8008 + node.shift
    config &= &"""

      - targets: ['127.0.0.1:{port}']
        labels:
          node: '{count}'"""
    count += 1

  # Only the directory part is needed; the file component returned by
  # splitPath was previously bound to an unused local.
  let (path, _) = splitPath(outputFile)
  createDir(path)
  writeFile(outputFile, config)
# NOTE(review): proc name contains a typo ("proccess"); kept as-is because
# the call site below uses this spelling.
proc proccessGrafanaDashboard(nodes: int, inputFile: string,
    outputFile: string) =
  ## Expands a single-node Grafana dashboard into an all-nodes dashboard:
  ## every panel is duplicated once per node with title, id, grid position
  ## and the Prometheus `node="N"` label rewritten.
  # from https://github.com/status-im/nim-beacon-chain/blob/master/tests/simulation/process_dashboard.nim
  var
    inputData = parseFile(inputFile)
    panels = inputData["panels"].copy()
    numPanels = len(panels)
    gridHeight = 0
    outputData = inputData

  # Total grid height of one node's panel column (panels starting at x == 0).
  for panel in panels:
    if panel["gridPos"]["x"].getInt() == 0:
      gridHeight += panel["gridPos"]["h"].getInt()

  outputData["panels"] = %* []
  for nodeNum in 0 .. (nodes - 1):
    var
      nodePanels = panels.copy()
      panelIndex = 0
    for panel in nodePanels.mitems:
      # Rewrite title ("#0" -> "#N"), give each copy a unique id, and push
      # this node's panels below the previous node's block.
      panel["title"] = %* replace(panel["title"].getStr(), "#0", "#" & $nodeNum)
      panel["id"] = %* (panelIndex + (nodeNum * numPanels))
      panel["gridPos"]["y"] = %* (panel["gridPos"]["y"].getInt() + (nodeNum * gridHeight))
      var targets = panel["targets"]
      for target in targets.mitems:
        target["expr"] = %* replace(target["expr"].getStr(), "{node=\"0\"}", "{node=\"" & $nodeNum & "\"}")
      outputData["panels"].add(panel)
      panelIndex.inc()

  # Distinct uid/title so the aggregated dashboard can coexist with the
  # per-node one in Grafana.
  outputData["uid"] = %* (outputData["uid"].getStr() & "a")
  outputData["title"] = %* (outputData["title"].getStr() & " (all nodes)")
  writeFile(outputFile, pretty(outputData))
when isMainModule:
  # Entry point: build the requested topology, optionally add light test
  # nodes, generate metrics configs, then launch everything under multitail.
  let conf = WakuNetworkConf.load()

  var nodes: seq[NodeInfo]
  case conf.topology:
    of Star:
      nodes = starNetwork(conf.amount)
    of FullMesh:
      nodes = fullMeshNetwork(conf.amount)
    of DiscoveryBased:
      nodes = discoveryNetwork(conf.amount)

  if conf.testNodes:
    var staticnodes: seq[string]
    for i in 0..<conf.testNodePeers:
      # TODO: could also select nodes randomly
      staticnodes.add(nodes[i].enode)
    # light node with topic interest
    nodes.add(initNodeCmd(LightNode, 0, staticnodes, topicInterest = true,
      label = "light node topic interest"))
    # Regular light node
    nodes.add(initNodeCmd(LightNode, 1, staticnodes, label = "light node"))

  var commandStr = "multitail -s 2 -M 0 -x \"Waku Simulation\""
  var count = 0
  var sleepDuration = 0
  for node in nodes:
    if conf.topology in {Star, DiscoveryBased}:
      # The master/boot node starts immediately; the others wait one second
      # so it is up before they try to connect.
      sleepDuration = if node.master: 0
                      else: 1
    commandStr &= &" -cT ansi -t 'node #{count} {node.label}' -l 'sleep {sleepDuration}; {node.cmd}; echo [node execution completed]; while true; do sleep 100; done'"
    if conf.topology == FullMesh:
      # Stagger mesh nodes so each binds its ports before the next starts.
      sleepDuration += 1
    count += 1

  generatePrometheusConfig(nodes, metricsDir / "prometheus" / "prometheus.yml")
  proccessGrafanaDashboard(nodes.len,
    metricsDir / "waku-grafana-dashboard.json",
    metricsDir / "waku-sim-all-nodes-grafana-dashboard.json")

  let errorCode = execCmd(commandStr)
  if errorCode != 0:
    error "launch command failed", command=commandStr

View File

@ -1,16 +0,0 @@
import
chronos,
eth/[p2p, async_utils], eth/p2p/peer_pool
proc setBootNodes*(nodes: openArray[string]): seq[ENode] =
  ## Parses a list of enode URI strings into `ENode` values.
  # TODO: something more user friendly than an expect
  result = newSeqOfCap[ENode](nodes.len)
  for uri in nodes:
    let parsed = ENode.fromString(uri).expect("correct node")
    result.add(parsed)
proc connectToNodes*(node: EthereumNode, nodes: openArray[string]) =
  ## Kicks off direct connection attempts to each enode URI in `nodes`;
  ## connection failures are only traced, never raised.
  # TODO: something more user friendly than an assert
  for uri in nodes:
    let enode = ENode.fromString(uri).expect("correct node")
    traceAsyncErrors node.peerPool.connectToNode(newNode(enode))

View File

@ -1,150 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
confutils, chronos, json_rpc/rpcserver,
metrics, metrics/chronicles_support, metrics/chronos_httpserver,
stew/shims/net as stewNet,
eth/[keys, p2p],
eth/p2p/[enode, peer_pool],
../../whisper/[whispernodes, whisper_protocol],
../protocol/[waku_protocol, waku_bridge],
../../common/utils/nat,
./rpc/[waku, wakusim, key_storage], ./waku_helpers, ./config
const clientId = "Nimbus waku node"
proc run(config: WakuNodeConf, rng: ref HmacDrbgContext)
{.raises: [Defect, ValueError, RpcBindError, CatchableError, Exception]} =
## `udpPort` is only supplied to satisfy underlying APIs but is not
## actually a supported transport.
let udpPort = config.tcpPort
let natRes = setupNat(config.nat, clientId,
Port(config.tcpPort + config.portsShift),
Port(udpPort + config.portsShift))
if natRes.isErr():
fatal "setupNat failed", error = natRes.error
quit(1)
let
(ipExt, tcpPortExt, _) = natRes.get()
# TODO: EthereumNode should have a better split of binding address and
# external address. Also, can't have different ports as it stands now.
address = if ipExt.isNone():
Address(ip: parseIpAddress("0.0.0.0"),
tcpPort: Port(config.tcpPort + config.portsShift),
udpPort: Port(udpPort + config.portsShift))
else:
Address(ip: ipExt.get(),
tcpPort: Port(config.tcpPort + config.portsShift),
udpPort: Port(udpPort + config.portsShift))
bootnodes = if config.bootnodes.len > 0: setBootNodes(config.bootnodes)
elif config.fleet == prod: setBootNodes(StatusBootNodes)
elif config.fleet == staging: setBootNodes(StatusBootNodesStaging)
elif config.fleet == test : setBootNodes(StatusBootNodesTest)
else: @[]
# Set-up node
var node = newEthereumNode(config.nodekey, address, NetworkId(1), clientId,
addAllCapabilities = false, bootstrapNodes = bootnodes, bindUdpPort = address.udpPort, bindTcpPort = address.tcpPort, rng = rng)
if not config.bootnodeOnly:
node.addCapability Waku # Always enable Waku protocol
var topicInterest: Option[seq[waku_protocol.Topic]]
var bloom: Option[Bloom]
if config.wakuTopicInterest:
var topics: seq[waku_protocol.Topic]
topicInterest = some(topics)
else:
bloom = some(fullBloom())
let wakuConfig = WakuConfig(powRequirement: config.wakuPow,
bloom: bloom,
isLightNode: config.lightNode,
maxMsgSize: waku_protocol.defaultMaxMsgSize,
topics: topicInterest)
node.configureWaku(wakuConfig)
if config.whisper or config.whisperBridge:
node.addCapability Whisper
node.protocolState(Whisper).config.powRequirement = 0.002
if config.whisperBridge:
node.shareMessageQueue()
let connectedFut = node.connectToNetwork(not config.noListen,
config.discovery)
connectedFut.callback = proc(data: pointer) {.gcsafe.} =
{.gcsafe.}:
if connectedFut.failed:
fatal "connectToNetwork failed", msg = connectedFut.readError.msg
quit(1)
if not config.bootnodeOnly:
# Optionally direct connect with a set of nodes
if config.staticnodes.len > 0: connectToNodes(node, config.staticnodes)
elif config.fleet == prod: connectToNodes(node, WhisperNodes)
elif config.fleet == staging: connectToNodes(node, WhisperNodesStaging)
elif config.fleet == test: connectToNodes(node, WhisperNodesTest)
if config.rpc:
let ta = initTAddress(config.rpcAddress,
Port(config.rpcPort + config.portsShift))
var rpcServer = newRpcHttpServer([ta])
let keys = newKeyStorage()
setupWakuRPC(node, keys, rpcServer, rng)
setupWakuSimRPC(node, rpcServer)
rpcServer.start()
if config.logAccounting:
# https://github.com/nim-lang/Nim/issues/17369
var logPeerAccounting: proc(udata: pointer) {.gcsafe, raises: [Defect].}
logPeerAccounting = proc(udata: pointer) =
{.gcsafe.}:
for peer in node.peerPool.peers:
let
sent = peer.state(Waku).accounting.sent
received = peer.state(Waku).accounting.received
id = peer.network.toEnode
info "Peer accounting", id, sent, received
peer.state(Waku).accounting = Accounting(sent: 0, received: 0)
discard setTimer(Moment.fromNow(2.seconds), logPeerAccounting)
discard setTimer(Moment.fromNow(2.seconds), logPeerAccounting)
if config.metricsServer:
let
address = config.metricsServerAddress
port = config.metricsServerPort + config.portsShift
info "Starting metrics HTTP server", address, port
startMetricsHttpServer($address, Port(port))
if config.logMetrics:
# https://github.com/nim-lang/Nim/issues/17369
var logMetrics: proc(udata: pointer) {.gcsafe, raises: [Defect].}
logMetrics = proc(udata: pointer) =
{.gcsafe.}:
let
connectedPeers = rlpx_connected_peers
validEnvelopes = waku_protocol.envelopes_valid
droppedEnvelopes = waku_protocol.envelopes_dropped
info "Node metrics", connectedPeers, validEnvelopes, droppedEnvelopes
discard setTimer(Moment.fromNow(2.seconds), logMetrics)
discard setTimer(Moment.fromNow(2.seconds), logMetrics)
runForever()
{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
when isMainModule:
  # CLI entry point: parse config, then either print a fresh node key or
  # run the node.
  let
    rng = keys.newRng()
    conf = WakuNodeConf.load()

  if conf.logLevel != LogLevel.NONE:
    setLogLevel(conf.logLevel)

  case conf.cmd
  of genNodekey:
    echo PrivateKey.random(rng[])
  of noCommand:
    run(conf, rng)

View File

@ -1,3 +0,0 @@
# Waku v1 protocol
This folder contains implementations of [Waku v1 protocols](https://specs.vac.dev/specs/waku/v1/waku-1.html).

View File

@ -1,22 +0,0 @@
#
# Waku - Whisper Bridge
# (c) Copyright 2018-2021
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
eth/p2p,
../../whisper/whisper_protocol,
./waku_protocol
proc shareMessageQueue*(node: EthereumNode) =
  ## Bridges Waku and Whisper by making both protocol states reference
  ## the same underlying message queue, so envelopes received on one
  ## protocol are relayed on the other.
  node.protocolState(Waku).queue = node.protocolState(Whisper).queue

View File

@ -1,91 +0,0 @@
#
# Waku Mail Client & Server
# (c) Copyright 2018-2021
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
chronos,
eth/[p2p, async_utils],
./waku_protocol
const
requestCompleteTimeout = chronos.seconds(5)
type
Cursor = seq[byte]
MailRequest* = object
lower*: uint32 ## Unix timestamp; oldest requested envelope's creation time
upper*: uint32 ## Unix timestamp; newest requested envelope's creation time
bloom*: seq[byte] ## Bloom filter to apply on the envelopes
limit*: uint32 ## Maximum amount of envelopes to return
cursor*: Cursor ## Optional cursor
proc requestMail*(node: EthereumNode, peerId: NodeId, request: MailRequest,
    symKey: SymKey, requests = 10): Future[Option[Cursor]] {.async.} =
  ## Send p2p mail request and check request complete.
  ## If result is none, an error occurred. If result is a non-empty cursor,
  ## more envelopes are available. `requests` bounds the number of
  ## follow-up (cursor) requests made recursively.
  # TODO: Perhaps don't go the recursive route or could use the actual response
  # proc to implement this (via a handler) and store the necessary data in the
  # WakuPeer object.

  # TODO: Several requestMail calls in parallel can create issues with handling
  # the wrong response to a request. Can additionally check the requestId but
  # that would only solve it half. Better to use the requestResponse mechanism.

  # TODO: move this check out of requestMail?
  let peer = node.getPeer(peerId, Waku)
  if not peer.isSome():
    error "Invalid peer"
    return result
  elif not peer.get().state(Waku).trusted:
    # Only trusted peers (i.e. mail servers) are queried.
    return result

  # RLP-encode the request and encrypt it with the shared symmetric key.
  var writer = initRlpWriter()
  writer.append(request)
  let payload = writer.finish()
  let data = encode(node.rng[], Payload(payload: payload, symKey: some(symKey)))
  if not data.isSome():
    error "Encoding of payload failed"
    return result

  # TODO: should this envelope be valid in terms of ttl, PoW, etc.?
  let env = Envelope(expiry:0, ttl: 0, data: data.get(), nonce: 0)
  # Send the request
  traceAsyncErrors peer.get().p2pRequest(env)

  # Wait for the Request Complete packet
  var f: Future[Waku.p2pRequestComplete] = peer.get().nextMsg(Waku.p2pRequestComplete)
  if await f.withTimeout(requestCompleteTimeout):
    let response = f.read()
    # TODO: I guess the idea is to check requestId (Hash) also?
    let requests = requests - 1
    # If there is cursor data, do another request
    if response.cursor.len > 0 and requests > 0:
      var newRequest = request
      newRequest.cursor = response.cursor
      return await requestMail(node, peerId, newRequest, symKey, requests)
    else:
      return some(response.cursor)
  else:
    error "p2pRequestComplete timeout"
    return result
proc p2pRequestHandler(peer: Peer, envelope: Envelope) =
  # Mail server p2p request implementation
  # NOTE(review): intentionally a stub — registered below so the protocol
  # accepts p2pRequest packets, but no server logic was ever implemented.
  discard
proc enableMailServer*(node: EthereumNode) =
  ## Registers the (stub) p2pRequest handler, turning this node into a
  ## mail server endpoint.
  # TODO: This could become part of an init call for an actual `MailServer`
  # object.
  node.registerP2PRequestHandler(p2pRequestHandler)

View File

@ -1,694 +0,0 @@
#
# Waku
# (c) Copyright 2018-2021
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
## Waku
## *******
##
## Waku is a fork of Whisper.
##
## Waku is a gossip protocol that synchronizes a set of messages across nodes
## with attention given to sender and recipient anonymity. Messages are
## categorized by a topic and stay alive in the network based on a time-to-live
## measured in seconds. Spam prevention is based on proof-of-work, where large
## or long-lived messages must spend more work.
##
## Implementation should be according to Waku specification defined here:
## https://github.com/vacp2p/specs/blob/master/waku/waku.md
##
## Example usage
## ----------
## First an `EthereumNode` needs to be created, either with all capabilities set
## or with specifically the Waku capability set.
## The latter can be done like this:
##
## .. code-block::nim
## var node = newEthereumNode(keypair, address, netId, nil,
## addAllCapabilities = false)
## node.addCapability Waku
##
## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done.
## However, they only make real sense after ``connectToNetwork`` was started. As
## else there will be no peers to send and receive messages from.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
options, tables, times, chronos, chronicles, metrics,
eth/[keys, async_utils, p2p],
../../whisper/whisper_types,
eth/trie/trie_defs
export
whisper_types
logScope:
topics = "waku"
const
defaultQueueCapacity = 2048
wakuVersion* = 1 ## Waku version.
wakuVersionStr* = $wakuVersion ## Waku version.
defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node.
defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max
## message size. This can never be larger than the maximum RLPx message size.
messageInterval* = chronos.milliseconds(300) ## Interval at which messages are
## send to peers, in ms.
pruneInterval* = chronos.milliseconds(1000) ## Interval at which message
## queue is pruned, in ms.
topicInterestMax = 10000
type
WakuConfig* = object
powRequirement*: float64
bloom*: Option[Bloom]
isLightNode*: bool
maxMsgSize*: uint32
confirmationsEnabled*: bool
rateLimits*: Option[RateLimits]
topics*: Option[seq[whisper_types.Topic]]
Accounting* = ref object
sent*: uint
received*: uint
WakuPeer = ref object
initialized: bool # when successfully completed the handshake
powRequirement*: float64
bloom*: Bloom
isLightNode*: bool
trusted*: bool
topics*: Option[seq[whisper_types.Topic]]
received: HashSet[Hash]
accounting*: Accounting
P2PRequestHandler* = proc(peer: Peer, envelope: Envelope)
{.gcsafe, raises: [Defect].}
EnvReceivedHandler* = proc(envelope: Envelope) {.gcsafe, raises: [Defect].}
WakuNetwork = ref object
queue*: ref Queue
filters*: Filters
config*: WakuConfig
p2pRequestHandler*: P2PRequestHandler
envReceivedHandler*: EnvReceivedHandler
RateLimits* = object
# TODO: uint or specifically uint32?
limitIp*: uint
limitPeerId*: uint
limitTopic*: uint
StatusOptions* = object
powRequirement*: Option[(float64)]
bloomFilter*: Option[Bloom]
lightNode*: Option[bool]
confirmationsEnabled*: Option[bool]
rateLimits*: Option[RateLimits]
topicInterest*: Option[seq[whisper_types.Topic]]
KeyKind* = enum
powRequirementKey,
bloomFilterKey,
lightNodeKey,
confirmationsEnabledKey,
rateLimitsKey,
topicInterestKey
template countSomeFields*(x: StatusOptions): int =
  ## Counts how many Option fields of `x` are set; used to size the RLP
  ## list when encoding `StatusOptions`.
  var count = 0
  for f in fields(x):
    if f.isSome():
      inc count
  count
proc append*(rlpWriter: var RlpWriter, value: StatusOptions) =
  ## RLP-encodes `StatusOptions` as a list of (key, value) pairs,
  ## emitting only the fields that are set. Counterpart of `read` below.
  var list = initRlpList(countSomeFields(value))
  if value.powRequirement.isSome():
    # The float is bit-cast to uint64 for the wire; `read` reverses this.
    list.append((powRequirementKey, cast[uint64](value.powRequirement.get())))
  if value.bloomFilter.isSome():
    list.append((bloomFilterKey, @(value.bloomFilter.get())))
  if value.lightNode.isSome():
    list.append((lightNodeKey, value.lightNode.get()))
  if value.confirmationsEnabled.isSome():
    list.append((confirmationsEnabledKey, value.confirmationsEnabled.get()))
  if value.rateLimits.isSome():
    list.append((rateLimitsKey, value.rateLimits.get()))
  if value.topicInterest.isSome():
    list.append((topicInterestKey, value.topicInterest.get()))

  let bytes = list.finish()

  try:
    rlpWriter.append(rlpFromBytes(bytes))
  except RlpError as e:
    # bytes is valid rlp just created here, rlpFromBytes should thus never fail
    raiseAssert e.msg
proc read*(rlp: var Rlp, T: typedesc[StatusOptions]):
    T {.raises: [RlpError, Defect].}=
  ## Decodes `StatusOptions` from an RLP list of (key, value) pairs.
  ## Unknown keys are skipped for forward compatibility.
  if not rlp.isList():
    raise newException(RlpTypeMismatch,
      "List expected, but the source RLP is not a list.")
  let sz = rlp.listLen()
  # We already know that we are working with a list
  doAssert rlp.enterList()
  for i in 0 ..< sz:
    rlp.tryEnterList()

    var k: KeyKind
    try:
      k = rlp.read(KeyKind)
    except RlpTypeMismatch:
      # skip unknown keys and their value
      rlp.skipElem()
      rlp.skipElem()
      continue

    case k
    of powRequirementKey:
      # Reverse of the uint64 bit-cast done in `append`.
      let pow = rlp.read(uint64)
      result.powRequirement = some(cast[float64](pow))
    of bloomFilterKey:
      let bloom = rlp.read(seq[byte])
      if bloom.len != bloomSize:
        raise newException(RlpTypeMismatch, "Bloomfilter size mismatch")
      var bloomFilter: Bloom
      bloomFilter.bytesCopy(bloom)
      result.bloomFilter = some(bloomFilter)
    of lightNodeKey:
      result.lightNode = some(rlp.read(bool))
    of confirmationsEnabledKey:
      result.confirmationsEnabled = some(rlp.read(bool))
    of rateLimitsKey:
      result.rateLimits = some(rlp.read(RateLimits))
    of topicInterestKey:
      result.topicInterest = some(rlp.read(seq[whisper_types.Topic]))
proc allowed*(msg: Message, config: WakuConfig): bool =
  ## Envelope admission check: size, PoW, and topic-interest or bloom
  ## filter. Increments `envelopes_dropped` with a reason label when the
  ## message is rejected.
  # Check max msg size, already happens in RLPx but there is a specific waku
  # max msg size which should always be < RLPx max msg size
  if msg.size > config.maxMsgSize:
    envelopes_dropped.inc(labelValues = ["too_large"])
    warn "Message size too large", size = msg.size
    return false

  if msg.pow < config.powRequirement:
    envelopes_dropped.inc(labelValues = ["low_pow"])
    warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement
    return false

  if config.topics.isSome():
    # Topic-interest mode: accept only exact topic matches.
    if msg.env.topic notin config.topics.get():
      envelopes_dropped.inc(labelValues = ["topic_mismatch"])
      warn "Message topic does not match Waku topic list"
      return false
  else:
    # Bloom mode: accept anything matching the configured bloom filter.
    if config.bloom.isSome() and not bloomFilterMatch(config.bloom.get(), msg.bloom):
      envelopes_dropped.inc(labelValues = ["bloom_filter_mismatch"])
      warn "Message does not match node bloom filter"
      return false

  return true
proc run(peer: Peer) {.gcsafe, async, raises: [Defect].}
proc run(node: EthereumNode, network: WakuNetwork)
{.gcsafe, async, raises: [Defect].}
proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} =
  ## Initializes the shared Waku protocol state with defaults (full bloom,
  ## default PoW, full node, no topic-interest) and spawns the periodic
  ## maintenance loop.
  new(network.queue)
  network.queue[] = initQueue(defaultQueueCapacity)
  network.filters = initTable[string, Filter]()
  network.config.bloom = some(fullBloom())
  network.config.powRequirement = defaultMinPow
  network.config.isLightNode = false
  # RateLimits and confirmations are not yet implemented so we set confirmations
  # to false and we don't pass RateLimits at all.
  network.config.confirmationsEnabled = false
  network.config.rateLimits = none(RateLimits)
  network.config.maxMsgSize = defaultMaxMsgSize
  network.config.topics = none(seq[whisper_types.Topic])
  asyncSpawn node.run(network)
p2pProtocol Waku(version = wakuVersion,
rlpxName = "waku",
peerState = WakuPeer,
networkState = WakuNetwork):
onPeerConnected do (peer: Peer):
trace "onPeerConnected Waku"
let
wakuNet = peer.networkState
wakuPeer = peer.state
let options = StatusOptions(
powRequirement: some(wakuNet.config.powRequirement),
bloomFilter: wakuNet.config.bloom,
lightNode: some(wakuNet.config.isLightNode),
confirmationsEnabled: some(wakuNet.config.confirmationsEnabled),
rateLimits: wakuNet.config.rateLimits,
topicInterest: wakuNet.config.topics)
let m = await peer.status(options,
timeout = chronos.milliseconds(5000))
wakuPeer.powRequirement = m.options.powRequirement.get(defaultMinPow)
wakuPeer.bloom = m.options.bloomFilter.get(fullBloom())
wakuPeer.isLightNode = m.options.lightNode.get(false)
if wakuPeer.isLightNode and wakuNet.config.isLightNode:
# No sense in connecting two light nodes so we disconnect
raise newException(UselessPeerError, "Two light nodes connected")
wakuPeer.topics = m.options.topicInterest
if wakuPeer.topics.isSome():
if wakuPeer.topics.get().len > topicInterestMax:
raise newException(UselessPeerError, "Topic-interest is too large")
if wakuNet.config.topics.isSome():
raise newException(UselessPeerError,
"Two Waku nodes with topic-interest connected")
wakuPeer.received.init()
wakuPeer.trusted = false
wakuPeer.accounting = Accounting(sent: 0, received: 0)
wakuPeer.initialized = true
# No timer based queue processing for a light node.
if not wakuNet.config.isLightNode:
asyncSpawn peer.run()
debug "Waku peer initialized", peer
handshake:
proc status(peer: Peer, options: StatusOptions)
proc messages(peer: Peer, envelopes: openarray[Envelope]) =
if not peer.state.initialized:
warn "Handshake not completed yet, discarding messages"
return
for envelope in envelopes:
# check if expired or in future, or ttl not 0
if not envelope.valid():
warn "Expired or future timed envelope", peer
# disconnect from peers sending bad envelopes
# await peer.disconnect(SubprotocolReason)
continue
peer.state.accounting.received += 1
let msg = initMessage(envelope)
if not msg.allowed(peer.networkState.config):
# disconnect from peers sending bad envelopes
# await peer.disconnect(SubprotocolReason)
continue
# This peer send this message thus should not receive it again.
# If this peer has the message in the `received` set already, this means
# it was either already received here from this peer or send to this peer.
# Either way it will be in our queue already (and the peer should know
# this) and this peer is sending duplicates.
# Note: geth does not check if a peer has send a message to them before
# broadcasting this message. This too is seen here as a duplicate message
# (see above comment). If we want to seperate these cases (e.g. when peer
# rating), then we have to add a "peer.state.send" HashSet.
# Note: it could also be a race between the arrival of a message send by
# this node to a peer and that same message arriving from that peer (after
# it was received from another peer) here.
if peer.state.received.containsOrIncl(msg.hash):
envelopes_dropped.inc(labelValues = ["duplicate"])
trace "Peer sending duplicate messages", peer, hash = $msg.hash
# await peer.disconnect(SubprotocolReason)
continue
# This can still be a duplicate message, but from another peer than
# the peer who send the message.
if peer.networkState.queue[].add(msg):
# notify filters of this message
peer.networkState.filters.notify(msg)
# trigger handler on received envelope, if registered
if not peer.networkState.envReceivedHandler.isNil():
peer.networkState.envReceivedHandler(envelope)
nextID 22
proc statusOptions(peer: Peer, options: StatusOptions) =
if not peer.state.initialized:
warn "Handshake not completed yet, discarding statusOptions"
return
if options.topicInterest.isSome():
peer.state.topics = options.topicInterest
elif options.bloomFilter.isSome():
peer.state.bloom = options.bloomFilter.get()
peer.state.topics = none(seq[whisper_types.Topic])
if options.powRequirement.isSome():
peer.state.powRequirement = options.powRequirement.get()
if options.lightNode.isSome():
peer.state.isLightNode = options.lightNode.get()
nextID 126
proc p2pRequest(peer: Peer, envelope: Envelope) =
if not peer.networkState.p2pRequestHandler.isNil():
peer.networkState.p2pRequestHandler(peer, envelope)
proc p2pMessage(peer: Peer, envelopes: openarray[Envelope]) =
if peer.state.trusted:
# when trusted we can bypass any checks on envelope
for envelope in envelopes:
let msg = Message(env: envelope, isP2P: true)
peer.networkState.filters.notify(msg)
# Following message IDs are not part of EIP-627, but are added and used by
# the Status application, we ignore them for now.
nextID 11
proc batchAcknowledged(peer: Peer) = discard
proc messageResponse(peer: Peer) = discard
nextID 123
requestResponse:
proc p2pSyncRequest(peer: Peer) = discard
proc p2pSyncResponse(peer: Peer) = discard
proc p2pRequestComplete(peer: Peer, requestId: Hash, lastEnvelopeHash: Hash,
cursor: seq[byte]) = discard
# TODO:
# In the current specification the parameters are not wrapped in a regular
# envelope as is done for the P2P Request packet. If we could alter this in
# the spec it would be a cleaner separation between Waku and Mail server /
# client.
# Also, if a requestResponse block is used, a reqestId will automatically
# be added by the protocol DSL.
# However the requestResponse block in combination with p2pRequest cannot be
# used due to the unfortunate fact that the packet IDs are not consecutive,
# and nextID is not recognized in between these. The nextID behaviour could
# be fixed, however it would be cleaner if the specification could be
# changed to have these IDs to be consecutive.
# 'Runner' calls ---------------------------------------------------------------
proc processQueue(peer: Peer) {.raises: [Defect].} =
  ## Flushes to `peer` every queued envelope it has not yet seen and that
  ## passes its PoW and topic-interest/bloom requirements.
  # Send to peer all valid and previously not sent envelopes in the queue.
  var
    envelopes: seq[Envelope] = @[]
    wakuPeer = peer.state(Waku)
    wakuNet = peer.networkState(Waku)

  for message in wakuNet.queue.items:
    if wakuPeer.received.contains(message.hash):
      # trace "message was already send to peer", hash = $message.hash, peer
      continue

    if message.pow < wakuPeer.powRequirement:
      trace "Message PoW too low for peer", pow = message.pow,
        powReq = wakuPeer.powRequirement
      continue

    if wakuPeer.topics.isSome():
      if message.env.topic notin wakuPeer.topics.get():
        trace "Message does not match topics list"
        continue
    else:
      if not bloomFilterMatch(wakuPeer.bloom, message.bloom):
        trace "Message does not match peer bloom filter"
        continue

    trace "Adding envelope"
    envelopes.add(message.env)
    # Track accounting and mark as sent so it is never re-sent to this peer.
    wakuPeer.accounting.sent += 1
    wakuPeer.received.incl(message.hash)

  if envelopes.len() > 0:
    trace "Sending envelopes", amount=envelopes.len
    # Ignore failure of sending messages, this could occur when the connection
    # gets dropped
    traceAsyncErrors peer.messages(envelopes)
proc run(peer: Peer) {.async, raises: [Defect].} =
  ## Per-peer send loop: periodically flushes queued envelopes to `peer`
  ## until it disconnects.
  while peer.connectionState notin {Disconnecting, Disconnected}:
    peer.processQueue()
    await sleepAsync(messageInterval)
proc pruneReceived(node: EthereumNode) =
  ## Drops hashes from each peer's `received` set that are no longer in
  ## the message queue, bounding per-peer memory usage.
  if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ...
    var wakuNet = node.protocolState(Waku)

    for peer in node.protocolPeers(Waku):
      if not peer.initialized:
        continue

      # NOTE: Perhaps alter the queue prune call to keep track of a HashSet
      # of pruned messages (as these should be smaller), and diff this with
      # the received sets.
      peer.received = intersection(peer.received, wakuNet.queue.itemHashes)
proc run(node: EthereumNode, network: WakuNetwork) {.async, raises: [Defect].} =
  ## Background maintenance loop: prunes expired envelopes and the
  ## per-peer received sets once per `pruneInterval`.
  while true:
    # prune message queue every second
    # TTL unit is in seconds, so this should be sufficient?
    network.queue[].prune()
    # pruning the received sets is not necessary for correct workings
    # but simply from keeping the sets growing indefinitely
    node.pruneReceived()
    await sleepAsync(pruneInterval)
# Private EthereumNode calls ---------------------------------------------------
proc sendP2PMessage(node: EthereumNode, peerId: NodeId,
    envelopes: openarray[Envelope]): bool =
  ## Sends envelopes directly (p2pMessage packet) to the peer with
  ## `peerId`, bypassing the queue. Returns true once a matching peer is
  ## found; false if no connected Waku peer has that id.
  for peer in node.peers(Waku):
    if peer.remote.id == peerId:
      let f = peer.p2pMessage(envelopes)
      # Can't make p2pMessage not raise so this is the "best" option I can think
      # of instead of using asyncSpawn and still keeping the call not async.
      f.callback = proc(data: pointer) {.gcsafe, raises: [Defect].} =
        if f.failed:
          warn "P2PMessage send failed", msg = f.readError.msg
      return true
proc queueMessage(node: EthereumNode, msg: Message): bool =
  ## Adds `msg` to the shared queue after running the same admission
  ## checks as for remote messages. Returns false if the message is not
  ## allowed; true otherwise (even if the queue rejected a duplicate —
  ## only `allowed` failures return false here).
  var wakuNet = node.protocolState(Waku)
  # We have to do the same checks here as in the messages proc not to leak
  # any information that the message originates from this node.
  if not msg.allowed(wakuNet.config):
    return false

  trace "Adding message to queue", hash = $msg.hash
  if wakuNet.queue[].add(msg):
    # Also notify our own filters of the message we are sending,
    # e.g. msg from local Dapp to Dapp
    wakuNet.filters.notify(msg)

  return true
# Public EthereumNode calls ----------------------------------------------------
proc postEncoded*(node: EthereumNode, ttl: uint32,
topic: whisper_types.Topic, encodedPayload: seq[byte],
powTime = 1'f,
powTarget = defaultMinPow,
targetPeer = none[NodeId]()): bool =
## Post a message from pre-encoded payload on the message queue.
## This will be processed at the next `messageInterval`.
## The encodedPayload must be encoded according to RFC 26/WAKU-PAYLOAD
## at https://rfc.vac.dev/spec/26/
var env = Envelope(expiry:epochTime().uint32 + ttl,
ttl: ttl, topic: topic, data: encodedPayload, nonce: 0)
# Allow lightnode to post only direct p2p messages
if targetPeer.isSome():
return node.sendP2PMessage(targetPeer.get(), [env])
else:
# non direct p2p message can not have ttl of 0
if env.ttl == 0:
return false
var msg = initMessage(env, powCalc = false)
# XXX: make this non blocking or not?
# In its current blocking state, it could be noticed by a peer that no
# messages are send for a while, and thus that mining PoW is done, and
# that next messages contains a message originated from this peer
# zah: It would be hard to execute this in a background thread at the
# moment. We'll need a way to send custom "tasks" to the async message
# loop (e.g. AD2 support for AsyncChannels).
if not msg.sealEnvelope(powTime, powTarget):
return false
# need to check expiry after mining PoW
if not msg.env.valid():
return false
result = node.queueMessage(msg)
# Allows light nodes to post via untrusted messages packet.
# Queue gets processed immediatly as the node sends only its own messages,
# so the privacy ship has already sailed anyhow.
# TODO:
# - Could be still a concern in terms of efficiency, if multiple messages
# need to be send.
# - For Waku Mode, the checks in processQueue are rather useless as the
# idea is to connect only to 1 node? Also refactor in that case.
if node.protocolState(Waku).config.isLightNode:
for peer in node.peers(Waku):
peer.processQueue()
proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](),
symKey = none[SymKey](), src = none[PrivateKey](),
ttl: uint32, topic: whisper_types.Topic, payload: seq[byte],
padding = none[seq[byte]](), powTime = 1'f,
powTarget = defaultMinPow,
targetPeer = none[NodeId]()): bool =
## Post a message on the message queue which will be processed at the
## next `messageInterval`.
##
## NOTE: This call allows a post without encryption. If encryption is
## mandatory it should be enforced a layer up
let payload = encode(node.rng[], Payload(
payload: payload, src: src, dst: pubKey, symKey: symKey, padding: padding))
if payload.isSome():
return node.postEncoded(ttl, topic, payload.get(), powTime, powTarget, targetPeer)
else:
error "Encoding of payload failed"
return false
proc subscribeFilter*(node: EthereumNode, filter: Filter,
handler:FilterMsgHandler = nil): string =
## Initiate a filter for incoming/outgoing messages. Messages can be
## retrieved with the `getFilterMessages` call or with a provided
## `FilterMsgHandler`.
##
## NOTE: This call allows for a filter without decryption. If encryption is
## mandatory it should be enforced a layer up.
return subscribeFilter(
node.rng[], node.protocolState(Waku).filters, filter, handler)
proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool =
## Remove a previously subscribed filter.
var filter: Filter
return node.protocolState(Waku).filters.take(filterId, filter)
proc getFilterMessages*(node: EthereumNode, filterId: string):
seq[ReceivedMessage] {.raises: [KeyError, Defect].} =
## Get all the messages currently in the filter queue. This will reset the
## filter message queue.
return node.protocolState(Waku).filters.getFilterMessages(filterId)
proc filtersToBloom*(node: EthereumNode): Bloom =
## Returns the bloom filter of all topics of all subscribed filters.
return node.protocolState(Waku).filters.toBloom()
proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} =
## Sets the PoW requirement for this node, will also send
## this new PoW requirement to all connected peers.
##
## Failures when sending messages to peers will not be reported.
# NOTE: do we need a tolerance of old PoW for some time?
node.protocolState(Waku).config.powRequirement = powReq
var futures: seq[Future[void]] = @[]
let list = StatusOptions(powRequirement: some(powReq))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} =
## Sets the bloom filter for this node, will also send
## this new bloom filter to all connected peers.
##
## Failures when sending messages to peers will not be reported.
# NOTE: do we need a tolerance of old bloom filter for some time?
node.protocolState(Waku).config.bloom = some(bloom)
# reset topics
node.protocolState(Waku).config.topics = none(seq[whisper_types.Topic])
var futures: seq[Future[void]] = @[]
let list = StatusOptions(bloomFilter: some(bloom))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc setTopicInterest*(node: EthereumNode, topics: seq[whisper_types.Topic]):
Future[bool] {.async.} =
if topics.len > topicInterestMax:
return false
node.protocolState(Waku).config.topics = some(topics)
var futures: seq[Future[void]] = @[]
let list = StatusOptions(topicInterest: some(topics))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
return true
proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool =
## Set the maximum allowed message size.
## Can not be set higher than ``defaultMaxMsgSize``.
if size > defaultMaxMsgSize:
warn "size > defaultMaxMsgSize"
return false
node.protocolState(Waku).config.maxMsgSize = size
return true
proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool =
## Set a connected peer as trusted.
for peer in node.peers(Waku):
if peer.remote.id == peerId:
peer.state(Waku).trusted = true
return true
proc setLightNode*(node: EthereumNode, isLightNode: bool) {.async.} =
## Set this node as a Waku light node.
node.protocolState(Waku).config.isLightNode = isLightNode
# TODO: Add starting/stopping of `processQueue` loop depending on value of isLightNode.
var futures: seq[Future[void]] = @[]
let list = StatusOptions(lightNode: some(isLightNode))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc configureWaku*(node: EthereumNode, config: WakuConfig) =
## Apply a Waku configuration.
##
## NOTE: Should be run before connection is made with peers as some
## of the settings are only communicated at peer handshake.
node.protocolState(Waku).config = config
proc registerP2PRequestHandler*(node: EthereumNode,
customHandler: P2PRequestHandler) =
node.protocolState(Waku).p2pRequestHandler = customHandler
proc registerEnvReceivedHandler*(node: EthereumNode,
customHandler: EnvReceivedHandler) =
node.protocolState(Waku).envReceivedHandler = customHandler
proc resetMessageQueue*(node: EthereumNode) =
## Full reset of the message queue.
##
## NOTE: Not something that should be run in normal circumstances.
node.protocolState(Waku).queue[] = initQueue(defaultQueueCapacity)

View File

@ -5,12 +5,5 @@ proc post_waku_v2_relay_v1_message(topic: PubsubTopic, message: WakuMessageRPC):
proc get_waku_v2_relay_v1_messages(topic: PubsubTopic): seq[WakuMessageRPC]
# Relay Private API
# Symmetric
proc get_waku_v2_private_v1_symmetric_key(): SymKey
proc post_waku_v2_private_v1_symmetric_message(topic: string, message: WakuMessageRPC, symkey: string): bool
proc get_waku_v2_private_v1_symmetric_messages(topic: string, symkey: string): seq[WakuMessageRPC]
# Asymmetric
proc get_waku_v2_private_v1_asymmetric_keypair(): WakuKeyPair
proc post_waku_v2_private_v1_asymmetric_message(topic: string, message: WakuMessageRPC, publicKey: string): bool
proc get_waku_v2_private_v1_asymmetric_messages(topic: string, privateKey: string): seq[WakuMessageRPC]
# Support for the Relay Private API has been deprecated.
# This API existed for compatibility with the Waku v1 spec and encryption scheme.

View File

@ -8,11 +8,9 @@ import
json_rpc/rpcclient
import
../../../waku_core,
../../../utils/compat,
./types
export types
../message
export message
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]

View File

@ -13,11 +13,9 @@ import
../../../../common/base64,
../../../waku_core,
../../../waku_relay,
../../../utils/compat,
../../waku_node,
../../message_cache,
./types
../message
logScope:
topics = "waku node jsonrpc relay_api"
@ -111,148 +109,6 @@ proc installRelayApiHandlers*(node: WakuNode, server: RpcServer, cache: MessageC
## Waku Relay Private JSON-RPC API (Whisper/Waku v1 compatibility)
func keyInfo(symkey: Option[SymKey], privateKey: Option[PrivateKey]): KeyInfo =
if symkey.isSome():
KeyInfo(kind: Symmetric, symKey: symkey.get())
elif privateKey.isSome():
KeyInfo(kind: Asymmetric, privKey: privateKey.get())
else:
KeyInfo(kind: KeyKind.None)
proc toWakuMessageRPC(message: WakuMessage,
symkey = none(SymKey),
privateKey = none(PrivateKey)): WakuMessageRPC =
let
keyInfo = keyInfo(symkey, privateKey)
decoded = decodePayload(message, keyInfo)
WakuMessageRPC(payload: base64.encode(decoded.get().payload),
contentTopic: some(message.contentTopic),
version: some(message.version),
timestamp: some(message.timestamp))
proc installRelayPrivateApiHandlers*(node: WakuNode, server: RpcServer, cache: MessageCache) =
server.rpc("get_waku_v2_private_v1_symmetric_key") do () -> SymKey:
## Generates and returns a symmetric key for message encryption and decryption
debug "get_waku_v2_private_v1_symmetric_key"
var key: SymKey
if randomBytes(key) != key.len:
raise newException(ValueError, "Failed generating key")
return key
server.rpc("post_waku_v2_private_v1_symmetric_message") do (topic: string, msg: WakuMessageRPC, symkey: string) -> bool:
## Publishes and encrypts a message to be relayed on a PubSub topic
debug "post_waku_v2_private_v1_symmetric_message"
let payloadRes = base64.decode(msg.payload)
if payloadRes.isErr():
raise newException(ValueError, "invalid payload format: " & payloadRes.error)
let payloadV1 = Payload(
payload: payloadRes.value,
dst: none(keys.PublicKey),
symkey: some(symkey.toSymKey())
)
let encryptedPayloadRes = payloadV1.encode(1, node.rng[])
if encryptedPayloadRes.isErr():
raise newException(ValueError, "payload encryption failed: " & $encryptedPayloadRes.error)
let message = WakuMessage(
payload: encryptedPayloadRes.value,
# TODO: Fail if the message doesn't have a content topic
contentTopic: msg.contentTopic.get(DefaultContentTopic),
version: 1,
timestamp: msg.timestamp.get(Timestamp(0)),
ephemeral: msg.ephemeral.get(false)
)
let publishFut = node.publish(topic, message)
if not await publishFut.withTimeout(futTimeout):
raise newException(ValueError, "publish to topic timed out")
# Successfully published message
return true
server.rpc("get_waku_v2_private_v1_symmetric_messages") do (topic: string, symkey: string) -> seq[WakuMessageRPC]:
## Returns all WakuMessages received on a PubSub topic since the
## last time this method was called. Decrypts the message payloads
## before returning.
debug "get_waku_v2_private_v1_symmetric_messages", topic=topic
if not cache.isSubscribed(topic):
raise newException(ValueError, "not subscribed to topic: " & topic)
let msgRes = cache.getMessages(topic, clear=true)
if msgRes.isErr():
raise newException(ValueError, "not subscribed to topic: " & topic)
let msgs = msgRes.get()
let key = some(symkey.toSymKey())
return msgs.mapIt(it.toWakuMessageRPC(symkey=key))
server.rpc("get_waku_v2_private_v1_asymmetric_keypair") do () -> WakuKeyPair:
## Generates and returns a public/private key pair for asymmetric message encryption and decryption.
debug "get_waku_v2_private_v1_asymmetric_keypair"
let privKey = keys.PrivateKey.random(node.rng[])
return WakuKeyPair(seckey: privKey, pubkey: privKey.toPublicKey())
server.rpc("post_waku_v2_private_v1_asymmetric_message") do (topic: string, msg: WakuMessageRPC, publicKey: string) -> bool:
## Publishes and encrypts a message to be relayed on a PubSub topic
debug "post_waku_v2_private_v1_asymmetric_message"
let payloadRes = base64.decode(msg.payload)
if payloadRes.isErr():
raise newException(ValueError, "invalid payload format: " & payloadRes.error)
let payloadV1 = Payload(
payload: payloadRes.value,
dst: some(publicKey.toPublicKey()),
symkey: none(SymKey)
)
let encryptedPayloadRes = payloadV1.encode(1, node.rng[])
if encryptedPayloadRes.isErr():
raise newException(ValueError, "payload encryption failed: " & $encryptedPayloadRes.error)
let message = WakuMessage(
payload: encryptedPayloadRes.value,
# TODO: Fail if the message doesn't have a content topic
contentTopic: msg.contentTopic.get(DefaultContentTopic),
version: 1,
timestamp: msg.timestamp.get(Timestamp(0)),
ephemeral: msg.ephemeral.get(false)
)
let publishFut = node.publish(topic, message)
if not await publishFut.withTimeout(futTimeout):
raise newException(ValueError, "publish to topic timed out")
# Successfully published message
return true
server.rpc("get_waku_v2_private_v1_asymmetric_messages") do (topic: string, privateKey: string) -> seq[WakuMessageRPC]:
## Returns all WakuMessages received on a PubSub topic since the
## last time this method was called. Decrypts the message payloads
## before returning.
debug "get_waku_v2_private_v1_asymmetric_messages", topic=topic
if not cache.isSubscribed(topic):
raise newException(ValueError, "not subscribed to topic: " & topic)
let msgRes = cache.getMessages(topic, clear=true)
if msgRes.isErr():
raise newException(ValueError, "not subscribed to topic: " & topic)
let msgs = msgRes.get()
let key = some(privateKey.toPrivateKey())
return msgs.mapIt(it.toWakuMessageRPC(privateKey=key))
## Support for the Relay Private API has been deprecated.
## This API existed for compatibility with the Waku v1/Whisper spec and encryption schemes.
## It is recommended to use the Relay API instead.

View File

@ -1,84 +0,0 @@
import
stew/[results, byteutils],
eth/keys,
json,
json_rpc/rpcserver
import
../../waku/whisper/whisper_types,
../../waku/common/hexstrings,
../marshalling,
../message
export message
type
WakuKeyPair* = object
seckey*: keys.PrivateKey
pubkey*: keys.PublicKey
## JSON-RPC type marshalling
# SymKey
proc `%`*(value: SymKey): JsonNode =
%("0x" & value.toHex())
func isValidSymKey*(value: string): bool =
# 32 bytes for Private Key plus 1 byte for 0x prefix
value.isValidHexData(66)
proc toSymKey*(key: string): SymKey {.inline.} =
hexToByteArray(key[2 .. ^1], result)
proc fromJson*(n: JsonNode, argName: string, value: var SymKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not isValidSymKey(hexStr):
raise newException(ValueError, invalidMsg(argName) & " as a symmetric key \"" & hexStr & "\"")
value = hexStr.toSymKey()
# PublicKey
proc `%`*(value: PublicKey): JsonNode =
%("0x04" & $value)
func isValidPublicKey*(value: string): bool =
# 65 bytes for Public Key plus 1 byte for 0x prefix
value.isValidHexData(132)
proc toPublicKey*(key: string): PublicKey {.inline.} =
PublicKey.fromHex(key[4 .. ^1]).tryGet()
proc fromJson*(n: JsonNode, argName: string, value: var PublicKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not isValidPublicKey(hexStr):
raise newException(ValueError, invalidMsg(argName) & " as a public key \"" & hexStr & "\"")
value = hexStr.toPublicKey()
# PrivateKey
proc `%`*(value: PrivateKey): JsonNode =
%("0x" & $value)
func isValidPrivateKey*(value: string): bool =
# 32 bytes for Private Key plus 1 byte for 0x prefix
value.isValidHexData(66)
proc toPrivateKey*(key: string): PrivateKey {.inline.} =
PrivateKey.fromHex(key[2 .. ^1]).tryGet()
proc fromJson*(n: JsonNode, argName: string, value: var PrivateKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not isValidPrivateKey(hexStr):
raise newException(ValueError, invalidMsg(argName) & " as a private key \"" & hexStr & "\"")
value = hexStr.toPrivateKey()

View File

@ -1,82 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/options,
stew/results,
eth/keys
import
../../whisper/whisper_types,
../waku_core
export whisper_types, keys, options
type
KeyKind* = enum
Symmetric
Asymmetric
None
KeyInfo* = object
case kind*: KeyKind
of Symmetric:
symKey*: SymKey
of Asymmetric:
privKey*: PrivateKey
of None:
discard
# TODO:
# - This is using `DecodedPayload` from Waku v1 / Whisper and could be altered
# by making that a case object also, e.g. useful for the version 0, but
# especially in the future if there would be yet another version.
# - Also reworking that API to use Result instead of Option could make this
# cleaner.
# - For now this `KeyInfo` is a bit silly also, but perhaps with v2 or
# adjustments to Waku v1 encoding, it can be better.
proc decodePayload*(message: WakuMessage, keyInfo: KeyInfo):
Result[DecodedPayload, cstring] =
case message.version
of 0:
return ok(DecodedPayload(payload:message.payload))
of 1:
case keyInfo.kind
of Symmetric:
let decoded = message.payload.decode(none[PrivateKey](),
some(keyInfo.symKey))
if decoded.isSome():
return ok(decoded.get())
else:
return err("Couldn't decrypt using symmetric key")
of Asymmetric:
let decoded = message.payload.decode(some(keyInfo.privkey),
none[SymKey]())
if decoded.isSome():
return ok(decoded.get())
else:
return err("Couldn't decrypt using asymmetric key")
else:
discard
else:
return err("Unsupported WakuMessage version")
# TODO: same story as for `decodedPayload`, but then regarding the `Payload`
# object.
proc encode*(payload: Payload, version: uint32, rng: var HmacDrbgContext):
Result[seq[byte], cstring] =
case version
of 0:
# This is rather silly
return ok(payload.payload)
of 1:
let encoded = encode(rng, payload)
if encoded.isSome():
return ok(encoded.get())
else:
return err("Couldn't encode the payload")
else:
return err("Unsupported WakuMessage version")

View File

@ -5,6 +5,5 @@
# - APACHEv2 ([LICENSE-APACHEv2](../LICENSE-APACHEv2) or https://www.apache.org/licenses/LICENSE-2.0)
## An implementation of the [Waku v1](https://specs.vac.dev/specs/waku/waku.html) and [Waku v2](https://specs.vac.dev/specs/waku/v2/waku-v2.html) in nim.
import v2/waku_node as wakunode2, v1/node/wakunode1
import v2/waku_node as wakunode2
export wakunode2
export wakunode1

View File

@ -1,481 +0,0 @@
# nim-eth - Whisper
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
## Whisper
## *******
##
## Whisper is a gossip protocol that synchronizes a set of messages across nodes
## with attention given to sender and recipient anonymitiy. Messages are
## categorized by a topic and stay alive in the network based on a time-to-live
## measured in seconds. Spam prevention is based on proof-of-work, where large
## or long-lived messages must spend more work.
##
## Example usage
## ----------
## First an `EthereumNode` needs to be created, either with all capabilities set
## or with specifically the Whisper capability set.
## The latter can be done like this:
##
## .. code-block::nim
## var node = newEthereumNode(keypair, address, netId, nil,
## addAllCapabilities = false)
## node.addCapability Whisper
##
## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done.
## However, they only make real sense after ``connectToNetwork`` was started. As
## else there will be no peers to send and receive messages from.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/[options, tables, times],
chronos, chronicles, metrics,
eth/[keys, async_utils, p2p],
./whisper_types
export
whisper_types
logScope:
topics = "whisper"
const
defaultQueueCapacity = 2048
whisperVersion* = 6 ## Whisper version.
whisperVersionStr* = $whisperVersion ## Whisper version.
defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node.
defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max
## message size. This can never be larger than the maximum RLPx message size.
messageInterval* = chronos.milliseconds(300) ## Interval at which messages are
## send to peers, in ms.
pruneInterval* = chronos.milliseconds(1000) ## Interval at which message
## queue is pruned, in ms.
type
WhisperConfig* = object
powRequirement*: float64
bloom*: Bloom
isLightNode*: bool
maxMsgSize*: uint32
WhisperPeer = ref object
initialized: bool # when successfully completed the handshake
powRequirement*: float64
bloom*: Bloom
isLightNode*: bool
trusted*: bool
received: HashSet[Hash]
WhisperNetwork = ref object
queue*: ref Queue
filters*: Filters
config*: WhisperConfig
proc allowed*(msg: Message, config: WhisperConfig): bool =
# Check max msg size, already happens in RLPx but there is a specific shh
# max msg size which should always be < RLPx max msg size
if msg.size > config.maxMsgSize:
envelopes_dropped.inc(labelValues = ["too_large"])
warn "Message size too large", size = msg.size
return false
if msg.pow < config.powRequirement:
envelopes_dropped.inc(labelValues = ["low_pow"])
warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement
return false
if not bloomFilterMatch(config.bloom, msg.bloom):
envelopes_dropped.inc(labelValues = ["bloom_filter_mismatch"])
warn "Message does not match node bloom filter"
return false
return true
proc run(peer: Peer) {.gcsafe, async, raises: [Defect].}
proc run(node: EthereumNode, network: WhisperNetwork)
{.gcsafe, async, raises: [Defect].}
proc initProtocolState*(network: WhisperNetwork, node: EthereumNode) {.gcsafe.} =
new(network.queue)
network.queue[] = initQueue(defaultQueueCapacity)
network.filters = initTable[string, Filter]()
network.config.bloom = fullBloom()
network.config.powRequirement = defaultMinPow
network.config.isLightNode = false
network.config.maxMsgSize = defaultMaxMsgSize
asyncSpawn node.run(network)
p2pProtocol Whisper(version = whisperVersion,
rlpxName = "shh",
peerState = WhisperPeer,
networkState = WhisperNetwork):
onPeerConnected do (peer: Peer):
trace "onPeerConnected Whisper"
let
whisperNet = peer.networkState
whisperPeer = peer.state
let m = await peer.status(whisperVersion,
cast[uint64](whisperNet.config.powRequirement),
@(whisperNet.config.bloom),
whisperNet.config.isLightNode,
timeout = chronos.milliseconds(5000))
if m.protocolVersion == whisperVersion:
debug "Whisper peer", peer, whisperVersion
else:
raise newException(UselessPeerError, "Incompatible Whisper version")
whisperPeer.powRequirement = cast[float64](m.powConverted)
if m.bloom.len > 0:
if m.bloom.len != bloomSize:
raise newException(UselessPeerError, "Bloomfilter size mismatch")
else:
whisperPeer.bloom.bytesCopy(m.bloom)
else:
# If no bloom filter is send we allow all
whisperPeer.bloom = fullBloom()
whisperPeer.isLightNode = m.isLightNode
if whisperPeer.isLightNode and whisperNet.config.isLightNode:
# No sense in connecting two light nodes so we disconnect
raise newException(UselessPeerError, "Two light nodes connected")
whisperPeer.received.init()
whisperPeer.trusted = false
whisperPeer.initialized = true
if not whisperNet.config.isLightNode:
asyncSpawn peer.run()
debug "Whisper peer initialized", peer
handshake:
proc status(peer: Peer,
protocolVersion: uint,
powConverted: uint64,
bloom: seq[byte],
isLightNode: bool)
proc messages(peer: Peer, envelopes: openarray[Envelope]) =
if not peer.state.initialized:
warn "Handshake not completed yet, discarding messages"
return
for envelope in envelopes:
# check if expired or in future, or ttl not 0
if not envelope.valid():
warn "Expired or future timed envelope", peer
# disconnect from peers sending bad envelopes
# await peer.disconnect(SubprotocolReason)
continue
let msg = initMessage(envelope)
if not msg.allowed(peer.networkState.config):
# disconnect from peers sending bad envelopes
# await peer.disconnect(SubprotocolReason)
continue
# This peer send this message thus should not receive it again.
# If this peer has the message in the `received` set already, this means
# it was either already received here from this peer or send to this peer.
# Either way it will be in our queue already (and the peer should know
# this) and this peer is sending duplicates.
# Note: geth does not check if a peer has send a message to them before
# broadcasting this message. This too is seen here as a duplicate message
# (see above comment). If we want to seperate these cases (e.g. when peer
# rating), then we have to add a "peer.state.send" HashSet.
# Note: it could also be a race between the arrival of a message send by
# this node to a peer and that same message arriving from that peer (after
# it was received from another peer) here.
if peer.state.received.containsOrIncl(msg.hash):
envelopes_dropped.inc(labelValues = ["duplicate"])
trace "Peer sending duplicate messages", peer, hash = $msg.hash
# await peer.disconnect(SubprotocolReason)
continue
# This can still be a duplicate message, but from another peer than
# the peer who send the message.
if peer.networkState.queue[].add(msg):
# notify filters of this message
peer.networkState.filters.notify(msg)
proc powRequirement(peer: Peer, value: uint64) =
if not peer.state.initialized:
warn "Handshake not completed yet, discarding powRequirement"
return
peer.state.powRequirement = cast[float64](value)
proc bloomFilterExchange(peer: Peer, bloom: openArray[byte]) =
if not peer.state.initialized:
warn "Handshake not completed yet, discarding bloomFilterExchange"
return
if bloom.len == bloomSize:
peer.state.bloom.bytesCopy(bloom)
nextID 126
proc p2pRequest(peer: Peer, envelope: Envelope) =
# TODO: here we would have to allow to insert some specific implementation
# such as e.g. Whisper Mail Server
discard
proc p2pMessage(peer: Peer, envelope: Envelope) =
if peer.state.trusted:
# when trusted we can bypass any checks on envelope
let msg = Message(env: envelope, isP2P: true)
peer.networkState.filters.notify(msg)
# Following message IDs are not part of EIP-627, but are added and used by
# the Status application, we ignore them for now.
nextID 11
proc batchAcknowledged(peer: Peer) = discard
proc messageResponse(peer: Peer) = discard
nextID 123
requestResponse:
proc p2pSyncRequest(peer: Peer) = discard
proc p2pSyncResponse(peer: Peer) = discard
proc p2pRequestComplete(peer: Peer) = discard
# 'Runner' calls ---------------------------------------------------------------
proc processQueue(peer: Peer) =
# Send to peer all valid and previously not send envelopes in the queue.
var
envelopes: seq[Envelope] = @[]
whisperPeer = peer.state(Whisper)
whisperNet = peer.networkState(Whisper)
for message in whisperNet.queue.items:
if whisperPeer.received.contains(message.hash):
# trace "message was already send to peer", hash = $message.hash, peer
continue
if message.pow < whisperPeer.powRequirement:
trace "Message PoW too low for peer", pow = message.pow,
powReq = whisperPeer.powRequirement
continue
if not bloomFilterMatch(whisperPeer.bloom, message.bloom):
trace "Message does not match peer bloom filter"
continue
trace "Adding envelope"
envelopes.add(message.env)
whisperPeer.received.incl(message.hash)
if envelopes.len() > 0:
trace "Sending envelopes", amount=envelopes.len
# Ignore failure of sending messages, this could occur when the connection
# gets dropped
traceAsyncErrors peer.messages(envelopes)
proc run(peer: Peer) {.async.} =
while peer.connectionState notin {Disconnecting, Disconnected}:
peer.processQueue()
await sleepAsync(messageInterval)
proc pruneReceived(node: EthereumNode) =
if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ...
var whisperNet = node.protocolState(Whisper)
for peer in node.protocolPeers(Whisper):
if not peer.initialized:
continue
# NOTE: Perhaps alter the queue prune call to keep track of a HashSet
# of pruned messages (as these should be smaller), and diff this with
# the received sets.
peer.received = intersection(peer.received, whisperNet.queue.itemHashes)
proc run(node: EthereumNode, network: WhisperNetwork) {.async.} =
while true:
# prune message queue every second
# TTL unit is in seconds, so this should be sufficient?
network.queue[].prune()
# pruning the received sets is not necessary for correct workings
# but simply from keeping the sets growing indefinitely
node.pruneReceived()
await sleepAsync(pruneInterval)
# Private EthereumNode calls ---------------------------------------------------
proc sendP2PMessage(node: EthereumNode, peerId: NodeId, env: Envelope): bool =
for peer in node.peers(Whisper):
if peer.remote.id == peerId:
let f = peer.p2pMessage(env)
# Can't make p2pMessage not raise so this is the "best" option I can think
# of instead of using asyncSpawn and still keeping the call not async.
f.callback = proc(data: pointer) {.gcsafe, raises: [Defect].} =
if f.failed:
warn "P2PMessage send failed", msg = f.readError.msg
return true
proc queueMessage(node: EthereumNode, msg: Message): bool =
var whisperNet = node.protocolState(Whisper)
# We have to do the same checks here as in the messages proc not to leak
# any information that the message originates from this node.
if not msg.allowed(whisperNet.config):
return false
trace "Adding message to queue", hash = $msg.hash
if whisperNet.queue[].add(msg):
# Also notify our own filters of the message we are sending,
# e.g. msg from local Dapp to Dapp
whisperNet.filters.notify(msg)
return true
# Public EthereumNode calls ----------------------------------------------------
proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](),
symKey = none[SymKey](), src = none[PrivateKey](),
ttl: uint32, topic: whisper_types.Topic, payload: seq[byte],
padding = none[seq[byte]](), powTime = 1'f,
powTarget = defaultMinPow,
targetPeer = none[NodeId]()): bool =
## Post a message on the message queue which will be processed at the
## next `messageInterval`.
##
## NOTE: This call allows a post without encryption. If encryption is
## mandatory it should be enforced a layer up
let payload = encode(node.rng[], Payload(
payload: payload, src: src, dst: pubKey, symKey: symKey, padding: padding))
if payload.isSome():
var env = Envelope(expiry:epochTime().uint32 + ttl,
ttl: ttl, topic: topic, data: payload.get(), nonce: 0)
# Allow lightnode to post only direct p2p messages
if targetPeer.isSome():
return node.sendP2PMessage(targetPeer.get(), env)
elif not node.protocolState(Whisper).config.isLightNode:
# non direct p2p message can not have ttl of 0
if env.ttl == 0:
return false
var msg = initMessage(env, powCalc = false)
# XXX: make this non blocking or not?
# In its current blocking state, it could be noticed by a peer that no
# messages are send for a while, and thus that mining PoW is done, and
# that next messages contains a message originated from this peer
# zah: It would be hard to execute this in a background thread at the
# moment. We'll need a way to send custom "tasks" to the async message
# loop (e.g. AD2 support for AsyncChannels).
if not msg.sealEnvelope(powTime, powTarget):
return false
# need to check expiry after mining PoW
if not msg.env.valid():
return false
return node.queueMessage(msg)
else:
warn "Light node not allowed to post messages"
return false
else:
error "Encoding of payload failed"
return false
proc subscribeFilter*(node: EthereumNode, filter: Filter,
handler:FilterMsgHandler = nil): string =
## Initiate a filter for incoming/outgoing messages. Messages can be
## retrieved with the `getFilterMessages` call or with a provided
## `FilterMsgHandler`.
##
## NOTE: This call allows for a filter without decryption. If encryption is
## mandatory it should be enforced a layer up.
return subscribeFilter(
node.rng[], node.protocolState(Whisper).filters, filter, handler)
proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool =
## Remove a previously subscribed filter.
var filter: Filter
return node.protocolState(Whisper).filters.take(filterId, filter)
proc getFilterMessages*(node: EthereumNode, filterId: string):
seq[ReceivedMessage] {.raises: [KeyError, Defect].} =
## Get all the messages currently in the filter queue. This will reset the
## filter message queue.
return node.protocolState(Whisper).filters.getFilterMessages(filterId)
proc filtersToBloom*(node: EthereumNode): Bloom =
## Returns the bloom filter of all topics of all subscribed filters.
return node.protocolState(Whisper).filters.toBloom()
proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} =
## Sets the PoW requirement for this node, will also send
## this new PoW requirement to all connected peers.
##
## Failures when sending messages to peers will not be reported.
# NOTE: do we need a tolerance of old PoW for some time?
node.protocolState(Whisper).config.powRequirement = powReq
var futures: seq[Future[void]] = @[]
for peer in node.peers(Whisper):
futures.add(peer.powRequirement(cast[uint64](powReq)))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} =
## Sets the bloom filter for this node, will also send
## this new bloom filter to all connected peers.
##
## Failures when sending messages to peers will not be reported.
# NOTE: do we need a tolerance of old bloom filter for some time?
node.protocolState(Whisper).config.bloom = bloom
var futures: seq[Future[void]] = @[]
for peer in node.peers(Whisper):
futures.add(peer.bloomFilterExchange(@bloom))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool =
## Set the maximum allowed message size.
## Can not be set higher than ``defaultMaxMsgSize``.
if size > defaultMaxMsgSize:
warn "size > defaultMaxMsgSize"
return false
node.protocolState(Whisper).config.maxMsgSize = size
return true
proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool =
## Set a connected peer as trusted.
for peer in node.peers(Whisper):
if peer.remote.id == peerId:
peer.state(Whisper).trusted = true
return true
proc setLightNode*(node: EthereumNode, isLightNode: bool) =
## Set this node as a Whisper light node.
##
## NOTE: Should be run before connection is made with peers as this
## setting is only communicated at peer handshake.
node.protocolState(Whisper).config.isLightNode = isLightNode
proc configureWhisper*(node: EthereumNode, config: WhisperConfig) =
## Apply a Whisper configuration.
##
## NOTE: Should be run before connection is made with peers as some
## of the settings are only communicated at peer handshake.
node.protocolState(Whisper).config = config
proc resetMessageQueue*(node: EthereumNode) =
## Full reset of the message queue.
##
## NOTE: Not something that should be run in normal circumstances.
node.protocolState(Whisper).queue[] = initQueue(defaultQueueCapacity)

View File

@ -1,674 +0,0 @@
# nim-eth - Whisper
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Exception-tracking pragma: the `Defect` annotation changed with Nim 1.4.
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  std/[algorithm, bitops, math, options, tables, times, hashes],
  chronicles, stew/[byteutils, endians2], metrics, bearssl/rand,
  nimcrypto/[bcmode, hash, keccak, rijndael],
  eth/[keys, rlp, p2p], eth/p2p/ecies

# strutils is only needed for `formatFloat` in TRACE-level logging below.
when chronicles.enabledLogLevel == LogLevel.TRACE:
  import std/strutils

logScope:
  topics = "whisper_types"

# Metrics counters for envelope accounting; `envelopes_dropped` is labelled
# with the drop reason.
declarePublicCounter envelopes_valid,
  "Received & posted valid envelopes"
declarePublicCounter envelopes_dropped,
  "Dropped envelopes", labels = ["reason"]

const
  flagsLen = 1 ## payload flags field length, bytes
  gcmIVLen = 12 ## Length of IV (seed) used for AES
  gcmTagLen = 16 ## Length of tag used to authenticate AES-GCM-encrypted message
  padMaxLen = 256 ## payload will be padded to multiples of this by default
  signatureBits = 0b100'u8 ## payload flags signature mask
  bloomSize* = 512 div 8 ## 512-bit bloom filter stored as 64 bytes
  defaultFilterQueueCapacity = 64 ## initial queue size for handler-less filters
type
  Hash* = MDigest[256]
  SymKey* = array[256 div 8, byte] ## AES256 key.
  Topic* = array[4, byte] ## 4 bytes that can be used to filter messages on.
  Bloom* = array[bloomSize, byte] ## A bloom filter that can be used to identify
  ## a number of topics that a peer is interested in.
  # XXX: nim-eth-bloom has really quirky API and fixed
  # bloom size.
  # stint is massive overkill / poor fit - a bloom filter is an array of bits,
  # not a number

  Payload* = object
    ## Payload is what goes in the data field of the Envelope.
    src*: Option[PrivateKey] ## Optional key used for signing message
    dst*: Option[PublicKey] ## Optional key used for asymmetric encryption
    symKey*: Option[SymKey] ## Optional key used for symmetric encryption
    payload*: seq[byte] ## Application data / message contents
    padding*: Option[seq[byte]] ## Padding - if unset, will automatically pad up to
    ## nearest maxPadLen-byte boundary

  DecodedPayload* = object
    ## The decoded payload of a received message.
    src*: Option[PublicKey] ## If the message was signed, this is the public key
    ## of the source
    payload*: seq[byte] ## Application data / message contents
    padding*: Option[seq[byte]] ## Message padding

  Envelope* = object
    ## What goes on the wire in the whisper protocol - a payload and some
    ## book-keeping
    # Don't touch field order, there's lots of macro magic that depends on it
    expiry*: uint32 ## Unix timestamp when message expires
    ttl*: uint32 ## Time-to-live, seconds - message was created at (expiry - ttl)
    topic*: Topic
    data*: seq[byte] ## Payload, as given by user
    nonce*: uint64 ## Nonce used for proof-of-work calculation

  Message* = object
    ## An Envelope with a few cached properties
    env*: Envelope
    hash*: Hash ## Hash, as calculated for proof-of-work
    size*: uint32 ## RLP-encoded size of message
    pow*: float64 ## Calculated proof-of-work
    bloom*: Bloom ## Filter sent to direct peers for topic-based filtering
    isP2P*: bool ## Direct peer-to-peer message, delivered regardless of PoW

  ReceivedMessage* = object
    ## A received message that matched a filter and was possible to decrypt.
    ## Contains the decoded payload and additional information.
    decoded*: DecodedPayload
    timestamp*: uint32
    ttl*: uint32
    topic*: Topic
    pow*: float64
    hash*: Hash
    dst*: Option[PublicKey]

  Queue* = object
    ## Bounded message repository
    ##
    ## Whisper uses proof-of-work to judge the usefulness of a message staying
    ## in the "cloud" - messages with low proof-of-work will be removed to make
    ## room for those with higher pow, even if they haven't expired yet.
    ## Larger messages and those with high time-to-live will require more pow.
    items*: seq[Message] ## Sorted by proof-of-work
    itemHashes*: HashSet[Hash] ## For easy duplication checking
    # XXX: itemHashes is added for easy message duplication checking and for
    # easy pruning of the peer received message sets. It does have an impact on
    # adding and pruning of items however.
    # Need to give it some more thought and check where most time is lost in
    # typical cases, perhaps we are better of with one hash table (lose PoW
    # sorting however), or perhaps there is a simpler solution...
    capacity*: int ## Max messages to keep. \
    ## XXX: really big messages can cause excessive mem usage when using msg \
    ## count

  FilterMsgHandler* = proc(msg: ReceivedMessage) {.gcsafe, raises: [Defect].}

  Filter* = object
    ## Subscription criteria plus either a callback or a message queue.
    src*: Option[PublicKey]
    privateKey*: Option[PrivateKey]
    symKey*: Option[SymKey]
    topics*: seq[Topic]
    powReq*: float64
    allowP2P*: bool
    bloom: Bloom # Cached bloom filter of all topics of filter
    handler: FilterMsgHandler
    queue: seq[ReceivedMessage]

  Filters* = Table[string, Filter]
# Utilities --------------------------------------------------------------------
proc leadingZeroBits(hash: MDigest): int =
  ## Number of most-significant zero bits of `hash` before the first set bit.
  var total = 0
  for octet in hash.data:
    static: doAssert sizeof(octet) == 1
    if octet != 0:
      total += countLeadingZeroBits(octet)
      break
    total += 8
  total
proc calcPow*(size, ttl: uint64, hash: Hash): float64 =
  ## Whisper proof-of-work: 2^(leading zero bits of `hash`) divided by the
  ## product of encoded size and time-to-live, so large and long-lived
  ## messages are penalized.
  let work = pow(2.0, leadingZeroBits(hash).float64)
  work / (size.float64 * ttl.float64)
proc topicBloom*(topic: Topic): Bloom =
  ## Map a topic onto the 512-bit bloom filter: three 9-bit indexes are
  ## derived from the first three topic bytes, each extended with one bit
  ## borrowed from the final topic byte.
  for i in 0..<3:
    var bitIndex = uint16(topic[i])
    # The 9th bit for position `i` is bit `i` of the last topic byte.
    if (topic[3] and byte(1 shl i)) != 0:
      bitIndex += 256
    doAssert bitIndex <= 511
    result[bitIndex div 8] = result[bitIndex div 8] or byte(1 shl (bitIndex and 7'u16))
proc generateRandomID*(rng: var HmacDrbgContext): string =
  ## Produce a random 256-bit identifier, returned hex-encoded.
  var raw: array[32, byte]
  hmacDrbgGenerate(rng, raw)
  toHex(raw)
proc `or`(a, b: Bloom): Bloom =
  ## Bitwise union of two bloom filters.
  for i in low(a) .. high(a):
    result[i] = a[i] or b[i]
proc bytesCopy*(bloom: var Bloom, b: openArray[byte]) =
  ## Overwrite `bloom` with the raw bytes in `b`, which must be exactly
  ## ``bloomSize`` (64) bytes long.
  doAssert b.len == bloomSize
  copyMem(addr bloom[0], unsafeAddr b[0], bloomSize)
proc toBloom*(topics: openArray[Topic]): Bloom =
  ## Combined bloom filter covering every topic in `topics`; an empty input
  ## yields an all-zero filter.
  var acc: Bloom
  for t in topics:
    acc = acc or topicBloom(t)
  acc
proc bloomFilterMatch*(filter, sample: Bloom): bool =
  ## True when every bit set in `sample` is also set in `filter`, i.e. the
  ## sample's topics may be covered by the filter.
  for i in 0..<filter.len:
    # A sample bit outside the filter means no match.
    if (sample[i] and not filter[i]) != 0'u8:
      return false
  true
proc fullBloom*(): Bloom =
  ## Returns a fully set bloom filter. To be used when allowing all topics.
  # There is no setMem exported in system, assume compiler is smart enough?
  for i in low(result) .. high(result):
    result[i] = 0xFF
proc encryptAesGcm(plain: openarray[byte], key: SymKey,
                   iv: array[gcmIVLen, byte]): seq[byte] =
  ## Encrypt using AES-GCM, making sure to append tag and iv, in that order.
  ## Output layout: ciphertext | 16-byte tag | 12-byte iv.
  var gcm: GCM[aes256]
  result = newSeqOfCap[byte](plain.len + gcmTagLen + iv.len)
  result.setLen plain.len
  gcm.init(key, iv, []) # empty additional authenticated data
  gcm.encrypt(plain, result)
  var tag: array[gcmTagLen, byte]
  gcm.getTag(tag)
  gcm.clear() # scrub key material from the GCM context
  result.add tag
  result.add iv
proc decryptAesGcm(cipher: openarray[byte], key: SymKey): Option[seq[byte]] =
  ## Decrypt AES-GCM ciphertext and validate authenticity - assumes
  ## cipher-tag-iv format of the buffer (the layout produced by
  ## `encryptAesGcm`). Returns none() on malformed input or tag mismatch.
  if cipher.len < gcmTagLen + gcmIVLen:
    debug "cipher missing tag/iv", len = cipher.len
    return
  let plainLen = cipher.len - gcmTagLen - gcmIVLen
  var gcm: GCM[aes256]
  var res = newSeq[byte](plainLen)
  let iv = cipher[^gcmIVLen .. ^1]
  let tag = cipher[^(gcmIVLen + gcmTagLen) .. ^(gcmIVLen + 1)]
  gcm.init(key, iv, [])
  gcm.decrypt(cipher[0 ..< ^(gcmIVLen + gcmTagLen)], res)
  var tag2: array[gcmTagLen, byte]
  gcm.getTag(tag2)
  gcm.clear()
  # NOTE(review): `!=` on arrays is not a constant-time comparison; confirm
  # no timing-oracle concern applies to envelope decryption here.
  if tag != tag2:
    debug "cipher tag mismatch", len = cipher.len, tag, tag2
    return
  return some(res)
# Payloads ---------------------------------------------------------------------
# Several differences between geth and parity - this code is closer to geth
# simply because that makes it closer to EIP 627 - see also:
# https://github.com/paritytech/parity-ethereum/issues/9652
proc encode*(rng: var HmacDrbgContext, self: Payload): Option[seq[byte]] =
  ## Encode a payload according so as to make it suitable to put in an Envelope
  ## The format follows EIP 627 - https://eips.ethereum.org/EIPS/eip-627
  ## Returns none() when the payload is too large or encryption fails.
  # XXX is this limit too high? We could limit it here but the protocol
  # technically supports it..
  if self.payload.len >= 256*256*256:
    notice "Payload exceeds max length", len = self.payload.len
    return
  # length of the payload length field :)
  let payloadLenLen =
    if self.payload.len >= 256*256: 3'u8
    elif self.payload.len >= 256: 2'u8
    else: 1'u8
  let signatureLen =
    if self.src.isSome(): keys.RawSignatureSize
    else: 0
  # useful data length
  let dataLen = flagsLen + payloadLenLen.int + self.payload.len + signatureLen
  let padLen =
    if self.padding.isSome(): self.padding.get().len
    # is there a reason why 256 bytes are padded when the dataLen is 256?
    else: padMaxLen - (dataLen mod padMaxLen)
  # buffer space that we need to allocate
  let totalLen = dataLen + padLen
  var plain = newSeqOfCap[byte](totalLen)
  let signatureFlag =
    if self.src.isSome(): signatureBits
    else: 0'u8
  # byte 0: flags with payload length length and presence of signature
  plain.add payloadLenLen or signatureFlag
  # next, length of payload - little endian (who comes up with this stuff? why
  # can't the world just settle on one endian?)
  let payloadLenLE = self.payload.len.uint32.toBytesLE
  # No, I have no love for nim closed ranges - such a mess to remember the extra
  # < or risk off-by-ones when working with lengths..
  plain.add payloadLenLE[0..<payloadLenLen]
  plain.add self.payload
  if self.padding.isSome():
    plain.add self.padding.get()
  else:
    # Random padding up to the next padMaxLen boundary obscures true length.
    var padding = newSeq[byte](padLen)
    hmacDrbgGenerate(rng, padding)
    plain.add padding
  if self.src.isSome(): # Private key present - signature requested
    let sig = sign(self.src.get(), plain)
    plain.add sig.toRaw()
  if self.dst.isSome(): # Asymmetric key present - encryption requested
    var res = newSeq[byte](eciesEncryptedLength(plain.len))
    let err = eciesEncrypt(rng, plain, res, self.dst.get())
    if err.isErr:
      notice "Encryption failed", err = err.error
      return
    return some(res)
  if self.symKey.isSome(): # Symmetric key present - encryption requested
    var iv: array[gcmIVLen, byte]
    hmacDrbgGenerate(rng, iv)
    return some(encryptAesGcm(plain, self.symKey.get(), iv))
  # No encryption!
  return some(plain)
proc decode*(data: openarray[byte], dst = none[PrivateKey](),
             symKey = none[SymKey]()): Option[DecodedPayload] =
  ## Decode data into payload, potentially trying to decrypt if keys are
  ## provided. Asymmetric decryption (`dst`) takes precedence over symmetric
  ## (`symKey`); with neither key the data is treated as plaintext.
  # Careful throughout - data coming from unknown source - malformatted data
  # expected
  var res: DecodedPayload
  var plain: seq[byte]
  if dst.isSome():
    # XXX: eciesDecryptedLength is pretty fragile, API-wise.. is this really the
    # way to check for errors / sufficient length?
    let plainLen = eciesDecryptedLength(data.len)
    if plainLen < 0:
      debug "Not enough data to decrypt", len = data.len
      return
    plain.setLen(eciesDecryptedLength(data.len))
    if eciesDecrypt(data, plain, dst.get()).isErr:
      debug "Couldn't decrypt using asymmetric key", len = data.len
      return
  elif symKey.isSome():
    let tmp = decryptAesGcm(data, symKey.get())
    if tmp.isNone():
      debug "Couldn't decrypt using symmetric key", len = data.len
      return
    plain = tmp.get()
  else: # No encryption!
    plain = @data
  if plain.len < 2: # Minimum 1 byte flags, 1 byte payload len
    debug "Missing flags or payload length", len = plain.len
    return
  var pos = 0
  # Flags byte: low 2 bits = byte count of the payload-length field,
  # bit 2 = signature present (see signatureBits).
  let payloadLenLen = int(plain[pos] and 0b11'u8)
  let hasSignature = (plain[pos] and 0b100'u8) != 0
  pos += 1
  if plain.len < pos + payloadLenLen:
    debug "Missing payload length", len = plain.len, pos, payloadLenLen
    return
  var payloadLenLE: array[4, byte]
  for i in 0..<payloadLenLen: payloadLenLE[i] = plain[pos + i]
  pos += payloadLenLen
  let payloadLen = int(fromBytesLE(uint32, payloadLenLE))
  if plain.len < pos + payloadLen:
    debug "Missing payload", len = plain.len, pos, payloadLen
    return
  res.payload = plain[pos ..< pos + payloadLen]
  pos += payloadLen
  if hasSignature:
    if plain.len < (keys.RawSignatureSize + pos):
      debug "Missing expected signature", len = plain.len
      return
    # Signature covers everything before it; recover the signer's public key.
    let sig = Signature.fromRaw(plain[^keys.RawSignatureSize .. ^1])
    let key = sig and recover(
      sig[], plain.toOpenArray(0, plain.len - keys.RawSignatureSize - 1))
    if key.isErr:
      debug "Failed to recover signature key", err = key.error
      return
    res.src = some(key[])
  # Whatever sits between the payload and the (optional) signature is padding.
  if hasSignature:
    if plain.len > pos + keys.RawSignatureSize:
      res.padding = some(plain[pos .. ^(keys.RawSignatureSize+1)])
  else:
    if plain.len > pos:
      res.padding = some(plain[pos .. ^1])
  return some(res)
# Envelopes --------------------------------------------------------------------
proc valid*(self: Envelope, now = epochTime()): bool =
  ## Sanity-check envelope timestamps against `now`: rejects (and counts in
  ## the `envelopes_dropped` metric) expired envelopes, zero TTLs, and
  ## envelopes claiming creation more than 2 seconds in the future.
  if self.expiry.float64 < now: # expired
    envelopes_dropped.inc(labelValues = ["expired"])
    return false
  if self.ttl <= 0: # this would invalidate pow calculation
    envelopes_dropped.inc(labelValues = ["expired"])
    return false
  # creation time = expiry - ttl; allow 2s of clock skew
  if (self.expiry - self.ttl).float64 > now + 2.0:
    envelopes_dropped.inc(labelValues = ["future_timestamp"])
    return false
  true
# Envelope "size" for PoW purposes: 20 bytes of fixed fields
# (expiry 4 + ttl 4 + topic 4 + nonce 8) plus the data length.
proc len(self: Envelope): int = 20 + self.data.len
proc toShortRlp*(self: Envelope): seq[byte] =
  ## RLP-encoded message without nonce is used during proof-of-work
  ## calculations (the nonce is hashed in separately afterwards).
  rlp.encodeList(self.expiry, self.ttl, self.topic, self.data)
proc toRlp(self: Envelope): seq[byte] =
  ## What gets sent out over the wire includes the nonce.
  rlp.encode(self)
proc minePow*(self: Envelope, seconds: float, bestBitTarget: int = 0): (uint64, Hash) =
  ## For the given envelope, spend up to `seconds` seconds searching for the
  ## best proof-of-work nonce; returns the winning (nonce, hash) pair.
  ## When `bestBitTarget` > 0, mining stops early once a hash with at least
  ## that many leading zero bits is found. At least one nonce is always tried.
  let bytes = self.toShortRlp()
  var ctx: keccak256
  ctx.init()
  ctx.update(bytes)
  var bestBit: int = 0
  let mineEnd = epochTime() + seconds
  var i: uint64
  while epochTime() < mineEnd or bestBit == 0: # At least one round
    var tmp = ctx # copy hash calculated so far - we'll reuse that for each iter
    tmp.update(i.toBytesBE())
    # XXX:a random nonce here would not leak number of iters
    let hash = tmp.finish()
    let zeroBits = leadingZeroBits(hash)
    if zeroBits > bestBit: # XXX: could also compare hashes as numbers instead
      bestBit = zeroBits
      result = (i, hash)
      if bestBitTarget > 0 and bestBit >= bestBitTarget:
        break
    i.inc
proc calcPowHash*(self: Envelope): Hash =
  ## Calculate the message hash, as done during mining - this can be used to
  ## verify proof-of-work. Hashes the short RLP (no nonce) followed by the
  ## big-endian nonce, matching `minePow`.
  var hasher: keccak256
  hasher.init()
  hasher.update(self.toShortRlp())
  hasher.update(self.nonce.toBytesBE())
  hasher.finish()
# Messages ---------------------------------------------------------------------
proc cmpPow(a, b: Message): int =
  ## Sort helper: orders messages by descending PoW, so the weakest message
  ## ends up last for cheap popping.
  cmp(b.pow, a.pow)
proc initMessage*(env: Envelope, powCalc = true): Message =
  ## Build a Message around `env`, caching its RLP size, topic bloom and -
  ## unless `powCalc` is false - its PoW hash and value.
  result.env = env
  result.size = env.toRlp().len().uint32 # XXX: calc len without creating RLP
  result.bloom = topicBloom(env.topic)
  if powCalc:
    result.hash = env.calcPowHash()
    result.pow = calcPow(result.env.len.uint32, result.env.ttl, result.hash)
    trace "Message PoW", pow = result.pow.formatFloat(ffScientific)
# Hashable adapter so Hash (an MDigest) can be a HashSet/Table key
# (used by Queue.itemHashes).
proc hash*(hash: Hash): hashes.Hash = hashes.hash(hash.data)
# NOTE: Hashing and leading zeroes calculation is now the same between geth,
# parity and this implementation.
# However, there is still a difference in the size calculation.
# See also here: https://github.com/ethereum/go-ethereum/pull/19753
# This implementation is not conform EIP-627 as we do not use the size of the
# RLP-encoded envelope, but the size of the envelope object itself.
# This is done to be able to correctly calculate the bestBitTarget.
# Other options would be:
# - work directly with powTarget in minePow, but this requires recalculation of
# rlp size + calcPow
# - Use worst case size of envelope nonce
# - Mine PoW for x interval, calcPow of best result, if target not met .. repeat
proc sealEnvelope*(msg: var Message, powTime: float, powTarget: float): bool =
  ## Mine a nonce for `msg` so its PoW meets `powTarget`; with a zero target,
  ## mine for exactly `powTime` seconds and accept whatever results.
  ## Returns false when the target could not be reached in time.
  let size = msg.env.len
  if powTarget > 0:
    # Translate the PoW target into a leading-zero-bit target for minePow.
    let x = powTarget * size.float * msg.env.ttl.float
    var bestBitTarget: int
    if x <= 1: # log() would return negative numbers or 0
      bestBitTarget = 1
    else:
      bestBitTarget = ceil(log(x, 2)).int
    (msg.env.nonce, msg.hash) = msg.env.minePow(powTime, bestBitTarget)
  else:
    # If no target is set, we are certain of executed powTime
    msg.env.expiry += powTime.uint32
    (msg.env.nonce, msg.hash) = msg.env.minePow(powTime)
  msg.pow = calcPow(size.uint32, msg.env.ttl, msg.hash)
  trace "Message PoW", pow = msg.pow
  if msg.pow < powTarget:
    return false
  return true
# Queues -----------------------------------------------------------------------
proc initQueue*(capacity: int): Queue =
  ## Create an empty queue that keeps at most `capacity` messages.
  result.capacity = capacity
  result.itemHashes.init()
  result.items = newSeqOfCap[Message](capacity)
proc prune*(self: var Queue) =
  ## Remove items that are past their expiry time.
  let now = epochTime().uint32
  # keepIf code + pruning of hashset
  # In-place compaction: surviving items shift left; expired items are also
  # removed from itemHashes so duplicate detection stays accurate.
  var pos = 0
  for i in 0 ..< len(self.items):
    if self.items[i].env.expiry > now:
      if pos != i:
        shallowCopy(self.items[pos], self.items[i])
      inc(pos)
    else: self.itemHashes.excl(self.items[i].hash)
  setLen(self.items, pos)
proc add*(self: var Queue, msg: Message): bool =
  ## Add a message to the queue.
  ## If we're at capacity, we will be removing, in order:
  ## * expired messages
  ## * lowest proof-of-work message - this may be `msg` itself!
  ## Returns false when `msg` was a duplicate or the one dropped;
  ## each outcome is counted in the envelope metrics.
  # check for duplicate before pruning
  if self.itemHashes.contains(msg.hash):
    envelopes_dropped.inc(labelValues = ["benign_duplicate"])
    return false
  else:
    envelopes_valid.inc()
    if self.items.len >= self.capacity:
      self.prune() # Only prune if needed
      if self.items.len >= self.capacity:
        # Still no room - go by proof-of-work quantity
        let last = self.items[^1]
        if last.pow > msg.pow or
          (last.pow == msg.pow and last.env.expiry > msg.env.expiry):
          # The new message has less pow or will expire earlier - drop it
          envelopes_dropped.inc(labelValues = ["full_queue_new"])
          return false
        self.items.del(self.items.len() - 1)
        self.itemHashes.excl(last.hash)
        envelopes_dropped.inc(labelValues = ["full_queue_old"])
    self.itemHashes.incl(msg.hash)
    # Binary insertion keeps items sorted by descending PoW (cmpPow).
    self.items.insert(msg, self.items.lowerBound(msg, cmpPow))
    return true
# Filters ----------------------------------------------------------------------
proc initFilter*(src = none[PublicKey](), privateKey = none[PrivateKey](),
                 symKey = none[SymKey](), topics: seq[Topic] = @[],
                 powReq = 0.0, allowP2P = false): Filter =
  ## Build a Filter, pre-computing the bloom filter of its topics.
  # Zero topics will give an empty bloom filter which is fine as this bloom
  # filter is only used to `or` with existing/other bloom filters. Not to do
  # matching.
  Filter(
    src: src,
    privateKey: privateKey,
    symKey: symKey,
    topics: topics,
    powReq: powReq,
    allowP2P: allowP2P,
    bloom: toBloom(topics))
proc subscribeFilter*(
    rng: var HmacDrbgContext, filters: var Filters, filter: Filter,
    handler: FilterMsgHandler = nil): string =
  ## Register `filter` under a freshly generated random id and return that id.
  ## With a nil `handler`, matching messages are buffered in the filter's
  ## queue instead of being delivered via callback.
  # NOTE: Should we allow a filter without a key? Encryption is mandatory in v6?
  # Check if asymmetric _and_ symmetric key? Now asymmetric just has precedence.
  let id = generateRandomID(rng)
  var registered = filter
  if not handler.isNil():
    registered.handler = handler
  else:
    registered.queue = newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity)
  filters[id] = registered
  debug "Filter added", filter = id
  return id
proc notify*(filters: var Filters, msg: Message) {.gcsafe.} =
  ## Try `msg` against every registered filter; for each match, deliver a
  ## ReceivedMessage via the filter's handler or queue. The payload is
  ## decoded at most once and reused across filters sharing the same key,
  ## tracked via `keyHash`.
  var decoded: Option[DecodedPayload]
  var keyHash: Hash
  var dst: Option[PublicKey]
  for filter in filters.mvalues:
    if not filter.allowP2P and msg.isP2P:
      continue
    # if message is direct p2p PoW doesn't matter
    if msg.pow < filter.powReq and not msg.isP2P:
      continue
    if filter.topics.len > 0:
      if msg.env.topic notin filter.topics:
        continue
    # Decode, if already decoded previously check if hash of key matches
    if decoded.isNone():
      decoded = decode(msg.env.data, dst = filter.privateKey,
                       symKey = filter.symKey)
      if decoded.isNone():
        continue
      if filter.privateKey.isSome():
        keyHash = keccak256.digest(filter.privateKey.get().toRaw())
        # TODO: Get rid of the hash and just use pubkey to compare?
        dst = some(toPublicKey(filter.privateKey.get()))
      elif filter.symKey.isSome():
        keyHash = keccak256.digest(filter.symKey.get())
      # else:
        # NOTE: In this case the message was not encrypted
    else:
      # Already decoded once: only reuse if this filter holds the same key.
      if filter.privateKey.isSome():
        if keyHash != keccak256.digest(filter.privateKey.get().toRaw()):
          continue
      elif filter.symKey.isSome():
        if keyHash != keccak256.digest(filter.symKey.get()):
          continue
      # else:
        # NOTE: In this case the message was not encrypted
    # When decoding is done we can check the src (signature)
    if filter.src.isSome():
      let src: Option[PublicKey] = decoded.get().src
      if not src.isSome():
        continue
      elif src.get() != filter.src.get():
        continue
    let receivedMsg = ReceivedMessage(decoded: decoded.get(),
                                      timestamp: msg.env.expiry - msg.env.ttl,
                                      ttl: msg.env.ttl,
                                      topic: msg.env.topic,
                                      pow: msg.pow,
                                      hash: msg.hash,
                                      dst: dst)
    # Either run callback or add to queue
    if filter.handler.isNil():
      filter.queue.insert(receivedMsg)
    else:
      filter.handler(receivedMsg)
proc getFilterMessages*(filters: var Filters, filterId: string):
    seq[ReceivedMessage] {.raises: [KeyError, Defect].} =
  ## Drain and return the queued messages of queue-mode filter `filterId`.
  ## Returns an empty seq for unknown ids or handler-backed filters.
  result = @[]
  if filters.contains(filterId):
    if filters[filterId].handler.isNil():
      # Hand the queue over without copying, then start a fresh one.
      shallowCopy(result, filters[filterId].queue)
      filters[filterId].queue =
        newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity)
proc toBloom*(filters: Filters): Bloom =
  ## Union of the cached blooms of all topic-restricted filters; filters
  ## without topics are skipped.
  var acc: Bloom
  for f in filters.values:
    if f.topics.len > 0:
      acc = acc or f.bloom
  acc

View File

@ -1,67 +0,0 @@
## Hardcoded Status fleet node addresses (enode URIs). These are snapshots of
## https://fleets.status.im and go stale over time; refresh them with the
## quoted curl commands.
const
  # Whisper nodes taken from:
  # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.prod"].whisper[] + "\","' -r
  WhisperNodes* = [
    "enode://b957e51f41e4abab8382e1ea7229e88c6e18f34672694c6eae389eac22dab8655622bbd4a08192c321416b9becffaab11c8e2b7a5d0813b922aa128b82990dab@47.75.222.178:443",
    "enode://66ba15600cda86009689354c3a77bdf1a97f4f4fb3ab50ffe34dbc904fac561040496828397be18d9744c75881ffc6ac53729ddbd2cdbdadc5f45c400e2622f7@178.128.141.87:443",
    "enode://182ed5d658d1a1a4382c9e9f7c9e5d8d9fec9db4c71ae346b9e23e1a589116aeffb3342299bdd00e0ab98dbf804f7b2d8ae564ed18da9f45650b444aed79d509@34.68.132.118:443",
    "enode://8bebe73ddf7cf09e77602c7d04c93a73f455b51f24ae0d572917a4792f1dec0bb4c562759b8830cc3615a658d38c1a4a38597a1d7ae3ba35111479fc42d65dec@47.75.85.212:443",
    "enode://4ea35352702027984a13274f241a56a47854a7fd4b3ba674a596cff917d3c825506431cf149f9f2312a293bb7c2b1cca55db742027090916d01529fe0729643b@134.209.136.79:443",
    "enode://fbeddac99d396b91d59f2c63a3cb5fc7e0f8a9f7ce6fe5f2eed5e787a0154161b7173a6a73124a4275ef338b8966dc70a611e9ae2192f0f2340395661fad81c0@34.67.230.193:443",
    "enode://ac3948b2c0786ada7d17b80cf869cf59b1909ea3accd45944aae35bf864cc069126da8b82dfef4ddf23f1d6d6b44b1565c4cf81c8b98022253c6aea1a89d3ce2@47.75.88.12:443",
    "enode://ce559a37a9c344d7109bd4907802dd690008381d51f658c43056ec36ac043338bd92f1ac6043e645b64953b06f27202d679756a9c7cf62fdefa01b2e6ac5098e@134.209.136.123:443",
    "enode://c07aa0deea3b7056c5d45a85bca42f0d8d3b1404eeb9577610f386e0a4744a0e7b2845ae328efc4aa4b28075af838b59b5b3985bffddeec0090b3b7669abc1f3@35.226.92.155:443",
    "enode://385579fc5b14e04d5b04af7eee835d426d3d40ccf11f99dbd95340405f37cf3bbbf830b3eb8f70924be0c2909790120682c9c3e791646e2d5413e7801545d353@47.244.221.249:443",
    "enode://4e0a8db9b73403c9339a2077e911851750fc955db1fc1e09f81a4a56725946884dd5e4d11258eac961f9078a393c45bcab78dd0e3bc74e37ce773b3471d2e29c@134.209.136.101:443",
    "enode://0624b4a90063923c5cc27d12624b6a49a86dfb3623fcb106801217fdbab95f7617b83fa2468b9ae3de593ff6c1cf556ccf9bc705bfae9cb4625999765127b423@35.222.158.246:443",
    "enode://b77bffc29e2592f30180311dd81204ab845e5f78953b5ba0587c6631be9c0862963dea5eb64c90617cf0efd75308e22a42e30bc4eb3cd1bbddbd1da38ff6483e@47.75.10.177:443",
    "enode://a8bddfa24e1e92a82609b390766faa56cf7a5eef85b22a2b51e79b333c8aaeec84f7b4267e432edd1cf45b63a3ad0fc7d6c3a16f046aa6bc07ebe50e80b63b8c@178.128.141.249:443",
    "enode://a5fe9c82ad1ffb16ae60cb5d4ffe746b9de4c5fbf20911992b7dd651b1c08ba17dd2c0b27ee6b03162c52d92f219961cc3eb14286aca8a90b75cf425826c3bd8@104.154.230.58:443",
    "enode://cf5f7a7e64e3b306d1bc16073fba45be3344cb6695b0b616ccc2da66ea35b9f35b3b231c6cf335fdfaba523519659a440752fc2e061d1e5bc4ef33864aac2f19@47.75.221.196:443",
    "enode://887cbd92d95afc2c5f1e227356314a53d3d18855880ac0509e0c0870362aee03939d4074e6ad31365915af41d34320b5094bfcc12a67c381788cd7298d06c875@178.128.141.0:443",
    "enode://282e009967f9f132a5c2dd366a76319f0d22d60d0c51f7e99795a1e40f213c2705a2c10e4cc6f3890319f59da1a535b8835ed9b9c4b57c3aad342bf312fd7379@35.223.240.17:443",
    "enode://13d63a1f85ccdcbd2fb6861b9bd9d03f94bdba973608951f7c36e5df5114c91de2b8194d71288f24bfd17908c48468e89dd8f0fb8ccc2b2dedae84acdf65f62a@47.244.210.80:443",
    "enode://2b01955d7e11e29dce07343b456e4e96c081760022d1652b1c4b641eaf320e3747871870fa682e9e9cfb85b819ce94ed2fee1ac458904d54fd0b97d33ba2c4a4@134.209.136.112:443",
    "enode://b706a60572634760f18a27dd407b2b3582f7e065110dae10e3998498f1ae3f29ba04db198460d83ed6d2bfb254bb06b29aab3c91415d75d3b869cd0037f3853c@35.239.5.162:443",
    "enode://32915c8841faaef21a6b75ab6ed7c2b6f0790eb177ad0f4ea6d731bacc19b938624d220d937ebd95e0f6596b7232bbb672905ee12601747a12ee71a15bfdf31c@47.75.59.11:443",
    "enode://0d9d65fcd5592df33ed4507ce862b9c748b6dbd1ea3a1deb94e3750052760b4850aa527265bbaf357021d64d5cc53c02b410458e732fafc5b53f257944247760@178.128.141.42:443",
    "enode://e87f1d8093d304c3a9d6f1165b85d6b374f1c0cc907d39c0879eb67f0a39d779be7a85cbd52920b6f53a94da43099c58837034afa6a7be4b099bfcd79ad13999@35.238.106.101:443",
  ]
  # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.staging"].whisper[] + "\","' -r
  WhisperNodesStaging* = [
    "enode://00395686f5954662a3796e170b9e87bbaf68a050d57e9987b78a2292502dae44aae2b8803280a017ec9af9be0b3121db9d6b3693ab3a0451a866bcbedd58fdac@47.52.226.137:443",
    "enode://914c0b30f27bab30c1dfd31dad7652a46fda9370542aee1b062498b1345ee0913614b8b9e3e84622e84a7203c5858ae1d9819f63aece13ee668e4f6668063989@167.99.19.148:443",
    "enode://2d897c6e846949f9dcf10279f00e9b8325c18fe7fa52d658520ad7be9607c83008b42b06aefd97cfe1fdab571f33a2a9383ff97c5909ed51f63300834913237e@35.192.0.86:443",
  ]
  # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.test"].whisper[] + "\","' -r
  WhisperNodesTest* = [
    "enode://ad38f94030a846cc7005b7a1f3b6b01bf4ef59d34e8d3d6f4d12df23d14ba8656702a435d34cf4df3b412c0c1923df5adcce8461321a0d8ffb9435b26e572c2a@47.52.255.194:443",
    "enode://1d193635e015918fb85bbaf774863d12f65d70c6977506187ef04420d74ec06c9e8f0dcb57ea042f85df87433dab17a1260ed8dde1bdf9d6d5d2de4b7bf8e993@206.189.243.163:443",
    "enode://f593a27731bc0f8eb088e2d39222c2d59dfb9bf0b3950d7a828d51e8ab9e08fffbd9916a82fd993c1a080c57c2bd70ed6c36f489a969de697aff93088dbee1a9@35.194.31.108:443",
  ]
  # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.prod"].boot[] + "\","' -r
  StatusBootNodes* = [
    "enode://6e6554fb3034b211398fcd0f0082cbb6bd13619e1a7e76ba66e1809aaa0c5f1ac53c9ae79cf2fd4a7bacb10d12010899b370c75fed19b991d9c0cdd02891abad@47.75.99.169:443",
    "enode://436cc6f674928fdc9a9f7990f2944002b685d1c37f025c1be425185b5b1f0900feaf1ccc2a6130268f9901be4a7d252f37302c8335a2c1a62736e9232691cc3a@178.128.138.128:443",
    "enode://32ff6d88760b0947a3dee54ceff4d8d7f0b4c023c6dad34568615fcae89e26cc2753f28f12485a4116c977be937a72665116596265aa0736b53d46b27446296a@34.70.75.208:443",
    "enode://23d0740b11919358625d79d4cac7d50a34d79e9c69e16831c5c70573757a1f5d7d884510bc595d7ee4da3c1508adf87bbc9e9260d804ef03f8c1e37f2fb2fc69@47.52.106.107:443",
    "enode://5395aab7833f1ecb671b59bf0521cf20224fe8162fc3d2675de4ee4d5636a75ec32d13268fc184df8d1ddfa803943906882da62a4df42d4fccf6d17808156a87@178.128.140.188:443",
    "enode://5405c509df683c962e7c9470b251bb679dd6978f82d5b469f1f6c64d11d50fbd5dd9f7801c6ad51f3b20a5f6c7ffe248cc9ab223f8bcbaeaf14bb1c0ef295fd0@35.223.215.156:443",
  ]
  # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.staging"].boot[] + "\","' -r
  StatusBootNodesStaging* = [
    "enode://630b0342ca4e9552f50714b6c8e28d6955bc0fd14e7950f93bc3b2b8cc8c1f3b6d103df66f51a13d773b5db0f130661fb5c7b8fa21c48890c64c79b41a56a490@47.91.229.44:443",
    "enode://f79fb3919f72ca560ad0434dcc387abfe41e0666201ebdada8ede0462454a13deb05cda15f287d2c4bd85da81f0eb25d0a486bbbc8df427b971ac51533bd00fe@174.138.107.239:443",
    "enode://10a78c17929a7019ef4aa2249d7302f76ae8a06f40b2dc88b7b31ebff4a623fbb44b4a627acba296c1ced3775d91fbe18463c15097a6a36fdb2c804ff3fc5b35@35.238.97.234:443",
  ]
  # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.test"].boot[] + "\","' -r
  StatusBootNodesTest* = [
    "enode://daae2e72820e86e942fa2a8aa7d6e9954d4043a753483d8bd338e16be82cf962392d5c0e1ae57c3d793c3d3dddd8fd58339262e4234dc966f953cd73b535f5fa@47.52.188.149:443",
    "enode://9e0988575eb7717c25dea72fd11c7b37767dc09c1a7686f7c2ec577d308d24b377ceb675de4317474a1a870e47882732967f4fa785b02ba95d669b31d464dec0@206.189.243.164:443",
    "enode://c1e5018887c863d64e431b69bf617561087825430e4401733f5ba77c70db14236df381fefb0ebe1ac42294b9e261bbe233dbdb83e32c586c66ae26c8de70cb4c@35.188.168.137:443",
  ]