Mirror of https://github.com/logos-messaging/logos-messaging-nim.git, synced 2026-01-03 22:43:09 +00:00
chore: bump dependencies for v0.36 (#3410)
* properly pass userMessageLimit to OnchainGroupManager
* waku.nimble: require the 2.2.4 Nim compiler
* rm stew/shims/net import
* change ValidIpAddress.init to parseIpAddress
* fix serialize for zerokit
* group_manager: separate if statements
* protocol_types: add encode UInt32 with zeros up to 32 bytes
* windows build: skip libunwind build and rm libunwind.a inclusion step
* bump nph to overcome the compilation issues with 2.2.x
* bump nim-libp2p to v1.10.1
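The most pervasive change in this commit replaces the removed stew/shims/net shim: every ValidIpAddress.init call site now uses parseIpAddress from std/net. A minimal standalone sketch of the migration (illustrative, not taken verbatim from the repo):

import std/net

# Before (stew shim, removed for Nim 2.2.x compatibility):
#   let ip = ValidIpAddress.init("0.0.0.0")
# After (std/net only; parseIpAddress raises ValueError on malformed input):
let ip = parseIpAddress("0.0.0.0")
echo ip # 0.0.0.0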
This commit is contained in:
parent 1512bdaf04
commit 005815746b
13  .github/ISSUE_TEMPLATE/bump_dependencies.md (vendored)
@@ -12,7 +12,6 @@ assignees: ''

Update `nwaku` "vendor" dependencies.

### Items to bump

- [ ] negentropy
- [ ] dnsclient.nim ( update to the latest tag version )
- [ ] nim-bearssl
- [ ] nimbus-build-system

@@ -38,12 +37,12 @@ Update `nwaku` "vendor" dependencies.

- [ ] nim-sqlite3-abi ( update to the latest tag version )
- [ ] nim-stew
- [ ] nim-stint
- [ ] nim-taskpools
- [ ] nim-testutils
- [ ] nim-taskpools ( update to the latest tag version )
- [ ] nim-testutils ( update to the latest tag version )
- [ ] nim-toml-serialization
- [ ] nim-unicodedb
- [ ] nim-unittest2
- [ ] nim-web3
- [ ] nim-websock
- [ ] nim-unittest2 ( update to the latest tag version )
- [ ] nim-web3 ( update to the latest tag version )
- [ ] nim-websock ( update to the latest tag version )
- [ ] nim-zlib
- [ ] zerokit ( this should be kept in version `v0.5.1` )
- [ ] zerokit ( this should be kept in version `v0.7.0` )
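Two items above changed together: zerokit is now pinned at v0.7.0, and, per the commit message, the RLN protocol_types serialization encodes a UInt32 zero-padded up to 32 bytes to match zerokit's fixed-width fields. A hedged sketch of that encoding (the helper name is hypothetical, not the repo's actual proc):

import stew/endians2

# Hypothetical helper: write a uint32 little-endian into a zerokit-style
# fixed 32-byte field; the remaining 28 bytes stay zero.
proc encodeUint32Padded(v: uint32): array[32, byte] =
  let le = v.toBytesLE() # 4 bytes, little-endian
  for i in 0 ..< le.len:
    result[i] = le[i]

doAssert encodeUint32Padded(258)[0 .. 1] == @[2'u8, 1'u8]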
25  .github/workflows/windows-build.yml (vendored)
@@ -68,28 +68,6 @@ jobs:
          ./build_all.bat
          cd ../../../..

      - name: Building libunwind
        run: |
          cd vendor/nim-libbacktrace
          mkdir -p vendor/libunwind/build
          pushd vendor/libunwind

          cmake -S runtimes \
            -DLLVM_ENABLE_RUNTIMES="libunwind" \
            -DLIBUNWIND_ENABLE_SHARED=OFF -DLIBUNWIND_ENABLE_STATIC=ON \
            -DLIBUNWIND_INCLUDE_DOCS=OFF -DLIBUNWIND_INSTALL_HEADERS=ON \
            -DCMAKE_INSTALL_PREFIX="$(pwd)/../install/usr" \
            -G "MinGW Makefiles" -B build

          cd build
          mingw32-make VERBOSE=1 clean
          mingw32-make VERBOSE=1 unwind_static
          mingw32-make VERBOSE=1 install-unwind

          popd
          mkdir -p install/usr/lib
          cp -r vendor/libunwind/build/lib/libunwind.a install/usr/lib/

      - name: Building miniupnpc
        run: |
          cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc

@@ -105,9 +83,6 @@ jobs:

      - name: Building wakunode2
        run: |
          cd vendor/nim-libbacktrace
          cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib
          cd ../..
          make wakunode2 LOG_LEVEL=DEBUG V=3 -j8

      - name: Check Executable
@@ -11,7 +11,6 @@ import
  confutils,
  chronicles,
  chronos,
  stew/shims/net as stewNet,
  eth/keys,
  bearssl,
  stew/[byteutils, results],

@@ -559,7 +558,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
  let rlnConf = WakuRlnConfig(
    dynamic: conf.rlnRelayDynamic,
    credIndex: conf.rlnRelayCredIndex,
    chainId: conf.rlnRelayChainId,
    chainId: UInt256.fromBytesBE(conf.rlnRelayChainId.toBytesBE()),
    ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
    creds: some(
      RlnRelayCreds(
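This hunk shows the conversion pattern used throughout the commit wherever the CLI still parses the chain id as a plain unsigned integer while RLN code now stores UInt256: a big-endian byte round-trip through stew/endians2 and stint. A standalone sketch of the same conversion:

import stint, stew/endians2

# The config side holds a plain 64-bit value; RLN metadata is 256-bit.
let chainIdCli = 11155111'u64 # Sepolia, as parsed from the command line
let chainId = UInt256.fromBytesBE(chainIdCli.toBytesBE())
doAssert chainId == 11155111.u256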
@@ -5,7 +5,6 @@ import
  chronos,
  std/strutils,
  results,
  stew/shims/net,
  regex

type EthRpcUrl* = distinct string

@@ -3,7 +3,6 @@
import
  std/json,
  results,
  stew/shims/net,
  chronicles,
  chronicles/topics_registry,
  chronos,

@@ -2,7 +2,6 @@ import
  std/[strutils, sequtils, tables, strformat],
  confutils,
  chronos,
  stew/shims/net,
  chronicles/topics_registry,
  os
import

@@ -1,7 +1,6 @@
import
  std/[tables, sequtils],
  stew/byteutils,
  stew/shims/net,
  chronicles,
  chronos,
  confutils,

@@ -1,7 +1,6 @@
import
  std/[tables, times, sequtils],
  stew/byteutils,
  stew/shims/net,
  chronicles,
  results,
  chronos,

@@ -1,7 +1,6 @@
import
  std/[tables, times, sequtils],
  stew/byteutils,
  stew/shims/net,
  chronicles,
  chronos,
  confutils,

@@ -1,7 +1,6 @@
import
  std/[tables, sequtils],
  stew/byteutils,
  stew/shims/net,
  chronicles,
  chronos,
  confutils,

@@ -3,7 +3,6 @@
import waku/[common/logging, factory/[waku, networks_config, external_config]]
import
  std/[options, strutils, os, sequtils],
  stew/shims/net as stewNet,
  chronicles,
  chronos,
  metrics,

@@ -3,7 +3,6 @@
import
  std/[os, options],
  results,
  stew/shims/net as stewNet,
  testutils/unittests,
  confutils,
  confutils/defs,

@@ -1,6 +1,6 @@
{.used.}

import std/options, results, stew/shims/net, testutils/unittests
import std/[options, net], results, testutils/unittests
import waku/common/enr, ../testlib/wakucore

suite "nim-eth ENR - builder and typed record":

@@ -8,7 +8,8 @@ import
  libp2p/multiaddress,
  nimcrypto/utils,
  secp256k1,
  confutils
  confutils,
  stint
import
  ../../waku/factory/external_config,
  ../../waku/factory/networks_config,

@@ -3,7 +3,6 @@ import
  testutils/unittests,
  libp2p/[multiaddress, peerid],
  libp2p/crypto/crypto,
  stew/shims/net,
  eth/keys,
  eth/p2p/discoveryv5/enr,
  nimcrypto/utils

@@ -1,9 +1,4 @@
import
  chronicles,
  std/[options, tables, strutils],
  stew/shims/net,
  chronos,
  testutils/unittests
import chronicles, std/[options, tables, strutils], chronos, testutils/unittests

import
  waku/node/waku_node,

@@ -23,7 +18,7 @@ suite "Peer Manager":

  asyncSetup:
    listenPort = Port(0)
    listenAddress = ValidIpAddress.init("0.0.0.0")
    listenAddress = parseIpAddress("0.0.0.0")
    serverKey = generateSecp256k1Key()
    clientKey = generateSecp256k1Key()
    clusterId = 1

@@ -2,7 +2,6 @@

import
  std/[options, tables, sequtils, strutils, sets],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  chronicles,
@@ -1,8 +1,7 @@
{.used.}

import
  std/[options, tempfiles],
  stew/shims/net as stewNet,
  std/[options, tempfiles, net],
  testutils/unittests,
  chronos,
  std/strformat,

@@ -46,8 +45,8 @@ suite "Waku Legacy Lightpush - End To End":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    await allFutures(server.start(), client.start())
    await server.start()

@@ -70,7 +69,7 @@ suite "Waku Legacy Lightpush - End To End":
  asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
    # Given a light lightpush client
    let lightpushClient =
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    lightpushClient.mountLegacyLightpushClient()

    # When the client publishes a message

@@ -129,8 +128,8 @@ suite "RLN Proofs as a Lightpush Service":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    # mount rln-relay
    let wakuRlnConfig = WakuRlnConfig(

@@ -162,7 +161,7 @@ suite "RLN Proofs as a Lightpush Service":
  asyncTest "Message is published when RLN enabled":
    # Given a light lightpush client
    let lightpushClient =
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    lightpushClient.mountLegacyLightPushClient()

    # When the client publishes a message
@@ -1,11 +1,6 @@
{.used.}

import
  std/options,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  libp2p/crypto/crypto
import std/options, testutils/unittests, chronos, libp2p/crypto/crypto

import
  waku/[

@@ -66,8 +61,8 @@ suite "Waku Store - End to End - Sorted Archive":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

      archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages)
    let mountArchiveResult = server.mountLegacyArchive(archiveDriver)

@@ -440,7 +435,7 @@ suite "Waku Store - End to End - Sorted Archive":
        newArchiveDriverWithMessages(pubsubTopic, archiveMessages)
      otherServerKey = generateSecp256k1Key()
      otherServer =
        newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountOtherArchiveResult =
        otherServer.mountLegacyArchive(otherArchiveDriverWithMessages)
    assert mountOtherArchiveResult.isOk()

@@ -522,8 +517,8 @@ suite "Waku Store - End to End - Unsorted Archive":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    let
      unsortedArchiveDriverWithMessages =

@@ -678,8 +673,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    let archiveDriver = newSqliteArchiveDriver()
      .put(pubsubTopic, archiveMessages[0 ..< 6])

@@ -927,7 +922,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      ephemeralServerKey = generateSecp256k1Key()
      ephemeralServer =
        newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountEphemeralArchiveResult =
        ephemeralServer.mountLegacyArchive(ephemeralArchiveDriver)
    assert mountEphemeralArchiveResult.isOk()

@@ -970,7 +965,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      mixedServerKey = generateSecp256k1Key()
      mixedServer =
        newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountMixedArchiveResult = mixedServer.mountLegacyArchive(mixedArchiveDriver)
    assert mountMixedArchiveResult.isOk()

@@ -997,7 +992,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      emptyServerKey = generateSecp256k1Key()
      emptyServer =
        newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountEmptyArchiveResult = emptyServer.mountLegacyArchive(emptyArchiveDriver)
    assert mountEmptyArchiveResult.isOk()

@@ -1028,7 +1023,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      voluminousServerKey = generateSecp256k1Key()
      voluminousServer =
        newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountVoluminousArchiveResult =
        voluminousServer.mountLegacyArchive(voluminousArchiveDriverWithMessages)
    assert mountVoluminousArchiveResult.isOk()
@@ -2,7 +2,6 @@

import
  std/[options, tempfiles],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  std/strformat,

@@ -40,8 +39,8 @@ suite "Waku Lightpush - End To End":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    await allFutures(server.start(), client.start())
    await server.start()

@@ -63,7 +62,7 @@ suite "Waku Lightpush - End To End":
  asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
    # Given a light lightpush client
    let lightpushClient =
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    lightpushClient.mountLightpushClient()

    # When the client publishes a message

@@ -123,8 +122,8 @@ suite "RLN Proofs as a Lightpush Service":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    # mount rln-relay
    let wakuRlnConfig = WakuRlnConfig(

@@ -156,7 +155,7 @@ suite "RLN Proofs as a Lightpush Service":
  asyncTest "Message is published when RLN enabled":
    # Given a light lightpush client
    let lightpushClient =
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    lightpushClient.mountLightPushClient()

    # When the client publishes a message

@@ -5,7 +5,6 @@ import
  testutils/unittests,
  chronos,
  chronicles,
  stew/shims/net,
  libp2p/switch,
  libp2p/peerId,
  libp2p/crypto/crypto,
@@ -3,7 +3,6 @@
import
  os,
  std/[options, tables],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  # chronos/timer,

@@ -32,7 +31,7 @@ const DEFAULT_PROTOCOLS: seq[string] =
  @["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"]

let
  listenIp = ValidIpAddress.init("0.0.0.0")
  listenIp = parseIpAddress("0.0.0.0")
  listenPort = Port(0)

suite "Peer Manager":
@@ -2,7 +2,6 @@

import
  std/[tempfiles, strutils, options],
  stew/shims/net as stewNet,
  stew/results,
  testutils/unittests,
  chronos,

@@ -121,8 +120,8 @@ suite "Waku RlnRelay - End to End - Static":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    await allFutures(server.start(), client.start())

@@ -410,8 +409,8 @@ suite "Waku RlnRelay - End to End - OnChain":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    await allFutures(server.start(), client.start())
@@ -1,16 +1,10 @@
{.used.}

import
  std/[options, sequtils, tempfiles],
  testutils/unittests,
  chronos,
  chronicles,
  stew/shims/net as stewNet
import std/[options, sequtils, tempfiles], testutils/unittests, chronos, chronicles

import
  std/[sequtils, tempfiles],
  stew/byteutils,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  libp2p/switch,

@@ -35,7 +29,7 @@ import
import waku_relay/protocol

const
  listenIp = ValidIpAddress.init("0.0.0.0")
  listenIp = parseIpAddress("0.0.0.0")
  listenPort = Port(0)

suite "Sharding":
@@ -1,11 +1,6 @@
{.used.}

import
  std/[options, sequtils, sets],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  libp2p/crypto/crypto
import std/[options, sequtils, sets], testutils/unittests, chronos, libp2p/crypto/crypto

import
  waku/[

@@ -75,8 +70,8 @@ suite "Waku Store - End to End - Sorted Archive":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

      archiveDriver = newArchiveDriverWithMessages(pubsubTopic, messages)
    let mountArchiveResult = server.mountArchive(archiveDriver)

@@ -480,7 +475,7 @@ suite "Waku Store - End to End - Sorted Archive":
      )
      otherServerKey = generateSecp256k1Key()
      otherServer =
        newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountOtherArchiveResult =
        otherServer.mountArchive(otherArchiveDriverWithMessages)
    assert mountOtherArchiveResult.isOk()

@@ -571,8 +566,8 @@ suite "Waku Store - End to End - Unsorted Archive":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    let
      unsortedArchiveDriverWithMessages =

@@ -788,8 +783,8 @@ suite "Waku Store - End to End - Unsorted Archive without provided Timestamp":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    let
      unsortedArchiveDriverWithMessages =

@@ -938,8 +933,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
      serverKey = generateSecp256k1Key()
      clientKey = generateSecp256k1Key()

      server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
      server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
      client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

    let archiveDriver = newSqliteArchiveDriver().put(pubsubTopic, messages[0 ..< 6]).put(
      pubsubTopicB, messages[6 ..< 10]

@@ -1189,7 +1184,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      ephemeralServerKey = generateSecp256k1Key()
      ephemeralServer =
        newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountEphemeralArchiveResult =
        ephemeralServer.mountArchive(ephemeralArchiveDriver)
    assert mountEphemeralArchiveResult.isOk()

@@ -1231,7 +1226,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      mixedServerKey = generateSecp256k1Key()
      mixedServer =
        newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountMixedArchiveResult = mixedServer.mountArchive(mixedArchiveDriver)
    assert mountMixedArchiveResult.isOk()

@@ -1258,7 +1253,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      emptyServerKey = generateSecp256k1Key()
      emptyServer =
        newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountEmptyArchiveResult = emptyServer.mountArchive(emptyArchiveDriver)
    assert mountEmptyArchiveResult.isOk()

@@ -1298,7 +1293,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
    let
      voluminousServerKey = generateSecp256k1Key()
      voluminousServer =
        newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0))
      mountVoluminousArchiveResult =
        voluminousServer.mountArchive(voluminousArchiveDriverWithMessages)
    assert mountVoluminousArchiveResult.isOk()
@@ -2,7 +2,6 @@

import
  std/[sequtils, times, sugar, net],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  json_rpc/rpcserver,

@@ -39,7 +38,7 @@ procSuite "Peer Manager":
  asyncTest "connectPeer() works":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )
    await allFutures(nodes.mapIt(it.start()))

@@ -58,7 +57,7 @@ procSuite "Peer Manager":
  asyncTest "dialPeer() works":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )

    await allFutures(nodes.mapIt(it.start()))

@@ -93,7 +92,7 @@ procSuite "Peer Manager":
  asyncTest "dialPeer() fails gracefully":
    # Create 2 nodes and start them
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )
    await allFutures(nodes.mapIt(it.start()))
    await allFutures(nodes.mapIt(it.mountRelay()))

@@ -121,8 +120,7 @@ procSuite "Peer Manager":

  asyncTest "Adding, selecting and filtering peers work":
    let
      node =
        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))

      # Create filter peer
      filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()

@@ -155,7 +153,7 @@ procSuite "Peer Manager":
  asyncTest "Peer manager keeps track of connections":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )

    await allFutures(nodes.mapIt(it.start()))

@@ -208,7 +206,7 @@ procSuite "Peer Manager":
  asyncTest "Peer manager updates failed peers correctly":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )

    await allFutures(nodes.mapIt(it.start()))

@@ -310,7 +308,7 @@ procSuite "Peer Manager":
    # Simulate restart by initialising a new node using the same storage
    let node3 = newTestWakuNode(
      generateSecp256k1Key(),
      ValidIpAddress.init("127.0.0.1"),
      parseIpAddress("127.0.0.1"),
      Port(56037),
      peerStorage = storage,
    )

@@ -383,7 +381,7 @@ procSuite "Peer Manager":
    # Simulate restart by initialising a new node using the same storage
    let node3 = newTestWakuNode(
      generateSecp256k1Key(),
      ValidIpAddress.init("127.0.0.1"),
      parseIpAddress("127.0.0.1"),
      Port(56037),
      peerStorage = storage,
    )

@@ -419,7 +417,7 @@ procSuite "Peer Manager":
      # different network
      node1 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        parseIpAddress("0.0.0.0"),
        port,
        clusterId = 3,
        shards = @[uint16(0)],

@@ -428,14 +426,14 @@ procSuite "Peer Manager":
      # same network
      node2 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        parseIpAddress("0.0.0.0"),
        port,
        clusterId = 4,
        shards = @[uint16(0)],
      )
      node3 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        parseIpAddress("0.0.0.0"),
        port,
        clusterId = 4,
        shards = @[uint16(0)],

@@ -475,12 +473,12 @@ procSuite "Peer Manager":
      storage = WakuPeerStorage.new(database)[]
      node1 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        parseIpAddress("0.0.0.0"),
        Port(0),
        peerStorage = storage,
      )
      node2 =
        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
        newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
      peerInfo2 = node2.switch.peerInfo
      betaCodec = "/vac/waku/relay/2.0.0-beta2"
      stableCodec = "/vac/waku/relay/2.0.0"

@@ -508,10 +506,7 @@ procSuite "Peer Manager":

    # Simulate restart by initialising a new node using the same storage
    let node3 = newTestWakuNode(
      generateSecp256k1Key(),
      ValidIpAddress.init("0.0.0.0"),
      Port(0),
      peerStorage = storage,
      generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), peerStorage = storage
    )

    (await node3.mountRelay()).isOkOr:

@@ -546,7 +541,7 @@ procSuite "Peer Manager":
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(
        nodeKey = generateSecp256k1Key(),
        bindIp = ValidIpAddress.init("0.0.0.0"),
        bindIp = parseIpAddress("0.0.0.0"),
        bindPort = Port(0),
        wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
      )

@@ -616,7 +611,7 @@ procSuite "Peer Manager":
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(
        nodeKey = generateSecp256k1Key(),
        bindIp = ValidIpAddress.init("0.0.0.0"),
        bindIp = parseIpAddress("0.0.0.0"),
        bindPort = Port(0),
        wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
      )

@@ -684,7 +679,7 @@ procSuite "Peer Manager":
  asyncTest "Peer store keeps track of incoming connections":
    # Create 4 nodes
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )

    # Start them

@@ -778,8 +773,7 @@ procSuite "Peer Manager":
    let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D"

    let
      node =
        newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
      peers = toSeq(1 .. 4)
        .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
        .filterIt(it.isOk())

@@ -818,7 +812,7 @@ procSuite "Peer Manager":
  asyncTest "connectedPeers() returns expected number of connections per protocol":
    # Create 4 nodes
    let nodes = toSeq(0 ..< 4).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )

    # Start them with relay + filter

@@ -873,7 +867,7 @@ procSuite "Peer Manager":
  asyncTest "getNumStreams() returns expected number of connections per protocol":
    # Create 2 nodes
    let nodes = toSeq(0 ..< 2).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )

    # Start them with relay + filter

@@ -1140,7 +1134,7 @@ procSuite "Peer Manager":
  asyncTest "colocationLimit is enforced by pruneConnsByIp()":
    # Create 5 nodes
    let nodes = toSeq(0 ..< 5).mapIt(
      newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    )

    # Start them with relay + filter
@@ -2,7 +2,6 @@

import
  std/[sequtils, options],
  stew/shims/net,
  testutils/unittests,
  chronos,
  libp2p/peerid,

@@ -2,7 +2,6 @@

import
  std/[sequtils, tables],
  stew/shims/net,
  results,
  stew/base32,
  testutils/unittests,

@@ -1,7 +1,6 @@
{.used.}

import
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  chronicles,

@@ -5,7 +5,6 @@ import
  testutils/unittests,
  chronos,
  chronicles,
  stew/shims/net,
  libp2p/switch,
  libp2p/peerId,
  libp2p/crypto/crypto,
@@ -75,8 +75,15 @@ suite "Waku Switch":
      completionFut = newFuture[bool]()
      proto = new LPProtocol
    proto.codec = customProtoCodec
    proto.handler = proc(conn: Connection, proto: string) {.async.} =
      assert (await conn.readLp(1024)) == msg.toBytes()
    proto.handler = proc(
        conn: Connection, proto: string
    ) {.async: (raises: [CancelledError]).} =
      try:
        assert (await conn.readLp(1024)) == msg.toBytes()
      except LPStreamError:
        error "Connection read error", error = getCurrentExceptionMsg()
        assert false, getCurrentExceptionMsg()

      completionFut.complete(true)

    await proto.start()
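nim-libp2p v1.10.1 type-checks the exceptions a protocol handler may raise: handlers are declared {.async: (raises: [CancelledError]).}, so stream errors must be caught inside the body instead of escaping, as in the hunk above. A self-contained chronos sketch of the pattern (names illustrative, no libp2p needed):

import chronos

proc riskyRead(): Future[void] {.async.} =
  # Stands in for conn.readLp, which can raise LPStreamError.
  raise newException(IOError, "stream closed")

proc handler(): Future[void] {.async: (raises: [CancelledError]).} =
  try:
    await riskyRead()
  except CancelledError as exc:
    raise exc # cancellation stays within the declared raises set
  except CatchableError:
    echo "handled inside the handler: ", getCurrentExceptionMsg()

waitFor handler()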
@@ -3,7 +3,6 @@
import
  std/[sequtils, strutils, net],
  stew/byteutils,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronicles,
  chronos,

@@ -15,7 +14,8 @@ import
  libp2p/protocols/pubsub/pubsub,
  libp2p/protocols/pubsub/gossipsub,
  libp2p/nameresolving/mockresolver,
  eth/p2p/discoveryv5/enr
  eth/p2p/discoveryv5/enr,
  eth/net/utils
import
  waku/[waku_core, waku_node, node/peer_manager], ./testlib/wakucore, ./testlib/wakunode

@@ -1,7 +1,6 @@
import
  std/options,
  results,
  stew/shims/net,
  chronos,
  libp2p/switch,
  libp2p/builders,

@@ -1,6 +1,6 @@
{.used.}

import stew/shims/net as stewNet, std/strutils, testutils/unittests
import std/[strutils, net], testutils/unittests
import ../testlib/wakucore, ../testlib/wakunode

suite "Waku Core - Published Address":

@@ -1,9 +1,4 @@
import
  std/options,
  stew/shims/net,
  chronos,
  libp2p/crypto/crypto as libp2p_keys,
  eth/keys as eth_keys
import std/options, chronos, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys

import
  waku/

@@ -2,7 +2,6 @@

import
  stew/results,
  stew/shims/net,
  chronos,
  testutils/unittests,
  libp2p/crypto/crypto as libp2p_keys,

@@ -2,7 +2,6 @@ import
  std/options,
  sequtils,
  results,
  stew/shims/net,
  chronos,
  libp2p/crypto/crypto as libp2p_keys,
  eth/keys as eth_keys

@@ -1,7 +1,7 @@
import
  unittest,
  results,
  stew/[shims/net, byteutils],
  stew/byteutils,
  nimcrypto/sha2,
  libp2p/protocols/pubsub/rpc/messages

@@ -2,7 +2,6 @@

import
  std/[options, strformat],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  libp2p/protocols/pubsub/[pubsub, gossipsub],

@@ -3,7 +3,6 @@
import
  std/[os, sequtils, sysrand, math],
  stew/byteutils,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  libp2p/switch,

@@ -3,7 +3,6 @@
import
  std/[strutils, sequtils, tempfiles],
  stew/byteutils,
  stew/shims/net as stewNet,
  chronos,
  chronicles,
  libp2p/switch,

@@ -3,7 +3,6 @@ import
  testutils/unittests,
  chronicles,
  chronos,
  stew/shims/net as stewNet,
  eth/keys,
  bearssl,
  stew/[results],
@@ -57,19 +57,21 @@ suite "Onchain group manager":
      raiseAssert "Expected error when chainId does not match"

  asyncTest "should initialize when chainId is set to 0":
    manager.chainId = 0
    manager.chainId = 0x0'u256

    (await manager.init()).isOkOr:
      raiseAssert $error

  asyncTest "should error on initialization when loaded metadata does not match":
    (await manager.init()).isOkOr:
      raiseAssert $error
      assert false, $error

    let metadataSetRes = manager.setMetadata()
    assert metadataSetRes.isOk(), metadataSetRes.error
    let metadataOpt = manager.rlnInstance.getMetadata().valueOr:
      raiseAssert $error
      assert false, $error
      return

    assert metadataOpt.isSome(), "metadata is not set"
    let metadata = metadataOpt.get()

@@ -84,17 +86,12 @@ suite "Onchain group manager":
      ethContractAddress: $differentContractAddress,
      rlnInstance: manager.rlnInstance,
      onFatalErrorAction: proc(errStr: string) =
        raiseAssert errStr
        assert false, errStr
      ,
    )
    let e = await manager2.init()
    (e).isErrOr:
      raiseAssert "Expected error when contract address doesn't match"

    echo "---"
    discard "persisted data: contract address mismatch"
    echo e.error
    echo "---"
    assert false, "Expected error when contract address doesn't match"

  asyncTest "should error if contract does not exist":
    manager.ethContractAddress = "0x0000000000000000000000000000000000000000"
@@ -13,7 +13,7 @@ import
    waku_rln_relay/group_manager/static/group_manager,
  ]

import stew/shims/net, chronos, libp2p/crypto/crypto, eth/keys, dnsdisc/builder
import chronos, libp2p/crypto/crypto, eth/keys, dnsdisc/builder

import std/tempfiles

@@ -3,7 +3,6 @@
import
  std/[options, os, sequtils, tempfiles],
  stew/byteutils,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  chronicles,

@@ -247,7 +246,7 @@ suite "Waku rln relay":
    .setMetadata(
      RlnMetadata(
        lastProcessedBlock: 128,
        chainId: 1155511,
        chainId: 1155511'u256,
        contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155",
      )
    )

@@ -265,7 +264,7 @@ suite "Waku rln relay":
    .setMetadata(
      RlnMetadata(
        lastProcessedBlock: 128,
        chainId: 1155511,
        chainId: 1155511'u256,
        contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155",
      )
    )

@@ -278,7 +277,7 @@ suite "Waku rln relay":
    let metadata = metadataOpt.get()
    check:
      metadata.lastProcessedBlock == 128
      metadata.chainId == 1155511
      metadata.chainId == 1155511'u256
      metadata.contractAddress == "0x9c09146844c1326c2dbc41c451766c7138f88155"

  test "getMetadata: empty rln metadata":

@@ -3,7 +3,6 @@
import
  std/[sequtils, tempfiles],
  stew/byteutils,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronicles,
  chronos,

@@ -13,6 +13,7 @@ import
  web3,
  web3/conversions,
  web3/eth_api_types,
  json_rpc/rpcclient,
  json,
  libp2p/crypto/crypto,
  eth/keys,

@@ -29,7 +30,7 @@ import
  ../testlib/common,
  ./utils

const CHAIN_ID* = 1337
const CHAIN_ID* = 1337'u256

template skip0xPrefix(hexStr: string): int =
  ## Returns the index of the first meaningful char in `hexStr` by skipping

@@ -74,7 +75,8 @@ proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
  let add = web3.defaultAccount
  debug "contract deployer account address ", add

  let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
  let balance =
    await web3.provider.eth_getBalance(web3.defaultAccount, blockId("latest"))
  debug "Initial account balance: ", balance

  # deploy poseidon hasher bytecode
@@ -3,7 +3,6 @@
import
  std/[sequtils, tempfiles],
  stew/byteutils,
  stew/shims/net as stewNet,
  chronos,
  chronicles,
  libp2p/switch,

@@ -2,7 +2,6 @@

import
  std/sequtils,
  stew/shims/net as stewNet,
  testutils/unittests,
  chronicles,
  chronos,

@@ -1,7 +1,6 @@
{.used.}

import
  stew/shims/net,
  testutils/unittests,
  chronicles,
  chronos,

@@ -2,7 +2,6 @@

import
  std/[sequtils, sysrand, math],
  stew/shims/net as stewNet,
  testutils/unittests,
  chronos,
  libp2p/crypto/crypto,

@@ -1,7 +1,6 @@
{.used.}

import
  stew/shims/net,
  testutils/unittests,
  presto,
  presto/client as presto_client,

@@ -1,7 +1,6 @@
{.used.}

import
  stew/shims/net,
  testutils/unittests,
  presto,
  presto/client as presto_client,

@@ -3,7 +3,6 @@
import
  chronos/timer,
  stew/byteutils,
  stew/shims/net,
  testutils/unittests,
  presto,
  presto/client as presto_client,

@@ -2,7 +2,6 @@

import
  std/tempfiles,
  stew/shims/net,
  testutils/unittests,
  presto,
  presto/client as presto_client,

@@ -3,7 +3,6 @@
import
  std/sequtils,
  stew/byteutils,
  stew/shims/net,
  testutils/unittests,
  presto,
  presto/client as presto_client,

@@ -3,7 +3,6 @@
import
  std/sequtils,
  stew/byteutils,
  stew/shims/net,
  testutils/unittests,
  presto,
  presto/client as presto_client,

@@ -3,7 +3,6 @@
import
  std/[sequtils, strformat, tempfiles],
  stew/byteutils,
  stew/shims/net,
  testutils/unittests,
  presto,
  presto/client as presto_client,

@@ -320,7 +319,7 @@ suite "Waku v2 Rest API - Relay":
    check:
      # Node should be subscribed to all shards
      node.wakuRelay.subscribedTopics ==
        @["/waku/2/rs/1/7", "/waku/2/rs/1/2", "/waku/2/rs/1/5"]
        @["/waku/2/rs/1/5", "/waku/2/rs/1/7", "/waku/2/rs/1/2"]

    await restServer.stop()
    await restServer.closeWait()

@@ -2,7 +2,6 @@

import
  std/[options, sugar],
  stew/shims/net as stewNet,
  chronicles,
  chronos/timer,
  testutils/unittests,

@@ -20,7 +20,7 @@ type RlnKeystoreGeneratorConf* = object
  execute*: bool
  ethContractAddress*: string
  ethClientUrls*: seq[string]
  chainId*: uint
  chainId*: UInt256
  credPath*: string
  credPassword*: string
  userMessageLimit*: uint64
2  vendor/nim-chronicles (vendored)
@@ -1 +1 @@
Subproject commit 81a4a7a360c78be9c80c8f735c76b6d4a1517304
Subproject commit a8fb38a10bcb548df78e9a70bd77b26bb50abd12

2  vendor/nim-chronos (vendored)
@@ -1 +1 @@
Subproject commit c04576d829b8a0a1b12baaa8bc92037501b3a4a0
Subproject commit 0646c444fce7c7ed08ef6f2c9a7abfd172ffe655

2  vendor/nim-eth (vendored)
@@ -1 +1 @@
Subproject commit c6c9dc7ae01656eba8126b913e84bdfb95c8c323
Subproject commit a1f7d63ababa6ce90798e16a110fc4e43ac93f03

2  vendor/nim-faststreams (vendored)
@@ -1 +1 @@
Subproject commit 2b08c774afaafd600cf4c6f994cf78b8aa090c0c
Subproject commit c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d

2  vendor/nim-json-rpc (vendored)
@@ -1 +1 @@
Subproject commit c0ac848733e42e672081f429fb146451894f7711
Subproject commit cbe8edf69d743a787b76b1cd25bfc4eae89927f7

2  vendor/nim-libbacktrace (vendored)
@@ -1 +1 @@
Subproject commit dbade9ba250da7db519c5cdfb225d03ca1255efc
Subproject commit 822849874926ba3849a86cb3eafdf017bd11bd2d

2  vendor/nim-libp2p (vendored)
@@ -1 +1 @@
Subproject commit 78a434405435b69a24e8b263d48d622d57c4db5b
Subproject commit cd60b254a0700b0daac7a6cb2c0c48860b57c539

2  vendor/nim-metrics (vendored)
@@ -1 +1 @@
Subproject commit 25ffd054fd774f8cf7935e75d6cad542306d7802
Subproject commit 11d0cddfb0e711aa2a8c75d1892ae24a64c299fc

2  vendor/nim-nat-traversal (vendored)
@@ -1 +1 @@
Subproject commit 213ac13dfe5c4830474912c48181b86b73f1ec1f
Subproject commit dfbf8c9ad3655f238b350f690bbfce5ec34d25fb

2  vendor/nim-regex (vendored)
@@ -1 +1 @@
Subproject commit 0673df07cb266e15942c3b5f5b8a4732f049cd73
Subproject commit 4593305ed1e49731fc75af1dc572dd2559aad19c

2  vendor/nim-secp256k1 (vendored)
@@ -1 +1 @@
Subproject commit 62e16b4dff513f1eea7148a8cbba8a8c547b9546
Subproject commit f808ed5e7a7bfc42204ec7830f14b7a42b63c284

2  vendor/nim-sqlite3-abi (vendored)
@@ -1 +1 @@
Subproject commit cc4fefd538aa43814c5864c540fb75b567c2dcc3
Subproject commit d08e964872271e83fb1b6de67ad57c2d0fcdfe63

2  vendor/nim-stew (vendored)
@@ -1 +1 @@
Subproject commit 687d1b4ab1a91e6cc9c92e4fd4d98bec7874c259
Subproject commit 58abb4891f97c6cdc07335e868414e0c7b736c68

2  vendor/nim-taskpools (vendored)
@@ -1 +1 @@
Subproject commit 7b74a716a40249720fd7da428113147942b9642d
Subproject commit 9e8ccc754631ac55ac2fd495e167e74e86293edb

2  vendor/nim-testutils (vendored)
@@ -1 +1 @@
Subproject commit 14a56ae5aada81bed43e29d2368fc8ab8a449bf5
Subproject commit 94d68e796c045d5b37cabc6be32d7bfa168f8857

2  vendor/nim-unittest2 (vendored)
@@ -1 +1 @@
Subproject commit 88a613ffa4dbe452971beb937ea2db736dc9a9f4
Subproject commit 8b51e99b4a57fcfb31689230e75595f024543024

2  vendor/nim-web3 (vendored)
@@ -1 +1 @@
Subproject commit 94aac8a77cd265fe779ce8ed25a028340b925fd1
Subproject commit 3ef986c9d93604775595f116a35c6ac0bf5257fc

2  vendor/nim-zlib (vendored)
@@ -1 +1 @@
Subproject commit 3f7998095264d262a8d99e2be89045e6d9301537
Subproject commit daa8723fd32299d4ca621c837430c29a5a11e19a

2  vendor/nimbus-build-system (vendored)
@@ -1 +1 @@
Subproject commit 8fafcd0bac9f409091b7bcaee62ab6330f57441e
Subproject commit 0be0663e1af76e869837226a4ef3e586fcc737d3

2  vendor/nimcrypto (vendored)
@@ -1 +1 @@
Subproject commit dc07e3058c6904eef965394493b6ea99aa2adefc
Subproject commit 19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1

2  vendor/nph (vendored)
@@ -1 +1 @@
Subproject commit 0d8000e741fa11ed48fdd116f24b4251b92aa9b5
Subproject commit 3191cc71f4d49473de6cf73a2680009a92419407
@@ -8,7 +8,7 @@ license = "MIT or Apache License 2.0"
#bin = @["build/waku"]

### Dependencies
requires "nim >= 2.0.8",
requires "nim >= 2.2.4",
  "chronicles",
  "confutils",
  "chronos",

@@ -26,7 +26,7 @@ proc getAutonatService*(rng: ref HmacDrbgContext): AutonatService =

  proc statusAndConfidenceHandler(
      networkReachability: NetworkReachability, confidence: Opt[float]
  ): Future[void] {.async.} =
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
    if confidence.isSome():
      info "Peer reachability status",
        networkReachability = networkReachability, confidence = confidence.get()

@@ -1,4 +1,4 @@
import chronicles, std/options, results
import chronicles, std/options, results, stint, stew/endians2
import ../waku_conf

logScope:

@@ -9,7 +9,7 @@ logScope:
##############################
type RlnRelayConfBuilder* = object
  enabled*: Option[bool]
  chainId*: Option[uint]
  chainId*: Option[UInt256]
  ethClientUrls*: Option[seq[string]]
  ethContractAddress*: Option[string]
  credIndex*: Option[uint]

@@ -26,8 +26,11 @@ proc init*(T: type RlnRelayConfBuilder): RlnRelayConfBuilder =
proc withEnabled*(b: var RlnRelayConfBuilder, enabled: bool) =
  b.enabled = some(enabled)

proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint) =
  b.chainId = some(chainId)
proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint | UInt256) =
  when chainId is uint:
    b.chainId = some(UInt256.fromBytesBE(chainId.toBytesBE()))
  else:
    b.chainId = some(chainId)

proc withCredIndex*(b: var RlnRelayConfBuilder, credIndex: uint) =
  b.credIndex = some(credIndex)
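The uint | UInt256 overload above keeps legacy call sites compiling while new code passes 256-bit values directly. A standalone sketch of the same dispatch (re-created here so it runs without the repo; the byte round-trip mirrors the one used in this commit):

import std/options, stint, stew/endians2

type Builder = object
  chainId: Option[UInt256]

proc withChainId(b: var Builder, chainId: uint64 | UInt256) =
  when chainId is uint64:
    # Widen via a big-endian byte round-trip, as the commit does for uint.
    b.chainId = some(UInt256.fromBytesBE(chainId.toBytesBE()))
  else:
    b.chainId = some(chainId)

var a, b: Builder
a.withChainId(11155111'u64)  # legacy integer call site
b.withChainId(11155111.u256) # new UInt256 call site
doAssert a.chainId == b.chainId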
@@ -2,6 +2,7 @@ import
  libp2p/crypto/crypto,
  libp2p/multiaddress,
  std/[net, options, sequtils, strutils],
  stint,
  chronicles,
  chronos,
  results

@@ -292,7 +293,7 @@ proc nodeKey(
proc applyClusterConf(builder: var WakuConfBuilder) =
  # Apply cluster conf, overrides most values passed individually
  # If you want to tweak values, don't use clusterConf
  if builder.clusterConf.isNone:
  if builder.clusterConf.isNone():
    return
  let clusterConf = builder.clusterConf.get()

@@ -417,7 +418,7 @@ proc build*(
      warn("Cluster Id was not specified, defaulting to 0")
      0.uint16
    else:
      builder.clusterId.get()
      builder.clusterId.get().uint16

  let numShardsInNetwork =
    if builder.numShardsInNetwork.isSome():

@@ -4,6 +4,8 @@ import
  chronicles,
  chronos,
  regex,
  stew/endians2,
  stint,
  confutils,
  confutils/defs,
  confutils/std/net,

@@ -867,7 +869,7 @@ proc defaultWakuNodeConf*(): ConfResult[WakuNodeConf] =
proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
  RlnKeystoreGeneratorConf(
    execute: n.execute,
    chainId: n.rlnRelayChainId,
    chainId: UInt256.fromBytesBE(n.rlnRelayChainId.toBytesBE()),
    ethClientUrls: n.ethClientUrls.mapIt(string(it)),
    ethContractAddress: n.rlnRelayEthContractAddress,
    userMessageLimit: n.rlnRelayUserMessageLimit,

@@ -1,5 +1,7 @@
{.push raises: [].}

import stint

# TODO: Rename this type to match file name

type ClusterConf* = object

@@ -7,7 +9,7 @@ type ClusterConf* = object
  clusterId*: uint16
  rlnRelay*: bool
  rlnRelayEthContractAddress*: string
  rlnRelayChainId*: uint
  rlnRelayChainId*: UInt256
  rlnRelayDynamic*: bool
  rlnEpochSizeSec*: uint64
  rlnRelayUserMessageLimit*: uint64

@@ -20,13 +22,14 @@ type ClusterConf* = object
# Cluster configuration corresponding to The Waku Network. Note that it
# overrides existing cli configuration
proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
  const RelayChainId = 11155111'u256
  return ClusterConf(
    maxMessageSize: "150KiB",
    clusterId: 1,
    rlnRelay: true,
    rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8",
    rlnRelayDynamic: true,
    rlnRelayChainId: 11155111,
    rlnRelayChainId: RelayChainId,
    rlnEpochSizeSec: 600,
    rlnRelayUserMessageLimit: 100,
    numShardsInNetwork: 8,
@ -1103,8 +1103,13 @@ proc new*(
|
||||
online: true,
|
||||
)
|
||||
|
||||
proc peerHook(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
|
||||
onPeerEvent(pm, peerId, event)
|
||||
proc peerHook(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
await onPeerEvent(pm, peerId, event)
|
||||
except CatchableError:
|
||||
error "exception in onPeerEvent", error = getCurrentExceptionMsg()
|
||||
|
||||
var peerStore = pm.switch.peerStore
|
||||
|
||||
|
||||
@ -1475,7 +1475,7 @@ proc start*(node: WakuNode) {.async.} =
|
||||
## with announced addrs after start
|
||||
let addressMapper = proc(
|
||||
listenAddrs: seq[MultiAddress]
|
||||
): Future[seq[MultiAddress]] {.async.} =
|
||||
): Future[seq[MultiAddress]] {.gcsafe, async: (raises: [CancelledError]).} =
|
||||
return node.announcedAddresses
|
||||
node.switch.peerInfo.addressMappers.add(addressMapper)
|
||||
|
||||
|
||||
@ -20,7 +20,7 @@ const MaxConnectionsPerPeer* = 1
|
||||
|
||||
proc withWsTransport*(b: SwitchBuilder): SwitchBuilder =
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade): Transport =
|
||||
proc(upgr: Upgrade, privateKey: crypto.PrivateKey): Transport =
|
||||
WsTransport.new(upgr)
|
||||
)
|
||||
|
||||
@ -48,7 +48,7 @@ proc withWssTransport*(
|
||||
let key: TLSPrivateKey = getSecureKey(secureKeyPath)
|
||||
let cert: TLSCertificate = getSecureCert(secureCertPath)
|
||||
b.withTransport(
|
||||
proc(upgr: Upgrade): Transport =
|
||||
proc(upgr: Upgrade, privateKey: crypto.PrivateKey): Transport =
|
||||
WsTransport.new(
|
||||
upgr,
|
||||
tlsPrivateKey = key,
|
||||
|
||||
@ -174,7 +174,7 @@ proc registerPushHandler*(wfc: WakuFilterClient, handler: FilterPushHandler) =
|
||||
wfc.pushHandlers.add(handler)
|
||||
|
||||
proc initProtocolHandler(wfc: WakuFilterClient) =
|
||||
proc handler(conn: Connection, proto: string) {.async.} =
|
||||
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
|
||||
## Notice that the client component is acting as a server of WakuFilterPushCodec messages
|
||||
while not conn.atEof():
|
||||
var buf: seq[byte]
|
||||
|
||||
@ -287,14 +287,20 @@ proc handleMessage*(
  waku_filter_handle_message_duration_seconds.observe(handleMessageDurationSec)

proc initProtocolHandler(wf: WakuFilter) =
  proc handler(conn: Connection, proto: string) {.async.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    debug "filter subscribe request handler triggered",
      peerId = shortLog(conn.peerId), conn

    var response: FilterSubscribeResponse

    wf.peerRequestRateLimiter.checkUsageLimit(WakuFilterSubscribeCodec, conn):
      let buf = await conn.readLp(int(DefaultMaxSubscribeSize))
      var buf: seq[byte]
      try:
        buf = await conn.readLp(int(DefaultMaxSubscribeSize))
      except LPStreamError:
        error "failed to read stream in readLp",
          remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
        return

      waku_service_network_bytes.inc(
        amount = buf.len().int64, labelValues = [WakuFilterSubscribeCodec, "in"]

@ -302,14 +308,19 @@ proc initProtocolHandler(wf: WakuFilter) =

      let decodeRes = FilterSubscribeRequest.decode(buf)
      if decodeRes.isErr():
        error "Failed to decode filter subscribe request",
        error "failed to decode filter subscribe request",
          peer_id = conn.peerId, err = decodeRes.error
        waku_filter_errors.inc(labelValues = [decodeRpcFailure])
        return

      let request = decodeRes.value #TODO: toAPI() split here

      response = await wf.handleSubscribeRequest(conn.peerId, request)
      try:
        response = await wf.handleSubscribeRequest(conn.peerId, request)
      except CatchableError:
        error "handleSubscribeRequest failed",
          remote_peer_id = conn.peerId, err = getCurrentExceptionMsg()
        return

      debug "sending filter subscribe response",
        peer_id = shortLog(conn.peerId), response = response

@ -322,7 +333,11 @@ proc initProtocolHandler(wf: WakuFilter) =
        statusDesc: some("filter request rejected due to rate limit exceeded"),
      )

    await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here
    try:
      await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here
    except LPStreamError:
      error "failed to write stream in writeLp",
        remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
      return

  wf.handler = handler
@ -355,8 +370,16 @@ proc new*(
    peerRequestRateLimiter: PerPeerRateLimiter(setting: rateLimitSetting),
  )

  proc peerEventHandler(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
    wf.onPeerEventHandler(peerId, event)
  proc peerEventHandler(
      peerId: PeerId, event: PeerEvent
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
    try:
      await wf.onPeerEventHandler(peerId, event)
    except CatchableError:
      error "onPeerEventHandler failed",
        remote_peer_id = shortLog(peerId),
        event = event,
        error = getCurrentExceptionMsg()

  peerManager.addExtPeerEventHandler(peerEventHandler, PeerEventKind.Left)
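The same contract applies to callbacks handed over to the peer manager: the externally supplied event handler may raise anything, so it is wrapped in a proc that satisfies raises: [CancelledError]. A simplified sketch of that wrapping, with the peer id reduced to a string so the example stays self-contained:

import chronicles, chronos

proc onPeerEvent(peer: string) {.async.} = # hypothetical inner callback
  discard

proc peerEventHandler(
    peer: string
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
  try:
    await onPeerEvent(peer) # may raise any CatchableError
  except CatchableError:
    # swallow and log: only CancelledError may leave this proc
    error "peer event handler failed", peer = peer, error = getCurrentExceptionMsg()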
@ -114,16 +114,24 @@ proc handleRequest*(
  return pushResponse

proc initProtocolHandler(wl: WakuLightPush) =
  proc handle(conn: Connection, proto: string) {.async.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    var rpc: LightpushResponse
    wl.requestRateLimiter.checkUsageLimit(WakuLightPushCodec, conn):
      let buffer = await conn.readLp(DefaultMaxRpcSize)
      var buffer: seq[byte]
      try:
        buffer = await conn.readLp(DefaultMaxRpcSize)
      except LPStreamError:
        error "lightpush read stream failed", error = getCurrentExceptionMsg()
        return

      waku_service_network_bytes.inc(
        amount = buffer.len().int64, labelValues = [WakuLightPushCodec, "in"]
      )

      rpc = await handleRequest(wl, conn.peerId, buffer)
      try:
        rpc = await handleRequest(wl, conn.peerId, buffer)
      except CatchableError:
        error "lightpush failed handleRequest", error = getCurrentExceptionMsg()
    do:
      debug "lightpush request rejected due to rate limit exceeded",
        peerId = conn.peerId, limit = $wl.requestRateLimiter.setting

@ -139,12 +147,15 @@ proc initProtocolHandler(wl: WakuLightPush) =
        )
      )

    await conn.writeLp(rpc.encode().buffer)
    try:
      await conn.writeLp(rpc.encode().buffer)
    except LPStreamError:
      error "lightpush write stream failed", error = getCurrentExceptionMsg()

    ## For lightpush it might not be worth measuring outgoing traffic, as it is
    ## only a small response about success/failure

  wl.handler = handle
  wl.handler = handler
  wl.codec = WakuLightPushCodec

proc new*(
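The checkUsageLimit construct used above takes two blocks: the main body runs while the peer is within its limit, and the do: branch runs once the limit is exceeded. A toy, self-contained sketch of how such a template can dispatch between the two branches (the real PerPeerRateLimiter tracks usage per peer and is more involved):

type ToyLimiter = object
  used, limit: int

template checkUsageLimit(t: var ToyLimiter, main, onRejected: untyped) =
  if t.used < t.limit:
    inc t.used
    main
  else:
    onRejected

var limiter = ToyLimiter(used: 0, limit: 2)
for i in 1 .. 3:
  limiter.checkUsageLimit:
    echo "request ", i, " served"
  do:
    echo "request ", i, " rejected: rate limit exceeded"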
@ -64,16 +64,24 @@ proc handleRequest*(
  return rpc

proc initProtocolHandler(wl: WakuLegacyLightPush) =
  proc handle(conn: Connection, proto: string) {.async.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    var rpc: PushRPC
    wl.requestRateLimiter.checkUsageLimit(WakuLegacyLightPushCodec, conn):
      let buffer = await conn.readLp(DefaultMaxRpcSize)
      var buffer: seq[byte]
      try:
        buffer = await conn.readLp(DefaultMaxRpcSize)
      except LPStreamError:
        error "lightpush legacy read stream failed", error = getCurrentExceptionMsg()
        return

      waku_service_network_bytes.inc(
        amount = buffer.len().int64, labelValues = [WakuLegacyLightPushCodec, "in"]
      )

      rpc = await handleRequest(wl, conn.peerId, buffer)
      try:
        rpc = await handleRequest(wl, conn.peerId, buffer)
      except CatchableError:
        error "lightpush legacy handleRequest failed", error = getCurrentExceptionMsg()
    do:
      debug "lightpush request rejected due to rate limit exceeded",
        peerId = conn.peerId, limit = $wl.requestRateLimiter.setting

@ -89,12 +97,15 @@ proc initProtocolHandler(wl: WakuLegacyLightPush) =
        )
      )

    await conn.writeLp(rpc.encode().buffer)
    try:
      await conn.writeLp(rpc.encode().buffer)
    except LPStreamError:
      error "lightpush legacy write stream failed", error = getCurrentExceptionMsg()

    ## For lightpush it might not be worth measuring outgoing traffic, as it is
    ## only a small response about success/failure

  wl.handler = handle
  wl.handler = handler
  wl.codec = WakuLegacyLightPushCodec

proc new*(
@ -70,7 +70,11 @@ proc request*(
  return ok(response)

proc initProtocolHandler(m: WakuMetadata) =
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    defer:
      # close, no data is expected
      await conn.closeWithEof()

    let res = catch:
      await conn.readLp(RpcResponseMaxBytes)
    let buffer = res.valueOr:

@ -88,12 +92,13 @@ proc initProtocolHandler(m: WakuMetadata) =
      localShards = m.shards,
      peer = conn.peerId

    discard await m.respond(conn)
    try:
      discard await m.respond(conn)
    except CatchableError:
      error "Failed to respond to WakuMetadata request",
        error = getCurrentExceptionMsg()

    # close, no data is expected
    await conn.closeWithEof()

  m.handler = handle
  m.handler = handler
  m.codec = WakuMetadataCodec

proc new*(
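Replacing the trailing closeWithEof call with a defer: block at the top of the handler guarantees the connection is closed on every exit path, including the early returns introduced by the new error handling. A minimal illustration of defer semantics:

proc demo(fail: bool) =
  defer:
    # runs on every exit from the proc, normal or early
    echo "connection closed"
  if fail:
    echo "early return"
    return
  echo "normal path"

demo(true)  # early return, then: connection closed
demo(false) # normal path, then: connection closed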
@ -243,7 +243,7 @@ proc updatePxEnrCache(wpx: WakuPeerExchange) {.async.} =
  wpx.populateEnrCache()

proc initProtocolHandler(wpx: WakuPeerExchange) =
  proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    var buffer: seq[byte]
    wpx.requestRateLimiter.checkUsageLimit(WakuPeerExchangeCodec, conn):
      try:

@ -253,9 +253,13 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
        waku_px_errors.inc(labelValues = [exc.msg])

        (
          await wpx.respondError(
            PeerExchangeResponseStatusCode.BAD_REQUEST, some(exc.msg), conn
          )
        try:
          await wpx.respondError(
            PeerExchangeResponseStatusCode.BAD_REQUEST, some(exc.msg), conn
          )
        except CatchableError:
          error "could not send error response", error = getCurrentExceptionMsg()
          return
        ).isOkOr:
          error "Failed to respond with BAD_REQUEST:", error = $error
        return

@ -266,26 +270,41 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
        error "Failed to decode PeerExchange request", error = $decBuf.error

        (
          await wpx.respondError(
            PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn
          )
        try:
          await wpx.respondError(
            PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn
          )
        except CatchableError:
          error "could not send error response decode",
            error = getCurrentExceptionMsg()
          return
        ).isOkOr:
          error "Failed to respond with BAD_REQUEST:", error = $error
        return

      let enrs = wpx.getEnrsFromCache(decBuf.get().request.numPeers)
      debug "peer exchange request received", enrs = $enrs
      (await wpx.respond(enrs, conn)).isErrOr:
        waku_px_peers_sent.inc(enrs.len().int64())

      try:
        (await wpx.respond(enrs, conn)).isErrOr:
          waku_px_peers_sent.inc(enrs.len().int64())
      except CatchableError:
        error "could not send response", error = getCurrentExceptionMsg()
    do:
      (
        await wpx.respondError(
          PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS, none(string), conn
        )
      ).isOkOr:
        error "Failed to respond with TOO_MANY_REQUESTS:", error = $error
    # close, no data is expected
    await conn.closeWithEof()
    defer:
      # close, no data is expected
      await conn.closeWithEof()

      try:
        (
          await wpx.respondError(
            PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS, none(string), conn
          )
        ).isOkOr:
          error "Failed to respond with TOO_MANY_REQUESTS:", error = $error
      except CatchableError:
        error "could not send error response", error = getCurrentExceptionMsg()
        return

  wpx.handler = handler
  wpx.codec = WakuPeerExchangeCodec
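The peer exchange handler now deals with two error channels at once: respondError reports failures through a Result (consumed with isOkOr), while the underlying stream write can additionally raise, which the surrounding try/except CatchableError absorbs. A compact sketch of that double layer, with a hypothetical respondError standing in for the real one:

import chronicles
import results

proc respondError(status: string): Result[void, string] =
  if status.len == 0:
    raise newException(IOError, "broken stream") # transport-level failure
  ok() # protocol-level success

try:
  respondError("BAD_REQUEST").isOkOr:
    error "Failed to respond", error = $error # `error` is the Result's err value
except CatchableError:
  error "could not send error response", error = getCurrentExceptionMsg()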
@ -144,7 +144,7 @@ type PublishOutcome* {.pure.} = enum
  CannotGenerateMessageId

proc initProtocolHandler(w: WakuRelay) =
  proc handler(conn: Connection, proto: string) {.async.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    ## main protocol handler that gets triggered on every
    ## connection for a protocol string
    ## e.g. ``/wakusub/0.0.1``, etc...
@ -123,7 +123,7 @@ proc batchRequest*(
      conn

  let reqCatch = catch:
    await self.rendezvous.request(namespace, count, peers)
    await self.rendezvous.request(Opt.some(namespace), count, peers)

  for conn in conns:
    await conn.close()
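The rendezvous request in the bumped nim-libp2p takes the namespace as an Opt[string] instead of a bare string, hence the Opt.some(namespace) wrapper at the call site. A small sketch of Opt from the results library, under the assumption that Opt.none(string) would mean "any namespace":

import results

proc describe(namespace: Opt[string]): string =
  if namespace.isSome():
    "requesting peers registered under " & namespace.get()
  else:
    "requesting peers from any namespace"

echo describe(Opt.some("waku"))  # namespaced request
echo describe(Opt.none(string)) # unrestricted request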
@ -57,7 +57,7 @@ type
    ethRpc*: Option[Web3]
    wakuRlnContract*: Option[WakuRlnContractWithSender]
    registrationTxHash*: Option[TxHash]
    chainId*: uint
    chainId*: UInt256
    keystorePath*: Option[string]
    keystorePassword*: Option[string]
    registrationHandler*: Option[RegistrationHandler]
@ -239,8 +239,12 @@ method register*(
  # TODO: make this robust. search within the event list for the event
  debug "ts receipt", receipt = tsReceipt[]

  if tsReceipt.status.isNone() or tsReceipt.status.get() != 1.Quantity:
    raise newException(ValueError, "register: transaction failed")
  if tsReceipt.status.isNone():
    raise newException(ValueError, "register: transaction failed status is None")
  if tsReceipt.status.get() != 1.Quantity:
    raise newException(
      ValueError, "register: transaction failed status is: " & $tsReceipt.status.get()
    )

  let firstTopic = tsReceipt.logs[0].topics[0]
  # the hash of the signature of MemberRegistered(uint256,uint32) event is equal to the following hex value
@ -485,9 +489,9 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
  let ethRpc: Web3 = (await establishConnection(g)).valueOr:
    return err("failed to connect to Ethereum clients: " & $error)

  var fetchedChainId: uint
  var fetchedChainId: UInt256
  g.retryWrapper(fetchedChainId, "Failed to get the chain id"):
    uint(await ethRpc.provider.eth_chainId())
    await ethRpc.provider.eth_chainId()

  # Set the chain id
  if g.chainId == 0:

@ -555,7 +559,7 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
    warn "could not initialize with persisted rln metadata"
  elif metadataGetOptRes.get().isSome():
    let metadata = metadataGetOptRes.get().get()
    if metadata.chainId != uint(g.chainId):
    if metadata.chainId != g.chainId:
      return err("persisted data: chain id mismatch")
    if metadata.contractAddress != g.ethContractAddress.toLower():
      return err("persisted data: contract address mismatch")
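Keeping the chain id as UInt256 matches what eth_chainId returns in nim-web3 and removes both the narrowing uint(...) at the fetch site and the conversion at the later comparison. A short stint sketch with an assumed example chain id:

import stint

let fetchedChainId = 11155111.u256 # e.g. Sepolia, kept at full 256-bit width
if fetchedChainId == 0.u256:
  echo "chain id not yet configured"
elif fetchedChainId != 1.u256:
  echo "connected to a non-mainnet chain: ", fetchedChainId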
@ -131,6 +131,13 @@ proc encode*(nsp: RateLimitProof): ProtoBuffer =
  output.finish3()
  return output

func encode*(x: UInt32): seq[byte] =
  ## the Ethereum ABI imposes a 32 byte width for every type
  let numTargetBytes = 32 div 8
  let paddingBytes = 32 - numTargetBytes
  let paddingZeros = newSeq[byte](paddingBytes)
  paddingZeros & @(stint.toBytesBE(x))

type
  SpamHandler* =
    proc(wakuMessage: WakuMessage): void {.gcsafe, closure, raises: [Defect].}
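The new encode pads the value out to one full ABI word: 32 div 8 = 4 data bytes for a 32-bit integer, hence 32 - 4 = 28 leading zero bytes. A worked example, assuming UInt32 here is stint's StUint[32] as used by the surrounding RLN code:

import stint

func encodeAbiWord(x: StUint[32]): seq[byte] =
  let numTargetBytes = 32 div 8 # a 32-bit value occupies 4 bytes
  let paddingZeros = newSeq[byte](32 - numTargetBytes)
  paddingZeros & @(toBytesBE(x))

let word = encodeAbiWord(20.stuint(32)) # e.g. a userMessageLimit of 20
assert word.len == 32
assert word[28 ..< 32] == @[byte 0x00, 0x00, 0x00, 0x14]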