Merge remote-tracking branch 'origin/unstable' into autoupnp

Tanguy 2022-11-08 17:23:59 +01:00
commit 3874c57395
No known key found for this signature in database
GPG Key ID: 7DD8EC6B6CE6C45E
74 changed files with 3186 additions and 611 deletions

View File

@ -63,7 +63,7 @@ jobs:
git push origin gh-pages
update_site:
if: github.ref == 'refs/heads/master'
if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/docs'
name: 'Rebuild website'
runs-on: ubuntu-latest
steps:
@ -74,8 +74,12 @@ jobs:
with:
python-version: 3.x
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 'stable'
- name: Generate website
run: pip install mkdocs-material && mkdocs build
run: pip install mkdocs-material && nimble website
- name: Clone the gh-pages branch
uses: actions/checkout@v2

View File

@ -21,7 +21,7 @@ jobs:
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-2, version-1-4, version-1-6, devel]
branch: [version-1-2, version-1-6, devel]
include:
- target:
os: linux

1
.gitignore vendored
View File

@ -13,5 +13,6 @@ build/
.vscode/
.DS_Store
tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths

16
.pinned
View File

@ -1,17 +1,17 @@
bearssl;https://github.com/status-im/nim-bearssl@#f4c4233de453cb7eac0ce3f3ffad6496295f83ab
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#8e8263370b51bc9b71916273e3eb956053532c4f
dnsclient;https://github.com/ba0f3/dnsclient.nim@#4960de2b345f567b12f09a08e9967af104ab39a3
faststreams;https://github.com/status-im/nim-faststreams@#5f9fd4e5c51328a7df1c091ae53a754586d25563
chronos;https://github.com/status-im/nim-chronos@#6525f4ce1d1a7eba146e5f1a53f6f105077ae686
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#6112432b3a81d9db116cd5d64c39648881cfff29
httputils;https://github.com/status-im/nim-http-utils@#e88e231dfcef4585fe3b2fbd9b664dbd28a88040
json_serialization;https://github.com/status-im/nim-json-serialization@#e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4
metrics;https://github.com/status-im/nim-metrics@#0a6477268e850d7bc98347b3875301524871765f
nat_mapper;https://github.com/status-im/nim-nat-mapper@#31c7e280c65c8e0816d82978e710464399f6cd6c
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
secp256k1;https://github.com/status-im/nim-secp256k1@#c7f1a37d9b0f17292649bfed8bf6cef83cf4221f
serialization;https://github.com/status-im/nim-serialization@#493d18b8292fc03aa4f835fd825dea1183f97466
stew;https://github.com/status-im/nim-stew@#0c379cf1d8d3d9db07af108cc78ff542b2105914
serialization;https://github.com/status-im/nim-serialization@#60a5bd8ac0461dfadd3069fd9c01a7734f205995
stew;https://github.com/status-im/nim-stew@#23da07c9b59c0ba3d4efa7e4e6e2c4121ae5a156
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#f180f596c88dfd266f746ed6f8dbebce39c824db
websock;https://github.com/status-im/nim-websock@#7b2ed397d6e4c37ea4df08ae82aeac7ff04cd180
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8
unittest2;https://github.com/status-im/nim-unittest2@#da8398c45cafd5bd7772da1fc96e3924a18d3823
websock;https://github.com/status-im/nim-websock@#acbe30e9ca1e51dcbbfe4c552ee6f16c7eede538
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8

View File

@ -98,20 +98,31 @@ nim-libp2p is used by:
- [nim-codex](https://github.com/status-im/nim-codex), a decentralized storage application
- (open a pull request if you want to be included here)
## Development
**Clone and Install dependencies:**
## Stability
nim-libp2p has been used in production for over a year in high-stakes scenarios, so its core is considered stable.
Some modules are more recent and less stable.
The versioning follows [semver](https://semver.org/), with some additions:
- Some libp2p procedures are marked `.public.`; they will remain compatible within each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible within each new `PATCH`)
We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `1.2` & `1.6`
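For instance, a procedure covered by this guarantee might be declared like so (a minimal, hypothetical sketch rather than an actual libp2p declaration):
```nim
proc connectPeer*(s: Switch) {.public.} =
  # the `public` pragma marks this proc as part of the stable API
  discard
```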
## Development
Clone and Install dependencies:
```sh
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install
nimble install -dy
```
**Run unit tests**
Run unit tests:
```sh
# run all the unit tests
nimble test
```
This requires the go daemon to be available. To run only the native tests, use `nimble testnative`.
Or use `nimble tasks` to show all available tasks.
### Contribute
@ -125,22 +136,19 @@ The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-st
### Core Developers
[@cheatfate](https://github.com/cheatfate), [Dmitriy Ryajov](https://github.com/dryajov), [Tanguy](https://github.com/Menduist), [Zahary Karadjov](https://github.com/zah)
### Tips and tricks
**enable expensive metrics:**
### Compile time flags
Enable expensive metrics (i.e., metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
**use identify metrics**
Set the list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,prysm,teku some_file.nim
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
**specify gossipsub specific topics to measure**
Specify gossipsub-specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```

View File

@ -2,5 +2,5 @@
Welcome to the nim-libp2p documentation!
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as [examples](directchat.nim) and
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).

View File

@ -1,6 +1,14 @@
## # Circuit Relay example
##
## Circuit Relay can be used when a node cannot reach another node
## directly, but can reach it through another node (the Relay).
##
## That may happen because of NAT, firewalls, or incompatible transports.
##
## More information [here](https://docs.libp2p.io/concepts/circuit-relay/).
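##
## A relayed connection is then dialed through a circuit relay address which,
## as a rough sketch, looks like `/p2p/<relayPeerId>/p2p-circuit/p2p/<destinationPeerId>`.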
import chronos, stew/byteutils
import ../libp2p,
../libp2p/protocols/connectivity/relay/[relay, client]
import libp2p,
libp2p/protocols/connectivity/relay/[relay, client]
# Helper to create a circuit relay node
proc createCircuitRelaySwitch(r: Relay): Switch =

View File

@ -5,8 +5,8 @@ import
strformat, strutils,
stew/byteutils,
chronos,
../libp2p,
../libp2p/protocols/connectivity/nat_auto_mapper
libp2p,
libp2p/protocols/connectivity/nat_auto_mapper
const DefaultAddr = "/ip4/0.0.0.0/tcp/0"

View File

@ -1,6 +1,6 @@
import chronos # an efficient library for async
import stew/byteutils # various utils
import ../libp2p # when installed through nimble, just use `import libp2p`
import libp2p
##
# Create our custom protocol

View File

@ -1,108 +0,0 @@
# Simple ping tutorial
Hi all, welcome to the first nim-libp2p tutorial!
!!! tips ""
This tutorial is for everyone who is interested in building peer-to-peer applications. No Nim programming experience is needed.
To give you a quick overview, **Nim** is the programming language we are using and **nim-libp2p** is the Nim implementation of [libp2p](https://libp2p.io/), a modular library that enables the development of peer-to-peer network applications.
Hope you'll find it helpful in your journey of learning. Happy coding! ;)
## Before you start
The only prerequisite here is [Nim](https://nim-lang.org/), the programming language with a Python-like syntax and a performance similar to C. Detailed information can be found [here](https://nim-lang.org/docs/tut1.html).
Install Nim via their [official website](https://nim-lang.org/install.html).
Check Nim's installation via `nim --version` and its package manager Nimble via `nimble --version`.
You can now install the latest version of `nim-libp2p`:
```bash
nimble install libp2p@#master
```
## A simple ping application
We'll start by creating a simple application that starts two libp2p [switches](https://docs.libp2p.io/concepts/stream-multiplexing/#switch-swarm) and pings one from the other using the [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol.
!!! tips ""
You can extract the code from this tutorial by running `nim c -r tools/markdown_runner.nim examples/tutorial_1_connect.md` in the libp2p folder!
Let's create a `part1.nim`, and import our dependencies:
```nim
import chronos
import libp2p
import libp2p/protocols/ping
```
[chronos](https://github.com/status-im/nim-chronos) is the asynchronous framework used by `nim-libp2p`.
Next, we'll create a helper procedure to build our switches. A switch needs a bit of configuration, and it's easier to do that configuration only once:
```nim
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
var switch = SwitchBuilder
.new()
.withRng(rng) # Give the application RNG
.withAddress(ma) # Our local address(es)
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.build()
return switch
```
This will create a switch using [Mplex](https://docs.libp2p.io/concepts/stream-multiplexing/) as a multiplexer, Noise to secure the communication, and TCP as an underlying transport.
You can of course tweak this to use a different transport (or several), or adjust the configuration of Mplex and Noise, but these are sane defaults that we'll use going forward.
Let's now start to create our main procedure:
```nim
proc main() {.async, gcsafe.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
pingProtocol = Ping.new(rng=rng)
```
We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any port available".
`tryGet` is a procedure which is part of [nim-result](https://github.com/arnetheduck/nim-result/) that will throw an exception if the supplied MultiAddress is invalid.
We can now create our two switches:
```nim
let
switch1 = createSwitch(localAddress, rng)
switch2 = createSwitch(localAddress, rng)
switch1.mount(pingProtocol)
await switch1.start()
await switch2.start()
```
We've **mounted** the `pingProtocol` on our first switch. This means that the first switch will actually listen for any ping requests coming in, and handle them accordingly.
Now that we've started the nodes, they are listening for incoming peers.
We can find out which port was assigned, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
We'll **dial** the first switch from the second one, by specifying its **Peer ID**, its **MultiAddress** and the **`Ping` protocol codec**:
```nim
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
```
We now have a `Ping` connection set up between the second and the first switch; we can use it to actually ping the node:
```nim
# ping the other node and echo the ping duration
echo "ping: ", await pingProtocol.ping(conn)
# We must close the connection ourselves when we're done with it
await conn.close()
```
And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
```nim
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
waitFor(main())
```
You can now run this program using `nim c -r part1.nim`, and you should see the dialing sequence, ending with a ping output.
In the [next tutorial](tutorial_2_customproto.md), we'll look at how to create our own custom protocol.

View File

@ -0,0 +1,95 @@
## # Simple ping tutorial
##
## Hi all, welcome to the first nim-libp2p tutorial!
##
## !!! tips ""
## This tutorial is for everyone who is interested in building peer-to-peer applications. No Nim programming experience is needed.
##
## To give you a quick overview, **Nim** is the programming language we are using and **nim-libp2p** is the Nim implementation of [libp2p](https://libp2p.io/), a modular library that enables the development of peer-to-peer network applications.
##
## Hope you'll find it helpful in your journey of learning. Happy coding! ;)
##
## ## Before you start
## The only prerequisite here is [Nim](https://nim-lang.org/), the programming language with a Python-like syntax and a performance similar to C. Detailed information can be found [here](https://nim-lang.org/docs/tut1.html).
##
## Install Nim via their [official website](https://nim-lang.org/install.html).
## Check Nim's installation via `nim --version` and its package manager Nimble via `nimble --version`.
##
## You can now install the latest version of `nim-libp2p`:
## ```bash
## nimble install libp2p@#master
## ```
##
## ## A simple ping application
## We'll start by creating a simple application that starts two libp2p [switches](https://docs.libp2p.io/concepts/stream-multiplexing/#switch-swarm) and pings one from the other using the [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol.
##
## !!! tips ""
## You can find the source of this tutorial (and other tutorials) in the [libp2p/examples](https://github.com/status-im/nim-libp2p/tree/master/examples) folder!
##
## Let's create a `part1.nim`, and import our dependencies:
import chronos
import libp2p
import libp2p/protocols/ping
## [chronos](https://github.com/status-im/nim-chronos) is the asynchronous framework used by `nim-libp2p`.
##
## Next, we'll create a helper procedure to build our switches. A switch needs a bit of configuration, and it's easier to do that configuration only once:
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
var switch = SwitchBuilder
.new()
.withRng(rng) # Give the application RNG
.withAddress(ma) # Our local address(es)
.withTcpTransport() # Use TCP as transport
.withMplex() # Use Mplex as muxer
.withNoise() # Use Noise as secure manager
.build()
return switch
## This will create a switch using [Mplex](https://docs.libp2p.io/concepts/stream-multiplexing/) as a multiplexer, Noise to secure the communication, and TCP as an underlying transport.
##
## You can of course tweak this to use a different transport (or several), or adjust the configuration of Mplex and Noise, but these are sane defaults that we'll use going forward.
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
pingProtocol = Ping.new(rng=rng)
## We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
## The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any port available".
##
## `tryGet` is a procedure which is part of [nim-result](https://github.com/arnetheduck/nim-result/) that will throw an exception if the supplied MultiAddress is invalid.
##
## We can now create our two switches:
let
switch1 = createSwitch(localAddress, rng)
switch2 = createSwitch(localAddress, rng)
switch1.mount(pingProtocol)
await switch1.start()
await switch2.start()
## We've **mounted** the `pingProtocol` on our first switch. This means that the first switch will actually listen for any ping requests coming in, and handle them accordingly.
##
## Now that we've started the nodes, they are listening for incoming peers.
## We can find out which port was assigned, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
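## For example, a quick way to inspect them (a sketch; the output format may vary):
## ```nim
## echo switch1.peerInfo.addrs
## ```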
##
## We'll **dial** the first switch from the second one, by specifying its **Peer ID**, its **MultiAddress** and the **`Ping` protocol codec**:
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
## We now have a `Ping` connection set up between the second and the first switch; we can use it to actually ping the node:
# ping the other node and echo the ping duration
echo "ping: ", await pingProtocol.ping(conn)
# We must close the connection ourselves when we're done with it
await conn.close()
## And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
waitFor(main())
## You can now run this program using `nim c -r part1.nim`, and you should see the dialing sequence, ending with a ping output.
##
## In the [next tutorial](tutorial_2_customproto.md), we'll look at how to create our own custom protocol.

View File

@ -1,82 +0,0 @@
# Custom protocol in libp2p
In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using `nim-libp2p`.
We'll now look at how to create a custom protocol inside libp2p.
Let's create a `part2.nim`, and import our dependencies:
```nim
import chronos
import stew/byteutils
import libp2p
```
This is similar to the first tutorial, except we don't need to import the `Ping` protocol.
Next, we'll declare our custom protocol
```nim
const TestCodec = "/test/proto/1.0.0"
type TestProto = ref object of LPProtocol
```
We've set a [protocol ID](https://docs.libp2p.io/concepts/protocols/#protocol-ids), and created a custom `LPProtocol`. In a more complex protocol, we could use this structure to store interesting variables.
A protocol generally has two parts: a handling/server part, and a dialing/client part.
These two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
Let's start with the server part:
```nim
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
# We must close the connections ourselves when we're done with it
await conn.close()
return T(codecs: @[TestCodec], handler: handle)
```
This is a constructor for our `TestProto`, that will specify our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
In our handle, we simply read a message from the connection and `echo` it.
We can now create our client part:
```nim
proc hello(p: TestProto, conn: Connection) {.async.} =
await conn.writeLp("Hello p2p!")
```
Again, pretty straightforward: we just send a message on the connection.
We can now create our main procedure:
```nim
proc main() {.async, gcsafe.} =
let
rng = newRng()
testProto = TestProto.new()
switch1 = newStandardSwitch(rng=rng)
switch2 = newStandardSwitch(rng=rng)
switch1.mount(testProto)
await switch1.start()
await switch2.start()
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
await testProto.hello(conn)
# We must close the connection ourselves when we're done with it
await conn.close()
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
```
This is very similar to the first tutorial's `main`; the only noteworthy difference is that we use `newStandardSwitch`, which is similar to the first tutorial's `createSwitch` but is bundled directly in libp2p.
We can now wrap our program by calling our main proc:
```nim
waitFor(main())
```
And that's it!

View File

@ -0,0 +1,74 @@
## # Custom protocol in libp2p
##
## In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using `nim-libp2p`.
##
## We'll now look at how to create a custom protocol inside libp2p.
##
## Let's create a `part2.nim`, and import our dependencies:
import chronos
import stew/byteutils
import libp2p
## This is similar to the first tutorial, except we don't need to import the `Ping` protocol.
##
## Next, we'll declare our custom protocol
const TestCodec = "/test/proto/1.0.0"
type TestProto = ref object of LPProtocol
## We've set a [protocol ID](https://docs.libp2p.io/concepts/protocols/#protocol-ids), and created a custom `LPProtocol`. In a more complex protocol, we could use this structure to store interesting variables.
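## For instance, a hypothetical stateful protocol could look like this (a sketch, not used below):
## ```nim
## type CountingProto = ref object of LPProtocol
##   messagesReceived: int # per-protocol state
## ```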
##
## A protocol generally has two parts: a handling/server part, and a dialing/client part.
## These two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
##
## Let's start with the server part:
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
# We must close the connections ourselves when we're done with it
await conn.close()
return T(codecs: @[TestCodec], handler: handle)
## This is a constructor for our `TestProto`, that will specify our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
## In our handle, we simply read a message from the connection and `echo` it.
##
## We can now create our client part:
proc hello(p: TestProto, conn: Connection) {.async.} =
await conn.writeLp("Hello p2p!")
## Again, pretty straightforward: we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
let
rng = newRng()
testProto = TestProto.new()
switch1 = newStandardSwitch(rng=rng)
switch2 = newStandardSwitch(rng=rng)
switch1.mount(testProto)
await switch1.start()
await switch2.start()
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
await testProto.hello(conn)
# We must close the connection ourselves when we're done with it
await conn.close()
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
## This is very similar to the first tutorial's `main`; the only noteworthy difference is that we use `newStandardSwitch`, which is similar to the first tutorial's `createSwitch` but is bundled directly in libp2p.
##
## We can now wrap our program by calling our main proc:
waitFor(main())
## And that's it!
## In the [next tutorial](tutorial_3_protobuf.md), we'll create a more complex protocol using Protobuf.

View File

@ -0,0 +1,162 @@
## # Protobuf usage
##
## In the [previous tutorial](tutorial_2_customproto.md), we created a simple custom protocol.
## Most real protocols want their messages to be structured and extensible, which is why
## most of them use [protobuf](https://developers.google.com/protocol-buffers) to
## define their message structures.
##
## Here, we'll create a slightly more complex protocol, which parses & generates protobuf
## messages. Let's start by importing our dependencies, as usual:
import chronos
import stew/results # for Opt[T]
import libp2p
## ## Protobuf encoding & decoding
## This will be the structure of our messages:
## ```protobuf
## message MetricList {
## message Metric {
## string name = 1;
## float value = 2;
## }
##
## repeated Metric metrics = 2;
## }
## ```
## We'll create our protobuf types, encoders & decoders, according to this format.
## To create the encoders & decoders, we are going to use minprotobuf
## (included in libp2p).
##
## While more modern techniques
## (such as [nim-protobuf-serialization](https://github.com/status-im/nim-protobuf-serialization))
## exist, minprotobuf is currently the recommended way to handle protobuf, since it has
## been used extensively in production, and audited.
type
Metric = object
name: string
value: float
MetricList = object
metrics: seq[Metric]
{.push raises: [].}
proc encode(m: Metric): ProtoBuffer =
result = initProtoBuffer()
result.write(1, m.name)
result.write(2, m.value)
result.finish()
proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
var res: Metric
let pb = initProtoBuffer(buf)
# "getField" will return a Result[bool, ProtoError].
# The Result will hold an error if the protobuf is invalid.
# The Result will hold "false" if the field is missing
#
# We are just checking the error, and ignoring whether the value
# is present or not (default values are valid).
discard ? pb.getField(1, res.name)
discard ? pb.getField(2, res.value)
ok(res)
proc encode(m: MetricList): ProtoBuffer =
result = initProtoBuffer()
for metric in m.metrics:
result.write(1, metric.encode())
result.finish()
proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError] =
var
res: MetricList
metrics: seq[seq[byte]]
let pb = initProtoBuffer(buf)
discard ? pb.getRepeatedField(1, metrics)
for metric in metrics:
res.metrics &= ? Metric.decode(metric)
ok(res)
## ## Results instead of exceptions
## As you can see, this part of the program also uses Results instead of exceptions for error handling.
## We start by `{.push raises: [].}`, which will prevent every non-async function from raising
## exceptions.
##
## Then, we use [nim-result](https://github.com/arnetheduck/nim-result) to convey
## errors to function callers. A `Result[T, E]` will either hold a valid result of type
## T, or an error of type E.
##
## You can check if the call succeeded by using `res.isOk`, and then get the
## value using `res.value` or the error by using `res.error`.
##
## Another useful tool is `?`, which will unpack a Result if it succeeded,
## or if it failed, exit the current procedure returning the error.
##
## nim-result is packed with other functionalities that you'll find in the
## nim-result repository.
##
## Results and exceptions are generally interchangeable, but have different semantics
## that you may or may not prefer.
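## As a minimal sketch of this API (a hypothetical helper, not part of this tutorial's code):
## ```nim
## proc double(r: Result[int, string]): Result[int, string] =
##   let v = ? r # unpack the value, or return the error to our caller
##   ok(v * 2)
##
## assert double(Result[int, string].ok(21)).value == 42
## assert double(Result[int, string].err("boom")).error == "boom"
## ```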
##
## ## Creating the protocol
## We'll next create a protocol, like in the last tutorial, to request these metrics from our host
type
MetricCallback = proc: Future[MetricList] {.raises: [], gcsafe.}
MetricProto = ref object of LPProtocol
metricGetter: MetricCallback
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
let res = MetricProto(metricGetter: cb)
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
await conn.close()
res.codecs = @["/metric-getter/1.0.0"]
res.handler = handle
return res
proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
let protobuf = await conn.readLp(2048)
# tryGet will raise an exception if the Result contains an error.
# It's useful to bridge between exception-world and result-world
return MetricList.decode(protobuf).tryGet()
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16
for i in 0 ..< metricCount + 1:
result.metrics.add(Metric(
name: "metric_" & $i,
value: float(rng[].generate(uint16)) / 1000.0
))
return result
let
metricProto1 = MetricProto.new(randomMetricGenerator)
metricProto2 = MetricProto.new(randomMetricGenerator)
switch1 = newStandardSwitch(rng=rng)
switch2 = newStandardSwitch(rng=rng)
switch1.mount(metricProto1)
await switch1.start()
await switch2.start()
let
conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs)
metrics = await metricProto2.fetch(conn)
await conn.close()
for metric in metrics.metrics:
echo metric.name, " = ", metric.value
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
waitFor(main())
## If you run this program, you should see random metrics being sent from switch1 to switch2.

View File

@ -0,0 +1,163 @@
## # GossipSub
##
## In this tutorial, we'll build a simple GossipSub network
## to broadcast the metrics we built in the previous tutorial.
##
## GossipSub is used to broadcast messages in a network,
## and allows balancing between latency, bandwidth usage,
## privacy and attack resistance.
##
## You'll find a good explanation of how GossipSub works
## [here](https://docs.libp2p.io/concepts/publish-subscribe/). There are a lot
## of parameters you can tweak to adjust how GossipSub behaves, but here we'll
## use the sane defaults shipped with libp2p.
##
## We'll start by creating our metric structure as previously:
import chronos
import stew/results
import libp2p
import libp2p/protocols/pubsub/rpc/messages
type
Metric = object
name: string
value: float
MetricList = object
hostname: string
metrics: seq[Metric]
{.push raises: [].}
proc encode(m: Metric): ProtoBuffer =
result = initProtoBuffer()
result.write(1, m.name)
result.write(2, m.value)
result.finish()
proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
var res: Metric
let pb = initProtoBuffer(buf)
discard ? pb.getField(1, res.name)
discard ? pb.getField(2, res.value)
ok(res)
proc encode(m: MetricList): ProtoBuffer =
result = initProtoBuffer()
for metric in m.metrics:
result.write(1, metric.encode())
result.write(2, m.hostname)
result.finish()
proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError] =
var
res: MetricList
metrics: seq[seq[byte]]
let pb = initProtoBuffer(buf)
discard ? pb.getRepeatedField(1, metrics)
for metric in metrics:
res.metrics &= ? Metric.decode(metric)
? pb.getRequiredField(2, res.hostname)
ok(res)
## This is exactly like the previous structure, except that we added
## a `hostname` to distinguish where the metric is coming from.
##
## Now we'll create a small GossipSub network to broadcast the metrics,
## and collect them on one of the nodes.
type Node = tuple[switch: Switch, gossip: GossipSub, hostname: string]
proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
# This procedure will handle one of the nodes of the network
node.gossip.addValidator(["metrics"],
proc(topic: string, message: Message): Future[ValidationResult] {.async.} =
let decoded = MetricList.decode(message.data)
if decoded.isErr: return ValidationResult.Reject
return ValidationResult.Accept
)
# This "validator" will attach to the `metrics` topic and make sure
# that every message in this topic is valid. This allows us to stop
# propagation of invalid messages quickly in the network, and punish
# peers sending them.
# `John` will be responsible for logging the metrics; the rest of the nodes
# will just forward them through the network
if node.hostname == "John":
node.gossip.subscribe("metrics",
proc (topic: string, data: seq[byte]) {.async.} =
echo MetricList.decode(data).tryGet()
)
else:
node.gossip.subscribe("metrics", nil)
# Create random metrics 10 times and broadcast them
for _ in 0..<10:
await sleepAsync(500.milliseconds)
var metricList = MetricList(hostname: node.hostname)
let metricCount = rng[].generate(uint32) mod 4
for i in 0 ..< metricCount + 1:
metricList.metrics.add(Metric(
name: "metric_" & $i,
value: float(rng[].generate(uint16)) / 1000.0
))
discard await node.gossip.publish("metrics", encode(metricList).buffer)
await node.switch.stop()
## For our main procedure, we'll create a few nodes, and connect them together.
## Note that they are not all interconnected, but GossipSub will take care of
## broadcasting to the full network nonetheless.
proc main {.async.} =
let rng = newRng()
var nodes: seq[Node]
for hostname in ["John", "Walter", "David", "Thuy", "Amy"]:
let
switch = newStandardSwitch(rng=rng)
gossip = GossipSub.init(switch = switch, triggerSelf = true)
switch.mount(gossip)
await switch.start()
nodes.add((switch, gossip, hostname))
for index, node in nodes:
# Connect to a few neighbors
for otherNodeIdx in index - 1 .. index + 2:
if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index: continue
let otherNode = nodes[otherNodeIdx]
await node.switch.connect(
otherNode.switch.peerInfo.peerId,
otherNode.switch.peerInfo.addrs)
var allFuts: seq[Future[void]]
for node in nodes:
allFuts.add(oneNode(node, rng))
await allFutures(allFuts)
waitFor(main())
## If you run this program, you should see something like:
## ```
## (hostname: "John", metrics: @[(name: "metric_0", value: 42.097), (name: "metric_1", value: 50.99), (name: "metric_2", value: 47.86), (name: "metric_3", value: 5.368)])
## (hostname: "Walter", metrics: @[(name: "metric_0", value: 39.452), (name: "metric_1", value: 15.606), (name: "metric_2", value: 14.059), (name: "metric_3", value: 6.68)])
## (hostname: "David", metrics: @[(name: "metric_0", value: 9.82), (name: "metric_1", value: 2.862), (name: "metric_2", value: 15.514)])
## (hostname: "Thuy", metrics: @[(name: "metric_0", value: 59.038)])
## (hostname: "Amy", metrics: @[(name: "metric_0", value: 55.616), (name: "metric_1", value: 23.52), (name: "metric_2", value: 59.081), (name: "metric_3", value: 2.516)])
## ```
##
## This is John receiving & logging everyone's metrics.
##
## ## Going further
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## you can achieve very different properties.
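##
## For instance (a hypothetical sketch: the parameter names come from the
## linked reference, and the values are purely illustrative):
## ```nim
## var params = GossipSubParams.init()
## params.d = 8      # target mesh degree
## params.dLow = 6   # mesh degree low watermark
## params.dHigh = 12 # mesh degree high watermark
## let tunedGossip = GossipSub.init(switch = switch, parameters = params)
## ```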
##
## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)
##
## If you are interested in broadcasting for your application, you may want to use [Waku](https://waku.org/), which builds on top of GossipSub,
## and adds features such as history, spam protection, and light node friendliness.

View File

@ -0,0 +1,132 @@
## # Discovery Manager
##
## In the [previous tutorial](tutorial_4_gossipsub.md), we built a custom protocol using [protobuf](https://developers.google.com/protocol-buffers) and
## spread information (some metrics) over the network using gossipsub.
## For this tutorial, on the other hand, we'll go back to a simpler example:
## we'll try to discover specific peers to greet on the network.
##
## First, as usual, we import the dependencies:
import sequtils
import chronos
import stew/byteutils
import libp2p
import libp2p/protocols/rendezvous
import libp2p/discovery/rendezvousinterface
import libp2p/discovery/discoverymngr
## We won't use newStandardSwitch this time, as we need the discovery protocol
## [RendezVous](https://github.com/libp2p/specs/blob/master/rendezvous/README.md) to be mounted on the switch using withRendezVous.
##
## Note that other discovery methods such as [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) or [discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md) exist.
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withYamux()
.withNoise()
.withRendezVous(rdv)
.build()
# Create a really simple protocol to log one received message, then close the stream
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T(codecs: @[DumbCodec], handler: handle)
## ## Bootnodes
## The first time a p2p program is run, it needs to know how to join
## its network. This is generally done by hard-coding a list of stable
## nodes in the binary, called "bootnodes". These bootnodes are a
## critical part of a p2p network, since they are used by every new
## user to join the network.
##
## By using libp2p, we can use any node supporting our discovery protocol
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers.
proc main() {.async, gcsafe.} =
let bootNode = createSwitch()
await bootNode.start()
# Create 5 nodes in the network
var
switches: seq[Switch] = @[]
discManagers: seq[DiscoveryManager] = @[]
for i in 0..5:
let rdv = RendezVous.new()
let switch = createSwitch(rdv)
switch.mount(DumbProto.new(i))
switches.add(switch)
# A discovery manager is a simple tool: you can set it up by adding discovery
# interfaces (such as RendezVousInterface), then use it to advertise
# something on the network or to request something from it.
let dm = DiscoveryManager()
# A RendezVousInterface is a RendezVous protocol wrapped to be usable by the
# DiscoveryManager.
dm.add(RendezVousInterface.new(rdv))
discManagers.add(dm)
# We can now start the switch and connect to the bootnode
await switch.start()
await switch.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs)
# Each node of the network will advertise on one of the topics (EvenGang or OddClub)
dm.advertise(RdvNamespace(if i mod 2 == 0: "EvenGang" else: "OddClub"))
## We can now create the newcomer. This peer will connect to the bootnode, and use
## it to discover peers & greet them.
let
rdv = RendezVous.new()
newcomer = createSwitch(rdv)
dm = DiscoveryManager()
await newcomer.start()
await newcomer.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs)
dm.add(RendezVousInterface.new(rdv, ttr = 250.milliseconds))
# Use the discovery manager to find peers on the OddClub topic to greet them
let queryOddClub = dm.request(RdvNamespace("OddClub"))
for _ in 0..2:
let
# getPeer gives you a PeerAttribute containing information about the peer.
res = await queryOddClub.getPeer()
# Here we will use the PeerId and the MultiAddress to greet them
conn = await newcomer.dial(res[PeerId], res.getAll(MultiAddress), DumbCodec)
await conn.writeLp("Odd Club suuuucks! Even Gang is better!")
# Uh-oh!
await conn.close()
# Wait for the peer to close the stream
await conn.join()
# Queries will run in a loop, so we must stop them when we are done
queryOddClub.stop()
# Maybe it was because he wanted to join the EvenGang
let queryEvenGang = dm.request(RdvNamespace("EvenGang"))
for _ in 0..2:
let
res = await queryEvenGang.getPeer()
conn = await newcomer.dial(res[PeerId], res.getAll(MultiAddress), DumbCodec)
await conn.writeLp("Even Gang is sooo laaame! Odd Club rocks!")
# Or maybe not...
await conn.close()
await conn.join()
queryEvenGang.stop()
# What can I say, some people just want to watch the world burn... Anyway
# Stop all the discovery managers
for d in discManagers:
d.stop()
dm.stop()
# Stop all the switches
await allFutures(switches.mapIt(it.stop()))
await allFutures(bootNode.stop(), newcomer.stop())
waitFor(main())

View File

@ -0,0 +1,259 @@
## # Tron example
##
## In this tutorial, we will create a video game based on libp2p, using
## all of the features we talked about in the last tutorials.
##
## We will:
## - Discover peers using the Discovery Manager
## - Use GossipSub to find a playmate
## - Create a custom protocol to play with them
##
## While this may look like a daunting project, it's less than 150 lines of code.
##
## The game will be a simple Tron. We will use [nico](https://github.com/ftsf/nico)
## as a game engine. (You need to run `nimble install nico` to make it available.)
##
## ![multiplay](https://user-images.githubusercontent.com/13471753/198852714-b55048e3-f233-4723-900d-2193ad259fe1.gif)
##
## We will start by importing our dependencies and creating our types
import os
import nico, chronos, stew/byteutils, stew/endians2
import libp2p
import libp2p/protocols/rendezvous
import libp2p/discovery/rendezvousinterface
import libp2p/discovery/discoverymngr
const
directions = @[(K_UP, 0, -1), (K_LEFT, -1, 0), (K_DOWN, 0, 1), (K_RIGHT, 1, 0)]
mapSize = 32
tickPeriod = 0.2
type
Player = ref object
x, y: int
currentDir, nextDir: int
lost: bool
color: int
Game = ref object
gameMap: array[mapSize * mapSize, int]
tickTime: float
localPlayer, remotePlayer: Player
peerFound: Future[Connection]
hasCandidate: bool
tickFinished: Future[int]
GameProto = ref object of LPProtocol
proc new(_: type[Game]): Game =
# Default state of a game
result = Game(
tickTime: -3.0, # 3 seconds of "warm-up" time
localPlayer: Player(x: 4, y: 16, currentDir: 3, nextDir: 3, color: 8),
remotePlayer: Player(x: 27, y: 16, currentDir: 1, nextDir: 1, color: 12),
peerFound: newFuture[Connection]()
)
for pos in 0 .. result.gameMap.high:
if pos mod mapSize in [0, mapSize - 1] or pos div mapSize in [0, mapSize - 1]:
result.gameMap[pos] = 7
## ## Game Logic
## The networking during the game will work like this:
##
## * Each player will have `tickPeriod` (0.2) seconds to choose
## the direction they want to go (defaulting to the current direction)
## * After `tickPeriod`, we will send our chosen direction to the peer,
## and wait for theirs
## * Once we have both directions, we will "tick" the game, and restart the
## loop, as long as both players are alive.
##
## This is a very simplistic scheme, but creating proper networking for
## video games is an [art](https://developer.valvesoftware.com/wiki/Latency_Compensating_Methods_in_Client/Server_In-game_Protocol_Design_and_Optimization)
##
## The main drawback of this scheme is that the more ping you have with
## the peer, the slower the game will run. Or, conversely, the less ping you
## have, the faster it runs!
proc update(g: Game, dt: float32) =
# Will be called at each frame of the game.
#
# Because both Nico and Chronos have a main loop,
# they must share the control of the main thread.
# This is a hacky way to make this happen
waitFor(sleepAsync(1.milliseconds))
# Don't do anything if we are still waiting for an opponent
if not(g.peerFound.finished()) or isNil(g.tickFinished): return
g.tickTime += dt
# Update the wanted direction, making sure we can't go backward
for i in 0 .. directions.high:
if i != ((g.localPlayer.currentDir + 2) mod 4) and keyp(directions[i][0]):
g.localPlayer.nextDir = i
if g.tickTime > tickPeriod and not g.tickFinished.finished():
# We've chosen our next direction, let the networking know
g.localPlayer.currentDir = g.localPlayer.nextDir
g.tickFinished.complete(g.localPlayer.currentDir)
proc tick(g: Game, p: Player) =
# Move the player and check if they lost
p.x += directions[p.currentDir][1]
p.y += directions[p.currentDir][2]
if g.gameMap[p.y * mapSize + p.x] != 0: p.lost = true
g.gameMap[p.y * mapSize + p.x] = p.color
proc mainLoop(g: Game, peer: Connection) {.async.} =
while not (g.localPlayer.lost or g.remotePlayer.lost):
if g.tickTime > 0.0:
g.tickTime = 0
g.tickFinished = newFuture[int]()
# Wait for a chosen direction
let dir = await g.tickFinished
# Send it
await peer.writeLp(toBytes(uint32(dir)))
# Get the one from the peer
g.remotePlayer.currentDir = int uint32.fromBytes(await peer.readLp(8))
# Tick the players & restart
g.tick(g.remotePlayer)
g.tick(g.localPlayer)
## We'll draw the map & display some text when necessary:
proc draw(g: Game) =
for pos, color in g.gameMap:
setColor(color)
boxFill(pos mod 32 * 4, pos div 32 * 4, 4, 4)
let text = if not(g.peerFound.finished()): "Matchmaking.."
elif g.tickTime < -1.5: "Welcome to Etron"
elif g.tickTime < 0.0: "- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
elif g.remotePlayer.lost and g.localPlayer.lost: "DEUCE"
elif g.localPlayer.lost: "YOU LOSE"
elif g.remotePlayer.lost: "YOU WON"
else: ""
printc(text, screenWidth div 2, screenHeight div 2)
## ## Matchmaking
## To find an opponent, we will broadcast our address on a
## GossipSub topic, and wait for someone to connect to us.
## We will also listen to that topic, and connect to anyone
## broadcasting their address.
##
## If we are looking for a game, we'll send `ok` to let the
## peer know that we are available, check that they are also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
return T(codecs: @["/tron/1.0.0"], handler: handle)
proc networking(g: Game) {.async.} =
# Create our switch, similar to the GossipSub example and
# the Discovery examples combined
let
rdv = RendezVous.new()
switch = SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withYamux()
.withNoise()
.withRendezVous(rdv)
.build()
dm = DiscoveryManager()
gameProto = GameProto.new(g)
gossip = GossipSub.init(
switch = switch,
triggerSelf = false)
dm.add(RendezVousInterface.new(rdv))
switch.mount(gossip)
switch.mount(gameProto)
gossip.subscribe(
"/tron/matchmaking",
proc (topic: string, data: seq[byte]) {.async.} =
# If we are still looking for an opponent,
# try to match anyone broadcasting their address
if g.peerFound.finished or g.hasCandidate: return
g.hasCandidate = true
try:
let
(peerId, multiAddress) = parseFullAddress(data).tryGet()
stream = await switch.dial(peerId, @[multiAddress], gameProto.codec)
await stream.writeLp("ok")
if (await stream.readLp(10)) != "ok".toBytes:
g.hasCandidate = false
return
g.peerFound.complete(stream)
# We are "player 2"
swap(g.localPlayer, g.remotePlayer)
except CatchableError as exc:
discard
)
await switch.start()
defer: await switch.stop()
# As explained in the last tutorial, we need a bootnode to be able
# to find peers. We could use any libp2p node running rendezvous (or any
# node running tron). We will take its MultiAddress from the command
# line parameters
if paramCount() > 0:
let (peerId, multiAddress) = paramStr(1).parseFullAddress().tryGet()
await switch.connect(peerId, @[multiAddress])
else:
echo "No bootnode provided, listening on: ", switch.peerInfo.fullAddrs.tryGet()
# Discover peers from the bootnode, and connect to them
dm.advertise(RdvNamespace("tron"))
let discoveryQuery = dm.request(RdvNamespace("tron"))
discoveryQuery.forEach:
try:
await switch.connect(peer[PeerId], peer.getAll(MultiAddress))
except CatchableError as exc:
echo "Failed to dial a peer: ", exc.msg
# We will try to publish our address multiple times, in case
# it takes time to establish connections with other GossipSub peers
var published = false
while not published:
await sleepAsync(500.milliseconds)
for fullAddr in switch.peerInfo.fullAddrs.tryGet():
if (await gossip.publish("/tron/matchmaking", fullAddr.bytes)) == 0:
published = false
break
published = true
discoveryQuery.stop()
# We now wait for someone to connect to us (or for us to connect to someone)
let peerConn = await g.peerFound
defer: await peerConn.closeWithEof()
await g.mainLoop(peerConn)
let
game = Game.new()
netFut = networking(game)
nico.init("Status", "Tron")
nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
nico.run(proc = discard, proc(dt: float32) = game.update(dt), proc = game.draw())
waitFor(netFut.cancelAndWait())
## And that's it! If you want to run this code locally, the simplest way is to use the
## first node as a bootnode for the second one. But you can also use any rendezvous node.
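## For example, you can start a first instance with `nim c -r -p:. examples/tutorial_6_game.nim`
## (assuming you run it from the repository root), note the full address it prints, and pass
## that address as the first command-line argument of a second instance.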

View File

@ -17,7 +17,7 @@ when defined(nimdoc):
## stay backward compatible during the Major version, whereas private ones can
## change at each new Minor version.
##
## If you're new to nim-libp2p, you can find a tutorial `here<https://github.com/status-im/nim-libp2p/blob/master/examples/tutorial_1_connect.md>`_
## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## that can help you get started.
# Import stuff for doc

View File

@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "0.0.2"
version = "1.0.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@ -9,7 +9,7 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.2.0",
"nimcrypto >= 0.4.1",
"dnsclient >= 0.1.2",
"dnsclient >= 0.3.0 & < 0.4.0",
"bearssl >= 0.1.4",
"chronicles >= 0.10.2",
"chronos >= 3.0.6",
@ -17,7 +17,8 @@ requires "nim >= 1.2.0",
"metrics",
"secp256k1",
"stew#head",
"websock"
"websock",
"unittest2 >= 0.0.5 & < 0.1.0"
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
@ -32,17 +33,17 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
exec excstr & " -r " & " tests/" & filename
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false) =
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off "
proc buildSample(filename: string, run = false, extraFlags = "") =
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off -p:. " & extraFlags
excstr.add(" examples/" & filename)
exec excstr
if run:
exec "./examples/" & filename.toExe
rmFile "examples/" & filename.toExe
proc buildTutorial(filename: string) =
discard gorge "cat " & filename & " | nim c -r --hints:off tools/markdown_runner.nim | " &
" nim --verbosity:0 --hints:off c -"
proc tutorialToMd(filename: string) =
let markdown = gorge "cat " & filename & " | nim c -r --verbosity:0 --hints:off tools/markdown_builder.nim "
writeFile(filename.replace(".nim", ".md"), markdown)
task testnative, "Runs libp2p native tests":
runTest("testnative")
@ -87,12 +88,31 @@ task test_slim, "Runs the (slimmed down) test suite":
exec "nimble testfilter"
exec "nimble examples_build"
task website, "Build the website":
tutorialToMd("examples/tutorial_1_connect.nim")
tutorialToMd("examples/tutorial_2_customproto.nim")
tutorialToMd("examples/tutorial_3_protobuf.nim")
tutorialToMd("examples/tutorial_4_gossipsub.nim")
tutorialToMd("examples/tutorial_5_discovery.nim")
tutorialToMd("examples/tutorial_6_game.nim")
tutorialToMd("examples/circuitrelay.nim")
exec "mkdocs build"
task examples_build, "Build the samples":
buildSample("directchat")
buildSample("helloworld", true)
buildSample("circuitrelay", true)
buildTutorial("examples/tutorial_1_connect.md")
buildTutorial("examples/tutorial_2_customproto.md")
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
if (NimMajor, NimMinor) > (1, 2):
# These tutorials rely on post-1.4 exception tracking
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
# Nico doesn't work in 1.2
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")
# pin system
# while nimble lockfile

View File

@ -26,7 +26,7 @@ import
switch, peerid, peerinfo, stream/connection, multiaddress,
crypto/crypto, transports/[transport, tcptransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
nameresolving/nameresolver,
@ -60,6 +60,7 @@ type
peerStoreCapacity: Option[int]
autonat: bool
circuitRelay: Relay
rdv: RendezVous
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
@ -194,6 +195,10 @@ proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder
b.circuitRelay = r
b
proc withRendezVous*(b: SwitchBuilder, rdv: RendezVous = RendezVous.new()): SwitchBuilder =
b.rdv = rdv
b
proc build*(b: SwitchBuilder): Switch
{.raises: [Defect, LPError], public.} =
@ -261,6 +266,10 @@ proc build*(b: SwitchBuilder): Switch
b.circuitRelay.setup(switch)
switch.mount(b.circuitRelay)
if not isNil(b.rdv):
b.rdv.setup(switch)
switch.mount(b.rdv)
return switch
proc newStandardSwitch*(

View File

@ -528,8 +528,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
index: ttag, offset: int(ab.offset),
length: 1)
shallowCopy(field.buffer, ab.buffer)
length: 1, buffer: ab.buffer)
field.vbool = (b == 0xFF'u8)
ab.offset += 1
return ok(field)
@ -554,8 +553,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
# Negative or Positive integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length))
shallowCopy(field.buffer, ab.buffer)
length: int(length), buffer: ab.buffer)
if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
# Negative integer
if length <= 8:
@ -579,16 +577,15 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
# Zero value integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), vint: 0'u64)
shallowCopy(field.buffer, ab.buffer)
length: int(length), vint: 0'u64,
buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
else:
# Positive integer with leading zero
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset) + 1,
length: int(length) - 1)
shallowCopy(field.buffer, ab.buffer)
length: int(length) - 1, buffer: ab.buffer)
if length <= 9:
for i in 1 ..< int(length):
field.vint = (field.vint shl 8) or
@ -610,8 +607,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
# Zero-length BIT STRING.
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
length: 0, ubits: 0)
shallowCopy(field.buffer, ab.buffer)
length: 0, ubits: 0, buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
@ -631,8 +627,8 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
length: int(length - 1), ubits: int(unused))
shallowCopy(field.buffer, ab.buffer)
length: int(length - 1), ubits: int(unused),
buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
@ -643,8 +639,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length))
shallowCopy(field.buffer, ab.buffer)
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
@ -654,8 +649,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return err(Asn1Error.Incorrect)
field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
offset: int(ab.offset), length: 0)
shallowCopy(field.buffer, ab.buffer)
offset: int(ab.offset), length: 0, buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
@ -666,8 +660,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length))
shallowCopy(field.buffer, ab.buffer)
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
return ok(field)
@ -678,8 +671,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length))
shallowCopy(field.buffer, ab.buffer)
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
return ok(field)

View File

@ -13,10 +13,13 @@ else:
{.push raises: [].}
import chronos
import stew/results
import peerid,
stream/connection,
transports/transport
export results
type
Dial* = ref object of RootObj
@ -69,5 +72,5 @@ method addTransport*(
method tryDial*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[MultiAddress] {.async, base.} =
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async, base.} =
doAssert(false, "Not implemented!")

View File

@ -7,8 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/[sugar, tables]
import std/[sugar, tables, sequtils]
import stew/results
import pkg/[chronos,
chronicles,
metrics]
@ -16,6 +17,7 @@ import pkg/[chronos,
import dial,
peerid,
peerinfo,
multicodec,
multistream,
connmanager,
stream/connection,
@ -24,7 +26,7 @@ import dial,
upgrademngrs/upgrade,
errors
export dial, errors
export dial, errors, results
logScope:
topics = "libp2p dialer"
@ -45,56 +47,105 @@ type
transports: seq[Transport]
nameResolver: NameResolver
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress):
Future[Connection] {.async.} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId, hostname
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, address)
except CancelledError as exc:
debug "Dialing canceled", msg = exc.msg, peerId
raise exc
except CatchableError as exc:
debug "Dialing failed", msg = exc.msg, peerId
libp2p_failed_dials.inc()
return nil # Try the next address
# also keep track of the connection's bottom unsafe transport direction
# required by gossipsub scoring
dialed.transportDir = Direction.Out
libp2p_successful_dials.inc()
let conn =
try:
await transport.upgradeOutgoing(dialed, peerId)
except CatchableError as exc:
        # If we failed to establish the connection through one transport,
        # we won't succeed through another - no use in trying again
await dialed.close()
debug "Upgrade failed", msg = exc.msg, peerId
if exc isnot CancelledError:
libp2p_failed_upgrades_outgoing.inc()
# Try other address
return nil
doAssert not isNil(conn), "connection died after upgradeOutgoing"
debug "Dial successful", conn, peerId = conn.peerId
return conn
return nil
proc expandDnsAddr(
self: Dialer,
peerId: Opt[PeerId],
address: MultiAddress): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
if not DNSADDR.matchPartial(address): return @[(address, peerId)]
if isNil(self.nameResolver):
info "Can't resolve DNSADDR without NameResolver", ma=address
return @[]
let
toResolve =
if peerId.isSome:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
else:
address
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
for resolvedAddress in resolved:
let lastPart = resolvedAddress[^1].tryGet()
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
let
peerIdBytes = lastPart.protoArgument().tryGet()
addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0..^2].tryGet(), Opt.some(addrPeerId)))
else:
result.add((resolvedAddress, peerId))
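`expandDnsAddr` is internal to the dialer, but its output shape drives the loop below: non-dnsaddr addresses pass through unchanged, while resolved dnsaddr entries have their trailing `/p2p/...` component split out into the returned peer id. Illustrative shapes only, with invented addresses:

# /ip4/1.2.3.4/tcp/4001
#   -> @[(/ip4/1.2.3.4/tcp/4001, peerId)]                  # passthrough
# /dnsaddr/boot.example.org resolving to
# /ip4/1.2.3.4/tcp/4001/p2p/12D3KooW...
#   -> @[(/ip4/1.2.3.4/tcp/4001, Opt.some(12D3KooW...))]   # p2p part split out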
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress]):
Future[Connection] {.async.} =
debug "Dialing peer", peerId
for address in addrs: # for each address
let
hostname = address.getHostname()
resolvedAddresses =
if isNil(self.nameResolver): @[address]
else: await self.nameResolver.resolveMAddress(address)
for rawAddress in addrs:
# resolve potential dnsaddr
let addresses = await self.expandDnsAddr(peerId, rawAddress)
for a in resolvedAddresses: # for each resolved address
for transport in self.transports: # for each transport
if transport.handles(a): # check if it can dial it
trace "Dialing address", address = $a, peerId, hostname
let dialed = try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, a)
except CancelledError as exc:
debug "Dialing canceled", msg = exc.msg, peerId
raise exc
except CatchableError as exc:
debug "Dialing failed", msg = exc.msg, peerId
libp2p_failed_dials.inc()
continue # Try the next address
for (expandedAddress, addrPeerId) in addresses:
# DNS resolution
let
hostname = expandedAddress.getHostname()
resolvedAddresses =
if isNil(self.nameResolver): @[expandedAddress]
else: await self.nameResolver.resolveMAddress(expandedAddress)
# also keep track of the connection's bottom unsafe transport direction
# required by gossipsub scoring
dialed.transportDir = Direction.Out
libp2p_successful_dials.inc()
let conn = try:
await transport.upgradeOutgoing(dialed, peerId)
except CatchableError as exc:
            # If we failed to establish the connection through one transport,
            # we won't succeed through another - no use in trying again
# TODO we should try another address though
await dialed.close()
debug "Upgrade failed", msg = exc.msg, peerId
if exc isnot CancelledError:
libp2p_failed_upgrades_outgoing.inc()
raise exc
doAssert not isNil(conn), "connection died after upgradeOutgoing"
debug "Dial successful", conn, peerId = conn.peerId
return conn
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress)
if not isNil(result):
return result
proc internalConnect(
self: Dialer,
@ -189,7 +240,7 @@ proc negotiateStream(
method tryDial*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[MultiAddress] {.async.} =
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async.} =
## Create a protocol stream in order to check
## if a connection is possible.
## Doesn't use the Connection Manager to save it.

View File

@ -0,0 +1,182 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/sequtils
import chronos, chronicles, stew/results
import ../errors
type
BaseAttr = ref object of RootObj
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [Defect].}
Attribute[T] = ref object of BaseAttr
value: T
PeerAttributes* = object
attributes: seq[BaseAttr]
DiscoveryService* = distinct string
proc `==`*(a, b: DiscoveryService): bool {.borrow.}
proc ofType*[T](f: BaseAttr, _: type[T]): bool =
return f of Attribute[T]
proc to*[T](f: BaseAttr, _: type[T]): T =
Attribute[T](f).value
proc add*[T](pa: var PeerAttributes,
value: T) =
pa.attributes.add(Attribute[T](
value: value,
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
)
)
iterator items*(pa: PeerAttributes): BaseAttr =
for f in pa.attributes:
yield f
proc getAll*[T](pa: PeerAttributes, t: typedesc[T]): seq[T] =
for f in pa.attributes:
if f.ofType(T):
result.add(f.to(T))
proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
for f in pa.attributes:
if f.ofType(T):
return Opt.some(f.to(T))
Opt.none(T)
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [Defect, KeyError].} =
  pa{T}.valueOr: raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
for f in pa.attributes:
block oneAttribute:
for field in candidate.attributes:
if field.comparator(field, f):
break oneAttribute
return false
return true
type
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [Defect], gcsafe.}
DiscoveryInterface* = ref object of RootObj
onPeerFound*: PeerFoundCallback
toAdvertise*: PeerAttributes
advertisementUpdated*: AsyncEvent
advertiseLoop*: Future[void]
method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
doAssert(false, "Not implemented!")
method advertise*(self: DiscoveryInterface) {.async, base.} =
doAssert(false, "Not implemented!")
type
DiscoveryError* = object of LPError
DiscoveryFinished* = object of LPError
DiscoveryQuery* = ref object
attr: PeerAttributes
peers: AsyncQueue[PeerAttributes]
finished: bool
futs: seq[Future[void]]
DiscoveryManager* = ref object
interfaces: seq[DiscoveryInterface]
queries: seq[DiscoveryQuery]
proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
dm.interfaces &= di
di.onPeerFound = proc (pa: PeerAttributes) =
for query in dm.queries:
if query.attr.match(pa):
try:
query.peers.putNoWait(pa)
except AsyncQueueFullError as exc:
debug "Cannot push discovered peer to queue"
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
for i in dm.interfaces:
query.futs.add(i.request(pa))
dm.queries.add(query)
dm.queries.keepItIf(it.futs.anyIt(not it.finished()))
return query
proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
var pa: PeerAttributes
pa.add(value)
return dm.request(pa)
proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
for i in dm.interfaces:
i.toAdvertise = pa
if i.advertiseLoop.isNil:
i.advertisementUpdated = newAsyncEvent()
i.advertiseLoop = i.advertise()
else:
i.advertisementUpdated.fire()
proc advertise*[T](dm: DiscoveryManager, value: T) =
var pa: PeerAttributes
pa.add(value)
dm.advertise(pa)
template forEach*(query: DiscoveryQuery, code: untyped) =
## Will execute `code` for each discovered peer. The
  ## peer attributes are available through the variable
## `peer`
proc forEachInternal(q: DiscoveryQuery) {.async.} =
while true:
let peer {.inject.} =
try: await q.getPeer()
except DiscoveryFinished: return
code
asyncSpawn forEachInternal(query)
proc stop*(query: DiscoveryQuery) =
query.finished = true
for r in query.futs:
if not r.finished(): r.cancel()
proc stop*(dm: DiscoveryManager) =
for q in dm.queries:
q.stop()
for i in dm.interfaces:
if isNil(i.advertiseLoop): continue
i.advertiseLoop.cancel()
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
let getter = query.peers.popFirst()
try:
await getter or allFinished(query.futs)
except CancelledError as exc:
getter.cancel()
raise exc
if not finished(getter):
if query.finished:
raise newException(DiscoveryFinished, "Discovery query stopped")
# discovery loops only finish when they don't handle the query
raise newException(DiscoveryError, "Unable to find any peer matching this request")
return await getter
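Putting the pieces together, a minimal usage sketch of the manager, assuming `rdv` is a RendezVous instance mounted on a running switch (the RendezVousInterface wrapper is defined in the next file):

let dm = DiscoveryManager()
dm.add(RendezVousInterface.new(rdv))
dm.advertise(RdvNamespace("my-app"))          # starts the advertise loop

let query = dm.request(RdvNamespace("my-app"))
query.forEach:
  # `peer` is injected by the template; the attributes also carry addresses
  echo "discovered peer: ", peer{PeerId}
# later: query.stop() or dm.stop()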

View File

@ -0,0 +1,77 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import sequtils
import chronos
import ./discoverymngr,
../protocols/rendezvous,
../peerid
type
RendezVousInterface* = ref object of DiscoveryInterface
rdv*: RendezVous
timeToRequest: Duration
timeToAdvertise: Duration
RdvNamespace* = distinct string
proc `==`*(a, b: RdvNamespace): bool {.borrow.}
method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
var namespace = ""
for attr in pa:
if attr.ofType(RdvNamespace):
namespace = string attr.to(RdvNamespace)
elif attr.ofType(DiscoveryService):
namespace = string attr.to(DiscoveryService)
elif attr.ofType(PeerId):
namespace = $attr.to(PeerId)
else:
# unhandled type
return
while true:
for pr in await self.rdv.request(namespace):
var peer: PeerAttributes
peer.add(pr.peerId)
for address in pr.addresses:
peer.add(address.address)
peer.add(DiscoveryService(namespace))
peer.add(RdvNamespace(namespace))
self.onPeerFound(peer)
await sleepAsync(self.timeToRequest)
method advertise*(self: RendezVousInterface) {.async.} =
while true:
var toAdvertise: seq[string]
for attr in self.toAdvertise:
if attr.ofType(RdvNamespace):
toAdvertise.add string attr.to(RdvNamespace)
elif attr.ofType(DiscoveryService):
toAdvertise.add string attr.to(DiscoveryService)
elif attr.ofType(PeerId):
toAdvertise.add $attr.to(PeerId)
self.advertisementUpdated.clear()
for toAdv in toAdvertise:
await self.rdv.advertise(toAdv, self.timeToAdvertise)
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
proc new*(T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
tta: Duration = MinimumDuration): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
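The defaults are deliberately conservative; both timers can be tuned at construction, keeping in mind that `advertise` rejects TTLs below MinimumDuration. A sketch, assuming `dm` is the DiscoveryManager from discoverymngr:

# poll the rendezvous point every 30 seconds, re-advertise every 2 hours
let di = RendezVousInterface.new(rdv, ttr = 30.seconds, tta = 2.hours)
dm.add(di)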

View File

@ -470,6 +470,8 @@ const
WS* = mapAnd(TCP, mapEq("ws"))
WSS* = mapAnd(TCP, mapEq("wss"))
WebSockets* = mapOr(WS, WSS)
Onion3* = mapEq("onion3")
TcpOnion3* = mapAnd(TCP, Onion3)
Unreliable* = mapOr(UDP)
@ -516,15 +518,10 @@ proc trimRight(s: string, ch: char): string =
break
result = s[0..(s.high - m)]
proc shcopy*(m1: var MultiAddress, m2: MultiAddress) =
shallowCopy(m1.data.buffer, m2.data.buffer)
m1.data.offset = m2.data.offset
proc protoCode*(ma: MultiAddress): MaResult[MultiCodec] =
## Returns MultiAddress ``ma`` protocol code.
var header: uint64
var vb: MultiAddress
shcopy(vb, ma)
var vb = ma
if vb.data.readVarint(header) == -1:
err("multiaddress: Malformed binary address!")
else:
@ -537,8 +534,7 @@ proc protoCode*(ma: MultiAddress): MaResult[MultiCodec] =
proc protoName*(ma: MultiAddress): MaResult[string] =
## Returns MultiAddress ``ma`` protocol name.
var header: uint64
var vb: MultiAddress
shcopy(vb, ma)
var vb = ma
if vb.data.readVarint(header) == -1:
err("multiaddress: Malformed binary address!")
else:
@ -555,9 +551,8 @@ proc protoArgument*(ma: MultiAddress,
  ## If the current MultiAddress does not have an argument value, the result will be
## ``0``.
var header: uint64
var vb: MultiAddress
var vb = ma
var buffer: seq[byte]
shcopy(vb, ma)
if vb.data.readVarint(header) == -1:
err("multiaddress: Malformed binary address!")
else:
@ -594,6 +589,13 @@ proc protoAddress*(ma: MultiAddress): MaResult[seq[byte]] =
buffer.setLen(res)
ok(buffer)
proc protoArgument*(ma: MultiAddress): MaResult[seq[byte]] =
## Returns MultiAddress ``ma`` protocol address binary blob.
##
  ## If the current MultiAddress does not have an argument value, the result array will
## be empty.
ma.protoAddress()
proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
var header: uint64
var data = newSeq[byte]()
@ -601,6 +603,9 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
var vb = ma
var res: MultiAddress
res.data = initVBuffer()
  if index < 0: return err("multiaddress: negative index given to getPart")
while offset <= index:
if vb.data.readVarint(header) == -1:
return err("multiaddress: Malformed binary address!")
@ -647,9 +652,13 @@ proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddres
? res.append(? ma[i])
ok(res)
proc `[]`*(ma: MultiAddress, i: int): MaResult[MultiAddress] {.inline.} =
proc `[]`*(ma: MultiAddress, i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
## Returns part with index ``i`` of MultiAddress ``ma``.
ma.getPart(i)
when i is BackwardsIndex:
let maLength = ? len(ma)
ma.getPart(maLength - int(i))
else:
ma.getPart(i)
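The backwards index is what callers like `expandDnsAddr` and `parseFullAddress` rely on to peel the trailing `/p2p/...` component off an address, e.g. (peer id shortened, so it would not decode as written):

let ma = MultiAddress.init("/ip4/1.2.3.4/tcp/4001/p2p/12D3KooW...").tryGet()
echo ma[^1].tryGet()      # /p2p/12D3KooW...
echo ma[0 .. ^2].tryGet() # /ip4/1.2.3.4/tcp/4001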
proc `[]`*(ma: MultiAddress, slice: HSlice): MaResult[MultiAddress] {.inline.} =
## Returns parts with slice ``slice`` of MultiAddress ``ma``.
@ -778,8 +787,7 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
proc validate*(ma: MultiAddress): bool =
## Returns ``true`` if MultiAddress ``ma`` is valid.
var header: uint64
var vb: MultiAddress
shcopy(vb, ma)
var vb = ma
while true:
if vb.data.isEmpty():
break
@ -1078,6 +1086,9 @@ proc `$`*(pat: MaPattern): string =
elif pat.operator == Eq:
result = $pat.value
proc bytes*(value: MultiAddress): seq[byte] =
value.data.buffer
proc write*(pb: var ProtoBuffer, field: int, value: MultiAddress) {.inline.} =
write(pb, field, value.data.buffer)

View File

@ -14,7 +14,7 @@ else:
import
std/[streams, strutils, sets, sequtils],
chronos, chronicles,
chronos, chronicles, stew/byteutils,
dnsclientpkg/[protocol, types]
import
@ -76,15 +76,11 @@ proc getDnsResponse(
if not receivedDataFuture.finished:
raise newException(IOError, "DNS server timeout")
var
rawResponse = sock.getMessage()
dataStream = newStringStream()
dataStream.writeData(addr rawResponse[0], rawResponse.len)
dataStream.setPosition(0)
let rawResponse = sock.getMessage()
    # parseResponse can have a raises: [Exception, ..] annotation because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
return parseResponse(dataStream)
return parseResponse(string.fromBytes(rawResponse))
except CatchableError as exc: raise exc
except Exception as exc: raiseAssert exc.msg
finally:
@ -118,7 +114,14 @@ method resolveIp*(
try:
let resp = await fut
for answer in resp.answers:
resolvedAddresses.incl(answer.toString())
      # toString can have a raises: [Exception, ..] annotation because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
resolvedAddresses.incl(
try: answer.toString()
except CatchableError as exc: raise exc
except Exception as exc: raiseAssert exc.msg
)
except CancelledError as e:
raise e
except ValueError as e:
@ -158,6 +161,11 @@ method resolveTxt*(
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
except Exception as e:
      # toString can have a raises: [Exception, ..] annotation because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
debug "Failed to resolve TXT, returning empty set"
return @[]

View File

@ -13,7 +13,7 @@ else:
{.push raises: [].}
import std/[sugar, sets, sequtils, strutils]
import
import
chronos,
chronicles,
stew/[endians2, byteutils]
@ -22,14 +22,14 @@ import ".."/[multiaddress, multicodec]
logScope:
topics = "libp2p nameresolver"
type
type
NameResolver* = ref object of RootObj
method resolveTxt*(
self: NameResolver,
address: string): Future[seq[string]] {.async, base.} =
## Get TXT record
##
##
doAssert(false, "Not implemented!")
@ -39,16 +39,18 @@ method resolveIp*(
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async, base.} =
## Resolve the specified address
##
##
doAssert(false, "Not implemented!")
proc getHostname*(ma: MultiAddress): string =
let firstPart = ($ma[0].get()).split('/')
if firstPart.len > 1: firstPart[2]
let
firstPart = ma[0].valueOr: return ""
fpSplitted = ($firstPart).split('/', 2)
if fpSplitted.len > 2: fpSplitted[2]
else: ""
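For example, with valid addresses the new implementation behaves like this sketch (assuming the multiaddress module is imported):

let wss = MultiAddress.init("/dns4/example.com/tcp/443/wss").tryGet()
doAssert wss.getHostname() == "example.com"
let tcp = MultiAddress.init("/ip4/1.2.3.4/tcp/80").tryGet()
doAssert tcp.getHostname() == "1.2.3.4"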
proc resolveDnsAddress(
proc resolveOneAddress(
self: NameResolver,
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
@ -64,29 +66,22 @@ proc resolveDnsAddress(
let
port = Port(fromBytesBE(uint16, pbuf))
resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain)
return collect(newSeqOfCap(4)):
for address in resolvedAddresses:
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.get()): continue
if DNS.match(part.tryGet()): continue
createdAddress &= part.tryGet()
createdAddress
func matchDnsSuffix(m1, m2: MultiAddress): MaResult[bool] =
for partMaybe in m1:
let part = ?partMaybe
if DNS.match(part): continue
let entryProt = ?m2[?part.protoCode()]
if entryProt != part:
return ok(false)
return ok(true)
proc resolveDnsAddr(
proc resolveDnsAddr*(
self: NameResolver,
ma: MultiAddress,
depth: int = 0): Future[seq[MultiAddress]]
{.async.} =
depth: int = 0): Future[seq[MultiAddress]] {.async.} =
if not DNSADDR.matchPartial(ma):
return @[ma]
trace "Resolving dnsaddr", ma
if depth > 6:
@ -104,21 +99,17 @@ proc resolveDnsAddr(
if not entry.startsWith("dnsaddr="): continue
let entryValue = MultiAddress.init(entry[8..^1]).tryGet()
if not matchDnsSuffix(ma, entryValue).tryGet(): continue
if entryValue.contains(multiCodec("p2p")).tryGet() and ma.contains(multiCodec("p2p")).tryGet():
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
continue
      # The spec is not clear whether only DNSADDR can be resolved
      # recursively, or any DNS addr. Only handling DNSADDR because it's
      # simpler and avoids infinite recursion
if DNSADDR.matchPartial(entryValue):
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
for r in resolved:
result.add(r)
else:
result.add(entryValue)
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
for r in resolved:
result.add(r)
if result.len == 0:
debug "Failed to resolve any DNSADDR", ma
return @[ma]
debug "Failed to resolve a DNSADDR", ma
return @[]
return result
@ -133,14 +124,15 @@ proc resolveMAddress*(
let code = address[0].get().protoCode().get()
let seq = case code:
of multiCodec("dns"):
await self.resolveDnsAddress(address)
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveDnsAddress(address, Domain.AF_INET)
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveDnsAddress(address, Domain.AF_INET6)
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
doAssert false
@[address]
for ad in seq:
res.incl(ad)

View File

@ -148,7 +148,7 @@ func init*(pid: var PeerId, data: string): bool =
if Base58.decode(data, p, length) == Base58Status.Success:
p.setLen(length)
var opid: PeerId
shallowCopy(opid.data, p)
opid.data = p
if opid.validate():
pid = opid
result = true

View File

@ -15,7 +15,7 @@ else:
import std/[options, sequtils]
import pkg/[chronos, chronicles, stew/results]
import peerid, multiaddress, crypto/crypto, routing_record, errors, utility
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
export peerid, multiaddress, crypto, routing_record, errors, results
@ -69,6 +69,27 @@ proc update*(p: PeerInfo) {.async.} =
proc addrs*(p: PeerInfo): seq[MultiAddress] =
p.addrs
proc fullAddrs*(p: PeerInfo): MaResult[seq[MultiAddress]] =
let peerIdPart = ? MultiAddress.init(multiCodec("p2p"), p.peerId.data)
var res: seq[MultiAddress]
for address in p.addrs:
res.add(? concat(address, peerIdPart))
ok(res)
proc parseFullAddress*(ma: MultiAddress): MaResult[(PeerId, MultiAddress)] =
let p2pPart = ? ma[^1]
if ? p2pPart.protoCode != multiCodec("p2p"):
return err("Missing p2p part from multiaddress!")
let res = (
? PeerId.init(? p2pPart.protoArgument()).orErr("invalid peerid"),
? ma[0 .. ^2]
)
ok(res)
proc parseFullAddress*(ma: string | seq[byte]): MaResult[(PeerId, MultiAddress)] =
parseFullAddress(? MultiAddress.init(ma))
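`fullAddrs` and `parseFullAddress` are inverses: one appends the `/p2p/<peerid>` suffix to each listen address, the other splits it back out. A sketch, with an abbreviated peer id that would not validate as written:

let (peerId, address) =
  "/ip4/1.2.3.4/tcp/4001/p2p/12D3KooW...".parseFullAddress().tryGet()
doAssert $address == "/ip4/1.2.3.4/tcp/4001"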
proc new*(
p: typedesc[PeerInfo],
key: PrivateKey,

View File

@ -124,7 +124,7 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
proc initProtoBuffer*(data: seq[byte], offset = 0,
options: set[ProtoFlags] = {}): ProtoBuffer =
  ## Initialize ProtoBuffer with a copy of ``data``.
shallowCopy(result.buffer, data)
result.buffer = data
result.offset = offset
result.options = options

View File

@ -13,6 +13,7 @@ else:
{.push raises: [].}
import std/[options, sets, sequtils]
import stew/results
import chronos, chronicles, stew/objects
import ../protocol,
../../switch,
@ -226,7 +227,10 @@ proc tryDial(a: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
try:
await a.sem.acquire()
let ma = await a.switch.dialer.tryDial(conn.peerId, addrs)
await conn.sendResponseOk(ma)
if ma.isSome:
await conn.sendResponseOk(ma.get())
else:
await conn.sendResponseError(DialError, "Missing observed address")
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -241,15 +245,19 @@ proc handleDial(a: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
return conn.sendResponseError(BadRequest, "PeerId mismatch")
var isRelayed = conn.observedAddr.contains(multiCodec("p2p-circuit"))
if conn.observedAddr.isNone:
return conn.sendResponseError(BadRequest, "Missing observed address")
let observedAddr = conn.observedAddr.get()
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
let hostIp = conn.observedAddr[0]
let hostIp = observedAddr[0]
if hostIp.isErr() or not IP.match(hostIp.get()):
trace "wrong observed address", address=conn.observedAddr
trace "wrong observed address", address=observedAddr
return conn.sendResponseError(InternalError, "Expected an IP address")
var addrs = initHashSet[MultiAddress]()
addrs.incl(conn.observedAddr)
addrs.incl(observedAddr)
for ma in peerInfo.addrs:
isRelayed = ma.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():

View File

@ -64,15 +64,17 @@ proc bridge*(connSrc: Connection, connDst: Connection) {.async.} =
await futSrc or futDst
if futSrc.finished():
bufRead = await futSrc
bytesSendFromSrcToDst.inc(bufRead)
await connDst.write(@bufSrcToDst[0..<bufRead])
zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
if bufRead > 0:
bytesSendFromSrcToDst.inc(bufRead)
await connDst.write(@bufSrcToDst[0..<bufRead])
zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
if futDst.finished():
bufRead = await futDst
bytesSendFromDstToSrc += bufRead
await connSrc.write(bufDstToSrc[0..<bufRead])
zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
if bufRead > 0:
bytesSendFromDstToSrc += bufRead
await connSrc.write(bufDstToSrc[0..<bufRead])
zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
except CancelledError as exc:
raise exc

View File

@ -16,6 +16,7 @@ else:
{.push raises: [].}
import std/[sequtils, options, strutils, sugar]
import stew/results
import chronos, chronicles
import ../protobuf/minprotobuf,
../peerinfo,
@ -80,7 +81,7 @@ chronicles.expandIt(IdentifyInfo):
if iinfo.signedPeerRecord.isSome(): "Some"
else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: MultiAddress, sendSpr: bool): ProtoBuffer
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
{.raises: [Defect].} =
result = initProtoBuffer()
@ -91,7 +92,8 @@ proc encodeMsg(peerInfo: PeerInfo, observedAddr: MultiAddress, sendSpr: bool): P
result.write(2, ma.data.buffer)
for proto in peerInfo.protocols:
result.write(3, proto)
result.write(4, observedAddr.data.buffer)
if observedAddr.isSome:
result.write(4, observedAddr.get().data.buffer)
let protoVersion = ProtoVersion
result.write(5, protoVersion)
let agentVersion = if peerInfo.agentVersion.len <= 0:

View File

@ -360,7 +360,7 @@ method rpcHandler*(g: GossipSub,
template sub: untyped = rpcMsg.subscriptions[i]
g.handleSubscribe(peer, sub.topic, sub.subscribe)
# the above call applied limtis to subs number
# the above call applied limits to subs number
# in gossipsub we want to apply scoring as well
if rpcMsg.subscriptions.len > g.topicsHigh:
debug "received an rpc message with an oversized amount of subscriptions", peer,

View File

@ -19,6 +19,9 @@ import ".."/[pubsubpeer, peertable, timedcache, mcache, floodsub, pubsub]
import "../rpc"/[messages]
import "../../.."/[peerid, multiaddress, utility, switch, routing_record, signed_envelope, utils/heartbeat]
logScope:
topics = "libp2p gossipsub"
declareGauge(libp2p_gossipsub_cache_window_size, "the number of messages in the cache")
declareGauge(libp2p_gossipsub_peers_per_topic_mesh, "gossipsub peers per topic in mesh", labels = ["topic"])
declareGauge(libp2p_gossipsub_peers_per_topic_fanout, "gossipsub peers per topic in fanout", labels = ["topic"])

View File

@ -18,6 +18,9 @@ import "."/[types]
import ".."/[pubsubpeer]
import "../../.."/[peerid, multiaddress, utility, switch, utils/heartbeat]
logScope:
topics = "libp2p gossipsub"
declareGauge(libp2p_gossipsub_peers_scores, "the scores of the peers in gossipsub", labels = ["agent"])
declareCounter(libp2p_gossipsub_bad_score_disconnection, "the number of peers disconnected by gossipsub", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_firstMessageDeliveries, "Detailed gossipsub scoring metric", labels = ["agent"])

View File

@ -16,7 +16,7 @@ import chronos
import std/[tables, sets]
import ".."/[floodsub, peertable, mcache, pubsubpeer]
import "../rpc"/[messages]
import "../../.."/[peerid, multiaddress]
import "../../.."/[peerid, multiaddress, utility]
const
GossipSubCodec* = "/meshsub/1.1.0"
@ -65,7 +65,7 @@ type
meshFailurePenalty*: float64
invalidMessageDeliveries*: float64
TopicParams* = object
TopicParams* {.public.} = object
topicWeight*: float64
# p1
@ -102,7 +102,7 @@ type
appScore*: float64 # application specific score
behaviourPenalty*: float64 # the eventual penalty score
GossipSubParams* = object
GossipSubParams* {.public.} = object
explicit*: bool
pruneBackoff*: Duration
unsubscribeBackoff*: Duration

View File

@ -130,7 +130,7 @@ type
knownTopics*: HashSet[string]
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base.} =
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
## handle peer disconnects
##
@ -267,11 +267,11 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
method rpcHandler*(p: PubSub,
peer: PubSubPeer,
rpcMsg: RPCMsg): Future[void] {.base.} =
rpcMsg: RPCMsg): Future[void] {.base, async.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"
method onNewPeer(p: PubSub, peer: PubSubPeer) {.base.} = discard
method onNewPeer(p: PubSub, peer: PubSubPeer) {.base, gcsafe.} = discard
method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {.base, gcsafe.} =
# Peer event is raised for the send connection in particular
@ -377,7 +377,7 @@ method handleConn*(p: PubSub,
finally:
await conn.closeWithEOF()
method subscribePeer*(p: PubSub, peer: PeerId) {.base.} =
method subscribePeer*(p: PubSub, peer: PeerId) {.base, gcsafe.} =
## subscribe to remote peer to receive/send pubsub
## messages
##
@ -400,7 +400,7 @@ proc updateTopicMetrics(p: PubSub, topic: string) =
libp2p_pubsub_topic_handlers.set(others, labelValues = ["other"])
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base.} =
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base, gcsafe.} =
# Called when subscribe is called the first time for a topic or unsubscribe
# removes the last handler
@ -433,7 +433,7 @@ proc unsubscribe*(p: PubSub, topics: openArray[TopicPair]) {.public.} =
for t in topics:
p.unsubscribe(t.topic, t.handler)
proc unsubscribeAll*(p: PubSub, topic: string) {.public.} =
proc unsubscribeAll*(p: PubSub, topic: string) {.public, gcsafe.} =
## unsubscribe every `handler` from `topic`
if topic notin p.topics:
debug "unsubscribeAll called for an unknown topic", topic
@ -495,7 +495,7 @@ method initPubSub*(p: PubSub)
method addValidator*(p: PubSub,
topic: varargs[string],
hook: ValidatorHandler) {.base, public.} =
hook: ValidatorHandler) {.base, public, gcsafe.} =
## Add a validator to a `topic`. Each new message received in this
## will be sent to `hook`. `hook` can return either `Accept`,
## `Ignore` or `Reject` (which can descore the peer)

View File

@ -12,7 +12,8 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[sequtils, strutils, tables, hashes]
import std/[sequtils, strutils, tables, hashes, options]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
import rpc/[messages, message, protobuf],
../../peerid,
@ -174,7 +175,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
trace "Get new send connection", p, newConn
p.sendConn = newConn
p.address = some(p.sendConn.observedAddr)
p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress)
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Connected))

View File

@ -22,7 +22,7 @@ import messages,
logScope:
topics = "pubsubprotobuf"
topics = "libp2p pubsubprotobuf"
when defined(libp2p_protobuf_metrics):
import metrics

View File

@ -0,0 +1,677 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables, sequtils, sugar, sets, options
import chronos,
chronicles,
bearssl/rand,
stew/[byteutils, objects]
import ./protocol,
../switch,
../routing_record,
../utils/heartbeat,
../stream/connection,
../utils/offsettedseq,
../utils/semaphore
export chronicles
logScope:
topics = "libp2p discovery rendezvous"
const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
MaximumDuration = 72.hours
MinimumTTL = MinimumDuration.seconds.uint64
MaximumTTL = MaximumDuration.seconds.uint64
RegistrationLimitPerPeer = 1000
DiscoverLimit = 1000'u64
SemaphoreDefaultSize = 5
type
MessageType {.pure.} = enum
Register = 0
RegisterResponse = 1
Unregister = 2
Discover = 3
DiscoverResponse = 4
ResponseStatus = enum
Ok = 0
InvalidNamespace = 100
InvalidSignedPeerRecord = 101
InvalidTTL = 102
InvalidCookie = 103
NotAuthorized = 200
InternalError = 300
Unavailable = 400
Cookie = object
offset : uint64
ns : string
Register = object
ns : string
signedPeerRecord: seq[byte]
ttl: Option[uint64] # in seconds
RegisterResponse = object
status: ResponseStatus
text: Option[string]
ttl: Option[uint64] # in seconds
Unregister = object
ns: string
Discover = object
ns: string
limit: Option[uint64]
cookie: Option[seq[byte]]
DiscoverResponse = object
registrations: seq[Register]
cookie: Option[seq[byte]]
status: ResponseStatus
text: Option[string]
Message = object
msgType: MessageType
register: Option[Register]
registerResponse: Option[RegisterResponse]
unregister: Option[Unregister]
discover: Option[Discover]
discoverResponse: Option[DiscoverResponse]
proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
result.write(1, c.offset)
result.write(2, c.ns)
result.finish()
proc encode(r: Register): ProtoBuffer =
result = initProtoBuffer()
result.write(1, r.ns)
result.write(2, r.signedPeerRecord)
if r.ttl.isSome():
result.write(3, r.ttl.get())
result.finish()
proc encode(rr: RegisterResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, rr.status.uint)
if rr.text.isSome():
result.write(2, rr.text.get())
if rr.ttl.isSome():
result.write(3, rr.ttl.get())
result.finish()
proc encode(u: Unregister): ProtoBuffer =
result = initProtoBuffer()
result.write(1, u.ns)
result.finish()
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
if d.limit.isSome():
result.write(2, d.limit.get())
if d.cookie.isSome():
result.write(3, d.cookie.get())
result.finish()
proc encode(d: DiscoverResponse): ProtoBuffer =
result = initProtoBuffer()
for reg in d.registrations:
result.write(1, reg.encode())
if d.cookie.isSome():
result.write(2, d.cookie.get())
result.write(3, d.status.uint)
if d.text.isSome():
result.write(4, d.text.get())
result.finish()
proc encode(msg: Message): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
if msg.register.isSome():
result.write(2, msg.register.get().encode())
if msg.registerResponse.isSome():
result.write(3, msg.registerResponse.get().encode())
if msg.unregister.isSome():
result.write(4, msg.unregister.get().encode())
if msg.discover.isSome():
result.write(5, msg.discover.get().encode())
if msg.discoverResponse.isSome():
result.write(6, msg.discoverResponse.get().encode())
result.finish()
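Each sub-message is framed as a nested protobuf inside the envelope. For instance, the register path below builds and sends its request roughly like this sketch, assuming `spr` holds an encoded signed peer record and `conn` is an open stream:

let msg = encode(Message(
  msgType: MessageType.Register,
  register: some(Register(
    ns: "my-app",
    signedPeerRecord: spr,
    ttl: some(2.hours.seconds.uint64)))))
await conn.writeLp(msg.buffer)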
proc decode(_: typedesc[Cookie], buf: seq[byte]): Option[Cookie] =
var c: Cookie
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
if r1.isErr() or r2.isErr(): return none(Cookie)
some(c)
proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
var
r: Register
ttl: uint64
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, r.ns)
r2 = pb.getRequiredField(2, r.signedPeerRecord)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr(): return none(Register)
if r3.get(): r.ttl = some(ttl)
some(r)
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterResponse] =
var
rr: RegisterResponse
statusOrd: uint
text: string
ttl: uint64
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, statusOrd)
r2 = pb.getField(2, text)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr() or
not checkedEnumAssign(rr.status, statusOrd): return none(RegisterResponse)
if r2.get(): rr.text = some(text)
if r3.get(): rr.ttl = some(ttl)
some(rr)
proc decode(_: typedesc[Unregister], buf: seq[byte]): Option[Unregister] =
var u: Unregister
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, u.ns)
if r1.isErr(): return none(Unregister)
some(u)
proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
var
d: Discover
limit: uint64
cookie: seq[byte]
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, d.ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr: return none(Discover)
if r2.get(): d.limit = some(limit)
if r3.get(): d.cookie = some(cookie)
some(d)
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverResponse] =
var
dr: DiscoverResponse
registrations: seq[seq[byte]]
cookie: seq[byte]
statusOrd: uint
text: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRepeatedField(1, registrations)
r2 = pb.getField(2, cookie)
r3 = pb.getRequiredField(3, statusOrd)
r4 = pb.getField(4, text)
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
not checkedEnumAssign(dr.status, statusOrd): return none(DiscoverResponse)
for reg in registrations:
var r: Register
let regOpt = Register.decode(reg)
if regOpt.isNone(): return none(DiscoverResponse)
dr.registrations.add(regOpt.get())
if r2.get(): dr.cookie = some(cookie)
if r4.get(): dr.text = some(text)
some(dr)
proc decode(_: typedesc[Message], buf: seq[byte]): Option[Message] =
var
msg: Message
statusOrd: uint
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, statusOrd)
r2 = pb.getField(2, pbr)
r3 = pb.getField(3, pbrr)
r4 = pb.getField(4, pbu)
r5 = pb.getField(5, pbd)
r6 = pb.getField(6, pbdr)
if r1.isErr() or r2.isErr() or r3.isErr() or
r4.isErr() or r5.isErr() or r6.isErr() or
not checkedEnumAssign(msg.msgType, statusOrd): return none(Message)
if r2.get():
msg.register = Register.decode(pbr.buffer)
if msg.register.isNone(): return none(Message)
if r3.get():
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
if msg.registerResponse.isNone(): return none(Message)
if r4.get():
msg.unregister = Unregister.decode(pbu.buffer)
if msg.unregister.isNone(): return none(Message)
if r5.get():
msg.discover = Discover.decode(pbd.buffer)
if msg.discover.isNone(): return none(Message)
if r6.get():
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
if msg.discoverResponse.isNone(): return none(Message)
some(msg)
type
RendezVousError* = object of LPError
RegisteredData = object
expiration: Moment
peerId: PeerId
data: Register
RendezVous* = ref object of LPProtocol
# Registered needs to be an offsetted sequence
  # because we need stable indices for the cookies.
registered: OffsettedSeq[RegisteredData]
# Namespaces is a table whose key is a salted namespace and
# the value is the index sequence corresponding to this
# namespace in the offsettedqueue.
namespaces: Table[string, seq[int]]
rng: ref HmacDrbgContext
salt: string
defaultDT: Moment
registerDeletionLoop: Future[void]
#registerEvent: AsyncEvent # TODO: to raise during the heartbeat
# + make the heartbeat sleep duration "smarter"
sema: AsyncSemaphore
peers: seq[PeerId]
cookiesSaved: Table[PeerId, Table[string, seq[byte]]]
switch: Switch
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
if spr.len == 0: return err("Empty peer record")
let signedEnv = ? SignedPeerRecord.decode(spr).mapErr(x => $x)
if signedEnv.data.peerId != peerId:
return err("Bad Peer ID")
return ok()
proc sendRegisterResponse(conn: Connection,
ttl: uint64) {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: some(RegisterResponse(status: Ok, ttl: some(ttl)))))
await conn.writeLp(msg.buffer)
proc sendRegisterResponseError(conn: Connection,
status: ResponseStatus,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: some(RegisterResponse(status: status, text: some(text)))))
await conn.writeLp(msg.buffer)
proc sendDiscoverResponse(conn: Connection,
s: seq[Register],
cookie: Cookie) {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: some(DiscoverResponse(
status: Ok,
registrations: s,
cookie: some(cookie.encode().buffer)
))
))
await conn.writeLp(msg.buffer)
proc sendDiscoverResponseError(conn: Connection,
status: ResponseStatus,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: some(DiscoverResponse(status: status, text: some(text)))))
await conn.writeLp(msg.buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
let n = Moment.now()
for data in rdv.registered:
if data.peerId == peerId and data.expiration > n:
result.inc()
proc save(rdv: RendezVous,
ns: string,
peerId: PeerId,
r: Register,
update: bool = true) =
let nsSalted = ns & rdv.salt
discard rdv.namespaces.hasKeyOrPut(nsSalted, newSeq[int]())
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == peerId:
if update == false: return
rdv.registered[index].expiration = rdv.defaultDT
rdv.registered.add(
RegisteredData(
peerId: peerId,
expiration: Moment.now() + r.ttl.get(MinimumTTL).int64.seconds,
data: r
)
)
rdv.namespaces[nsSalted].add(rdv.registered.high)
# rdv.registerEvent.fire()
except KeyError:
doAssert false, "Should have key"
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
if r.ns.len notin 1..255:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(MinimumTTL)
if ttl notin MinimumTTL..MaximumTTL:
return conn.sendRegisterResponseError(InvalidTTL)
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
if pr.isErr():
return conn.sendRegisterResponseError(InvalidSignedPeerRecord, pr.error())
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
rdv.save(r.ns, conn.peerId, r)
conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
trace "Received Unregister", peerId = conn.peerId, ns = u.ns
let nsSalted = u.ns & rdv.salt
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == conn.peerId:
rdv.registered[index].expiration = rdv.defaultDT
except KeyError:
return
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
if d.ns.len notin 0..255:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
var
cookie =
if d.cookie.isSome():
try:
Cookie.decode(d.cookie.get()).get()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
else: Cookie(offset: rdv.registered.low().uint64 - 1)
if cookie.ns != d.ns or
cookie.offset notin rdv.registered.low().uint64..rdv.registered.high().uint64:
cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
let
nsSalted = d.ns & rdv.salt
namespaces =
if d.ns != "":
try:
rdv.namespaces[nsSalted]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else: toSeq(cookie.offset.int..rdv.registered.high())
if namespaces.len() == 0:
await conn.sendDiscoverResponse(@[], Cookie())
return
var offset = namespaces[^1]
let n = Moment.now()
var s = collect(newSeq()):
for index in namespaces:
var reg = rdv.registered[index]
if limit == 0:
offset = index
break
if reg.expiration < n or index.uint64 <= cookie.offset: continue
limit.dec()
reg.data.ttl = some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
proc advertisePeer(rdv: RendezVous,
peer: PeerId,
msg: seq[byte]) {.async.} =
proc advertiseWrap() {.async.} =
try:
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer: await conn.close()
await conn.writeLp(msg)
let
buf = await conn.readLp(4096)
msgRecv = Message.decode(buf).get()
if msgRecv.msgType != MessageType.RegisterResponse:
trace "Unexpected register response", peer, msgType = msgRecv.msgType
elif msgRecv.registerResponse.isNone() or
msgRecv.registerResponse.get().status != ResponseStatus.Ok:
trace "Refuse to register", peer, response = msgRecv.registerResponse
except CatchableError as exc:
trace "exception in the advertise", error = exc.msg
finally:
rdv.sema.release()
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
proc advertise*(rdv: RendezVous,
ns: string,
ttl: Duration = MinimumDuration) {.async.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode()
if sprBuff.isErr():
raise newException(RendezVousError, "Wrong Signed Peer Record")
if ns.len notin 1..255:
raise newException(RendezVousError, "Invalid namespace")
if ttl notin MinimumDuration..MaximumDuration:
raise newException(RendezVousError, "Invalid time to live")
let
r = Register(ns: ns, signedPeerRecord: sprBuff.get(), ttl: some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: some(r)))
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
let fut = collect(newSeq()):
for peer in rdv.peers:
trace "Send Advertise", peerId = peer, ns
rdv.advertisePeer(peer, msg.buffer)
await allFutures(fut)
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
let
nsSalted = ns & rdv.salt
n = Moment.now()
try:
collect(newSeq()):
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].expiration > n:
SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).get().data
except KeyError as exc:
@[]
proc request*(rdv: RendezVous,
ns: string,
l: int = DiscoverLimit.int): Future[seq[PeerRecord]] {.async.} =
let nsSalted = ns & rdv.salt
var
s: Table[PeerId, (PeerRecord, Register)]
limit: uint64
d = Discover(ns: ns)
if l <= 0 or l > DiscoverLimit.int:
raise newException(RendezVousError, "Invalid limit")
if ns.len notin 0..255:
raise newException(RendezVousError, "Invalid namespace")
limit = l.uint64
proc requestPeer(peer: PeerId) {.async.} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer: await conn.close()
d.limit = some(limit)
d.cookie =
try:
some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
none(seq[byte])
await conn.writeLp(encode(Message(
msgType: MessageType.Discover,
discover: some(d))).buffer)
let
buf = await conn.readLp(65536)
msgRcv = Message.decode(buf).get()
if msgRcv.msgType != MessageType.DiscoverResponse or
msgRcv.discoverResponse.isNone():
debug "Unexpected discover response", msgType = msgRcv.msgType
return
let resp = msgRcv.discoverResponse.get()
if resp.status != ResponseStatus.Ok:
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
if resp.cookie.isSome() and resp.cookie.get().len < 1000:
if rdv.cookiesSaved.hasKeyOrPut(peer, {ns: resp.cookie.get()}.toTable):
rdv.cookiesSaved[peer][ns] = resp.cookie.get()
for r in resp.registrations:
if limit == 0: return
if r.ttl.isNone() or r.ttl.get() > MaximumTTL: continue
let sprRes = SignedPeerRecord.decode(r.signedPeerRecord)
if sprRes.isErr(): continue
let pr = sprRes.get().data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) = s[pr.peerId]
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get() < r.ttl.get()) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
s[pr.peerId] = (pr, r)
limit.dec()
for (_, r) in s.values():
rdv.save(ns, peer, r, false)
# copy to avoid resizes during the loop
let peers = rdv.peers
for peer in peers:
if limit == 0: break
if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]: continue
try:
trace "Send Request", peerId = peer, ns
await peer.requestPeer()
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception catch in request", error = exc.msg
return toSeq(s.values()).mapIt(it[0])
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
let nsSalted = ns & rdv.salt
try:
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == rdv.switch.peerInfo.peerId:
rdv.registered[index].expiration = rdv.defaultDT
except KeyError:
return
proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
# TODO: find a way to improve this, maybe something similar to the advertise
if ns.len notin 1..255:
raise newException(RendezVousError, "Invalid namespace")
rdv.unsubscribeLocally(ns)
let msg = encode(Message(
msgType: MessageType.Unregister,
unregister: some(Unregister(ns: ns))))
proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
try:
let conn = await rdv.switch.dial(peerId, RendezVousCodec)
defer: await conn.close()
await conn.writeLp(msg.buffer)
except CatchableError as exc:
trace "exception while unsubscribing", error = exc.msg
for peer in rdv.peers:
discard await rdv.unsubscribePeer(peer).withTimeout(5.seconds)
proc setup*(rdv: RendezVous, switch: Switch) =
rdv.switch = switch
proc handlePeer(peerId: PeerId, event: PeerEvent) {.async.} =
if event.kind == PeerEventKind.Joined:
rdv.peers.add(peerId)
elif event.kind == PeerEventKind.Left:
rdv.peers.keepItIf(it != peerId)
rdv.switch.addPeerEventHandler(handlePeer, Joined)
rdv.switch.addPeerEventHandler(handlePeer, Left)
proc new*(T: typedesc[RendezVous],
rng: ref HmacDrbgContext = newRng()): T =
let rdv = T(
rng: rng,
salt: string.fromBytes(generateBytes(rng[], 8)),
registered: initOffsettedSeq[RegisteredData](1),
defaultDT: Moment.now() - 1.days,
#registerEvent: newAsyncEvent(),
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
try:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).get()
case msg.msgType:
of MessageType.Register: await rdv.register(conn, msg.register.get())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.get())
of MessageType.Discover: await rdv.discover(conn, msg.discover.get())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception in rendezvous handler", error = exc.msg
finally:
await conn.close()
rdv.handler = handleStream
rdv.codec = RendezVousCodec
return rdv
proc new*(T: typedesc[RendezVous],
switch: Switch,
rng: ref HmacDrbgContext = newRng()): T =
let rdv = T.new(rng)
rdv.setup(switch)
return rdv
proc deletesRegister(rdv: RendezVous) {.async.} =
heartbeat "Register timeout", 1.minutes:
let n = Moment.now()
rdv.registered.flushIfIt(it.expiration < n)
for data in rdv.namespaces.mvalues():
data.keepItIf(it >= rdv.registered.offset)
method start*(rdv: RendezVous) {.async.} =
if not rdv.registerDeletionLoop.isNil:
warn "Starting rendezvous twice"
return
rdv.registerDeletionLoop = rdv.deletesRegister()
rdv.started = true
method stop*(rdv: RendezVous) {.async.} =
if rdv.registerDeletionLoop.isNil:
warn "Stopping rendezvous without starting it"
return
rdv.started = false
rdv.registerDeletionLoop.cancel()
rdv.registerDeletionLoop = nil
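End to end, the protocol is used roughly as in this sketch, assuming `switch` is a Switch with a reachable rendezvous point in its peer store (mounting should happen before the switch starts):

let rdv = RendezVous.new(switch)
switch.mount(rdv)
await rdv.start()

await rdv.advertise("my-app", 3.hours)    # register under a namespace
let records = await rdv.request("my-app")
for r in records:
  echo r.peerId, " reachable at ", r.addresses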

View File

@ -7,12 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push gcsafe.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[strformat]
import stew/results
import chronos, chronicles
import ../protocol,
../../stream/streamseq,
@ -21,7 +23,7 @@ import ../protocol,
../../peerinfo,
../../errors
export protocol
export protocol, results
logScope:
topics = "libp2p secure"
@ -48,7 +50,7 @@ chronicles.formatIt(SecureConn): shortLog(it)
proc new*(T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: MultiAddress,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout): T =
result = T(stream: conn,
peerId: peerId,

View File

@ -13,10 +13,13 @@ else:
{.push raises: [].}
import std/[oids, strformat]
import stew/results
import chronos, chronicles, metrics
import connection
import ../utility
export results
logScope:
topics = "libp2p chronosstream"
@ -60,7 +63,7 @@ proc init*(C: type ChronosStream,
client: StreamTransport,
dir: Direction,
timeout = DefaultChronosStreamTimeout,
observedAddr: MultiAddress = MultiAddress()): ChronosStream =
observedAddr: Opt[MultiAddress]): ChronosStream =
result = C(client: client,
timeout: timeout,
dir: dir,
@ -127,6 +130,9 @@ proc completeWrite(
method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
# drives up memory usage
if msg.len == 0:
trace "Empty byte seq, nothing to write"
return
if s.closed:
let fut = newFuture[void]("chronosstream.write.closed")
fut.fail(newLPStreamClosedError())

View File

@ -13,13 +13,14 @@ else:
{.push raises: [].}
import std/[hashes, oids, strformat]
import stew/results
import chronicles, chronos, metrics
import lpstream,
../multiaddress,
../peerinfo,
../errors
export lpstream, peerinfo, errors
export lpstream, peerinfo, errors, results
logScope:
topics = "libp2p connection"
@ -37,7 +38,7 @@ type
timerTaskFut: Future[void] # the current timer instance
timeoutHandler*: TimeoutHandler # timeout handler
peerId*: PeerId
observedAddr*: MultiAddress
observedAddr*: Opt[MultiAddress]
upgraded*: Future[void]
protocol*: string # protocol used by the connection, used as tag for metrics
transportDir*: Direction # The bottom level transport (generally the socket) direction
@ -160,9 +161,9 @@ method getWrapped*(s: Connection): Connection {.base.} =
proc new*(C: type Connection,
peerId: PeerId,
dir: Direction,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout,
timeoutHandler: TimeoutHandler = nil,
observedAddr: MultiAddress = MultiAddress()): Connection =
timeoutHandler: TimeoutHandler = nil): Connection =
result = C(peerId: peerId,
dir: dir,
timeout: timeout,
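With `observedAddr` now an `Opt[MultiAddress]` moved ahead of the defaulted parameters, call sites state explicitly when the address is unknown rather than passing a default-constructed MultiAddress; a sketch:

# an inbound connection whose observed address could not be determined
let conn = Connection.new(peerId, Direction.In, Opt.none(MultiAddress))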

View File

@ -9,6 +9,7 @@
## Length Prefixed stream implementation
{.push gcsafe.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
@ -79,7 +80,7 @@ type
opened*: uint64
closed*: uint64
proc setupStreamTracker(name: string): StreamTracker =
proc setupStreamTracker*(name: string): StreamTracker =
let tracker = new StreamTracker
proc dumpTracking(): string {.gcsafe.} =

View File

@ -15,6 +15,7 @@ else:
{.push raises: [].}
import std/[oids, sequtils]
import stew/results
import chronos, chronicles
import transport,
../errors,
@ -31,7 +32,7 @@ import transport,
logScope:
topics = "libp2p tcptransport"
export transport
export transport, results
const
TcpTransportTrackerName* = "libp2p.tcptransport"
@ -71,18 +72,20 @@ proc setupTcpTransportTracker(): TcpTransportTracker =
result.isLeaked = leakTransport
addTracker(TcpTransportTrackerName, result)
proc connHandler*(self: TcpTransport,
client: StreamTransport,
dir: Direction): Future[Connection] {.async.} =
var observedAddr: MultiAddress = MultiAddress()
proc getObservedAddr(client: StreamTransport): Future[MultiAddress] {.async.} =
try:
observedAddr = MultiAddress.init(client.remoteAddress).tryGet()
return MultiAddress.init(client.remoteAddress).tryGet()
except CatchableError as exc:
trace "Failed to create observedAddr", exc = exc.msg
    if not isNil(client) and not client.closed:
await client.closeWait()
raise exc
proc connHandler*(self: TcpTransport,
client: StreamTransport,
observedAddr: Opt[MultiAddress],
dir: Direction): Future[Connection] {.async.} =
trace "Handling tcp connection", address = $observedAddr,
dir = $dir,
clients = self.clients[Direction.In].len +
@ -222,7 +225,8 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
self.acceptFuts[index] = self.servers[index].accept()
let transp = await finished
return await self.connHandler(transp, Direction.In)
let observedAddr = await getObservedAddr(transp)
return await self.connHandler(transp, Opt.some(observedAddr), Direction.In)
except TransportOsError as exc:
# TODO: it doesn't sound like all OS errors
# can be ignored, we should re-raise those
@ -250,7 +254,8 @@ method dial*(
let transp = await connect(address)
try:
return await self.connHandler(transp, Direction.Out)
let observedAddr = await getObservedAddr(transp)
return await self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
except CatchableError as err:
await transp.closeWait()
raise err

View File

@ -0,0 +1,281 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## Tor transport implementation
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/strformat
import chronos, chronicles, strutils
import stew/[byteutils, endians2, results, objects]
import ../multicodec
import transport,
tcptransport,
../switch,
../builders,
../stream/[lpstream, connection, chronosstream],
../multiaddress,
../upgrademngrs/upgrade
const
IPTcp = mapAnd(IP, mapEq("tcp"))
IPv4Tcp = mapAnd(IP4, mapEq("tcp"))
IPv6Tcp = mapAnd(IP6, mapEq("tcp"))
DnsTcp = mapAnd(DNSANY, mapEq("tcp"))
Socks5ProtocolVersion = byte(5)
NMethods = byte(1)
type
TorTransport* = ref object of Transport
transportAddress: TransportAddress
tcpTransport: TcpTransport
Socks5AuthMethod* {.pure.} = enum
NoAuth = 0
GSSAPI = 1
UsernamePassword = 2
NoAcceptableMethod = 0xff
Socks5RequestCommand* {.pure.} = enum
Connect = 1, Bind = 2, UdpAssoc = 3
Socks5AddressType* {.pure.} = enum
IPv4 = 1, FQDN = 3, IPv6 = 4
Socks5ReplyType* {.pure.} = enum
Succeeded = (0, "Succeeded"), ServerFailure = (1, "Server Failure"),
ConnectionNotAllowed = (2, "Connection Not Allowed"), NetworkUnreachable = (3, "Network Unreachable"),
HostUnreachable = (4, "Host Unreachable"), ConnectionRefused = (5, "Connection Refused"),
TtlExpired = (6, "Ttl Expired"), CommandNotSupported = (7, "Command Not Supported"),
AddressTypeNotSupported = (8, "Address Type Not Supported")
TransportStartError* = object of transport.TransportError
Socks5Error* = object of CatchableError
Socks5AuthFailedError* = object of Socks5Error
Socks5VersionError* = object of Socks5Error
Socks5ServerReplyError* = object of Socks5Error
proc new*(
T: typedesc[TorTransport],
transportAddress: TransportAddress,
flags: set[ServerFlags] = {},
upgrade: Upgrade): T {.public.} =
## Creates a Tor transport
T(
transportAddress: transportAddress,
upgrader: upgrade,
tcpTransport: TcpTransport.new(flags, upgrade))
proc handlesDial(address: MultiAddress): bool {.gcsafe.} =
return Onion3.match(address) or TCP.match(address) or DNSANY.match(address)
proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
return TcpOnion3.match(address)
proc connectToTorServer(
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
let transp = await connect(transportAddress)
try:
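# client greeting per RFC 1928: VER = 5, NMETHODS = 1, METHODS = [NoAuth]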
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
let
serverReply = await transp.read(2)
socks5ProtocolVersion = serverReply[0]
serverSelectedMethod = serverReply[1]
if socks5ProtocolVersion != Socks5ProtocolVersion:
raise newException(Socks5VersionError, "Unsupported socks version")
if serverSelectedMethod != Socks5AuthMethod.NoAuth.byte:
raise newException(Socks5AuthFailedError, "Unsupported auth method")
return transp
except CatchableError as err:
await transp.closeWait()
raise err
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
## The reply format implemented here is specified in
## [RFC 1928, section 5](https://www.rfc-editor.org/rfc/rfc1928#section-5)
## and [RFC 1928, section 6](https://www.rfc-editor.org/rfc/rfc1928#section-6).
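##
## Reply layout (RFC 1928): VER | REP | RSV | ATYP | BND.ADDR | BND.PORT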
let
portNumOctets = 2
ipV4NumOctets = 4
ipV6NumOctets = 16
firstFourOctets = await transp.read(4)
socks5ProtocolVersion = firstFourOctets[0]
serverReply = firstFourOctets[1]
if socks5ProtocolVersion != Socks5ProtocolVersion:
raise newException(Socks5VersionError, "Unsupported socks version")
if serverReply != Socks5ReplyType.Succeeded.byte:
var socks5ReplyType: Socks5ReplyType
if socks5ReplyType.checkedEnumAssign(serverReply):
raise newException(Socks5ServerReplyError, fmt"Server reply error: {socks5ReplyType}")
else:
raise newException(LPError, fmt"Unexpected server reply: {serverReply}")
let atyp = firstFourOctets[3]
case atyp:
of Socks5AddressType.IPv4.byte:
discard await transp.read(ipV4NumOctets + portNumOctets)
of Socks5AddressType.FQDN.byte:
let fqdnNumOctets = await transp.read(1)
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
of Socks5AddressType.IPv6.byte:
discard await transp.read(ipV6NumOctets + portNumOctets)
else:
raise newException(LPError, "Address not supported")
proc parseOnion3(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
var addressArray = ($address).split('/')
if addressArray.len < 3: raise newException(LPError, fmt"Onion address not supported: {address}")
addressArray = addressArray[2].split(':')
if addressArray.len < 2: raise newException(LPError, fmt"Onion address not supported: {address}")
let
addressStr = addressArray[0] & ".onion"
dstAddr = @(uint8(addressStr.len).toBytes()) & addressStr.toBytes()
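# the onion3 protocol argument is the 35-byte address followed by a
# 2-byte big-endian port; in the raw multiaddress buffer the port sits
# after the 2-byte multicodec header and the address, hence bytes 37..38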
dstPort = address.data.buffer[37..38]
return (Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc parseIpTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
let (codec, atyp) =
if IPv4Tcp.match(address):
(multiCodec("ip4"), Socks5AddressType.IPv4.byte)
elif IPv6Tcp.match(address):
(multiCodec("ip6"), Socks5AddressType.IPv6.byte)
else:
raise newException(LPError, fmt"IP address not supported {address}")
let
dstAddr = address[codec].get().protoArgument().get()
dstPort = address[multiCodec("tcp")].get().protoArgument().get()
(atyp, dstAddr, dstPort)
proc parseDnsTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) =
let
dnsAddress = address[multiCodec("dns")].get().protoArgument().get()
dstAddr = @(uint8(dnsAddress.len).toBytes()) & dnsAddress
dstPort = address[multiCodec("tcp")].get().protoArgument().get()
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
let (atyp, dstAddr, dstPort) =
if Onion3.match(address):
parseOnion3(address)
elif IPTcp.match(address):
parseIpTcp(address)
elif DnsTcp.match(address):
parseDnsTcp(address)
else:
raise newException(LPError, fmt"Address not supported: {address}")
let reserved = byte(0)
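# CONNECT request per RFC 1928: VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT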
let request = @[
Socks5ProtocolVersion,
Socks5RequestCommand.Connect.byte,
reserved,
atyp] & dstAddr & dstPort
discard await transp.write(request)
await readServerReply(transp)
method dial*(
self: TorTransport,
hostname: string,
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
## dial a peer
##
if not handlesDial(address):
raise newException(LPError, fmt"Address not supported: {address}")
trace "Dialing remote peer", address = $address
let transp = await connectToTorServer(self.transportAddress)
try:
await dialPeer(transp, address)
return await self.tcpTransport.connHandler(transp, Opt.none(MultiAddress), Direction.Out)
except CatchableError as err:
await transp.closeWait()
raise err
method start*(
self: TorTransport,
addrs: seq[MultiAddress]) {.async.} =
## listen on the transport
##
var listenAddrs: seq[MultiAddress]
var onion3Addrs: seq[MultiAddress]
for i, ma in addrs:
if not handlesStart(ma):
warn "Invalid address detected, skipping!", address = ma
continue
let listenAddress = ma[0..1].get()
listenAddrs.add(listenAddress)
let onion3 = ma[multiCodec("onion3")].get()
onion3Addrs.add(onion3)
if len(listenAddrs) != 0 and len(onion3Addrs) != 0:
await procCall Transport(self).start(onion3Addrs)
await self.tcpTransport.start(listenAddrs)
else:
raise newException(TransportStartError, "Tor transport couldn't start: no supported address was provided.")
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
## accept a new Tor connection
##
let conn = await self.tcpTransport.accept()
conn.observedAddr = Opt.none(MultiAddress)
return conn
method stop*(self: TorTransport) {.async, gcsafe.} =
## stop the transport
##
await procCall Transport(self).stop() # call base
await self.tcpTransport.stop()
method handles*(t: TorTransport, address: MultiAddress): bool {.gcsafe.} =
if procCall Transport(t).handles(address):
return handlesDial(address) or handlesStart(address)
type
TorSwitch* = ref object of Switch
proc new*(
T: typedesc[TorSwitch],
torServer: TransportAddress,
rng: ref HmacDrbgContext,
addresses: seq[MultiAddress] = @[],
flags: set[ServerFlags] = {}): TorSwitch
{.raises: [LPError, Defect], public.} =
var builder = SwitchBuilder.new()
.withRng(rng)
.withTransport(proc(upgr: Upgrade): Transport = TorTransport.new(torServer, flags, upgr))
if addresses.len != 0:
builder = builder.withAddresses(addresses)
let switch = builder.withMplex()
.withNoise()
.build()
let torSwitch = T(
peerInfo: switch.peerInfo,
ms: switch.ms,
transports: switch.transports,
connManager: switch.connManager,
peerStore: switch.peerStore,
dialer: Dialer.new(switch.peerInfo.peerId, switch.connManager, switch.transports, switch.ms, nil),
nameResolver: nil)
torSwitch.connManager.peerStore = switch.peerStore
return torSwitch
method addTransport*(s: TorSwitch, t: Transport) =
doAssert(false, "not implemented!")
method getTorTransport*(s: TorSwitch): Transport {.base.} =
return s.transports[0]
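A minimal client-side sketch of the API above (the daemon address below is an assumption; a real setup needs a Tor service listening on its SOCKS5 port):
let rng = newRng()
let torServer = initTAddress("127.0.0.1", 9050.Port) # assumed local Tor SOCKS5 endpoint
let client = TorSwitch.new(torServer = torServer, rng = rng, flags = {ReuseAddr})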

View File

@@ -15,6 +15,7 @@ else:
{.push raises: [].}
import std/[sequtils]
import stew/results
import chronos, chronicles
import transport,
../errors,
@@ -31,7 +32,7 @@ import transport,
logScope:
topics = "libp2p wstransport"
export transport, websock
export transport, websock, results
const
WsTransportTrackerName* = "libp2p.wstransport"
@@ -45,8 +46,8 @@ type
proc new*(T: type WsStream,
session: WSSession,
dir: Direction,
timeout = 10.minutes,
observedAddr: MultiAddress = MultiAddress()): T =
observedAddr: Opt[MultiAddress],
timeout = 10.minutes): T =
let stream = T(
session: session,
@@ -221,8 +222,7 @@ proc connHandler(self: WsTransport,
await stream.close()
raise exc
let conn = WsStream.new(stream, dir)
conn.observedAddr = observedAddr
let conn = WsStream.new(stream, dir, Opt.some(observedAddr))
self.connections[dir].add(conn)
proc onClose() {.async.} =

View File

@@ -7,6 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push gcsafe.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:

View File

@@ -0,0 +1,73 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import sequtils
type
OffsettedSeq*[T] = object
s*: seq[T]
offset*: int
proc initOffsettedSeq*[T](offset: int = 0): OffsettedSeq[T] =
OffsettedSeq[T](s: newSeq[T](), offset: offset)
proc all*[T](o: OffsettedSeq[T], pred: proc (x: T): bool): bool =
o.s.all(pred)
proc any*[T](o: OffsettedSeq[T], pred: proc (x: T): bool): bool =
o.s.any(pred)
proc apply*[T](o: OffsettedSeq[T], op: proc (x: T)) =
o.s.apply(op)
proc apply*[T](o: var OffsettedSeq[T], op: proc (x: T): T) =
o.s.apply(op)
proc apply*[T](o: var OffsettedSeq[T], op: proc (x: var T)) =
o.s.apply(op)
func count*[T](o: OffsettedSeq[T], x: T): int =
o.s.count(x)
proc flushIf*[T](o: var OffsettedSeq[T], pred: proc (x: T): bool) =
var i = 0
for e in o.s:
if not pred(e): break
i.inc()
if i > 0:
o.s.delete(0..<i)
o.offset.inc(i)
template flushIfIt*(o, pred: untyped) =
var i = 0
for it {.inject.} in o.s:
if not pred: break
i.inc()
if i > 0:
when (NimMajor, NimMinor) < (1, 4):
o.s.delete(0, i - 1)
else:
o.s.delete(0..<i)
o.offset.inc(i)
proc add*[T](o: var OffsettedSeq[T], v: T) =
o.s.add(v)
proc `[]`*[T](o: var OffsettedSeq[T], index: int): var T =
o.s[index - o.offset]
iterator items*[T](o: OffsettedSeq[T]): T =
for e in o.s:
yield e
proc high*[T](o: OffsettedSeq[T]): int =
o.s.high + o.offset
proc low*[T](o: OffsettedSeq[T]): int =
o.s.low + o.offset
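A short usage sketch (illustrative, not part of the commit): the offset keeps logical indices stable across flushes.
var q = initOffsettedSeq[int](1)
q.add(10); q.add(20); q.add(30) # stored at logical indices 1, 2, 3
q.flushIfIt(it < 30)            # drops 10 and 20; offset advances to 3
assert q[3] == 30               # the surviving element keeps its index
assert q.high == 3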

View File

@@ -39,21 +39,9 @@ proc len*(vb: VBuffer): int =
result = len(vb.buffer) - vb.offset
doAssert(result >= 0)
proc isLiteral[T](s: seq[T]): bool {.inline.} =
when defined(gcOrc) or defined(gcArc):
false
else:
type
SeqHeader = object
length, reserved: int
(cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0
proc initVBuffer*(data: seq[byte], offset = 0): VBuffer =
## Initialize VBuffer with shallow copy of ``data``.
if isLiteral(data):
result.buffer = data
else:
shallowCopy(result.buffer, data)
result.buffer = data
result.offset = offset
proc initVBuffer*(data: openArray[byte], offset = 0): VBuffer =

View File

@@ -3,7 +3,9 @@ site_name: nim-libp2p
repo_url: https://github.com/status-im/nim-libp2p
repo_name: status-im/nim-libp2p
site_url: https://status-im.github.io/nim-libp2p/docs
edit_uri: edit/unstable/examples/
# Can't find a way to point the edit link to the .nim
# sources instead of the generated .md
edit_uri: ''
docs_dir: examples
@@ -38,8 +40,13 @@ theme:
name: Switch to light mode
nav:
- Introduction: README.md
- Tutorials:
- 'Part I: Simple connection': tutorial_1_connect.md
- 'Part II: Custom protocol': tutorial_2_customproto.md
- 'Introduction': README.md
- 'Simple connection': tutorial_1_connect.md
- 'Create a custom protocol': tutorial_2_customproto.md
- 'Protobuf': tutorial_3_protobuf.md
- 'GossipSub': tutorial_4_gossipsub.md
- 'Discovery Manager': tutorial_5_discovery.md
- 'Game': tutorial_6_game.md
- 'Circuit Relay': circuitrelay.md
- Reference: '/nim-libp2p/master/libp2p.html'

View File

@@ -1,7 +1,7 @@
{.used.}
import sequtils
import chronos, stew/byteutils
import chronos, stew/[byteutils, results]
import ../libp2p/[stream/connection,
transports/transport,
upgrademngrs/upgrade,
@@ -13,36 +13,37 @@ import ./helpers
type TransportProvider* = proc(): Transport {.gcsafe, raises: [Defect].}
proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
suite name & " common tests":
teardown:
checkTrackers()
template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string = "") =
block:
let transpProvider = prov
asyncTest "can handle local address":
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = transpProvider()
await transport1.start(ma)
check transport1.handles(transport1.addrs[0])
await transport1.stop()
asyncTest "e2e: handle observedAddr":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let transport2 = prov()
let transport2 = transpProvider()
proc acceptHandler() {.async, gcsafe.} =
let conn = await transport1.accept()
check transport1.handles(conn.observedAddr)
if conn.observedAddr.isSome():
check transport1.handles(conn.observedAddr.get())
await conn.close()
let handlerWait = acceptHandler()
let conn = await transport2.dial(transport1.addrs[0])
check transport2.handles(conn.observedAddr)
if conn.observedAddr.isSome():
check transport2.handles(conn.observedAddr.get())
await conn.close() #for some protocols, closing requires actively reading, so we must close here
@@ -54,9 +55,9 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # when no issues will not wait that long!
asyncTest "e2e: handle write":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
@@ -66,7 +67,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
let handlerWait = acceptHandler()
let transport2 = prov()
let transport2 = transpProvider()
let conn = await transport2.dial(transport1.addrs[0])
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
@@ -82,8 +83,8 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # when no issues will not wait that long!
asyncTest "e2e: handle read":
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
@@ -95,7 +96,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
let handlerWait = acceptHandler()
let transport2 = prov()
let transport2 = transpProvider()
let conn = await transport2.dial(transport1.addrs[0])
await conn.write("Hello!")
@@ -108,12 +109,12 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))
asyncTest "e2e: handle dial cancellation":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let transport2 = prov()
let transport2 = transpProvider()
let cancellation = transport2.dial(transport1.addrs[0])
await cancellation.cancelAndWait()
@@ -125,9 +126,9 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))
asyncTest "e2e: handle accept cancellation":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let acceptHandler = transport1.accept()
@@ -141,11 +142,11 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
# this randomly locks the Windows CI job
skip()
return
let addrs = @[MultiAddress.init(ma).tryGet(),
MultiAddress.init(ma).tryGet()]
let addrs = @[MultiAddress.init(ma1).tryGet(),
MultiAddress.init(if ma2 == "": ma1 else: ma2).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(addrs)
proc acceptHandler() {.async, gcsafe.} =
@@ -190,12 +191,12 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await transport1.stop()
asyncTest "e2e: stopping transport kills connections":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let transport2 = prov()
let transport2 = transpProvider()
let acceptHandler = transport1.accept()
let conn = await transport2.dial(transport1.addrs[0])
@@ -210,8 +211,8 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
check conn.closed()
asyncTest "read or write on closed connection":
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =

View File

@@ -105,16 +105,15 @@ proc bridgedConnections*: (Connection, Connection) =
return (connA, connB)
proc checkExpiringInternal(cond: proc(): bool {.raises: [Defect].} ): Future[bool] {.async, gcsafe.} =
{.gcsafe.}:
let start = Moment.now()
while true:
if Moment.now() > (start + chronos.seconds(5)):
return false
elif cond():
return true
else:
await sleepAsync(1.millis)
proc checkExpiringInternal(cond: proc(): bool {.raises: [Defect], gcsafe.} ): Future[bool] {.async, gcsafe.} =
let start = Moment.now()
while true:
if Moment.now() > (start + chronos.seconds(5)):
return false
elif cond():
return true
else:
await sleepAsync(1.millis)
template checkExpiring*(code: untyped): untyped =
checkExpiringInternal(proc(): bool = code)
check await checkExpiringInternal(proc(): bool = code)

View File

@@ -351,7 +351,7 @@ suite "FloodSub":
check (await smallNode[0].publish("foo", smallMessage1)) > 0
check (await bigNode[0].publish("foo", smallMessage2)) > 0
check (await checkExpiring(messageReceived == 2)) == true
checkExpiring: messageReceived == 2
check (await smallNode[0].publish("foo", bigMessage)) > 0
check (await bigNode[0].publish("foo", bigMessage)) > 0

View File

@@ -9,7 +9,7 @@
{.used.}
import sequtils, options, tables, sets
import sequtils, options, tables, sets, sugar
import chronos, stew/byteutils
import chronicles
import utils, ../../libp2p/[errors,
@@ -29,26 +29,6 @@ import ../helpers
proc `$`(peer: PubSubPeer): string = shortLog(peer)
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
let fsub = GossipSub(sender)
# this is for testing purposes only
# peers can be inside `mesh` and `fanout`, not just `gossipsub`
while (not fsub.gossipsub.hasKey(key) or
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.mesh.hasKey(key) or
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.fanout.hasKey(key) or
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
trace "waitSub sleeping..."
# await
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
template tryPublish(call: untyped, require: int, wait = 10.milliseconds, timeout = 5.seconds): untyped =
var
expiration = Moment.now() + timeout
@@ -336,11 +316,10 @@ suite "GossipSub":
let gossip1 = GossipSub(nodes[0])
let gossip2 = GossipSub(nodes[1])
check await checkExpiring(
checkExpiring:
"foobar" in gossip2.topics and
"foobar" in gossip1.gossipsub and
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
)
await allFuturesThrowing(
nodes[0].switch.stop(),
@@ -483,7 +462,7 @@ suite "GossipSub":
nodes[0].unsubscribe("foobar", handler)
let gsNode = GossipSub(nodes[1])
check await checkExpiring(gsNode.mesh.getOrDefault("foobar").len == 0)
checkExpiring: gsNode.mesh.getOrDefault("foobar").len == 0
nodes[0].subscribe("foobar", handler)
@@ -602,7 +581,7 @@ suite "GossipSub":
gossip1.seen = TimedCache[MessageId].init()
gossip3.seen = TimedCache[MessageId].init()
let msgId = toSeq(gossip2.validationSeen.keys)[0]
check await checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
result = ValidationResult.Accept
bFinished.complete()
@@ -690,14 +669,14 @@ suite "GossipSub":
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSub(nodes[0], dialer, "foobar")
await waitSubGraph(nodes, "foobar")
tryPublish await wait(nodes[0].publish("foobar",
toBytes("from node " &
$nodes[0].peerInfo.peerId)),
1.minutes), 1
await wait(seenFut, 2.minutes)
await wait(seenFut, 1.minutes)
check: seen.len >= runs
for k, v in seen.pairs:
check: v >= 1
@@ -726,10 +705,11 @@ suite "GossipSub":
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0..<nodes.len:
let dialer = nodes[i]
var handler: TopicHandler
closureScope:
capture dialer, i:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
if peerName notin seen:
@@ -740,14 +720,14 @@ suite "GossipSub":
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSub(nodes[0], dialer, "foobar")
await waitSubGraph(nodes, "foobar")
tryPublish await wait(nodes[0].publish("foobar",
toBytes("from node " &
$nodes[0].peerInfo.peerId)),
1.minutes), 1
await wait(seenFut, 5.minutes)
await wait(seenFut, 60.seconds)
check: seen.len >= runs
for k, v in seen.pairs:
check: v >= 1

View File

@@ -10,8 +10,7 @@
{.used.}
import sequtils, options, tables, sets
import chronos, stew/byteutils
import chronicles
import chronos, stew/byteutils, chronicles
import utils, ../../libp2p/[errors,
peerid,
peerinfo,
@@ -25,26 +24,6 @@ import utils, ../../libp2p/[errors,
protocols/pubsub/rpc/messages]
import ../helpers
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
let fsub = GossipSub(sender)
# this is for testing purposes only
# peers can be inside `mesh` and `fanout`, not just `gossipsub`
while (not fsub.gossipsub.hasKey(key) or
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.mesh.hasKey(key) or
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.fanout.hasKey(key) or
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
trace "waitSub sleeping..."
# await
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
template tryPublish(call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds): untyped =
var
expiration = Moment.now() + timeout
@@ -269,7 +248,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipsSub peers disconnections mechanics":
asyncTest "GossipSub peers disconnections mechanics":
var runs = 10
let
@@ -294,7 +273,8 @@ suite "GossipSub":
seenFut.complete()
dialer.subscribe("foobar", handler)
await waitSub(nodes[0], dialer, "foobar")
await waitSubGraph(nodes, "foobar")
# ensure peer stats are stored and kept properly
check:

View File

@@ -4,7 +4,7 @@ const
libp2p_pubsub_verify {.booldefine.} = true
libp2p_pubsub_anonymize {.booldefine.} = false
import hashes, random, tables
import hashes, random, tables, sets, sequtils
import chronos, stew/[byteutils, results]
import ../../libp2p/[builders,
protocols/pubsub/errors,
@@ -13,6 +13,7 @@ import ../../libp2p/[builders,
protocols/pubsub/floodsub,
protocols/pubsub/rpc/messages,
protocols/secure/secure]
import chronicles
export builders
@@ -102,3 +103,43 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
if dialer.peerInfo.peerId != node.peerInfo.peerId:
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialed.add(node.peerInfo.peerId)
proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
let fsub = GossipSub(sender)
# this is for testing purposes only
# peers can be inside `mesh` and `fanout`, not just `gossipsub`
while (not fsub.gossipsub.hasKey(key) or
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.mesh.hasKey(key) or
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.fanout.hasKey(key) or
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
trace "waitSub sleeping..."
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
let timeout = Moment.now() + 5.seconds
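# build each node's local mesh view and walk the graph from the first
# node; the mesh is considered converged once every node is reachable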
while true:
var
nodesMesh: Table[PeerId, seq[PeerId]]
seen: HashSet[PeerId]
for n in nodes:
nodesMesh[n.peerInfo.peerId] = toSeq(GossipSub(n).mesh.getOrDefault(key).items()).mapIt(it.peerId)
proc explore(p: PeerId) =
if p in seen: return
seen.incl(p)
for peer in nodesMesh.getOrDefault(p):
explore(peer)
explore(nodes[0].peerInfo.peerId)
if seen.len == nodes.len: return
trace "waitSubGraph sleeping..."
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSubGraph timeout!"

tests/stubs.nim Normal file
View File

@@ -0,0 +1,86 @@
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables
import chronos, stew/[byteutils, endians2, shims/net]
import ../libp2p/[stream/connection,
protocols/connectivity/relay/utils,
transports/tcptransport,
transports/tortransport,
upgrademngrs/upgrade,
multiaddress,
errors,
builders]
type
TorServerStub* = ref object of RootObj
tcpTransport: TcpTransport
addrTable: Table[string, string]
proc new*(
T: typedesc[TorServerStub]): T {.public.} =
T(
tcpTransport: TcpTransport.new(flags = {ReuseAddr}, upgrade = Upgrade()),
addrTable: initTable[string, string]())
proc registerAddr*(self: TorServerStub, key: string, val: string) =
self.addrTable[key] = val
proc start*(self: TorServerStub, address: TransportAddress) {.async.} =
let ma = @[MultiAddress.init(address).tryGet()]
await self.tcpTransport.start(ma)
var msg = newSeq[byte](3)
while self.tcpTransport.running:
let connSrc = await self.tcpTransport.accept()
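# read the client greeting (VER, NMETHODS, METHOD) and accept NoAuth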
await connSrc.readExactly(addr msg[0], 3)
await connSrc.write(@[05'u8, 00])
msg = newSeq[byte](4)
await connSrc.readExactly(addr msg[0], 4)
let atyp = msg[3]
let address = case atyp:
of Socks5AddressType.IPv4.byte:
let n = 4 + 2 # +2 bytes for the port
msg = newSeq[byte](n)
await connSrc.readExactly(addr msg[0], n)
var ip: array[4, byte]
for i, e in msg[0..^3]:
ip[i] = e
$(ipv4(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
of Socks5AddressType.IPv6.byte:
let n = 16 + 2 # +2 bytes for the port
msg = newSeq[byte](n)
await connSrc.readExactly(addr msg[0], n)
var ip: array[16, byte]
for i, e in msg[0..^3]:
ip[i] = e
$(ipv6(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
of Socks5AddressType.FQDN.byte:
await connSrc.readExactly(addr msg[0], 1)
let n = int(uint8.fromBytes(msg[0..0])) + 2 # +2 bytes for the port
msg = newSeq[byte](n)
await connSrc.readExactly(addr msg[0], n)
string.fromBytes(msg[0..^3]) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
else:
raise newException(LPError, "Address not supported")
let tcpIpAddr = self.addrTable[$(address)]
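# success reply: VER = 5, REP = succeeded, RSV, ATYP = IPv4, bind address 0.0.0.0:0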
await connSrc.write(@[05'u8, 00, 00, 01, 00, 00, 00, 00, 00, 00])
let connDst = await self.tcpTransport.dial("", MultiAddress.init(tcpIpAddr).tryGet())
await bridge(connSrc, connDst)
await allFutures(connSrc.close(), connDst.close())
proc stop*(self: TorServerStub) {.async.} =
await self.tcpTransport.stop()

View File

@@ -1,4 +1,5 @@
import sequtils
import stew/results
import chronos
import ../libp2p/[connmanager,
stream/connection,
@@ -9,6 +10,9 @@ import ../libp2p/[connmanager,
import helpers
proc getConnection(peerId: PeerId, dir: Direction = Direction.In): Connection =
return Connection.new(peerId, dir, Opt.none(MultiAddress))
type
TestMuxer = ref object of Muxer
peerId: PeerId
@@ -18,7 +22,7 @@ method newStream*(
name: string = "",
lazy: bool = false):
Future[Connection] {.async, gcsafe.} =
result = Connection.new(m.peerId, Direction.Out)
result = getConnection(m.peerId, Direction.Out)
suite "Connection Manager":
teardown:
@@ -27,7 +31,7 @@ suite "Connection Manager":
asyncTest "add and retrieve a connection":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
connMngr.storeConn(conn)
check conn in connMngr
@@ -41,7 +45,7 @@ suite "Connection Manager":
asyncTest "shouldn't allow a closed connection":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
await conn.close()
expect CatchableError:
@@ -52,7 +56,7 @@ suite "Connection Manager":
asyncTest "shouldn't allow an EOFed connection":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
conn.isEof = true
expect CatchableError:
@@ -64,7 +68,7 @@ suite "Connection Manager":
asyncTest "add and retrieve a muxer":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
let muxer = new Muxer
muxer.connection = conn
@@ -80,7 +84,7 @@ suite "Connection Manager":
asyncTest "shouldn't allow a muxer for an untracked connection":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
let muxer = new Muxer
muxer.connection = conn
@@ -94,8 +98,8 @@ suite "Connection Manager":
asyncTest "get conn with direction":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn1 = Connection.new(peerId, Direction.Out)
let conn2 = Connection.new(peerId, Direction.In)
let conn1 = getConnection(peerId, Direction.Out)
let conn2 = getConnection(peerId)
connMngr.storeConn(conn1)
connMngr.storeConn(conn2)
@@ -114,7 +118,7 @@ suite "Connection Manager":
asyncTest "get muxed stream for peer":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
let muxer = new TestMuxer
muxer.peerId = peerId
@@ -134,7 +138,7 @@ suite "Connection Manager":
asyncTest "get stream from directed connection":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
let muxer = new TestMuxer
muxer.peerId = peerId
@@ -155,7 +159,7 @@ suite "Connection Manager":
asyncTest "get stream from any connection":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
let muxer = new TestMuxer
muxer.peerId = peerId
@@ -175,11 +179,11 @@ suite "Connection Manager":
let connMngr = ConnManager.new(maxConnsPerPeer = 1)
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
connMngr.storeConn(Connection.new(peerId, Direction.In))
connMngr.storeConn(getConnection(peerId))
let conns = @[
Connection.new(peerId, Direction.In),
Connection.new(peerId, Direction.In)]
getConnection(peerId),
getConnection(peerId)]
expect TooManyConnectionsError:
connMngr.storeConn(conns[0])
@@ -193,7 +197,7 @@ suite "Connection Manager":
asyncTest "cleanup on connection close":
let connMngr = ConnManager.new()
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
let conn = Connection.new(peerId, Direction.In)
let conn = getConnection(peerId)
let muxer = new Muxer
muxer.connection = conn
@@ -220,7 +224,7 @@ suite "Connection Manager":
Direction.In else:
Direction.Out
let conn = Connection.new(peerId, dir)
let conn = getConnection(peerId, dir)
let muxer = new Muxer
muxer.connection = conn
@@ -353,7 +357,7 @@ suite "Connection Manager":
let slot = await ((connMngr.getOutgoingSlot()).wait(10.millis))
let conn =
Connection.new(
getConnection(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
Direction.In)

tests/testdiscovery.nim Normal file
View File

@@ -0,0 +1,51 @@
{.used.}
import options, chronos, sets
import stew/byteutils
import ../libp2p/[protocols/rendezvous,
switch,
builders,
discovery/discoverymngr,
discovery/rendezvousinterface]
import ./helpers
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
.withNoise()
.withRendezVous(rdv)
.build()
suite "Discovery":
teardown:
checkTrackers()
asyncTest "RendezVous test":
let
rdvA = RendezVous.new()
rdvB = RendezVous.new()
clientA = createSwitch(rdvA)
clientB = createSwitch(rdvB)
remoteNode = createSwitch()
dmA = DiscoveryManager()
dmB = DiscoveryManager()
dmA.add(RendezVousInterface.new(rdvA, ttr = 500.milliseconds))
dmB.add(RendezVousInterface.new(rdvB))
await allFutures(clientA.start(), clientB.start(), remoteNode.start())
await clientB.connect(remoteNode.peerInfo.peerId, remoteNode.peerInfo.addrs)
await clientA.connect(remoteNode.peerInfo.peerId, remoteNode.peerInfo.addrs)
dmB.advertise(RdvNamespace("foo"))
let
query = dmA.request(RdvNamespace("foo"))
res = await query.getPeer()
check:
res{PeerId}.get() == clientB.peerInfo.peerId
res[PeerId] == clientB.peerInfo.peerId
res.getAll(PeerId) == @[clientB.peerInfo.peerId]
toHashSet(res.getAll(MultiAddress)) == toHashSet(clientB.peerInfo.addrs)
await allFutures(clientA.stop(), clientB.stop(), remoteNode.stop())

View File

@@ -202,8 +202,8 @@ suite "Identify":
await identifyPush2.push(switch2.peerInfo, conn)
check await checkExpiring(switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols)
check await checkExpiring(switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs)
checkExpiring: switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols
checkExpiring: switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
await closeAll()

View File

@@ -816,7 +816,7 @@ suite "Mplex":
for i in 0..9:
dialStreams.add((await mplexDial.newStream()))
check await checkExpiring(listenStreams.len == 10 and dialStreams.len == 10)
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
await mplexListen.close()
await allFuturesThrowing(
@@ -862,7 +862,7 @@ suite "Mplex":
for i in 0..9:
dialStreams.add((await mplexDial.newStream()))
check await checkExpiring(listenStreams.len == 10 and dialStreams.len == 10)
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
mplexHandle.cancel()
await allFuturesThrowing(
@@ -905,7 +905,7 @@ suite "Mplex":
for i in 0..9:
dialStreams.add((await mplexDial.newStream()))
check await checkExpiring(listenStreams.len == 10 and dialStreams.len == 10)
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
await conn.close()
await allFuturesThrowing(
@@ -951,7 +951,7 @@ suite "Mplex":
for i in 0..9:
dialStreams.add((await mplexDial.newStream()))
check await checkExpiring(listenStreams.len == 10 and dialStreams.len == 10)
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
await listenConn.closeWithEOF()
await allFuturesThrowing(

View File

@@ -386,6 +386,11 @@ suite "MultiAddress test suite":
let ma = MultiAddress.init("/ip4/0.0.0.0/tcp/0/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/p2p-circuit/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSuNEXT/unix/stdio/").get()
check:
$ma[0..0].get() == "/ip4/0.0.0.0"
$ma[^1].get() == "/unix/stdio"
ma[-100].isErr()
ma[100].isErr()
ma[^100].isErr()
ma[^0].isErr()
$ma[0..1].get() == "/ip4/0.0.0.0/tcp/0"
$ma[1..2].get() == "/tcp/0/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"
$ma[^3..^1].get() == "/p2p-circuit/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSuNEXT/unix/stdio"

View File

@@ -139,7 +139,18 @@ suite "Name resolving":
asyncTest "dnsaddr infinite recursion":
resolver.txtResponses["_dnsaddr.bootstrap.libp2p.io"] = @["dnsaddr=/dnsaddr/bootstrap.libp2p.io"]
check testOne("/dnsaddr/bootstrap.libp2p.io/", "/dnsaddr/bootstrap.libp2p.io/")
check testOne("/dnsaddr/bootstrap.libp2p.io/", newSeq[string]())
test "getHostname":
check:
MultiAddress.init("/dnsaddr/bootstrap.libp2p.io/").tryGet().getHostname == "bootstrap.libp2p.io"
MultiAddress.init("").tryGet().getHostname == ""
MultiAddress.init("/ip4/147.75.69.143/tcp/4001/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN").tryGet().getHostname == "147.75.69.143"
MultiAddress.init("/ip6/2604:1380:1000:6000::1/tcp/4001/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN").tryGet().getHostname == "2604:1380:1000:6000::1"
MultiAddress.init("/dns/localhost/udp/0").tryGet().getHostname == "localhost"
MultiAddress.init("/dns4/hello.com/udp/0").tryGet().getHostname == "hello.com"
MultiAddress.init("/dns6/hello.com/udp/0").tryGet().getHostname == "hello.com"
MultiAddress.init("/wss/").tryGet().getHostname == ""
suite "DNS Resolving":
teardown:
@@ -171,7 +182,7 @@ suite "Name resolving":
# The test
var dnsresolver = DnsResolver.new(@[server.localAddress])
check await(dnsresolver.resolveIp("status.im", 0.Port, Domain.AF_UNSPEC)) ==
mapIt(
@["104.22.24.181:0", "172.67.10.161:0", "104.22.25.181:0",
@@ -209,7 +220,7 @@ suite "Name resolving":
# The test
var dnsresolver = DnsResolver.new(@[unresponsiveServer.localAddress, server.localAddress])
check await(dnsresolver.resolveIp("status.im", 0.Port, Domain.AF_INET)) ==
mapIt(@["104.22.24.181:0", "172.67.10.161:0", "104.22.25.181:0"], initTAddress(it))

View File

@@ -23,6 +23,7 @@ import testmultibase,
testrouting_record
import testtcptransport,
testtortransport,
testnameresolve,
testwstransport,
testmultistream,
@@ -37,5 +38,7 @@ import testtcptransport,
testmplex,
testrelayv1,
testrelayv2,
testrendezvous,
testdiscovery,
testyamux,
testautonat

View File

@@ -294,7 +294,7 @@ suite "Noise":
(switch2, peerInfo2) = createSwitch(ma2, true, true) # secio, we want to fail
await switch1.start()
await switch2.start()
expect(UpgradeFailedError):
expect(DialFailedError):
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
await allFuturesThrowing(

tests/testrendezvous.nim Normal file
View File

@@ -0,0 +1,125 @@
{.used.}
import options, sequtils, strutils
import stew/byteutils, chronos
import ../libp2p/[protocols/rendezvous,
switch,
builders]
import ./helpers
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
.withNoise()
.withRendezVous(rdv)
.build()
suite "RendezVous":
teardown:
checkTrackers()
asyncTest "Simple local test":
let
rdv = RendezVous.new()
s = createSwitch(rdv)
await s.start()
let res0 = rdv.requestLocally("empty")
check res0.len == 0
await rdv.advertise("foo")
let res1 = rdv.requestLocally("foo")
check:
res1.len == 1
res1[0] == s.peerInfo.signedPeerRecord.data
let res2 = rdv.requestLocally("bar")
check res2.len == 0
rdv.unsubscribeLocally("foo")
let res3 = rdv.requestLocally("foo")
check res3.len == 0
await s.stop()
asyncTest "Simple remote test":
let
rdv = RendezVous.new()
client = createSwitch(rdv)
remoteSwitch = createSwitch()
await client.start()
await remoteSwitch.start()
await client.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
let res0 = await rdv.request("empty")
check res0.len == 0
await rdv.advertise("foo")
let res1 = await rdv.request("foo")
check:
res1.len == 1
res1[0] == client.peerInfo.signedPeerRecord.data
let res2 = await rdv.request("bar")
check res2.len == 0
await rdv.unsubscribe("foo")
let res3 = await rdv.request("foo")
check res3.len == 0
await allFutures(client.stop(), remoteSwitch.stop())
asyncTest "Harder remote test":
var
rdvSeq: seq[RendezVous] = @[]
clientSeq: seq[Switch] = @[]
remoteSwitch = createSwitch()
for x in 0..10:
rdvSeq.add(RendezVous.new())
clientSeq.add(createSwitch(rdvSeq[^1]))
await remoteSwitch.start()
await allFutures(clientSeq.mapIt(it.start()))
await allFutures(clientSeq.mapIt(remoteSwitch.connect(it.peerInfo.peerId, it.peerInfo.addrs)))
await allFutures(rdvSeq.mapIt(it.advertise("foo")))
var data = clientSeq.mapIt(it.peerInfo.signedPeerRecord.data)
let res1 = await rdvSeq[0].request("foo", 5)
check res1.len == 5
for d in res1:
check d in data
data.keepItIf(it notin res1)
let res2 = await rdvSeq[0].request("foo")
check res2.len == 5
for d in res2:
check d in data
let res3 = await rdvSeq[0].request("foo")
check res3.len == 0
await remoteSwitch.stop()
await allFutures(clientSeq.mapIt(it.stop()))
asyncTest "Simple cookie test":
let
rdvA = RendezVous.new()
rdvB = RendezVous.new()
clientA = createSwitch(rdvA)
clientB = createSwitch(rdvB)
remoteSwitch = createSwitch()
await clientA.start()
await clientB.start()
await remoteSwitch.start()
await clientA.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
await clientB.connect(remoteSwitch.peerInfo.peerId, remoteSwitch.peerInfo.addrs)
await rdvA.advertise("foo")
let res1 = await rdvA.request("foo")
await rdvB.advertise("foo")
let res2 = await rdvA.request("foo")
check:
res2.len == 1
res2[0] == clientB.peerInfo.signedPeerRecord.data
await allFutures(clientA.stop(), clientB.stop(), remoteSwitch.stop())
asyncTest "Various local error":
let
rdv = RendezVous.new()
switch = createSwitch(rdv)
expect RendezVousError: discard await rdv.request("A".repeat(300))
expect RendezVousError: discard await rdv.request("A", -1)
expect RendezVousError: discard await rdv.request("A", 3000)
expect RendezVousError: await rdv.advertise("A".repeat(300))
expect RendezVousError: await rdv.advertise("A", 2.weeks)
expect RendezVousError: await rdv.advertise("A", 5.minutes)

View File

@@ -201,13 +201,25 @@ suite "Switch":
check not switch1.isConnected(switch2.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
asyncTest "e2e connect to peer with unkown PeerId":
asyncTest "e2e connect to peer with unknown PeerId":
let resolver = MockResolver.new()
let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise], nameResolver = resolver)
await switch1.start()
await switch2.start()
# via dnsaddr
resolver.txtResponses["_dnsaddr.test.io"] = @[
"dnsaddr=" & $switch1.peerInfo.addrs[0] & "/p2p/" & $switch1.peerInfo.peerId,
]
check: (await switch2.connect(@[MultiAddress.init("/dnsaddr/test.io/").tryGet()])) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
# via direct ip
check not switch2.isConnected(switch1.peerInfo.peerId)
check: (await switch2.connect(switch1.peerInfo.addrs)) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
await allFuturesThrowing(
@@ -235,14 +247,12 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
check await(checkExpiring((not switch1.isConnected(switch2.peerInfo.peerId))))
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
await sleepAsync(1.seconds)
check:
checkExpiring:
startCounts ==
@[
switch1.connManager.inSema.count, switch1.connManager.outSema.count,
@@ -290,7 +300,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
check await(checkExpiring((not switch1.isConnected(switch2.peerInfo.peerId))))
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -342,7 +352,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
check await(checkExpiring((not switch1.isConnected(switch2.peerInfo.peerId))))
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -393,7 +403,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
check await(checkExpiring((not switch1.isConnected(switch2.peerInfo.peerId))))
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -444,7 +454,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
check await(checkExpiring((not switch1.isConnected(switch2.peerInfo.peerId))))
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -508,8 +518,8 @@ suite "Switch":
check not switch2.isConnected(switch1.peerInfo.peerId)
check not switch3.isConnected(switch1.peerInfo.peerId)
check await(checkExpiring((not switch1.isConnected(switch2.peerInfo.peerId))))
check await(checkExpiring((not switch1.isConnected(switch3.peerInfo.peerId))))
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch3.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -542,7 +552,6 @@ suite "Switch":
await switches[0].disconnect(peerInfo.peerId) # trigger disconnect
of ConnEventKind.Disconnected:
check not switches[0].isConnected(peerInfo.peerId)
await sleepAsync(1.millis)
done.complete()
switches.add(newStandardSwitch(
@@ -559,8 +568,6 @@ suite "Switch":
await onConnect
await done
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
await allFuturesThrowing(
switches.mapIt( it.stop() ))
@@ -613,42 +620,6 @@ suite "Switch":
await allFuturesThrowing(
switches.mapIt( it.stop() ))
# TODO: we should be able to test cancellation
# for most of the steps in the upgrade flow -
# this is just a basic test for dials
asyncTest "e2e canceling dial should not leak":
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
try:
let conn = await transport.accept()
discard await conn.readLp(100)
await conn.close()
except CatchableError:
discard
let handlerWait = acceptHandler()
let switch = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
await switch.start()
var peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()
let connectFut = switch.connect(peerId, transport.addrs)
await sleepAsync(500.millis)
connectFut.cancel()
await handlerWait
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
checkTracker(ChronosStreamTrackerName)
await allFuturesThrowing(
transport.stop(),
switch.stop())
asyncTest "e2e closing remote conn should not leak":
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
@@ -665,7 +636,7 @@ suite "Switch":
await switch.start()
var peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()
expect LPStreamClosedError, LPStreamEOFError:
expect DialFailedError:
await switch.connect(peerId, transport.addrs)
await handlerWait
@@ -704,7 +675,7 @@ suite "Switch":
await allFuturesThrowing(readers)
await switch2.stop() #Otherwise this leaks
check await checkExpiring(not switch1.isConnected(switch2.peerInfo.peerId))
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -994,9 +965,10 @@ suite "Switch":
await srcWsSwitch.start()
resolver.txtResponses["_dnsaddr.test.io"] = @[
"dnsaddr=" & $destSwitch.peerInfo.addrs[0],
"dnsaddr=" & $destSwitch.peerInfo.addrs[1]
"dnsaddr=/dns4/localhost" & $destSwitch.peerInfo.addrs[0][1..^1].tryGet() & "/p2p/" & $destSwitch.peerInfo.peerId,
"dnsaddr=/dns4/localhost" & $destSwitch.peerInfo.addrs[1][1..^1].tryGet()
]
resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
let testAddr = MultiAddress.init("/dnsaddr/test.io/").tryGet()

View File

@@ -125,7 +125,8 @@ suite "TCP transport":
server.close()
await server.join()
proc transProvider(): Transport = TcpTransport.new(upgrade = Upgrade())
commonTransportTest(
"TcpTransport",
proc (): Transport = TcpTransport.new(upgrade = Upgrade()),
transProvider,
"/ip4/0.0.0.0/tcp/0")

tests/testtortransport.nim Normal file
View File

@@ -0,0 +1,143 @@
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables
import chronos, stew/[byteutils]
import ../libp2p/[stream/connection,
transports/tcptransport,
transports/tortransport,
upgrademngrs/upgrade,
multiaddress,
builders]
import ./helpers, ./stubs, ./commontransport
const torServer = initTAddress("127.0.0.1", 9050.Port)
var stub: TorServerStub
var startFut: Future[void]
suite "Tor transport":
setup:
stub = TorServerStub.new()
stub.registerAddr("127.0.0.1:8080", "/ip4/127.0.0.1/tcp/8080")
stub.registerAddr("libp2p.nim:8080", "/ip4/127.0.0.1/tcp/8080")
stub.registerAddr("::1:8080", "/ip6/::1/tcp/8080")
stub.registerAddr("a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcad.onion:80", "/ip4/127.0.0.1/tcp/8080")
stub.registerAddr("a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcae.onion:81", "/ip4/127.0.0.1/tcp/8081")
startFut = stub.start(torServer)
teardown:
waitFor startFut.cancelAndWait()
waitFor stub.stop()
checkTrackers()
proc test(listenAddr: string, dialAddr: string) {.async.} =
let server = TcpTransport.new({ReuseAddr}, Upgrade())
let ma2 = @[MultiAddress.init(listenAddr).tryGet()]
await server.start(ma2)
proc runClient() {.async.} =
let client = TorTransport.new(transportAddress = torServer, upgrade = Upgrade())
let conn = await client.dial("", MultiAddress.init(dialAddr).tryGet())
await conn.write("client")
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
await conn.close()
check string.fromBytes(resp) == "server"
await client.stop()
proc serverAcceptHandler() {.async, gcsafe.} =
let conn = await server.accept()
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
check string.fromBytes(resp) == "client"
await conn.write("server")
await conn.close()
await server.stop()
asyncSpawn serverAcceptHandler()
await runClient()
asyncTest "test start and dial using ipv4":
await test("/ip4/127.0.0.1/tcp/8080", "/ip4/127.0.0.1/tcp/8080")
asyncTest "test start and dial using ipv6":
await test("/ip6/::1/tcp/8080", "/ip6/::1/tcp/8080")
asyncTest "test start and dial using dns":
await test("/ip4/127.0.0.1/tcp/8080", "/dns/libp2p.nim/tcp/8080")
asyncTest "test start and dial usion onion3 and builder":
const TestCodec = "/test/proto/1.0.0" # custom protocol string identifier
type
TestProto = ref object of LPProtocol # declare a custom protocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
check string.fromBytes(resp) == "client"
await conn.write("server")
# We must close the connection ourselves when we're done with it
await conn.close()
return T(codecs: @[TestCodec], handler: handle)
let rng = newRng()
let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/8080/onion3/a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcad:80").tryGet()
let serverSwitch = TorSwitch.new(torServer, rng, @[ma], {ReuseAddr})
# setup the custom proto
let testProto = TestProto.new()
serverSwitch.mount(testProto)
await serverSwitch.start()
let serverPeerId = serverSwitch.peerInfo.peerId
let serverAddress = serverSwitch.peerInfo.addrs
proc startClient() {.async.} =
let clientSwitch = TorSwitch.new(torServer = torServer, rng = rng, flags = {ReuseAddr})
let conn = await clientSwitch.dial(serverPeerId, serverAddress, TestCodec)
await conn.write("client")
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
check string.fromBytes(resp) == "server"
await conn.close()
await clientSwitch.stop()
await startClient()
await serverSwitch.stop()
test "It's not possible to add another transport in TorSwitch":
when (NimMajor, NimMinor, NimPatch) < (1, 4, 0):
type AssertionDefect = AssertionError
let torSwitch = TorSwitch.new(torServer = torServer, rng = rng, flags = {ReuseAddr})
expect(AssertionDefect):
torSwitch.addTransport(TcpTransport.new(upgrade = Upgrade()))
waitFor torSwitch.stop()
proc transProvider(): Transport =
TorTransport.new(torServer, {ReuseAddr}, Upgrade())
commonTransportTest(
transProvider,
"/ip4/127.0.0.1/tcp/8080/onion3/a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcad:80",
"/ip4/127.0.0.1/tcp/8081/onion3/a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcae:81")

View File

@@ -55,14 +55,13 @@ suite "WebSocket transport":
teardown:
checkTrackers()
commonTransportTest(
"WebSocket",
proc (): Transport = WsTransport.new(Upgrade()),
"/ip4/0.0.0.0/tcp/0/ws")
proc wsTranspProvider(): Transport = WsTransport.new(Upgrade())
commonTransportTest(
"WebSocket Secure",
(proc (): Transport {.gcsafe.} =
wsTranspProvider,
"/ip4/0.0.0.0/tcp/0/ws")
proc wsSecureTranspProvider(): Transport {.gcsafe.} =
try:
return WsTransport.new(
Upgrade(),
@@ -70,7 +69,7 @@
TLSCertificate.init(SecureCert),
{TLSFlags.NoVerifyHost, TLSFlags.NoVerifyServerName})
except Exception: check(false)
),
commonTransportTest(
wsSecureTranspProvider,
"/ip4/0.0.0.0/tcp/0/wss")
asyncTest "Hostname verification":

View File

@@ -0,0 +1,29 @@
import os, strutils
let contents =
if paramCount() > 0:
readFile(paramStr(1))
else:
stdin.readAll()
var code = ""
for line in contents.splitLines(true):
let
stripped = line.strip()
isMarkdown = stripped.startsWith("##")
if isMarkdown:
if code.strip.len > 0:
echo "```nim"
echo code.strip(leading = false)
echo "```"
code = ""
echo(if stripped.len > 3: stripped[3..^1]
else: "")
else:
code &= line
if code.strip.len > 0:
echo ""
echo "```nim"
echo code
echo "```"