commit 32ede6da3c

@@ -0,0 +1,170 @@
name: Daily

on:
  schedule:
    - cron: "30 6 * * *"
  workflow_dispatch:

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        target:
          - os: linux
            cpu: amd64
          - os: linux
            cpu: i386
          - os: macos
            cpu: amd64
          #- os: windows
          #  cpu: i386
          - os: windows
            cpu: amd64
        branch: [version-1-2, version-1-4, version-1-6, devel]
        include:
          - target:
              os: linux
            builder: ubuntu-20.04
          - target:
              os: macos
            builder: macos-10.15
          - target:
              os: windows
            builder: windows-2019

    defaults:
      run:
        shell: bash

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.builder }}
    continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
    steps:
      - name: Checkout nim-libp2p
        uses: actions/checkout@v2
        with:
          ref: master
          submodules: true

      - name: Derive environment variables
        run: |
          if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
            ARCH=64
            PLATFORM=x64
          else
            ARCH=32
            PLATFORM=x86
          fi
          echo "ARCH=$ARCH" >> $GITHUB_ENV
          echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV

          ncpu=
          ext=
          MAKE_CMD="make"
          case '${{ runner.os }}' in
          'Linux')
            ncpu=$(nproc)
            ;;
          'macOS')
            ncpu=$(sysctl -n hw.ncpu)
            ;;
          'Windows')
            ncpu=$NUMBER_OF_PROCESSORS
            ext=.exe
            MAKE_CMD="mingw32-make"
            ;;
          esac
          [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
          echo "ncpu=$ncpu" >> $GITHUB_ENV
          echo "ext=$ext" >> $GITHUB_ENV
          echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV

      - name: Install build dependencies (Linux i386)
        if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
        run: |
          sudo dpkg --add-architecture i386
          sudo apt-get update -qq
          sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
            --no-install-recommends -yq gcc-multilib g++-multilib \
            libssl-dev:i386
          mkdir -p external/bin
          cat << EOF > external/bin/gcc
          #!/bin/bash
          exec $(which gcc) -m32 "\$@"
          EOF
          cat << EOF > external/bin/g++
          #!/bin/bash
          exec $(which g++) -m32 "\$@"
          EOF
          chmod 755 external/bin/gcc external/bin/g++
          echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH

      - name: Restore MinGW-W64 (Windows) from cache
        if: runner.os == 'Windows'
        id: windows-mingw-cache
        uses: actions/cache@v2
        with:
          path: external/mingw-${{ matrix.target.cpu }}
          key: 'mingw-${{ matrix.target.cpu }}'

      - name: Restore Nim DLLs dependencies (Windows) from cache
        if: runner.os == 'Windows'
        id: windows-dlls-cache
        uses: actions/cache@v2
        with:
          path: external/dlls-${{ matrix.target.cpu }}
          key: 'dlls-${{ matrix.target.cpu }}'

      - name: Install MinGW64 dependency (Windows)
        if: >
          steps.windows-mingw-cache.outputs.cache-hit != 'true' &&
          runner.os == 'Windows'
        run: |
          mkdir -p external
          curl -L "https://nim-lang.org/download/mingw$ARCH.7z" -o "external/mingw-${{ matrix.target.cpu }}.7z"
          7z x -y "external/mingw-${{ matrix.target.cpu }}.7z" -oexternal/
          mv external/mingw$ARCH external/mingw-${{ matrix.target.cpu }}

      - name: Install DLLs dependencies (Windows)
        if: >
          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
          runner.os == 'Windows'
        run: |
          mkdir -p external
          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
          7z x -y external/windeps.zip -oexternal/dlls-${{ matrix.target.cpu }}

      - name: Path to cached dependencies (Windows)
        if: >
          runner.os == 'Windows'
        run: |
          echo "${{ github.workspace }}/external/mingw-${{ matrix.target.cpu }}/bin" >> $GITHUB_PATH
          echo "${{ github.workspace }}/external/dlls-${{ matrix.target.cpu }}" >> $GITHUB_PATH

      - name: Build Nim and Nimble
        run: |
          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
          env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
            QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
            bash build_nim.sh nim csources dist/nimble NimBinaries
          echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH

      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: '^1.15.5'

      - name: Install p2pd
        run: |
          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

      - name: Run nim-libp2p tests
        run: |
          nimble install -y --depsOnly
          nimble test
          if [[ "${{ matrix.branch }}" == "version-1-6" || "${{ matrix.branch }}" == "devel" ]]; then
            echo -e "\nTesting with '--gc:orc':\n"
            export NIMFLAGS="--gc:orc"
            nimble test
          fi
@@ -1,98 +1,7 @@
 # Getting Started
-Welcome to nim-libp2p! This guide will walk you through a peer to peer chat example. <br>
-The full code can be found in [directchat.nim](examples/directchat.nim) under the examples folder.
-
-### Direct Chat Example
-To run nim-libp2p, add it to your project's nimble file and spawn a node as follows:
-
-```nim
-import tables
-import chronos
-import ../libp2p/[switch,
-                  multistream,
-                  protocols/identify,
-                  connection,
-                  transports/transport,
-                  transports/tcptransport,
-                  multiaddress,
-                  peerinfo,
-                  crypto/crypto,
-                  peerid,
-                  protocols/protocol,
-                  muxers/muxer,
-                  muxers/mplex/mplex,
-                  protocols/secure/secio,
-                  protocols/secure/secure]
-
-const TestCodec = "/test/proto/1.0.0" # custom protocol string
-
-type
-  TestProto = ref object of LPProtocol # declare a custom protocol
-
-method init(p: TestProto) {.gcsafe.} =
-  # handle incoming connections in closure
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
-    echo "Got from remote - ", cast[string](await conn.readLp(1024))
-    await conn.writeLp("Hello!")
-    await conn.close()
-
-  p.codec = TestCodec # init proto with the correct string id
-  p.handler = handle # set proto handler
-
-proc createSwitch(ma: MultiAddress): (Switch, PeerInfo) =
-  ## Helper to create a switch
-
-  let seckey = PrivateKey.random(RSA) # use a random key for peer id
-  var peerInfo = PeerInfo.init(seckey) # create a peer id and assign
-  peerInfo.addrs.add(ma) # set this peer's multiaddresses (can be any number)
-
-  let identify = newIdentify(peerInfo) # create the identify proto
-
-  proc createMplex(conn: Connection): Muxer =
-    # helper proc to create multiplexers,
-    # use this to perform any custom setup up,
-    # such as adjusting timeout or anything else
-    # that the muxer requires
-    result = newMplex(conn)
-
-  let mplexProvider = newMuxerProvider(createMplex, MplexCodec) # create multiplexer
-  let transports = @[Transport(newTransport(TcpTransport))] # add all transports (tcp only for now, but can be anything in the future)
-  let muxers = {MplexCodec: mplexProvider}.toTable() # add all muxers
-  let secureManagers = {SecioCodec: Secure(Secio.new(seckey))}.toTable() # setup the secio and any other secure provider
-
-  # create the switch
-  let switch = newSwitch(peerInfo,
-                         transports,
-                         identify,
-                         muxers,
-                         secureManagers)
-  result = (switch, peerInfo)
-
-proc main() {.async, gcsafe.} =
-  let ma1: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
-  let ma2: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
-
-  var peerInfo1, peerInfo2: PeerInfo
-  var switch1, switch2: Switch
-  (switch1, peerInfo1) = createSwitch(ma1) # create node 1
-
-  # setup the custom proto
-  let testProto = new TestProto
-  testProto.init() # run its init method to perform any required initialization
-  switch1.mount(testProto) # mount the proto
-  var switch1Fut = await switch1.start() # start the node
-
-  (switch2, peerInfo2) = createSwitch(ma2) # create node 2
-  var switch2Fut = await switch2.start() # start second node
-  let conn = await switch2.dial(switch1.peerInfo, TestCodec) # dial the first node
-
-  await conn.writeLp("Hello!") # writeLp sends a length-prefixed buffer over the wire
-  # readLp reads length-prefixed bytes and returns a buffer without the prefix
-  echo "Remote responded with - ", cast[string](await conn.readLp(1024))
-
-  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
-  await allFutures(switch1Fut & switch2Fut) # wait for all transports to shutdown
-
-waitFor(main())
-```
+Welcome to nim-libp2p!
+
+To get started, please look at the [tutorials](../examples/tutorial_1_connect.md)
+
+For more concrete examples, you can look at the [hello world example](../examples/helloworld.nim) or the [direct chat](../examples/directchat.nim)
@@ -1,54 +1,88 @@
 when not(compileOption("threads")):
   {.fatal: "Please, compile this program with the --threads:on option!".}

-import tables, strformat, strutils, bearssl
-import chronos # an efficient library for async
-import ../libp2p/[switch, # manage transports, a single entry point for dialing and listening
-                  builders, # helper to build the switch object
-                  multistream, # tag stream with short header to identify it
-                  multicodec, # multicodec utilities
-                  crypto/crypto, # cryptographic functions
-                  errors, # error handling utilities
-                  protocols/identify, # identify the peer info of a peer
-                  stream/connection, # create and close stream read / write connections
-                  transports/transport, # listen and dial to other peers using p2p protocol
-                  transports/tcptransport, # listen and dial to other peers using client-server protocol
-                  multiaddress, # encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP
-                  peerinfo, # manage the information of a peer, such as peer ID and public / private key
-                  peerid, # implement how peers interact
-                  protocols/protocol, # define the protocol base type
-                  protocols/secure/secure, # define the protocol of secure connection
-                  protocols/secure/secio, # define the protocol of secure input / output, allows encrypted communication that uses public keys to validate signed messages instead of a certificate authority like in TLS
-                  muxers/muxer, # define an interface for stream multiplexing, allowing peers to offer many protocols over a single connection
-                  muxers/mplex/mplex] # define some constants and message types for stream multiplexing
+import
+  strformat, strutils, bearssl,
+  stew/byteutils,
+  chronos,
+  ../libp2p

-const ChatCodec = "/nim-libp2p/chat/1.0.0"
-const DefaultAddr = "/ip4/127.0.0.1/tcp/55505"
+const DefaultAddr = "/ip4/127.0.0.1/tcp/0"

 const Help = """
-  Commands: /[?|hep|connect|disconnect|exit]
+  Commands: /[?|help|connect|disconnect|exit]
   help: Prints this help
   connect: dials a remote peer
   disconnect: ends current session
   exit: closes the chat
"""

-type ChatProto = ref object of LPProtocol
-  switch: Switch          # a single entry point for dialing and listening to peer
-  transp: StreamTransport # transport streams between read & write file descriptor
-  conn: Connection        # create and close read & write stream
-  connected: bool         # if the node is connected to another peer
-  started: bool           # if the node has started
+type
+  Chat = ref object
+    switch: Switch               # a single entry point for dialing and listening to peer
+    stdinReader: StreamTransport # transport streams between read & write file descriptor
+    conn: Connection             # connection to the other peer
+    connected: bool              # if the node is connected to another peer

-proc readAndPrint(p: ChatProto) {.async.} =
-  while true:
-    var strData = await p.conn.readLp(1024)
-    strData &= '\0'.uint8
-    var str = cast[cstring](addr strdata[0])
-    echo $p.switch.peerInfo.peerId & ": " & $str
-    await sleepAsync(100.millis)
+##
+# Stdout helpers, to write the prompt
+##
+proc writePrompt(c: Chat) =
+  if c.connected:
+    stdout.write '\r' & $c.switch.peerInfo.peerId & ": "
+    stdout.flushFile()

-proc dialPeer(p: ChatProto, address: string) {.async.} =
+proc writeStdout(c: Chat, str: string) =
+  echo '\r' & str
+  c.writePrompt()
+
+##
+# Chat Protocol
+##
+const ChatCodec = "/nim-libp2p/chat/1.0.0"
+
+type
+  ChatProto = ref object of LPProtocol
+
+proc new(T: typedesc[ChatProto], c: Chat): T =
+  let chatproto = T()
+
+  # create handler for incoming connection
+  proc handle(stream: Connection, proto: string) {.async.} =
+    if c.connected and not c.conn.closed:
+      c.writeStdout "a chat session is already in progress - refusing incoming peer!"
+      await stream.close()
+    else:
+      await c.handlePeer(stream)
+      await stream.close()
+
+  # assign the new handler
+  chatproto.handler = handle
+  chatproto.codec = ChatCodec
+  return chatproto
+
+##
+# Chat application
+##
+proc handlePeer(c: Chat, conn: Connection) {.async.} =
+  # Handle a peer (incoming or outgoing)
+  try:
+    c.conn = conn
+    c.connected = true
+    c.writeStdout $conn.peerId & " connected"
+
+    # Read loop
+    while true:
+      let
+        strData = await conn.readLp(1024)
+        str = string.fromBytes(strData)
+      c.writeStdout $conn.peerId & ": " & $str
+
+  except LPStreamEOFError:
+    c.writeStdout $conn.peerId & " disconnected"
+
+proc dialPeer(c: Chat, address: string) {.async.} =
+  # Parse and dial address
   let
     multiAddr = MultiAddress.init(address).tryGet()
     # split the peerId part /p2p/...

@@ -63,86 +97,53 @@ proc dialPeer(p: ChatProto, address: string) {.async.} =
     wireAddr = ip4Addr & tcpAddr

   echo &"dialing peer: {multiAddr}"
-  p.conn = await p.switch.dial(remotePeer, @[wireAddr], ChatCodec)
-  p.connected = true
-  asyncSpawn p.readAndPrint()
+  asyncSpawn c.handlePeer(await c.switch.dial(remotePeer, @[wireAddr], ChatCodec))

-proc writeAndPrint(p: ChatProto) {.async.} =
+proc readLoop(c: Chat) {.async.} =
   while true:
-    if not p.connected:
+    if not c.connected:
       echo "type an address or wait for a connection:"
       echo "type /[help|?] for help"

-    let line = await p.transp.readLine()
-    if line.startsWith("/help") or line.startsWith("/?") or not p.started:
+    c.writePrompt()
+
+    let line = await c.stdinReader.readLine()
+    if line.startsWith("/help") or line.startsWith("/?"):
       echo Help
       continue

     if line.startsWith("/disconnect"):
-      echo "Ending current session"
-      if p.connected and p.conn.closed.not:
-        await p.conn.close()
-      p.connected = false
+      c.writeStdout "Ending current session"
+      if c.connected and c.conn.closed.not:
+        await c.conn.close()
+      c.connected = false
     elif line.startsWith("/connect"):
-      if p.connected:
-        var yesno = "N"
-        echo "a session is already in progress, do you want end it [y/N]?"
-        yesno = await p.transp.readLine()
-        if yesno.cmpIgnoreCase("y") == 0:
-          await p.conn.close()
-          p.connected = false
-        elif yesno.cmpIgnoreCase("n") == 0:
-          continue
-        else:
-          echo "unrecognized response"
-          continue
-
-      echo "enter address of remote peer"
-      let address = await p.transp.readLine()
+      c.writeStdout "enter address of remote peer"
+      let address = await c.stdinReader.readLine()
       if address.len > 0:
-        await p.dialPeer(address)
+        await c.dialPeer(address)

     elif line.startsWith("/exit"):
-      if p.connected and p.conn.closed.not:
-        await p.conn.close()
-        p.connected = false
+      if c.connected and c.conn.closed.not:
+        await c.conn.close()
+        c.connected = false

-      await p.switch.stop()
-      echo "quitting..."
+      await c.switch.stop()
+      c.writeStdout "quitting..."
       quit(0)
     else:
-      if p.connected:
-        await p.conn.writeLp(line)
+      if c.connected:
+        await c.conn.writeLp(line)
       else:
         try:
           if line.startsWith("/") and "p2p" in line:
-            await p.dialPeer(line)
-        except:
+            await c.dialPeer(line)
+        except CatchableError as exc:
           echo &"unable to dial remote peer {line}"
-          echo getCurrentExceptionMsg()
+          echo exc.msg

-proc readWriteLoop(p: ChatProto) {.async.} =
-  await p.writeAndPrint()
-
-proc newChatProto(switch: Switch, transp: StreamTransport): ChatProto =
-  var chatproto = ChatProto(switch: switch, transp: transp, codecs: @[ChatCodec])
-
-  # create handler for incoming connection
-  proc handle(stream: Connection, proto: string) {.async.} =
-    if chatproto.connected and not chatproto.conn.closed:
-      echo "a chat session is already in progress - disconnecting!"
-      await stream.close()
-    else:
-      chatproto.conn = stream
-      chatproto.connected = true
-      await chatproto.readAndPrint()
-
-  # assign the new handler
-  chatproto.handler = handle
-  return chatproto
-
 proc readInput(wfd: AsyncFD) {.thread.} =
-  ## This procedure performs reading from `stdin` and sends data over
+  ## This thread performs reading from `stdin` and sends data over
   ## pipe to main thread.
   let transp = fromPipe(wfd)

@@ -150,36 +151,35 @@ proc readInput(wfd: AsyncFD) {.thread.} =
     let line = stdin.readLine()
     discard waitFor transp.write(line & "\r\n")

-proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
-  let transp = fromPipe(rfd)
-
-  let seckey = PrivateKey.random(RSA, rng[]).get()
-  var localAddress = DefaultAddr
-  while true:
-    echo &"Type an address to bind to or Enter to use the default {DefaultAddr}"
-    let a = await transp.readLine()
-    try:
-      if a.len > 0:
-        localAddress = a
-        break
-      # use default
-      break
-    except:
-      echo "invalid address"
-      localAddress = DefaultAddr
-      continue
+proc main() {.async.} =
+  let
+    rng = newRng() # Single random number source for the whole application
+
+    # Pipe to read stdin from main thread
+    (rfd, wfd) = createAsyncPipe()
+    stdinReader = fromPipe(rfd)
+
+  var thread: Thread[AsyncFD]
+  thread.createThread(readInput, wfd)
+
+  var localAddress = MultiAddress.init(DefaultAddr).tryGet()

   var switch = SwitchBuilder
-    .init()
-    .withRng(rng)
-    .withPrivateKey(seckey)
-    .withAddress(MultiAddress.init(localAddress).tryGet())
+    .new()
+    .withRng(rng)              # Give the application RNG
+    .withAddress(localAddress)
+    .withTcpTransport()        # Use TCP as transport
+    .withMplex()               # Use Mplex as muxer
+    .withNoise()               # Use Noise as secure manager
     .build()

-  let chatProto = newChatProto(switch, transp)
-  switch.mount(chatProto)
+  let chat = Chat(
+    switch: switch,
+    stdinReader: stdinReader)
+
+  switch.mount(ChatProto.new(chat))

   let libp2pFuts = await switch.start()
-  chatProto.started = true

   let id = $switch.peerInfo.peerId
   echo "PeerID: " & id

@@ -187,19 +187,7 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
   for a in switch.peerInfo.addrs:
     echo &"{a}/p2p/{id}"

-  await chatProto.readWriteLoop()
+  await chat.readLoop()
   await allFuturesThrowing(libp2pFuts)

-proc main() {.async.} =
-  let rng = newRng() # Single random number source for the whole application
-  let (rfd, wfd) = createAsyncPipe()
-  if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
-    raise newException(ValueError, "Could not initialize pipe!")
-
-  var thread: Thread[AsyncFD]
-  thread.createThread(readInput, wfd)
-
-  await processInput(rfd, rng)
-
-when isMainModule: # isMainModule = true when the module is compiled as the main file
-  waitFor(main())
+waitFor(main())
@@ -0,0 +1,92 @@
import bearssl
import chronos          # an efficient library for async
import stew/byteutils   # various utils
import ../libp2p        # when installed through nimble, just use `import libp2p`

##
# Create our custom protocol
##
const TestCodec = "/test/proto/1.0.0" # custom protocol string identifier

type
  TestProto = ref object of LPProtocol # declare a custom protocol

proc new(T: typedesc[TestProto]): T =

  # every incoming connection will be handled in this closure
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    await conn.writeLp("Roger p2p!")

    # We must close the connections ourselves when we're done with it
    await conn.close()

  return T(codecs: @[TestCodec], handler: handle)

##
# Helper to create a switch/node
##
proc createSwitch(ma: MultiAddress, rng: ref BrHmacDrbgContext): Switch =
  var switch = SwitchBuilder
    .new()
    .withRng(rng)       # Give the application RNG
    .withAddress(ma)    # Our local address(es)
    .withTcpTransport() # Use TCP as transport
    .withMplex()        # Use Mplex as muxer
    .withNoise()        # Use Noise as secure manager
    .build()

  result = switch

##
# The actual application
##
proc main() {.async, gcsafe.} =
  let
    rng = newRng() # Single random number source for the whole application
    # port 0 will take a random available port
    # `tryGet` will throw an exception if the Multiaddress is invalid
    # (for instance, if the address is not well formatted)
    ma1 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
    ma2 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

  # setup the custom proto
  let testProto = TestProto.new()

  # setup the two nodes
  let
    switch1 = createSwitch(ma1, rng) # create the two switches
    switch2 = createSwitch(ma2, rng)

  # mount the proto on switch1
  # the node will now listen for this proto
  # and call the handler every time a client requests it
  switch1.mount(testProto)

  # Start the nodes. This will start the transports
  # and listen on each local address
  let
    switch1Fut = await switch1.start()
    switch2Fut = await switch2.start()

  # the node's addrs is populated with its
  # actual port during the start

  # use the second node to dial the first node
  # using the first node's peerid and address
  # and specify our custom protocol codec
  let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)

  # conn is now a fully setup connection, we talk directly to the node1 custom protocol handler
  await conn.writeLp("Hello p2p!") # writeLp sends a length-prefixed buffer over the wire

  # readLp reads length-prefixed bytes and returns a buffer without the prefix
  echo "Remote responded with - ", string.fromBytes(await conn.readLp(1024))

  # We must close the connection ourselves when we're done with it
  await conn.close()

  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
  await allFutures(switch1Fut & switch2Fut) # wait for all transports to shutdown

waitFor(main())
@@ -0,0 +1,108 @@
Hi all, welcome to the first article of nim-libp2p's tutorial series!

_This tutorial is for everyone who is interested in building peer-to-peer chat applications. No Nim programming experience is needed._

To give you a quick overview, **Nim** is the programming language we are using and **nim-libp2p** is the Nim implementation of [libp2p](https://libp2p.io/), a modular library that enables the development of peer-to-peer network applications.

Hope you'll find it helpful in your journey of learning. Happy coding! ;)

# Before you start
The only prerequisite here is [Nim](https://nim-lang.org/), the programming language with a Python-like syntax and a performance similar to C. Detailed information can be found [here](https://nim-lang.org/docs/tut1.html).

Install Nim via its official website: [https://nim-lang.org/install.html](https://nim-lang.org/install.html)
Check Nim's installation via `nim --version` and its package manager Nimble via `nimble --version`.

You can now install the latest version of `nim-libp2p`:
```bash
nimble install libp2p@#master
```

# A simple ping application
We'll start by creating a simple application that starts two libp2p [switches](https://docs.libp2p.io/concepts/stream-multiplexing/#switch-swarm) and pings one from the other using the [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol.

_TIP: You can extract the code from this tutorial by running `nim c -r tools/markdown_runner.nim examples/tutorial_1_connect.md` in the libp2p folder!_

Let's create a `part1.nim`, and import our dependencies:
```nim
import bearssl
import chronos

import libp2p
import libp2p/protocols/ping
```
[bearssl](https://github.com/status-im/nim-bearssl) is used as a [cryptographic pseudorandom number generator](https://en.wikipedia.org/wiki/Cryptographically-secure_pseudorandom_number_generator), and
[chronos](https://github.com/status-im/nim-chronos) is the asynchronous framework used by `nim-libp2p`.

Next, we'll create a helper procedure to create our switches. A switch needs a bit of configuration, and it will be easier to do this configuration only once:
```nim
proc createSwitch(ma: MultiAddress, rng: ref BrHmacDrbgContext): Switch =
  var switch = SwitchBuilder
    .new()
    .withRng(rng)       # Give the application RNG
    .withAddress(ma)    # Our local address(es)
    .withTcpTransport() # Use TCP as transport
    .withMplex()        # Use Mplex as muxer
    .withNoise()        # Use Noise as secure manager
    .build()

  return switch
```
This will create a switch using [Mplex](https://docs.libp2p.io/concepts/stream-multiplexing/) as a multiplexer, Noise to secure the communication, and TCP as the underlying transport.

You can of course tweak this, to use different or multiple transports, or to tweak the configuration of Mplex and Noise, but these are sane defaults that we'll use going forward.
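As an illustration of such a tweak — a hypothetical variation, relying only on the `inTimeout`/`outTimeout` parameters that `withMplex` accepts (both default to 5 minutes) — you could shorten the muxer's stream timeouts:
```nim
# a sketch, not part of this tutorial's final program:
# the same builder chain, with shorter Mplex stream timeouts
proc createImpatientSwitch(ma: MultiAddress, rng: ref BrHmacDrbgContext): Switch =
  SwitchBuilder
    .new()
    .withRng(rng)
    .withAddress(ma)
    .withTcpTransport()
    .withMplex(inTimeout = 1.minutes, outTimeout = 1.minutes)
    .withNoise()
    .build()
```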

Let's now start to create our main procedure:
```nim
proc main() {.async, gcsafe.} =
  let
    rng = newRng()
    localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
    pingProtocol = Ping.new(rng=rng)
```
We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any available port".

`tryGet` is a procedure from [nim-result](https://github.com/arnetheduck/nim-result/) that will throw an exception if the supplied MultiAddress is not valid.
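To make that concrete — a small sketch, where the second address is deliberately malformed (`tcp` expects a numeric port), so `tryGet` raises instead of returning a value:
```nim
let ok = MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet() # fine
# raises, because the Result holds an error instead of a MultiAddress:
# let bad = MultiAddress.init("/ip4/127.0.0.1/tcp/not-a-port").tryGet()
```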

We can now create our two switches:
```nim
  let
    switch1 = createSwitch(localAddress, rng)
    switch2 = createSwitch(localAddress, rng)

  switch1.mount(pingProtocol)

  let
    switch1Fut = await switch1.start()
    switch2Fut = await switch2.start()
```
We've **mounted** the `pingProtocol` on our first switch. This means that the first switch will actually listen for any ping requests coming in, and handle them accordingly.

Now that we've started the nodes, they are listening for incoming peers.
We can find out which port was assigned, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
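For example — a small addition you could make to `main` at this point — printing the actual listen addresses:
```nim
  # after start(), each address has the real port filled in
  for ma in switch1.peerInfo.addrs:
    echo "listening on: ", ma
```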

We'll **dial** the first switch from the second one, by specifying its **Peer ID**, its **MultiAddress** and the **`Ping` protocol codec**:
```nim
  let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
```
We now have a `Ping` connection set up between the second and the first switch; we can use it to actually ping the node:
```nim
  # ping the other node and echo the ping duration
  echo "ping: ", await pingProtocol.ping(conn)

  # We must close the connection ourselves when we're done with it
  await conn.close()
```

And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
```nim
  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
  await allFutures(switch1Fut & switch2Fut) # wait for all transports to shutdown

waitFor(main())
```

You can now run this program using `nim c -r part1.nim`, and you should see the dialing sequence, ending with a ping output.

In the [next tutorial](tutorial_2_customproto.md), we'll look at how to create our own custom protocol.
@@ -0,0 +1,82 @@
In the [previous tutorial](tutorial_1_connect.md), we looked at how to create a simple ping program using nim-libp2p.

We'll now look at how to create a custom protocol inside libp2p.

# Custom protocol in libp2p
Let's create a `part2.nim`, and import our dependencies:
```nim
import bearssl
import chronos
import stew/byteutils

import libp2p
```
This is similar to the first tutorial, except we don't need to import the `Ping` protocol.

Next, we'll declare our custom protocol:
```nim
const TestCodec = "/test/proto/1.0.0"

type TestProto = ref object of LPProtocol
```

We've set a [protocol ID](https://docs.libp2p.io/concepts/protocols/#protocol-ids), and created a custom `LPProtocol`. In a more complex protocol, we could use this structure to store interesting variables.
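For instance — a purely hypothetical sketch, with a field name of our own invention — a protocol that counts handled messages could keep that state in the object:
```nim
type CounterProto = ref object of LPProtocol
  messagesReceived: int # state shared with the protocol's handler
```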

A protocol generally has two parts: a handling/server part and a dialing/client part.
These two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.

Let's start with the server part:
```nim
proc new(T: typedesc[TestProto]): T =
  # every incoming connection will be handled in this closure
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    # We must close the connections ourselves when we're done with it
    await conn.close()

  return T(codecs: @[TestCodec], handler: handle)
```
This is a constructor for our `TestProto`, that will specify our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
In our handle, we simply read a message from the connection and `echo` it.

We can now create our client part:
```nim
proc hello(p: TestProto, conn: Connection) {.async.} =
  await conn.writeLp("Hello p2p!")
```
Again, pretty straightforward: we just send a message on the connection.

We can now create our main procedure:
```nim
proc main() {.async, gcsafe.} =
  let
    rng = newRng()
    testProto = TestProto.new()
    switch1 = newStandardSwitch(rng=rng)
    switch2 = newStandardSwitch(rng=rng)

  switch1.mount(testProto)

  let
    switch1Fut = await switch1.start()
    switch2Fut = await switch2.start()

    conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)

  await testProto.hello(conn)

  # We must close the connection ourselves when we're done with it
  await conn.close()

  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
  await allFutures(switch1Fut & switch2Fut) # wait for all transports to shutdown
```

This is very similar to the first tutorial's `main`; the only noteworthy difference is that we use `newStandardSwitch`, which is similar to `createSwitch` but is bundled directly in libp2p.

We can now wrap up our program by calling our main proc:
```nim
waitFor(main())
```

And that's it!
@@ -16,11 +16,12 @@ requires "nim >= 1.2.0",
          "metrics",
          "secp256k1",
          "stew#head",
-         "https://github.com/status-im/nim-websock"
+         "websock"

 proc runTest(filename: string, verify: bool = true, sign: bool = true,
              moreoptions: string = "") =
-  var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics --verbosity:0 --hints:off"
+  let env_nimflags = getEnv("NIMFLAGS")
+  var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics --verbosity:0 --hints:off " & env_nimflags
   excstr.add(" --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off")
   excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
   excstr.add(" -d:libp2p_pubsub_verify=" & $verify)

@@ -32,12 +33,18 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
   exec excstr & " -d:chronicles_log_level=INFO -r" & " tests/" & filename
   rmFile "tests/" & filename.toExe

-proc buildSample(filename: string) =
+proc buildSample(filename: string, run = false) =
   var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off"
   excstr.add(" --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off")
   excstr.add(" examples/" & filename)
   exec excstr
-  rmFile "examples" & filename.toExe
+  if run:
+    exec "./examples/" & filename.toExe
+  rmFile "examples/" & filename.toExe
+
+proc buildTutorial(filename: string) =
+  discard gorge "cat " & filename & " | nim c -r --hints:off tools/markdown_runner.nim | " &
+    " nim --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off c -"

 task testnative, "Runs libp2p native tests":
   runTest("testnative")

@@ -74,6 +81,7 @@ task test, "Runs the test suite":
   exec "nimble testdaemon"
   exec "nimble testinterop"
   exec "nimble testfilter"
+  exec "nimble examples_build"

 task test_slim, "Runs the test suite":
   exec "nimble testnative"

@@ -83,3 +91,6 @@ task test_slim, "Runs the test suite":

 task examples_build, "Build the samples":
   buildSample("directchat")
+  buildSample("helloworld", true)
+  buildTutorial("examples/tutorial_1_connect.md")
+  buildTutorial("examples/tutorial_2_customproto.md")
@@ -80,7 +80,7 @@ proc withAddresses*(b: SwitchBuilder, addresses: seq[MultiAddress]): SwitchBuild

 proc withMplex*(b: SwitchBuilder, inTimeout = 5.minutes, outTimeout = 5.minutes): SwitchBuilder =
   proc newMuxer(conn: Connection): Muxer =
-    Mplex.init(
+    Mplex.new(
       conn,
       inTimeout = inTimeout,
       outTimeout = outTimeout)

@@ -151,7 +151,7 @@ proc build*(b: SwitchBuilder): Switch
     secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)

   let
-    peerInfo = PeerInfo.init(
+    peerInfo = PeerInfo.new(
       seckey,
       b.addresses,
       protoVersion = b.protoVersion,

@@ -166,9 +166,9 @@ proc build*(b: SwitchBuilder): Switch

   let
     identify = Identify.new(peerInfo)
-    connManager = ConnManager.init(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
+    connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
     ms = MultistreamSelect.new()
-    muxedUpgrade = MuxedUpgrade.init(identify, muxers, secureManagerInstances, connManager, ms)
+    muxedUpgrade = MuxedUpgrade.new(identify, muxers, secureManagerInstances, connManager, ms)

   let
     transports = block:
@@ -84,7 +84,7 @@ type
 proc newTooManyConnectionsError(): ref TooManyConnectionsError {.inline.} =
   result = newException(TooManyConnectionsError, "Too many connections")

-proc init*(C: type ConnManager,
+proc new*(C: type ConnManager,
           maxConnsPerPeer = MaxConnectionsPerPeer,
           maxConnections = MaxConnections,
           maxIn = -1,
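The rename keeps call sites unchanged apart from the constructor name; a hypothetical call site, with limits chosen purely for illustration:
```nim
# sketch: cap connections per peer and overall
let connMngr = ConnManager.new(maxConnsPerPeer = 5, maxConnections = 200)
```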
@@ -174,6 +174,19 @@ proc newRng*(): ref BrHmacDrbgContext =
     return nil
   rng

+proc shuffle*[T](
+  rng: ref BrHmacDrbgContext,
+  x: var openArray[T]) =
+
+  var randValues = newSeqUninitialized[byte](len(x) * 2)
+  brHmacDrbgGenerate(rng[], randValues)
+
+  for i in countdown(x.high, 1):
+    let
+      rand = randValues[i * 2].int32 or (randValues[i * 2 + 1].int32 shl 8)
+      y = rand mod i
+    swap(x[i], x[y])
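A minimal usage sketch for the new helper, assuming only `newRng` from the same module:
```nim
var items = @[1, 2, 3, 4, 5]
let rng = newRng()
rng.shuffle(items) # shuffles in place, using the DRBG as entropy source
echo items         # e.g. @[4, 1, 5, 2, 3]
```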

 proc random*(T: typedesc[PrivateKey], scheme: PKScheme,
              rng: var BrHmacDrbgContext,
              bits = RsaDefaultKeySize): CryptoResult[PrivateKey] =

@@ -331,9 +344,6 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
   else:
     err(SchemeError)

-proc getKey*(key: PrivateKey): CryptoResult[PublicKey] {.deprecated: "use getPublicKey".} =
-  key.getPublicKey()
-
 proc toRawBytes*(key: PrivateKey | PublicKey,
                  data: var openarray[byte]): CryptoResult[int] =
   ## Serialize private key ``key`` (using scheme's own serialization) and store

@@ -1013,39 +1023,6 @@ proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
     inline, raises: [Defect].} =
   write(pb, field, sig.getBytes())

-proc initProtoField*(index: int, key: PublicKey|PrivateKey): ProtoField {.
-     deprecated, raises: [Defect, ResultError[CryptoError]].} =
-  ## Initialize ProtoField with PublicKey/PrivateKey ``key``.
-  result = initProtoField(index, key.getBytes().tryGet())
-
-proc initProtoField*(index: int, sig: Signature): ProtoField {.deprecated.} =
-  ## Initialize ProtoField with Signature ``sig``.
-  result = initProtoField(index, sig.getBytes())
-
-proc getValue*[T: PublicKey|PrivateKey](data: var ProtoBuffer, field: int,
-                                        value: var T): int {.deprecated.} =
-  ## Read PublicKey/PrivateKey from ProtoBuf's message and validate it.
-  var buf: seq[byte]
-  var key: PublicKey
-  result = getLengthValue(data, field, buf)
-  if result > 0:
-    if not key.init(buf):
-      result = -1
-    else:
-      value = key
-
-proc getValue*(data: var ProtoBuffer, field: int, value: var Signature): int {.
-     deprecated.} =
-  ## Read ``Signature`` from ProtoBuf's message and validate it.
-  var buf: seq[byte]
-  var sig: Signature
-  result = getLengthValue(data, field, buf)
-  if result > 0:
-    if not sig.init(buf):
-      result = -1
-    else:
-      value = sig
-
 proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
                                         value: var T): ProtoResult[bool] =
   ## Deserialize public/private key from protobuf's message ``pb`` using field
@@ -10,7 +10,7 @@
 {.push raises: [Defect].}

 ## This module implements the API for `go-libp2p-daemon`.
-import std/[os, osproc, strutils, tables, strtabs]
+import std/[os, osproc, strutils, tables, strtabs, sequtils]
 import pkg/[chronos, chronicles]
 import ../varint, ../multiaddress, ../multicodec, ../cid, ../peerid
 import ../wire, ../multihash, ../protobuf/minprotobuf, ../errors

@@ -35,7 +35,7 @@ type
     Critical, Error, Warning, Notice, Info, Debug, Trace

   RequestType* {.pure.} = enum
-    IDENTITY = 0,
+    IDENTIFY = 0,
     CONNECT = 1,
     STREAM_OPEN = 2,
     STREAM_HANDLER = 3,

@@ -167,7 +167,7 @@ proc requestIdentity(): ProtoBuffer =
   ## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
   ## Processing function `doIdentify(req *pb.Request)`.
   result = initProtoBuffer({WithVarintLength})
-  result.write(initProtoField(1, cast[uint](RequestType.IDENTITY)))
+  result.write(1, cast[uint](RequestType.IDENTIFY))
   result.finish()
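The same mechanical change repeats through the rest of this file: the deprecated `initProtoField` wrapper goes away, and `write` takes the field number directly. A sketch of the new style, with an invented field layout (nested messages are written the same way, as the hunks below show):
```nim
# hypothetical request: field 1 = request type, field 5 = a nested message
var msg = initProtoBuffer()
msg.write(1, cast[uint](RequestType.CONNECT))
msg.finish()
var req = initProtoBuffer({WithVarintLength})
req.write(1, cast[uint](RequestType.CONNECT))
req.write(5, msg)
req.finish()
```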
|
||||||
|
|
||||||
proc requestConnect(peerid: PeerID,
|
proc requestConnect(peerid: PeerID,
|
||||||
|
@ -177,13 +177,13 @@ proc requestConnect(peerid: PeerID,
|
||||||
## Processing function `doConnect(req *pb.Request)`.
|
## Processing function `doConnect(req *pb.Request)`.
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
var msg = initProtoBuffer()
|
var msg = initProtoBuffer()
|
||||||
msg.write(initProtoField(1, peerid))
|
msg.write(1, peerid)
|
||||||
for item in addresses:
|
for item in addresses:
|
||||||
msg.write(initProtoField(2, item.data.buffer))
|
msg.write(2, item.data.buffer)
|
||||||
if timeout > 0:
|
if timeout > 0:
|
||||||
msg.write(initProtoField(3, hint64(timeout)))
|
msg.write(3, hint64(timeout))
|
||||||
result.write(initProtoField(1, cast[uint](RequestType.CONNECT)))
|
result.write(1, cast[uint](RequestType.CONNECT))
|
||||||
result.write(initProtoField(2, msg))
|
result.write(2, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestDisconnect(peerid: PeerID): ProtoBuffer =
|
proc requestDisconnect(peerid: PeerID): ProtoBuffer =
|
||||||
|
@ -191,9 +191,9 @@ proc requestDisconnect(peerid: PeerID): ProtoBuffer =
|
||||||
## Processing function `doDisconnect(req *pb.Request)`.
|
## Processing function `doDisconnect(req *pb.Request)`.
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
var msg = initProtoBuffer()
|
var msg = initProtoBuffer()
|
||||||
msg.write(initProtoField(1, peerid))
|
msg.write(1, peerid)
|
||||||
result.write(initProtoField(1, cast[uint](RequestType.DISCONNECT)))
|
result.write(1, cast[uint](RequestType.DISCONNECT))
|
||||||
result.write(initProtoField(7, msg))
|
result.write(7, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestStreamOpen(peerid: PeerID,
|
proc requestStreamOpen(peerid: PeerID,
|
||||||
|
@ -203,13 +203,13 @@ proc requestStreamOpen(peerid: PeerID,
|
||||||
## Processing function `doStreamOpen(req *pb.Request)`.
|
## Processing function `doStreamOpen(req *pb.Request)`.
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
var msg = initProtoBuffer()
|
var msg = initProtoBuffer()
|
||||||
msg.write(initProtoField(1, peerid))
|
msg.write(1, peerid)
|
||||||
for item in protocols:
|
for item in protocols:
|
||||||
msg.write(initProtoField(2, item))
|
msg.write(2, item)
|
||||||
if timeout > 0:
|
if timeout > 0:
|
||||||
msg.write(initProtoField(3, hint64(timeout)))
|
msg.write(3, hint64(timeout))
|
||||||
result.write(initProtoField(1, cast[uint](RequestType.STREAM_OPEN)))
|
result.write(1, cast[uint](RequestType.STREAM_OPEN))
|
||||||
result.write(initProtoField(3, msg))
|
result.write(3, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestStreamHandler(address: MultiAddress,
|
proc requestStreamHandler(address: MultiAddress,
|
||||||
|
@ -218,18 +218,18 @@ proc requestStreamHandler(address: MultiAddress,
|
||||||
## Processing function `doStreamHandler(req *pb.Request)`.
|
## Processing function `doStreamHandler(req *pb.Request)`.
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
var msg = initProtoBuffer()
|
var msg = initProtoBuffer()
|
||||||
msg.write(initProtoField(1, address.data.buffer))
|
msg.write(1, address.data.buffer)
|
||||||
for item in protocols:
|
for item in protocols:
|
||||||
msg.write(initProtoField(2, item))
|
msg.write(2, item)
|
||||||
result.write(initProtoField(1, cast[uint](RequestType.STREAM_HANDLER)))
|
result.write(1, cast[uint](RequestType.STREAM_HANDLER))
|
||||||
result.write(initProtoField(4, msg))
|
result.write(4, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestListPeers(): ProtoBuffer =
|
proc requestListPeers(): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
||||||
## Processing function `doListPeers(req *pb.Request)`
|
## Processing function `doListPeers(req *pb.Request)`
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
result.write(initProtoField(1, cast[uint](RequestType.LIST_PEERS)))
|
result.write(1, cast[uint](RequestType.LIST_PEERS))
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestDHTFindPeer(peer: PeerID, timeout = 0): ProtoBuffer =
|
proc requestDHTFindPeer(peer: PeerID, timeout = 0): ProtoBuffer =
|
||||||
|
@@ -238,13 +238,13 @@ proc requestDHTFindPeer(peer: PeerID, timeout = 0): ProtoBuffer =
   let msgid = cast[uint](DHTRequestType.FIND_PEER)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, peer))
+  msg.write(1, msgid)
+  msg.write(2, peer)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTFindPeersConnectedToPeer(peer: PeerID,

@@ -254,13 +254,13 @@ proc requestDHTFindPeersConnectedToPeer(peer: PeerID,
   let msgid = cast[uint](DHTRequestType.FIND_PEERS_CONNECTED_TO_PEER)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, peer))
+  msg.write(1, msgid)
+  msg.write(2, peer)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTFindProviders(cid: Cid,

@@ -270,14 +270,14 @@ proc requestDHTFindProviders(cid: Cid,
   let msgid = cast[uint](DHTRequestType.FIND_PROVIDERS)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(3, cid.data.buffer))
-  msg.write(initProtoField(6, count))
+  msg.write(1, msgid)
+  msg.write(3, cid.data.buffer)
+  msg.write(6, count)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =

@@ -286,13 +286,13 @@ proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =
   let msgid = cast[uint](DHTRequestType.GET_CLOSEST_PEERS)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(4, key))
+  msg.write(1, msgid)
+  msg.write(4, key)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTGetPublicKey(peer: PeerID, timeout = 0): ProtoBuffer =

@@ -301,13 +301,13 @@ proc requestDHTGetPublicKey(peer: PeerID, timeout = 0): ProtoBuffer =
   let msgid = cast[uint](DHTRequestType.GET_PUBLIC_KEY)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, peer))
+  msg.write(1, msgid)
+  msg.write(2, peer)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTGetValue(key: string, timeout = 0): ProtoBuffer =

@@ -316,13 +316,13 @@ proc requestDHTGetValue(key: string, timeout = 0): ProtoBuffer =
   let msgid = cast[uint](DHTRequestType.GET_VALUE)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(4, key))
+  msg.write(1, msgid)
+  msg.write(4, key)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =

@@ -331,13 +331,13 @@ proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
   let msgid = cast[uint](DHTRequestType.SEARCH_VALUE)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(4, key))
+  msg.write(1, msgid)
+  msg.write(4, key)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTPutValue(key: string, value: openarray[byte],

@@ -347,14 +347,14 @@ proc requestDHTPutValue(key: string, value: openarray[byte],
   let msgid = cast[uint](DHTRequestType.PUT_VALUE)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(4, key))
-  msg.write(initProtoField(5, value))
+  msg.write(1, msgid)
+  msg.write(4, key)
+  msg.write(5, value)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =

@@ -363,13 +363,13 @@ proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =
   let msgid = cast[uint](DHTRequestType.PROVIDE)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(3, cid.data.buffer))
+  msg.write(1, msgid)
+  msg.write(3, cid.data.buffer)
   if timeout > 0:
-    msg.write(initProtoField(7, hint64(timeout)))
+    msg.write(7, hint64(timeout))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.DHT)))
-  result.write(initProtoField(5, msg))
+  result.write(1, cast[uint](RequestType.DHT))
+  result.write(5, msg)
   result.finish()

 proc requestCMTagPeer(peer: PeerID, tag: string, weight: int): ProtoBuffer =

@@ -377,13 +377,13 @@ proc requestCMTagPeer(peer: PeerID, tag: string, weight: int): ProtoBuffer =
   let msgid = cast[uint](ConnManagerRequestType.TAG_PEER)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, peer))
-  msg.write(initProtoField(3, tag))
-  msg.write(initProtoField(4, hint64(weight)))
+  msg.write(1, msgid)
+  msg.write(2, peer)
+  msg.write(3, tag)
+  msg.write(4, hint64(weight))
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.CONNMANAGER)))
-  result.write(initProtoField(6, msg))
+  result.write(1, cast[uint](RequestType.CONNMANAGER))
+  result.write(6, msg)
   result.finish()

 proc requestCMUntagPeer(peer: PeerID, tag: string): ProtoBuffer =

@@ -391,12 +391,12 @@ proc requestCMUntagPeer(peer: PeerID, tag: string): ProtoBuffer =
   let msgid = cast[uint](ConnManagerRequestType.UNTAG_PEER)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, peer))
-  msg.write(initProtoField(3, tag))
+  msg.write(1, msgid)
+  msg.write(2, peer)
+  msg.write(3, tag)
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.CONNMANAGER)))
-  result.write(initProtoField(6, msg))
+  result.write(1, cast[uint](RequestType.CONNMANAGER))
+  result.write(6, msg)
   result.finish()

 proc requestCMTrim(): ProtoBuffer =

@@ -404,10 +404,10 @@ proc requestCMTrim(): ProtoBuffer =
   let msgid = cast[uint](ConnManagerRequestType.TRIM)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
+  msg.write(1, msgid)
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.CONNMANAGER)))
-  result.write(initProtoField(6, msg))
+  result.write(1, cast[uint](RequestType.CONNMANAGER))
+  result.write(6, msg)
   result.finish()

 proc requestPSGetTopics(): ProtoBuffer =

@@ -416,10 +416,10 @@ proc requestPSGetTopics(): ProtoBuffer =
   let msgid = cast[uint](PSRequestType.GET_TOPICS)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
+  msg.write(1, msgid)
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.PUBSUB)))
-  result.write(initProtoField(8, msg))
+  result.write(1, cast[uint](RequestType.PUBSUB))
+  result.write(8, msg)
   result.finish()

 proc requestPSListPeers(topic: string): ProtoBuffer =

@@ -428,11 +428,11 @@ proc requestPSListPeers(topic: string): ProtoBuffer =
   let msgid = cast[uint](PSRequestType.LIST_PEERS)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, topic))
+  msg.write(1, msgid)
+  msg.write(2, topic)
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.PUBSUB)))
-  result.write(initProtoField(8, msg))
+  result.write(1, cast[uint](RequestType.PUBSUB))
+  result.write(8, msg)
   result.finish()

 proc requestPSPublish(topic: string, data: openarray[byte]): ProtoBuffer =

@@ -441,12 +441,12 @@ proc requestPSPublish(topic: string, data: openarray[byte]): ProtoBuffer =
   let msgid = cast[uint](PSRequestType.PUBLISH)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, topic))
-  msg.write(initProtoField(3, data))
+  msg.write(1, msgid)
+  msg.write(2, topic)
+  msg.write(3, data)
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.PUBSUB)))
-  result.write(initProtoField(8, msg))
+  result.write(1, cast[uint](RequestType.PUBSUB))
+  result.write(8, msg)
   result.finish()

 proc requestPSSubscribe(topic: string): ProtoBuffer =
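Every request builder in the hunks above changes identically: the deprecated `initProtoField` wrapper is dropped and the field number goes straight into `write`, and the remaining builders below follow the same pattern. A minimal sketch of the migration, assuming only the `minprotobuf` module touched by this diff (the field number and the `42'u` value are illustrative, not part of the daemon protocol):

import libp2p/protobuf/minprotobuf

var msg = initProtoBuffer()
# old, deprecated style:
#   msg.write(initProtoField(1, 42'u))
# new style, field number and value passed directly:
msg.write(1, 42'u)
msg.finish()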
@@ -455,25 +455,26 @@ proc requestPSSubscribe(topic: string): ProtoBuffer =
   let msgid = cast[uint](PSRequestType.SUBSCRIBE)
   result = initProtoBuffer({WithVarintLength})
   var msg = initProtoBuffer()
-  msg.write(initProtoField(1, msgid))
-  msg.write(initProtoField(2, topic))
+  msg.write(1, msgid)
+  msg.write(2, topic)
   msg.finish()
-  result.write(initProtoField(1, cast[uint](RequestType.PUBSUB)))
-  result.write(initProtoField(8, msg))
+  result.write(1, cast[uint](RequestType.PUBSUB))
+  result.write(8, msg)
   result.finish()

-proc checkResponse(pb: var ProtoBuffer): ResponseKind {.inline.} =
+proc checkResponse(pb: ProtoBuffer): ResponseKind {.inline.} =
   result = ResponseKind.Malformed
   var value: uint64
-  if getVarintValue(pb, 1, value) > 0:
+  if getRequiredField(pb, 1, value).isOk():
     if value == 0:
       result = ResponseKind.Success
     else:
       result = ResponseKind.Error

-proc getErrorMessage(pb: var ProtoBuffer): string {.inline, raises: [Defect, DaemonLocalError].} =
-  if pb.enterSubmessage() == cast[int](ResponseType.ERROR):
-    if pb.getString(1, result) == -1:
-      raise newException(DaemonLocalError, "Error message is missing!")
+proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [Defect, DaemonLocalError].} =
+  var error: seq[byte]
+  if pb.getRequiredField(ResponseType.ERROR.int, error).isOk():
+    if initProtoBuffer(error).getRequiredField(1, result).isErr():
+      raise newException(DaemonLocalError, "Error message is missing!")

 proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
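`checkResponse` and `getErrorMessage` show the decoding side of the same migration: sentinel return values (`> 0`, `== -1`) give way to `Result`-based accessors tested with `isOk()`/`isErr()`. A self-contained sketch, assuming the `getRequiredField` helper introduced later in this diff; the two-byte buffer is hand-rolled test data (field 1, varint, value 1), not daemon traffic:

import libp2p/protobuf/minprotobuf

let pb = initProtoBuffer(@[0x08'u8, 0x01'u8])  # key 0x08 = field 1, wire type 0
var value: uint64
if pb.getRequiredField(1, value).isOk():
  echo "field 1 = ", value        # prints: field 1 = 1
else:
  echo "field 1 missing or malformed"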
@@ -830,26 +831,14 @@ proc transactMessage(transp: StreamTransport,
     raise newException(DaemonLocalError, "Incorrect or empty message received!")
   result = initProtoBuffer(message)

-proc getPeerInfo(pb: var ProtoBuffer): PeerInfo
+proc getPeerInfo(pb: ProtoBuffer): PeerInfo
     {.raises: [Defect, DaemonLocalError].} =
   ## Get PeerInfo object from ``pb``.
   result.addresses = newSeq[MultiAddress]()
-  if pb.getValue(1, result.peer) == -1:
-    raise newException(DaemonLocalError, "Missing required field `peer`!")
-
-  var address = newSeq[byte]()
-  while pb.getBytes(2, address) != -1:
-    if len(address) != 0:
-      var copyaddr = address
-      let addrRes = MultiAddress.init(copyaddr)
-
-      # TODO: for some reason `toException` doesn't
-      # work for this module
-      if addrRes.isErr:
-        raise newException(DaemonLocalError, addrRes.error)
-
-      result.addresses.add(addrRes.get())
-      address.setLen(0)
+  if pb.getRequiredField(1, result.peer).isErr():
+    raise newException(DaemonLocalError, "Incorrect or empty message received!")
+
+  discard pb.getRepeatedField(2, result.addresses)

 proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
   ## Get Node identity information
@@ -857,9 +846,10 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
   try:
     var pb = await transactMessage(transp, requestIdentity())
     pb.withMessage() do:
-      let res = pb.enterSubmessage()
-      if res == cast[int](ResponseType.IDENTITY):
-        result = pb.getPeerInfo()
+      var res: seq[byte]
+      if pb.getRequiredField(ResponseType.IDENTITY.int, res).isOk():
+        var resPb = initProtoBuffer(res)
+        result = getPeerInfo(resPb)
   finally:
     await api.closeConnection(transp)
@@ -897,18 +887,16 @@ proc openStream*(api: DaemonAPI, peer: PeerID,
     var pb = await transp.transactMessage(requestStreamOpen(peer, protocols,
                                                             timeout))
     pb.withMessage() do:
-      var res = pb.enterSubmessage()
-      if res == cast[int](ResponseType.STREAMINFO):
+      var res: seq[byte]
+      if pb.getRequiredField(ResponseType.STREAMINFO.int, res).isOk():
+        let resPb = initProtoBuffer(res)
         # stream.peer = newSeq[byte]()
         var raddress = newSeq[byte]()
         stream.protocol = ""
-        if pb.getValue(1, stream.peer) == -1:
-          raise newException(DaemonLocalError, "Missing `peer` field!")
-        if pb.getLengthValue(2, raddress) == -1:
-          raise newException(DaemonLocalError, "Missing `address` field!")
+        resPb.getRequiredField(1, stream.peer).tryGet()
+        resPb.getRequiredField(2, raddress).tryGet()
         stream.raddress = MultiAddress.init(raddress).tryGet()
-        if pb.getLengthValue(3, stream.protocol) == -1:
-          raise newException(DaemonLocalError, "Missing `proto` field!")
+        resPb.getRequiredField(3, stream.protocol).tryGet()
         stream.flags.incl(Outbound)
         stream.transp = transp
         result = stream
@@ -923,13 +911,10 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
     var stream = new P2PStream
     var raddress = newSeq[byte]()
     stream.protocol = ""
-    if pb.getValue(1, stream.peer) == -1:
-      raise newException(DaemonLocalError, "Missing `peer` field!")
-    if pb.getLengthValue(2, raddress) == -1:
-      raise newException(DaemonLocalError, "Missing `address` field!")
+    pb.getRequiredField(1, stream.peer).tryGet()
+    pb.getRequiredField(2, raddress).tryGet()
     stream.raddress = MultiAddress.init(raddress).tryGet()
-    if pb.getLengthValue(3, stream.protocol) == -1:
-      raise newException(DaemonLocalError, "Missing `proto` field!")
+    pb.getRequiredField(3, stream.protocol).tryGet()
     stream.flags.incl(Inbound)
     stream.transp = transp
     if len(stream.protocol) > 0:
@@ -968,14 +953,11 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
     var pb = await transp.transactMessage(requestListPeers())
     pb.withMessage() do:
       result = newSeq[PeerInfo]()
-      var res = pb.enterSubmessage()
-      while res != 0:
-        if res == cast[int](ResponseType.PEERINFO):
-          var peer = pb.getPeerInfo()
+      var ress: seq[seq[byte]]
+      if pb.getRequiredRepeatedField(ResponseType.PEERINFO.int, ress).isOk():
+        for p in ress:
+          let peer = initProtoBuffer(p).getPeerInfo()
           result.add(peer)
-        else:
-          pb.skipSubmessage()
-        res = pb.enterSubmessage()
   finally:
     await api.closeConnection(transp)
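`listPeers` is the repeated-field counterpart: instead of walking submessages with `enterSubmessage`/`skipSubmessage`, the new code fetches every occurrence of a field as a raw blob and decodes each blob as its own `ProtoBuffer`. A hedged, self-contained sketch of that shape, using only procs shown in this diff (the data and field numbers are hand-rolled for illustration):

import libp2p/protobuf/minprotobuf

# Encode two nested messages into repeated field 2 of an outer message.
var inner1 = initProtoBuffer()
inner1.write(1, "alpha")
inner1.finish()
var inner2 = initProtoBuffer()
inner2.write(1, "beta")
inner2.finish()
var outer = initProtoBuffer()
outer.write(2, inner1)
outer.write(2, inner2)
outer.finish()

# Decode: collect all field-2 blobs, then parse each one separately.
let pb = initProtoBuffer(outer.buffer)
var blobs: seq[seq[byte]]
if pb.getRequiredRepeatedField(2, blobs).isOk():
  for blob in blobs:
    var name: string
    discard initProtoBuffer(blob).getField(1, name)
    echo name                     # alpha, then beta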
@@ -1010,51 +992,61 @@ proc cmTrimPeers*(api: DaemonAPI) {.async.} =
   finally:
     await api.closeConnection(transp)

-proc dhtGetSinglePeerInfo(pb: var ProtoBuffer): PeerInfo
+proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
     {.raises: [Defect, DaemonLocalError].} =
-  if pb.enterSubmessage() == 2:
-    result = pb.getPeerInfo()
+  var res: seq[byte]
+  if pb.getRequiredField(2, res).isOk():
+    result = initProtoBuffer(res).getPeerInfo()
   else:
     raise newException(DaemonLocalError, "Missing required field `peer`!")

-proc dhtGetSingleValue(pb: var ProtoBuffer): seq[byte]
+proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte]
     {.raises: [Defect, DaemonLocalError].} =
   result = newSeq[byte]()
-  if pb.getLengthValue(3, result) == -1:
+  if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")

-proc dhtGetSinglePublicKey(pb: var ProtoBuffer): PublicKey
+proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
     {.raises: [Defect, DaemonLocalError].} =
-  if pb.getValue(3, result) == -1:
+  if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")

-proc dhtGetSinglePeerID(pb: var ProtoBuffer): PeerID
+proc dhtGetSinglePeerID(pb: ProtoBuffer): PeerID
     {.raises: [Defect, DaemonLocalError].} =
-  if pb.getValue(3, result) == -1:
+  if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")

-proc enterDhtMessage(pb: var ProtoBuffer, rt: DHTResponseType)
+proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
     {.inline, raises: [Defect, DaemonLocalError].} =
-  var dtype: uint
-  var res = pb.enterSubmessage()
-  if res == cast[int](ResponseType.DHT):
-    if pb.getVarintValue(1, dtype) == 0:
+  var dhtResponse: seq[byte]
+  if pb.getRequiredField(ResponseType.DHT.int, dhtResponse).isOk():
+    var pbDhtResponse = initProtoBuffer(dhtResponse)
+    var dtype: uint
+    if pbDhtResponse.getRequiredField(1, dtype).isErr():
       raise newException(DaemonLocalError, "Missing required DHT field `type`!")
     if dtype != cast[uint](rt):
       raise newException(DaemonLocalError, "Wrong DHT answer type! ")
+
+    var value: seq[byte]
+    if pbDhtResponse.getRequiredField(3, value).isErr():
+      raise newException(DaemonLocalError, "Missing required DHT field `value`!")
+
+    return initProtoBuffer(value)
   else:
     raise newException(DaemonLocalError, "Wrong message type!")

-proc enterPsMessage(pb: var ProtoBuffer)
+proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
     {.inline, raises: [Defect, DaemonLocalError].} =
-  var res = pb.enterSubmessage()
-  if res != cast[int](ResponseType.PUBSUB):
+  var res: seq[byte]
+  if pb.getRequiredField(ResponseType.PUBSUB.int, res).isErr():
     raise newException(DaemonLocalError, "Wrong message type!")

-proc getDhtMessageType(pb: var ProtoBuffer): DHTResponseType
+  initProtoBuffer(res)
+
+proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
     {.inline, raises: [Defect, DaemonLocalError].} =
   var dtype: uint
-  if pb.getVarintValue(1, dtype) == 0:
+  if pb.getRequiredField(1, dtype).isErr():
     raise newException(DaemonLocalError, "Missing required DHT field `type`!")
   if dtype == cast[uint](DHTResponseType.VALUE):
     result = DHTResponseType.VALUE
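Since `enterDhtMessage` now returns the nested `ProtoBuffer` rather than mutating the parser in place, the one-shot call sites below collapse into a single chained expression, while the streaming endpoints keep only the envelope check and discard the payload. The two call shapes, as they appear in the hunks that follow:

result = pb.enterDhtMessage(DHTResponseType.VALUE).dhtGetSingleValue()
discard pb.enterDhtMessage(DHTResponseType.BEGIN)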
@@ -1073,8 +1065,7 @@ proc dhtFindPeer*(api: DaemonAPI, peer: PeerID,
   try:
     var pb = await transp.transactMessage(requestDHTFindPeer(peer, timeout))
     withMessage(pb) do:
-      pb.enterDhtMessage(DHTResponseType.VALUE)
-      result = pb.dhtGetSinglePeerInfo()
+      result = pb.enterDhtMessage(DHTResponseType.VALUE).dhtGetSinglePeerInfo()
   finally:
     await api.closeConnection(transp)

@@ -1088,8 +1079,7 @@ proc dhtGetPublicKey*(api: DaemonAPI, peer: PeerID,
   try:
     var pb = await transp.transactMessage(requestDHTGetPublicKey(peer, timeout))
     withMessage(pb) do:
-      pb.enterDhtMessage(DHTResponseType.VALUE)
-      result = pb.dhtGetSinglePublicKey()
+      result = pb.enterDhtMessage(DHTResponseType.VALUE).dhtGetSinglePublicKey()
   finally:
     await api.closeConnection(transp)

@@ -1103,8 +1093,7 @@ proc dhtGetValue*(api: DaemonAPI, key: string,
   try:
     var pb = await transp.transactMessage(requestDHTGetValue(key, timeout))
     withMessage(pb) do:
-      pb.enterDhtMessage(DHTResponseType.VALUE)
-      result = pb.dhtGetSingleValue()
+      result = pb.enterDhtMessage(DHTResponseType.VALUE).dhtGetSingleValue()
   finally:
     await api.closeConnection(transp)
@@ -1148,7 +1137,7 @@ proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerID,
     let spb = requestDHTFindPeersConnectedToPeer(peer, timeout)
     var pb = await transp.transactMessage(spb)
     withMessage(pb) do:
-      pb.enterDhtMessage(DHTResponseType.BEGIN)
+      discard pb.enterDhtMessage(DHTResponseType.BEGIN)
       while true:
         var message = await transp.recvMessage()
         if len(message) == 0:

@@ -1173,7 +1162,7 @@ proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
     let spb = requestDHTGetClosestPeers(key, timeout)
     var pb = await transp.transactMessage(spb)
     withMessage(pb) do:
-      pb.enterDhtMessage(DHTResponseType.BEGIN)
+      discard pb.enterDhtMessage(DHTResponseType.BEGIN)
       while true:
         var message = await transp.recvMessage()
         if len(message) == 0:

@@ -1198,7 +1187,7 @@ proc dhtFindProviders*(api: DaemonAPI, cid: Cid, count: uint32,
     let spb = requestDHTFindProviders(cid, count, timeout)
     var pb = await transp.transactMessage(spb)
     withMessage(pb) do:
-      pb.enterDhtMessage(DHTResponseType.BEGIN)
+      discard pb.enterDhtMessage(DHTResponseType.BEGIN)
       while true:
         var message = await transp.recvMessage()
         if len(message) == 0:

@@ -1222,7 +1211,7 @@ proc dhtSearchValue*(api: DaemonAPI, key: string,
   try:
     var pb = await transp.transactMessage(requestDHTSearchValue(key, timeout))
     withMessage(pb) do:
-      pb.enterDhtMessage(DHTResponseType.BEGIN)
+      discard pb.enterDhtMessage(DHTResponseType.BEGIN)
       while true:
         var message = await transp.recvMessage()
         if len(message) == 0:
@@ -1241,12 +1230,9 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
   try:
     var pb = await transp.transactMessage(requestPSGetTopics())
     withMessage(pb) do:
-      pb.enterPsMessage()
+      let innerPb = pb.enterPsMessage()
       var topics = newSeq[string]()
-      var topic = ""
-      while pb.getString(1, topic) != -1:
-        topics.add(topic)
-        topic.setLen(0)
+      discard innerPb.getRepeatedField(1, topics)
       result = topics
   finally:
     await api.closeConnection(transp)
@@ -1260,11 +1246,10 @@ proc pubsubListPeers*(api: DaemonAPI,
     var pb = await transp.transactMessage(requestPSListPeers(topic))
     withMessage(pb) do:
       var peer: PeerID
-      pb.enterPsMessage()
-      var peers = newSeq[PeerID]()
-      while pb.getValue(2, peer) != -1:
-        peers.add(peer)
-      result = peers
+      let innerPb = pb.enterPsMessage()
+      var peers = newSeq[seq[byte]]()
+      discard innerPb.getRepeatedField(2, peers)
+      result = peers.mapIt(PeerId.init(it).get())
   finally:
     await api.closeConnection(transp)
@@ -1279,24 +1264,15 @@ proc pubsubPublish*(api: DaemonAPI, topic: string,
   finally:
     await api.closeConnection(transp)

-proc getPubsubMessage*(pb: var ProtoBuffer): PubSubMessage =
+proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
   result.data = newSeq[byte]()
   result.seqno = newSeq[byte]()
-  discard pb.getValue(1, result.peer)
-  discard pb.getBytes(2, result.data)
-  discard pb.getBytes(3, result.seqno)
-  var item = newSeq[byte]()
-  while true:
-    if pb.getBytes(4, item) == -1:
-      break
-    var copyitem = item
-    var stritem = cast[string](copyitem)
-    if len(result.topics) == 0:
-      result.topics = newSeq[string]()
-    result.topics.add(stritem)
-    item.setLen(0)
-  discard pb.getValue(5, result.signature)
-  discard pb.getValue(6, result.key)
+  discard pb.getField(1, result.peer)
+  discard pb.getField(2, result.data)
+  discard pb.getField(3, result.seqno)
+  discard pb.getRepeatedField(4, result.topics)
+  discard pb.getField(5, result.signature)
+  discard pb.getField(6, result.key)

 proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
   while true:

@@ -20,6 +20,7 @@ import dial,
        connmanager,
        stream/connection,
        transports/transport,
+       nameresolving/nameresolver,
        errors

 export dial, errors

@@ -41,6 +42,7 @@ type
     connManager: ConnManager
     dialLock: Table[PeerID, AsyncLock]
     transports: seq[Transport]
+    nameResolver: NameResolver

 proc dialAndUpgrade(
   self: Dialer,

@@ -49,58 +51,63 @@ proc dialAndUpgrade(
     Future[Connection] {.async.} =
   debug "Dialing peer", peerId

-  # Avoid "cannot be captured as it would violate memory safety" errors in Nim-1.4.x.
-  var
-    transport: Transport
-    address: MultiAddress
-
-  for t in self.transports: # for each transport
-    transport = t
-    for a in addrs: # for each address
-      address = a
-      if t.handles(a): # check if it can dial it
-        trace "Dialing address", address = $a, peerId
-        let dialed = try:
-          libp2p_total_dial_attempts.inc()
-          # await a connection slot when the total
-          # connection count is equal to `maxConns`
-          await self.connManager.trackOutgoingConn(
-            () => transport.dial(address)
-          )
-        except TooManyConnectionsError as exc:
-          trace "Connection limit reached!"
-          raise exc
-        except CancelledError as exc:
-          debug "Dialing canceled", msg = exc.msg, peerId
-          raise exc
-        except CatchableError as exc:
-          debug "Dialing failed", msg = exc.msg, peerId
-          libp2p_failed_dials.inc()
-          continue # Try the next address
+  for address in addrs: # for each address
+    let
+      hostname = address.getHostname()
+      resolvedAddresses =
+        if isNil(self.nameResolver): @[address]
+        else: await self.nameResolver.resolveMAddress(address)
+
+    for a in resolvedAddresses: # for each resolved address
+      for transport in self.transports: # for each transport
+        if transport.handles(a): # check if it can dial it
+          trace "Dialing address", address = $a, peerId, hostname
+          let dialed = try:
+            libp2p_total_dial_attempts.inc()
+            # await a connection slot when the total
+            # connection count is equal to `maxConns`
+            #
+            # Need to copy to avoid "cannot be captured" errors in Nim-1.4.x.
+            let
+              transportCopy = transport
+              addressCopy = a
+            await self.connManager.trackOutgoingConn(
+              () => transportCopy.dial(hostname, addressCopy)
+            )
+          except TooManyConnectionsError as exc:
+            trace "Connection limit reached!"
+            raise exc
+          except CancelledError as exc:
+            debug "Dialing canceled", msg = exc.msg, peerId
+            raise exc
+          except CatchableError as exc:
+            debug "Dialing failed", msg = exc.msg, peerId
+            libp2p_failed_dials.inc()
+            continue # Try the next address

           # make sure to assign the peer to the connection
           dialed.peerId = peerId

           # also keep track of the connection's bottom unsafe transport direction
           # required by gossipsub scoring
           dialed.transportDir = Direction.Out

           libp2p_successful_dials.inc()

           let conn = try:
               await transport.upgradeOutgoing(dialed)
             except CatchableError as exc:
               # If we failed to establish the connection through one transport,
               # we won't succeed through another - no use in trying again
               await dialed.close()
               debug "Upgrade failed", msg = exc.msg, peerId
               if exc isnot CancelledError:
                 libp2p_failed_upgrades_outgoing.inc()
               raise exc

           doAssert not isNil(conn), "connection died after upgradeOutgoing"
           debug "Dial successful", conn, peerId = conn.peerId
           return conn

 proc internalConnect(
   self: Dialer,
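The rewritten `dialAndUpgrade` iterates addresses first and transports second, expanding each address through the optional `NameResolver` before any transport dials it. A hedged usage sketch of that resolution step, to be run inside an async proc; the concrete `resolver` instance and the example address are assumptions, only `resolveMAddress` comes from this diff:

# Expand a DNS multiaddress into concrete addresses before dialing.
let ma = MultiAddress.init("/dns4/example.com/tcp/443").tryGet()
let resolved = await resolver.resolveMAddress(ma)
# `resolved` then holds concrete entries such as /ip4/.../tcp/443.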
@@ -234,9 +241,11 @@ proc new*(
   localPeerId: PeerId,
   connManager: ConnManager,
   transports: seq[Transport],
-  ms: MultistreamSelect): Dialer =
+  ms: MultistreamSelect,
+  nameResolver: NameResolver = nil): Dialer =

   T(localPeerId: localPeerId,
     connManager: connManager,
     transports: transports,
-    ms: ms)
+    ms: ms,
+    nameResolver: nameResolver)

@@ -426,7 +426,7 @@ const

   Unreliable* = mapOr(UDP)

-  Reliable* = mapOr(TCP, UTP, QUIC)
+  Reliable* = mapOr(TCP, UTP, QUIC, WebSockets)

   IPFS* = mapAnd(Reliable, mapEq("p2p"))
@@ -944,59 +944,6 @@ proc `==`*(m1: var MultiAddress, m2: MultiAddress): bool =
   ## Check of two MultiAddress are equal
   m1.data == m2.data

-proc isWire*(ma: MultiAddress): bool =
-  ## Returns ``true`` if MultiAddress ``ma`` is one of:
-  ## - {IP4}/{TCP, UDP}
-  ## - {IP6}/{TCP, UDP}
-  ## - {UNIX}/{PATH}
-
-  var state = 0
-  const
-    wireProtocols = toHashSet([
-      multiCodec("ip4"), multiCodec("ip6"),
-    ])
-    wireTransports = toHashSet([
-      multiCodec("tcp"), multiCodec("udp")
-    ])
-  try:
-    for rpart in ma.items():
-      if rpart.isErr():
-        return false
-      let part = rpart.get()
-
-      if state == 0:
-        let rcode = part.protoCode()
-        if rcode.isErr():
-          return false
-        let code = rcode.get()
-
-        if code in wireProtocols:
-          inc(state)
-          continue
-        elif code == multiCodec("unix"):
-          result = true
-          break
-        else:
-          result = false
-          break
-      elif state == 1:
-        let rcode = part.protoCode()
-        if rcode.isErr():
-          return false
-        let code = rcode.get()
-
-        if code in wireTransports:
-          inc(state)
-          result = true
-        else:
-          result = false
-          break
-      else:
-        result = false
-        break
-  except:
-    result = false
-
 proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
   var empty: seq[MultiCodec]
   var pcs = protos
@@ -1073,7 +1020,7 @@ proc getField*(pb: var ProtoBuffer, field: int,
   else:
     err(ProtoError.IncorrectBlob)

-proc getRepeatedField*(pb: var ProtoBuffer, field: int,
+proc getRepeatedField*(pb: ProtoBuffer, field: int,
                        value: var seq[MultiAddress]): ProtoResult[bool] {.
      inline.} =
   var items: seq[seq[byte]]

@@ -43,9 +43,6 @@ type
 proc new*(T: typedesc[MultistreamSelect]): T =
   T(codec: MSCodec)

-proc newMultistream*(): MultistreamSelect {.deprecated: "use MultistreamSelect.new".} =
-  MultistreamSelect.new()
-
 template validateSuffix(str: string): untyped =
   if str.endsWith("\n"):
     str.removeSuffix("\n")

@@ -192,7 +192,7 @@ method handle*(m: Mplex) {.async, gcsafe.} =
     await m.close()
     trace "Stopped mplex handler", m

-proc init*(M: type Mplex,
+proc new*(M: type Mplex,
           conn: Connection,
           inTimeout, outTimeout: Duration = DefaultChanTimeout,
           maxChannCount: int = MaxChannelCount): Mplex =

@@ -58,9 +58,6 @@ proc new*(
   muxerProvider.init()
   muxerProvider

-proc newMuxerProvider*(creator: MuxerConstructor, codec: string): MuxerProvider {.gcsafe, deprecated: "use MuxerProvider.new".} =
-  MuxerProvider.new(creator, codec)
-
 method init(c: MuxerProvider) =
   proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
     trace "starting muxer handler", proto=proto, conn

@@ -40,12 +40,10 @@ method resolveIp*(

   doAssert(false, "Not implemented!")

-proc getHostname(ma: MultiAddress): string =
-  var dnsbuf = newSeq[byte](256)
-
-  let dnsLen = ma[0].get().protoArgument(dnsbuf).get()
-  dnsbuf.setLen(dnsLen)
-  return string.fromBytes(dnsbuf)
+proc getHostname*(ma: MultiAddress): string =
+  let firstPart = ($ma[0].get()).split('/')
+  if firstPart.len > 1: firstPart[2]
+  else: ""

 proc resolveDnsAddress(
   self: NameResolver,
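The new `getHostname` is purely textual: it renders the first multiaddress part and splits it on `/`. For `/dns4/example.com/tcp/443`, `$ma[0].get()` is `/dns4/example.com`, which splits into `["", "dns4", "example.com"]`, so index 2 is the hostname (the `firstPart.len > 1` guard only filters out empty renderings). A small sketch, assuming the exports shown above:

import libp2p/multiaddress, libp2p/nameresolving/nameresolver

let ma = MultiAddress.init("/dns4/example.com/tcp/443").tryGet()
echo ma.getHostname()             # example.com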
@@ -122,27 +120,26 @@ proc resolveDnsAddr(
     return result

-proc resolveMAddresses*(
+proc resolveMAddress*(
   self: NameResolver,
-  addrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
+  address: MultiAddress): Future[seq[MultiAddress]] {.async.} =
   var res = initOrderedSet[MultiAddress]()

-  for address in addrs:
-    if not DNS.matchPartial(address):
-      res.incl(address)
-    else:
-      let code = address[0].get().protoCode().get()
-      let seq = case code:
-        of multiCodec("dns"):
-          await self.resolveDnsAddress(address)
-        of multiCodec("dns4"):
-          await self.resolveDnsAddress(address, Domain.AF_INET)
-        of multiCodec("dns6"):
-          await self.resolveDnsAddress(address, Domain.AF_INET6)
-        of multiCodec("dnsaddr"):
-          await self.resolveDnsAddr(address)
-        else:
-          @[address]
-      for ad in seq:
-        res.incl(ad)
+  if not DNS.matchPartial(address):
+    res.incl(address)
+  else:
+    let code = address[0].get().protoCode().get()
+    let seq = case code:
+      of multiCodec("dns"):
+        await self.resolveDnsAddress(address)
+      of multiCodec("dns4"):
+        await self.resolveDnsAddress(address, Domain.AF_INET)
+      of multiCodec("dns6"):
+        await self.resolveDnsAddress(address, Domain.AF_INET6)
+      of multiCodec("dnsaddr"):
+        await self.resolveDnsAddr(address)
+      else:
+        @[address]
+    for ad in seq:
+      res.incl(ad)

   return res.toSeq

@@ -197,21 +197,6 @@ func write*(vb: var VBuffer, pid: PeerID) =
   ## Write PeerID value ``peerid`` to buffer ``vb``.
   vb.writeSeq(pid.data)

-func initProtoField*(index: int, pid: PeerID): ProtoField {.deprecated.} =
-  ## Initialize ProtoField with PeerID ``value``.
-  initProtoField(index, pid.data)
-
-func getValue*(data: var ProtoBuffer, field: int, value: var PeerID): int {.
-     deprecated.} =
-  ## Read ``PeerID`` from ProtoBuf's message and validate it.
-  var pid: PeerID
-  result = getLengthValue(data, field, pid.data)
-  if result > 0:
-    if not pid.validate():
-      result = -1
-    else:
-      value = pid
-
 func write*(pb: var ProtoBuffer, field: int, pid: PeerID) =
   ## Write PeerID value ``peerid`` to object ``pb`` using ProtoBuf's encoding.
   write(pb, field, pid.data)

@@ -39,7 +39,7 @@ func shortLog*(p: PeerInfo): auto =
   )
 chronicles.formatIt(PeerInfo): shortLog(it)

-proc init*(
+proc new*(
   p: typedesc[PeerInfo],
   key: PrivateKey,
   addrs: openarray[MultiAddress] = [],
@@ -49,7 +49,7 @@ proc init*(
   {.raises: [Defect, PeerInfoError].} =

   let pubkey = try:
-      key.getKey().tryGet()
+      key.getPublicKey().tryGet()
     except CatchableError:
       raise newException(PeerInfoError, "invalid private key")

@@ -58,7 +58,8 @@ type
     BufferOverflow,
     MessageTooBig,
     BadWireType,
-    IncorrectBlob
+    IncorrectBlob,
+    RequiredFieldMissing

   ProtoResult*[T] = Result[T, ProtoError]

@@ -115,43 +116,6 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
   else:
     0

-proc initProtoField*(index: int, value: SomeVarint): ProtoField {.deprecated.} =
-  ## Initialize ProtoField with integer value.
-  result = ProtoField(kind: Varint, index: index)
-  when type(value) is uint64:
-    result.vint = value
-  else:
-    result.vint = cast[uint64](value)
-
-proc initProtoField*(index: int, value: bool): ProtoField {.deprecated.} =
-  ## Initialize ProtoField with integer value.
-  result = ProtoField(kind: Varint, index: index)
-  result.vint = byte(value)
-
-proc initProtoField*(index: int,
-                     value: openarray[byte]): ProtoField {.deprecated.} =
-  ## Initialize ProtoField with bytes array.
-  result = ProtoField(kind: Length, index: index)
-  if len(value) > 0:
-    result.vbuffer = newSeq[byte](len(value))
-    copyMem(addr result.vbuffer[0], unsafeAddr value[0], len(value))
-
-proc initProtoField*(index: int, value: string): ProtoField {.deprecated.} =
-  ## Initialize ProtoField with string.
-  result = ProtoField(kind: Length, index: index)
-  if len(value) > 0:
-    result.vbuffer = newSeq[byte](len(value))
-    copyMem(addr result.vbuffer[0], unsafeAddr value[0], len(value))
-
-proc initProtoField*(index: int,
-                     value: ProtoBuffer): ProtoField {.deprecated, inline.} =
-  ## Initialize ProtoField with nested message stored in ``value``.
-  ##
-  ## Note: This procedure performs shallow copy of ``value`` sequence.
-  result = ProtoField(kind: Length, index: index)
-  if len(value.buffer) > 0:
-    shallowCopy(result.vbuffer, value.buffer)
-
 proc initProtoBuffer*(data: seq[byte], offset = 0,
                       options: set[ProtoFlags] = {}): ProtoBuffer =
   ## Initialize ProtoBuffer with shallow copy of ``data``.
@@ -299,51 +263,6 @@ proc write*(pb: var ProtoBuffer, field: int, value: ProtoBuffer) {.inline.} =
   ## ``pb`` with field number ``field``.
   write(pb, field, value.buffer)

-proc write*(pb: var ProtoBuffer, field: ProtoField) {.deprecated.} =
-  ## Encode protobuf's field ``field`` and store it to protobuf's buffer ``pb``.
-  var length = 0
-  var res: VarintResult[void]
-  pb.buffer.setLen(len(pb.buffer) + vsizeof(field))
-  res = PB.putUVarint(pb.toOpenArray(), length, getProtoHeader(field))
-  doAssert(res.isOk())
-  pb.offset += length
-  case field.kind
-  of ProtoFieldKind.Varint:
-    res = PB.putUVarint(pb.toOpenArray(), length, field.vint)
-    doAssert(res.isOk())
-    pb.offset += length
-  of ProtoFieldKind.Fixed64:
-    doAssert(pb.isEnough(8))
-    var value = cast[uint64](field.vfloat64)
-    pb.buffer[pb.offset] = byte(value and 0xFF'u32)
-    pb.buffer[pb.offset + 1] = byte((value shr 8) and 0xFF'u64)
-    pb.buffer[pb.offset + 2] = byte((value shr 16) and 0xFF'u64)
-    pb.buffer[pb.offset + 3] = byte((value shr 24) and 0xFF'u64)
-    pb.buffer[pb.offset + 4] = byte((value shr 32) and 0xFF'u64)
-    pb.buffer[pb.offset + 5] = byte((value shr 40) and 0xFF'u64)
-    pb.buffer[pb.offset + 6] = byte((value shr 48) and 0xFF'u64)
-    pb.buffer[pb.offset + 7] = byte((value shr 56) and 0xFF'u64)
-    pb.offset += 8
-  of ProtoFieldKind.Fixed32:
-    doAssert(pb.isEnough(4))
-    var value = cast[uint32](field.vfloat32)
-    pb.buffer[pb.offset] = byte(value and 0xFF'u32)
-    pb.buffer[pb.offset + 1] = byte((value shr 8) and 0xFF'u32)
-    pb.buffer[pb.offset + 2] = byte((value shr 16) and 0xFF'u32)
-    pb.buffer[pb.offset + 3] = byte((value shr 24) and 0xFF'u32)
-    pb.offset += 4
-  of ProtoFieldKind.Length:
-    res = PB.putUVarint(pb.toOpenArray(), length, uint(len(field.vbuffer)))
-    doAssert(res.isOk())
-    pb.offset += length
-    doAssert(pb.isEnough(len(field.vbuffer)))
-    if len(field.vbuffer) > 0:
-      copyMem(addr pb.buffer[pb.offset], unsafeAddr field.vbuffer[0],
-              len(field.vbuffer))
-      pb.offset += len(field.vbuffer)
-  else:
-    discard
-
 proc finish*(pb: var ProtoBuffer) =
   ## Prepare protobuf's buffer ``pb`` for writing to stream.
   doAssert(len(pb.buffer) > 0)
@@ -657,6 +576,17 @@ proc getField*(pb: ProtoBuffer, field: int,
   else:
     err(res.error)

+proc getRequiredField*[T](pb: ProtoBuffer, field: int,
+                          output: var T): ProtoResult[void] {.inline.} =
+  let res = pb.getField(field, output)
+  if res.isOk():
+    if res.get():
+      ok()
+    else:
+      err(RequiredFieldMissing)
+  else:
+    err(res.error)
+
 proc getRepeatedField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
                                             output: var seq[T]): ProtoResult[bool] =
   checkFieldNumber(field)
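`getRequiredField` is a thin shim over `getField`: `getField` reports presence through `ProtoResult[bool]`, while `getRequiredField` maps absence onto the new `RequiredFieldMissing` error so callers can treat a missing field like any other decode failure. A sketch of the difference on a buffer that carries only field 1 (hand-rolled bytes):

import libp2p/protobuf/minprotobuf

let pb = initProtoBuffer(@[0x08'u8, 0x05'u8])  # field 1 = varint 5
var v: uint64
doAssert pb.getField(2, v).get() == false               # absent: ok(false)
doAssert pb.getRequiredField(2, v).error == RequiredFieldMissing
doAssert pb.getRequiredField(1, v).isOk() and v == 5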
@@ -733,6 +663,17 @@ proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
   else:
     ok(false)

+proc getRequiredRepeatedField*[T](pb: ProtoBuffer, field: int,
+                                  output: var seq[T]): ProtoResult[void] {.inline.} =
+  let res = pb.getRepeatedField(field, output)
+  if res.isOk():
+    if res.get():
+      ok()
+    else:
+      err(RequiredFieldMissing)
+  else:
+    err(res.error)
+
 proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
                                              output: var seq[T]): ProtoResult[bool] =
   checkFieldNumber(field)
@@ -787,93 +728,3 @@ proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
     ok(true)
   else:
     ok(false)
-
-proc getVarintValue*(data: var ProtoBuffer, field: int,
-                     value: var SomeVarint): int {.deprecated.} =
-  ## Get value of `Varint` type.
-  var length = 0
-  var header = 0'u64
-  var soffset = data.offset
-
-  if not data.isEmpty() and PB.getUVarint(data.toOpenArray(),
-                                          length, header).isOk():
-    data.offset += length
-    if header == getProtoHeader(field, Varint):
-      if not data.isEmpty():
-        when type(value) is int32 or type(value) is int64 or type(value) is int:
-          let res = getSVarint(data.toOpenArray(), length, value)
-        else:
-          let res = PB.getUVarint(data.toOpenArray(), length, value)
-        if res.isOk():
-          data.offset += length
-          result = length
-          return
-  # Restore offset on error
-  data.offset = soffset
-
-proc getLengthValue*[T: string|seq[byte]](data: var ProtoBuffer, field: int,
-                                          buffer: var T): int {.deprecated.} =
-  ## Get value of `Length` type.
-  var length = 0
-  var header = 0'u64
-  var ssize = 0'u64
-  var soffset = data.offset
-  result = -1
-  buffer.setLen(0)
-  if not data.isEmpty() and PB.getUVarint(data.toOpenArray(),
-                                          length, header).isOk():
-    data.offset += length
-    if header == getProtoHeader(field, Length):
-      if not data.isEmpty() and PB.getUVarint(data.toOpenArray(),
-                                              length, ssize).isOk():
-        data.offset += length
-        if ssize <= MaxMessageSize and data.isEnough(int(ssize)):
-          buffer.setLen(ssize)
-          # Protobuf allow zero-length values.
-          if ssize > 0'u64:
-            copyMem(addr buffer[0], addr data.buffer[data.offset], ssize)
-            result = int(ssize)
-          data.offset += int(ssize)
-          return
-  # Restore offset on error
-  data.offset = soffset
-
-proc getBytes*(data: var ProtoBuffer, field: int,
-               buffer: var seq[byte]): int {.deprecated, inline.} =
-  ## Get value of `Length` type as bytes.
-  result = getLengthValue(data, field, buffer)
-
-proc getString*(data: var ProtoBuffer, field: int,
-                buffer: var string): int {.deprecated, inline.} =
-  ## Get value of `Length` type as string.
-  result = getLengthValue(data, field, buffer)
-
-proc enterSubmessage*(pb: var ProtoBuffer): int {.deprecated.} =
-  ## Processes protobuf's sub-message and adjust internal offset to enter
-  ## inside of sub-message. Returns field index of sub-message field or
-  ## ``0`` on error.
-  var length = 0
-  var header = 0'u64
-  var msize = 0'u64
-  var soffset = pb.offset
-
-  if not pb.isEmpty() and PB.getUVarint(pb.toOpenArray(),
-                                        length, header).isOk():
-    pb.offset += length
-    if (header and 0x07'u64) == cast[uint64](ProtoFieldKind.Length):
-      if not pb.isEmpty() and PB.getUVarint(pb.toOpenArray(),
-                                            length, msize).isOk():
-        pb.offset += length
-        if msize <= MaxMessageSize and pb.isEnough(int(msize)):
-          pb.length = int(msize)
-          result = int(header shr 3)
-          return
-  # Restore offset on error
-  pb.offset = soffset
-
-proc skipSubmessage*(pb: var ProtoBuffer) {.deprecated.} =
-  ## Skip current protobuf's sub-message and adjust internal offset to the
-  ## end of sub-message.
-  doAssert(pb.length != 0)
-  pb.offset += pb.length
-  pb.length = 0
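Callers of the removed accessors relied on sentinel integers (the value length on success, 0 or -1 on failure) that were easy to ignore; the surviving API forces the check through Result. A hedged migration sketch (variable names illustrative):

    var seqno: uint64
    # before this commit, with the deprecated getter:
    #   if pb.getVarintValue(1, seqno) <= 0: return err(...)
    # after, with the Result-based getter added above:
    if pb.getRequiredField(1, seqno).isErr():
      return err(RequiredFieldMissing)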
@@ -122,9 +122,6 @@ proc new*(T: typedesc[Identify], peerInfo: PeerInfo): T =
   identify.init()
   identify

-proc newIdentify*(peerInfo: PeerInfo): Identify {.deprecated: "use Identify.new".} =
-  Identify.new(peerInfo)
-
 method init*(p: Identify) =
   proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
     try:
@@ -49,7 +49,7 @@ method init*(p: Ping) =
       var buf: array[PingSize, byte]
       await conn.readExactly(addr buf[0], PingSize)
       trace "echoing ping", conn
-      await conn.write(addr buf[0], PingSize)
+      await conn.write(@buf)
       if not isNil(p.pingHandler):
         await p.pingHandler(conn.peerId)
     except CancelledError as exc:

@@ -79,7 +79,7 @@ proc ping*(
   let startTime = Moment.now()

   trace "sending ping", conn
-  await conn.write(addr randomBuf[0], randomBuf.len)
+  await conn.write(@randomBuf)

   await conn.readExactly(addr resultBuf[0], PingSize)
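Both ping paths now hand the stream a seq[byte] instead of a raw pointer and length, matching the removal of the pointer-based LPStream.write later in this diff. The same conversion applies at any call site; a sketch:

    var buf: array[32, byte]
    # old, pointer-based (relied on the deprecated write overload):
    #   await conn.write(addr buf[0], buf.len)
    # new: `@buf` copies the array into a seq[byte] that write accepts
    await conn.write(@buf)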
@@ -16,6 +16,7 @@ import ./pubsub,
        ./timedcache,
        ./peertable,
        ./rpc/[message, messages],
+       ../../crypto/crypto,
        ../../stream/connection,
        ../../peerid,
        ../../peerinfo,
@@ -207,8 +208,7 @@ method initPubSub*(f: FloodSub)
   {.raises: [Defect, InitializationError].} =
   procCall PubSub(f).initPubSub()
   f.seen = TimedCache[MessageID].init(2.minutes)
-  var rng = newRng()
   f.seenSalt = newSeqUninitialized[byte](sizeof(Hash))
-  brHmacDrbgGenerate(rng[], f.seenSalt)
+  brHmacDrbgGenerate(f.rng[], f.seenSalt)

   f.init()
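FloodSub still salts its seen-cache keys; the change is only where the salt's randomness comes from, namely the node-wide rng injected through PubSub rather than a one-off generator. A sketch of why the salt matters, simplified from the real addSeen (helper name hypothetical):

    # mixing node-local secret bytes into the cache key means a remote
    # peer cannot predict how its message IDs hash, which blocks crafted
    # collision floods against the seen table
    proc saltedId(msgId: MessageID, seenSalt: seq[byte]): MessageID =
      msgId & seenSalt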
@@ -9,7 +9,7 @@

 {.push raises: [Defect].}

-import std/[tables, sets, options, sequtils, random]
+import std/[tables, sets, options, sequtils]
 import chronos, chronicles, metrics
 import ./pubsub,
        ./floodsub,

@@ -37,6 +37,7 @@ logScope:

 declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
 declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
+declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")

 proc init*(_: type[GossipSubParams]): GossipSubParams =
   GossipSubParams(

@@ -295,11 +296,12 @@ method rpcHandler*(g: GossipSub,

   for i in 0..<rpcMsg.messages.len(): # for every message
     template msg: untyped = rpcMsg.messages[i]
-    let msgId = g.msgIdProvider(msg)
+    let
+      msgId = g.msgIdProvider(msg)
+      msgIdSalted = msgId & g.seenSalt

-    # avoid the remote peer from controlling the seen table hashing
-    # by adding random bytes to the ID we ensure we randomize the IDs
-    # we do only for seen as this is the great filter from the external world
+    # addSeen adds salt to msgId to avoid
+    # remote attacking the hash function
     if g.addSeen(msgId):
       trace "Dropping already-seen message", msgId = shortLog(msgId), peer
       # make sure to update score tho before continuing

@@ -307,6 +309,8 @@ method rpcHandler*(g: GossipSub,
         # score only if messages are not too old.
         g.rewardDelivered(peer, msg.topicIDs, false)

+      g.validationSeen.withValue(msgIdSalted, seen): seen[].incl(peer)
+
       # onto the next message
       continue

@@ -332,7 +336,16 @@ method rpcHandler*(g: GossipSub,
     # g.anonymize needs no evaluation when receiving messages
     # as we have a "lax" policy and allow signed messages

+    # Be careful not to fill the validationSeen table
+    # (eg, pop everything you put in it)
+    g.validationSeen[msgIdSalted] = initHashSet[PubSubPeer]()
+
     let validation = await g.validate(msg)
+
+    var seenPeers: HashSet[PubSubPeer]
+    discard g.validationSeen.pop(msgIdSalted, seenPeers)
+    libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
+
     case validation
     of ValidationResult.Reject:
       debug "Dropping message after validation, reason: reject",

@@ -351,7 +364,7 @@ method rpcHandler*(g: GossipSub,

     g.rewardDelivered(peer, msg.topicIDs, true)

-    var toSendPeers = initHashSet[PubSubPeer]()
+    var toSendPeers = HashSet[PubSubPeer]()
     for t in msg.topicIDs: # for every topic in the message
       if t notin g.topics:
         continue

@@ -361,6 +374,11 @@ method rpcHandler*(g: GossipSub,

       await handleData(g, t, msg.data)

+    # Don't send it to source peer, or peers that
+    # sent it during validation
+    toSendPeers.excl(peer)
+    toSendPeers.excl(seenPeers)
+
     # In theory, if topics are the same in all messages, we could batch - we'd
     # also have to be careful to only include validated messages
     g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
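Taken together, the rpcHandler hunks implement a strict fill/pop discipline around validation: an entry keyed by the salted message ID is created before validate, duplicates arriving mid-validation register themselves in it, and the entry is always popped afterwards so validationSeen cannot grow without bound. A condensed sketch of the lifecycle, not the verbatim handler:

    g.validationSeen[msgIdSalted] = initHashSet[PubSubPeer]()  # fill...
    let validation = await g.validate(msg)
    var seenPeers: HashSet[PubSubPeer]
    discard g.validationSeen.pop(msgIdSalted, seenPeers)       # ...always pop
    if validation == ValidationResult.Accept:
      toSendPeers.excl(peer)       # never echo to the sender
      toSendPeers.excl(seenPeers)  # nor to peers who sent it while we validated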
@@ -503,9 +521,9 @@ proc maintainDirectPeers(g: GossipSub) {.async.} =
         let _ = await g.switch.dial(id, addrs, g.codecs)
         # populate the peer after it's connected
         discard g.getOrCreatePeer(id, g.codecs)
-      except CancelledError:
+      except CancelledError as exc:
         trace "Direct peer dial canceled"
-        raise
+        raise exc
       except CatchableError as exc:
         debug "Direct peer error dialing", msg = exc.msg

@@ -548,8 +566,6 @@ method initPubSub*(g: GossipSub)
   if validationRes.isErr:
     raise newException(InitializationError, $validationRes.error)

-  randomize()
-
   # init the floodsub stuff here, we customize timedcache in gossip!
   g.seen = TimedCache[MessageID].init(g.parameters.seenTTL)
@@ -10,7 +10,6 @@
 {.push raises: [Defect].}

 import std/[tables, sequtils, sets, algorithm]
-import random # for shuffle
 import chronos, chronicles, metrics
 import "."/[types, scoring]
 import ".."/[pubsubpeer, peertable, timedcache, mcache, floodsub, pubsub]

@@ -215,7 +214,7 @@ proc handleIHave*(g: GossipSub,
         break
   # shuffling res.messageIDs before sending it out to increase the likelihood
   # of getting an answer if the peer truncates the list due to internal size restrictions.
-  shuffle(res.messageIDs)
+  g.rng.shuffle(res.messageIDs)
   return res

 proc handleIWant*(g: GossipSub,
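Every shuffle in the gossip behaviour now draws from g.rng, the BearSSL HMAC-DRBG owned by PubSub, instead of std/random's global, process-wide generator, which is also why the randomize() call above could go. A minimal sketch of a DRBG-driven Fisher-Yates shuffle; the actual helper comes from the crypto module and may differ in detail:

    import bearssl

    proc shuffle[T](rng: ref BrHmacDrbgContext, x: var openArray[T]) =
      var buf: array[2, byte]
      for i in countdown(x.high, 1):
        brHmacDrbgGenerate(rng[], buf)   # two fresh random bytes per step
        let j = (buf[0].int shl 8 or buf[1].int) mod (i + 1)
        swap(x[i], x[j])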
@@ -282,7 +281,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
       )

       # shuffle anyway, score might be not used
-      shuffle(candidates)
+      g.rng.shuffle(candidates)

       # sort peers by score, high score first since we graft
       candidates.sort(byScore, SortOrder.Descending)

@@ -318,7 +317,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
       )

       # shuffle anyway, score might be not used
-      shuffle(candidates)
+      g.rng.shuffle(candidates)

       # sort peers by score, high score first, we are grafting
       candidates.sort(byScore, SortOrder.Descending)

@@ -350,7 +349,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
     prunes.keepIf do (x: PubSubPeer) -> bool: x notin grafts

     # shuffle anyway, score might be not used
-    shuffle(prunes)
+    g.rng.shuffle(prunes)

     # sort peers by score (inverted), pruning, so low score peers are on top
     prunes.sort(byScore, SortOrder.Ascending)

@@ -382,7 +381,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
     if pruneLen > 0:
       # Ok we got some peers to prune,
       # for this heartbeat let's prune those
-      shuffle(prunes)
+      g.rng.shuffle(prunes)
       prunes.setLen(pruneLen)

     trace "pruning", prunes = prunes.len

@@ -519,7 +518,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
   # similar to rust: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/behaviour.rs#L2101
   # and go https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L582
   if midsSeq.len > IHaveMaxLength:
-    shuffle(midsSeq)
+    g.rng.shuffle(midsSeq)
     midsSeq.setLen(IHaveMaxLength)

   let

@@ -540,7 +539,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
     target = min(factor, allPeers.len)

   if target < allPeers.len:
-    shuffle(allPeers)
+    g.rng.shuffle(allPeers)
     allPeers.setLen(target)

   for peer in allPeers:
@@ -139,6 +139,7 @@ type
     disconnectBadPeers*: bool

   BackoffTable* = Table[string, Table[PeerID, Moment]]
+  ValidationSeenTable* = Table[MessageID, HashSet[PubSubPeer]]

   GossipSub* = ref object of FloodSub
     mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic

@@ -150,6 +151,7 @@ type
     gossip*: Table[string, seq[ControlIHave]] # pending gossip
     control*: Table[string, ControlMessage] # pending control messages
     mcache*: MCache # messages cache
+    validationSeen*: ValidationSeenTable # peers who sent us message in validation
     heartbeatFut*: Future[void] # cancellation future for heartbeat interval
     heartbeatRunning*: bool
@@ -10,11 +10,12 @@
 {.push raises: [Defect].}

 import std/[tables, sequtils, sets, strutils]
-import chronos, chronicles, metrics
+import chronos, chronicles, metrics, bearssl
 import ./pubsubpeer,
        ./rpc/[message, messages, protobuf],
        ../../switch,
        ../protocol,
+       ../../crypto/crypto,
        ../../stream/connection,
        ../../peerid,
        ../../peerinfo,

@@ -106,6 +107,15 @@ type
     anonymize*: bool # if we omit fromPeer and seqno from RPC messages we send
     subscriptionValidator*: SubscriptionValidator # callback used to validate subscriptions
     topicsHigh*: int # the maximum number of topics a peer is allowed to subscribe to
+    maxMessageSize*: int ##\
+    ## the maximum raw message size we'll globally allow
+    ## for finer tuning, check message size on topic validator
+    ##
+    ## sending a big message to a peer with a lower size limit can
+    ## lead to issues, from descoring to connection drops
+    ##
+    ## defaults to 1mB
+    rng*: ref BrHmacDrbgContext

     knownTopics*: HashSet[string]
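Both new fields are plain constructor arguments with defaults, so applications opt in at initialization time. A usage sketch, assuming the init entry point shown further down is reached through GossipSub.init (the switch value and sizes are illustrative):

    let pubsub = GossipSub.init(
      switch = switch,
      maxMessageSize = 4 * 1024 * 1024,       # raise the 1 MiB default to 4 MiB
      rng = newRng(),                          # or a shared/pre-seeded DRBG
      parameters = GossipSubParams.init())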
@@ -283,7 +293,7 @@ proc getOrCreatePeer*(
       p.onPubSubPeerEvent(peer, event)

   # create new pubsub peer
-  let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, onEvent, protos[0])
+  let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, onEvent, protos[0], p.maxMessageSize)
   debug "created new pubsub peer", peerId

   p.peers[peerId] = pubSubPeer

@@ -538,6 +548,8 @@ proc init*[PubParams: object | bool](
     sign: bool = true,
     msgIdProvider: MsgIdProvider = defaultMsgIdProvider,
     subscriptionValidator: SubscriptionValidator = nil,
+    maxMessageSize: int = 1024 * 1024,
+    rng: ref BrHmacDrbgContext = newRng(),
     parameters: PubParams = false): P
     {.raises: [Defect, InitializationError].} =
   let pubsub =

@@ -550,6 +562,8 @@ proc init*[PubParams: object | bool](
         sign: sign,
         msgIdProvider: msgIdProvider,
         subscriptionValidator: subscriptionValidator,
+        maxMessageSize: maxMessageSize,
+        rng: rng,
         topicsHigh: int.high)
     else:
       P(switch: switch,

@@ -561,6 +575,8 @@ proc init*[PubParams: object | bool](
         msgIdProvider: msgIdProvider,
         subscriptionValidator: subscriptionValidator,
         parameters: parameters,
+        maxMessageSize: maxMessageSize,
+        rng: rng,
         topicsHigh: int.high)

   proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
@@ -60,6 +60,7 @@ type
     score*: float64
     iWantBudget*: int
     iHaveBudget*: int
+    maxMessageSize: int
     appScore*: float64 # application specific score
     behaviourPenalty*: float64 # the eventual penalty score

@@ -119,7 +120,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
       while not conn.atEof:
        trace "waiting for data", conn, peer = p, closed = conn.closed

-        var data = await conn.readLp(64 * 1024)
+        var data = await conn.readLp(p.maxMessageSize)
        trace "read data from peer",
              conn, peer = p, closed = conn.closed,
              data = data.shortLog

@@ -186,8 +187,8 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
     try:
       if p.onEvent != nil:
         p.onEvent(p, PubsubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
-    except CancelledError:
-      raise
+    except CancelledError as exc:
+      raise exc
     except CatchableError as exc:
       debug "Errors during diconnection events", error = exc.msg

@@ -243,6 +244,10 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect].} =
     debug "empty message, skipping", p, msg = shortLog(msg)
     return

+  if msg.len > p.maxMessageSize:
+    info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
+    return
+
   let conn = p.sendConn
   if conn == nil or conn.closed():
     trace "No send connection, skipping message", p, msg = shortLog(msg)
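The limit is now enforced on both directions of the wire: handle reads at most p.maxMessageSize bytes per length-prefixed frame, and sendEncoded drops anything larger, so mismatched limits between peers degrade to lost messages rather than broken connections. A guard an application might still want before publishing (helper name hypothetical):

    proc fitsPubsub(payload: seq[byte], maxMessageSize = 1024 * 1024): bool =
      # the encoded RPC adds protobuf and framing overhead on top of the
      # payload, so leave headroom rather than publishing right at the limit
      payload.len + 1024 < maxMessageSize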
@@ -280,7 +285,8 @@ proc new*(
   getConn: GetConn,
   dropConn: DropConn,
   onEvent: OnEvent,
-  codec: string): T =
+  codec: string,
+  maxMessageSize: int): T =

   T(
     getConn: getConn,

@@ -288,19 +294,5 @@ proc new*(
     onEvent: onEvent,
     codec: codec,
     peerId: peerId,
-  )
-
-proc newPubSubPeer*(
-  peerId: PeerID,
-  getConn: GetConn,
-  dropConn: DropConn,
-  onEvent: OnEvent,
-  codec: string): PubSubPeer {.deprecated: "use PubSubPeer.new".} =
-
-  PubSubPeer.new(
-    peerId,
-    getConn,
-    dropConn,
-    onEvent,
-    codec
+    maxMessageSize: maxMessageSize
   )
@@ -565,7 +565,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
       raise newException(NoiseHandshakeError, "Invalid remote peer id")
     conn.peerId = pid.get()

-  var tmp = NoiseConnection.init(conn, conn.peerId, conn.observedAddr)
+  var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)

   if initiator:
     tmp.readCs = handshakeRes.cs2

@@ -615,10 +615,3 @@ proc new*(

   noise.init()
   noise
-
-proc newNoise*(
-  rng: ref BrHmacDrbgContext,
-  privateKey: PrivateKey,
-  outgoing: bool = true,
-  commonPrologue: seq[byte] = @[]): Noise {.deprecated: "use Noise.new".}=
-  Noise.new(rng, privateKey, outgoing, commonPrologue)
@@ -29,6 +29,3 @@ proc new*(T: typedesc[PlainText]): T =
   let plainText = T()
   plainText.init()
   plainText
-
-proc newPlainText*(): PlainText {.deprecated: "use PlainText.new".} =
-  PlainText.new()
@@ -263,7 +263,7 @@ proc newSecioConn(conn: Connection,
   ## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
   ## ``order``.

-  result = SecioConn.init(conn, conn.peerId, conn.observedAddr)
+  result = SecioConn.new(conn, conn.peerId, conn.observedAddr)

   let i0 = if order < 0: 1 else: 0
   let i1 = if order < 0: 0 else: 1

@@ -441,6 +441,3 @@ proc new*(
   )
   secio.init()
   secio
-
-proc newSecio*(rng: ref BrHmacDrbgContext, localPrivateKey: PrivateKey): Secio {.deprecated: "use Secio.new".} =
-  Secio.new(rng, localPrivateKey)
@@ -42,7 +42,7 @@ func shortLog*(conn: SecureConn): auto =

 chronicles.formatIt(SecureConn): shortLog(it)

-proc init*(T: type SecureConn,
+proc new*(T: type SecureConn,
           conn: Connection,
           peerId: PeerId,
           observedAddr: Multiaddress,
@@ -65,10 +65,6 @@ proc new*(
   bufferStream.initStream()
   bufferStream

-proc newBufferStream*(
-  timeout: Duration = DefaultConnectionTimeout): BufferStream {.deprecated: "use BufferStream.new".} =
-  return BufferStream.new(timeout)
-
 method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
   ## Write bytes to internal read buffer, use this to fill up the
   ## buffer with data.
@@ -151,7 +151,7 @@ proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
     if not await s.pollActivity():
       return

-proc init*(C: type Connection,
+proc new*(C: type Connection,
           peerId: PeerId,
           dir: Direction,
           timeout: Duration = DefaultConnectionTimeout,
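Connection and SecureConn join the repository-wide rename: typedesc `init` constructors become `new`, reserving `init` for in-place setup of an already-allocated object. A sketch of the convention on an illustrative type, not taken from the library:

    type Thing = ref object of RootObj
      name: string
      ready: bool

    proc init(t: Thing) =
      # finishes setup on an existing instance
      t.ready = true

    proc new(T: type Thing, name: string): T =
      # allocates, then delegates to init - the split this diff standardises on
      result = T(name: name)
      result.init()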
@@ -180,7 +180,7 @@ proc readExactly*(s: LPStream,
 proc readLine*(s: LPStream,
                limit = 0,
                sep = "\r\n"): Future[string]
-               {.async, deprecated: "todo".} =
+               {.async.} =
   # TODO replace with something that exploits buffering better
   var lim = if limit <= 0: -1 else: limit
   var state = 0

@@ -255,9 +255,6 @@ proc writeLp*(s: LPStream, msg: openArray[byte]): Future[void] =
 proc writeLp*(s: LPStream, msg: string): Future[void] =
   writeLp(s, msg.toOpenArrayByte(0, msg.high))

-proc write*(s: LPStream, pbytes: pointer, nbytes: int): Future[void] {.deprecated: "seq".} =
-  s.write(@(toOpenArray(cast[ptr UncheckedArray[byte]](pbytes), 0, nbytes - 1)))
-
 proc write*(s: LPStream, msg: string): Future[void] =
   s.write(msg.toBytes())
@@ -273,7 +273,7 @@ proc newSwitch*(peerInfo: PeerInfo,
     transports: transports,
     connManager: connManager,
     peerStore: PeerStore.new(),
-    dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms),
+    dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
     nameResolver: nameResolver)

   switch.connManager.peerStore = switch.peerStore
@@ -114,13 +114,6 @@ proc connHandler*(self: TcpTransport,

   return conn

-proc init*(
-  T: typedesc[TcpTransport],
-  flags: set[ServerFlags] = {},
-  upgrade: Upgrade): T {.deprecated: "use .new".} =
-
-  T.new(flags, upgrade)
-
 proc new*(
   T: typedesc[TcpTransport],
   flags: set[ServerFlags] = {},

@@ -206,6 +199,7 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =

 method dial*(
   self: TcpTransport,
+  hostname: string,
   address: MultiAddress): Future[Connection] {.async, gcsafe.} =
   ## dial a peer
   ##
@@ -60,12 +60,18 @@ method accept*(self: Transport): Future[Connection]

 method dial*(
   self: Transport,
+  hostname: string,
   address: MultiAddress): Future[Connection] {.base, gcsafe.} =
   ## dial a peer
   ##

   doAssert(false, "Not implemented!")

+proc dial*(
+  self: Transport,
+  address: MultiAddress): Future[Connection] {.gcsafe.} =
+  self.dial("", address)
+
 method upgradeIncoming*(
   self: Transport,
   conn: Connection): Future[void] {.base, gcsafe.} =
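The base dial gains a hostname parameter so a transport can still see the name a dialed address was resolved from; the WebSocket transport below forwards it as the TLS hostName, while the new non-method overload keeps existing call sites compiling by passing "". A usage sketch (addresses illustrative):

    # old call sites keep working through the overload:
    let c1 = await transport.dial(
      MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet())

    # a resolver-aware dialer can pass the original name, e.g. for SNI on wss:
    let c2 = await transport.dial("example.com",
      MultiAddress.init("/ip4/192.0.2.1/tcp/443/wss").tryGet())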
@@ -34,7 +34,7 @@ type
   WsStream = ref object of Connection
     session: WSSession

-proc init*(T: type WsStream,
+proc new*(T: type WsStream,
           session: WSSession,
           dir: Direction,
           timeout = 10.minutes,

@@ -170,7 +170,7 @@ proc connHandler(self: WsTransport,
       await stream.close()
       raise exc

-  let conn = WsStream.init(stream, dir)
+  let conn = WsStream.new(stream, dir)
   conn.observedAddr = observedAddr

   self.connections[dir].add(conn)

@@ -207,6 +207,7 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =

 method dial*(
   self: WsTransport,
+  hostname: string,
   address: MultiAddress): Future[Connection] {.async, gcsafe.} =
   ## dial a peer
   ##

@@ -219,6 +220,7 @@ method dial*(
     address.initTAddress().tryGet(),
     "",
     secure = secure,
+    hostName = hostname,
     flags = self.tlsFlags)

   return await self.connHandler(transp, Direction.Out)
@@ -194,7 +194,7 @@ proc muxerHandler(
     await muxer.close()
     trace "Exception in muxer handler", conn, msg = exc.msg

-proc init*(
+proc new*(
   T: type MuxedUpgrade,
   identity: Identify,
   muxers: Table[string, MuxerProvider],
@@ -83,9 +83,6 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
   testBufferStream.initStream()
   testBufferStream

-proc newBufferStream*(writeHandler: WriteHandler): TestBufferStream {.deprecated: "use TestBufferStream.new".}=
-  TestBufferStream.new(writeHandler)
-
 proc checkExpiringInternal(cond: proc(): bool {.raises: [Defect].} ): Future[bool] {.async, gcsafe.} =
   {.gcsafe.}:
     let start = Moment.now()
@@ -382,3 +382,57 @@ suite "FloodSub":
           it.switch.stop())))

     await allFuturesThrowing(nodesFut)
+
+  asyncTest "FloodSub message size validation":
+    var messageReceived = 0
+    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+      check data.len < 50
+      inc(messageReceived)
+
+    let
+      bigNode = generateNodes(1)
+      smallNode = generateNodes(1, maxMessageSize = 200)
+
+      # start switches
+      nodesFut = await allFinished(
+        bigNode[0].switch.start(),
+        smallNode[0].switch.start(),
+      )
+
+    # start pubsubcon
+    await allFuturesThrowing(
+      allFinished(
+        bigNode[0].start(),
+        smallNode[0].start(),
+      ))
+
+    await subscribeNodes(bigNode & smallNode)
+    bigNode[0].subscribe("foo", handler)
+    smallNode[0].subscribe("foo", handler)
+    await waitSub(bigNode[0], smallNode[0], "foo")
+
+    let
+      bigMessage = newSeq[byte](1000)
+      smallMessage1 = @[1.byte]
+      smallMessage2 = @[3.byte]
+
+    # Need two different messages, otherwise they are the same when anonymized
+    check (await smallNode[0].publish("foo", smallMessage1)) > 0
+    check (await bigNode[0].publish("foo", smallMessage2)) > 0
+
+    check (await checkExpiring(messageReceived == 2)) == true
+
+    check (await smallNode[0].publish("foo", bigMessage)) > 0
+    check (await bigNode[0].publish("foo", bigMessage)) > 0
+
+    await allFuturesThrowing(
+      smallNode[0].switch.stop(),
+      bigNode[0].switch.stop()
+    )
+
+    await allFuturesThrowing(
+      smallNode[0].stop(),
+      bigNode[0].stop()
+    )
+
+    await allFuturesThrowing(nodesFut)
@@ -25,7 +25,7 @@ proc getPubSubPeer(p: TestGossipSub, peerId: PeerID): PubSubPeer =
   proc dropConn(peer: PubSubPeer) =
     discard # we don't care about it here yet

-  let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, nil, GossipSubCodec)
+  let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, nil, GossipSubCodec, 1024 * 1024)
   debug "created new pubsub peer", peerId

   p.peers[peerId] = pubSubPeer
@@ -22,6 +22,7 @@ import utils, ../../libp2p/[errors,
                            protocols/pubsub/gossipsub,
                            protocols/pubsub/pubsubpeer,
                            protocols/pubsub/peertable,
+                           protocols/pubsub/timedcache,
                            protocols/pubsub/rpc/messages]
 import ../helpers

@@ -556,6 +557,89 @@ suite "GossipSub":

     await allFuturesThrowing(nodesFut.concat())

+  asyncTest "e2e - GossipSub should not send to source & peers who already seen":
+    # 3 nodes: A, B, C
+    # A publishes, B relays, C is having a long validation
+    # so C should not send to anyone
+
+    let
+      nodes = generateNodes(
+        3,
+        gossip = true)
+
+      # start switches
+      nodesFut = await allFinished(
+        nodes[0].switch.start(),
+        nodes[1].switch.start(),
+        nodes[2].switch.start(),
+      )
+
+    # start pubsub
+    await allFuturesThrowing(
+      allFinished(
+        nodes[0].start(),
+        nodes[1].start(),
+        nodes[2].start(),
+      ))
+
+    await subscribeNodes(nodes)
+
+    var cRelayed: Future[void] = newFuture[void]()
+    var bFinished: Future[void] = newFuture[void]()
+    var
+      aReceived = 0
+      cReceived = 0
+    proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
+      inc aReceived
+      check aReceived < 2
+    proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
+    proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
+      inc cReceived
+      check cReceived < 2
+      cRelayed.complete()
+
+    nodes[0].subscribe("foobar", handlerA)
+    nodes[1].subscribe("foobar", handlerB)
+    nodes[2].subscribe("foobar", handlerC)
+    await waitSub(nodes[0], nodes[1], "foobar")
+    await waitSub(nodes[0], nodes[2], "foobar")
+    await waitSub(nodes[2], nodes[1], "foobar")
+    await waitSub(nodes[1], nodes[2], "foobar")
+
+    var gossip1: GossipSub = GossipSub(nodes[0])
+    var gossip2: GossipSub = GossipSub(nodes[1])
+    var gossip3: GossipSub = GossipSub(nodes[2])
+
+    proc slowValidator(topic: string, message: Message): Future[ValidationResult] {.async.} =
+      await cRelayed
+      # Empty A & C caches to detect duplicates
+      gossip1.seen = TimedCache[MessageId].init()
+      gossip3.seen = TimedCache[MessageId].init()
+      let msgId = toSeq(gossip2.validationSeen.keys)[0]
+      check await checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
+      result = ValidationResult.Accept
+      bFinished.complete()
+
+    nodes[1].addValidator("foobar", slowValidator)
+
+    tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 1
+
+    await bFinished
+
+    await allFuturesThrowing(
+      nodes[0].switch.stop(),
+      nodes[1].switch.stop(),
+      nodes[2].switch.stop()
+    )
+
+    await allFuturesThrowing(
+      nodes[0].stop(),
+      nodes[1].stop(),
+      nodes[2].stop()
+    )
+
+    await allFuturesThrowing(nodesFut.concat())
+
   asyncTest "e2e - GossipSub send over floodPublish A -> B":
     var passed: Future[bool] = newFuture[bool]()
     proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
@@ -14,7 +14,7 @@ suite "Message":
   test "signature":
     var seqno = 11'u64
     let
-      peer = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
+      peer = PeerInfo.new(PrivateKey.random(ECDSA, rng[]).get())
      msg = Message.init(some(peer), @[], "topic", some(seqno), sign = true)

     check verify(msg)
@@ -26,7 +26,8 @@ proc generateNodes*(
   triggerSelf: bool = false,
   verifySignature: bool = libp2p_pubsub_verify,
   anonymize: bool = libp2p_pubsub_anonymize,
-  sign: bool = libp2p_pubsub_sign): seq[PubSub] =
+  sign: bool = libp2p_pubsub_sign,
+  maxMessageSize: int = 1024 * 1024): seq[PubSub] =

   for i in 0..<num:
     let switch = newStandardSwitch(secureManagers = secureManagers)

@@ -38,6 +39,7 @@ proc generateNodes*(
         sign = sign,
         msgIdProvider = msgIdProvider,
         anonymize = anonymize,
+        maxMessageSize = maxMessageSize,
         parameters = (var p = GossipSubParams.init(); p.floodPublish = false; p.historyLength = 20; p.historyGossip = 20; p))
       # set some testing params, to enable scores
       g.topicParams.mgetOrPut("foobar", TopicParams.init()).topicWeight = 1.0

@@ -51,6 +53,7 @@ proc generateNodes*(
         verifySignature = verifySignature,
         sign = sign,
         msgIdProvider = msgIdProvider,
+        maxMessageSize = maxMessageSize,
         anonymize = anonymize).PubSub

       switch.mount(pubsub)
@@ -18,16 +18,16 @@ method newStream*(
   name: string = "",
   lazy: bool = false):
   Future[Connection] {.async, gcsafe.} =
-  result = Connection.init(m.peerId, Direction.Out)
+  result = Connection.new(m.peerId, Direction.Out)

 suite "Connection Manager":
   teardown:
     checkTrackers()

   asyncTest "add and retrieve a connection":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)

     connMngr.storeConn(conn)
     check conn in connMngr

@@ -39,9 +39,9 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "shouldn't allow a closed connection":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)
     await conn.close()

     expect CatchableError:

@@ -50,9 +50,9 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "shouldn't allow an EOFed connection":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)
     conn.isEof = true

     expect CatchableError:

@@ -62,9 +62,9 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "add and retrieve a muxer":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)
     let muxer = new Muxer
     muxer.connection = conn

@@ -78,9 +78,9 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "shouldn't allow a muxer for an untracked connection":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)
     let muxer = new Muxer
     muxer.connection = conn

@@ -92,10 +92,10 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "get conn with direction":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn1 = Connection.init(peerId, Direction.Out)
-    let conn2 = Connection.init(peerId, Direction.In)
+    let conn1 = Connection.new(peerId, Direction.Out)
+    let conn2 = Connection.new(peerId, Direction.In)

     connMngr.storeConn(conn1)
     connMngr.storeConn(conn2)

@@ -112,9 +112,9 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "get muxed stream for peer":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)

     let muxer = new TestMuxer
     muxer.peerId = peerId

@@ -132,9 +132,9 @@ suite "Connection Manager":
     await stream.close()

   asyncTest "get stream from directed connection":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)

     let muxer = new TestMuxer
     muxer.peerId = peerId

@@ -153,9 +153,9 @@ suite "Connection Manager":
     await stream1.close()

   asyncTest "get stream from any connection":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)

     let muxer = new TestMuxer
     muxer.peerId = peerId

@@ -172,14 +172,14 @@ suite "Connection Manager":
     await stream.close()

   asyncTest "should raise on too many connections":
-    let connMngr = ConnManager.init(maxConnsPerPeer = 1)
+    let connMngr = ConnManager.new(maxConnsPerPeer = 1)
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()

-    connMngr.storeConn(Connection.init(peerId, Direction.In))
+    connMngr.storeConn(Connection.new(peerId, Direction.In))

     let conns = @[
-      Connection.init(peerId, Direction.In),
-      Connection.init(peerId, Direction.In)]
+      Connection.new(peerId, Direction.In),
+      Connection.new(peerId, Direction.In)]

     expect TooManyConnectionsError:
       connMngr.storeConn(conns[0])

@@ -191,9 +191,9 @@ suite "Connection Manager":
       allFutures(conns.mapIt( it.close() )))

   asyncTest "cleanup on connection close":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
-    let conn = Connection.init(peerId, Direction.In)
+    let conn = Connection.new(peerId, Direction.In)
     let muxer = new Muxer

     muxer.connection = conn

@@ -212,7 +212,7 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "drop connections for peer":
-    let connMngr = ConnManager.init()
+    let connMngr = ConnManager.new()
     let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()

     for i in 0..<2:

@@ -220,7 +220,7 @@ suite "Connection Manager":
         Direction.In else:
         Direction.Out

-      let conn = Connection.init(peerId, dir)
+      let conn = Connection.new(peerId, dir)
       let muxer = new Muxer
       muxer.connection = conn

@@ -241,13 +241,13 @@ suite "Connection Manager":
     await connMngr.close()

   asyncTest "track total incoming connection limits":
-    let connMngr = ConnManager.init(maxConnections = 3)
+    let connMngr = ConnManager.new(maxConnections = 3)

     var conns: seq[Connection]
     for i in 0..<3:
       let conn = connMngr.trackIncomingConn(
         proc(): Future[Connection] {.async.} =
-          return Connection.init(
+          return Connection.new(
            PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
            Direction.In)
       )

@@ -258,7 +258,7 @@ suite "Connection Manager":
     # should timeout adding a connection over the limit
     let conn = connMngr.trackIncomingConn(
       proc(): Future[Connection] {.async.} =
-        return Connection.init(
+        return Connection.new(
          PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
          Direction.In)
     )

@@ -270,13 +270,13 @@ suite "Connection Manager":
      allFutures(conns.mapIt( it.close() )))

   asyncTest "track total outgoing connection limits":
-    let connMngr = ConnManager.init(maxConnections = 3)
+    let connMngr = ConnManager.new(maxConnections = 3)

     var conns: seq[Connection]
     for i in 0..<3:
       let conn = await connMngr.trackOutgoingConn(
         proc(): Future[Connection] {.async.} =
-          return Connection.init(
+          return Connection.new(
            PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
            Direction.In)
       )

@@ -287,7 +287,7 @@ suite "Connection Manager":
     expect TooManyConnectionsError:
       discard await connMngr.trackOutgoingConn(
         proc(): Future[Connection] {.async.} =
-          return Connection.init(
+          return Connection.new(
            PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
            Direction.In)
       )

@@ -297,13 +297,13 @@ suite "Connection Manager":
      allFutures(conns.mapIt( it.close() )))

   asyncTest "track both incoming and outgoing total connections limits - fail on incoming":
-    let connMngr = ConnManager.init(maxConnections = 3)
+    let connMngr = ConnManager.new(maxConnections = 3)

     var conns: seq[Connection]
     for i in 0..<3:
       let conn = await connMngr.trackOutgoingConn(
         proc(): Future[Connection] {.async.} =
-          return Connection.init(
+          return Connection.new(
            PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
            Direction.In)
       )

@@ -313,7 +313,7 @@ suite "Connection Manager":
     # should timeout adding a connection over the limit
     let conn = connMngr.trackIncomingConn(
       proc(): Future[Connection] {.async.} =
-        return Connection.init(
+        return Connection.new(
          PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
          Direction.In)
     )

@@ -325,13 +325,13 @@ suite "Connection Manager":
      allFutures(conns.mapIt( it.close() )))

   asyncTest "track both incoming and outgoing total connections limits - fail on outgoing":
-    let connMngr = ConnManager.init(maxConnections = 3)
+    let connMngr = ConnManager.new(maxConnections = 3)

     var conns: seq[Connection]
     for i in 0..<3:
       let conn = connMngr.trackIncomingConn(
         proc(): Future[Connection] {.async.} =
-          return Connection.init(
+          return Connection.new(
            PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
            Direction.In)
       )

@@ -343,7 +343,7 @@ suite "Connection Manager":
     expect TooManyConnectionsError:
       discard await connMngr.trackOutgoingConn(
         proc(): Future[Connection] {.async.} =
-          return Connection.init(
+          return Connection.new(
            PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
            Direction.In)
       )

@@ -353,13 +353,13 @@ suite "Connection Manager":
      allFutures(conns.mapIt( it.close() )))

   asyncTest "track max incoming connection limits":
-    let connMngr = ConnManager.init(maxIn = 3)
+    let connMngr = ConnManager.new(maxIn = 3)

     var conns: seq[Connection]
     for i in 0..<3:
       let conn = connMngr.trackIncomingConn(
         proc(): Future[Connection] {.async.} =
-          return Connection.init(
+          return Connection.new(
            PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
            Direction.In)
       )

@@ -370,7 +370,7 @@ suite "Connection Manager":
     # should timeout adding a connection over the limit
|
# should timeout adding a connection over the limit
|
||||||
let conn = connMngr.trackIncomingConn(
|
let conn = connMngr.trackIncomingConn(
|
||||||
proc(): Future[Connection] {.async.} =
|
proc(): Future[Connection] {.async.} =
|
||||||
return Connection.init(
|
return Connection.new(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
)
|
)
|
||||||
|
@ -382,13 +382,13 @@ suite "Connection Manager":
|
||||||
allFutures(conns.mapIt( it.close() )))
|
allFutures(conns.mapIt( it.close() )))
|
||||||
|
|
||||||
asyncTest "track max outgoing connection limits":
|
asyncTest "track max outgoing connection limits":
|
||||||
let connMngr = ConnManager.init(maxOut = 3)
|
let connMngr = ConnManager.new(maxOut = 3)
|
||||||
|
|
||||||
var conns: seq[Connection]
|
var conns: seq[Connection]
|
||||||
for i in 0..<3:
|
for i in 0..<3:
|
||||||
let conn = await connMngr.trackOutgoingConn(
|
let conn = await connMngr.trackOutgoingConn(
|
||||||
proc(): Future[Connection] {.async.} =
|
proc(): Future[Connection] {.async.} =
|
||||||
return Connection.init(
|
return Connection.new(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
)
|
)
|
||||||
|
@ -399,7 +399,7 @@ suite "Connection Manager":
|
||||||
expect TooManyConnectionsError:
|
expect TooManyConnectionsError:
|
||||||
discard await connMngr.trackOutgoingConn(
|
discard await connMngr.trackOutgoingConn(
|
||||||
proc(): Future[Connection] {.async.} =
|
proc(): Future[Connection] {.async.} =
|
||||||
return Connection.init(
|
return Connection.new(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
)
|
)
|
||||||
|
@ -409,13 +409,13 @@ suite "Connection Manager":
|
||||||
allFutures(conns.mapIt( it.close() )))
|
allFutures(conns.mapIt( it.close() )))
|
||||||
|
|
||||||
asyncTest "track incoming max connections limits - fail on incoming":
|
asyncTest "track incoming max connections limits - fail on incoming":
|
||||||
let connMngr = ConnManager.init(maxOut = 3)
|
let connMngr = ConnManager.new(maxOut = 3)
|
||||||
|
|
||||||
var conns: seq[Connection]
|
var conns: seq[Connection]
|
||||||
for i in 0..<3:
|
for i in 0..<3:
|
||||||
let conn = await connMngr.trackOutgoingConn(
|
let conn = await connMngr.trackOutgoingConn(
|
||||||
proc(): Future[Connection] {.async.} =
|
proc(): Future[Connection] {.async.} =
|
||||||
return Connection.init(
|
return Connection.new(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
)
|
)
|
||||||
|
@ -425,7 +425,7 @@ suite "Connection Manager":
|
||||||
# should timeout adding a connection over the limit
|
# should timeout adding a connection over the limit
|
||||||
let conn = connMngr.trackIncomingConn(
|
let conn = connMngr.trackIncomingConn(
|
||||||
proc(): Future[Connection] {.async.} =
|
proc(): Future[Connection] {.async.} =
|
||||||
return Connection.init(
|
return Connection.new(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
)
|
)
|
||||||
|
@ -437,13 +437,13 @@ suite "Connection Manager":
|
||||||
allFutures(conns.mapIt( it.close() )))
|
allFutures(conns.mapIt( it.close() )))
|
||||||
|
|
||||||
asyncTest "track incoming max connections limits - fail on outgoing":
|
asyncTest "track incoming max connections limits - fail on outgoing":
|
||||||
let connMngr = ConnManager.init(maxIn = 3)
|
let connMngr = ConnManager.new(maxIn = 3)
|
||||||
|
|
||||||
var conns: seq[Connection]
|
var conns: seq[Connection]
|
||||||
for i in 0..<3:
|
for i in 0..<3:
|
||||||
let conn = connMngr.trackIncomingConn(
|
let conn = connMngr.trackIncomingConn(
|
||||||
proc(): Future[Connection] {.async.} =
|
proc(): Future[Connection] {.async.} =
|
||||||
return Connection.init(
|
return Connection.new(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
)
|
)
|
||||||
|
@ -455,7 +455,7 @@ suite "Connection Manager":
|
||||||
expect TooManyConnectionsError:
|
expect TooManyConnectionsError:
|
||||||
discard await connMngr.trackOutgoingConn(
|
discard await connMngr.trackOutgoingConn(
|
||||||
proc(): Future[Connection] {.async.} =
|
proc(): Future[Connection] {.async.} =
|
||||||
return Connection.init(
|
return Connection.new(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
)
|
)
|
||||||
|
|
|
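Every hunk in this file makes the same mechanical change: ref-type constructors are renamed from `Type.init(...)` to `Type.new(...)`, consistent with the Nim convention that `new` names constructors returning `ref` objects. As a reading aid, here is a minimal sketch of the tracking pattern these tests repeat, built only from calls visible in the hunks above; the import paths are assumptions, not part of the diff:

# Sketch only; APIs as used in the tests above, module paths assumed.
import chronos
import libp2p/[connmanager, peerid]
import libp2p/stream/connection
import libp2p/crypto/crypto

proc demo() {.async.} =
  let connMngr = ConnManager.new(maxConnections = 1)
  let peerId = PeerId.init(
    PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
  # The supplied proc produces the connection; the manager counts it
  # against a free slot. Over the limit, the incoming path waits and
  # the outgoing path raises TooManyConnectionsError, per the tests.
  let conn = await connMngr.trackIncomingConn(
    proc(): Future[Connection] {.async.} =
      return Connection.new(peerId, Direction.In))
  await conn.close()

waitFor demo()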
@@ -12,6 +12,7 @@
 import unittest2
 import nimcrypto/[utils, sysrand]
 import ../libp2p/crypto/[crypto, chacha20poly1305, curve25519, hkdf]
+import bearssl

 when defined(nimHasUsed): {.used.}

@@ -545,3 +546,10 @@ suite "Key interface test suite":

     sha256.hkdf(salt, ikm, info, output)
     check output[0].toHex(true) == truth
+
+  test "shuffle":
+    var cards = ["Ace", "King", "Queen", "Jack", "Ten"]
+    var rng = (ref BrHmacDrbgContext)()
+    brHmacDrbgInit(addr rng[], addr sha256Vtable, nil, 0)
+    rng.shuffle(cards)
+    check cards == ["King", "Ten", "Ace", "Queen", "Jack"]
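The new `shuffle` test pins down the exact permutation produced by the HMAC-DRBG-backed `rng.shuffle`. For orientation, a generic Fisher-Yates sketch of such a deterministic, RNG-driven shuffle; this is an illustration with a toy generator, not the library's implementation:

# Illustration only: Fisher-Yates over an injected randBelow(n);
# in the test above this role is played by the seeded HMAC-DRBG.
proc shuffleSketch[T](xs: var openArray[T],
                      randBelow: proc (n: int): int) =
  # Walk from the back; each element swaps with a uniformly chosen
  # position at or before it, yielding a uniform permutation.
  for i in countdown(xs.high, 1):
    let j = randBelow(i + 1)
    swap(xs[i], xs[j])

when isMainModule:
  var state = 42
  proc toyRandBelow(n: int): int =
    # Toy LCG stand-in for the DRBG byte stream (illustrative only).
    state = (state * 1103515245 + 12345) and 0x7fffffff
    result = state mod n

  var cards = ["Ace", "King", "Queen", "Jack", "Ten"]
  shuffleSketch(cards, toyRandBelow)
  echo cards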
@@ -38,7 +38,7 @@ suite "Identify":
   asyncSetup:
     ma = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
     remoteSecKey = PrivateKey.random(ECDSA, rng[]).get()
-    remotePeerInfo = PeerInfo.init(
+    remotePeerInfo = PeerInfo.new(
       remoteSecKey, [ma], ["/test/proto1/1.0.0", "/test/proto2/1.0.0"])

     transport1 = TcpTransport.new(upgrade = Upgrade())
@@ -117,7 +117,7 @@ suite "Identify":
     conn = await transport2.dial(transport1.ma)

     expect IdentityNoMatchError:
-      let pi2 = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
+      let pi2 = PeerInfo.new(PrivateKey.random(ECDSA, rng[]).get())
       discard await msDial.select(conn, IdentifyCodec)
       discard await identifyProto2.identify(conn, pi2.peerId)

@@ -192,7 +192,7 @@ suite "Identify":
       switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) != switch2.peerInfo.protocols.toHashSet()

     let oldPeerId = switch2.peerInfo.peerId
-    switch2.peerInfo = PeerInfo.init(PrivateKey.random(newRng()[]).get())
+    switch2.peerInfo = PeerInfo.new(PrivateKey.random(newRng()[]).get())

     await identifyPush2.push(switch2.peerInfo, conn)

@@ -385,7 +385,7 @@ suite "Mplex":

     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         let msg = await stream.readLp(1024)
@@ -399,7 +399,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     let stream = await mplexDial.newStream()
     await stream.writeLp("HELLO")
@@ -422,7 +422,7 @@ suite "Mplex":

     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         let msg = await stream.readLp(1024)
@@ -436,7 +436,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let stream = await mplexDial.newStream(lazy = true)
     let mplexDialFut = mplexDial.handle()
     check not LPChannel(stream).isOpen # assert lazy
@@ -467,7 +467,7 @@ suite "Mplex":
     proc acceptHandler() {.async, gcsafe.} =
       try:
         let conn = await transport1.accept()
-        let mplexListen = Mplex.init(conn)
+        let mplexListen = Mplex.new(conn)
         mplexListen.streamHandler = proc(stream: Connection)
           {.async, gcsafe.} =
           let msg = await stream.readLp(MaxMsgSize)
@@ -488,7 +488,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     let stream = await mplexDial.newStream()

@@ -513,7 +513,7 @@ suite "Mplex":

     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         await stream.writeLp("Hello from stream!")
@@ -526,7 +526,7 @@ suite "Mplex":
     let conn = await transport2.dial(transport1.ma)

     let acceptFut = acceptHandler()
-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     let stream = await mplexDial.newStream("DIALER")
     let msg = string.fromBytes(await stream.readLp(1024))
@@ -551,7 +551,7 @@ suite "Mplex":
     proc acceptHandler() {.async, gcsafe.} =
       var count = 1
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         let msg = await stream.readLp(1024)
@@ -568,7 +568,7 @@ suite "Mplex":
     let conn = await transport2.dial(transport1.ma)

     let acceptFut = acceptHandler()
-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     # TODO: Reenable once half-closed is working properly
     let mplexDialFut = mplexDial.handle()
     for i in 1..10:
@@ -595,7 +595,7 @@ suite "Mplex":
     proc acceptHandler() {.async, gcsafe.} =
       var count = 1
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         let msg = await stream.readLp(1024)
@@ -613,7 +613,7 @@ suite "Mplex":
     let conn = await transport2.dial(transport1.ma)

     let acceptFut = acceptHandler()
-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     for i in 1..10:
       let stream = await mplexDial.newStream("dialer stream")
@@ -639,7 +639,7 @@ suite "Mplex":
     var listenStreams: seq[Connection]
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)

       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
@@ -660,7 +660,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     var dialStreams: seq[Connection]
     for i in 0..9:
@@ -689,7 +689,7 @@ suite "Mplex":
     var listenStreams: seq[Connection]
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         listenStreams.add(stream)
@@ -708,7 +708,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     var dialStreams: seq[Connection]
     for i in 0..9:
@@ -752,7 +752,7 @@ suite "Mplex":
     var listenStreams: seq[Connection]
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         listenStreams.add(stream)
@@ -767,7 +767,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     var dialStreams: seq[Connection]
     for i in 0..9:
@@ -795,7 +795,7 @@ suite "Mplex":
     var listenStreams: seq[Connection]
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      mplexListen = Mplex.init(conn)
+      mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         listenStreams.add(stream)
@@ -810,7 +810,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     var dialStreams: seq[Connection]
     for i in 0..9:
@@ -838,7 +838,7 @@ suite "Mplex":
     var listenStreams: seq[Connection]
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         listenStreams.add(stream)
@@ -854,7 +854,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     var dialStreams: seq[Connection]
     for i in 0..9:
@@ -880,7 +880,7 @@ suite "Mplex":
     var listenStreams: seq[Connection]
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         listenStreams.add(stream)
@@ -895,7 +895,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     var dialStreams: seq[Connection]
     for i in 0..9:
@@ -923,7 +923,7 @@ suite "Mplex":
     var listenStreams: seq[Connection]
     proc acceptHandler() {.async, gcsafe.} =
       listenConn = await transport1.accept()
-      let mplexListen = Mplex.init(listenConn)
+      let mplexListen = Mplex.new(listenConn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         listenStreams.add(stream)
@@ -938,7 +938,7 @@ suite "Mplex":
     let transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let conn = await transport2.dial(transport1.ma)

-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     var dialStreams: seq[Connection]
     for i in 0..9:
@@ -970,7 +970,7 @@ suite "Mplex":
     const MsgSize = 1024
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         try:
@@ -988,7 +988,7 @@ suite "Mplex":
     let conn = await transport2.dial(transport1.ma)

     let acceptFut = acceptHandler()
-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let mplexDialFut = mplexDial.handle()
     let stream = await mplexDial.newStream()
     var bigseq = newSeqOfCap[uint8](MaxMsgSize + 1)
@@ -1042,7 +1042,7 @@ suite "Mplex":
     const MsgSize = 512
     proc acceptHandler() {.async, gcsafe.} =
       let conn = await transport1.accept()
-      let mplexListen = Mplex.init(conn)
+      let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
         {.async, gcsafe.} =
         let msg = await stream.readLp(MsgSize)
@@ -1057,7 +1057,7 @@ suite "Mplex":
     let conn = await transport2.dial(transport1.ma)

     let acceptFut = acceptHandler()
-    let mplexDial = Mplex.init(conn)
+    let mplexDial = Mplex.new(conn)
     let stream = await mplexDial.newStream()
     let mplexDialFut = mplexDial.handle()
     var bigseq = newSeqOfCap[uint8](MsgSize + 1)
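Each Mplex hunk above repeats the same wiring on the accept side; a condensed sketch of that lifecycle, using only the calls shown in the hunks (the import paths are assumptions):

# Sketch of the accept-side wiring the tests repeat; module paths assumed.
import chronos
import libp2p/stream/connection
import libp2p/muxers/mplex/mplex

proc serve(conn: Connection) {.async.} =
  let mplexListen = Mplex.new(conn)        # previously Mplex.init(conn)
  mplexListen.streamHandler = proc(stream: Connection) {.async, gcsafe.} =
    let msg = await stream.readLp(1024)    # one length-prefixed message
    await stream.writeLp(msg)              # echo it back
    await stream.close()
  await mplexListen.handle()               # pump frames until conn closes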
@@ -192,29 +192,6 @@ const
     "9003172F612F622F632F642F652F662F672F682F692E736F636B"
   ]

-  UtilitySuccessVectors = [
-    "/ip4/127.0.0.1/tcp/1024",
-    "/ip4/127.0.0.1/udp/1024",
-    "/ip4/0.0.0.0/tcp/1024",
-    "/ip4/0.0.0.0/udp/1024",
-    "/ip4/255.255.255.255/tcp/65535",
-    "/ip4/255.255.255.255/udp/65535",
-    "/ip6/::1/tcp/1024",
-    "/ip6/::1/udp/1024",
-    "/ip6/::/tcp/65535",
-    "/ip6/::/udp/65535",
-    "/ip6/::/udp/65535",
-    "/unix/tmp/test.socket"
-  ]
-
-  UtilityFailVectors = [
-    "/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
-    "/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
-    "/ip6/2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095/tcp/8000/ws/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
-    "/p2p-webrtc-star/ip4/127.0.0.1/tcp/9090/ws/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
-    "/ip4/127.0.0.1/udp/1234/quic"
-  ]
-
   PatternVectors = [
     PatternVector(pattern: IP,
                   good: @["/ip4/0.0.0.0", "/ip6/fc00::"],
@@ -339,14 +316,6 @@ suite "MultiAddress test suite":
       $cma == "/ip4/127.0.0.1/udp/30000/p2p-circuit"
       $ma2 == "/ip4/127.0.0.1/udp/30000/p2p-circuit"

-  test "isWire() test":
-    for item in UtilitySuccessVectors:
-      var a = MultiAddress.init(item).get()
-      check a.isWire() == true
-    for item in UtilityFailVectors:
-      var a = MultiAddress.init(item).get()
-      check a.isWire() == false
-
   test "Path addresses serialization/deserialization":
     for i in 0..<len(PathVectors):
       var a = MultiAddress.init(PathVectors[i]).get()
@@ -59,7 +59,7 @@ suite "Name resolving":
   var resolver {.threadvar.}: MockResolver

   proc testOne(input: string, output: seq[Multiaddress]): bool =
-    let resolved = waitFor resolver.resolveMAddresses(@[Multiaddress.init(input).tryGet()])
+    let resolved = waitFor resolver.resolveMAddress(Multiaddress.init(input).tryGet())
     if resolved != output:
       echo "Expected ", output
       echo "Got ", resolved
@@ -90,18 +90,6 @@ suite "Name resolving":

     check testOne("/ip6/::1/tcp/0", "/ip6/::1/tcp/0")

-  asyncTest "test multiple resolve":
-    resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
-    resolver.ipResponses[("localhost", true)] = @["::1"]
-
-    let resolved = waitFor resolver.resolveMAddresses(@[
-      Multiaddress.init("/dns/localhost/udp/0").tryGet(),
-      Multiaddress.init("/dns4/localhost/udp/0").tryGet(),
-      Multiaddress.init("/dns6/localhost/udp/0").tryGet(),
-    ])
-
-    check resolved == @[Multiaddress.init("/ip4/127.0.0.1/udp/0").tryGet(), Multiaddress.init("/ip6/::1/udp/0").tryGet()]
-
   asyncTest "dnsaddr recursive test":
     resolver.txtResponses["_dnsaddr.bootstrap.libp2p.io"] = @[
       "dnsaddr=/dnsaddr/sjc-1.bootstrap.libp2p.io/tcp/4001/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
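The suite's helper now exercises the singular API: `resolveMAddresses` (seq in, merged seq out) is replaced by `resolveMAddress` for a single address, and the standalone "test multiple resolve" case is dropped with it. A sketch of the new call shape, reusing the same MockResolver hooks the suite configures (imports are assumptions):

# Sketch; resolveMAddress and the ipResponses hook appear in the hunks above.
import chronos
import libp2p/multiaddress
import libp2p/nameresolving/mockresolver

proc demo() {.async.} =
  let resolver = MockResolver.new()
  resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
  let resolved = await resolver.resolveMAddress(
    Multiaddress.init("/dns4/localhost/udp/0").tryGet())
  echo resolved   # expected: a single /ip4/127.0.0.1/udp/0 entry

waitFor demo()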
@@ -53,11 +53,11 @@ method init(p: TestProto) {.gcsafe.} =
 proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switch, PeerInfo) =
   var
     privateKey = PrivateKey.random(ECDSA, rng[]).get()
-    peerInfo = PeerInfo.init(privateKey)
+    peerInfo = PeerInfo.new(privateKey)
   peerInfo.addrs.add(ma)

   proc createMplex(conn: Connection): Muxer =
-    result = Mplex.init(conn)
+    result = Mplex.new(conn)

   let
     identify = Identify.new(peerInfo)
@@ -67,9 +67,9 @@ proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switc
       [Secure(Secio.new(rng, privateKey))]
     else:
       [Secure(Noise.new(rng, privateKey, outgoing = outgoing))]
-    connManager = ConnManager.init()
+    connManager = ConnManager.new()
     ms = MultistreamSelect.new()
-    muxedUpgrade = MuxedUpgrade.init(identify, muxers, secureManagers, connManager, ms)
+    muxedUpgrade = MuxedUpgrade.new(identify, muxers, secureManagers, connManager, ms)
     transports = @[Transport(TcpTransport.new(upgrade = muxedUpgrade))]

   let switch = newSwitch(
@@ -90,7 +90,7 @@ suite "Noise":
     let
       server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
       serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      serverInfo = PeerInfo.init(serverPrivKey, [server])
+      serverInfo = PeerInfo.new(serverPrivKey, [server])
       serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)

     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
@@ -109,7 +109,7 @@ suite "Noise":
       acceptFut = acceptHandler()
       transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
       clientPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      clientInfo = PeerInfo.init(clientPrivKey, [transport1.ma])
+      clientInfo = PeerInfo.new(clientPrivKey, [transport1.ma])
       clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
       conn = await transport2.dial(transport1.ma)

@@ -131,7 +131,7 @@ suite "Noise":
     let
       server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
       serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      serverInfo = PeerInfo.init(serverPrivKey, [server])
+      serverInfo = PeerInfo.new(serverPrivKey, [server])
       serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)

     let
@@ -153,7 +153,7 @@ suite "Noise":
       handlerWait = acceptHandler()
       transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
       clientPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      clientInfo = PeerInfo.init(clientPrivKey, [transport1.ma])
+      clientInfo = PeerInfo.new(clientPrivKey, [transport1.ma])
       clientNoise = Noise.new(rng, clientPrivKey, outgoing = true, commonPrologue = @[1'u8, 2'u8, 3'u8])
       conn = await transport2.dial(transport1.ma)
       conn.peerId = serverInfo.peerId
@@ -171,7 +171,7 @@ suite "Noise":
     let
       server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
       serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      serverInfo = PeerInfo.init(serverPrivKey, [server])
+      serverInfo = PeerInfo.new(serverPrivKey, [server])
       serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)
       readTask = newFuture[void]()

@@ -193,7 +193,7 @@ suite "Noise":
       acceptFut = acceptHandler()
       transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
       clientPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      clientInfo = PeerInfo.init(clientPrivKey, [transport1.ma])
+      clientInfo = PeerInfo.new(clientPrivKey, [transport1.ma])
       clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
       conn = await transport2.dial(transport1.ma)
       conn.peerId = serverInfo.peerId
@@ -210,7 +210,7 @@ suite "Noise":
     let
       server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
       serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      serverInfo = PeerInfo.init(serverPrivKey, [server])
+      serverInfo = PeerInfo.new(serverPrivKey, [server])
       serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)
       readTask = newFuture[void]()

@@ -235,7 +235,7 @@ suite "Noise":
       acceptFut = acceptHandler()
       transport2: TcpTransport = TcpTransport.new(upgrade = Upgrade())
       clientPrivKey = PrivateKey.random(ECDSA, rng[]).get()
-      clientInfo = PeerInfo.init(clientPrivKey, [transport1.ma])
+      clientInfo = PeerInfo.new(clientPrivKey, [transport1.ma])
       clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
       conn = await transport2.dial(transport1.ma)
       conn.peerId = serverInfo.peerId
@@ -11,7 +11,7 @@ import ./helpers
 suite "PeerInfo":
   test "Should init with private key":
     let seckey = PrivateKey.random(ECDSA, rng[]).get()
-    var peerInfo = PeerInfo.init(seckey)
+    var peerInfo = PeerInfo.new(seckey)
    var peerId = PeerID.init(seckey).get()

     check peerId == peerInfo.peerId
@@ -41,8 +41,8 @@ suite "Ping":
     pingProto1 = Ping.new()
     pingProto2 = Ping.new(handlePing)

-    msListen = newMultistream()
-    msDial = newMultistream()
+    msListen = MultistreamSelect.new()
+    msDial = MultistreamSelect.new()

     pingReceivedCount = 0

@@ -91,7 +91,7 @@ suite "Ping":
         buf: array[32, byte]
         fakebuf: array[32, byte]
       await conn.readExactly(addr buf[0], 32)
-      await conn.write(addr fakebuf[0], 32)
+      await conn.write(@fakebuf)
     fakePingProto.codec = PingCodec
     fakePingProto.handler = fakeHandle

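The fake-ping reply moves off the pointer/length overload: `@fakebuf` copies the fixed-size array into a `seq[byte]`, which the seq-taking `write` accepts. The conversion itself is plain Nim:

# The `@` operator turns array[32, byte] into seq[byte]; the changed
# call then writes that seq instead of a raw pointer plus length.
var fakebuf: array[32, byte]
let asSeq: seq[byte] = @fakebuf
doAssert asSeq.len == 32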
@@ -21,7 +21,8 @@ import ../libp2p/[errors,
                   nameresolving/nameresolver,
                   nameresolving/mockresolver,
                   stream/chronosstream,
-                  transports/tcptransport]
+                  transports/tcptransport,
+                  transports/wstransport]
 import ./helpers

 const
@@ -463,7 +464,7 @@ suite "Switch":

     let switch1 = newStandardSwitch()

-    let rng = newRng()
+    let rng = crypto.newRng()
     # use same private keys to emulate two connection from same peer
     let privKey = PrivateKey.random(rng[]).tryGet()
     let switch2 = newStandardSwitch(
@@ -530,11 +531,11 @@ suite "Switch":
   asyncTest "e2e should allow dropping peer from connection events":
     var awaiters: seq[Future[void]]

-    let rng = newRng()
+    let rng = crypto.newRng()
     # use same private keys to emulate two connection from same peer
     let
       privateKey = PrivateKey.random(rng[]).tryGet()
-      peerInfo = PeerInfo.init(privateKey)
+      peerInfo = PeerInfo.new(privateKey)

     var switches: seq[Switch]
     var done = newFuture[void]()
@@ -573,11 +574,11 @@ suite "Switch":
   asyncTest "e2e should allow dropping multiple connections for peer from connection events":
     var awaiters: seq[Future[void]]

-    let rng = newRng()
+    let rng = crypto.newRng()
     # use same private keys to emulate two connection from same peer
     let
       privateKey = PrivateKey.random(rng[]).tryGet()
-      peerInfo = PeerInfo.init(privateKey)
+      peerInfo = PeerInfo.new(privateKey)

     var conns = 1
     var switches: seq[Switch]
@@ -736,7 +737,7 @@ suite "Switch":
     discard await switch2.start()
     let someAddr = MultiAddress.init("/ip4/127.128.0.99").get()
     let seckey = PrivateKey.random(ECDSA, rng[]).get()
-    let somePeer = PeerInfo.init(secKey, [someAddr])
+    let somePeer = PeerInfo.new(secKey, [someAddr])
     expect(DialFailedError):
       discard await switch2.dial(somePeer.peerId, somePeer.addrs, TestCodec)
     await switch2.stop()
@@ -901,5 +902,79 @@ suite "Switch":
       switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet()
       switch2.peerStore.addressBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.addrs.toHashSet()

-      switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet()
-      switch2.peerStore.addressBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.addrs.toHashSet()
+      switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.protocols.toHashSet()
+      switch2.peerStore.protoBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.protocols.toHashSet()
+
+  asyncTest "e2e dial dns4 address":
+    var awaiters: seq[Future[void]]
+    let resolver = MockResolver.new()
+    resolver.ipResponses[("localhost", false)] = @["127.0.0.1"]
+    resolver.ipResponses[("localhost", true)] = @["::1"]
+
+    let
+      srcSwitch = newStandardSwitch(nameResolver = resolver)
+      destSwitch = newStandardSwitch()
+
+    awaiters.add(await destSwitch.start())
+    awaiters.add(await srcSwitch.start())
+    await allFuturesThrowing(awaiters)
+
+    let testAddr = MultiAddress.init("/dns4/localhost/").tryGet() &
+                   destSwitch.peerInfo.addrs[0][1].tryGet()
+
+    await srcSwitch.connect(destSwitch.peerInfo.peerId, @[testAddr])
+    check srcSwitch.isConnected(destSwitch.peerInfo.peerId)
+
+    await destSwitch.stop()
+    await srcSwitch.stop()
+
+  asyncTest "e2e dial dnsaddr with multiple transports":
+    var awaiters: seq[Future[void]]
+    let resolver = MockResolver.new()
+
+    let
+      wsAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0/ws").tryGet()
+      tcpAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
+
+      srcTcpSwitch = newStandardSwitch(nameResolver = resolver)
+      srcWsSwitch =
+        SwitchBuilder.new()
+        .withAddress(wsAddress)
+        .withRng(crypto.newRng())
+        .withMplex()
+        .withTransport(proc (upgr: Upgrade): Transport = WsTransport.new(upgr))
+        .withNameResolver(resolver)
+        .withNoise()
+        .build()
+
+      destSwitch =
+        SwitchBuilder.new()
+        .withAddresses(@[tcpAddress, wsAddress])
+        .withRng(crypto.newRng())
+        .withMplex()
+        .withTransport(proc (upgr: Upgrade): Transport = WsTransport.new(upgr))
+        .withTcpTransport()
+        .withNoise()
+        .build()
+
+    awaiters.add(await destSwitch.start())
+    awaiters.add(await srcTcpSwitch.start())
+    awaiters.add(await srcWsSwitch.start())
+    await allFuturesThrowing(awaiters)
+
+    resolver.txtResponses["_dnsaddr.test.io"] = @[
+      "dnsaddr=/ip4/127.0.0.1" & $destSwitch.peerInfo.addrs[1][1].tryGet() & "/ws",
+      "dnsaddr=/ip4/127.0.0.1" & $destSwitch.peerInfo.addrs[0][1].tryGet()
+    ]
+
+    let testAddr = MultiAddress.init("/dnsaddr/test.io/").tryGet()
+
+    await srcTcpSwitch.connect(destSwitch.peerInfo.peerId, @[testAddr])
+    check srcTcpSwitch.isConnected(destSwitch.peerInfo.peerId)
+
+    await srcWsSwitch.connect(destSwitch.peerInfo.peerId, @[testAddr])
+    check srcWsSwitch.isConnected(destSwitch.peerInfo.peerId)
+
+    await destSwitch.stop()
+    await srcWsSwitch.stop()
+    await srcTcpSwitch.stop()
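The new end-to-end cases lean on the resolver plumbing added above: a `/dnsaddr/...` address expands through a TXT lookup of `_dnsaddr.<host>`, and each `dnsaddr=` entry becomes a dial candidate, which is how one name can advertise both the TCP and the WebSocket endpoint. A sketch of just the expansion step, using the same MockResolver TXT hook the test installs; whether `resolveMAddress` is the exact entry point for dnsaddr expansion is an assumption here:

# Sketch of the /dnsaddr expansion behind the test; hooks as in the hunks.
import chronos
import libp2p/multiaddress
import libp2p/nameresolving/mockresolver

proc demo() {.async.} =
  let resolver = MockResolver.new()
  resolver.txtResponses["_dnsaddr.test.io"] = @[
    "dnsaddr=/ip4/127.0.0.1/tcp/4001/ws",
    "dnsaddr=/ip4/127.0.0.1/tcp/4001"
  ]
  let expanded = await resolver.resolveMAddress(
    MultiAddress.init("/dnsaddr/test.io/").tryGet())
  echo expanded  # both candidates; the switch dials those it has transports for

waitFor demo()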
@@ -15,56 +15,39 @@ import ./helpers, ./commontransport
 const
   SecureKey* = """
 -----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCdNv0SX02aeZ4/
-Yc+p/Kwd5UVOHlpmK7/TVC/kcjFbdoUuKNn8pnX/fyhgSKpUYut+te7YRiZhqlaL
-EZKjfy8GBZwXZnJCevFkTvGTTebXXExLIsLGfJqKeLAdFCQkX8wV3jV1DT5JLV+D
-5+HWaiiBr38gsl4ZbfyedTF40JvzokCmcdlx9bpzX1j/b84L/zSwUyyEcgp5G28F
-Jh5TnxAeDHJpOVjr8XMb/xoNqiDF6NwF96hvOZC14mZ1TxxW5bUzXprsy0l52pmh
-dN3Crz11+t2h519hRKHxT6/l5pTx/+dApXiP6hMV04CQJNnas3NyRxTDR9dNel+3
-+wD7/PRTAgMBAAECggEBAJuXPEbegxMKog7gYoE9S6oaqchySc0sJyCjBPL2ANsg
-JRZV38cnh0hhNDh2MfxqGd7Bd6wbYQjvZ88iiRm+WW+ARcby4MnimtxHNNYwFvG0
-qt0BffqqftfkMYfV0x8coAJUdFtvy+DoQstsxhlJ3uTaJtrZLD/GlmjMWzXSX0Vy
-FXiLDO7/LoSjsjaf4e4aLofIyLJS3H1T+5cr/d2mdpRzkeWkxShODsK4cRLOlZ5I
-pz4Wm2770DTbiYph8ixl/CnmYn6T7V0F5VYujALknipUBeQY4e/A9vrQ/pvqJV+W
-JjFUne6Rxg/lJjh8vNJp2bK1ZbzpwmZLaZIoEz8t/qECgYEAzvCCA48uQPaurSQ3
-cvHDhcVwYmEaH8MW8aIW/5l8XJK60GsUHPFhEsfD/ObI5PJJ9aOqgabpRHkvD4ZY
-a8QJBxCy6UeogUeKvGks8VQ34SZXLimmgrL9Mlljv0v9PloEkVYbztYyX4GVO0ov
-3oH+hKO+/MclzNDyeXZx3Vv4K+UCgYEAwnyb7tqp7fRqm/8EymIZV5pa0p6h609p
-EhCBi9ii6d/ewEjsBhs7bPDBO4PO9ylvOvryYZH1hVbQja2anOCBjO8dAHRHWM86
-964TFriywBQkYxp6dsB8nUjLBDza2xAM3m+OGi9/ATuhEAe5sXp/fZL3tkfSaOXI
-A7Gzro+kS9cCgYEAtKScSfEeBlWQa9H2mV9UN5z/mtF61YkeqTW+b8cTGVh4vWEL
-wKww+gzqGAV6Duk2CLijKeSDMmO64gl7fC83VjSMiTklbhz+jbQeKFhFI0Sty71N
-/j+y6NXBTgdOfLRl0lzhj2/JrzdWBtie6tR9UloCaXSKmb04PTFY+kvDWsUCgYBR
-krJUnKJpi/qrM2tu93Zpp/QwIxkG+We4i/PKFDNApQVo4S0d4o4qQ1DJBZ/pSxe8
-RUUkZ3PzWVZgFlCjPAcadbBUYHEMbt7sw7Z98ToIFmqspo53AIVD8yQzwtKIz1KW
-eXPAx+sdOUV008ivCBIxOVNswPMfzED4S7Bxpw3iQQKBgGJhct2nBsgu0l2/wzh9
-tpKbalW1RllgptNQzjuBEZMTvPF0L+7BE09/exKtt4N9s3yAzi8o6Qo7RHX5djVc
-SNgafV4jj7jt2Ilh6KOy9dshtLoEkS1NmiqfVe2go2auXZdyGm+I2yzKWdKGDO0J
-diTtYf1sA0PgNXdSyDC03TZl
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAP0yH7F7FtGunC91
+IPkU+u8B4gdxiwYW0J3PrixtB1Xz3e4dfjwQqhIJlG6BxQ4myCxmSPjxP/eOOYp+
+8/+A9nikbnc7H3OV8fNJhsSmPu8j8W2FsNVJzJnUQaE2yimrFR8NnvQ4MKvMBSgb
+lHTLbP1aAFp+K6KPPE7pkRMUdlqFAgMBAAECgYBl0eli4yALFI/kmdK3uBMtWHGA
+Es4YlcYxIFpnrTS9AQPnhN7F4uGxvT5+rhsDlN780+lWixXxRLWpF2KiBkeW8ayT
+kPeWvpSy6z+4LXw633ZLfCO1r6brpqSXNWxA0q7IgzYQEfMpnkaQrE3PVP5xkmTT
+k159ev138J23VfNgRQJBAP768qHOCnplKIe69SVUWlsQ5nnnybDBMq2YVATbombz
+KD57iufzBgND1clIEEuC6PK2C5gzTk4HZQioJ/juOFcCQQD+NVlb1HLoK7rHXZFO
+Tg3O+bwRZdo67J4pt//ijF7tLlZU/q6Kp9wHrXe1yhRV+Tow0BzBVHkc5eUM0/n7
+cOqDAkAedrECb/GEig17mfSsDxX0h2Jh8jWArrR1VRvEsNEIZ8jJHk2MRNbVEQe7
+0qZPv0ZBqUpdVtPmMq/5hs2vyhZlAkEA1cZ1fCUf8KD9tLS6AnjfYeRgRN07dXwQ
+0hKbTKAxIBJspZN7orzg60/0sNrc2SP6zJvm4qowI54tTelhexMNEwJBAOZz72xn
+EFUXKYQBbetiejnBBzFYmdA/QKmZ7kbQfDBOwG9wDPFmvnNSvSZws/bP1zcM95rq
+NABr5ec1FxuJa/8=
 -----END PRIVATE KEY-----
 """

   SecureCert* = """
 -----BEGIN CERTIFICATE-----
-MIIDazCCAlOgAwIBAgIUe9fr78Dz9PedQ5Sq0uluMWQhX9wwDQYJKoZIhvcNAQEL
-BQAwRTELMAkGA1UEBhMCSU4xEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
-GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMTAzMTcwOTMzMzZaFw0zMTAz
-MTUwOTMzMzZaMEUxCzAJBgNVBAYTAklOMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
-HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
-AQUAA4IBDwAwggEKAoIBAQCdNv0SX02aeZ4/Yc+p/Kwd5UVOHlpmK7/TVC/kcjFb
-doUuKNn8pnX/fyhgSKpUYut+te7YRiZhqlaLEZKjfy8GBZwXZnJCevFkTvGTTebX
-XExLIsLGfJqKeLAdFCQkX8wV3jV1DT5JLV+D5+HWaiiBr38gsl4ZbfyedTF40Jvz
-okCmcdlx9bpzX1j/b84L/zSwUyyEcgp5G28FJh5TnxAeDHJpOVjr8XMb/xoNqiDF
-6NwF96hvOZC14mZ1TxxW5bUzXprsy0l52pmhdN3Crz11+t2h519hRKHxT6/l5pTx
-/+dApXiP6hMV04CQJNnas3NyRxTDR9dNel+3+wD7/PRTAgMBAAGjUzBRMB0GA1Ud
-DgQWBBRkSY1AkGUpVNxG5fYocfgFODtQmTAfBgNVHSMEGDAWgBRkSY1AkGUpVNxG
-5fYocfgFODtQmTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBt
-D71VH7F8GOQXITFXCrHwEq1Fx3ScuSnL04NJrXw/e9huzLVQOchAYp/EIn4x2utN
-S31dt94wvi/IysOVbR1LatYNF5kKgGj2Wc6DH0PswBMk8R1G8QMeCz+hCjf1VDHe
-AAW1x2q20rJAvUrT6cRBQqeiMzQj0OaJbvfnd2hu0/d0DFkcuGVgBa2zlbG5rbdU
-Jnq7MQfSaZHd0uBgiKkS+Zw6XaYfWfByCAGSnUqRdOChiJ2stFVLvu+9oQ+PJjJt
-Er1u9bKTUyeuYpqXr2BP9dqphwu8R4NFVUg6DIRpMFMsybaL7KAd4hD22RXCvc0m
-uLu7KODi+eW62MHqs4N2
+MIICjDCCAfWgAwIBAgIURjeiJmkNbBVktqXvnXh44DKx364wDQYJKoZIhvcNAQEL
+BQAwVzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEQMA4GA1UEAwwHd3MudGVzdDAgFw0y
+MTA5MTQxMTU2NTZaGA8yMDgyMDgzMDExNTY1NlowVzELMAkGA1UEBhMCQVUxEzAR
+BgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5
+IEx0ZDEQMA4GA1UEAwwHd3MudGVzdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
+gYEA/TIfsXsW0a6cL3Ug+RT67wHiB3GLBhbQnc+uLG0HVfPd7h1+PBCqEgmUboHF
+DibILGZI+PE/9445in7z/4D2eKRudzsfc5Xx80mGxKY+7yPxbYWw1UnMmdRBoTbK
+KasVHw2e9Dgwq8wFKBuUdMts/VoAWn4roo88TumRExR2WoUCAwEAAaNTMFEwHQYD
+VR0OBBYEFHaV2ief8/Que1wxcZ8ACfdW7NUNMB8GA1UdIwQYMBaAFHaV2ief8/Qu
+e1wxcZ8ACfdW7NUNMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEA
+XvDtaDLShrjS9huhRVoEdUtoBdhonmFpV3HXqRs7NdTuUWooXiph9a66GVSIfUCR
+iEaNOKF6OM0n7GLSDIrBeIWAxL9Ra/dFFwCxl+9wxg8yyzEJDBkAhXkrfp2b4Sx6
+wdK6xU2VOAxI0GUzwzjcyNl7RDFA3ayFaGl+9+oppWM=
 -----END CERTIFICATE-----
 """

@@ -86,3 +69,30 @@ suite "WebSocket transport":
         TLSCertificate.init(SecureCert),
         {TLSFlags.NoVerifyHost, TLSFlags.NoVerifyServerName}),
       "/ip4/0.0.0.0/tcp/0/wss")
+
+  asyncTest "Hostname verification":
+    let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0/wss").tryGet()
+    let transport1 = WsTransport.new(Upgrade(), TLSPrivateKey.init(SecureKey), TLSCertificate.init(SecureCert), {TLSFlags.NoVerifyHost})
+
+    await transport1.start(ma)
+    proc acceptHandler() {.async, gcsafe.} =
+      while true:
+        let conn = await transport1.accept()
+        if not isNil(conn):
+          await conn.close()
+
+    let handlerWait = acceptHandler()
+
+    # ws.test is in certificate
+    let conn = await transport1.dial("ws.test", transport1.ma)
+
+    await conn.close()
+
+    try:
+      let conn = await transport1.dial("ws.wronghostname", transport1.ma)
+      check false
+    except CatchableError as exc:
+      check true
+
+    await handlerWait.cancelAndWait()
+    await transport1.stop()
@@ -0,0 +1,25 @@
+import os, osproc, streams, strutils
+import parseutils
+
+let contents =
+  if paramCount() > 0:
+    readFile(paramStr(1))
+  else:
+    stdin.readAll()
+var index = 0
+
+const startDelim = "```nim\n"
+const endDelim = "\n```"
+while true:
+  let startOfBlock = contents.find(startDelim, start = index)
+  if startOfBlock == -1: break
+
+  let endOfBlock = contents.find(endDelim, start = startOfBlock + startDelim.len)
+  if endOfBlock == -1:
+    quit "Unfinished block!"
+
+  let code = contents[startOfBlock + startDelim.len .. endOfBlock]
+
+  echo code
+
+  index = endOfBlock + endDelim.len
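The new helper above reads the file named by its first argument (or stdin when none is given) and prints the body of every ```nim fence it finds, which lets documentation snippets be fed to the compiler. A hypothetical invocation, since the script's real file name and CI wiring are not visible in this view: `nim r extract_blocks.nim README.md > snippets.nim`. Note that the inclusive `.. endOfBlock` slice keeps the newline that precedes each closing fence in the printed output.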