Mirror of https://github.com/waku-org/nwaku.git, synced 2025-02-12 15:06:38 +00:00
Deploy: a044c6a82c910f0e9e6522ce285b774e1b824443

Commit 6e49e04c45 (parent: 6a6edeb874)
.gitmodules (vendored): 2 lines changed
@@ -12,7 +12,7 @@
 	path = vendor/nim-libp2p
 	url = https://github.com/status-im/nim-libp2p.git
 	ignore = dirty
-	branch = master
+	branch = unstable
 [submodule "vendor/nim-stew"]
 	path = vendor/nim-stew
 	url = https://github.com/status-im/nim-stew.git
@@ -304,7 +304,7 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
     rlnRelayEnabled = conf.rlnRelay,
     relayMessages = conf.relay) # Indicates if node is capable to relay messages

-  node.mountKeepalive()
+  node.mountLibp2pPing()

   let nick = await readNick(transp)
   echo "Welcome, " & nick & "!"
@@ -253,7 +253,7 @@ when isMainModule:

   # Now load rest of config
   # Mount configured Waku v2 protocols
-  mountKeepalive(bridge.nodev2)
+  mountLibp2pPing(bridge.nodev2)

   if conf.store:
     mountStore(bridge.nodev2)
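Across both hunks the change is the same: the custom WakuKeepalive mount is swapped for libp2p's standard ping protocol, with `startKeepalive` still driving the periodic pings. A minimal sketch of the new wiring, assuming the `WakuNode` API visible in this diff:

```nim
# Sketch only; mirrors the calls shown in the hunks above.
# `node` is a started WakuNode instance.
node.mountLibp2pPing()   # register the standard libp2p ping protocol
node.startKeepalive()    # periodically ping peers to keep connections alive
```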
@@ -6,44 +6,42 @@ import
   stew/shims/net as stewNet,
   libp2p/switch,
   libp2p/protobuf/minprotobuf,
+  libp2p/protocols/ping,
   libp2p/stream/[bufferstream, connection],
   libp2p/crypto/crypto,
   libp2p/multistream,
   ../../waku/v2/node/wakunode2,
-  ../../waku/v2/protocol/waku_keepalive/waku_keepalive,
   ../test_helpers, ./utils

 procSuite "Waku Keepalive":

-  asyncTest "handle keepalive":
+  asyncTest "handle ping keepalives":
     let
       nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
       node1 = WakuNode.init(nodeKey1, ValidIpAddress.init("0.0.0.0"), Port(60000))
       nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
       node2 = WakuNode.init(nodeKey2, ValidIpAddress.init("0.0.0.0"), Port(60002))

+    var completionFut = newFuture[bool]()
+
+    proc pingHandler(peer: PeerInfo) {.async, gcsafe, raises: [Defect].} =
+      debug "Ping received"
+
+      check:
+        peer.peerId == node1.switch.peerInfo.peerId
+
+      completionFut.complete(true)
+
     await node1.start()
     node1.mountRelay()
-    node1.mountKeepalive()
+    node1.mountLibp2pPing()

     await node2.start()
     node2.mountRelay()
-    node2.mountKeepalive()
+    node2.switch.mount(Ping.new(handler = pingHandler))

     await node1.connectToNodes(@[node2.peerInfo])

-    var completionFut = newFuture[bool]()
-
-    proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
-      debug "WakuKeepalive message received"
-
-      check:
-        proto == waku_keepalive.WakuKeepaliveCodec
-
-      completionFut.complete(true)
-
-    node2.wakuKeepalive.handler = handle
-
     node1.startKeepalive()

     check:
@@ -32,6 +32,7 @@ const
 type
   HttpClientConnectionState* {.pure.} = enum
     Closed                    ## Connection has been closed
+    Closing,                  ## Connection is closing
     Resolving,                ## Resolving remote hostname
     Connecting,               ## Connecting to remote server
     Ready,                    ## Connected to remote server
@@ -51,6 +52,7 @@ type

   HttpClientRequestState* {.pure.} = enum
     Closed,                   ## Request has been closed
+    Closing,                  ## Connection is closing
     Created,                  ## Request created
     Connecting,               ## Connecting to remote host
     HeadersSending,           ## Sending request headers
@@ -62,6 +64,7 @@ type

   HttpClientResponseState* {.pure.} = enum
     Closed,                   ## Response has been closed
+    Closing,                  ## Response is closing
     HeadersReceived,          ## Response headers received
     BodyReceiving,            ## Response body receiving
     BodyReceived,             ## Response body received
@@ -528,8 +531,18 @@ proc setError(response: HttpClientResponseRef, error: ref HttpError) {.

 proc closeWait(conn: HttpClientConnectionRef) {.async.} =
   ## Close HttpClientConnectionRef instance ``conn`` and free all the resources.
-  if conn.state != HttpClientConnectionState.Closed:
-    await allFutures(conn.reader.closeWait(), conn.writer.closeWait())
+  if conn.state notin {HttpClientConnectionState.Closing,
+                       HttpClientConnectionState.Closed}:
+    conn.state = HttpClientConnectionState.Closing
+    let pending =
+      block:
+        var res: seq[Future[void]]
+        if not(isNil(conn.reader)) and not(conn.reader.closed()):
+          res.add(conn.reader.closeWait())
+        if not(isNil(conn.writer)) and not(conn.writer.closed()):
+          res.add(conn.writer.closeWait())
+        res
+    if len(pending) > 0: await allFutures(pending)
     case conn.kind
     of HttpClientScheme.Secure:
       await allFutures(conn.treader.closeWait(), conn.twriter.closeWait())
@@ -568,6 +581,7 @@ proc connect(session: HttpSessionRef,
         raise exc
       except AsyncStreamError:
+        await res.closeWait()
         res.state = HttpClientConnectionState.Error
     of HttpClientScheme.Nonsecure:
       res.state = HttpClientConnectionState.Ready
   res
@@ -632,7 +646,9 @@ proc closeWait*(session: HttpSessionRef) {.async.} =
   await allFutures(pending)

 proc closeWait*(request: HttpClientRequestRef) {.async.} =
-  if request.state != HttpClientRequestState.Closed:
+  if request.state notin {HttpClientRequestState.Closing,
+                          HttpClientRequestState.Closed}:
+    request.setState(HttpClientRequestState.Closing)
     if not(isNil(request.writer)):
       if not(request.writer.closed()):
         await request.writer.closeWait()
@@ -647,7 +663,9 @@ proc closeWait*(request: HttpClientRequestRef) {.async.} =
   untrackHttpClientRequest(request)

 proc closeWait*(response: HttpClientResponseRef) {.async.} =
-  if response.state != HttpClientResponseState.Closed:
+  if response.state notin {HttpClientResponseState.Closing,
+                           HttpClientResponseState.Closed}:
+    response.setState(HttpClientResponseState.Closing)
     if not(isNil(response.reader)):
       if not(response.reader.closed()):
         await response.reader.closeWait()
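The recurring fix in these `closeWait` hunks is a dedicated `Closing` state that is entered before any resources are released, so a second, concurrent `closeWait` call observes `Closing` and becomes a no-op. Distilled into a standalone sketch (the type names are illustrative, not chronos API):

```nim
type
  ConnState = enum
    Ready, Closing, Closed

  Conn = ref object
    state: ConnState

proc closeOnce(c: Conn) =
  # Same guard as in the diff: only the first caller proceeds;
  # re-entrant or concurrent calls see Closing/Closed and return.
  if c.state notin {ConnState.Closing, ConnState.Closed}:
    c.state = ConnState.Closing
    # ... release readers/writers here ...
    c.state = ConnState.Closed
```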
@@ -1200,18 +1218,18 @@ proc fetch*(session: HttpSessionRef, url: Uri): Future[HttpResponseTuple] {.
           raiseHttpRedirectError("Location header with an empty value")
         else:
           raiseHttpRedirectError("Location header missing")
+      discard await response.consumeBody()
       await request.closeWait()
       request = nil
-      discard await response.consumeBody()
       await response.closeWait()
       response = nil
       request = redirect
       redirect = nil
     else:
-      await request.closeWait()
-      request = nil
       let data = await response.getBodyBytes()
       let code = response.status
+      await request.closeWait()
+      request = nil
       await response.closeWait()
       response = nil
       return (code, data)
vendor/nim-eth/.appveyor.yml (vendored): 1 line changed
@@ -77,6 +77,7 @@ build_script:
 test_script:
   - nimble test
   - nimble build_dcli
+  - nimble build_portalcli

 deploy: off

vendor/nim-eth/.github/workflows/ci.yml (vendored): 2 lines changed
@@ -237,4 +237,4 @@ jobs:
         nimble install -y --depsOnly
         nimble test
         nimble build_dcli
+        nimble build_portalcli
vendor/nim-eth/.travis.yml (vendored): 1 line changed
@@ -47,3 +47,4 @@ script:
   - nimble install -y --depsOnly
   - nimble test
   - nimble build_dcli
+  - nimble build_portalcli
vendor/nim-eth/eth.nimble (vendored): 6 lines changed
@@ -46,6 +46,9 @@ task test_discv5, "Run discovery v5 tests":
 task test_discv4, "Run discovery v4 tests":
   runTest("tests/p2p/test_discovery")

+task test_portal, "Run Portal network tests":
+  runTest("tests/p2p/all_portal_tests")
+
 task test_p2p, "Run p2p tests":
   runTest("tests/p2p/all_tests")

@@ -86,3 +89,6 @@ task test_discv5_full, "Run discovery v5 and its dependencies tests":

 task build_dcli, "Build dcli":
   buildBinary("eth/p2p/discoveryv5/dcli")
+
+task build_portalcli, "Build portalcli":
+  buildBinary("eth/p2p/portal/portalcli")
vendor/nim-eth/eth/p2p/discoveryv5/protocol.nim (vendored): 71 lines changed
@@ -132,16 +132,20 @@ type
     bootstrapRecords*: seq[Record]
     ipVote: IpVote
     enrAutoUpdate: bool
-    talkProtocols: Table[seq[byte], TalkProtocolHandler]
+    talkProtocols*: Table[seq[byte], TalkProtocol] # TODO: Table is a bit of
+                                                   # overkill here, use sequence
     rng*: ref BrHmacDrbgContext

   PendingRequest = object
     node: Node
     message: seq[byte]

-  TalkProtocolHandler* = proc(request: seq[byte]): seq[byte]
+  TalkProtocolHandler* = proc(p: TalkProtocol, request: seq[byte]): seq[byte]
     {.gcsafe, raises: [Defect].}

+  TalkProtocol* = ref object of RootObj
+    protocolHandler*: TalkProtocolHandler
+
   DiscResult*[T] = Result[T, cstring]

 proc addNode*(d: Protocol, node: Node): bool =
@@ -299,15 +303,16 @@ proc handleFindNode(d: Protocol, fromId: NodeId, fromAddr: Address,

 proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,
     talkreq: TalkReqMessage, reqId: RequestId) =
-  let protocolHandler = d.talkProtocols.getOrDefault(talkreq.protocol)
+  let talkProtocol = d.talkProtocols.getOrDefault(talkreq.protocol)

   let talkresp =
-    if protocolHandler.isNil():
+    if talkProtocol.isNil() or talkProtocol.protocolHandler.isNil():
       # Protocol identifier that is not registered and thus not supported. An
       # empty response is send as per specification.
       TalkRespMessage(response: @[])
     else:
-      TalkRespMessage(response: protocolHandler(talkreq.request))
+      TalkRespMessage(response: talkProtocol.protocolHandler(talkProtocol,
+        talkreq.request))

   let (data, _) = encodeMessagePacket(d.rng[], d.codec, fromId, fromAddr,
     encodeMessage(talkresp, reqId))

@@ -341,10 +346,10 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
     trace "Timed out or unrequested message", kind = message.kind,
       origin = fromAddr

-proc registerTalkProtocol*(d: Protocol, protocol: seq[byte],
-    handler: TalkProtocolHandler): DiscResult[void] =
+proc registerTalkProtocol*(d: Protocol, protocolId: seq[byte],
+    protocol: TalkProtocol): DiscResult[void] =
   # Currently allow only for one handler per talk protocol.
-  if d.talkProtocols.hasKeyOrPut(protocol, handler):
+  if d.talkProtocols.hasKeyOrPut(protocolId, protocol):
     err("Protocol identifier already registered")
   else:
     ok()
@@ -546,18 +551,23 @@ proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
   ## If one reply is lost here (timed out), others are ignored too.
   ## Same counts for out of order receival.
   var op = await d.waitMessage(fromNode, reqId)
-  if op.isSome and op.get.kind == nodes:
-    var res = op.get.nodes.enrs
-    let total = op.get.nodes.total
-    for i in 1 ..< total:
-      op = await d.waitMessage(fromNode, reqId)
-      if op.isSome and op.get.kind == nodes:
-        res.add(op.get.nodes.enrs)
-      else:
-        # No error on this as we received some nodes.
-        break
-    return ok(res)
+  if op.isSome:
+    if op.get.kind == nodes:
+      var res = op.get.nodes.enrs
+      let total = op.get.nodes.total
+      for i in 1 ..< total:
+        op = await d.waitMessage(fromNode, reqId)
+        if op.isSome and op.get.kind == nodes:
+          res.add(op.get.nodes.enrs)
+        else:
+          # No error on this as we received some nodes.
+          break
+      return ok(res)
+    else:
+      discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+      return err("Invalid response to find node message")
   else:
     discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
     return err("Nodes message not received in time")

 proc sendMessage*[T: SomeMessage](d: Protocol, toNode: Node, m: T):
@@ -586,9 +596,14 @@ proc ping*(d: Protocol, toNode: Node):
     PingMessage(enrSeq: d.localNode.record.seqNum))
   let resp = await d.waitMessage(toNode, reqId)

-  if resp.isSome() and resp.get().kind == pong:
-    d.routingTable.setJustSeen(toNode)
-    return ok(resp.get().pong)
+  if resp.isSome():
+    if resp.get().kind == pong:
+      d.routingTable.setJustSeen(toNode)
+      return ok(resp.get().pong)
+    else:
+      d.replaceNode(toNode)
+      discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+      return err("Invalid response to ping message")
   else:
     d.replaceNode(toNode)
     discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
@@ -609,7 +624,6 @@ proc findNode*(d: Protocol, toNode: Node, distances: seq[uint32]):
     return ok(res)
   else:
     d.replaceNode(toNode)
-    discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
     return err(nodes.error)

 proc talkreq*(d: Protocol, toNode: Node, protocol, request: seq[byte]):
@@ -621,9 +635,14 @@ proc talkreq*(d: Protocol, toNode: Node, protocol, request: seq[byte]):
     TalkReqMessage(protocol: protocol, request: request))
   let resp = await d.waitMessage(toNode, reqId)

-  if resp.isSome() and resp.get().kind == talkresp:
-    d.routingTable.setJustSeen(toNode)
-    return ok(resp.get().talkresp)
+  if resp.isSome():
+    if resp.get().kind == talkresp:
+      d.routingTable.setJustSeen(toNode)
+      return ok(resp.get().talkresp)
+    else:
+      d.replaceNode(toNode)
+      discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+      return err("Invalid response to talk request message")
   else:
     d.replaceNode(toNode)
     discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
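The net effect of these hunks: talk protocols are now objects carrying their handler, and the handler receives the protocol instance, so implementations can keep per-protocol state. A hypothetical echo protocol against the new API (the names `EchoProtocol` and `echoHandler` are illustrative, not part of the diff):

```nim
import stew/byteutils  # for toBytes; `d` is an instantiated discv5 Protocol

# Hypothetical minimal talk protocol using the new object-based API.
type EchoProtocol = ref object of TalkProtocol

proc echoHandler(p: TalkProtocol, request: seq[byte]): seq[byte]
    {.gcsafe, raises: [Defect].} =
  request  # echo the payload back unchanged

let echoProto = EchoProtocol(protocolHandler: echoHandler)
d.registerTalkProtocol("echo".toBytes(), echoProto).expect(
  "Only one protocol should have this id")
```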
vendor/nim-eth/eth/p2p/portal/README.md (vendored, new file): 48 lines
@@ -0,0 +1,48 @@
# Portal Network Wire Protocol

## Introduction

The `eth/p2p/portal` directory holds a Nim implementation of the
[Portal Network Wire Protocol](https://github.com/ethereum/stateless-ethereum-specs/blob/master/state-network.md#wire-protocol).

Both the specification (at the above link) and this implementation are still WIP.

The protocol builds on top of the Node Discovery v5.1 protocol, using its
`talkreq` and `talkresp` messages.

For further information on the Nim implementation of the Node Discovery v5.1
protocol, check out the [discv5](../../../doc/discv5.md) page.

## Test suite

To run the test suite specifically for the Portal wire protocol, run the
following command:
```sh
# Install required modules
nimble install
# Run only Portal tests
nimble test_portal
```

## portalcli

This is a small command line application that allows you to run a
Discovery v5.1 + Portal node.

*Note:* Its objective is only to test the protocol wire component, not to
actually serve content. This means it will always return empty lists on content
requests. Perhaps in the future some hardcoded data could be added, and maybe
some test vectors could be created in that form.

The `portalcli` application allows you to either run a node, or to specifically
send one of the message types, wait for the response, and then shut down.

### Example usage
```sh
# Install required modules
# Make sure you have the latest modules, do NOT trust nimble on this.
nimble install
# Build portalcli
nimble build_portalcli
# See all options
./eth/p2p/portal/portalcli --help
# Example command: Ping another node
./eth/p2p/portal/portalcli ping enr:<base64 encoding of ENR>
# Example command: Run discovery + portal node
./eth/p2p/portal/portalcli --log-level:debug --bootnode:enr:<base64 encoding of ENR>
```
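For embedding rather than the CLI, the same setup can be driven programmatically; the sketch below condenses the `run` proc of `portalcli.nim` (shown later in this diff) and assumes an already opened discv5 `Protocol` named `d` and a known target `Node` named `dst`:

```nim
# Condensed from portalcli.nim's run().
let portal = PortalProtocol.new(d)   # registers the "portal" talk protocol on d

# Send a Portal ping to `dst` and print the result.
let pong = waitFor portal.ping(dst)
if pong.isOk():
  echo pong.get()
else:
  echo pong.error
```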
vendor/nim-eth/eth/p2p/portal/messages.nim (vendored, new file): 153 lines
@@ -0,0 +1,153 @@
# nim-eth - Portal Network - Message types
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# As per spec:
# https://github.com/ethereum/stateless-ethereum-specs/blob/master/state-network.md#wire-protocol

{.push raises: [Defect].}

import
  stint, stew/[results, objects],
  ../../ssz/ssz_serialization

export ssz_serialization, stint

type
  ByteList* = List[byte, 2048]

  MessageKind* = enum
    unused = 0x00

    ping = 0x01
    pong = 0x02
    findnode = 0x03
    nodes = 0x04
    findcontent = 0x05
    foundcontent = 0x06
    advertise = 0x07
    requestproofs = 0x08

  PingMessage* = object
    enrSeq*: uint64
    dataRadius*: UInt256

  PongMessage* = object
    enrSeq*: uint64
    dataRadius*: UInt256

  FindNodeMessage* = object
    distances*: List[uint16, 256]

  NodesMessage* = object
    total*: uint8
    enrs*: List[ByteList, 32] # ByteList here is the rlp encoded ENR. This could
                              # also be limited to 300 bytes instead of 2048

  FindContentMessage* = object
    contentKey*: ByteList

  FoundContentMessage* = object
    enrs*: List[ByteList, 32]
    payload*: ByteList

  AdvertiseMessage* = List[ByteList, 32] # No container, heh...

  # This would be more consistent with the other messages
  # AdvertiseMessage* = object
  #   contentKeys*: List[ByteList, 32]

  RequestProofsMessage* = object
    connectionId*: List[byte, 4]
    contentKeys*: List[ByteList, 32]

  Message* = object
    case kind*: MessageKind
    of ping:
      ping*: PingMessage
    of pong:
      pong*: PongMessage
    of findnode:
      findNode*: FindNodeMessage
    of nodes:
      nodes*: NodesMessage
    of findcontent:
      findcontent*: FindContentMessage
    of foundcontent:
      foundcontent*: FoundContentMessage
    of advertise:
      advertise*: AdvertiseMessage
    of requestproofs:
      requestproofs*: RequestProofsMessage
    else:
      discard

  SomeMessage* =
    PingMessage or PongMessage or
    FindNodeMessage or NodesMessage or
    FindContentMessage or FoundContentMessage or
    AdvertiseMessage or RequestProofsMessage

template messageKind*(T: typedesc[SomeMessage]): MessageKind =
  when T is PingMessage: ping
  elif T is PongMessage: pong
  elif T is FindNodeMessage: findNode
  elif T is NodesMessage: nodes
  elif T is FindContentMessage: findcontent
  elif T is FoundContentMessage: foundcontent
  elif T is AdvertiseMessage: advertise
  elif T is RequestProofsMessage: requestproofs

template toSszType*(x: auto): auto =
  mixin toSszType

  when x is UInt256: toBytesLE(x)
  else: x

func fromSszBytes*(T: type UInt256, data: openArray[byte]):
    T {.raises: [MalformedSszError, Defect].} =
  if data.len != sizeof(result):
    raiseIncorrectSize T

  T.fromBytesLE(data)

proc encodeMessage*[T: SomeMessage](m: T): seq[byte] =
  ord(messageKind(T)).byte & SSZ.encode(m)

proc decodeMessage*(body: openarray[byte]): Result[Message, cstring] =
  # Decodes to the specific `Message` type.
  if body.len < 1:
    return err("No message data")

  var kind: MessageKind
  if not checkedEnumAssign(kind, body[0]):
    return err("Invalid message type")

  var message = Message(kind: kind)

  try:
    case kind
    of unused: return err("Invalid message type")
    of ping:
      message.ping = SSZ.decode(body.toOpenArray(1, body.high), PingMessage)
    of pong:
      message.pong = SSZ.decode(body.toOpenArray(1, body.high), PongMessage)
    of findNode:
      message.findNode = SSZ.decode(body.toOpenArray(1, body.high), FindNodeMessage)
    of nodes:
      message.nodes = SSZ.decode(body.toOpenArray(1, body.high), NodesMessage)
    of findcontent:
      message.findcontent = SSZ.decode(body.toOpenArray(1, body.high), FindContentMessage)
    of foundcontent:
      message.foundcontent = SSZ.decode(body.toOpenArray(1, body.high), FoundContentMessage)
    of advertise:
      message.advertise = SSZ.decode(body.toOpenArray(1, body.high), AdvertiseMessage)
    of requestproofs:
      message.requestproofs = SSZ.decode(body.toOpenArray(1, body.high), RequestProofsMessage)
  except SszError:
    return err("Invalid message encoding")

  ok(message)
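Together, `encodeMessage` and `decodeMessage` define the framing: one message-kind byte followed by the SSZ-encoded body. A round-trip sketch, assuming this module compiles in a nim-eth checkout:

```nim
import ./messages

# Encode a ping: first byte is the kind, the rest is the SSZ body.
let ping = PingMessage(enrSeq: 1'u64, dataRadius: UInt256.high())
let encoded = encodeMessage(ping)
doAssert encoded[0] == ord(MessageKind.ping).byte

# Decoding restores the tagged Message union.
let decoded = decodeMessage(encoded)
doAssert decoded.isOk()
doAssert decoded.get().kind == MessageKind.ping
doAssert decoded.get().ping.enrSeq == 1'u64
```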
vendor/nim-eth/eth/p2p/portal/portalcli.nim (vendored, new file): 221 lines
@@ -0,0 +1,221 @@
# nim-eth - Portal Network
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/[options, strutils, tables],
  confutils, confutils/std/net, chronicles, chronicles/topics_registry,
  chronos, metrics, metrics/chronos_httpserver, stew/byteutils,
  ../../keys, ../../net/nat,
  ".."/discoveryv5/[enr, node], ".."/discoveryv5/protocol as discv5_protocol,
  ./messages, ./protocol as portal_protocol

type
  PortalCmd* = enum
    noCommand
    ping
    findnode
    findcontent

  DiscoveryConf* = object
    logLevel* {.
      defaultValue: LogLevel.DEBUG
      desc: "Sets the log level"
      name: "log-level" .}: LogLevel

    udpPort* {.
      defaultValue: 9009
      desc: "UDP listening port"
      name: "udp-port" .}: uint16

    listenAddress* {.
      defaultValue: defaultListenAddress(config)
      desc: "Listening address for the Discovery v5 traffic"
      name: "listen-address" .}: ValidIpAddress

    bootnodes* {.
      desc: "ENR URI of node to bootstrap discovery with. Argument may be repeated"
      name: "bootnode" .}: seq[enr.Record]

    nat* {.
      desc: "Specify method to use for determining public address. " &
            "Must be one of: any, none, upnp, pmp, extip:<IP>"
      defaultValue: NatConfig(hasExtIp: false, nat: NatAny)
      name: "nat" .}: NatConfig

    enrAutoUpdate* {.
      defaultValue: false
      desc: "Discovery can automatically update its ENR with the IP address " &
            "and UDP port as seen by other nodes it communicates with. " &
            "This option allows to enable/disable this functionality"
      name: "enr-auto-update" .}: bool

    nodeKey* {.
      desc: "P2P node private key as hex",
      defaultValue: PrivateKey.random(keys.newRng()[])
      name: "nodekey" .}: PrivateKey

    metricsEnabled* {.
      defaultValue: false
      desc: "Enable the metrics server"
      name: "metrics" .}: bool

    metricsAddress* {.
      defaultValue: defaultAdminListenAddress(config)
      desc: "Listening address of the metrics server"
      name: "metrics-address" .}: ValidIpAddress

    metricsPort* {.
      defaultValue: 8008
      desc: "Listening HTTP port of the metrics server"
      name: "metrics-port" .}: Port

    case cmd* {.
      command
      defaultValue: noCommand .}: PortalCmd
    of noCommand:
      discard
    of ping:
      pingTarget* {.
        argument
        desc: "ENR URI of the node to send a ping message"
        name: "node" .}: Node
    of findnode:
      distance* {.
        defaultValue: 255
        desc: "Distance parameter for the findNode message"
        name: "distance" .}: uint16
      # TODO: Order here matters as else the help message does not show all the
      # information, see: https://github.com/status-im/nim-confutils/issues/15
      findNodeTarget* {.
        argument
        desc: "ENR URI of the node to send a findNode message"
        name: "node" .}: Node
    of findcontent:
      findContentTarget* {.
        argument
        desc: "ENR URI of the node to send a findContent message"
        name: "node" .}: Node

func defaultListenAddress*(conf: DiscoveryConf): ValidIpAddress =
  (static ValidIpAddress.init("0.0.0.0"))

func defaultAdminListenAddress*(conf: DiscoveryConf): ValidIpAddress =
  (static ValidIpAddress.init("127.0.0.1"))

proc parseCmdArg*(T: type enr.Record, p: TaintedString): T =
  if not fromURI(result, p):
    raise newException(ConfigurationError, "Invalid ENR")

proc completeCmdArg*(T: type enr.Record, val: TaintedString): seq[string] =
  return @[]

proc parseCmdArg*(T: type Node, p: TaintedString): T =
  var record: enr.Record
  if not fromURI(record, p):
    raise newException(ConfigurationError, "Invalid ENR")

  let n = newNode(record)
  if n.isErr:
    raise newException(ConfigurationError, $n.error)

  if n[].address.isNone():
    raise newException(ConfigurationError, "ENR without address")

  n[]

proc completeCmdArg*(T: type Node, val: TaintedString): seq[string] =
  return @[]

proc parseCmdArg*(T: type PrivateKey, p: TaintedString): T =
  try:
    result = PrivateKey.fromHex(string(p)).tryGet()
  except CatchableError:
    raise newException(ConfigurationError, "Invalid private key")

proc completeCmdArg*(T: type PrivateKey, val: TaintedString): seq[string] =
  return @[]

proc discover(d: discv5_protocol.Protocol) {.async.} =
  while true:
    let discovered = await d.queryRandom()
    info "Lookup finished", nodes = discovered.len
    await sleepAsync(30.seconds)

proc run(config: DiscoveryConf) =
  let
    rng = newRng()
    bindIp = config.listenAddress
    udpPort = Port(config.udpPort)
    # TODO: allow for no TCP port mapping!
    (extIp, _, extUdpPort) = setupAddress(config.nat,
      config.listenAddress, udpPort, udpPort, "dcli")

  let d = newProtocol(config.nodeKey,
        extIp, none(Port), extUdpPort,
        bootstrapRecords = config.bootnodes,
        bindIp = bindIp, bindPort = udpPort,
        enrAutoUpdate = config.enrAutoUpdate,
        rng = rng)

  d.open()

  let portal = PortalProtocol.new(d)

  if config.metricsEnabled:
    let
      address = config.metricsAddress
      port = config.metricsPort
    notice "Starting metrics HTTP server",
      url = "http://" & $address & ":" & $port & "/metrics"
    try:
      chronos_httpserver.startMetricsHttpServer($address, port)
    except CatchableError as exc: raise exc
    except Exception as exc: raiseAssert exc.msg # TODO fix metrics

  case config.cmd
  of ping:
    let pong = waitFor portal.ping(config.pingTarget)

    if pong.isOk():
      echo pong.get()
    else:
      echo pong.error
  of findnode:
    let distances = List[uint16, 256](@[config.distance])
    let nodes = waitFor portal.findNode(config.findNodeTarget, distances)

    if nodes.isOk():
      echo nodes.get()
    else:
      echo nodes.error
  of findcontent:
    proc random(T: type UInt256, rng: var BrHmacDrbgContext): T =
      var key: UInt256
      brHmacDrbgGenerate(addr rng, addr key, csize_t(sizeof(key)))

      key

    # For now just random content keys
    let contentKey = ByteList(@(UInt256.random(rng[]).toBytes()))
    let foundContent = waitFor portal.findContent(config.findContentTarget,
      contentKey)

    if foundContent.isOk():
      echo foundContent.get()
    else:
      echo foundContent.error

  of noCommand:
    d.start()
    waitfor(discover(d))

when isMainModule:
  let config = DiscoveryConf.load()

  setLogLevel(config.logLevel)

  run(config)
vendor/nim-eth/eth/p2p/portal/protocol.nim (vendored, new file): 164 lines
@@ -0,0 +1,164 @@
# nim-eth - Portal Network
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  stew/[results, byteutils], chronicles,
  ../../rlp,
  ../discoveryv5/[protocol, node],
  ./messages

export messages

logScope:
  topics = "portal"

const
  PortalProtocolId* = "portal".toBytes()

type
  PortalProtocol* = ref object of TalkProtocol
    baseProtocol*: protocol.Protocol
    dataRadius*: UInt256

proc handlePing(p: PortalProtocol, ping: PingMessage):
    seq[byte] =
  let p = PongMessage(enrSeq: p.baseProtocol.localNode.record.seqNum,
    dataRadius: p.dataRadius)

  encodeMessage(p)

proc handleFindNode(p: PortalProtocol, fn: FindNodeMessage): seq[byte] =
  if fn.distances.len == 0:
    let enrs = List[ByteList, 32](@[])
    encodeMessage(NodesMessage(total: 1, enrs: enrs))
  elif fn.distances.contains(0):
    # A request for our own record.
    let enr = ByteList(rlp.encode(p.baseProtocol.localNode.record))
    encodeMessage(NodesMessage(total: 1, enrs: List[ByteList, 32](@[enr])))
  else:
    # TODO: Not implemented for now, sending empty back.
    let enrs = List[ByteList, 32](@[])
    encodeMessage(NodesMessage(total: 1, enrs: enrs))

proc handleFindContent(p: PortalProtocol, ping: FindContentMessage): seq[byte] =
  # TODO: Neither payload nor enrs implemented, sending empty back.
  let
    enrs = List[ByteList, 32](@[])
    payload = ByteList(@[])
  encodeMessage(FoundContentMessage(enrs: enrs, payload: payload))

proc handleAdvertise(p: PortalProtocol, ping: AdvertiseMessage): seq[byte] =
  # TODO: Not implemented
  let
    connectionId = List[byte, 4](@[])
    contentKeys = List[ByteList, 32](@[])
  encodeMessage(RequestProofsMessage(connectionId: connectionId,
    contentKeys: contentKeys))

proc messageHandler*(protocol: TalkProtocol, request: seq[byte]): seq[byte] =
  doAssert(protocol of PortalProtocol)

  let p = PortalProtocol(protocol)

  let decoded = decodeMessage(request)
  if decoded.isOk():
    let message = decoded.get()
    trace "Received message response", kind = message.kind
    case message.kind
    of MessageKind.ping:
      p.handlePing(message.ping)
    of MessageKind.findnode:
      p.handleFindNode(message.findNode)
    of MessageKind.findcontent:
      p.handleFindContent(message.findcontent)
    of MessageKind.advertise:
      p.handleAdvertise(message.advertise)
    else:
      @[]
  else:
    @[]

proc new*(T: type PortalProtocol, baseProtocol: protocol.Protocol,
    dataRadius = UInt256.high()): T =
  let proto = PortalProtocol(
    protocolHandler: messageHandler,
    baseProtocol: baseProtocol,
    dataRadius: dataRadius)

  proto.baseProtocol.registerTalkProtocol(PortalProtocolId, proto).expect(
    "Only one protocol should have this id")

  return proto

proc ping*(p: PortalProtocol, dst: Node):
    Future[DiscResult[PongMessage]] {.async.} =
  let ping = PingMessage(enrSeq: p.baseProtocol.localNode.record.seqNum,
    dataRadius: p.dataRadius)

  # TODO: This send and response handling code could be more generalized for the
  # different message types.
  trace "Send message request", dstId = dst.id, kind = MessageKind.ping
  let talkresp = await talkreq(p.baseProtocol, dst, PortalProtocolId,
    encodeMessage(ping))

  if talkresp.isOk():
    let decoded = decodeMessage(talkresp.get().response)
    if decoded.isOk():
      let message = decoded.get()
      if message.kind == pong:
        return ok(message.pong)
      else:
        return err("Invalid message response received")
    else:
      return err(decoded.error)
  else:
    return err(talkresp.error)

proc findNode*(p: PortalProtocol, dst: Node, distances: List[uint16, 256]):
    Future[DiscResult[NodesMessage]] {.async.} =
  let fn = FindNodeMessage(distances: distances)

  trace "Send message request", dstId = dst.id, kind = MessageKind.findnode
  let talkresp = await talkreq(p.baseProtocol, dst, PortalProtocolId,
    encodeMessage(fn))

  if talkresp.isOk():
    let decoded = decodeMessage(talkresp.get().response)
    if decoded.isOk():
      let message = decoded.get()
      if message.kind == nodes:
        # TODO: Verify nodes here
        return ok(message.nodes)
      else:
        return err("Invalid message response received")
    else:
      return err(decoded.error)
  else:
    return err(talkresp.error)

proc findContent*(p: PortalProtocol, dst: Node, contentKey: ByteList):
    Future[DiscResult[FoundContentMessage]] {.async.} =
  let fc = FindContentMessage(contentKey: contentKey)

  trace "Send message request", dstId = dst.id, kind = MessageKind.findcontent
  let talkresp = await talkreq(p.baseProtocol, dst, PortalProtocolId,
    encodeMessage(fc))

  if talkresp.isOk():
    let decoded = decodeMessage(talkresp.get().response)
    if decoded.isOk():
      let message = decoded.get()
      if message.kind == foundcontent:
        return ok(message.foundcontent)
      else:
        return err("Invalid message response received")
    else:
      return err(decoded.error)
  else:
    return err(talkresp.error)
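`ping`, `findNode`, and `findContent` above repeat the same talkreq-then-decode dance, and the TODO in `ping` notes as much. A hypothetical generalization using only procs defined in this diff (`requestPortal` is an illustrative name, not part of the code):

```nim
# Hypothetical helper distilling the repeated request pattern above.
proc requestPortal[T: SomeMessage](p: PortalProtocol, dst: Node, msg: T):
    Future[DiscResult[Message]] {.async.} =
  let talkresp = await talkreq(p.baseProtocol, dst, PortalProtocolId,
    encodeMessage(msg))
  if talkresp.isOk():
    # Still returns the untyped Message union; callers check `kind`.
    return decodeMessage(talkresp.get().response)
  else:
    return err(talkresp.error)
```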
vendor/nim-eth/eth/ssz/bitseqs.nim (vendored, new file): 313 lines
@@ -0,0 +1,313 @@
# nim-eth
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  stew/[bitops2, endians2, ptrops]

type
  Bytes = seq[byte]

  BitSeq* = distinct Bytes
    ## The current design of BitSeq tries to follow precisely
    ## the bitwise representation of the SSZ bitlists.
    ## This is a relatively compact representation, but as
    ## evident from the code below, many of the operations
    ## are not trivial.

  BitArray*[bits: static int] = object
    bytes*: array[(bits + 7) div 8, byte]

func bitsLen*(bytes: openArray[byte]): int =
  let
    bytesCount = bytes.len
    lastByte = bytes[bytesCount - 1]
    markerPos = log2trunc(lastByte)

  bytesCount * 8 - (8 - markerPos)

template len*(s: BitSeq): int =
  bitsLen(Bytes s)

template len*(a: BitArray): int =
  a.bits

func add*(s: var BitSeq, value: bool) =
  let
    lastBytePos = s.Bytes.len - 1
    lastByte = s.Bytes[lastBytePos]

  if (lastByte and byte(128)) == 0:
    # There is at least one leading zero, so we have enough
    # room to store the new bit
    let markerPos = log2trunc(lastByte)
    s.Bytes[lastBytePos].changeBit markerPos, value
    s.Bytes[lastBytePos].setBit markerPos + 1
  else:
    s.Bytes[lastBytePos].changeBit 7, value
    s.Bytes.add byte(1)

func toBytesLE(x: uint): array[sizeof(x), byte] =
  # stew/endians2 supports explicitly sized uints only
  when sizeof(uint) == 4:
    static: doAssert sizeof(uint) == sizeof(uint32)
    toBytesLE(x.uint32)
  elif sizeof(uint) == 8:
    static: doAssert sizeof(uint) == sizeof(uint64)
    toBytesLE(x.uint64)
  else:
    static: doAssert false, "requires a 32-bit or 64-bit platform"

func loadLEBytes(WordType: type, bytes: openArray[byte]): WordType =
  # TODO: this is a temporary proc until the endians API is improved
  var shift = 0
  for b in bytes:
    result = result or (WordType(b) shl shift)
    shift += 8

func storeLEBytes(value: SomeUnsignedInt, dst: var openArray[byte]) =
  doAssert dst.len <= sizeof(value)
  let bytesLE = toBytesLE(value)
  copyMem(addr dst[0], unsafeAddr bytesLE[0], dst.len)

template loopOverWords(lhs, rhs: BitSeq,
                       lhsIsVar, rhsIsVar: static bool,
                       WordType: type,
                       lhsBits, rhsBits, body: untyped) =
  const hasRhs = astToStr(lhs) != astToStr(rhs)

  let bytesCount = len Bytes(lhs)
  when hasRhs: doAssert len(Bytes(rhs)) == bytesCount

  var fullWordsCount = bytesCount div sizeof(WordType)
  let lastWordSize = bytesCount mod sizeof(WordType)

  block:
    var lhsWord: WordType
    when hasRhs:
      var rhsWord: WordType
    var firstByteOfLastWord, lastByteOfLastWord: int

    # TODO: Returning a `var` value from an iterator is always safe due to
    # the way inlining works, but currently the compiler reports an error
    # when a local variable escapes. We have to cheat it with this location
    # obfuscation through pointers:
    template lhsBits: auto = (addr(lhsWord))[]

    when hasRhs:
      template rhsBits: auto = (addr(rhsWord))[]

    template lastWordBytes(bitseq): auto =
      Bytes(bitseq).toOpenArray(firstByteOfLastWord, lastByteOfLastWord)

    template initLastWords =
      lhsWord = loadLEBytes(WordType, lastWordBytes(lhs))
      when hasRhs: rhsWord = loadLEBytes(WordType, lastWordBytes(rhs))

    if lastWordSize == 0:
      firstByteOfLastWord = bytesCount - sizeof(WordType)
      lastByteOfLastWord = bytesCount - 1
      dec fullWordsCount
    else:
      firstByteOfLastWord = bytesCount - lastWordSize
      lastByteOfLastWord = bytesCount - 1

    initLastWords()
    let markerPos = log2trunc(lhsWord)
    when hasRhs: doAssert log2trunc(rhsWord) == markerPos

    lhsWord.clearBit markerPos
    when hasRhs: rhsWord.clearBit markerPos

    body

    when lhsIsVar or rhsIsVar:
      let
        markerBit = uint(1 shl markerPos)
        mask = markerBit - 1'u

      when lhsIsVar:
        let lhsEndResult = (lhsWord and mask) or markerBit
        storeLEBytes(lhsEndResult, lastWordBytes(lhs))

      when rhsIsVar:
        let rhsEndResult = (rhsWord and mask) or markerBit
        storeLEBytes(rhsEndResult, lastWordBytes(rhs))

  var lhsCurrAddr = cast[ptr WordType](unsafeAddr Bytes(lhs)[0])
  let lhsEndAddr = offset(lhsCurrAddr, fullWordsCount)
  when hasRhs:
    var rhsCurrAddr = cast[ptr WordType](unsafeAddr Bytes(rhs)[0])

  while lhsCurrAddr < lhsEndAddr:
    template lhsBits: auto = lhsCurrAddr[]
    when hasRhs:
      template rhsBits: auto = rhsCurrAddr[]

    body

    lhsCurrAddr = offset(lhsCurrAddr, 1)
    when hasRhs: rhsCurrAddr = offset(rhsCurrAddr, 1)

iterator words*(x: var BitSeq): var uint =
  loopOverWords(x, x, true, false, uint, word, wordB):
    yield word

iterator words*(x: BitSeq): uint =
  loopOverWords(x, x, false, false, uint, word, word):
    yield word

iterator words*(a, b: BitSeq): (uint, uint) =
  loopOverWords(a, b, false, false, uint, wordA, wordB):
    yield (wordA, wordB)

iterator words*(a: var BitSeq, b: BitSeq): (var uint, uint) =
  loopOverWords(a, b, true, false, uint, wordA, wordB):
    yield (wordA, wordB)

iterator words*(a, b: var BitSeq): (var uint, var uint) =
  loopOverWords(a, b, true, true, uint, wordA, wordB):
    yield (wordA, wordB)

func `[]`*(s: BitSeq, pos: Natural): bool {.inline.} =
  doAssert pos < s.len
  s.Bytes.getBit pos

func `[]=`*(s: var BitSeq, pos: Natural, value: bool) {.inline.} =
  doAssert pos < s.len
  s.Bytes.changeBit pos, value

func setBit*(s: var BitSeq, pos: Natural) {.inline.} =
  doAssert pos < s.len
  setBit s.Bytes, pos

func clearBit*(s: var BitSeq, pos: Natural) {.inline.} =
  doAssert pos < s.len
  clearBit s.Bytes, pos

func init*(T: type BitSeq, len: int): T =
  result = BitSeq newSeq[byte](1 + len div 8)
  Bytes(result).setBit len

func init*(T: type BitArray): T =
  # The default zero-initialization is fine
  discard

template `[]`*(a: BitArray, pos: Natural): bool =
  getBit a.bytes, pos

template `[]=`*(a: var BitArray, pos: Natural, value: bool) =
  changeBit a.bytes, pos, value

template setBit*(a: var BitArray, pos: Natural) =
  setBit a.bytes, pos

template clearBit*(a: var BitArray, pos: Natural) =
  clearBit a.bytes, pos

# TODO: Submit this to the standard library as `cmp`
# At the moment, it doesn't work quite well because Nim selects
# the generic cmp[T] from the system module instead of choosing
# the openArray overload
func compareArrays[T](a, b: openArray[T]): int =
  result = cmp(a.len, b.len)
  if result != 0: return

  for i in 0 ..< a.len:
    result = cmp(a[i], b[i])
    if result != 0: return

template cmp*(a, b: BitSeq): int =
  compareArrays(Bytes a, Bytes b)

template `==`*(a, b: BitSeq): bool =
  cmp(a, b) == 0

func `$`*(a: BitSeq | BitArray): string =
  let length = a.len
  result = newStringOfCap(2 + length)
  result.add "0b"
  for i in countdown(length - 1, 0):
    result.add if a[i]: '1' else: '0'

func incl*(tgt: var BitSeq, src: BitSeq) =
  # Update `tgt` to include the bits of `src`, as if applying `or` to each bit
  doAssert tgt.len == src.len
  for tgtWord, srcWord in words(tgt, src):
    tgtWord = tgtWord or srcWord

func overlaps*(a, b: BitSeq): bool =
  for wa, wb in words(a, b):
    if (wa and wb) != 0:
      return true

func countOverlap*(a, b: BitSeq): int =
  var res = 0
  for wa, wb in words(a, b):
    res += countOnes(wa and wb)
  res

func isSubsetOf*(a, b: BitSeq): bool =
  let alen = a.len
  doAssert b.len == alen
  for i in 0 ..< alen:
    if a[i] and not b[i]:
      return false
  true

func isZeros*(x: BitSeq): bool =
  for w in words(x):
    if w != 0: return false
  return true

func countOnes*(x: BitSeq): int =
  # Count the number of set bits
  var res = 0
  for w in words(x):
    res += w.countOnes()
  res

func clear*(x: var BitSeq) =
  for w in words(x):
    w = 0

func countZeros*(x: BitSeq): int =
  x.len() - x.countOnes()

template bytes*(x: BitSeq): untyped =
  seq[byte](x)

iterator items*(x: BitArray): bool =
  for i in 0..<x.bits:
    yield x[i]

iterator pairs*(x: BitArray): (int, bool) =
  for i in 0..<x.bits:
    yield (i, x[i])

func incl*(a: var BitArray, b: BitArray) =
  # Update `a` to include the bits of `b`, as if applying `or` to each bit
  for i in 0..<a.bytes.len:
    a.bytes[i] = a.bytes[i] or b.bytes[i]

func clear*(a: var BitArray) =
  for b in a.bytes.mitems(): b = 0

# Set operations
func `+`*(a, b: BitArray): BitArray =
  for i in 0..<a.bytes.len:
    result.bytes[i] = a.bytes[i] or b.bytes[i]

func `-`*(a, b: BitArray): BitArray =
  for i in 0..<a.bytes.len:
    result.bytes[i] = a.bytes[i] and (not b.bytes[i])

iterator oneIndices*(a: BitArray): int =
  for i in 0..<a.len:
    if a[i]: yield i
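`BitSeq` stores its length via a marker bit placed just past the last payload bit, which is why `bitsLen` subtracts the marker from the byte count. A small usage sketch of the API above:

```nim
var s = BitSeq.init(4)   # four bits, all zero; marker bit set internally
doAssert s.len == 4

s.setBit 1               # set bit 1 directly
s[3] = true              # or assign through []=
doAssert s.countOnes() == 2
doAssert $s == "0b1010"  # printed most-significant bit first
```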
vendor/nim-eth/eth/ssz/bytes_reader.nim (vendored, new file): 218 lines
@@ -0,0 +1,218 @@
# nim-eth - Limited SSZ implementation
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  std/[typetraits, options],
  stew/[endians2, objects],
  ./types

template raiseIncorrectSize*(T: type) =
  const typeName = name(T)
  raise newException(MalformedSszError,
                     "SSZ " & typeName & " input of incorrect size")

template setOutputSize[R, T](a: var array[R, T], length: int) =
  if length != a.len:
    raiseIncorrectSize a.type

proc setOutputSize(list: var List, length: int) {.raises: [SszError, Defect].} =
  if not list.setLen length:
    raise newException(MalformedSszError, "SSZ list maximum size exceeded")

# fromSszBytes copies the wire representation to a Nim variable,
# assuming there's enough data in the buffer
func fromSszBytes*(T: type UintN, data: openArray[byte]):
    T {.raises: [MalformedSszError, Defect].} =
  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
  ## All integers are serialized as **little endian**.
  if data.len != sizeof(result):
    raiseIncorrectSize T

  T.fromBytesLE(data)

func fromSszBytes*(T: type bool, data: openArray[byte]):
    T {.raises: [MalformedSszError, Defect].} =
  # Strict: only allow 0 or 1
  if data.len != 1 or byte(data[0]) > byte(1):
    raise newException(MalformedSszError, "invalid boolean value")
  data[0] == 1

template fromSszBytes*(T: type BitSeq, bytes: openArray[byte]): auto =
  BitSeq @bytes

proc `[]`[T, U, V](s: openArray[T], x: HSlice[U, V]) {.error:
  "Please don't use openArray's [] as it allocates a result sequence".}

template checkForForbiddenBits(ResulType: type,
                               input: openArray[byte],
                               expectedBits: static int64) =
  ## This checks if the input contains any bits set above the maximum
  ## sized allowed. We only need to check the last byte to verify this:
  const bitsInLastByte = (expectedBits mod 8)
  when bitsInLastByte != 0:
    # As an example, if there are 3 bits expected in the last byte,
    # we calculate a bitmask equal to 11111000. If the input has any
    # raised bits in range of the bitmask, this would be a violation
    # of the size of the BitArray:
    const forbiddenBitsMask = byte(byte(0xff) shl bitsInLastByte)

    if (input[^1] and forbiddenBitsMask) != 0:
      raiseIncorrectSize ResulType

func readSszValue*[T](input: openArray[byte], val: var T)
    {.raises: [SszError, Defect].} =
  mixin fromSszBytes, toSszType

  template readOffsetUnchecked(n: int): uint32 {.used.} =
    fromSszBytes(uint32, input.toOpenArray(n, n + offsetSize - 1))

  template readOffset(n: int): int {.used.} =
    let offset = readOffsetUnchecked(n)
    if offset > input.len.uint32:
      raise newException(MalformedSszError, "SSZ list element offset points past the end of the input")
    int(offset)

  when val is BitList:
    if input.len == 0:
      raise newException(MalformedSszError, "Invalid empty SSZ BitList value")

    # Since our BitLists have an in-memory representation that precisely
    # matches their SSZ encoding, we can deserialize them as regular Lists:
    const maxExpectedSize = (val.maxLen div 8) + 1
    type MatchingListType = List[byte, maxExpectedSize]

    when false:
      # TODO: Nim doesn't like this simple type coercion,
      #       we'll rely on `cast` for now (see below)
      readSszValue(input, MatchingListType val)
    else:
      static:
        # As a sanity check, we verify that the coercion is accepted by the compiler:
        doAssert MatchingListType(val) is MatchingListType
      readSszValue(input, cast[ptr MatchingListType](addr val)[])

    let resultBytesCount = len bytes(val)

    if bytes(val)[resultBytesCount - 1] == 0:
      raise newException(MalformedSszError, "SSZ BitList is not properly terminated")

    if resultBytesCount == maxExpectedSize:
      checkForForbiddenBits(T, input, val.maxLen + 1)

  elif val is List|array:
    type E = type val[0]

    when E is byte:
      val.setOutputSize input.len
      if input.len > 0:
        copyMem(addr val[0], unsafeAddr input[0], input.len)

    elif isFixedSize(E):
      const elemSize = fixedPortionSize(E)
      if input.len mod elemSize != 0:
        var ex = new SszSizeMismatchError
        ex.deserializedType = cstring typetraits.name(T)
        ex.actualSszSize = input.len
        ex.elementSize = elemSize
        raise ex
      val.setOutputSize input.len div elemSize
      for i in 0 ..< val.len:
        let offset = i * elemSize
        readSszValue(input.toOpenArray(offset, offset + elemSize - 1), val[i])

    else:
      if input.len == 0:
        # This is an empty list.
        # The default initialization of the return value is fine.
        val.setOutputSize 0
        return
      elif input.len < offsetSize:
        raise newException(MalformedSszError, "SSZ input of insufficient size")

      var offset = readOffset 0
      let resultLen = offset div offsetSize

      if resultLen == 0:
        # If there are too many elements, other constraints detect problems
        # (not monotonically increasing, past end of input, or last element
        # not matching up with its nextOffset properly)
        raise newException(MalformedSszError, "SSZ list incorrectly encoded of zero length")

      val.setOutputSize resultLen
      for i in 1 ..< resultLen:
        let nextOffset = readOffset(i * offsetSize)
        if nextOffset <= offset:
          raise newException(MalformedSszError, "SSZ list element offsets are not monotonically increasing")
        else:
          readSszValue(input.toOpenArray(offset, nextOffset - 1), val[i - 1])
        offset = nextOffset

      readSszValue(input.toOpenArray(offset, input.len - 1), val[resultLen - 1])

  elif val is UintN|bool:
    val = fromSszBytes(T, input)

  elif val is BitArray:
    if sizeof(val) != input.len:
      raiseIncorrectSize(T)
    checkForForbiddenBits(T, input, val.bits)
    copyMem(addr val.bytes[0], unsafeAddr input[0], input.len)

  elif val is object|tuple:
    let inputLen = uint32 input.len
    const minimallyExpectedSize = uint32 fixedPortionSize(T)

    if inputLen < minimallyExpectedSize:
      raise newException(MalformedSszError, "SSZ input of insufficient size")

    enumInstanceSerializedFields(val, fieldName, field):
      const boundingOffsets = getFieldBoundingOffsets(T, fieldName)

      # type FieldType = type field # buggy
      # For some reason, Nim gets confused about the alias here. This could be a
      # generics caching issue caused by the use of distinct types. Such an
      # issue is very scary in general.
      # The bug can be seen with the two List[uint64, N] types that exist in
      # the spec, with different N.

      type SszType = type toSszType(declval type(field))

      when isFixedSize(SszType):
        const
          startOffset = boundingOffsets[0]
          endOffset = boundingOffsets[1]
      else:
        let
          startOffset = readOffsetUnchecked(boundingOffsets[0])
          endOffset = if boundingOffsets[1] == -1: inputLen
                      else: readOffsetUnchecked(boundingOffsets[1])

        when boundingOffsets.isFirstOffset:
          if startOffset != minimallyExpectedSize:
            raise newException(MalformedSszError, "SSZ object dynamic portion starts at invalid offset")

        if startOffset > endOffset:
          raise newException(MalformedSszError, "SSZ field offsets are not monotonically increasing")
        elif endOffset > inputLen:
          raise newException(MalformedSszError, "SSZ field offset points past the end of the input")
        elif startOffset < minimallyExpectedSize:
          raise newException(MalformedSszError, "SSZ field offset points outside bounding offsets")

      # TODO The extra type escaping here is a work-around for a Nim issue:
      when type(field) is type(SszType):
        readSszValue(
          input.toOpenArray(int(startOffset), int(endOffset - 1)),
          field)
      else:
        field = fromSszBytes(
          type(field),
          input.toOpenArray(int(startOffset), int(endOffset - 1)))

  else:
    unsupported T
vendor/nim-eth/eth/ssz/ssz_serialization.nim (vendored, new file): 247 lines
@ -0,0 +1,247 @@
# nim-eth - Limited SSZ implementation
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

## SSZ serialization for core SSZ types, as specified in:
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/ssz/simple-serialize.md#serialization

import
  std/[typetraits, options],
  stew/[endians2, leb128, objects],
  serialization, serialization/testing/tracing,
  ./bytes_reader, ./types

export
  serialization, types, bytes_reader

type
  SszReader* = object
    stream: InputStream

  SszWriter* = object
    stream: OutputStream

  SizePrefixed*[T] = distinct T
  SszMaxSizeExceeded* = object of SerializationError

  VarSizedWriterCtx = object
    fixedParts: WriteCursor
    offset: int

  FixedSizedWriterCtx = object

serializationFormat SSZ

SSZ.setReader SszReader
SSZ.setWriter SszWriter, PreferredOutput = seq[byte]

template sizePrefixed*[TT](x: TT): untyped =
  type T = TT
  SizePrefixed[T](x)

proc init*(T: type SszReader, stream: InputStream): T {.raises: [Defect].} =
  T(stream: stream)

proc writeFixedSized(s: var (OutputStream|WriteCursor), x: auto)
    {.raises: [Defect, IOError].} =
  mixin toSszType

  when x is byte:
    s.write x
  elif x is bool:
    s.write byte(ord(x))
  elif x is UintN:
    when cpuEndian == bigEndian:
      s.write toBytesLE(x)
    else:
      s.writeMemCopy x
  elif x is array:
    when x[0] is byte:
      trs "APPENDING FIXED SIZE BYTES", x
      s.write x
    else:
      for elem in x:
        trs "WRITING FIXED SIZE ARRAY ELEMENT"
        s.writeFixedSized toSszType(elem)
  elif x is tuple|object:
    enumInstanceSerializedFields(x, fieldName, field):
      trs "WRITING FIXED SIZE FIELD", fieldName
      s.writeFixedSized toSszType(field)
  else:
    unsupported x.type

template writeOffset(cursor: var WriteCursor, offset: int) =
  write cursor, toBytesLE(uint32 offset)

template supports*(_: type SSZ, T: type): bool =
  mixin toSszType
  anonConst compiles(fixedPortionSize toSszType(declval T))

func init*(T: type SszWriter, stream: OutputStream): T {.raises: [Defect].} =
  result.stream = stream

proc writeVarSizeType(w: var SszWriter, value: auto)
    {.gcsafe, raises: [Defect, IOError].}

proc beginRecord*(w: var SszWriter, TT: type): auto {.raises: [Defect].} =
  type T = TT
  when isFixedSize(T):
    FixedSizedWriterCtx()
  else:
    const offset = when T is array: len(T) * offsetSize
                   else: fixedPortionSize(T)
    VarSizedWriterCtx(offset: offset,
                      fixedParts: w.stream.delayFixedSizeWrite(offset))

template writeField*(w: var SszWriter,
                     ctx: var auto,
                     fieldName: string,
                     field: auto) =
  mixin toSszType
  when ctx is FixedSizedWriterCtx:
    writeFixedSized(w.stream, toSszType(field))
  else:
    type FieldType = type toSszType(field)

    when isFixedSize(FieldType):
      writeFixedSized(ctx.fixedParts, toSszType(field))
    else:
      trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
      writeOffset(ctx.fixedParts, ctx.offset)
      let initPos = w.stream.pos
      trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
      when FieldType is BitList:
        trs "BIT SEQ ", bytes(field)
      writeVarSizeType(w, toSszType(field))
      ctx.offset += w.stream.pos - initPos

template endRecord*(w: var SszWriter, ctx: var auto) =
  when ctx is VarSizedWriterCtx:
    finalize ctx.fixedParts

proc writeSeq[T](w: var SszWriter, value: seq[T])
    {.raises: [Defect, IOError].} =
  # Please note that `writeSeq` exists in order to reduce the code bloat
  # produced from generic instantiations of the unique `List[N, T]` types.
  when isFixedSize(T):
    trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
    for elem in value:
      w.stream.writeFixedSized toSszType(elem)
    trs "DONE"
  else:
    trs "WRITING LIST WITH VAR SIZE ELEMENTS"
    var offset = value.len * offsetSize
    var cursor = w.stream.delayFixedSizeWrite offset
    for elem in value:
      cursor.writeFixedSized uint32(offset)
      let initPos = w.stream.pos
      w.writeVarSizeType toSszType(elem)
      offset += w.stream.pos - initPos
    finalize cursor
    trs "DONE"

proc writeVarSizeType(w: var SszWriter, value: auto)
    {.raises: [Defect, IOError].} =
  trs "STARTING VAR SIZE TYPE"

  when value is List:
    # We reduce code bloat by forwarding all `List` types to a general `seq[T]`
    # proc.
    writeSeq(w, asSeq value)
  elif value is BitList:
    # ATTENTION! We can reuse `writeSeq` only as long as our BitList type is
    # implemented to internally match the binary representation of SSZ BitLists
    # in memory.
    writeSeq(w, bytes value)
  elif value is object|tuple|array:
    trs "WRITING OBJECT OR ARRAY"
    var ctx = beginRecord(w, type value)
    enumerateSubFields(value, field):
      writeField w, ctx, astToStr(field), field
    endRecord w, ctx
  else:
    unsupported type(value)

proc writeValue*(w: var SszWriter, x: auto)
    {.gcsafe, raises: [Defect, IOError].} =
  mixin toSszType
  type T = type toSszType(x)

  when isFixedSize(T):
    w.stream.writeFixedSized toSszType(x)
  else:
    w.writeVarSizeType toSszType(x)

func sszSize*(value: auto): int {.gcsafe, raises: [Defect].}

func sszSizeForVarSizeList[T](value: openArray[T]): int =
  mixin toSszType
  result = len(value) * offsetSize
  for elem in value:
    result += sszSize(toSszType elem)

func sszSize*(value: auto): int {.gcsafe, raises: [Defect].} =
  mixin toSszType
  type T = type toSszType(value)

  when isFixedSize(T):
    anonConst fixedPortionSize(T)

  elif T is array|List:
    type E = ElemType(T)
    when isFixedSize(E):
      len(value) * anonConst(fixedPortionSize(E))
    elif T is HashArray:
      sszSizeForVarSizeList(value.data)
    elif T is array:
      sszSizeForVarSizeList(value)
    else:
      sszSizeForVarSizeList(asSeq value)

  elif T is BitList:
    return len(bytes(value))

  elif T is object|tuple:
    result = anonConst fixedPortionSize(T)
    enumInstanceSerializedFields(value, _{.used.}, field):
      type FieldType = type toSszType(field)
      when not isFixedSize(FieldType):
        result += sszSize(toSszType field)

  else:
    unsupported T

proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T])
    {.raises: [Defect, IOError].} =
  var cursor = w.stream.delayVarSizeWrite(Leb128.maxLen(uint64))
  let initPos = w.stream.pos
  w.writeValue T(x)
  let length = toBytes(uint64(w.stream.pos - initPos), Leb128)
  cursor.finalWrite length.toOpenArray()

proc readValue*[T](r: var SszReader, val: var T)
    {.raises: [Defect, SszError, IOError].} =
  when isFixedSize(T):
    const minimalSize = fixedPortionSize(T)
    if r.stream.readable(minimalSize):
      readSszValue(r.stream.read(minimalSize), val)
    else:
      raise newException(MalformedSszError, "SSZ input of insufficient size")
  else:
    # TODO(zah) Read the fixed portion first and precisely measure the
    # size of the dynamic portion to consume the right number of bytes.
    readSszValue(r.stream.read(r.stream.len.get), val)

proc readSszBytes*[T](data: openArray[byte], val: var T) {.
    raises: [Defect, MalformedSszError, SszSizeMismatchError].} =
  when isFixedSize(T):
    const minimalSize = fixedPortionSize(T)
    if data.len < minimalSize:
      raise newException(MalformedSszError, "SSZ input of insufficient size")

  readSszValue(data, val)
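
Taken together, `serializationFormat SSZ` plus the reader/writer above give
the usual nim-serialization entry points. A round-trip sketch (the `Deposit`
type and the import path are illustrative assumptions, not part of this
diff):

  import eth/ssz/ssz_serialization

  type Deposit = object
    amount: uint64
    flag: bool

  let bytes = SSZ.encode(Deposit(amount: 42, flag: true))  # seq[byte]
  let decoded = SSZ.decode(bytes, Deposit)                 # parse it back
  doAssert decoded.amount == 42 and decoded.flag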
258
vendor/nim-eth/eth/ssz/types.nim
vendored
Normal file
258
vendor/nim-eth/eth/ssz/types.nim
vendored
Normal file
@ -0,0 +1,258 @@
# nim-eth - Limited SSZ implementation
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  std/[tables, options, typetraits, strformat],
  stew/shims/macros, stew/[byteutils, bitops2, objects],
  serialization/[object_serialization, errors],
  ./bitseqs

export bitseqs

const
  offsetSize* = 4
  bytesPerChunk* = 32

type
  UintN* = SomeUnsignedInt
  BasicType* = bool|UintN

  Limit* = int64

  List*[T; maxLen: static Limit] = distinct seq[T]
  BitList*[maxLen: static Limit] = distinct BitSeq

  # Note for readers:
  # We use `array` for `Vector` and
  # `BitArray` for `BitVector`

  SszError* = object of SerializationError

  MalformedSszError* = object of SszError

  SszSizeMismatchError* = object of SszError
    deserializedType*: cstring
    actualSszSize*: int
    elementSize*: int

template asSeq*(x: List): auto = distinctBase(x)

template init*[T](L: type List, x: seq[T], N: static Limit): auto =
  List[T, N](x)

template init*[T, N](L: type List[T, N], x: seq[T]): auto =
  List[T, N](x)

template `$`*(x: List): auto = $(distinctBase x)
template len*(x: List): auto = len(distinctBase x)
template low*(x: List): auto = low(distinctBase x)
template high*(x: List): auto = high(distinctBase x)
template `[]`*(x: List, idx: auto): untyped = distinctBase(x)[idx]
template `[]=`*(x: var List, idx: auto, val: auto) = distinctBase(x)[idx] = val
template `==`*(a, b: List): bool = distinctBase(a) == distinctBase(b)

template `&`*(a, b: List): auto = (type(a)(distinctBase(a) & distinctBase(b)))

template items* (x: List): untyped = items(distinctBase x)
template pairs* (x: List): untyped = pairs(distinctBase x)
template mitems*(x: var List): untyped = mitems(distinctBase x)
template mpairs*(x: var List): untyped = mpairs(distinctBase x)

template contains* (x: List, val: auto): untyped = contains(distinctBase x, val)

proc add*(x: var List, val: auto): bool =
  if x.len < x.maxLen:
    add(distinctBase x, val)
    true
  else:
    false

proc setLen*(x: var List, newLen: int): bool =
  if newLen <= x.maxLen:
    setLen(distinctBase x, newLen)
    true
  else:
    false

template init*(L: type BitList, x: seq[byte], N: static Limit): auto =
  BitList[N](data: x)

template init*[N](L: type BitList[N], x: seq[byte]): auto =
  L(data: x)

template init*(T: type BitList, len: int): auto = T init(BitSeq, len)
template len*(x: BitList): auto = len(BitSeq(x))
template bytes*(x: BitList): auto = seq[byte](x)
template `[]`*(x: BitList, idx: auto): auto = BitSeq(x)[idx]
template `[]=`*(x: var BitList, idx: auto, val: bool) = BitSeq(x)[idx] = val
template `==`*(a, b: BitList): bool = BitSeq(a) == BitSeq(b)
template setBit*(x: var BitList, idx: Natural) = setBit(BitSeq(x), idx)
template clearBit*(x: var BitList, idx: Natural) = clearBit(BitSeq(x), idx)
template overlaps*(a, b: BitList): bool = overlaps(BitSeq(a), BitSeq(b))
template incl*(a: var BitList, b: BitList) = incl(BitSeq(a), BitSeq(b))
template isSubsetOf*(a, b: BitList): bool = isSubsetOf(BitSeq(a), BitSeq(b))
template isZeros*(x: BitList): bool = isZeros(BitSeq(x))
template countOnes*(x: BitList): int = countOnes(BitSeq(x))
template countZeros*(x: BitList): int = countZeros(BitSeq(x))
template countOverlap*(x, y: BitList): int = countOverlap(BitSeq(x), BitSeq(y))
template `$`*(a: BitList): string = $(BitSeq(a))

iterator items*(x: BitList): bool =
  for i in 0 ..< x.len:
    yield x[i]

macro unsupported*(T: typed): untyped =
  # TODO: {.fatal.} breaks compilation even in `compiles()` context,
  # so we use this macro instead. It's also much better at figuring
  # out the actual type that was used in the instantiation.
  # File both problems as issues.
  error "SSZ serialization of the type " & humaneTypeName(T) & " is not supported"

template ElemType*(T: type array): untyped =
  type(default(T)[low(T)])

template ElemType*(T: type seq): untyped =
  type(default(T)[0])

template ElemType*(T: type List): untyped =
  T.T

func isFixedSize*(T0: type): bool {.compileTime.} =
  mixin toSszType, enumAllSerializedFields

  type T = type toSszType(declval T0)

  when T is BasicType:
    return true
  elif T is array:
    return isFixedSize(ElemType(T))
  elif T is object|tuple:
    enumAllSerializedFields(T):
      when not isFixedSize(FieldType):
        return false
    return true

func fixedPortionSize*(T0: type): int {.compileTime.} =
  mixin enumAllSerializedFields, toSszType

  type T = type toSszType(declval T0)

  when T is BasicType: sizeof(T)
  elif T is array:
    type E = ElemType(T)
    when isFixedSize(E): int(len(T)) * fixedPortionSize(E)
    else: int(len(T)) * offsetSize
  elif T is object|tuple:
    enumAllSerializedFields(T):
      when isFixedSize(FieldType):
        result += fixedPortionSize(FieldType)
      else:
        result += offsetSize
  else:
    unsupported T0

# TODO This should have been an iterator, but the VM can't compile the
# code due to "too many registers required".
proc fieldInfos*(RecordType: type): seq[tuple[name: string,
                                              offset: int,
                                              fixedSize: int,
                                              branchKey: string]] =
  mixin enumAllSerializedFields

  var
    offsetInBranch = {"": 0}.toTable
    nestedUnder = initTable[string, string]()

  enumAllSerializedFields(RecordType):
    const
      isFixed = isFixedSize(FieldType)
      fixedSize = when isFixed: fixedPortionSize(FieldType)
                  else: 0
      branchKey = when fieldCaseDiscriminator.len == 0: ""
                  else: fieldCaseDiscriminator & ":" & $fieldCaseBranches
      fieldSize = when isFixed: fixedSize
                  else: offsetSize

    nestedUnder[fieldName] = branchKey

    var fieldOffset: int
    offsetInBranch.withValue(branchKey, val):
      fieldOffset = val[]
      val[] += fieldSize
    do:
      try:
        let parentBranch = nestedUnder.getOrDefault(fieldCaseDiscriminator, "")
        fieldOffset = offsetInBranch[parentBranch]
        offsetInBranch[branchKey] = fieldOffset + fieldSize
      except KeyError as e:
        raiseAssert e.msg

    result.add((fieldName, fieldOffset, fixedSize, branchKey))

func getFieldBoundingOffsetsImpl(RecordType: type, fieldName: static string):
    tuple[fieldOffset, nextFieldOffset: int, isFirstOffset: bool]
    {.compileTime.} =
  result = (-1, -1, false)
  var fieldBranchKey: string
  var isFirstOffset = true

  for f in fieldInfos(RecordType):
    if fieldName == f.name:
      result[0] = f.offset
      if f.fixedSize > 0:
        result[1] = result[0] + f.fixedSize
        return
      else:
        fieldBranchKey = f.branchKey
      result.isFirstOffset = isFirstOffset

    elif result[0] != -1 and
         f.fixedSize == 0 and
         f.branchKey == fieldBranchKey:
      # We have found the next variable sized field
      result[1] = f.offset
      return

    if f.fixedSize == 0:
      isFirstOffset = false

func getFieldBoundingOffsets*(RecordType: type, fieldName: static string):
    tuple[fieldOffset, nextFieldOffset: int, isFirstOffset: bool]
    {.compileTime.} =
  ## Returns the start and end offsets of a field.
  ##
  ## For fixed-size fields, the start offset points to the first
  ## byte of the field and the end offset points to 1 byte past the
  ## end of the field.
  ##
  ## For variable-size fields, the returned offsets point to the
  ## statically known positions of the 32-bit offset values written
  ## within the SSZ object. You must read the 32-bit values stored
  ## at these locations in order to obtain the actual offsets.
  ##
  ## For variable-size fields, the end offset may be -1 when the
  ## designated field is the last variable sized field within the
  ## object. Then the SSZ object boundary known at run-time marks
  ## the end of the variable-size field.
  type T = RecordType
  anonConst getFieldBoundingOffsetsImpl(T, fieldName)

template enumerateSubFields*(holder, fieldVar, body: untyped) =
  when holder is array:
    for fieldVar in holder: body
  else:
    enumInstanceSerializedFields(holder, _{.used.}, fieldVar): body

method formatMsg*(
    err: ref SszSizeMismatchError,
    filename: string): string {.gcsafe, raises: [Defect].} =
  try:
    &"SSZ size mismatch, element {err.elementSize}, actual {err.actualSszSize}, type {err.deserializedType}, file {filename}"
  except CatchableError:
    "SSZ size mismatch"
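
The `add`/`setLen` overloads above make the length cap explicit at the call
site: instead of growing past `maxLen` they report failure. A small sketch,
assuming this module is importable at the path shown (path illustrative):

  import eth/ssz/types

  var xs: List[int, 2]
  doAssert xs.add(1)
  doAssert xs.add(2)
  doAssert not xs.add(3)   # maxLen reached; the element is not appended
  doAssert xs.setLen(1)    # shrinking always stays within bounds
  doAssert xs.len == 1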
5
vendor/nim-eth/tests/p2p/all_portal_tests.nim
vendored
Normal file
5
vendor/nim-eth/tests/p2p/all_portal_tests.nim
vendored
Normal file
@ -0,0 +1,5 @@
{.used.}

import
  ./test_portal_encoding,
  ./test_portal
1
vendor/nim-eth/tests/p2p/all_tests.nim
vendored
1
vendor/nim-eth/tests/p2p/all_tests.nim
vendored
@ -1,5 +1,6 @@
import
  ./all_discv5_tests,
  ./all_portal_tests,
  ./test_auth,
  ./test_crypt,
  ./test_discovery,
18
vendor/nim-eth/tests/p2p/test_discoveryv5.nim
vendored
18
vendor/nim-eth/tests/p2p/test_discoveryv5.nim
vendored
@ -645,10 +645,13 @@ procSuite "Discovery v5 Tests":
      rng, PrivateKey.random(rng[]), localAddress(20303))
    talkProtocol = "echo".toBytes()

  proc handler(request: seq[byte]): seq[byte] {.gcsafe, raises: [Defect].} =
  proc handler(protocol: TalkProtocol, request: seq[byte]): seq[byte]
      {.gcsafe, raises: [Defect].} =
    request

  check node2.registerTalkProtocol(talkProtocol, handler).isOk()
  let echoProtocol = TalkProtocol(protocolHandler: handler)

  check node2.registerTalkProtocol(talkProtocol, echoProtocol).isOk()
  let talkresp = await discv5_protocol.talkreq(node1, node2.localNode,
    talkProtocol, "hello".toBytes())

@ -667,13 +670,16 @@ procSuite "Discovery v5 Tests":
      rng, PrivateKey.random(rng[]), localAddress(20303))
    talkProtocol = "echo".toBytes()

  proc handler(request: seq[byte]): seq[byte] {.gcsafe, raises: [Defect].} =
  proc handler(protocol: TalkProtocol, request: seq[byte]): seq[byte]
      {.gcsafe, raises: [Defect].} =
    request

  let echoProtocol = TalkProtocol(protocolHandler: handler)

  check:
    node2.registerTalkProtocol(talkProtocol, handler).isOk()
    node2.registerTalkProtocol(talkProtocol, handler).isErr()
    node2.registerTalkProtocol("test".toBytes(), handler).isOk()
    node2.registerTalkProtocol(talkProtocol, echoProtocol).isOk()
    node2.registerTalkProtocol(talkProtocol, echoProtocol).isErr()
    node2.registerTalkProtocol("test".toBytes(), echoProtocol).isOk()

  await node1.closeWait()
  await node2.closeWait()
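
The change above moves talk-request handling from bare procs onto a
TalkProtocol object that carries the handler. A registration sketch using
only names visible in this hunk (`node` stands in for a discv5 Protocol
instance and is an assumption for illustration):

  proc handler(protocol: TalkProtocol, request: seq[byte]): seq[byte]
      {.gcsafe, raises: [Defect].} =
    request  # echo the request back unchanged

  let echoProtocol = TalkProtocol(protocolHandler: handler)
  # doAssert node.registerTalkProtocol("echo".toBytes(), echoProtocol).isOk()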
103
vendor/nim-eth/tests/p2p/test_portal.nim
vendored
Normal file
103
vendor/nim-eth/tests/p2p/test_portal.nim
vendored
Normal file
@ -0,0 +1,103 @@
# nim-eth - Portal Network
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  chronos, testutils/unittests,
  ../../eth/keys, # for rng
  ../../eth/p2p/discoveryv5/protocol as discv5_protocol,
  ../../eth/p2p/portal/protocol as portal_protocol,
  ./discv5_test_helper

proc random(T: type UInt256, rng: var BrHmacDrbgContext): T =
  var key: UInt256
  brHmacDrbgGenerate(addr rng, addr key, csize_t(sizeof(key)))

  key

procSuite "Portal Tests":
  let rng = newRng()

  asyncTest "Portal Ping/Pong":
    let
      node1 = initDiscoveryNode(
        rng, PrivateKey.random(rng[]), localAddress(20302))
      node2 = initDiscoveryNode(
        rng, PrivateKey.random(rng[]), localAddress(20303))

      proto1 = PortalProtocol.new(node1)
      proto2 = PortalProtocol.new(node2)

    let pong = await proto1.ping(proto2.baseProtocol.localNode)

    check:
      pong.isOk()
      pong.get().enrSeq == 1'u64
      pong.get().dataRadius == UInt256.high()

    await node1.closeWait()
    await node2.closeWait()

  asyncTest "Portal FindNode/Nodes":
    let
      node1 = initDiscoveryNode(
        rng, PrivateKey.random(rng[]), localAddress(20302))
      node2 = initDiscoveryNode(
        rng, PrivateKey.random(rng[]), localAddress(20303))

      proto1 = PortalProtocol.new(node1)
      proto2 = PortalProtocol.new(node2)

    block: # Find itself
      let nodes = await proto1.findNode(proto2.baseProtocol.localNode,
        List[uint16, 256](@[0'u16]))

      check:
        nodes.isOk()
        nodes.get().total == 1'u8
        nodes.get().enrs.len() == 1

    block: # Find nothing
      let nodes = await proto1.findNode(proto2.baseProtocol.localNode,
        List[uint16, 256](@[]))

      check:
        nodes.isOk()
        nodes.get().total == 1'u8
        nodes.get().enrs.len() == 0

    block: # Find for distance
      # TODO: Add test when implemented
      discard

    await node1.closeWait()
    await node2.closeWait()

  asyncTest "Portal FindContent/FoundContent":
    let
      node1 = initDiscoveryNode(
        rng, PrivateKey.random(rng[]), localAddress(20302))
      node2 = initDiscoveryNode(
        rng, PrivateKey.random(rng[]), localAddress(20303))

      proto1 = PortalProtocol.new(node1)
      proto2 = PortalProtocol.new(node2)

    let contentKey = ByteList(@(UInt256.random(rng[]).toBytes()))

    let foundContent = await proto1.findContent(proto2.baseProtocol.localNode,
      contentKey)

    check:
      foundContent.isOk()
      # TODO: adjust when implemented
      foundContent.get().enrs.len() == 0
      foundContent.get().payload.len() == 0

    await node1.closeWait()
    await node2.closeWait()
156
vendor/nim-eth/tests/p2p/test_portal_encoding.nim
vendored
Normal file
156
vendor/nim-eth/tests/p2p/test_portal_encoding.nim
vendored
Normal file
@ -0,0 +1,156 @@
# nim-eth - Portal Network
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/unittest,
  stint, stew/[byteutils, results],
  ../../eth/p2p/portal/messages

suite "Portal Protocol Message Encodings":
  test "Ping Request":
    var dataRadius: UInt256
    let
      enrSeq = 1'u64
      p = PingMessage(enrSeq: enrSeq, dataRadius: dataRadius)

    let encoded = encodeMessage(p)
    check encoded.toHex ==
      "0101000000000000000000000000000000000000000000000000000000000000000000000000000000"
    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == ping
      message.ping.enrSeq == enrSeq
      message.ping.dataRadius == dataRadius

  test "Pong Response":
    var dataRadius: UInt256
    let
      enrSeq = 1'u64
      p = PongMessage(enrSeq: enrSeq, dataRadius: dataRadius)

    let encoded = encodeMessage(p)
    check encoded.toHex ==
      "0201000000000000000000000000000000000000000000000000000000000000000000000000000000"
    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == pong
      message.pong.enrSeq == enrSeq
      message.pong.dataRadius == dataRadius

  test "FindNode Request":
    let
      distances = List[uint16, 256](@[0x0100'u16])
      fn = FindNodeMessage(distances: distances)

    let encoded = encodeMessage(fn)
    check encoded.toHex == "03040000000001"

    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == findnode
      message.findnode.distances == distances

  test "Nodes Response (empty)":
    let
      total = 0x1'u8
      n = NodesMessage(total: total)

    let encoded = encodeMessage(n)
    check encoded.toHex == "040105000000"

    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == nodes
      message.nodes.total == total
      message.nodes.enrs.len() == 0

  test "FindContent Request":
    let
      contentKey = ByteList(@[byte 0x01, 0x02, 0x03])
      fn = FindContentMessage(contentKey: contentKey)

    let encoded = encodeMessage(fn)
    check encoded.toHex == "0504000000010203"

    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == findcontent
      message.findcontent.contentKey == contentKey

  test "FoundContent Response (empty enrs)":
    let
      enrs = List[ByteList, 32](@[])
      payload = ByteList(@[byte 0x01, 0x02, 0x03])
      n = FoundContentMessage(enrs: enrs, payload: payload)

    let encoded = encodeMessage(n)
    check encoded.toHex == "060800000008000000010203"

    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == foundcontent
      message.foundcontent.enrs.len() == 0
      message.foundcontent.payload == payload

  test "Advertise Request":
    let
      contentKeys = List[ByteList, 32](List(@[ByteList(@[byte 0x01, 0x02, 0x03])]))
      am = AdvertiseMessage(contentKeys)
      # am = AdvertiseMessage(contentKeys: contentKeys)

    let encoded = encodeMessage(am)
    check encoded.toHex == "0704000000010203"
      # "070400000004000000010203"

    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == advertise
      message.advertise == contentKeys
      # message.advertise.contentKeys == contentKeys

  test "RequestProofs Response": # That sounds weird
    let
      connectionId = List[byte, 4](@[byte 0x01, 0x02, 0x03, 0x04])
      contentKeys =
        List[ByteList, 32](List(@[ByteList(@[byte 0x01, 0x02, 0x03])]))
      n = RequestProofsMessage(connectionId: connectionId,
        contentKeys: contentKeys)

    let encoded = encodeMessage(n)
    check encoded.toHex == "08080000000c0000000102030404000000010203"

    let decoded = decodeMessage(encoded)
    check decoded.isOk()

    let message = decoded.get()
    check:
      message.kind == requestproofs
      message.requestproofs.connectionId == connectionId
      message.requestproofs.contentKeys == contentKeys
92
vendor/nim-libp2p/.github/workflows/ci.yml
vendored
92
vendor/nim-libp2p/.github/workflows/ci.yml
vendored
@ -1,4 +1,4 @@
name: nim-libp2p CI
name: CI
on: [push, pull_request]

jobs:
@ -7,24 +7,18 @@ jobs:
      fail-fast: false
      max-parallel: 20
      matrix:
        branch: [v1.2.6]
        target:
          # Unit tests
          - os: linux
            cpu: amd64
            TEST_KIND: unit-tests
          - os: linux
            cpu: i386
            TEST_KIND: unit-tests
          - os: macos
            cpu: amd64
            TEST_KIND: unit-tests
          - os: windows
            cpu: i386
            TEST_KIND: unit-tests
          - os: windows
            cpu: amd64
            TEST_KIND: unit-tests
        include:
          - target:
              os: linux
@ -35,17 +29,20 @@ jobs:
          - target:
              os: windows
            builder: windows-2019
    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (${{ matrix.branch }})'

    defaults:
      run:
        shell: bash

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }}'
    runs-on: ${{ matrix.builder }}
    steps:
      - name: Checkout nim-libp2p
        uses: actions/checkout@v2
        with:
          path: nim-libp2p
          submodules: true

      - name: Derive environment variables
        shell: bash
        run: |
          if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
            ARCH=64
@ -59,6 +56,7 @@ jobs:

          ncpu=
          ext=
          MAKE_CMD="make"
          case '${{ runner.os }}' in
          'Linux')
            ncpu=$(nproc)
@ -69,11 +67,13 @@ jobs:
          'Windows')
            ncpu=$NUMBER_OF_PROCESSORS
            ext=.exe
            MAKE_CMD="mingw32-make"
            ;;
          esac
          [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
          echo "ncpu=$ncpu" >> $GITHUB_ENV
          echo "ext=$ext" >> $GITHUB_ENV
          echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV

      - name: Install build dependencies (Linux i386)
        if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
@ -115,7 +115,6 @@ jobs:
        if: >
          steps.windows-mingw-cache.outputs.cache-hit != 'true' &&
          runner.os == 'Windows'
        shell: bash
        run: |
          mkdir -p external
          curl -L "https://nim-lang.org/download/mingw$ARCH.7z" -o "external/mingw-${{ matrix.target.cpu }}.7z"
@ -126,7 +125,6 @@ jobs:
        if: >
          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
          runner.os == 'Windows'
        shell: bash
        run: |
          mkdir -p external
          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
@ -135,80 +133,31 @@ jobs:
      - name: Path to cached dependencies (Windows)
        if: >
          runner.os == 'Windows'
        shell: bash
        run: |
          echo "${{ github.workspace }}/external/mingw-${{ matrix.target.cpu }}/bin" >> $GITHUB_PATH
          echo "${{ github.workspace }}/external/dlls-${{ matrix.target.cpu }}" >> $GITHUB_PATH

      - name: Setup environment
        shell: bash
        run: echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH

      - name: Get latest Nim commit hash
        id: versions
        shell: bash
        run: |
          getHash() {
            git ls-remote "https://github.com/$1" "${2:-HEAD}" | cut -f 1
          }
          nimHash=$(getHash nim-lang/Nim '${{ matrix.branch }}')
          csourcesHash=$(getHash nim-lang/csources)
          echo "::set-output name=nim::$nimHash"
          echo "::set-output name=csources::$csourcesHash"
          nbsHash=$(getHash status-im/nimbus-build-system)
          echo "::set-output name=nimbus_build_system::$nbsHash"

      - name: Restore prebuilt Nim from cache
        id: nim-cache
        uses: actions/cache@v1
        uses: actions/cache@v2
        with:
          path: nim
          key: "nim-${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ steps.versions.outputs.nim }}"

      - name: Restore prebuilt csources from cache
        if: steps.nim-cache.outputs.cache-hit != 'true'
        id: csources-cache
        uses: actions/cache@v1
        with:
          path: csources/bin
          key: "csources-${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ steps.versions.outputs.csources }}"

      - name: Checkout Nim csources
        if: >
          steps.csources-cache.outputs.cache-hit != 'true' &&
          steps.nim-cache.outputs.cache-hit != 'true'
        uses: actions/checkout@v2
        with:
          repository: nim-lang/csources
          path: csources
          ref: ${{ steps.versions.outputs.csources }}

      - name: Checkout Nim
        if: steps.nim-cache.outputs.cache-hit != 'true'
        uses: actions/checkout@v2
        with:
          repository: nim-lang/Nim
          path: nim
          ref: ${{ steps.versions.outputs.nim }}
          path: NimBinaries
          key: 'NimBinaries-${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ steps.versions.outputs.nimbus_build_system }}'

      - name: Build Nim and associated tools
        if: steps.nim-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
          if [[ ! -e csources/bin/nim$ext ]]; then
            make -C csources -j $ncpu CC=gcc ucpu='${{ matrix.target.cpu }}'
          else
            echo 'Using prebuilt csources'
          fi
          cp -v csources/bin/nim$ext nim/bin
          cd nim
          nim c koch
          ./koch boot -d:release
          ./koch tools -d:release
          # clean up to save cache space
          rm koch
          rm -rf nimcache
          rm -rf dist
          rm -rf .git
          curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
          env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} CC=gcc bash build_nim.sh nim csources dist/nimble NimBinaries
          echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH

      - name: Setup Go
        uses: actions/setup-go@v2
@ -216,16 +165,11 @@ jobs:
          go-version: '^1.15.5'

      - name: Install p2pd
        shell: bash
        run: |
          cd nim-libp2p
          V=1 bash scripts/build_p2pd.sh p2pdCache v0.3.0

      - name: Run nim-libp2p tests
        shell: bash
        run: |
          export UCPU="$cpu"
          cd nim-libp2p
          nimble install -y --depsOnly
          nimble test
2
vendor/nim-libp2p/docs/GETTING_STARTED.md
vendored
2
vendor/nim-libp2p/docs/GETTING_STARTED.md
vendored
@ -59,7 +59,7 @@ proc createSwitch(ma: MultiAddress): (Switch, PeerInfo) =
  let mplexProvider = newMuxerProvider(createMplex, MplexCodec) # create multiplexer
  let transports = @[Transport(newTransport(TcpTransport))] # add all transports (tcp only for now, but can be anything in the future)
  let muxers = {MplexCodec: mplexProvider}.toTable() # add all muxers
  let secureManagers = {SecioCodec: Secure(newSecio(seckey))}.toTable() # setup the secio and any other secure provider
  let secureManagers = {SecioCodec: Secure(Secio.new(seckey))}.toTable() # setup the secio and any other secure provider

  # create the switch
  let switch = newSwitch(peerInfo,
5
vendor/nim-libp2p/libp2p.nim
vendored
5
vendor/nim-libp2p/libp2p.nim
vendored
@ -27,7 +27,8 @@ import
  peerinfo,
  multiaddress,
  builders,
  crypto/crypto]
  crypto/crypto,
  protocols/pubsub]

import bearssl

@ -36,4 +37,4 @@ export
  connection, multiaddress, crypto, lpstream,
  bufferstream, bearssl, muxer, mplex, transport,
  tcptransport, noise, errors, cid, multihash,
  multicodec, builders
  multicodec, builders, pubsub
25
vendor/nim-libp2p/libp2p/builders.nim
vendored
25
vendor/nim-libp2p/libp2p/builders.nim
vendored
@ -10,14 +10,16 @@
{.push raises: [Defect].}

import
  options, tables, chronos, bearssl,
  options, tables, chronos, chronicles, bearssl,
  switch, peerid, peerinfo, stream/connection, multiaddress,
  crypto/crypto, transports/[transport, tcptransport],
  muxers/[muxer, mplex/mplex],
  protocols/[identify, secure/secure, secure/noise]
  protocols/[identify, secure/secure, secure/noise],
  connmanager, upgrademngrs/muxedupgrade,
  errors

export
  switch, peerid, peerinfo, connection, multiaddress, crypto
  switch, peerid, peerinfo, connection, multiaddress, crypto, errors

type
  SecureProtocol* {.pure.} = enum
@ -136,7 +138,7 @@ proc build*(b: SwitchBuilder): Switch
  var
    secureManagerInstances: seq[Secure]
  if SecureProtocol.Noise in b.secureManagers:
    secureManagerInstances.add(newNoise(b.rng, seckey).Secure)
    secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)

  let
    peerInfo = PeerInfo.init(
@ -149,17 +151,20 @@ proc build*(b: SwitchBuilder): Switch
    muxers = block:
      var muxers: Table[string, MuxerProvider]
      if b.mplexOpts.enable:
        muxers.add(MplexCodec, newMuxerProvider(b.mplexOpts.newMuxer, MplexCodec))
        muxers[MplexCodec] = MuxerProvider.new(b.mplexOpts.newMuxer, MplexCodec)
      muxers

  let
    identify = newIdentify(peerInfo)
    identify = Identify.new(peerInfo)
    connManager = ConnManager.init(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
    ms = MultistreamSelect.new()
    muxedUpgrade = MuxedUpgrade.init(identify, muxers, secureManagerInstances, connManager, ms)

  let
    transports = block:
      var transports: seq[Transport]
      if b.tcpTransportOpts.enable:
        transports.add(Transport(TcpTransport.init(b.tcpTransportOpts.flags)))
        transports.add(Transport(TcpTransport.init(b.tcpTransportOpts.flags, muxedUpgrade)))
      transports

  if b.secureManagers.len == 0:
@ -174,10 +179,8 @@ proc build*(b: SwitchBuilder): Switch
    identity = identify,
    muxers = muxers,
    secureManagers = secureManagerInstances,
    maxConnections = b.maxConnections,
    maxIn = b.maxIn,
    maxOut = b.maxOut,
    maxConnsPerPeer = b.maxConnsPerPeer)
    connManager = connManager,
    ms = ms)

  return switch
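
The muxer-registry change above swaps `tables.add` for `[]=`. The semantics
matter: `add` can leave duplicate entries under the same key, while `[]=`
upserts, which is what a codec-keyed registry wants. A self-contained sketch
of that difference (std/tables only; names illustrative):

  import std/tables

  var registry: Table[string, int]
  registry["codec"] = 1   # insert
  registry["codec"] = 2   # overwrite in place, no duplicate key
  doAssert registry["codec"] == 2
  doAssert registry.len == 1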
45
vendor/nim-libp2p/libp2p/connmanager.nim
vendored
45
vendor/nim-libp2p/libp2p/connmanager.nim
vendored
@ -10,7 +10,7 @@
{.push raises: [Defect].}

import std/[options, tables, sequtils, sets]
import chronos, chronicles, metrics
import pkg/[chronos, chronicles, metrics]
import peerinfo,
       stream/connection,
       muxers/muxer,
@ -48,11 +48,12 @@ type
      discard

  ConnEventHandler* =
    proc(peerId: PeerID, event: ConnEvent): Future[void]
    proc(peerInfo: PeerInfo, event: ConnEvent): Future[void]
      {.gcsafe, raises: [Defect].}

  PeerEventKind* {.pure.} = enum
    Left,
    Identified,
    Joined

  PeerEvent* = object
@ -63,7 +64,7 @@ type
      discard

  PeerEventHandler* =
    proc(peerId: PeerID, event: PeerEvent): Future[void] {.gcsafe.}
    proc(peerInfo: PeerInfo, event: PeerEvent): Future[void] {.gcsafe.}

  MuxerHolder = object
    muxer: Muxer
@ -132,22 +133,22 @@ proc removeConnEventHandler*(c: ConnManager,
    raiseAssert exc.msg

proc triggerConnEvent*(c: ConnManager,
                       peerId: PeerID,
                       peerInfo: PeerInfo,
                       event: ConnEvent) {.async, gcsafe.} =
  try:
    trace "About to trigger connection events", peer = peerId
    trace "About to trigger connection events", peer = peerInfo.peerId
    if c.connEvents[event.kind].len() > 0:
      trace "triggering connection events", peer = peerId, event = $event.kind
      trace "triggering connection events", peer = peerInfo.peerId, event = $event.kind
      var connEvents: seq[Future[void]]
      for h in c.connEvents[event.kind]:
        connEvents.add(h(peerId, event))
        connEvents.add(h(peerInfo, event))

      checkFutures(await allFinished(connEvents))
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    warn "Exception in triggerConnEvents",
      msg = exc.msg, peerId, event = $event
      msg = exc.msg, peer = peerInfo.peerId, event = $event

proc addPeerEventHandler*(c: ConnManager,
                          handler: PeerEventHandler,
@ -178,33 +179,33 @@ proc removePeerEventHandler*(c: ConnManager,
    raiseAssert exc.msg

proc triggerPeerEvents*(c: ConnManager,
                        peerId: PeerID,
                        peerInfo: PeerInfo,
                        event: PeerEvent) {.async, gcsafe.} =

  trace "About to trigger peer events", peer = peerId
  trace "About to trigger peer events", peer = peerInfo.peerId
  if c.peerEvents[event.kind].len == 0:
    return

  try:
    let count = c.connCount(peerId)
    let count = c.connCount(peerInfo.peerId)
    if event.kind == PeerEventKind.Joined and count != 1:
      trace "peer already joined", peerId, event = $event
      trace "peer already joined", peer = peerInfo.peerId, event = $event
      return
    elif event.kind == PeerEventKind.Left and count != 0:
      trace "peer still connected or already left", peerId, event = $event
      trace "peer still connected or already left", peer = peerInfo.peerId, event = $event
      return

    trace "triggering peer events", peerId, event = $event
    trace "triggering peer events", peer = peerInfo.peerId, event = $event

    var peerEvents: seq[Future[void]]
    for h in c.peerEvents[event.kind]:
      peerEvents.add(h(peerId, event))
      peerEvents.add(h(peerInfo, event))

    checkFutures(await allFinished(peerEvents))
  except CancelledError as exc:
    raise exc
  except CatchableError as exc: # handlers should not raise!
    warn "Exception in triggerPeerEvents", exc = exc.msg, peerId
    warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerInfo.peerId

proc contains*(c: ConnManager, conn: Connection): bool =
  ## checks if a connection is being tracked by the
@ -292,12 +293,12 @@ proc onConnUpgraded(c: ConnManager, conn: Connection) {.async.} =
    trace "Triggering connect events", conn
    conn.upgrade()

    let peerId = conn.peerInfo.peerId
    let peerInfo = conn.peerInfo
    await c.triggerPeerEvents(
      peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: conn.dir == Direction.Out))
      peerInfo, PeerEvent(kind: PeerEventKind.Joined, initiator: conn.dir == Direction.Out))

    await c.triggerConnEvent(
      peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: conn.dir == Direction.In))
      peerInfo, ConnEvent(kind: ConnEventKind.Connected, incoming: conn.dir == Direction.In))
  except CatchableError as exc:
    # This is a top-level procedure that runs as a separate task, so it
    # does not need to propagate CancelledError and should handle other errors
@ -307,10 +308,10 @@ proc onConnUpgraded(c: ConnManager, conn: Connection) {.async.} =
proc peerCleanup(c: ConnManager, conn: Connection) {.async.} =
  try:
    trace "Triggering disconnect events", conn
    let peerId = conn.peerInfo.peerId
    let peerInfo = conn.peerInfo
    await c.triggerConnEvent(
      peerId, ConnEvent(kind: ConnEventKind.Disconnected))
    await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))
      peerInfo, ConnEvent(kind: ConnEventKind.Disconnected))
    await c.triggerPeerEvents(peerInfo, PeerEvent(kind: PeerEventKind.Left))
  except CatchableError as exc:
    # This is a top-level procedure that runs as a separate task, so it
    # does not need to propagate CancelledError and should handle other errors
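
With this change, event handlers receive the full PeerInfo rather than a
bare PeerID. A handler sketch against the new signature (the import paths
are assumptions, and the commented registration call assumes
addConnEventHandler keeps its (handler, kind) shape):

  import chronos
  import libp2p/[connmanager, peerinfo]  # paths assumed

  proc onConn(peerInfo: PeerInfo, event: ConnEvent) {.async.} =
    echo "connection event for ", peerInfo.peerId, ": ", $event.kind

  # connManager.addConnEventHandler(onConn, ConnEventKind.Connected)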
12
vendor/nim-libp2p/libp2p/daemon/daemonapi.nim
vendored
12
vendor/nim-libp2p/libp2p/daemon/daemonapi.nim
vendored
@ -13,8 +13,8 @@
import std/[os, osproc, strutils, tables, strtabs]
import pkg/[chronos, chronicles]
import ../varint, ../multiaddress, ../multicodec, ../cid, ../peerid
import ../wire, ../multihash, ../protobuf/minprotobuf
import ../crypto/crypto, ../errors
import ../wire, ../multihash, ../protobuf/minprotobuf, ../errors
import ../crypto/crypto

export
  peerid, multiaddress, multicodec, multihash, cid, crypto, wire, errors
@ -155,11 +155,9 @@ type
    ticket: PubsubTicket,
    message: PubSubMessage): Future[bool] {.gcsafe.}

  # TODO: would be nice to be able to map other errors to
  # these types with `Result.toException`, but it doesn't work
  # in this module
  DaemonRemoteError* = object of CatchableError
  DaemonLocalError* = object of CatchableError
  DaemonError* = object of LPError
  DaemonRemoteError* = object of DaemonError
  DaemonLocalError* = object of DaemonError

var daemonsCount {.threadvar.}: int
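
The new `DaemonError` ancestor lets callers catch both local and remote
daemon failures with a single branch. A self-contained sketch (the types
below are local stand-ins mirroring the hierarchy above, not the library's
own declarations):

  type
    LPError = object of CatchableError      # stand-in for libp2p's LPError
    DaemonError = object of LPError
    DaemonRemoteError = object of DaemonError
    DaemonLocalError = object of DaemonError

  proc talkToDaemon() =
    raise newException(DaemonRemoteError, "remote failure")

  try:
    talkToDaemon()
  except DaemonError as e:                  # covers remote and local variants
    echo "daemon error: ", e.msg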
48
vendor/nim-libp2p/libp2p/dial.nim
vendored
Normal file
48
vendor/nim-libp2p/libp2p/dial.nim
vendored
Normal file
@ -0,0 +1,48 @@
## Nim-LibP2P
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [Defect].}

import chronos
import peerid,
       stream/connection

type
  Dial* = ref object of RootObj

method connect*(
  self: Dial,
  peerId: PeerID,
  addrs: seq[MultiAddress]) {.async, base.} =
  ## connect remote peer without negotiating
  ## a protocol
  ##

  doAssert(false, "Not implemented!")

method dial*(
  self: Dial,
  peerId: PeerID,
  protos: seq[string]): Future[Connection] {.async, base.} =
  ## create a protocol stream over an
  ## existing connection
  ##

  doAssert(false, "Not implemented!")

method dial*(
  self: Dial,
  peerId: PeerID,
  addrs: seq[MultiAddress],
  protos: seq[string]): Future[Connection] {.async, base.} =
  ## create a protocol stream and establish
  ## a connection if one doesn't exist already
  ##

  doAssert(false, "Not implemented!")
242
vendor/nim-libp2p/libp2p/dialer.nim
vendored
Normal file
242
vendor/nim-libp2p/libp2p/dialer.nim
vendored
Normal file
@ -0,0 +1,242 @@
## Nim-LibP2P
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/[sugar, tables]

import pkg/[chronos,
            chronicles,
            metrics]

import dial,
       peerid,
       peerinfo,
       multistream,
       connmanager,
       stream/connection,
       transports/transport,
       errors

export dial, errors

logScope:
  topics = "libp2p dialer"

declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
declareCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades")

type
  DialFailedError* = object of LPError

  Dialer* = ref object of Dial
    peerInfo*: PeerInfo
    ms: MultistreamSelect
    connManager: ConnManager
    dialLock: Table[PeerID, AsyncLock]
    transports: seq[Transport]

proc dialAndUpgrade(
  self: Dialer,
  peerId: PeerID,
  addrs: seq[MultiAddress]):
  Future[Connection] {.async.} =
  debug "Dialing peer", peerId

  # Avoid "cannot be captured as it would violate memory safety" errors in Nim-1.4.x.
  var
    transport: Transport
    address: MultiAddress

  for t in self.transports: # for each transport
    transport = t
    for a in addrs:         # for each address
      address = a
      if t.handles(a):      # check if it can dial it
        trace "Dialing address", address = $a, peerId
        let dialed = try:
            libp2p_total_dial_attempts.inc()
            # await a connection slot when the total
            # connection count is equal to `maxConns`
            await self.connManager.trackOutgoingConn(
              () => transport.dial(address)
            )
          except TooManyConnectionsError as exc:
            trace "Connection limit reached!"
            raise exc
          except CancelledError as exc:
            debug "Dialing canceled", msg = exc.msg, peerId
            raise exc
          except CatchableError as exc:
            debug "Dialing failed", msg = exc.msg, peerId
            libp2p_failed_dials.inc()
            continue # Try the next address

        # make sure to assign the peer to the connection
        dialed.peerInfo = PeerInfo.init(peerId, addrs)

        # also keep track of the connection's bottom unsafe transport direction
        # required by gossipsub scoring
        dialed.transportDir = Direction.Out

        libp2p_successful_dials.inc()

        let conn = try:
            await transport.upgradeOutgoing(dialed)
          except CatchableError as exc:
            # If we failed to establish the connection through one transport,
            # we won't succeed through another - no use in trying again
            await dialed.close()
            debug "Upgrade failed", msg = exc.msg, peerId
            if exc isnot CancelledError:
              libp2p_failed_upgrades_outgoing.inc()
            raise exc

        doAssert not isNil(conn), "connection died after upgradeOutgoing"
        debug "Dial successful", conn, peerInfo = conn.peerInfo
        return conn

proc internalConnect(
  self: Dialer,
  peerId: PeerID,
  addrs: seq[MultiAddress]):
  Future[Connection] {.async.} =
  if self.peerInfo.peerId == peerId:
    raise newException(CatchableError, "can't dial self!")

  # Ensure there's only one in-flight attempt per peer
  let lock = self.dialLock.mgetOrPut(peerId, newAsyncLock())
  try:
    await lock.acquire()

    # Check if we have a connection already and try to reuse it
    var conn = self.connManager.selectConn(peerId)
    if conn != nil:
      if conn.atEof or conn.closed:
        # This connection should already have been removed from the connection
        # manager - it's essentially a bug that we end up here - we'll fail
        # for now, hoping that these will clean themselves up later...
        warn "dead connection in connection manager", conn
        await conn.close()
        raise newException(DialFailedError, "Zombie connection encountered")

      trace "Reusing existing connection", conn, direction = $conn.dir
      return conn

    conn = await self.dialAndUpgrade(peerId, addrs)
    if isNil(conn): # None of the addresses connected
      raise newException(DialFailedError, "Unable to establish outgoing link")

    # We already check for this in Connection manager
    # but a disconnect could have happened right after
    # we've added the connection so we check again
    # to prevent races due to that.
    if conn.closed() or conn.atEof():
      # This can happen when the other end drops us
      # before we get a chance to return the connection
      # back to the dialer.
      trace "Connection dead on arrival", conn
      raise newLPStreamClosedError()

    return conn
  finally:
    if lock.locked():
      lock.release()

method connect*(
  self: Dialer,
  peerId: PeerID,
  addrs: seq[MultiAddress]) {.async.} =
  ## connect remote peer without negotiating
  ## a protocol
  ##

  if self.connManager.connCount(peerId) > 0:
    return

  discard await self.internalConnect(peerId, addrs)

proc negotiateStream(
  self: Dialer,
  conn: Connection,
  protos: seq[string]): Future[Connection] {.async.} =
  trace "Negotiating stream", conn, protos
  let selected = await self.ms.select(conn, protos)
  if not protos.contains(selected):
    await conn.closeWithEOF()
    raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)

  return conn

method dial*(
  self: Dialer,
  peerId: PeerID,
  protos: seq[string]): Future[Connection] {.async.} =
  ## create a protocol stream over an
  ## existing connection
  ##

  trace "Dialing (existing)", peerId, protos
  let stream = await self.connManager.getStream(peerId)
  if stream.isNil:
    raise newException(DialFailedError, "Couldn't get muxed stream")

  return await self.negotiateStream(stream, protos)

method dial*(
  self: Dialer,
  peerId: PeerID,
  addrs: seq[MultiAddress],
  protos: seq[string]): Future[Connection] {.async.} =
  ## create a protocol stream and establish
  ## a connection if one doesn't exist already
  ##

  var
    conn: Connection
    stream: Connection

  proc cleanup() {.async.} =
    if not(isNil(stream)):
      await stream.closeWithEOF()

    if not(isNil(conn)):
      await conn.close()

  try:
    trace "Dialing (new)", peerId, protos
    conn = await self.internalConnect(peerId, addrs)
    trace "Opening stream", conn
    stream = await self.connManager.getStream(conn)

    if isNil(stream):
      raise newException(DialFailedError,
        "Couldn't get muxed stream")

    return await self.negotiateStream(stream, protos)
  except CancelledError as exc:
    trace "Dial canceled", conn
    await cleanup()
    raise exc
  except CatchableError as exc:
    debug "Error dialing", conn, msg = exc.msg
    await cleanup()
    raise exc

proc new*(
  T: type Dialer,
  peerInfo: PeerInfo,
  connManager: ConnManager,
  transports: seq[Transport],
  ms: MultistreamSelect): Dialer =

  T(peerInfo: peerInfo,
    connManager: connManager,
    transports: transports,
    ms: ms)
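
A detail worth calling out from `internalConnect`: `mgetOrPut` lazily
creates one AsyncLock per peer, so concurrent dials to the same peer
serialize while dials to different peers proceed in parallel. A minimal,
self-contained sketch of that pattern (string keys stand in for PeerID;
chronos and std/tables only):

  import std/tables
  import chronos

  var dialLock: Table[string, AsyncLock]

  proc withPeerLock(peer: string,
                    body: proc(): Future[void] {.gcsafe.}) {.async.} =
    # One lock per key; created on first use
    let lock = dialLock.mgetOrPut(peer, newAsyncLock())
    await lock.acquire()
    try:
      await body()
    finally:
      lock.release()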
8
vendor/nim-libp2p/libp2p/multistream.nim
vendored
8
vendor/nim-libp2p/libp2p/multistream.nim
vendored
@ -40,9 +40,11 @@ type
    handlers*: seq[HandlerHolder]
    codec*: string

proc newMultistream*(): MultistreamSelect =
  new result
  result.codec = MSCodec
proc new*(T: typedesc[MultistreamSelect]): T =
  T(codec: MSCodec)

proc newMultistream*(): MultistreamSelect {.deprecated: "use MultistreamSelect.new".} =
  MultistreamSelect.new()

template validateSuffix(str: string): untyped =
  if str.endsWith("\n"):
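
This hunk follows the same `T.new` constructor convention seen elsewhere in
the commit, keeping the old name as a deprecated shim. Callers can migrate
gradually (sketch; import path assumed):

  # assuming: import libp2p/multistream
  let ms = MultistreamSelect.new()  # new-style constructor
  let msOld = newMultistream()      # still compiles, with a deprecation warning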
@ -9,8 +9,7 @@
{.push raises: [Defect].}

import chronos
import nimcrypto/utils, chronicles, stew/byteutils
import pkg/[chronos, nimcrypto/utils, chronicles, stew/byteutils]
import ../../stream/connection,
       ../../utility,
       ../../varint,
@ -10,10 +10,9 @@
{.push raises: [Defect].}

import std/[oids, strformat]
import chronos, chronicles, metrics
import pkg/[chronos, chronicles, metrics, nimcrypto/utils]
import ./coder,
       ../muxer,
       nimcrypto/utils,
       ../../stream/[bufferstream, connection, streamseq],
       ../../peerinfo

@ -140,7 +139,7 @@ method close*(s: LPChannel) {.async, gcsafe.} =

method initStream*(s: LPChannel) =
  if s.objName.len == 0:
    s.objName = "LPChannel"
    s.objName = LPChannelTrackerName

  s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
    trace "Idle timeout expired, resetting LPChannel", s
17
vendor/nim-libp2p/libp2p/muxers/muxer.nim
vendored
@@ -48,11 +48,18 @@ method newStream*(m: Muxer, name: string = "", lazy: bool = false):
method close*(m: Muxer) {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard

proc newMuxerProvider*(creator: MuxerConstructor, codec: string): MuxerProvider {.gcsafe.} =
  new result
  result.newMuxer = creator
  result.codec = codec
  result.init()
proc new*(
  T: typedesc[MuxerProvider],
  creator: MuxerConstructor,
  codec: string): T {.gcsafe.} =

  let muxerProvider = T(newMuxer: creator)
  muxerProvider.codec = codec
  muxerProvider.init()
  muxerProvider

proc newMuxerProvider*(creator: MuxerConstructor, codec: string): MuxerProvider {.gcsafe, deprecated: "use MuxerProvider.new".} =
  MuxerProvider.new(creator, codec)

method init(c: MuxerProvider) =
  proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
71
vendor/nim-libp2p/libp2p/peerstore.nim
vendored
@@ -10,9 +10,9 @@
{.push raises: [Defect].}

import
  std/[tables, sets, sequtils],
  std/[tables, sets, sequtils, options],
  ./crypto/crypto,
  ./peerid,
  ./peerid, ./peerinfo,
  ./multiaddress

type
@@ -34,23 +34,25 @@ type
  PeerBook*[T] = object of RootObj
    book*: Table[PeerID, T]
    changeHandlers: seq[PeerBookChangeHandler[T]]

  SetPeerBook*[T] = object of PeerBook[HashSet[T]]

  AddressBook* = object of PeerBook[HashSet[MultiAddress]]
  ProtoBook* = object of PeerBook[HashSet[string]]
  AddressBook* = object of SetPeerBook[MultiAddress]
  ProtoBook* = object of SetPeerBook[string]
  KeyBook* = object of PeerBook[PublicKey]

  ####################
  # Peer store types #
  ####################

  PeerStore* = ref object of RootObj
  PeerStore* = ref object
    addressBook*: AddressBook
    protoBook*: ProtoBook
    keyBook*: KeyBook

  StoredInfo* = object
    # Collates stored info about a peer
    peerId*: PeerID
    peerId*: PeerID
    addrs*: HashSet[MultiAddress]
    protos*: HashSet[string]
    publicKey*: PublicKey
@@ -93,39 +95,23 @@ proc delete*[T](peerBook: var PeerBook[T],
    peerBook.book.del(peerId)
    return true

####################
# Address Book API #
####################
################
# Set Book API #
################

proc add*(addressBook: var AddressBook,
          peerId: PeerID,
          multiaddr: MultiAddress) =
  ## Add known multiaddr of a given peer. If the peer is not known,
  ## it will be set with the provided multiaddr.
proc add*[T](
  peerBook: var SetPeerBook[T],
  peerId: PeerID,
  entry: T) =
  ## Add entry to a given peer. If the peer is not known,
  ## it will be set with the provided entry.

  addressBook.book.mgetOrPut(peerId,
    initHashSet[MultiAddress]()).incl(multiaddr)
  peerBook.book.mgetOrPut(peerId,
    initHashSet[T]()).incl(entry)

  # Notify clients
  for handler in addressBook.changeHandlers:
    handler(peerId, addressBook.get(peerId))

#####################
# Protocol Book API #
#####################

proc add*(protoBook: var ProtoBook,
          peerId: PeerID,
          protocol: string) =
  ## Adds known protocol codec for a given peer. If the peer is not known,
  ## it will be set with the provided protocol.

  protoBook.book.mgetOrPut(peerId,
    initHashSet[string]()).incl(protocol)

  # Notify clients
  for handler in protoBook.changeHandlers:
    handler(peerId, protoBook.get(peerId))
  for handler in peerBook.changeHandlers:
    handler(peerId, peerBook.get(peerId))

##################
# Peer Store API #
@@ -160,6 +146,21 @@ proc get*(peerStore: PeerStore,
    publicKey: peerStore.keyBook.get(peerId)
  )

proc update*(peerStore: PeerStore, peerInfo: PeerInfo) =
  for address in peerInfo.addrs:
    peerStore.addressBook.add(peerInfo.peerId, address)
  for proto in peerInfo.protocols:
    peerStore.protoBook.add(peerInfo.peerId, proto)
  let pKey = peerInfo.publicKey()
  if pKey.isSome:
    peerStore.keyBook.set(peerInfo.peerId, pKey.get())

proc replace*(peerStore: PeerStore, peerInfo: PeerInfo) =
  discard peerStore.addressBook.delete(peerInfo.peerId)
  discard peerStore.protoBook.delete(peerInfo.peerId)
  discard peerStore.keyBook.delete(peerInfo.peerId)
  peerStore.update(peerInfo)

proc peers*(peerStore: PeerStore): seq[StoredInfo] =
  ## Get all the stored information of every peer.
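The generic SetPeerBook collapses the previously duplicated address-book and protocol-book add procs into one implementation, while update/replace keep the books in sync with identify results. A minimal usage sketch (somePeerId and someAddr are hypothetical placeholders):

  # Hypothetical sketch of the refactored PeerStore API:
  let store = PeerStore.new()
  store.addressBook.add(somePeerId, someAddr)          # SetPeerBook[MultiAddress]
  store.protoBook.add(somePeerId, "/ipfs/ping/1.0.0")  # SetPeerBook[string]
  let info = store.get(somePeerId)                     # collated StoredInfo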
11
vendor/nim-libp2p/libp2p/protocols/identify.nim
vendored
@@ -109,10 +109,13 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
    trace "decodeMsg: failed to decode received message"
    none[IdentifyInfo]()

proc newIdentify*(peerInfo: PeerInfo): Identify =
  new result
  result.peerInfo = peerInfo
  result.init()
proc new*(T: typedesc[Identify], peerInfo: PeerInfo): T =
  let identify = T(peerInfo: peerInfo)
  identify.init()
  identify

proc newIdentify*(peerInfo: PeerInfo): Identify {.deprecated: "use Identify.new".} =
  Identify.new(peerInfo)

method init*(p: Identify) =
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
95
vendor/nim-libp2p/libp2p/protocols/ping.nim
vendored
Normal file
@@ -0,0 +1,95 @@
## Nim-LibP2P
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import chronos, chronicles, bearssl
import ../protobuf/minprotobuf,
       ../peerinfo,
       ../stream/connection,
       ../peerid,
       ../crypto/crypto,
       ../multiaddress,
       ../protocols/protocol,
       ../errors

logScope:
  topics = "libp2p ping"

const
  PingCodec* = "/ipfs/ping/1.0.0"
  PingSize = 32

type
  PingError* = object of LPError
  WrongPingAckError* = object of LPError

  PingHandler* = proc (
    peer: PeerInfo):
    Future[void]
    {.gcsafe, raises: [Defect].}

  Ping* = ref object of LPProtocol
    pingHandler*: PingHandler
    rng: ref BrHmacDrbgContext

proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref BrHmacDrbgContext = newRng()): T =
  let ping = Ping(pinghandler: handler, rng: rng)
  ping.init()
  ping

method init*(p: Ping) =
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
    try:
      trace "handling ping", conn
      var buf: array[PingSize, byte]
      await conn.readExactly(addr buf[0], PingSize)
      trace "echoing ping", conn
      await conn.write(addr buf[0], PingSize)
      if not isNil(p.pingHandler):
        await p.pingHandler(conn.peerInfo)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      trace "exception in ping handler", exc = exc.msg, conn

  p.handler = handle
  p.codec = PingCodec

proc ping*(
  p: Ping,
  conn: Connection,
  ): Future[Duration] {.async, gcsafe.} =
  ## Sends ping to `conn`
  ## Returns the delay
  ##

  trace "initiating ping", conn

  var
    randomBuf: array[PingSize, byte]
    resultBuf: array[PingSize, byte]

  p.rng[].brHmacDrbgGenerate(randomBuf)

  let startTime = Moment.now()

  trace "sending ping", conn
  await conn.write(addr randomBuf[0], randomBuf.len)

  await conn.readExactly(addr resultBuf[0], PingSize)

  let responseDur = Moment.now() - startTime

  trace "got ping response", conn, responseDur

  for i in 0..<randomBuf.len:
    if randomBuf[i] != resultBuf[i]:
      raise newException(WrongPingAckError, "Incorrect ping data from peer!")

  trace "valid ping response", conn
  return responseDur
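The new ping protocol implements the standard /ipfs/ping/1.0.0 exchange: the responder echoes 32 random bytes, and the initiator verifies the echo and measures the round trip. A minimal usage sketch, assuming a started switch on each side (identifiers other than those defined above are hypothetical placeholders):

  # Hypothetical sketch: mount Ping and measure round-trip time.
  let pingProto = Ping.new()
  switch.mount(pingProto)
  let conn = await switch.dial(remotePeerId, remoteAddrs, PingCodec)
  let rtt = await pingProto.ping(conn)  # raises WrongPingAckError on a bad echo
  await conn.close()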
3
vendor/nim-libp2p/libp2p/protocols/pubsub.nim
vendored
Normal file
@@ -0,0 +1,3 @@
import ./pubsub/[pubsub, floodsub, gossipsub]

export pubsub, floodsub, gossipsub
@@ -47,7 +47,8 @@ proc init*(_: type[TopicParams]): TopicParams =
  )

proc withPeerStats*(
  g: GossipSub, peerId: PeerId,
  g: GossipSub,
  peerId: PeerId,
  action: proc (stats: var PeerStats) {.gcsafe, raises: [Defect].}) =
  ## Add or update peer statistics for a particular peer id - the statistics
  ## are retained across multiple connections until they expire
@@ -293,9 +294,10 @@ proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string])
    if t notin g.topics:
      continue

    let tt = t
    # update stats
    g.withPeerStats(peer.peerId) do (stats: var PeerStats):
      stats.topicInfos.mgetOrPut(t, TopicInfo()).invalidMessageDeliveries += 1
      stats.topicInfos.mgetOrPut(tt, TopicInfo()).invalidMessageDeliveries += 1

proc addCapped*[T](stat: var T, diff, cap: T) =
  stat += min(diff, cap - stat)
@@ -306,11 +308,13 @@ proc rewardDelivered*(
    let t = tt
    if t notin g.topics:
      continue

    let tt = t
    let topicParams = g.topicParams.mgetOrPut(t, TopicParams.init())
    # if in mesh add more delivery score

    g.withPeerStats(peer.peerId) do (stats: var PeerStats):
      stats.topicInfos.withValue(t, tstats):
      stats.topicInfos.withValue(tt, tstats):
        if tstats[].inMesh:
          if first:
            tstats[].firstMessageDeliveries.addCapped(
@@ -319,4 +323,4 @@ proc rewardDelivered*(
          tstats[].meshMessageDeliveries.addCapped(
            1, topicParams.meshMessageDeliveriesCap)
      do: # make sure we don't loose this information
        stats.topicInfos[t] = TopicInfo(meshMessageDeliveries: 1)
        stats.topicInfos[tt] = TopicInfo(meshMessageDeliveries: 1)
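addCapped saturates a score counter at cap instead of letting it grow without bound: with stat = 7.5 and cap = 8.0, adding diff = 1.0 only adds min(1.0, 8.0 - 7.5) = 0.5. A self-contained sketch of the proc shown above:

  proc addCapped[T](stat: var T, diff, cap: T) =
    # never push `stat` past `cap`
    stat += min(diff, cap - stat)

  var score = 7.5
  score.addCapped(1.0, 8.0)
  assert score == 8.0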
@@ -283,7 +283,7 @@ proc getOrCreatePeer*(
      p.onPubSubPeerEvent(peer, event)

  # create new pubsub peer
  let pubSubPeer = newPubSubPeer(peerId, getConn, dropConn, onEvent, protos[0])
  let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, onEvent, protos[0])
  debug "created new pubsub peer", peerId

  p.peers[peerId] = pubSubPeer
@@ -568,11 +568,11 @@ proc init*[PubParams: object | bool](
    parameters: parameters,
    topicsHigh: int.high)

  proc peerEventHandler(peerId: PeerID, event: PeerEvent) {.async.} =
  proc peerEventHandler(peerInfo: PeerInfo, event: PeerEvent) {.async.} =
    if event.kind == PeerEventKind.Joined:
      pubsub.subscribePeer(peerId)
      pubsub.subscribePeer(peerInfo.peerId)
    else:
      pubsub.unsubscribePeer(peerId)
      pubsub.unsubscribePeer(peerInfo.peerId)

  switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
  switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
@@ -274,15 +274,33 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =

  p.sendEncoded(encoded)

proc newPubSubPeer*(peerId: PeerID,
                    getConn: GetConn,
                    dropConn: DropConn,
                    onEvent: OnEvent,
                    codec: string): PubSubPeer =
  PubSubPeer(
proc new*(
  T: typedesc[PubSubPeer],
  peerId: PeerID,
  getConn: GetConn,
  dropConn: DropConn,
  onEvent: OnEvent,
  codec: string): T =

  T(
    getConn: getConn,
    dropConn: dropConn,
    onEvent: onEvent,
    codec: codec,
    peerId: peerId,
  )

proc newPubSubPeer*(
  peerId: PeerID,
  getConn: GetConn,
  dropConn: DropConn,
  onEvent: OnEvent,
  codec: string): PubSubPeer {.deprecated: "use PubSubPeer.new".} =

  PubSubPeer.new(
    peerId,
    getConn,
    dropConn,
    onEvent,
    codec
  )
@@ -595,11 +595,12 @@ method init*(p: Noise) {.gcsafe.} =
  procCall Secure(p).init()
  p.codec = NoiseCodec

proc newNoise*(
  rng: ref BrHmacDrbgContext,
  privateKey: PrivateKey,
  outgoing: bool = true,
  commonPrologue: seq[byte] = @[]): Noise =
proc new*(
  T: typedesc[Noise],
  rng: ref BrHmacDrbgContext,
  privateKey: PrivateKey,
  outgoing: bool = true,
  commonPrologue: seq[byte] = @[]): T =

  let pkBytes = privateKey
    .getKey()
@@ -617,4 +618,11 @@ proc newNoise*(
  )

  noise.init()
  return noise
  noise

proc newNoise*(
  rng: ref BrHmacDrbgContext,
  privateKey: PrivateKey,
  outgoing: bool = true,
  commonPrologue: seq[byte] = @[]): Noise {.deprecated: "use Noise.new".} =
  Noise.new(rng, privateKey, outgoing, commonPrologue)
@@ -25,6 +25,10 @@ method init(p: PlainText) {.gcsafe.} =
  p.codec = PlainTextCodec
  p.handler = handle

proc newPlainText*(): PlainText =
  new result
  result.init()
proc new*(T: typedesc[PlainText]): T =
  let plainText = T()
  plainText.init()
  plainText

proc newPlainText*(): PlainText {.deprecated: "use PlainText.new".} =
  PlainText.new()
@@ -36,7 +36,7 @@ const
  SecioHashes = "SHA256,SHA512"

type
  Secio = ref object of Secure
  Secio* = ref object of Secure
    rng: ref BrHmacDrbgContext
    localPrivateKey: PrivateKey
    localPublicKey: PublicKey
@@ -431,14 +431,23 @@ method init(s: Secio) {.gcsafe.} =
  procCall Secure(s).init()
  s.codec = SecioCodec

proc newSecio*(rng: ref BrHmacDrbgContext, localPrivateKey: PrivateKey): Secio =
proc new*(
  T: typedesc[Secio],
  rng: ref BrHmacDrbgContext,
  localPrivateKey: PrivateKey): T =
  let pkRes = localPrivateKey.getKey()
  if pkRes.isErr:
    raise newException(Defect, "Can't fetch local private key")

  result = Secio(
  let secio = Secio(
    rng: rng,
    localPrivateKey: localPrivateKey,
    localPublicKey: localPrivateKey.getKey().get(),
    localPublicKey: localPrivateKey
      .getKey()
      .expect("Can't fetch local private key"),
  )
  result.init()
  secio.init()
  secio

proc newSecio*(rng: ref BrHmacDrbgContext, localPrivateKey: PrivateKey): Secio {.deprecated: "use Secio.new".} =
  Secio.new(rng, localPrivateKey)
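The rewritten Secio constructor relies on Result.expect from stew instead of a manual isErr branch; expect returns the wrapped value on success and raises a Defect carrying the given message on error, so behaviour is unchanged. A minimal sketch of the idiom (hypothetical values, assuming stew/results is available):

  import stew/results

  let r = Result[int, string].ok(42)
  let v = r.expect("can't fetch value")  # 42 here; Defect if `r` held an error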
@@ -59,7 +59,7 @@ proc init*(T: type SecureConn,

method initStream*(s: SecureConn) =
  if s.objName.len == 0:
    s.objName = "SecureConn"
    s.objName = SecureConnTrackerName

  procCall Connection(s).initStream()
17
vendor/nim-libp2p/libp2p/stream/bufferstream.nim
vendored
@@ -50,7 +50,7 @@ proc len*(s: BufferStream): int =

method initStream*(s: BufferStream) =
  if s.objName.len == 0:
    s.objName = "BufferStream"
    s.objName = BufferStreamTrackerName

  procCall Connection(s).initStream()

@@ -58,10 +58,17 @@ method initStream*(s: BufferStream) =

  trace "BufferStream created", s

proc newBufferStream*(timeout: Duration = DefaultConnectionTimeout): BufferStream =
  new result
  result.timeout = timeout
  result.initStream()
proc new*(
  T: typedesc[BufferStream],
  timeout: Duration = DefaultConnectionTimeout): T =

  let bufferStream = T(timeout: timeout)
  bufferStream.initStream()
  bufferStream

proc newBufferStream*(
  timeout: Duration = DefaultConnectionTimeout): BufferStream {.deprecated: "use BufferStream.new".} =
  return BufferStream.new(timeout)

method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
  ## Write bytes to internal read buffer, use this to fill up the
@@ -13,9 +13,10 @@ import std/[hashes, oids, strformat]
import chronicles, chronos, metrics
import lpstream,
       ../multiaddress,
       ../peerinfo
       ../peerinfo,
       ../errors

export lpstream, peerinfo
export lpstream, peerinfo, errors

logScope:
  topics = "libp2p connection"
@@ -67,7 +68,7 @@ chronicles.formatIt(Connection): shortLog(it)

method initStream*(s: Connection) =
  if s.objName.len == 0:
    s.objName = "Connection"
    s.objName = ConnectionTrackerName

  procCall LPStream(s).initStream()
6
vendor/nim-libp2p/libp2p/stream/lpstream.nim
vendored
@@ -17,6 +17,8 @@ import ../varint,
       ../multiaddress,
       ../errors

export errors

declareGauge(libp2p_open_streams,
  "open stream instances", labels = ["type", "dir"])

@@ -121,7 +123,7 @@ chronicles.formatIt(LPStream): shortLog(it)

method initStream*(s: LPStream) {.base.} =
  if s.objName.len == 0:
    s.objName = "LPStream"
    s.objName = LPStreamTrackerName

  s.closeEvent = newAsyncEvent()
  s.oid = genOid()
@@ -262,9 +264,9 @@ proc write*(s: LPStream, msg: string): Future[void] =
method closeImpl*(s: LPStream): Future[void] {.async, base.} =
  ## Implementation of close - called only once
  trace "Closing stream", s, objName = s.objName, dir = $s.dir
  s.closeEvent.fire()
  libp2p_open_streams.dec(labelValues = [s.objName, $s.dir])
  inc getStreamTracker(s.objName).closed
  s.closeEvent.fire()
  trace "Closed stream", s, objName = s.objName, dir = $s.dir

method close*(s: LPStream): Future[void] {.base, async.} = # {.raises [Defect].}
236
vendor/nim-libp2p/libp2p/switch.nim
vendored
@@ -33,9 +33,11 @@ import stream/connection,
       utils/semaphore,
       connmanager,
       peerid,
       errors
       peerstore,
       errors,
       dialer

export connmanager, upgrade
export connmanager, upgrade, dialer, peerstore

logScope:
  topics = "libp2p switch"
@@ -46,26 +48,20 @@ logScope:
# and only if the channel has been secured (i.e. if a secure manager has been
# previously provided)

declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
declareCounter(libp2p_failed_upgrades_incoming, "incoming connections failed upgrades")
declareCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades")

const
  ConcurrentUpgrades* = 4

type
  DialFailedError* = object of LPError

  Switch* = ref object of RootObj
  Switch* = ref object of Dial
    peerInfo*: PeerInfo
    connManager*: ConnManager
    transports*: seq[Transport]
    ms*: MultistreamSelect
    dialLock: Table[PeerID, AsyncLock]
    acceptFuts: seq[Future[void]]
    upgrade: Upgrade
    dialer*: Dial
    peerStore*: PeerStore

proc addConnEventHandler*(s: Switch,
                          handler: ConnEventHandler,
@@ -97,186 +93,36 @@ proc isConnected*(s: Switch, peerId: PeerID): bool =
proc disconnect*(s: Switch, peerId: PeerID): Future[void] {.gcsafe.} =
  s.connManager.dropPeer(peerId)

proc dialAndUpgrade(s: Switch,
                    peerId: PeerID,
                    addrs: seq[MultiAddress]):
                    Future[Connection] {.async.} =
  debug "Dialing peer", peerId
method connect*(
  s: Switch,
  peerId: PeerID,
  addrs: seq[MultiAddress]): Future[void] =
  s.dialer.connect(peerId, addrs)

  # Avoid "cannot be captured as it would violate memory safety" errors in Nim-1.4.x.
  var
    transport: Transport
    address: MultiAddress

  for t in s.transports: # for each transport
    transport = t
    for a in addrs:      # for each address
      address = a
      if t.handles(a):   # check if it can dial it
        trace "Dialing address", address = $a, peerId
        let dialed = try:
            libp2p_total_dial_attempts.inc()
            # await a connection slot when the total
            # connection count is equal to `maxConns`
            await s.connManager.trackOutgoingConn(
              () => transport.dial(address)
            )
          except TooManyConnectionsError as exc:
            trace "Connection limit reached!"
            raise exc
          except CancelledError as exc:
            debug "Dialing canceled", msg = exc.msg, peerId
            raise exc
          except CatchableError as exc:
            debug "Dialing failed", msg = exc.msg, peerId
            libp2p_failed_dials.inc()
            continue # Try the next address

        # make sure to assign the peer to the connection
        dialed.peerInfo = PeerInfo.init(peerId, addrs)

        # also keep track of the connection's bottom unsafe transport direction
        # required by gossipsub scoring
        dialed.transportDir = Direction.Out

        libp2p_successful_dials.inc()

        let conn = try:
            await s.upgrade.upgradeOutgoing(dialed)
          except CatchableError as exc:
            # If we failed to establish the connection through one transport,
            # we won't succeeded through another - no use in trying again
            await dialed.close()
            debug "Upgrade failed", msg = exc.msg, peerId
            if exc isnot CancelledError:
              libp2p_failed_upgrades_outgoing.inc()
            raise exc

        doAssert not isNil(conn), "connection died after upgradeOutgoing"
        debug "Dial successful", conn, peerInfo = conn.peerInfo
        return conn

proc internalConnect(s: Switch,
                     peerId: PeerID,
                     addrs: seq[MultiAddress]):
                     Future[Connection] {.async.} =
  if s.peerInfo.peerId == peerId:
    raise newException(CatchableError, "can't dial self!")

  # Ensure there's only one in-flight attempt per peer
  let lock = s.dialLock.mgetOrPut(peerId, newAsyncLock())
  try:
    await lock.acquire()

    # Check if we have a connection already and try to reuse it
    var conn = s.connManager.selectConn(peerId)
    if conn != nil:
      if conn.atEof or conn.closed:
        # This connection should already have been removed from the connection
        # manager - it's essentially a bug that we end up here - we'll fail
        # for now, hoping that this will clean themselves up later...
        warn "dead connection in connection manager", conn
        await conn.close()
        raise newException(DialFailedError, "Zombie connection encountered")

      trace "Reusing existing connection", conn, direction = $conn.dir
      return conn

    conn = await s.dialAndUpgrade(peerId, addrs)
    if isNil(conn): # None of the addresses connected
      raise newException(DialFailedError, "Unable to establish outgoing link")

    # We already check for this in Connection manager
    # but a disconnect could have happened right after
    # we've added the connection so we check again
    # to prevent races due to that.
    if conn.closed() or conn.atEof():
      # This can happen when the other ends drops us
      # before we get a chance to return the connection
      # back to the dialer.
      trace "Connection dead on arrival", conn
      raise newLPStreamClosedError()

    return conn
  finally:
    if lock.locked():
      lock.release()

proc connect*(s: Switch, peerId: PeerID, addrs: seq[MultiAddress]) {.async.} =
  ## attempt to create establish a connection
  ## with a remote peer
  ##

  if s.connManager.connCount(peerId) > 0:
    return

  discard await s.internalConnect(peerId, addrs)

proc negotiateStream(s: Switch, conn: Connection, protos: seq[string]): Future[Connection] {.async.} =
  trace "Negotiating stream", conn, protos
  let selected = await s.ms.select(conn, protos)
  if not protos.contains(selected):
    await conn.closeWithEOF()
    raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)

  return conn

proc dial*(s: Switch,
           peerId: PeerID,
           protos: seq[string]): Future[Connection] {.async.} =
  trace "Dialing (existing)", peerId, protos
  let stream = await s.connManager.getStream(peerId)
  if stream.isNil:
    raise newException(DialFailedError, "Couldn't get muxed stream")

  return await s.negotiateStream(stream, protos)
method dial*(
  s: Switch,
  peerId: PeerID,
  protos: seq[string]): Future[Connection] =
  s.dialer.dial(peerId, protos)

proc dial*(s: Switch,
           peerId: PeerID,
           proto: string): Future[Connection] =
  dial(s, peerId, @[proto])

proc dial*(s: Switch,
           peerId: PeerID,
           addrs: seq[MultiAddress],
           protos: seq[string]):
           Future[Connection] {.async.} =
  var
    conn: Connection
    stream: Connection
method dial*(
  s: Switch,
  peerId: PeerID,
  addrs: seq[MultiAddress],
  protos: seq[string]): Future[Connection] =
  s.dialer.dial(peerId, addrs, protos)

  proc cleanup() {.async.} =
    if not(isNil(stream)):
      await stream.closeWithEOF()

    if not(isNil(conn)):
      await conn.close()

  try:
    trace "Dialing (new)", peerId, protos
    conn = await s.internalConnect(peerId, addrs)
    trace "Opening stream", conn
    stream = await s.connManager.getStream(conn)

    if isNil(stream):
      raise newException(DialFailedError,
        "Couldn't get muxed stream")

    return await s.negotiateStream(stream, protos)
  except CancelledError as exc:
    trace "Dial canceled", conn
    await cleanup()
    raise exc
  except CatchableError as exc:
    debug "Error dialing", conn, msg = exc.msg
    await cleanup()
    raise exc

proc dial*(s: Switch,
           peerId: PeerID,
           addrs: seq[MultiAddress],
           proto: string):
           Future[Connection] = dial(s, peerId, addrs, @[proto])
proc dial*(
  s: Switch,
  peerId: PeerID,
  addrs: seq[MultiAddress],
  proto: string): Future[Connection] =
  dial(s, peerId, addrs, @[proto])

proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
  {.gcsafe, raises: [Defect, LPError].} =
@@ -346,7 +192,7 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises

      debug "Accepted an incoming connection", conn
      asyncSpawn upgradeMonitor(conn, upgrades)
      asyncSpawn s.upgrade.upgradeIncoming(conn)
      asyncSpawn transport.upgradeIncoming(conn)
    except CancelledError as exc:
      trace "releasing semaphore on cancellation"
      upgrades.release() # always release the slot
@@ -368,6 +214,11 @@ proc start*(s: Switch): Future[seq[Future[void]]] {.async, gcsafe.} =
    s.acceptFuts.add(s.accept(t))
    startFuts.add(server)

  proc peerIdentifiedHandler(peerInfo: PeerInfo, event: PeerEvent) {.async.} =
    s.peerStore.replace(peerInfo)

  s.connManager.addPeerEventHandler(peerIdentifiedHandler, PeerEventKind.Identified)

  debug "Started libp2p node", peer = s.peerInfo
  return startFuts # listen for incoming connections

@@ -404,26 +255,19 @@ proc newSwitch*(peerInfo: PeerInfo,
                identity: Identify,
                muxers: Table[string, MuxerProvider],
                secureManagers: openarray[Secure] = [],
                maxConnections = MaxConnections,
                maxIn = -1,
                maxOut = -1,
                maxConnsPerPeer = MaxConnectionsPerPeer): Switch
                connManager: ConnManager,
                ms: MultistreamSelect): Switch
                {.raises: [Defect, LPError].} =

  if secureManagers.len == 0:
    raise (ref LPError)(msg: "Provide at least one secure manager")

  let ms = newMultistream()
  let connManager = ConnManager.init(maxConnsPerPeer, maxConnections, maxIn, maxOut)
  let upgrade = MuxedUpgrade.init(identity, muxers, secureManagers, connManager, ms)
    raise newException(LPError, "Provide at least one secure manager")

  let switch = Switch(
    peerInfo: peerInfo,
    ms: ms,
    transports: transports,
    connManager: connManager,
    upgrade: upgrade,
    )
    peerStore: PeerStore.new(),
    dialer: Dialer.new(peerInfo, connManager, transports, ms))

  switch.mount(identity)
  return switch
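After this refactor the dialing state machine lives in Dialer, and Switch keeps thin method wrappers that delegate to it, plus a PeerStore refreshed on every Identified peer event. Caller-side code is unchanged, e.g. (hypothetical peer data, assuming a constructed switch):

  await switch.connect(remotePeerId, remoteAddrs)   # forwards to switch.dialer
  let conn = await switch.dial(remotePeerId, @["/echo/1.0.0"])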
107
vendor/nim-libp2p/libp2p/transports/tcptransport.nim
vendored
@@ -14,10 +14,13 @@ import chronos, chronicles
import transport,
       ../errors,
       ../wire,
       ../multiaddress,
       ../multicodec,
       ../multistream,
       ../connmanager,
       ../multiaddress,
       ../stream/connection,
       ../stream/chronosstream
       ../stream/chronosstream,
       ../upgrademngrs/upgrade

logScope:
  topics = "libp2p tcptransport"
@@ -61,7 +64,7 @@ proc setupTcpTransportTracker(): TcpTransportTracker =
  result.isLeaked = leakTransport
  addTracker(TcpTransportTrackerName, result)

proc connHandler*(t: TcpTransport,
proc connHandler*(self: TcpTransport,
                  client: StreamTransport,
                  dir: Direction): Future[Connection] {.async.} =
  var observedAddr: MultiAddress = MultiAddress()
@@ -75,8 +78,8 @@ proc connHandler*(t: TcpTransport,

  trace "Handling tcp connection", address = $observedAddr,
                                   dir = $dir,
                                   clients = t.clients[Direction.In].len +
                                             t.clients[Direction.Out].len
                                   clients = self.clients[Direction.In].len +
                                             self.clients[Direction.Out].len

  let conn = Connection(
    ChronosStream.init(
@@ -95,7 +98,7 @@ proc connHandler*(t: TcpTransport,
      trace "Cleaning up client", addrs = $client.remoteAddress,
                                  conn

      t.clients[dir].keepItIf( it != client )
      self.clients[dir].keepItIf( it != client )
      await allFuturesThrowing(
        conn.close(), client.closeWait())

@@ -106,82 +109,108 @@ proc connHandler*(t: TcpTransport,
      let useExc {.used.} = exc
      debug "Error cleaning up client", errMsg = exc.msg, conn

  t.clients[dir].add(client)
  self.clients[dir].add(client)
  asyncSpawn onClose()

  return conn

proc init*(T: type TcpTransport,
           flags: set[ServerFlags] = {}): T =
  result = T(flags: flags)
func init*(
  T: type TcpTransport,
  flags: set[ServerFlags] = {},
  upgrade: Upgrade): T =

  result = T(
    flags: flags,
    upgrader: upgrade
  )

  result.initTransport()

method initTransport*(t: TcpTransport) =
  t.multicodec = multiCodec("tcp")
method initTransport*(self: TcpTransport) =
  self.multicodec = multiCodec("tcp")
  inc getTcpTransportTracker().opened

method start*(t: TcpTransport, ma: MultiAddress) {.async.} =
method start*(
  self: TcpTransport,
  ma: MultiAddress) {.async.} =
  ## listen on the transport
  ##

  if t.running:
  if self.running:
    trace "TCP transport already running"
    return

  await procCall Transport(t).start(ma)
  await procCall Transport(self).start(ma)
  trace "Starting TCP transport"

  t.server = createStreamServer(
    ma = t.ma,
    flags = t.flags,
    udata = t)
  self.server = createStreamServer(
    ma = self.ma,
    flags = self.flags,
    udata = self)

  # always get the resolved address in case we're bound to 0.0.0.0:0
  t.ma = MultiAddress.init(t.server.sock.getLocalAddress()).tryGet()
  t.running = true
  self.ma = MultiAddress.init(self.server.sock.getLocalAddress()).tryGet()
  self.running = true

  trace "Listening on", address = t.ma
  trace "Listening on", address = self.ma

method stop*(t: TcpTransport) {.async, gcsafe.} =
method stop*(self: TcpTransport) {.async, gcsafe.} =
  ## stop the transport
  ##

  t.running = false # mark stopped as soon as possible
  self.running = false # mark stopped as soon as possible

  try:
    trace "Stopping TCP transport"
    await procCall Transport(t).stop() # call base
    await procCall Transport(self).stop() # call base

    checkFutures(
      await allFinished(
        t.clients[Direction.In].mapIt(it.closeWait()) &
        t.clients[Direction.Out].mapIt(it.closeWait())))
        self.clients[Direction.In].mapIt(it.closeWait()) &
        self.clients[Direction.Out].mapIt(it.closeWait())))

    # server can be nil
    if not isNil(t.server):
      await t.server.closeWait()
    if not isNil(self.server):
      await self.server.closeWait()

    t.server = nil
    self.server = nil
    trace "Transport stopped"
    inc getTcpTransportTracker().closed
  except CatchableError as exc:
    trace "Error shutting down tcp transport", exc = exc.msg

method accept*(t: TcpTransport): Future[Connection] {.async, gcsafe.} =
method upgradeIncoming*(
  self: TcpTransport,
  conn: Connection): Future[void] {.gcsafe.} =
  ## base upgrade method that the transport uses to perform
  ## transport specific upgrades
  ##

  self.upgrader.upgradeIncoming(conn)

method upgradeOutgoing*(
  self: TcpTransport,
  conn: Connection): Future[Connection] {.gcsafe.} =
  ## base upgrade method that the transport uses to perform
  ## transport specific upgrades
  ##

  self.upgrader.upgradeOutgoing(conn)

method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
  ## accept a new TCP connection
  ##

  if not t.running:
  if not self.running:
    raise newTransportClosedError()

  try:
    let transp = await t.server.accept()
    return await t.connHandler(transp, Direction.In)
    let transp = await self.server.accept()
    return await self.connHandler(transp, Direction.In)
  except TransportOsError as exc:
    # TODO: it doesn't sound like all OS errors
    # can be ignored, we should re-raise those
    # that can't.
    # that can'self.
    debug "OS Error", exc = exc.msg
  except TransportTooManyError as exc:
    debug "Too many files opened", exc = exc.msg
@@ -192,16 +221,16 @@ method accept*(t: TcpTransport): Future[Connection] {.async, gcsafe.} =
    warn "Unexpected error creating connection", exc = exc.msg
    raise exc

method dial*(t: TcpTransport,
             address: MultiAddress):
             Future[Connection] {.async, gcsafe.} =
method dial*(
  self: TcpTransport,
  address: MultiAddress): Future[Connection] {.async, gcsafe.} =
  ## dial a peer
  ##

  trace "Dialing remote peer", address = $address

  let transp = await connect(address)
  return await t.connHandler(transp, Direction.Out)
  return await self.connHandler(transp, Direction.Out)

method handles*(t: TcpTransport, address: MultiAddress): bool {.gcsafe.} =
  if procCall Transport(t).handles(address):
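TcpTransport.init now takes the Upgrade instance, so the transport itself can drive connection upgrades. A minimal wiring sketch (assuming identity, muxers, secureManagers, connManager, ms and listenAddr are built as elsewhere in this commit):

  # Hypothetical wiring sketch for the transport/upgrader split:
  let upgrade = MuxedUpgrade.init(identity, muxers, secureManagers, connManager, ms)
  let transport = TcpTransport.init(upgrade = upgrade)
  await transport.start(listenAddr)            # e.g. /ip4/0.0.0.0/tcp/0
  let incoming = await transport.accept()
  asyncSpawn transport.upgradeIncoming(incoming)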
@@ -14,7 +14,8 @@ import sequtils
import chronos, chronicles
import ../stream/connection,
       ../multiaddress,
       ../multicodec
       ../multicodec,
       ../upgrademngrs/upgrade

logScope:
  topics = "libp2p transport"
@@ -25,56 +26,72 @@ type

  Transport* = ref object of RootObj
    ma*: Multiaddress
    multicodec*: MultiCodec
    running*: bool
    upgrader*: Upgrade
    multicodec*: MultiCodec

proc newTransportClosedError*(parent: ref Exception = nil): ref LPError =
  newException(TransportClosedError,
    "Transport closed, no more connections!", parent)

method initTransport*(t: Transport) {.base, gcsafe, locks: "unknown".} =
method initTransport*(self: Transport) {.base, gcsafe, locks: "unknown".} =
  ## perform protocol initialization
  ##

  discard

method start*(t: Transport, ma: MultiAddress) {.base, async.} =
method start*(
  self: Transport,
  ma: MultiAddress): Future[void] {.base, async.} =
  ## start the transport
  ##

  t.ma = ma
  self.ma = ma
  trace "starting transport", address = $ma

method stop*(t: Transport) {.base, async.} =
method stop*(self: Transport): Future[void] {.base, async.} =
  ## stop and cleanup the transport
  ## including all outstanding connections
  ##

  discard

method accept*(t: Transport): Future[Connection]
  {.base, async, gcsafe.} =
method accept*(self: Transport): Future[Connection]
  {.base, gcsafe.} =
  ## accept incoming connections
  ##

  discard

method dial*(t: Transport,
             address: MultiAddress): Future[Connection]
             {.base, async, gcsafe.} =
method dial*(
  self: Transport,
  address: MultiAddress): Future[Connection] {.base, gcsafe.} =
  ## dial a peer
  ##

  discard

method upgrade*(t: Transport) {.base, async, gcsafe.} =
method upgradeIncoming*(
  self: Transport,
  conn: Connection): Future[void] {.base, gcsafe.} =
  ## base upgrade method that the transport uses to perform
  ## transport specific upgrades
  ##

  discard
  doAssert(false, "Not implemented!")

method handles*(t: Transport, address: MultiAddress): bool {.base, gcsafe.} =
method upgradeOutgoing*(
  self: Transport,
  conn: Connection): Future[Connection] {.base, gcsafe.} =
  ## base upgrade method that the transport uses to perform
  ## transport specific upgrades
  ##

  doAssert(false, "Not implemented!")

method handles*(
  self: Transport,
  address: MultiAddress): bool {.base, gcsafe.} =
  ## check if transport supports the multiaddress
  ##

@@ -83,7 +100,7 @@ method handles*(t: Transport, address: MultiAddress): bool {.base, gcsafe.} =
  if address.protocols.isOk:
    return address.protocols.get().filterIt( it == multiCodec("p2p-circuit") ).len == 0

method localAddress*(t: Transport): MultiAddress {.base, gcsafe.} =
method localAddress*(self: Transport): MultiAddress {.base, gcsafe.} =
  ## get the local address of the transport in case started with 0.0.0.0:0
  ##
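With the upgrader moved onto the Transport base object, a custom transport only needs to override accept and dial while inheriting the upgradeIncoming/upgradeOutgoing plumbing. A skeletal sketch (hypothetical type, non-functional stub bodies):

  type MemoryTransport = ref object of Transport

  method dial(self: MemoryTransport,
              address: MultiAddress): Future[Connection] {.async.} =
    discard # open an in-memory connection here

  method accept(self: MemoryTransport): Future[Connection] {.async.} =
    discard # hand out the next queued in-memory connection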
@@ -25,26 +25,30 @@ type
    muxers*: Table[string, MuxerProvider]
    streamHandler*: StreamHandler

proc identify*(u: MuxedUpgrade, muxer: Muxer) {.async, gcsafe.} =
proc identify*(
  self: MuxedUpgrade,
  muxer: Muxer) {.async, gcsafe.} =
  # new stream for identify
  var stream = await muxer.newStream()
  if stream == nil:
    return

  try:
    await u.identify(stream)
    await self.identify(stream)
  finally:
    await stream.closeWithEOF()

proc mux*(u: MuxedUpgrade, conn: Connection): Future[Muxer] {.async, gcsafe.} =
proc mux*(
  self: MuxedUpgrade,
  conn: Connection): Future[Muxer] {.async, gcsafe.} =
  ## mux incoming connection

  trace "Muxing connection", conn
  if u.muxers.len == 0:
  if self.muxers.len == 0:
    warn "no muxers registered, skipping upgrade flow", conn
    return

  let muxerName = await u.ms.select(conn, toSeq(u.muxers.keys()))
  let muxerName = await self.ms.select(conn, toSeq(self.muxers.keys()))
  if muxerName.len == 0 or muxerName == "na":
    debug "no muxer available, early exit", conn
    return
@@ -52,18 +56,18 @@ proc mux*(u: MuxedUpgrade, conn: Connection): Future[Muxer] {.async, gcsafe.} =
  trace "Found a muxer", conn, muxerName

  # create new muxer for connection
  let muxer = u.muxers[muxerName].newMuxer(conn)
  let muxer = self.muxers[muxerName].newMuxer(conn)

  # install stream handler
  muxer.streamHandler = u.streamHandler
  muxer.streamHandler = self.streamHandler

  u.connManager.storeConn(conn)
  self.connManager.storeConn(conn)

  # store it in muxed connections if we have a peer for it
  u.connManager.storeMuxer(muxer, muxer.handle()) # store muxer and start read loop
  self.connManager.storeMuxer(muxer, muxer.handle()) # store muxer and start read loop

  try:
    await u.identify(muxer)
    await self.identify(muxer)
  except CatchableError as exc:
    # Identify is non-essential, though if it fails, it might indicate that
    # the connection was closed already - this will be picked up by the read
@@ -72,10 +76,12 @@ proc mux*(u: MuxedUpgrade, conn: Connection): Future[Muxer] {.async, gcsafe.} =

  return muxer

method upgradeOutgoing*(u: MuxedUpgrade, conn: Connection): Future[Connection] {.async, gcsafe.} =
method upgradeOutgoing*(
  self: MuxedUpgrade,
  conn: Connection): Future[Connection] {.async, gcsafe.} =
  trace "Upgrading outgoing connection", conn

  let sconn = await u.secure(conn) # secure the connection
  let sconn = await self.secure(conn) # secure the connection
  if isNil(sconn):
    raise newException(UpgradeFailedError,
      "unable to secure connection, stopping upgrade")
@@ -84,7 +90,7 @@ method upgradeOutgoing*(u: MuxedUpgrade, conn: Connection): Future[Connection] {
    raise newException(UpgradeFailedError,
      "current version of nim-libp2p requires that secure protocol negotiates peerid")

  let muxer = await u.mux(sconn) # mux it if possible
  let muxer = await self.mux(sconn) # mux it if possible
  if muxer == nil:
    # TODO this might be relaxed in the future
    raise newException(UpgradeFailedError,
@@ -99,16 +105,18 @@ method upgradeOutgoing*(u: MuxedUpgrade, conn: Connection): Future[Connection] {

  return sconn

method upgradeIncoming*(u: MuxedUpgrade, incomingConn: Connection) {.async, gcsafe.} = # noraises
method upgradeIncoming*(
  self: MuxedUpgrade,
  incomingConn: Connection) {.async, gcsafe.} = # noraises
  trace "Upgrading incoming connection", incomingConn
  let ms = newMultistream()
  let ms = MultistreamSelect.new()

  # secure incoming connections
  proc securedHandler(conn: Connection,
                      proto: string)
                      {.async, gcsafe, closure.} =
    trace "Starting secure handler", conn
    let secure = u.secureManagers.filterIt(it.codec == proto)[0]
    let secure = self.secureManagers.filterIt(it.codec == proto)[0]

    var cconn = conn
    try:
@@ -118,7 +126,7 @@ method upgradeIncoming*(u: MuxedUpgrade, incomingConn: Connection) {.async, gcsa

      cconn = sconn
      # add the muxer
      for muxer in u.muxers.values:
      for muxer in self.muxers.values:
        ms.addHandler(muxer.codecs, muxer)

      # handle subsequent secure requests
@@ -136,7 +144,7 @@ method upgradeIncoming*(u: MuxedUpgrade, incomingConn: Connection) {.async, gcsa
  try:
    if (await ms.select(incomingConn)): # just handshake
      # add the secure handlers
      for k in u.secureManagers:
      for k in self.secureManagers:
        ms.addHandler(k.codec, securedHandler)

    # handle un-secured connections
@@ -150,7 +158,9 @@ method upgradeIncoming*(u: MuxedUpgrade, incomingConn: Connection) {.async, gcsa
    if not isNil(incomingConn):
      await incomingConn.close()

proc muxerHandler(u: MuxedUpgrade, muxer: Muxer) {.async, gcsafe.} =
proc muxerHandler(
  self: MuxedUpgrade,
  muxer: Muxer) {.async, gcsafe.} =
  let
    conn = muxer.connection

@@ -160,13 +170,13 @@ proc muxerHandler(u: MuxedUpgrade, muxer: Muxer) {.async, gcsafe.} =
    return

  # store incoming connection
  u.connManager.storeConn(conn)
  self.connManager.storeConn(conn)

  # store muxer and muxed connection
  u.connManager.storeMuxer(muxer)
  self.connManager.storeMuxer(muxer)

  try:
    await u.identify(muxer)
    await self.identify(muxer)
  except IdentifyError as exc:
    # Identify is non-essential, though if it fails, it might indicate that
    # the connection was closed already - this will be picked up by the read
@@ -198,7 +208,8 @@ proc init*(
    connManager: connManager,
    ms: ms)

  upgrader.streamHandler = proc(conn: Connection) {.async, gcsafe.} = # noraises
  upgrader.streamHandler = proc(conn: Connection)
    {.async, gcsafe, raises: [Defect].} =
    trace "Starting stream handler", conn
    try:
      await upgrader.ms.handle(conn) # handle incoming connection
@@ -212,7 +223,8 @@ proc init*(

  for _, val in muxers:
    val.streamHandler = upgrader.streamHandler
    val.muxerHandler = proc(muxer: Muxer): Future[void] =
    val.muxerHandler = proc(muxer: Muxer): Future[void]
      {.raises: [Defect].} =
      upgrader.muxerHandler(muxer)

  return upgrader
@@ -35,22 +35,28 @@ type
    connManager*: ConnManager
    secureManagers*: seq[Secure]

method upgradeIncoming*(u: Upgrade, conn: Connection): Future[void] {.base.} =
method upgradeIncoming*(
  self: Upgrade,
  conn: Connection): Future[void] {.base.} =
  doAssert(false, "Not implemented!")

method upgradeOutgoing*(u: Upgrade, conn: Connection): Future[Connection] {.base.} =
method upgradeOutgoing*(
  self: Upgrade,
  conn: Connection): Future[Connection] {.base.} =
  doAssert(false, "Not implemented!")

proc secure*(u: Upgrade, conn: Connection): Future[Connection] {.async, gcsafe.} =
  if u.secureManagers.len <= 0:
proc secure*(
  self: Upgrade,
  conn: Connection): Future[Connection] {.async, gcsafe.} =
  if self.secureManagers.len <= 0:
    raise newException(UpgradeFailedError, "No secure managers registered!")

  let codec = await u.ms.select(conn, u.secureManagers.mapIt(it.codec))
  let codec = await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
  if codec.len == 0:
    raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")

  trace "Securing connection", conn, codec
  let secureProtocol = u.secureManagers.filterIt(it.codec == codec)
  let secureProtocol = self.secureManagers.filterIt(it.codec == codec)

  # ms.select should deal with the correctness of this
  # let's avoid duplicating checks but detect if it fails to do it properly
@@ -58,11 +64,13 @@ proc secure*(u: Upgrade, conn: Connection): Future[Connection] {.async, gcsafe.}

  return await secureProtocol[0].secure(conn, true)

proc identify*(u: Upgrade, conn: Connection) {.async, gcsafe.} =
proc identify*(
  self: Upgrade,
  conn: Connection) {.async, gcsafe.} =
  ## identify the connection

  if (await u.ms.select(conn, u.identity.codec)):
    let info = await u.identity.identify(conn, conn.peerInfo)
  if (await self.ms.select(conn, self.identity.codec)):
    let info = await self.identity.identify(conn, conn.peerInfo)

    if info.pubKey.isNone and isNil(conn):
      raise newException(UpgradeFailedError,
@@ -83,4 +91,5 @@ proc identify*(u: Upgrade, conn: Connection) {.async, gcsafe.} =
    if info.protos.len > 0:
      conn.peerInfo.protocols = info.protos

  await self.connManager.triggerPeerEvents(conn.peerInfo, PeerEvent(kind: PeerEventKind.Identified))
  trace "identified remote peer", conn, peerInfo = shortLog(conn.peerInfo)
13
vendor/nim-libp2p/tests/helpers.nim
vendored
@@ -36,7 +36,7 @@ iterator testTrackers*(extras: openArray[string] = []): TrackerBase =
    if not isNil(t): yield t

template checkTracker*(name: string) =
  var tracker = getTracker(LPChannelTrackerName)
  var tracker = getTracker(name)
  if tracker.isLeaked():
    checkpoint tracker.dump()
    fail()
@@ -74,10 +74,13 @@ type
method write*(s: TestBufferStream, msg: seq[byte]): Future[void] =
  s.writeHandler(msg)

proc newBufferStream*(writeHandler: WriteHandler): TestBufferStream =
  new result
  result.writeHandler = writeHandler
  result.initStream()
proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
  let testBufferStream = T(writeHandler: writeHandler)
  testBufferStream.initStream()
  testBufferStream

proc newBufferStream*(writeHandler: WriteHandler): TestBufferStream {.deprecated: "use TestBufferStream.new".}=
  TestBufferStream.new(writeHandler)

proc checkExpiringInternal(cond: proc(): bool {.raises: [Defect].} ): Future[bool] {.async, gcsafe.} =
  {.gcsafe.}:
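Note the genuine bug fix in checkTracker above: it previously ignored its name argument and always inspected the LPChannel tracker, so leak checks for other stream types were silent no-ops. After the fix each tracker is checked by name, e.g.:

  checkTracker(ConnectionTrackerName)
  checkTracker(BufferStreamTrackerName)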
@@ -25,7 +25,7 @@ proc getPubSubPeer(p: TestGossipSub, peerId: PeerID): PubSubPeer =
  proc dropConn(peer: PubSubPeer) =
    discard # we don't care about it here yet

  let pubSubPeer = newPubSubPeer(peerId, getConn, dropConn, nil, GossipSubCodec)
  let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, nil, GossipSubCodec)
  debug "created new pubsub peer", peerId

  p.peers[peerId] = pubSubPeer
@@ -56,7 +56,7 @@ suite "GossipSub internal":
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -97,7 +97,7 @@ suite "GossipSub internal":
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -123,7 +123,7 @@ suite "GossipSub internal":
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var scoreLow = -11'f64
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -153,7 +153,7 @@ suite "GossipSub internal":
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
      conn.peerInfo = peerInfo
@@ -180,7 +180,7 @@ suite "GossipSub internal":

    var conns = newSeq[Connection]()
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      var peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -209,7 +209,7 @@ suite "GossipSub internal":

    var conns = newSeq[Connection]()
    for i in 0..<6:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
      conn.peerInfo = peerInfo
@@ -243,7 +243,7 @@ suite "GossipSub internal":

    var conns = newSeq[Connection]()
    for i in 0..<6:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -277,7 +277,7 @@ suite "GossipSub internal":

    # generate mesh and fanout peers
    for i in 0..<30:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -291,7 +291,7 @@ suite "GossipSub internal":

    # generate gossipsub (free standing) peers
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -302,7 +302,7 @@ suite "GossipSub internal":
    # generate messages
    var seqno = 0'u64
    for i in 0..5:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -335,7 +335,7 @@ suite "GossipSub internal":
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0..<30:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -349,7 +349,7 @@ suite "GossipSub internal":
    # generate messages
    var seqno = 0'u64
    for i in 0..5:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -375,7 +375,7 @@ suite "GossipSub internal":
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0..<30:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -390,7 +390,7 @@ suite "GossipSub internal":
    # generate messages
    var seqno = 0'u64
    for i in 0..5:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -416,7 +416,7 @@ suite "GossipSub internal":
    gossipSub.fanout[topic] = initHashSet[PubSubPeer]()
    var conns = newSeq[Connection]()
    for i in 0..<30:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -431,7 +431,7 @@ suite "GossipSub internal":
    # generate messages
    var seqno = 0'u64
    for i in 0..5:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -454,7 +454,7 @@ suite "GossipSub internal":
    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0..<30:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -464,7 +464,7 @@ suite "GossipSub internal":
    # generate messages
    var seqno = 0'u64
    for i in 0..5:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -488,7 +488,7 @@ suite "GossipSub internal":
    let topic = "foobar"
    var conns = newSeq[Connection]()
    for i in 0..<30:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -521,7 +521,7 @@ suite "GossipSub internal":
      tooManyTopics &= "topic" & $i
    let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)

    let conn = newBufferStream(noop)
    let conn = TestBufferStream.new(noop)
    let peerInfo = randomPeerInfo()
    conn.peerInfo = peerInfo
    let peer = gossipSub.getPubSubPeer(peerInfo.peerId)
@@ -544,7 +544,7 @@ suite "GossipSub internal":
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -575,7 +575,7 @@ suite "GossipSub internal":
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0..<15:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conns &= conn
      let peerInfo = randomPeerInfo()
      conn.peerInfo = peerInfo
@@ -617,7 +617,7 @@ suite "GossipSub internal":
    var conns = newSeq[Connection]()
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    for i in 0..<6:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.In
      conns &= conn
      let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
@@ -629,7 +629,7 @@ suite "GossipSub internal":
      gossipSub.mesh[topic].incl(peer)

    for i in 0..<7:
      let conn = newBufferStream(noop)
      let conn = TestBufferStream.new(noop)
      conn.transportDir = Direction.Out
      conns &= conn
      let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
@@ -665,7 +665,7 @@ suite "GossipSub internal":
    gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
    gossipSub.mesh[topic] = initHashSet[PubSubPeer]()
|
||||
for i in 0..<30:
|
||||
let conn = newBufferStream(noop)
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerInfo = randomPeerInfo()
|
||||
conn.peerInfo = peerInfo
|
||||
@ -676,7 +676,7 @@ suite "GossipSub internal":
|
||||
|
||||
block:
|
||||
# should ignore no budget peer
|
||||
let conn = newBufferStream(noop)
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerInfo = randomPeerInfo()
|
||||
conn.peerInfo = peerInfo
|
||||
@ -692,7 +692,7 @@ suite "GossipSub internal":
|
||||
|
||||
block:
|
||||
# given duplicate ihave should generate only one iwant
|
||||
let conn = newBufferStream(noop)
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerInfo = randomPeerInfo()
|
||||
conn.peerInfo = peerInfo
|
||||
@ -707,7 +707,7 @@ suite "GossipSub internal":
|
||||
|
||||
block:
|
||||
# given duplicate iwant should generate only one message
|
||||
let conn = newBufferStream(noop)
|
||||
let conn = TestBufferStream.new(noop)
|
||||
conns &= conn
|
||||
let peerInfo = randomPeerInfo()
|
||||
conn.peerInfo = peerInfo
|
||||
|
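Every hunk above makes the same mechanical substitution: the factory proc newBufferStream(noop) becomes the constructor-style TestBufferStream.new(noop). For reference, a minimal sketch of the new-side pattern as it would appear inside one of these test bodies, assuming the suite's noop write handler and randomPeerInfo helper:

proc noop(data: seq[byte]) {.async, gcsafe.} = discard  # write handler used throughout the tests

let conn = TestBufferStream.new(noop)  # constructor style; was newBufferStream(noop)
conns &= conn                          # keep a handle so the test can close it later
let peerInfo = randomPeerInfo()
conn.peerInfo = peerInfo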
26
vendor/nim-libp2p/tests/testbufferstream.nim
vendored
@@ -13,7 +13,7 @@ suite "BufferStream":
check getTracker(BufferStreamTrackerName).isLeaked() == false

asyncTest "push data to buffer":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0
var data = "12345"
await buff.pushData(data.toBytes())
@@ -21,7 +21,7 @@ suite "BufferStream":
await buff.close()

asyncTest "push and wait":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

let fut0 = buff.pushData("1234".toBytes())
@@ -38,7 +38,7 @@ suite "BufferStream":
await buff.close()

asyncTest "read with size":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

await buff.pushData("12345".toBytes())
@@ -48,7 +48,7 @@ suite "BufferStream":
await buff.close()

asyncTest "readExactly":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

await buff.pushData("12345".toBytes())
@@ -59,7 +59,7 @@ suite "BufferStream":
await buff.close()

asyncTest "readExactly raises":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

await buff.pushData("123".toBytes())
@@ -71,7 +71,7 @@ suite "BufferStream":
await readFut

asyncTest "readOnce":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

var data: array[3, byte]
@@ -84,7 +84,7 @@ suite "BufferStream":
await buff.close()

asyncTest "reads should happen in order":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

proc writer1() {.async.} =
@@ -126,7 +126,7 @@ suite "BufferStream":
await writerFut2

asyncTest "small reads":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

var str: string
@@ -152,7 +152,7 @@ suite "BufferStream":
await buff.close()

asyncTest "read all data after eof":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

await buff.pushData("12345".toBytes())
@@ -176,7 +176,7 @@ suite "BufferStream":
await buff.close() # all data should still be read after close

asyncTest "read more data after eof":
let buff = newBufferStream()
let buff = BufferStream.new()
check buff.len == 0

await buff.pushData("12345".toBytes())
@@ -200,7 +200,7 @@ suite "BufferStream":
await buff.close() # all data should still be read after close

asyncTest "shouldn't get stuck on close":
var stream = newBufferStream()
var stream = BufferStream.new()
var
fut = stream.pushData(toBytes("hello"))
fut2 = stream.pushData(toBytes("again"))
@@ -214,7 +214,7 @@ suite "BufferStream":
await stream.close()

asyncTest "no push after close":
var stream = newBufferStream()
var stream = BufferStream.new()
await stream.pushData("123".toBytes())
var data: array[3, byte]
await stream.readExactly(addr data[0], data.len)
@@ -224,7 +224,7 @@ suite "BufferStream":
await stream.pushData("123".toBytes())

asyncTest "no concurrent pushes":
var stream = newBufferStream()
var stream = BufferStream.new()
await stream.pushData("123".toBytes())
let push = stream.pushData("123".toBytes())
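These BufferStream hunks rename only the constructor; pushData, readExactly and close are untouched. A condensed sketch of the round trip the tests exercise, as it would sit inside an asyncTest body with chronos and stew/byteutils imported as in the suite:

let buff = BufferStream.new()            # was newBufferStream()
check buff.len == 0
await buff.pushData("12345".toBytes())   # writer side
var data: array[5, byte]
await buff.readExactly(addr data[0], data.len)  # reader side, exactly 5 bytes
await buff.close()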
6
vendor/nim-libp2p/tests/testconnection.nim
vendored
@@ -6,13 +6,13 @@ import ./helpers

suite "Connection":
asyncTest "close":
var conn = newBufferStream()
var conn = BufferStream.new()
await conn.close()
check:
conn.closed == true

asyncTest "parent close":
var buf = newBufferStream()
var buf = BufferStream.new()
var conn = buf

await conn.close()
@@ -21,7 +21,7 @@ suite "Connection":
buf.closed == true

asyncTest "child close":
var buf = newBufferStream()
var buf = BufferStream.new()
var conn = buf

await buf.close()
15
vendor/nim-libp2p/tests/testidentify.nim
vendored
@@ -8,7 +8,8 @@ import ../libp2p/[protocols/identify,
multistream,
transports/transport,
transports/tcptransport,
crypto/crypto]
crypto/crypto,
upgrademngrs/upgrade]
import ./helpers

when defined(nimHasUsed): {.used.}
@@ -38,14 +39,14 @@ suite "Identify":
remotePeerInfo = PeerInfo.init(
remoteSecKey, [ma], ["/test/proto1/1.0.0", "/test/proto2/1.0.0"])

transport1 = TcpTransport.init()
transport2 = TcpTransport.init()
transport1 = TcpTransport.init(upgrade = Upgrade())
transport2 = TcpTransport.init(upgrade = Upgrade())

identifyProto1 = newIdentify(remotePeerInfo)
identifyProto2 = newIdentify(remotePeerInfo)
identifyProto1 = Identify.new(remotePeerInfo)
identifyProto2 = Identify.new(remotePeerInfo)

msListen = newMultistream()
msDial = newMultistream()
msListen = MultistreamSelect.new()
msDial = MultistreamSelect.new()

asyncTeardown:
await conn.close()
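The identify test setup now hands each transport an explicit upgrader and moves to Type.new constructors throughout. The new-side setup, condensed from the hunk above:

let
  transport1 = TcpTransport.init(upgrade = Upgrade())   # the upgrader is now an explicit wiring step
  transport2 = TcpTransport.init(upgrade = Upgrade())
  identifyProto1 = Identify.new(remotePeerInfo)         # was newIdentify(remotePeerInfo)
  msListen = MultistreamSelect.new()                    # was newMultistream()
  msDial = MultistreamSelect.new()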
29
vendor/nim-libp2p/tests/testinterop.nim
vendored
@@ -1,35 +1,12 @@
import options, tables
import chronos, chronicles, stew/byteutils
import helpers
import ../libp2p/[daemon/daemonapi,
protobuf/minprotobuf,
vbuffer,
multiaddress,
multicodec,
cid,
varint,
multihash,
builders,
peerid,
peerinfo,
switch,
stream/connection,
muxers/muxer,
crypto/crypto,
muxers/mplex/mplex,
muxers/muxer,
protocols/protocol,
protocols/identify,
transports/transport,
transports/tcptransport,
protocols/secure/secure,
protocols/pubsub/pubsub,
protocols/pubsub/floodsub,
protocols/pubsub/gossipsub]
import ../libp2p
import ../libp2p/[daemon/daemonapi, varint]

type
# TODO: Unify both PeerInfo structs
NativePeerInfo = peerinfo.PeerInfo
NativePeerInfo = libp2p.PeerInfo
DaemonPeerInfo = daemonapi.PeerInfo

proc writeLp*(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.} =
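After this rewrite the interop test pulls the whole stack through the umbrella module, keeping only the daemon API and varint as explicit imports; the two PeerInfo types are then disambiguated by module name. The resulting preamble, condensed:

import ../libp2p                               # umbrella module re-exporting the stack
import ../libp2p/[daemon/daemonapi, varint]

type
  NativePeerInfo = libp2p.PeerInfo             # the native stack's PeerInfo
  DaemonPeerInfo = daemonapi.PeerInfo          # the go-daemon's PeerInfo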
109
vendor/nim-libp2p/tests/testmplex.nim
vendored
@@ -9,6 +9,7 @@ import ../libp2p/[errors,
muxers/mplex/mplex,
muxers/mplex/coder,
muxers/mplex/lpchannel,
upgrademngrs/upgrade,
vbuffer,
varint]

@@ -25,7 +26,7 @@ suite "Mplex":
proc encHandler(msg: seq[byte]) {.async.} =
check msg == fromHex("000873747265616d2031")

let conn = newBufferStream(encHandler)
let conn = TestBufferStream.new(encHandler)
await conn.writeMsg(0, MessageType.New, ("stream 1").toBytes)
await conn.close()

@@ -33,7 +34,7 @@ suite "Mplex":
proc encHandler(msg: seq[byte]) {.async.} =
check msg == fromHex("88010873747265616d2031")

let conn = newBufferStream(encHandler)
let conn = TestBufferStream.new(encHandler)
await conn.writeMsg(17, MessageType.New, ("stream 1").toBytes)
await conn.close()

@@ -41,7 +42,7 @@ suite "Mplex":
proc encHandler(msg: seq[byte]) {.async.} =
check msg == fromHex("020873747265616d2031")

let conn = newBufferStream(encHandler)
let conn = TestBufferStream.new(encHandler)
await conn.writeMsg(0, MessageType.MsgOut, ("stream 1").toBytes)
await conn.close()

@@ -49,12 +50,12 @@ suite "Mplex":
proc encHandler(msg: seq[byte]) {.async.} =
check msg == fromHex("8a010873747265616d2031")

let conn = newBufferStream(encHandler)
let conn = TestBufferStream.new(encHandler)
await conn.writeMsg(17, MessageType.MsgOut, ("stream 1").toBytes)
await conn.close()

asyncTest "decode header with channel id 0":
let stream = newBufferStream()
let stream = BufferStream.new()
let conn = stream
await stream.pushData(fromHex("000873747265616d2031"))
let msg = await conn.readMsg()
@@ -64,7 +65,7 @@ suite "Mplex":
await conn.close()

asyncTest "decode header and body with channel id 0":
let stream = newBufferStream()
let stream = BufferStream.new()
let conn = stream
await stream.pushData(fromHex("021668656C6C6F2066726F6D206368616E6E656C20302121"))
let msg = await conn.readMsg()
@@ -75,7 +76,7 @@ suite "Mplex":
await conn.close()

asyncTest "decode header and body with channel id other than 0":
let stream = newBufferStream()
let stream = BufferStream.new()
let conn = stream
await stream.pushData(fromHex("8a011668656C6C6F2066726F6D206368616E6E656C20302121"))
let msg = await conn.readMsg()
@@ -89,7 +90,7 @@ suite "Mplex":
asyncTest "(local close) - should close for write":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

await chann.close()
@@ -101,7 +102,7 @@ suite "Mplex":

asyncTest "(local close) - should allow reads until remote closes":
let
conn = newBufferStream(
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
discard,
)
@@ -128,7 +129,7 @@ suite "Mplex":

asyncTest "(remote close) - channel should close for reading by remote":
let
conn = newBufferStream(
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
discard,
)
@@ -151,7 +152,7 @@ suite "Mplex":
asyncTest "(remote close) - channel should allow writing on remote close":
let
testData = "Hello!".toBytes
conn = newBufferStream(
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
discard
)
@@ -167,7 +168,7 @@ suite "Mplex":
asyncTest "should not allow pushing data to channel when remote end closed":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
await chann.pushEof()
var buf: array[1, byte]
@@ -184,7 +185,7 @@ suite "Mplex":
asyncTest "channel should fail reading":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

await chann.reset()
@@ -197,7 +198,7 @@ suite "Mplex":
asyncTest "reset should complete read":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

var data = newSeq[byte](1)
@@ -212,7 +213,7 @@ suite "Mplex":
asyncTest "reset should complete pushData":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

proc pushes() {.async.} = # pushes don't hang on reset
@@ -231,7 +232,7 @@ suite "Mplex":
asyncTest "reset should complete both read and push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

var data = newSeq[byte](1)
@@ -246,7 +247,7 @@ suite "Mplex":
asyncTest "reset should complete both read and pushes":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

var data = newSeq[byte](1)
@@ -271,7 +272,7 @@ suite "Mplex":
asyncTest "reset should complete both read and push with cancel":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

var data = newSeq[byte](1)
@@ -285,7 +286,7 @@ suite "Mplex":
asyncTest "should complete both read and push after reset":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

var data = newSeq[byte](1)
@@ -303,7 +304,7 @@ suite "Mplex":
asyncTest "reset should complete ongoing push without reader":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

await chann.pushData(@[0'u8])
@@ -315,7 +316,7 @@ suite "Mplex":
asyncTest "reset should complete ongoing read without a push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

var data = newSeq[byte](1)
@@ -327,7 +328,7 @@ suite "Mplex":
asyncTest "reset should allow all reads and pushes to complete":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)

var data = newSeq[byte](1)
@@ -356,7 +357,7 @@ suite "Mplex":
asyncTest "channel should fail writing":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
await chann.reset()

@@ -368,7 +369,7 @@ suite "Mplex":
asyncTest "channel should reset on timeout":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let
conn = newBufferStream(writeHandler)
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(
1, conn, true, timeout = 100.millis)

@@ -379,7 +380,7 @@ suite "Mplex":
asyncTest "read/write receiver":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

proc acceptHandler() {.async, gcsafe.} =
@@ -395,7 +396,7 @@ suite "Mplex":
await mplexListen.close()

let acceptFut = acceptHandler()
let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -416,7 +417,7 @@ suite "Mplex":
asyncTest "read/write receiver lazy":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

proc acceptHandler() {.async, gcsafe.} =
@@ -432,7 +433,7 @@ suite "Mplex":
await mplexListen.close()

let acceptFut = acceptHandler()
let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -460,7 +461,7 @@ suite "Mplex":
for _ in 0..<MaxMsgSize:
bigseq.add(uint8(rand(uint('A')..uint('z'))))

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

proc acceptHandler() {.async, gcsafe.} =
@@ -484,7 +485,7 @@ suite "Mplex":
check false

let acceptFut = acceptHandler()
let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -507,7 +508,7 @@ suite "Mplex":
asyncTest "read/write initiator":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

proc acceptHandler() {.async, gcsafe.} =
@@ -521,7 +522,7 @@ suite "Mplex":
await mplexListen.handle()
await mplexListen.close()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let acceptFut = acceptHandler()
@@ -543,7 +544,7 @@ suite "Mplex":
asyncTest "multiple streams":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

let done = newFuture[void]()
@@ -563,7 +564,7 @@ suite "Mplex":
await mplexListen.handle()
await mplexListen.close()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let acceptFut = acceptHandler()
@@ -587,7 +588,7 @@ suite "Mplex":
asyncTest "multiple read/write streams":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

let done = newFuture[void]()
@@ -608,7 +609,7 @@ suite "Mplex":
await mplexListen.handle()
await mplexListen.close()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let acceptFut = acceptHandler()
@@ -634,7 +635,7 @@ suite "Mplex":
asyncTest "channel closes listener with EOF":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
let conn = await transport1.accept()
@@ -656,7 +657,7 @@ suite "Mplex":

await transport1.start(ma)
let acceptFut = acceptHandler()
let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -681,7 +682,7 @@ suite "Mplex":

asyncTest "channel closes dialer with EOF":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())

var count = 0
var done = newFuture[void]()
@@ -704,7 +705,7 @@ suite "Mplex":
await transport1.start(ma)
let acceptFut = acceptHandler()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -746,7 +747,7 @@ suite "Mplex":

asyncTest "dialing mplex closes both ends":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())

var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
@@ -763,7 +764,7 @@ suite "Mplex":
await transport1.start(ma)
let acceptFut = acceptHandler()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -788,7 +789,7 @@ suite "Mplex":

asyncTest "listening mplex closes both ends":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())

var mplexListen: Mplex
var listenStreams: seq[Connection]
@@ -806,7 +807,7 @@ suite "Mplex":
await transport1.start(ma)
let acceptFut = acceptHandler()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -831,7 +832,7 @@ suite "Mplex":

asyncTest "canceling mplex handler closes both ends":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())

var mplexHandle: Future[void]
var listenStreams: seq[Connection]
@@ -850,7 +851,7 @@ suite "Mplex":
await transport1.start(ma)
let acceptFut = acceptHandler()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -874,7 +875,7 @@ suite "Mplex":

asyncTest "closing dialing connection should close both ends":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())

var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
@@ -891,7 +892,7 @@ suite "Mplex":
await transport1.start(ma)
let acceptFut = acceptHandler()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -916,7 +917,7 @@ suite "Mplex":

asyncTest "canceling listening connection should close both ends":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1 = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())

var listenConn: Connection
var listenStreams: seq[Connection]
@@ -934,7 +935,7 @@ suite "Mplex":
await transport1.start(ma)
let acceptFut = acceptHandler()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let mplexDial = Mplex.init(conn)
@@ -962,7 +963,7 @@ suite "Mplex":
asyncTest "channel should be able to handle erratic read/writes":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

var complete = newFuture[void]()
@@ -983,7 +984,7 @@ suite "Mplex":
await mplexListen.handle()
await mplexListen.close()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let acceptFut = acceptHandler()
@@ -1034,7 +1035,7 @@ suite "Mplex":
asyncTest "channel should handle 1 byte read/write":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

var complete = newFuture[void]()
@@ -1052,7 +1053,7 @@ suite "Mplex":
await mplexListen.handle()
await mplexListen.close()

let transport2: TcpTransport = TcpTransport.init()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let acceptFut = acceptHandler()
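All the mplex end-to-end tests above share one skeleton once the transports carry an upgrader. A condensed sketch of that skeleton inside an asyncTest body, using only calls that appear in the hunks:

let ma = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1 = TcpTransport.init(upgrade = Upgrade())  # listener
let listenFut = transport1.start(ma)

let transport2 = TcpTransport.init(upgrade = Upgrade())  # dialer
let conn = await transport2.dial(transport1.ma)
let mplexDial = Mplex.init(conn)                         # mux the raw connection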
43
vendor/nim-libp2p/tests/testmultistream.nim
vendored
@@ -7,7 +7,8 @@ import ../libp2p/errors,
../libp2p/multiaddress,
../libp2p/transports/transport,
../libp2p/transports/tcptransport,
../libp2p/protocols/protocol
../libp2p/protocols/protocol,
../libp2p/upgrademngrs/upgrade

import ./helpers

@@ -171,13 +172,13 @@ suite "Multistream select":
checkTrackers()

asyncTest "test select custom proto":
let ms = newMultistream()
let ms = MultistreamSelect.new()
let conn = newTestSelectStream()
check (await ms.select(conn, @["/test/proto/1.0.0"])) == "/test/proto/1.0.0"
await conn.close()

asyncTest "test handle custom proto":
let ms = newMultistream()
let ms = MultistreamSelect.new()
let conn = newTestSelectStream()

var protocol: LPProtocol = new LPProtocol
@@ -192,7 +193,7 @@ suite "Multistream select":
await ms.handle(conn)

asyncTest "test handle `ls`":
let ms = newMultistream()
let ms = MultistreamSelect.new()

proc testLsHandler(proto: seq[byte]) {.async, gcsafe.} # forward declaration
let conn = Connection(newTestLsStream(testLsHandler))
@@ -213,7 +214,7 @@ suite "Multistream select":
await done.wait(5.seconds)

asyncTest "test handle `na`":
let ms = newMultistream()
let ms = MultistreamSelect.new()

proc testNaHandler(msg: string): Future[void] {.async, gcsafe.}
let conn = newTestNaStream(testNaHandler)
@@ -244,10 +245,10 @@ suite "Multistream select":
await conn.close()

protocol.handler = testHandler
let msListen = newMultistream()
let msListen = MultistreamSelect.new()
msListen.addHandler("/test/proto/1.0.0", protocol)

let transport1: TcpTransport = TcpTransport.init()
let transport1 = TcpTransport.init(upgrade = Upgrade())
asyncCheck transport1.start(ma)

proc acceptHandler(): Future[void] {.async, gcsafe.} =
@@ -257,8 +258,8 @@ suite "Multistream select":

let handlerWait = acceptHandler()

let msDial = newMultistream()
let transport2: TcpTransport = TcpTransport.init()
let msDial = MultistreamSelect.new()
let transport2 = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

check (await msDial.select(conn, "/test/proto/1.0.0")) == true
@@ -278,7 +279,7 @@ suite "Multistream select":
let
handlerWait = newFuture[void]()

let msListen = newMultistream()
let msListen = MultistreamSelect.new()
var protocol: LPProtocol = new LPProtocol
protocol.handler = proc(conn: Connection, proto: string) {.async, gcsafe.} =
# never reached
@@ -294,7 +295,7 @@ suite "Multistream select":
msListen.addHandler("/test/proto1/1.0.0", protocol)
msListen.addHandler("/test/proto2/1.0.0", protocol)

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let listenFut = transport1.start(ma)

proc acceptHandler(): Future[void] {.async, gcsafe.} =
@@ -309,8 +310,8 @@ suite "Multistream select":
await conn.close()

let acceptFut = acceptHandler()
let msDial = newMultistream()
let transport2: TcpTransport = TcpTransport.init()
let msDial = MultistreamSelect.new()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

let ls = await msDial.list(conn)
@@ -336,10 +337,10 @@ suite "Multistream select":
await conn.close()

protocol.handler = testHandler
let msListen = newMultistream()
let msListen = MultistreamSelect.new()
msListen.addHandler("/test/proto/1.0.0", protocol)

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
asyncCheck transport1.start(ma)

proc acceptHandler(): Future[void] {.async, gcsafe.} =
@@ -347,8 +348,8 @@ suite "Multistream select":
await msListen.handle(conn)

let acceptFut = acceptHandler()
let msDial = newMultistream()
let transport2: TcpTransport = TcpTransport.init()
let msDial = MultistreamSelect.new()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

check (await msDial.select(conn,
@@ -373,11 +374,11 @@ suite "Multistream select":
await conn.close()

protocol.handler = testHandler
let msListen = newMultistream()
let msListen = MultistreamSelect.new()
msListen.addHandler("/test/proto1/1.0.0", protocol)
msListen.addHandler("/test/proto2/1.0.0", protocol)

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
asyncCheck transport1.start(ma)

proc acceptHandler(): Future[void] {.async, gcsafe.} =
@@ -385,8 +386,8 @@ suite "Multistream select":
await msListen.handle(conn)

let acceptFut = acceptHandler()
let msDial = newMultistream()
let transport2: TcpTransport = TcpTransport.init()
let msDial = MultistreamSelect.new()
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let conn = await transport2.dial(transport1.ma)

check (await msDial.select(conn,
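The multistream hunks pair the renamed MultistreamSelect.new constructor with the unchanged addHandler/select flow. A condensed sketch of both ends, drawn from the hunks above:

let msListen = MultistreamSelect.new()        # was newMultistream()
msListen.addHandler("/test/proto/1.0.0", protocol)

let msDial = MultistreamSelect.new()
let conn = await transport2.dial(transport1.ma)
check (await msDial.select(conn, "/test/proto/1.0.0")) == true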
154
vendor/nim-libp2p/tests/testnoise.nim
vendored
@@ -29,7 +29,9 @@ import ../libp2p/[switch,
muxers/mplex/mplex,
protocols/secure/noise,
protocols/secure/secio,
protocols/secure/secure]
protocols/secure/secure,
upgrademngrs/muxedupgrade,
connmanager]
import ./helpers

const
@@ -51,23 +53,31 @@ method init(p: TestProto) {.gcsafe.} =
proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switch, PeerInfo) =
var peerInfo: PeerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
peerInfo.addrs.add(ma)
let identify = newIdentify(peerInfo)

proc createMplex(conn: Connection): Muxer =
result = Mplex.init(conn)

let mplexProvider = newMuxerProvider(createMplex, MplexCodec)
let transports = @[Transport(TcpTransport.init())]
let muxers = [(MplexCodec, mplexProvider)].toTable()
let secureManagers = if secio:
[Secure(newSecio(rng, peerInfo.privateKey))]
let
identify = Identify.new(peerInfo)
mplexProvider = MuxerProvider.new(createMplex, MplexCodec)
muxers = [(MplexCodec, mplexProvider)].toTable()
secureManagers = if secio:
[Secure(Secio.new(rng, peerInfo.privateKey))]
else:
[Secure(newNoise(rng, peerInfo.privateKey, outgoing = outgoing))]
let switch = newSwitch(peerInfo,
transports,
identify,
muxers,
secureManagers)
[Secure(Noise.new(rng, peerInfo.privateKey, outgoing = outgoing))]
connManager = ConnManager.init()
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.init(identify, muxers, secureManagers, connManager, ms)
transports = @[Transport(TcpTransport.init(upgrade = muxedUpgrade))]

let switch = newSwitch(
peerInfo,
transports,
identify,
muxers,
secureManagers,
connManager,
ms)
result = (switch, peerInfo)

suite "Noise":
@@ -78,9 +88,9 @@ suite "Noise":
let
server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
serverInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [server])
serverNoise = newNoise(rng, serverInfo.privateKey, outgoing = false)
serverNoise = Noise.new(rng, serverInfo.privateKey, outgoing = false)

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
asyncCheck transport1.start(server)

proc acceptHandler() {.async.} =
@@ -94,9 +104,9 @@ suite "Noise":

let
acceptFut = acceptHandler()
transport2: TcpTransport = TcpTransport.init()
transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
clientInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [transport1.ma])
clientNoise = newNoise(rng, clientInfo.privateKey, outgoing = true)
clientNoise = Noise.new(rng, clientInfo.privateKey, outgoing = true)
conn = await transport2.dial(transport1.ma)
sconn = await clientNoise.secure(conn, true)

@@ -115,10 +125,10 @@ suite "Noise":
let
server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
serverInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [server])
serverNoise = newNoise(rng, serverInfo.privateKey, outgoing = false)
serverNoise = Noise.new(rng, serverInfo.privateKey, outgoing = false)

let
transport1: TcpTransport = TcpTransport.init()
transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())

asyncCheck transport1.start(server)

@@ -134,9 +144,9 @@ suite "Noise":

let
handlerWait = acceptHandler()
transport2: TcpTransport = TcpTransport.init()
transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
clientInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [transport1.ma])
clientNoise = newNoise(rng, clientInfo.privateKey, outgoing = true, commonPrologue = @[1'u8, 2'u8, 3'u8])
clientNoise = Noise.new(rng, clientInfo.privateKey, outgoing = true, commonPrologue = @[1'u8, 2'u8, 3'u8])
conn = await transport2.dial(transport1.ma)
var sconn: Connection = nil
expect(NoiseDecryptTagError):
@@ -151,10 +161,10 @@ suite "Noise":
let
server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
serverInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [server])
serverNoise = newNoise(rng, serverInfo.privateKey, outgoing = false)
serverNoise = Noise.new(rng, serverInfo.privateKey, outgoing = false)
readTask = newFuture[void]()

let transport1: TcpTransport = TcpTransport.init()
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
asyncCheck transport1.start(server)

proc acceptHandler() {.async, gcsafe.} =
@@ -170,9 +180,9 @@ suite "Noise":

let
acceptFut = acceptHandler()
transport2: TcpTransport = TcpTransport.init()
transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
clientInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [transport1.ma])
clientNoise = newNoise(rng, clientInfo.privateKey, outgoing = true)
clientNoise = Noise.new(rng, clientInfo.privateKey, outgoing = true)
conn = await transport2.dial(transport1.ma)
sconn = await clientNoise.secure(conn, true)

@@ -187,7 +197,7 @@ suite "Noise":
let
server = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
serverInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [server])
serverNoise = newNoise(rng, serverInfo.privateKey, outgoing = false)
serverNoise = Noise.new(rng, serverInfo.privateKey, outgoing = false)
readTask = newFuture[void]()

var hugePayload = newSeq[byte](0xFFFFF)
@@ -195,7 +205,7 @@ suite "Noise":
trace "Sending huge payload", size = hugePayload.len

let
transport1: TcpTransport = TcpTransport.init()
transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
listenFut = transport1.start(server)

proc acceptHandler() {.async, gcsafe.} =
@@ -209,9 +219,9 @@ suite "Noise":

let
acceptFut = acceptHandler()
transport2: TcpTransport = TcpTransport.init()
transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
clientInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get(), [transport1.ma])
clientNoise = newNoise(rng, clientInfo.privateKey, outgoing = true)
clientNoise = Noise.new(rng, clientInfo.privateKey, outgoing = true)
conn = await transport2.dial(transport1.ma)
sconn = await clientNoise.secure(conn, true)

@@ -278,89 +288,3 @@ suite "Noise":
switch2.stop())

await allFuturesThrowing(awaiters)

# test "interop with rust noise":
# when true: # disable cos in CI we got no interop server/client
# proc testListenerDialer(): Future[bool] {.async.} =
# const
# proto = "/noise/xx/25519/chachapoly/sha256/0.1.0"

# let
# local = Multiaddress.init("/ip4/0.0.0.0/tcp/23456")
# info = PeerInfo.init(PrivateKey.random(ECDSA), [local])
# noise = newNoise(info.privateKey)
# ms = newMultistream()
# transport = TcpTransport.newTransport()

# proc connHandler(conn: Connection) {.async, gcsafe.} =
# try:
# await ms.handle(conn)
# trace "ms.handle exited"
# except:
# error getCurrentExceptionMsg()
# finally:
# await conn.close()

# ms.addHandler(proto, noise)

# let
# clientConn = await transport.listen(local, connHandler)
# await clientConn

# result = true

# check:
# waitFor(testListenerDialer()) == true

# test "interop with rust noise":
# when true: # disable cos in CI we got no interop server/client
# proc testListenerDialer(): Future[bool] {.async.} =
# const
# proto = "/noise/xx/25519/chachapoly/sha256/0.1.0"

# let
# local = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
# remote = Multiaddress.init("/ip4/127.0.0.1/tcp/23456")
# info = PeerInfo.init(PrivateKey.random(ECDSA), [local])
# noise = newNoise(info.privateKey)
# ms = newMultistream()
# transport = TcpTransport.newTransport()
# conn = await transport.dial(remote)

# check ms.select(conn, @[proto]).await == proto

# let
# sconn = await noise.secure(conn, true)

# # use sconn

# result = true

# check:
# waitFor(testListenerDialer()) == true

# test "interop with go noise":
# when true: # disable cos in CI we got no interop server/client
# proc testListenerDialer(): Future[bool] {.async.} =
# let
# local = Multiaddress.init("/ip4/0.0.0.0/tcp/23456")
# info = PeerInfo.init(PrivateKey.random(ECDSA), [local])
# noise = newNoise(info.privateKey)
# ms = newMultistream()
# transport = TcpTransport.newTransport()

# proc connHandler(conn: Connection) {.async, gcsafe.} =
# try:
# let seconn = await noise.secure(conn, false)
# trace "ms.handle exited"
# finally:
# await conn.close()

# let
# clientConn = await transport.listen(local, connHandler)
# await clientConn

# result = true

# check:
# waitFor(testListenerDialer()) == true
3
vendor/nim-libp2p/tests/testpeerstore.nim
vendored
@@ -1,5 +1,6 @@
import
std/[unittest2, tables, sequtils, sets],
unittest2,
std/[tables, sequtils, sets],
../libp2p/crypto/crypto,
../libp2p/multiaddress,
../libp2p/peerid,
147
vendor/nim-libp2p/tests/testping.nim
vendored
Normal file
@@ -0,0 +1,147 @@
import options, bearssl
import chronos, strutils
import ../libp2p/[protocols/identify,
protocols/ping,
multiaddress,
peerinfo,
wire,
peerid,
stream/connection,
multistream,
transports/transport,
transports/tcptransport,
crypto/crypto,
upgrademngrs/upgrade]
import ./helpers

when defined(nimHasUsed): {.used.}

suite "Ping":
teardown:
checkTrackers()

suite "handle ping message":
var
ma {.threadvar.}: MultiAddress
serverFut {.threadvar.}: Future[void]
acceptFut {.threadvar.}: Future[void]
pingProto1 {.threadvar.}: Ping
pingProto2 {.threadvar.}: Ping
transport1 {.threadvar.}: Transport
transport2 {.threadvar.}: Transport
msListen {.threadvar.}: MultistreamSelect
msDial {.threadvar.}: MultistreamSelect
conn {.threadvar.}: Connection
pingReceivedCount {.threadvar.}: int

asyncSetup:
ma = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

transport1 = TcpTransport.init(upgrade = Upgrade())
transport2 = TcpTransport.init(upgrade = Upgrade())

proc handlePing(peer: PeerInfo) {.async, gcsafe, closure.} =
inc pingReceivedCount
pingProto1 = Ping.new()
pingProto2 = Ping.new(handlePing)

msListen = newMultistream()
msDial = newMultistream()

pingReceivedCount = 0

asyncTeardown:
await conn.close()
await acceptFut
await transport1.stop()
await serverFut
await transport2.stop()

asyncTest "simple ping":
msListen.addHandler(PingCodec, pingProto1)
serverFut = transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
let c = await transport1.accept()
await msListen.handle(c)

acceptFut = acceptHandler()
conn = await transport2.dial(transport1.ma)

discard await msDial.select(conn, PingCodec)
let time = await pingProto2.ping(conn)

check not time.isZero()

asyncTest "networked cancel ping":
proc testPing(): Future[void] {.async.} =
let baseMa = Multiaddress.init("/ip4/127.0.0.1/tcp/0").tryGet()

let transport: TcpTransport = TcpTransport.init(upgrade = Upgrade())
let transportdialer: TcpTransport = TcpTransport.init(upgrade = Upgrade())
asyncCheck transport.start(baseMa)

proc acceptHandler() {.async, gcsafe.} =
let handler = Ping.new().handler
let conn = await transport.accept()
await handler(conn, "na")

let handlerWait = acceptHandler()

let streamTransport = await transportdialer.dial(transport.ma)

discard await pingProto2.ping(streamTransport)

for pollCount in 0..20:
#echo "Polling ", pollCount, " times"
let p = testPing()
for _ in 0..<pollCount:
if p.finished: break
poll()
if p.finished: break #We actually finished the sequence
await p.cancelAndWait()
check p.cancelled

asyncTest "ping callback":
msDial.addHandler(PingCodec, pingProto2)
serverFut = transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
let c = await transport1.accept()
discard await msListen.select(c, PingCodec)
discard await pingProto1.ping(c)

acceptFut = acceptHandler()
conn = await transport2.dial(transport1.ma)

await msDial.handle(conn)
check pingReceivedCount == 1

asyncTest "bad ping data ack":
type FakePing = ref object of LPProtocol
let fakePingProto = FakePing()
proc fakeHandle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
var
buf: array[32, byte]
fakebuf: array[32, byte]
await conn.readExactly(addr buf[0], 32)
await conn.write(addr fakebuf[0], 32)
fakePingProto.codec = PingCodec
fakePingProto.handler = fakeHandle

msListen.addHandler(PingCodec, fakePingProto)
serverFut = transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
let c = await transport1.accept()
await msListen.handle(c)

acceptFut = acceptHandler()
conn = await transport2.dial(transport1.ma)

discard await msDial.select(conn, PingCodec)
let p = pingProto2.ping(conn)
var raised = false
try:
discard await p
check false #should have raised
except WrongPingAckError:
raised = true
check raised
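The new testping.nim pins down the libp2p ping API that the Waku changes above build on: Ping.new optionally takes a handler that fires on inbound pings, the protocol mounts under PingCodec, and ping(conn) returns the measured round-trip time (the tests only check it is non-zero). The essential flow, condensed from the tests:

let pingProto1 = Ping.new()             # plain responder
let pingProto2 = Ping.new(handlePing)   # handlePing is invoked for every ping received
msListen.addHandler(PingCodec, pingProto1)

discard await msDial.select(conn, PingCodec)
let time = await pingProto2.ping(conn)  # measures the round trip
check not time.isZero()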
105
vendor/nim-libp2p/tests/testswitch.nim
vendored
@@ -1,6 +1,6 @@
{.used.}

import options, sequtils
import options, sequtils, sets
import chronos
import stew/byteutils
import nimcrypto/sysrand
@@ -47,10 +47,10 @@ suite "Switch":
testProto.codec = TestCodec
testProto.handler = handle

let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
let switch1 = newStandardSwitch()
switch1.mount(testProto)

let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
let switch2 = newStandardSwitch()
var awaiters: seq[Future[void]]
awaiters.add(await switch1.start())
awaiters.add(await switch2.start())
@@ -246,18 +246,18 @@ suite "Switch":

var step = 0
var kinds: set[ConnEventKind]
proc hook(peerId: PeerID, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerInfo: PeerInfo, event: ConnEvent) {.async, gcsafe.} =
kinds = kinds + {event.kind}
case step:
of 0:
check:
event.kind == ConnEventKind.Connected
peerId == switch1.peerInfo.peerId
peerInfo.peerId == switch1.peerInfo.peerId
of 1:
check:
event.kind == ConnEventKind.Disconnected

check peerId == switch1.peerInfo.peerId
check peerInfo.peerId == switch1.peerInfo.peerId
else:
check false

@@ -301,18 +301,18 @@ suite "Switch":

var step = 0
var kinds: set[ConnEventKind]
proc hook(peerId: PeerID, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerInfo: PeerInfo, event: ConnEvent) {.async, gcsafe.} =
kinds = kinds + {event.kind}
case step:
of 0:
check:
event.kind == ConnEventKind.Connected
peerId == switch2.peerInfo.peerId
peerInfo.peerId == switch2.peerInfo.peerId
of 1:
check:
event.kind == ConnEventKind.Disconnected

check peerId == switch2.peerInfo.peerId
check peerInfo.peerId == switch2.peerInfo.peerId
else:
check false

@@ -356,17 +356,17 @@ suite "Switch":

var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerID, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerInfo: PeerInfo, event: PeerEvent) {.async, gcsafe.} =
kinds = kinds + {event.kind}
case step:
of 0:
check:
event.kind == PeerEventKind.Joined
peerId == switch2.peerInfo.peerId
peerInfo.peerId == switch2.peerInfo.peerId
of 1:
check:
event.kind == PeerEventKind.Left
peerId == switch2.peerInfo.peerId
peerInfo.peerId == switch2.peerInfo.peerId
else:
check false

@@ -410,17 +410,17 @@ suite "Switch":

var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerID, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerInfo: PeerInfo, event: PeerEvent) {.async, gcsafe.} =
kinds = kinds + {event.kind}
case step:
of 0:
check:
event.kind == PeerEventKind.Joined
peerId == switch1.peerInfo.peerId
peerInfo.peerId == switch1.peerInfo.peerId
of 1:
check:
event.kind == PeerEventKind.Left
peerId == switch1.peerInfo.peerId
peerInfo.peerId == switch1.peerInfo.peerId
else:
check false

@@ -474,7 +474,7 @@ suite "Switch":

var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerID, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerInfo: PeerInfo, event: PeerEvent) {.async, gcsafe.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -535,7 +535,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
proc hook(peerId: PeerID, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerInfo: PeerInfo, event: ConnEvent) {.async, gcsafe.} =
case event.kind:
of ConnEventKind.Connected:
await onConnect
@@ -577,7 +577,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
proc hook(peerId: PeerID, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerInfo2: PeerInfo, event: ConnEvent) {.async, gcsafe.} =
case event.kind:
of ConnEventKind.Connected:
if conns == 5:
@@ -596,13 +596,13 @@ suite "Switch":
rng = rng))

switches[0].addConnEventHandler(hook, ConnEventKind.Connected)
switches[0].addConnEventHandler(hook, ConnEventKind.Disconnected)
awaiters.add(await switches[0].start())

for i in 1..5:
switches.add(newStandardSwitch(
privKey = some(peerInfo.privateKey),
rng = rng))
switches[i].addConnEventHandler(hook, ConnEventKind.Disconnected)
onConnect = switches[i].connect(switches[0].peerInfo.peerId, switches[0].peerInfo.addrs)
await onConnect
@ -620,13 +620,14 @@ suite "Switch":
|
||||
asyncTest "e2e canceling dial should not leak":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
|
||||
let transport = TcpTransport.init()
|
||||
let transport = TcpTransport.init(upgrade = Upgrade())
|
||||
await transport.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
try:
|
||||
let conn = await transport.accept()
|
||||
discard await conn.readLp(100)
|
||||
await conn.close()
|
||||
except CatchableError:
|
||||
discard
|
||||
|
||||
@ -656,7 +657,7 @@ suite "Switch":
|
||||
asyncTest "e2e closing remote conn should not leak":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
|
||||
let transport = TcpTransport.init()
|
||||
let transport = TcpTransport.init(upgrade = Upgrade())
|
||||
await transport.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
@ -712,13 +713,12 @@ suite "Switch":
|
||||
readers.add(closeReader())
|
||||
|
||||
await allFuturesThrowing(readers)
|
||||
await switch2.stop() # Otherwise this leaks
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
checkTracker(ChronosStreamTrackerName)
await allFuturesThrowing(
switch1.stop(),
switch2.stop())
await switch1.stop()
# this needs to go at end
await allFuturesThrowing(awaiters)
@ -844,3 +844,60 @@ suite "Switch":
await allFuturesThrowing(
allFutures(switches.mapIt( it.stop() )))
await allFuturesThrowing(awaiters)
asyncTest "e2e peer store":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.writeLp("Hello!")
finally:
await conn.close()
done.complete()
let testProto = new TestProto
testProto.codec = TestCodec
testProto.handler = handle
let switch1 = newStandardSwitch()
switch1.mount(testProto)
let switch2 = newStandardSwitch()
var awaiters: seq[Future[void]]
awaiters.add(await switch1.start())
awaiters.add(await switch2.start())
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
check switch1.isConnected(switch2.peerInfo.peerId)
check switch2.isConnected(switch1.peerInfo.peerId)
await conn.writeLp("Hello!")
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.close()
await allFuturesThrowing(
done.wait(5.seconds),
switch1.stop(),
switch2.stop())
# this needs to go at end
await allFuturesThrowing(awaiters)
check not switch1.isConnected(switch2.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
let storedInfo1 = switch1.peerStore.get(switch2.peerInfo.peerId)
let storedInfo2 = switch2.peerStore.get(switch1.peerInfo.peerId)
check:
storedInfo1.peerId == switch2.peerInfo.peerId
storedInfo2.peerId == switch1.peerInfo.peerId
storedInfo1.addrs.toSeq() == switch2.peerInfo.addrs
storedInfo2.addrs.toSeq() == switch1.peerInfo.addrs
storedInfo1.protos.toSeq() == switch2.peerInfo.protocols
storedInfo2.protos.toSeq() == switch1.peerInfo.protocols
23
vendor/nim-libp2p/tests/testtransport.nim
vendored
@ -5,6 +5,7 @@ import chronos, stew/byteutils
|
||||
import ../libp2p/[stream/connection,
|
||||
transports/transport,
|
||||
transports/tcptransport,
|
||||
upgrademngrs/upgrade,
|
||||
multiaddress,
|
||||
errors,
|
||||
wire]
|
||||
@ -17,7 +18,7 @@ suite "TCP transport":
|
||||
|
||||
asyncTest "test listener: handle write":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
let transport: TcpTransport = TcpTransport.init()
|
||||
let transport: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
asyncCheck transport.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
@ -39,7 +40,7 @@ suite "TCP transport":
|
||||
asyncTest "test listener: handle read":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
|
||||
let transport: TcpTransport = TcpTransport.init()
|
||||
let transport: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
asyncCheck transport.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
@ -77,7 +78,7 @@ suite "TCP transport":
|
||||
server.start()
|
||||
|
||||
let ma: MultiAddress = MultiAddress.init(server.sock.getLocalAddress()).tryGet()
|
||||
let transport: TcpTransport = TcpTransport.init()
|
||||
let transport: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
let conn = await transport.dial(ma)
|
||||
var msg = newSeq[byte](6)
|
||||
await conn.readExactly(addr msg[0], 6)
|
||||
@ -111,7 +112,7 @@ suite "TCP transport":
|
||||
server.start()
|
||||
|
||||
let ma: MultiAddress = MultiAddress.init(server.sock.getLocalAddress()).tryGet()
|
||||
let transport: TcpTransport = TcpTransport.init()
|
||||
let transport: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
let conn = await transport.dial(ma)
|
||||
await conn.write("Hello!")
|
||||
|
||||
@ -127,7 +128,7 @@ suite "TCP transport":
|
||||
asyncTest "e2e: handle write":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
|
||||
let transport1: TcpTransport = TcpTransport.init()
|
||||
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
await transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
@ -137,7 +138,7 @@ suite "TCP transport":
|
||||
|
||||
let handlerWait = acceptHandler()
|
||||
|
||||
let transport2: TcpTransport = TcpTransport.init()
|
||||
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
let conn = await transport2.dial(transport1.ma)
|
||||
var msg = newSeq[byte](6)
|
||||
await conn.readExactly(addr msg[0], 6)
|
||||
@ -152,7 +153,7 @@ suite "TCP transport":
|
||||
|
||||
asyncTest "e2e: handle read":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
let transport1: TcpTransport = TcpTransport.init()
|
||||
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
asyncCheck transport1.start(ma)
|
||||
|
||||
proc acceptHandler() {.async, gcsafe.} =
|
||||
@ -164,7 +165,7 @@ suite "TCP transport":
|
||||
|
||||
let handlerWait = acceptHandler()
|
||||
|
||||
let transport2: TcpTransport = TcpTransport.init()
|
||||
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
let conn = await transport2.dial(transport1.ma)
|
||||
await conn.write("Hello!")
|
||||
|
||||
@ -177,10 +178,10 @@ suite "TCP transport":
|
||||
asyncTest "e2e: handle dial cancellation":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
|
||||
let transport1: TcpTransport = TcpTransport.init()
|
||||
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
await transport1.start(ma)
|
||||
|
||||
let transport2: TcpTransport = TcpTransport.init()
|
||||
let transport2: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
let cancellation = transport2.dial(transport1.ma)
|
||||
|
||||
await cancellation.cancelAndWait()
|
||||
@ -192,7 +193,7 @@ suite "TCP transport":
|
||||
asyncTest "e2e: handle accept cancellation":
|
||||
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||
|
||||
let transport1: TcpTransport = TcpTransport.init()
|
||||
let transport1: TcpTransport = TcpTransport.init(upgrade = Upgrade())
|
||||
await transport1.start(ma)
|
||||
|
||||
let acceptHandler = transport1.accept()
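
Every TcpTransport construction in this file now passes an explicit upgrade manager instead of relying on a default. A minimal dial sketch under that convention (module paths follow the import hunk above; illustrative only, not the canonical API):

import chronos
import libp2p/[multiaddress, transports/tcptransport, upgrademngrs/upgrade]

proc dialOnce() {.async.} =
  let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()
  # The transport now takes its Upgrade instance explicitly at init time
  let transport = TcpTransport.init(upgrade = Upgrade())
  let conn = await transport.dial(ma)
  await conn.write("Hello!")
  await conn.close()
  await transport.stop()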
@ -255,7 +255,7 @@ when isMainModule:
elif conf.fleetV1 == test: connectToNodes(bridge.nodev1, WhisperNodesTest)
# Mount configured Waku v2 protocols
mountKeepalive(bridge.nodev2)
mountLibp2pPing(bridge.nodev2)
if conf.store:
mountStore(bridge.nodev2, persistMessages = false) # Bridge does not persist messages
@ -117,8 +117,8 @@ proc new*(T: type PeerManager, switch: Switch, storage: PeerStorage = nil): Peer
peerStore: WakuPeerStore.new(),
storage: storage)
proc peerHook(peerId: PeerID, event: ConnEvent): Future[void] {.gcsafe.} =
onConnEvent(pm, peerId, event)
proc peerHook(peerInfo: PeerInfo, event: ConnEvent): Future[void] {.gcsafe.} =
onConnEvent(pm, peerInfo.peerId, event)
pm.switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
pm.switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
@ -1,6 +1,7 @@
{.push raises: [Defect].}
import
std/[tables, sequtils, sets],
libp2p/builders,
libp2p/peerstore
@ -21,11 +22,38 @@ type
DisconnectBook* = object of PeerBook[int64] # Keeps track of when peers were disconnected in Unix timestamps
WakuPeerStore* = ref object of PeerStore
WakuPeerStore* = ref object
addressBook*: AddressBook
protoBook*: ProtoBook
keyBook*: KeyBook
connectionBook*: ConnectionBook
disconnectBook*: DisconnectBook
proc new*(T: type WakuPeerStore): WakuPeerStore =
var p: WakuPeerStore
new(p)
return p
##################
# Peer Store API #
##################
proc get*(peerStore: WakuPeerStore,
peerId: PeerID): StoredInfo =
## Get the stored information of a given peer.
StoredInfo(
peerId: peerId,
addrs: peerStore.addressBook.get(peerId),
protos: peerStore.protoBook.get(peerId),
publicKey: peerStore.keyBook.get(peerId)
)
proc peers*(peerStore: WakuPeerStore): seq[StoredInfo] =
## Get all the stored information of every peer.
let allKeys = concat(toSeq(keys(peerStore.addressBook.book)),
toSeq(keys(peerStore.protoBook.book)),
toSeq(keys(peerStore.keyBook.book))).toHashSet()
return allKeys.mapIt(peerStore.get(it))
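
Since WakuPeerStore no longer inherits from libp2p's PeerStore, its books are plain exported fields and get/peers assemble StoredInfo views on demand. A small read-side sketch against this module (the books are populated elsewhere, e.g. by the peer manager; import paths assumed):

import libp2p/peerstore  # StoredInfo, plus the PeerID re-export (assumed)

proc dumpPeers(peerStore: WakuPeerStore, somePeerId: PeerID) =
  # Enumerate every peer for which any book holds an entry
  for info in peerStore.peers():
    echo "peer: ", $info.peerId, " protos: ", info.protos
  # Or look a single peer up directly
  let one = peerStore.get(somePeerId)
  echo "addrs: ", one.addrs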
@ -9,6 +9,7 @@ import
libp2p/multiaddress,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
libp2p/protocols/ping,
# NOTE For TopicHandler, solve with exports?
libp2p/protocols/pubsub/rpc/messages,
libp2p/protocols/pubsub/pubsub,
@ -20,7 +21,6 @@ import
../protocol/waku_filter/waku_filter,
../protocol/waku_lightpush/waku_lightpush,
../protocol/waku_rln_relay/waku_rln_relay_types,
../protocol/waku_keepalive/waku_keepalive,
../utils/peers,
./storage/message/message_store,
./storage/peer/peer_storage,
@ -68,8 +68,8 @@ type
wakuSwap*: WakuSwap
wakuRlnRelay*: WakuRLNRelay
wakuLightPush*: WakuLightPush
wakuKeepalive*: WakuKeepalive
peerInfo*: PeerInfo
libp2pPing*: Ping
libp2pTransportLoops*: seq[Future[void]]
# TODO Revisit messages field indexing, as well as whether this should be Message or WakuMessage
messages*: seq[(Topic, WakuMessage)]
@ -530,19 +530,34 @@ proc mountLightPush*(node: WakuNode) =
node.switch.mount(node.wakuLightPush)
proc mountKeepalive*(node: WakuNode) =
info "mounting keepalive"
proc mountLibp2pPing*(node: WakuNode) =
info "mounting libp2p ping protocol"
node.wakuKeepalive = WakuKeepalive.new(node.peerManager, node.rng)
node.libp2pPing = Ping.new(rng = node.rng)
node.switch.mount(node.wakuKeepalive)
node.switch.mount(node.libp2pPing)
proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
while node.started:
# Keep all managed peers alive when idle
# Keep all connected peers alive while running
trace "Running keepalive"
await node.wakuKeepalive.keepAllAlive()
# First get a list of connected peer infos
let peers = node.peerManager.peers()
.filterIt(node.peerManager.connectedness(it.peerId) == Connected)
.mapIt(it.toPeerInfo())
# Attempt to retrieve and ping the active outgoing connection for each peer
for peer in peers:
let connOpt = await node.peerManager.dialPeer(peer, PingCodec)
if connOpt.isNone:
# @TODO more sophisticated error handling here
debug "failed to connect to remote peer", peer=peer
waku_node_errors.inc(labelValues = ["keep_alive_failure"])
return
discard await node.libp2pPing.ping(connOpt.get()) # Ping connection
await sleepAsync(keepalive)
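
Taken together, the new keepalive path is: mount libp2p ping once, then periodically dial each connected peer on PingCodec and ping over the returned connection. A condensed sketch of that flow as it would sit inside this module (names taken from the hunks above; error handling elided):

proc pingConnectedPeers(node: WakuNode) {.async.} =
  # Select currently connected peers, as keepaliveLoop does above
  let peers = node.peerManager.peers()
    .filterIt(node.peerManager.connectedness(it.peerId) == Connected)
    .mapIt(it.toPeerInfo())

  for peer in peers:
    let connOpt = await node.peerManager.dialPeer(peer, PingCodec)
    if connOpt.isSome():
      # A completed ping round-trip proves the connection is still live
      discard await node.libp2pPing.ping(connOpt.get())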
@ -752,7 +767,7 @@ when isMainModule:
relayMessages = conf.relay) # Indicates whether the node is capable of relaying messages
# Keepalive mounted on all nodes
mountKeepalive(node)
mountLibp2pPing(node)
# Resume historical messages; this has to be called after the relay setup
if conf.store and conf.persistMessages:
@ -1,85 +0,0 @@
import
std/[tables, sequtils, options],
bearssl,
chronos, chronicles, metrics, stew/results,
libp2p/protocols/pubsub/pubsubpeer,
libp2p/protocols/pubsub/floodsub,
libp2p/protocols/pubsub/gossipsub,
libp2p/protocols/protocol,
libp2p/protobuf/minprotobuf,
libp2p/stream/connection,
libp2p/crypto/crypto,
../../utils/requests,
../../node/peer_manager/peer_manager,
../message_notifier,
../waku_relay,
waku_keepalive_types
export waku_keepalive_types
declarePublicGauge waku_keepalive_count, "number of keepalives received"
declarePublicGauge waku_keepalive_errors, "number of keepalive protocol errors", ["type"]
logScope:
topics = "wakukeepalive"
const
WakuKeepaliveCodec* = "/vac/waku/keepalive/2.0.0-alpha1"
# Error types (metric label values)
const
dialFailure = "dial_failure"
# Encoding and decoding -------------------------------------------------------
proc encode*(msg: KeepaliveMessage): ProtoBuffer =
var pb = initProtoBuffer()
# @TODO: Currently no fields defined for a KeepaliveMessage
return pb
proc init*(T: type KeepaliveMessage, buffer: seq[byte]): ProtoResult[T] =
var msg = KeepaliveMessage()
let pb = initProtoBuffer(buffer)
# @TODO: Currently no fields defined for a KeepaliveMessage
ok(msg)
# Protocol -------------------------------------------------------
proc new*(T: type WakuKeepalive, peerManager: PeerManager, rng: ref BrHmacDrbgContext): T =
debug "new WakuKeepalive"
var wk: WakuKeepalive
new wk
wk.rng = crypto.newRng()
wk.peerManager = peerManager
wk.init()
return wk
method init*(wk: WakuKeepalive) =
debug "init WakuKeepalive"
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
info "WakuKeepalive message received"
waku_keepalive_count.inc()
wk.handler = handle
wk.codec = WakuKeepaliveCodec
proc keepAllAlive*(wk: WakuKeepalive) {.async, gcsafe.} =
# Send keepalive message to all managed and connected peers
let peers = wk.peerManager.peers().filterIt(wk.peerManager.connectedness(it.peerId) == Connected).mapIt(it.toPeerInfo())
for peer in peers:
let connOpt = await wk.peerManager.dialPeer(peer, WakuKeepaliveCodec)
if connOpt.isNone():
# @TODO more sophisticated error handling here
error "failed to connect to remote peer"
waku_keepalive_errors.inc(labelValues = [dialFailure])
return
await connOpt.get().writeLP(KeepaliveMessage().encode().buffer) # Send keep-alive on connection
@ -1,12 +0,0 @@
import
bearssl,
libp2p/protocols/protocol,
../../node/peer_manager/peer_manager
type
KeepaliveMessage* = object
# Currently no fields for a keepalive message
WakuKeepalive* = ref object of LPProtocol
rng*: ref BrHmacDrbgContext
peerManager*: PeerManager
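
With these two files deleted, the bespoke "/vac/waku/keepalive/2.0.0-alpha1" codec and its empty KeepaliveMessage are gone entirely; liveness checks now ride on libp2p's standard ping protocol (protocol id "/ipfs/ping/1.0.0" in nim-libp2p, assuming the usual codec string) mounted in wakunode2 above.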