diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a02312e29..9ea293eb0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -154,7 +154,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: '^1.15.5' + go-version: '~1.15.5' - name: Install p2pd run: | diff --git a/.github/workflows/multi_nim.yml b/.github/workflows/multi_nim.yml index 5d55afb9b..391f3971b 100644 --- a/.github/workflows/multi_nim.yml +++ b/.github/workflows/multi_nim.yml @@ -152,7 +152,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: '^1.15.5' + go-version: '~1.15.5' - name: Install p2pd run: | diff --git a/.pinned b/.pinned index de0146561..46f7cb1a4 100644 --- a/.pinned +++ b/.pinned @@ -1,17 +1,17 @@ -asynctest;https://github.com/markspanbroek/asynctest@#3882ed64ed3159578f796bc5ae0c6b13837fe798 -bearssl;https://github.com/status-im/nim-bearssl@#ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7 +asynctest;https://github.com/markspanbroek/asynctest@#5347c59b4b057443a014722aa40800cd8bb95c69 +bearssl;https://github.com/status-im/nim-bearssl@#0ebb1d7a4af5f4b4d4756a9b6dbfe5d411fa55d9 chronicles;https://github.com/status-im/nim-chronicles@#2a2681b60289aaf7895b7056f22616081eb1a882 -chronos;https://github.com/status-im/nim-chronos@#87197230779002a2bfa8642f0e2ae07e2349e304 +chronos;https://github.com/status-im/nim-chronos@#875d7d8e6ef0803ae1c331dbf76b1981b0caeb15 dnsclient;https://github.com/ba0f3/dnsclient.nim@#fbb76f8af8a33ab818184a7d4406d9fee20993be -faststreams;https://github.com/status-im/nim-faststreams@#37a183153c071539ab870f427c09a1376ba311b9 -httputils;https://github.com/status-im/nim-http-utils@#40048e8b3e69284bdb5d4daa0a16ad93402c55db -json_serialization;https://github.com/status-im/nim-json-serialization@#4b8f487d2dfdd941df7408ceaa70b174cce02180 -metrics;https://github.com/status-im/nim-metrics@#71e0f0e354e1f4c59e3dc92153989c8b723c3440 +faststreams;https://github.com/status-im/nim-faststreams@#49e2c52eb5dda46b1c9c10d079abe7bffe6cea89 +httputils;https://github.com/status-im/nim-http-utils@#f83fbce4d6ec7927b75be3f85e4fa905fcb69788 +json_serialization;https://github.com/status-im/nim-json-serialization@#3509706517f3562cbcbe9d94988eccdd80474ab8 +metrics;https://github.com/status-im/nim-metrics@#11edec862f96e42374bc2d584c84cc88d5d1f95f nimcrypto;https://github.com/cheatfate/nimcrypto@#a5742a9a214ac33f91615f3862c7b099aec43b00 secp256k1;https://github.com/status-im/nim-secp256k1@#e092373a5cbe1fa25abfc62e0f2a5f138dc3fb13 -serialization;https://github.com/status-im/nim-serialization@#37bc0db558d85711967acb16e9bb822b06911d46 -stew;https://github.com/status-im/nim-stew@#bb705bf17b46d2c8f9bfb106d9cc7437009a2501 +serialization;https://github.com/status-im/nim-serialization@#9631fbd1c81c8b25ff8740df440ca7ba87fa6131 +stew;https://github.com/status-im/nim-stew@#cdb1f213d073fd2ecbdaf35a866417657da9294c testutils;https://github.com/status-im/nim-testutils@#aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2 unittest2;https://github.com/status-im/nim-unittest2@#4e2893eacb916c7678fdc4935ff7420f13bf3a9c -websock;https://github.com/status-im/nim-websock@#73edde4417f7b45003113b7a34212c3ccd95b9fd +websock;https://github.com/status-im/nim-websock@#8927db93f6ca96abaacfea39f8ca50ce9d41bcdb zlib;https://github.com/status-im/nim-zlib@#74cdeb54b21bededb5a515d36f608bc1850555a2 \ No newline at end of file diff --git a/config.nims b/config.nims index 80f134395..d26e195ff 100644 --- a/config.nims +++ b/config.nims @@ -1,3 +1,6 @@ # to allow locking if dirExists("nimbledeps/pkgs"): 
switch("NimblePath", "nimbledeps/pkgs") + +when (NimMajor, NimMinor) > (1, 2): + switch("hint", "XCannotRaiseY:off") \ No newline at end of file diff --git a/libp2p.nimble b/libp2p.nimble index a5483acab..513d83491 100644 --- a/libp2p.nimble +++ b/libp2p.nimble @@ -27,7 +27,7 @@ const nimflags = proc runTest(filename: string, verify: bool = true, sign: bool = true, moreoptions: string = "") = - var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics " + var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics " excstr.add(" " & getEnv("NIMFLAGS") & " ") excstr.add(" " & nimflags & " ") excstr.add(" -d:libp2p_pubsub_sign=" & $sign) diff --git a/libp2p/builders.nim b/libp2p/builders.nim index 8011af6e6..d77031b15 100644 --- a/libp2p/builders.nim +++ b/libp2p/builders.nim @@ -14,7 +14,7 @@ import switch, peerid, peerinfo, stream/connection, multiaddress, crypto/crypto, transports/[transport, tcptransport], muxers/[muxer, mplex/mplex], - protocols/[identify, secure/secure, secure/noise], + protocols/[identify, secure/secure, secure/noise, relay], connmanager, upgrademngrs/muxedupgrade, nameresolving/nameresolver, errors @@ -42,11 +42,15 @@ type rng: ref BrHmacDrbgContext maxConnections: int maxIn: int + sendSignedPeerRecord: bool maxOut: int maxConnsPerPeer: int protoVersion: string agentVersion: string nameResolver: NameResolver + peerStoreCapacity: Option[int] + isCircuitRelay: bool + circuitRelayCanHop: bool proc new*(T: type[SwitchBuilder]): T = @@ -63,7 +67,8 @@ proc new*(T: type[SwitchBuilder]): T = maxOut: -1, maxConnsPerPeer: MaxConnectionsPerPeer, protoVersion: ProtoVersion, - agentVersion: AgentVersion) + agentVersion: AgentVersion, + isCircuitRelay: false) proc withPrivateKey*(b: SwitchBuilder, privateKey: PrivateKey): SwitchBuilder = b.privKey = some(privateKey) @@ -77,13 +82,21 @@ proc withAddresses*(b: SwitchBuilder, addresses: seq[MultiAddress]): SwitchBuild b.addresses = addresses b +proc withSignedPeerRecord*(b: SwitchBuilder, sendIt = true): SwitchBuilder = + b.sendSignedPeerRecord = sendIt + b -proc withMplex*(b: SwitchBuilder, inTimeout = 5.minutes, outTimeout = 5.minutes): SwitchBuilder = +proc withMplex*( + b: SwitchBuilder, + inTimeout = 5.minutes, + outTimeout = 5.minutes, + maxChannCount = 200): SwitchBuilder = proc newMuxer(conn: Connection): Muxer = Mplex.new( conn, - inTimeout = inTimeout, - outTimeout = outTimeout) + inTimeout, + outTimeout, + maxChannCount) b.mplexOpts = MplexOpts( enable: true, @@ -123,6 +136,10 @@ proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder b.maxConnsPerPeer = maxConnsPerPeer b +proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder = + b.peerStoreCapacity = some(capacity) + b + proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder = b.protoVersion = protoVersion b @@ -135,6 +152,11 @@ proc withNameResolver*(b: SwitchBuilder, nameResolver: NameResolver): SwitchBuil b.nameResolver = nameResolver b +proc withRelayTransport*(b: SwitchBuilder, canHop: bool): SwitchBuilder = + b.isCircuitRelay = true + b.circuitRelayCanHop = canHop + b + proc build*(b: SwitchBuilder): Switch {.raises: [Defect, LPError].} = @@ -165,7 +187,7 @@ proc build*(b: SwitchBuilder): Switch muxers let - identify = Identify.new(peerInfo) + identify = Identify.new(peerInfo, b.sendSignedPeerRecord) connManager = 
ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut) ms = MultistreamSelect.new() muxedUpgrade = MuxedUpgrade.new(identify, muxers, secureManagerInstances, connManager, ms) @@ -183,6 +205,12 @@ proc build*(b: SwitchBuilder): Switch if isNil(b.rng): b.rng = newRng() + let peerStore = + if isSome(b.peerStoreCapacity): + PeerStore.new(b.peerStoreCapacity.get()) + else: + PeerStore.new() + let switch = newSwitch( peerInfo = peerInfo, transports = transports, @@ -191,7 +219,13 @@ proc build*(b: SwitchBuilder): Switch secureManagers = secureManagerInstances, connManager = connManager, ms = ms, - nameResolver = b.nameResolver) + nameResolver = b.nameResolver, + peerStore = peerStore) + + if b.isCircuitRelay: + let relay = Relay.new(switch, b.circuitRelayCanHop) + switch.mount(relay) + switch.addTransport(RelayTransport.new(relay, muxedUpgrade)) return switch @@ -209,7 +243,9 @@ proc newStandardSwitch*( maxIn = -1, maxOut = -1, maxConnsPerPeer = MaxConnectionsPerPeer, - nameResolver: NameResolver = nil): Switch + nameResolver: NameResolver = nil, + sendSignedPeerRecord = false, + peerStoreCapacity = 1000): Switch {.raises: [Defect, LPError].} = if SecureProtocol.Secio in secureManagers: quit("Secio is deprecated!") # use of secio is unsafe @@ -219,10 +255,12 @@ proc newStandardSwitch*( .new() .withAddresses(addrs) .withRng(rng) + .withSignedPeerRecord(sendSignedPeerRecord) .withMaxConnections(maxConnections) .withMaxIn(maxIn) .withMaxOut(maxOut) .withMaxConnsPerPeer(maxConnsPerPeer) + .withPeerStore(capacity=peerStoreCapacity) .withMplex(inTimeout, outTimeout) .withTcpTransport(transportFlags) .withNameResolver(nameResolver) diff --git a/libp2p/connmanager.nim b/libp2p/connmanager.nim index f56c7b768..76de2da07 100644 --- a/libp2p/connmanager.nim +++ b/libp2p/connmanager.nim @@ -307,6 +307,9 @@ proc peerCleanup(c: ConnManager, conn: Connection) {.async.} = await c.triggerConnEvent( peerId, ConnEvent(kind: ConnEventKind.Disconnected)) await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left)) + + if not(c.peerStore.isNil): + c.peerStore.cleanup(peerId) except CatchableError as exc: # This is top-level procedure which will work as separate task, so it # do not need to propagate CancelledError and should handle other errors diff --git a/libp2p/crypto/chacha20poly1305.nim b/libp2p/crypto/chacha20poly1305.nim index afc4ce568..094423e52 100644 --- a/libp2p/crypto/chacha20poly1305.nim +++ b/libp2p/crypto/chacha20poly1305.nim @@ -1,5 +1,5 @@ ## Nim-Libp2p -## Copyright (c) 2020 Status Research & Development GmbH +## Copyright (c) 2020-2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * MIT license ([LICENSE-MIT](LICENSE-MIT)) @@ -18,6 +18,8 @@ {.push raises: [Defect].} import bearssl +from stew/assign2 import assign +from stew/ranges/ptr_arith import baseAddr # have to do this due to a nim bug and raises[] on callbacks # https://github.com/nim-lang/Nim/issues/13905 @@ -39,15 +41,15 @@ type proc intoChaChaPolyKey*(s: openArray[byte]): ChaChaPolyKey = assert s.len == ChaChaPolyKeySize - copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyKeySize) + assign(result, s) proc intoChaChaPolyNonce*(s: openArray[byte]): ChaChaPolyNonce = assert s.len == ChaChaPolyNonceSize - copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyNonceSize) + assign(result, s) proc intoChaChaPolyTag*(s: openArray[byte]): ChaChaPolyTag = assert s.len == ChaChaPolyTagSize - copyMem(addr result[0], unsafeAddr s[0], 
ChaChaPolyTagSize) + assign(result, s) # bearssl allows us to use optimized versions # this is reconciled at runtime @@ -68,11 +70,11 @@ proc encrypt*(_: type[ChaChaPoly], ourPoly1305CtmulRun( unsafeAddr key[0], unsafeAddr nonce[0], - addr data[0], + baseAddr(data), data.len, ad, aad.len, - addr tag[0], + baseAddr(tag), chacha20CtRun, #[encrypt]# 1.cint) @@ -91,10 +93,10 @@ proc decrypt*(_: type[ChaChaPoly], ourPoly1305CtmulRun( unsafeAddr key[0], unsafeAddr nonce[0], - addr data[0], + baseAddr(data), data.len, ad, aad.len, - addr tag[0], + baseAddr(tag), chacha20CtRun, #[decrypt]# 0.cint) diff --git a/libp2p/crypto/crypto.nim b/libp2p/crypto/crypto.nim index 6dd1b280a..a9cde403e 100644 --- a/libp2p/crypto/crypto.nim +++ b/libp2p/crypto/crypto.nim @@ -81,8 +81,6 @@ export results # This is workaround for Nim's `import` bug export rijndael, twofish, sha2, hash, hmac, ncrutils -from strutils import split - type DigestSheme* = enum Sha256, diff --git a/libp2p/crypto/curve25519.nim b/libp2p/crypto/curve25519.nim index ef054171c..4f66b1616 100644 --- a/libp2p/crypto/curve25519.nim +++ b/libp2p/crypto/curve25519.nim @@ -1,5 +1,5 @@ ## Nim-Libp2p -## Copyright (c) 2020 Status Research & Development GmbH +## Copyright (c) 2020-2022 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * MIT license ([LICENSE-MIT](LICENSE-MIT)) @@ -19,6 +19,7 @@ import bearssl import stew/results +from stew/assign2 import assign export results const @@ -27,13 +28,13 @@ const type Curve25519* = object Curve25519Key* = array[Curve25519KeySize, byte] - pcuchar = ptr cuchar + pcuchar = ptr char Curve25519Error* = enum Curver25519GenError proc intoCurve25519Key*(s: openArray[byte]): Curve25519Key = assert s.len == Curve25519KeySize - copyMem(addr result[0], unsafeAddr s[0], Curve25519KeySize) + assign(result, s) proc getBytes*(key: Curve25519Key): seq[byte] = @key diff --git a/libp2p/crypto/ecnist.nim b/libp2p/crypto/ecnist.nim index 6d1277956..b212eb360 100644 --- a/libp2p/crypto/ecnist.nim +++ b/libp2p/crypto/ecnist.nim @@ -123,8 +123,8 @@ proc checkPublic(key: openArray[byte], curve: cint): uint32 = var impl = brEcGetDefault() var orderlen = 0 discard impl.order(curve, addr orderlen) - result = impl.mul(cast[ptr cuchar](unsafeAddr ckey[0]), len(ckey), - cast[ptr cuchar](addr x[0]), len(x), curve) + result = impl.mul(cast[ptr char](unsafeAddr ckey[0]), len(ckey), + cast[ptr char](addr x[0]), len(x), curve) proc getOffset(pubkey: EcPublicKey): int {.inline.} = let o = cast[uint](pubkey.key.q) - cast[uint](unsafeAddr pubkey.buffer[0]) @@ -174,7 +174,7 @@ proc copy*[T: EcPKI](dst: var T, src: T): bool = dst.buffer = src.buffer dst.key.curve = src.key.curve dst.key.xlen = length - dst.key.x = cast[ptr cuchar](addr dst.buffer[offset]) + dst.key.x = cast[ptr char](addr dst.buffer[offset]) result = true elif T is EcPublicKey: let length = src.key.qlen @@ -184,7 +184,7 @@ proc copy*[T: EcPKI](dst: var T, src: T): bool = dst.buffer = src.buffer dst.key.curve = src.key.curve dst.key.qlen = length - dst.key.q = cast[ptr cuchar](addr dst.buffer[offset]) + dst.key.q = cast[ptr char](addr dst.buffer[offset]) result = true else: let length = len(src.buffer) @@ -252,8 +252,8 @@ proc getPublicKey*(seckey: EcPrivateKey): EcResult[EcPublicKey] = var ecimp = brEcGetDefault() if seckey.key.curve in EcSupportedCurvesCint: - var length = getPublicKeyLength(cast[EcCurveKind](seckey.key.curve)) var res = new EcPublicKey + assert res.buffer.len > 
getPublicKeyLength(cast[EcCurveKind](seckey.key.curve)) if brEcComputePublicKey(ecimp, addr res.key, addr res.buffer[0], unsafeAddr seckey.key) == 0: err(EcKeyIncorrectError) @@ -638,7 +638,7 @@ proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error if checkScalar(raw.toOpenArray(), curve) == 1'u32: key = new EcPrivateKey copyMem(addr key.buffer[0], addr raw.buffer[raw.offset], raw.length) - key.key.x = cast[ptr cuchar](addr key.buffer[0]) + key.key.x = cast[ptr char](addr key.buffer[0]) key.key.xlen = raw.length key.key.curve = curve ok() @@ -697,7 +697,7 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err if checkPublic(raw.toOpenArray(), curve) != 0: pubkey = new EcPublicKey copyMem(addr pubkey.buffer[0], addr raw.buffer[raw.offset], raw.length) - pubkey.key.q = cast[ptr cuchar](addr pubkey.buffer[0]) + pubkey.key.q = cast[ptr char](addr pubkey.buffer[0]) pubkey.key.qlen = raw.length pubkey.key.curve = curve ok() @@ -785,7 +785,7 @@ proc initRaw*(key: var EcPrivateKey, data: openArray[byte]): bool = let length = len(data) key = new EcPrivateKey copyMem(addr key.buffer[0], unsafeAddr data[0], length) - key.key.x = cast[ptr cuchar](addr key.buffer[0]) + key.key.x = cast[ptr char](addr key.buffer[0]) key.key.xlen = length key.key.curve = curve result = true @@ -816,7 +816,7 @@ proc initRaw*(pubkey: var EcPublicKey, data: openArray[byte]): bool = let length = len(data) pubkey = new EcPublicKey copyMem(addr pubkey.buffer[0], unsafeAddr data[0], length) - pubkey.key.q = cast[ptr cuchar](addr pubkey.buffer[0]) + pubkey.key.q = cast[ptr char](addr pubkey.buffer[0]) pubkey.key.qlen = length pubkey.key.curve = curve result = true @@ -891,9 +891,9 @@ proc scalarMul*(pub: EcPublicKey, sec: EcPrivateKey): EcPublicKey = let poffset = key.getOffset() let soffset = sec.getOffset() if poffset >= 0 and soffset >= 0: - let res = impl.mul(cast[ptr cuchar](addr key.buffer[poffset]), + let res = impl.mul(cast[ptr char](addr key.buffer[poffset]), key.key.qlen, - cast[ptr cuchar](unsafeAddr sec.buffer[soffset]), + cast[ptr char](unsafeAddr sec.buffer[soffset]), sec.key.xlen, key.key.curve) if res != 0: diff --git a/libp2p/crypto/rsa.nim b/libp2p/crypto/rsa.nim index dc8662680..133936cd6 100644 --- a/libp2p/crypto/rsa.nim +++ b/libp2p/crypto/rsa.nim @@ -62,7 +62,7 @@ type buffer*: seq[byte] seck*: BrRsaPrivateKey pubk*: BrRsaPublicKey - pexp*: ptr cuchar + pexp*: ptr char pexplen*: int RsaPublicKey* = ref object @@ -109,9 +109,9 @@ template getArray*(bs, os, ls: untyped): untyped = template trimZeroes(b: seq[byte], pt, ptlen: untyped) = var length = ptlen for i in 0.. 
0 and len(rawdq) > 0 and len(rawiq) > 0: key = new RsaPrivateKey key.buffer = @data - key.pubk.n = cast[ptr cuchar](addr key.buffer[rawn.offset]) - key.pubk.e = cast[ptr cuchar](addr key.buffer[rawpube.offset]) - key.seck.p = cast[ptr cuchar](addr key.buffer[rawp.offset]) - key.seck.q = cast[ptr cuchar](addr key.buffer[rawq.offset]) - key.seck.dp = cast[ptr cuchar](addr key.buffer[rawdp.offset]) - key.seck.dq = cast[ptr cuchar](addr key.buffer[rawdq.offset]) - key.seck.iq = cast[ptr cuchar](addr key.buffer[rawiq.offset]) - key.pexp = cast[ptr cuchar](addr key.buffer[rawprie.offset]) + key.pubk.n = cast[ptr char](addr key.buffer[rawn.offset]) + key.pubk.e = cast[ptr char](addr key.buffer[rawpube.offset]) + key.seck.p = cast[ptr char](addr key.buffer[rawp.offset]) + key.seck.q = cast[ptr char](addr key.buffer[rawq.offset]) + key.seck.dp = cast[ptr char](addr key.buffer[rawdp.offset]) + key.seck.dq = cast[ptr char](addr key.buffer[rawdq.offset]) + key.seck.iq = cast[ptr char](addr key.buffer[rawiq.offset]) + key.pexp = cast[ptr char](addr key.buffer[rawprie.offset]) key.pubk.nlen = len(rawn) key.pubk.elen = len(rawpube) key.seck.plen = len(rawp) @@ -554,8 +554,8 @@ proc init*(key: var RsaPublicKey, data: openArray[byte]): Result[void, Asn1Error if len(rawn) >= (MinKeySize shr 3) and len(rawe) > 0: key = new RsaPublicKey key.buffer = @data - key.key.n = cast[ptr cuchar](addr key.buffer[rawn.offset]) - key.key.e = cast[ptr cuchar](addr key.buffer[rawe.offset]) + key.key.n = cast[ptr char](addr key.buffer[rawn.offset]) + key.key.e = cast[ptr char](addr key.buffer[rawe.offset]) key.key.nlen = len(rawn) key.key.elen = len(rawe) ok() @@ -762,9 +762,9 @@ proc sign*[T: byte|char](key: RsaPrivateKey, kv.update(addr hc.vtable, nil, 0) kv.output(addr hc.vtable, addr hash[0]) var oid = RsaOidSha256 - let implRes = impl(cast[ptr cuchar](addr oid[0]), - cast[ptr cuchar](addr hash[0]), len(hash), - addr key.seck, cast[ptr cuchar](addr res.buffer[0])) + let implRes = impl(cast[ptr char](addr oid[0]), + cast[ptr char](addr hash[0]), len(hash), + addr key.seck, cast[ptr char](addr res.buffer[0])) if implRes == 0: err(RsaSignatureError) else: @@ -791,8 +791,8 @@ proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T], kv.update(addr hc.vtable, nil, 0) kv.output(addr hc.vtable, addr hash[0]) var oid = RsaOidSha256 - let res = impl(cast[ptr cuchar](addr sig.buffer[0]), len(sig.buffer), - cast[ptr cuchar](addr oid[0]), - len(check), addr pubkey.key, cast[ptr cuchar](addr check[0])) + let res = impl(cast[ptr char](addr sig.buffer[0]), len(sig.buffer), + cast[ptr char](addr oid[0]), + len(check), addr pubkey.key, cast[ptr char](addr check[0])) if res == 1: result = equalMem(addr check[0], addr hash[0], len(hash)) diff --git a/libp2p/dial.nim b/libp2p/dial.nim index ea51270a0..d850a9da2 100644 --- a/libp2p/dial.nim +++ b/libp2p/dial.nim @@ -11,7 +11,8 @@ import chronos import peerid, - stream/connection + stream/connection, + transports/transport type Dial* = ref object of RootObj @@ -49,3 +50,8 @@ method dial*( ## doAssert(false, "Not implemented!") + +method addTransport*( + self: Dial, + transport: Transport) {.base.} = + doAssert(false, "Not implemented!") diff --git a/libp2p/dialer.nim b/libp2p/dialer.nim index 65cc1d628..eb89164da 100644 --- a/libp2p/dialer.nim +++ b/libp2p/dialer.nim @@ -241,6 +241,9 @@ method dial*( await cleanup() raise exc +method addTransport*(self: Dialer, t: Transport) = + self.transports &= t + proc new*( T: type Dialer, localPeerId: PeerId, diff --git 
a/libp2p/multiaddress.nim b/libp2p/multiaddress.nim index cf9d6598e..7d7feedb3 100644 --- a/libp2p/multiaddress.nim +++ b/libp2p/multiaddress.nim @@ -428,7 +428,9 @@ const Reliable* = mapOr(TCP, UTP, QUIC, WebSockets) - IPFS* = mapAnd(Reliable, mapEq("p2p")) + P2PPattern* = mapEq("p2p") + + IPFS* = mapAnd(Reliable, P2PPattern) HTTP* = mapOr( mapAnd(TCP, mapEq("http")), @@ -447,6 +449,8 @@ const mapAnd(HTTPS, mapEq("p2p-webrtc-direct")) ) + CircuitRelay* = mapEq("p2p-circuit") + proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} = for item in ProtocolsList: diff --git a/libp2p/multistream.nim b/libp2p/multistream.nim index 95e40c023..3070123d7 100644 --- a/libp2p/multistream.nim +++ b/libp2p/multistream.nim @@ -12,7 +12,6 @@ import std/[strutils] import chronos, chronicles, stew/byteutils import stream/connection, - vbuffer, protocols/protocol logScope: diff --git a/libp2p/muxers/mplex/lpchannel.nim b/libp2p/muxers/mplex/lpchannel.nim index 224c6b408..46f558049 100644 --- a/libp2p/muxers/mplex/lpchannel.nim +++ b/libp2p/muxers/mplex/lpchannel.nim @@ -21,6 +21,12 @@ export connection logScope: topics = "libp2p mplexchannel" +when defined(libp2p_mplex_metrics): + declareHistogram libp2p_mplex_qlen, "message queue length", + buckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0] + declareCounter libp2p_mplex_qlenclose, "closed because of max queuelen" + declareHistogram libp2p_mplex_qtime, "message queuing time" + when defined(libp2p_network_protocols_metrics): declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"] @@ -187,6 +193,8 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} = if s.writes >= MaxWrites: debug "Closing connection, too many in-flight writes on channel", s, conn = s.conn, writes = s.writes + when defined(libp2p_mplex_metrics): + libp2p_mplex_qlenclose.inc() await s.reset() await s.conn.close() return @@ -201,8 +209,14 @@ proc completeWrite( try: s.writes += 1 - await fut - when defined(libp2p_network_protocols_metrics): + when defined(libp2p_mplex_metrics): + libp2p_mplex_qlen.observe(s.writes.int64 - 1) + libp2p_mplex_qtime.time: + await fut + else: + await fut + + when defined(libp2p_network_protocols_metrics): if s.tag.len > 0: libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.tag, "out"]) diff --git a/libp2p/nameresolving/nameresolver.nim b/libp2p/nameresolving/nameresolver.nim index 6f1e97588..b8da3352a 100644 --- a/libp2p/nameresolving/nameresolver.nim +++ b/libp2p/nameresolving/nameresolver.nim @@ -62,7 +62,6 @@ proc resolveDnsAddress( port = Port(fromBytesBE(uint16, pbuf)) resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain) - var addressSuffix = ma return collect(newSeqOfCap(4)): for address in resolvedAddresses: var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet() diff --git a/libp2p/peerid.nim b/libp2p/peerid.nim index b7b2c0dcc..d7e578929 100644 --- a/libp2p/peerid.nim +++ b/libp2p/peerid.nim @@ -38,7 +38,11 @@ func shortLog*(pid: PeerId): string = var spid = $pid if len(spid) > 10: spid[3] = '*' - spid.delete(4, spid.high - 6) + + when (NimMajor, NimMinor) > (1, 4): + spid.delete(4 .. 
spid.high - 6) + else: + spid.delete(4, spid.high - 6) spid diff --git a/libp2p/peerinfo.nim b/libp2p/peerinfo.nim index 8d602b338..ce953cc3b 100644 --- a/libp2p/peerinfo.nim +++ b/libp2p/peerinfo.nim @@ -9,11 +9,11 @@ {.push raises: [Defect].} -import std/[options, sequtils, hashes] +import std/[options, sequtils] import pkg/[chronos, chronicles, stew/results] -import peerid, multiaddress, crypto/crypto, errors +import peerid, multiaddress, crypto/crypto, routing_record, errors -export peerid, multiaddress, crypto, errors, results +export peerid, multiaddress, crypto, routing_record, errors, results ## Our local peer info @@ -28,6 +28,7 @@ type agentVersion*: string privateKey*: PrivateKey publicKey*: PublicKey + signedPeerRecord*: SignedPeerRecord func shortLog*(p: PeerInfo): auto = ( @@ -39,6 +40,17 @@ func shortLog*(p: PeerInfo): auto = ) chronicles.formatIt(PeerInfo): shortLog(it) +proc update*(p: PeerInfo) = + let sprRes = SignedPeerRecord.init( + p.privateKey, + PeerRecord.init(p.peerId, p.addrs) + ) + if sprRes.isOk: + p.signedPeerRecord = sprRes.get() + else: + discard + #info "Can't update the signed peer record" + proc new*( p: typedesc[PeerInfo], key: PrivateKey, @@ -52,14 +64,19 @@ proc new*( key.getPublicKey().tryGet() except CatchableError: raise newException(PeerInfoError, "invalid private key") + + let peerId = PeerID.init(key).tryGet() let peerInfo = PeerInfo( - peerId: PeerId.init(key).tryGet(), + peerId: peerId, publicKey: pubkey, privateKey: key, protoVersion: protoVersion, agentVersion: agentVersion, addrs: @addrs, - protocols: @protocols) + protocols: @protocols, + ) + + peerInfo.update() return peerInfo diff --git a/libp2p/peerstore.nim b/libp2p/peerstore.nim index 27e0ea9ca..b86145c95 100644 --- a/libp2p/peerstore.nim +++ b/libp2p/peerstore.nim @@ -10,10 +10,11 @@ {.push raises: [Defect].} import - std/[tables, sets, sequtils, options], + std/[tables, sets, options, macros], ./crypto/crypto, ./protocols/identify, ./peerid, ./peerinfo, + ./routing_record, ./multiaddress type @@ -21,56 +22,52 @@ type # Handler types # ################# - PeerBookChangeHandler*[T] = proc(peerId: PeerId, entry: T) - - AddrChangeHandler* = PeerBookChangeHandler[HashSet[MultiAddress]] - ProtoChangeHandler* = PeerBookChangeHandler[HashSet[string]] - KeyChangeHandler* = PeerBookChangeHandler[PublicKey] + PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [Defect].} ######### # Books # ######### # Each book contains a book (map) and event handler(s) - PeerBook*[T] = object of RootObj - book*: Table[PeerId, T] - changeHandlers: seq[PeerBookChangeHandler[T]] + BasePeerBook = ref object of RootObj + changeHandlers: seq[PeerBookChangeHandler] + deletor: PeerBookChangeHandler - SetPeerBook*[T] = object of PeerBook[HashSet[T]] + PeerBook*[T] = ref object of BasePeerBook + book*: Table[PeerId, T] + + SeqPeerBook*[T] = ref object of PeerBook[seq[T]] - AddressBook* = object of SetPeerBook[MultiAddress] - ProtoBook* = object of SetPeerBook[string] - KeyBook* = object of PeerBook[PublicKey] + AddressBook* = ref object of SeqPeerBook[MultiAddress] + ProtoBook* = ref object of SeqPeerBook[string] + KeyBook* = ref object of PeerBook[PublicKey] + + AgentBook* = ref object of PeerBook[string] + ProtoVersionBook* = ref object of PeerBook[string] + SPRBook* = ref object of PeerBook[Envelope] #################### # Peer store types # #################### PeerStore* = ref object - addressBook*: AddressBook - protoBook*: ProtoBook - keyBook*: KeyBook - - agentBook*: PeerBook[string] - 
protoVersionBook*: PeerBook[string] + books: Table[string, BasePeerBook] + capacity*: int + toClean*: seq[PeerId] -## Constructs a new PeerStore with metadata of type M -proc new*(T: type PeerStore): PeerStore = - var p: PeerStore - new(p) - return p +proc new*(T: type PeerStore, capacity = 1000): PeerStore = + T(capacity: capacity) ######################### # Generic Peer Book API # ######################### -proc get*[T](peerBook: PeerBook[T], +proc `[]`*[T](peerBook: PeerBook[T], peerId: PeerId): T = ## Get all the known metadata of a provided peer. - peerBook.book.getOrDefault(peerId) -proc set*[T](peerBook: var PeerBook[T], +proc `[]=`*[T](peerBook: PeerBook[T], peerId: PeerId, entry: T) = ## Set metadata for a given peerId. This will replace any @@ -80,83 +77,90 @@ proc set*[T](peerBook: var PeerBook[T], # Notify clients for handler in peerBook.changeHandlers: - handler(peerId, peerBook.get(peerId)) + handler(peerId) -proc delete*[T](peerBook: var PeerBook[T], +proc del*[T](peerBook: PeerBook[T], peerId: PeerId): bool = ## Delete the provided peer from the book. - if not peerBook.book.hasKey(peerId): + if peerId notin peerBook.book: return false else: peerBook.book.del(peerId) + # Notify clients + for handler in peerBook.changeHandlers: + handler(peerId) return true proc contains*[T](peerBook: PeerBook[T], peerId: PeerId): bool = peerId in peerBook.book -################ -# Set Book API # -################ +proc addHandler*[T](peerBook: PeerBook[T], handler: PeerBookChangeHandler) = + peerBook.changeHandlers.add(handler) -proc add*[T]( - peerBook: var SetPeerBook[T], - peerId: PeerId, - entry: T) = - ## Add entry to a given peer. If the peer is not known, - ## it will be set with the provided entry. - - peerBook.book.mgetOrPut(peerId, - initHashSet[T]()).incl(entry) - - # Notify clients - for handler in peerBook.changeHandlers: - handler(peerId, peerBook.get(peerId)) - -# Helper for seq -proc set*[T]( - peerBook: var SetPeerBook[T], - peerId: PeerId, - entry: seq[T]) = - ## Add entry to a given peer. If the peer is not known, - ## it will be set with the provided entry. - peerBook.set(peerId, entry.toHashSet()) - +proc len*[T](peerBook: PeerBook[T]): int = peerBook.book.len ################## # Peer Store API # ################## +macro getTypeName(t: type): untyped = + # Generate unique name in form of Module.Type + let typ = getTypeImpl(t)[1] + newLit(repr(typ.owner()) & "." & repr(typ)) -proc addHandlers*(peerStore: PeerStore, - addrChangeHandler: AddrChangeHandler, - protoChangeHandler: ProtoChangeHandler, - keyChangeHandler: KeyChangeHandler) = - ## Register event handlers to notify clients of changes in the peer store - - peerStore.addressBook.changeHandlers.add(addrChangeHandler) - peerStore.protoBook.changeHandlers.add(protoChangeHandler) - peerStore.keyBook.changeHandlers.add(keyChangeHandler) +proc `[]`*[T](p: PeerStore, typ: type[T]): T = + let name = getTypeName(T) + result = T(p.books.getOrDefault(name)) + if result.isNil: + result = T.new() + result.deletor = proc(pid: PeerId) = + # Manual method because generic method + # don't work + discard T(p.books.getOrDefault(name)).del(pid) + p.books[name] = result + return result -proc delete*(peerStore: PeerStore, - peerId: PeerId): bool = +proc del*(peerStore: PeerStore, + peerId: PeerId) = ## Delete the provided peer from every book. 
- - peerStore.addressBook.delete(peerId) and - peerStore.protoBook.delete(peerId) and - peerStore.keyBook.delete(peerId) + for _, book in peerStore.books: + book.deletor(peerId) proc updatePeerInfo*( peerStore: PeerStore, info: IdentifyInfo) = if info.addrs.len > 0: - peerStore.addressBook.set(info.peerId, info.addrs) + peerStore[AddressBook][info.peerId] = info.addrs if info.agentVersion.isSome: - peerStore.agentBook.set(info.peerId, info.agentVersion.get().string) + peerStore[AgentBook][info.peerId] = info.agentVersion.get().string if info.protoVersion.isSome: - peerStore.protoVersionBook.set(info.peerId, info.protoVersion.get().string) + peerStore[ProtoVersionBook][info.peerId] = info.protoVersion.get().string if info.protos.len > 0: - peerStore.protoBook.set(info.peerId, info.protos) + peerStore[ProtoBook][info.peerId] = info.protos + + if info.signedPeerRecord.isSome: + peerStore[SPRBook][info.peerId] = info.signedPeerRecord.get() + + let cleanupPos = peerStore.toClean.find(info.peerId) + if cleanupPos >= 0: + peerStore.toClean.delete(cleanupPos) + +proc cleanup*( + peerStore: PeerStore, + peerId: PeerId) = + + if peerStore.capacity == 0: + peerStore.del(peerId) + return + elif peerStore.capacity < 0: + #infinite capacity + return + + peerStore.toClean.add(peerId) + while peerStore.toClean.len > peerStore.capacity: + peerStore.del(peerStore.toClean[0]) + peerStore.toClean.delete(0) diff --git a/libp2p/protocols/identify.nim b/libp2p/protocols/identify.nim index dfa081334..8e5cfcd3f 100644 --- a/libp2p/protocols/identify.nim +++ b/libp2p/protocols/identify.nim @@ -9,7 +9,7 @@ {.push raises: [Defect].} -import std/[sequtils, options, strutils] +import std/[sequtils, options, strutils, sugar] import chronos, chronicles import ../protobuf/minprotobuf, ../peerinfo, @@ -44,9 +44,11 @@ type protoVersion*: Option[string] agentVersion*: Option[string] protos*: seq[string] + signedPeerRecord*: Option[Envelope] Identify* = ref object of LPProtocol peerInfo*: PeerInfo + sendSignedPeerRecord*: bool IdentifyPushHandler* = proc ( peer: PeerId, @@ -57,8 +59,23 @@ type IdentifyPush* = ref object of LPProtocol identifyHandler: IdentifyPushHandler -proc encodeMsg*(peerInfo: PeerInfo, observedAddr: MultiAddress): ProtoBuffer - {.raises: [Defect, IdentifyNoPubKeyError].} = +chronicles.expandIt(IdentifyInfo): + pubkey = ($it.pubkey).shortLog + addresses = it.addrs.map(x => $x).join(",") + protocols = it.protos.map(x => $x).join(",") + observable_address = + if it.observedAddr.isSome(): $it.observedAddr.get() + else: "None" + proto_version = it.protoVersion.get("None") + agent_version = it.agentVersion.get("None") + signedPeerRecord = + # The SPR contains the same data as the identify message + # would be cumbersome to log + if iinfo.signedPeerRecord.isSome(): "Some" + else: "None" + +proc encodeMsg(peerInfo: PeerInfo, observedAddr: MultiAddress, sendSpr: bool): ProtoBuffer + {.raises: [Defect].} = result = initProtoBuffer() let pkey = peerInfo.publicKey @@ -76,6 +93,14 @@ proc encodeMsg*(peerInfo: PeerInfo, observedAddr: MultiAddress): ProtoBuffer else: peerInfo.agentVersion result.write(6, agentVersion) + + ## Optionally populate signedPeerRecord field. + ## See https://github.com/libp2p/go-libp2p/blob/ddf96ce1cfa9e19564feb9bd3e8269958bbc0aba/p2p/protocol/identify/pb/identify.proto for reference. 
+ if sendSpr: + let sprBuff = peerInfo.signedPeerRecord.envelope.encode() + if sprBuff.isOk(): + result.write(8, sprBuff.get()) + result.finish() proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] = @@ -85,6 +110,7 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] = oaddr: MultiAddress protoVersion: string agentVersion: string + signedPeerRecord: SignedPeerRecord var pb = initProtoBuffer(buf) @@ -95,8 +121,11 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] = let r5 = pb.getField(5, protoVersion) let r6 = pb.getField(6, agentVersion) + let r8 = pb.getField(8, signedPeerRecord) + let res = r1.isOk() and r2.isOk() and r3.isOk() and - r4.isOk() and r5.isOk() and r6.isOk() + r4.isOk() and r5.isOk() and r6.isOk() and + r8.isOk() if res: if r1.get(): @@ -107,21 +136,24 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] = iinfo.protoVersion = some(protoVersion) if r6.get(): iinfo.agentVersion = some(agentVersion) - debug "decodeMsg: decoded identify", pubkey = ($pubkey).shortLog, - addresses = iinfo.addrs.mapIt($it).join(","), - protocols = iinfo.protos.mapIt($it).join(","), - observable_address = - if iinfo.observedAddr.isSome(): $iinfo.observedAddr.get() - else: "None", - proto_version = iinfo.protoVersion.get("None"), - agent_version = iinfo.agentVersion.get("None") + if r8.get() and r1.get(): + if iinfo.pubkey.get() == signedPeerRecord.envelope.publicKey: + iinfo.signedPeerRecord = some(signedPeerRecord.envelope) + debug "decodeMsg: decoded identify", iinfo some(iinfo) else: trace "decodeMsg: failed to decode received message" none[IdentifyInfo]() -proc new*(T: typedesc[Identify], peerInfo: PeerInfo): T = - let identify = T(peerInfo: peerInfo) +proc new*( + T: typedesc[Identify], + peerInfo: PeerInfo, + sendSignedPeerRecord = false + ): T = + let identify = T( + peerInfo: peerInfo, + sendSignedPeerRecord: sendSignedPeerRecord + ) identify.init() identify @@ -129,7 +161,7 @@ method init*(p: Identify) = proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} = try: trace "handling identify request", conn - var pb = encodeMsg(p.peerInfo, conn.observedAddr) + var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord) await conn.writeLp(pb.buffer) except CancelledError as exc: raise exc @@ -209,5 +241,5 @@ proc init*(p: IdentifyPush) = p.codec = IdentifyPushCodec proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async.} = - var pb = encodeMsg(peerInfo, conn.observedAddr) + var pb = encodeMsg(peerInfo, conn.observedAddr, true) await conn.writeLp(pb.buffer) diff --git a/libp2p/protocols/pubsub/floodsub.nim b/libp2p/protocols/pubsub/floodsub.nim index 54d6f281b..7637439a1 100644 --- a/libp2p/protocols/pubsub/floodsub.nim +++ b/libp2p/protocols/pubsub/floodsub.nim @@ -42,6 +42,9 @@ proc addSeen*(f: FloodSub, msgId: MessageID): bool = # Return true if the message has already been seen f.seen.put(f.seenSalt & msgId) +proc firstSeen*(f: FloodSub, msgId: MessageID): Moment = + f.seen.addedAt(f.seenSalt & msgId) + proc handleSubscribe*(f: FloodSub, peer: PubsubPeer, topic: string, diff --git a/libp2p/protocols/pubsub/gossipsub.nim b/libp2p/protocols/pubsub/gossipsub.nim index 8261ac538..5f94f2f65 100644 --- a/libp2p/protocols/pubsub/gossipsub.nim +++ b/libp2p/protocols/pubsub/gossipsub.nim @@ -28,7 +28,7 @@ import ./pubsub, import stew/results export results -import ./gossipsub/[types, scoring, behavior] +import ./gossipsub/[types, scoring, behavior], ../../utils/heartbeat export types, scoring, behavior, pubsub @@ -38,6 +38,8 @@ 
logScope: declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish") declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened") declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation") +declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received") +declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)") proc init*(_: type[GossipSubParams]): GossipSubParams = GossipSubParams( @@ -69,7 +71,8 @@ proc init*(_: type[GossipSubParams]): GossipSubParams = ipColocationFactorThreshold: 1.0, behaviourPenaltyWeight: -1.0, behaviourPenaltyDecay: 0.999, - disconnectBadPeers: false + disconnectBadPeers: false, + enablePX: false ) proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] = @@ -378,16 +381,24 @@ method rpcHandler*(g: GossipSub, # remote attacking the hash function if g.addSeen(msgId): trace "Dropping already-seen message", msgId = shortLog(msgId), peer - # make sure to update score tho before continuing - # TODO: take into account meshMessageDeliveriesWindow - # score only if messages are not too old. - g.rewardDelivered(peer, msg.topicIDs, false) - g.validationSeen.withValue(msgIdSalted, seen): seen[].incl(peer) + var alreadyReceived = false + g.validationSeen.withValue(msgIdSalted, seen): + if seen[].containsOrIncl(peer): + # peer sent us this message twice + alreadyReceived = true + + if not alreadyReceived: + let delay = Moment.now() - g.firstSeen(msgId) + g.rewardDelivered(peer, msg.topicIDs, false, delay) + + libp2p_gossipsub_duplicate.inc() # onto the next message continue + libp2p_gossipsub_received.inc() + # avoid processing messages we are not interested in if msg.topicIDs.allIt(it notin g.topics): debug "Dropping message of topic without subscription", msgId = shortLog(msgId), peer @@ -556,7 +567,7 @@ method publish*(g: GossipSub, return peers.len proc maintainDirectPeers(g: GossipSub) {.async.} = - while g.heartbeatRunning: + heartbeat "GossipSub DirectPeers", 1.minutes: for id, addrs in g.parameters.directPeers: let peer = g.peers.getOrDefault(id) if isNil(peer): @@ -572,8 +583,6 @@ proc maintainDirectPeers(g: GossipSub) {.async.} = except CatchableError as exc: debug "Direct peer error dialing", msg = exc.msg - await sleepAsync(1.minutes) - method start*(g: GossipSub) {.async.} = trace "gossipsub start" @@ -581,8 +590,8 @@ method start*(g: GossipSub) {.async.} = warn "Starting gossipsub twice" return - g.heartbeatRunning = true g.heartbeatFut = g.heartbeat() + g.scoringHeartbeatFut = g.scoringHeartbeat() g.directPeersLoop = g.maintainDirectPeers() method stop*(g: GossipSub) {.async.} = @@ -592,13 +601,10 @@ method stop*(g: GossipSub) {.async.} = return # stop heartbeat interval - g.heartbeatRunning = false g.directPeersLoop.cancel() - if not g.heartbeatFut.finished: - trace "awaiting last heartbeat" - await g.heartbeatFut - trace "heartbeat stopped" - g.heartbeatFut = nil + g.scoringHeartbeatFut.cancel() + g.heartbeatFut.cancel() + g.heartbeatFut = nil method initPubSub*(g: GossipSub) {.raises: [Defect, InitializationError].} = diff --git a/libp2p/protocols/pubsub/gossipsub/behavior.nim b/libp2p/protocols/pubsub/gossipsub/behavior.nim index e54584627..eda2e8cf9 100644 --- a/libp2p/protocols/pubsub/gossipsub/behavior.nim +++ b/libp2p/protocols/pubsub/gossipsub/behavior.nim @@ -14,7 +14,7 @@ import chronos, chronicles, metrics import "."/[types, scoring] import ".."/[pubsubpeer, 
peertable, timedcache, mcache, floodsub, pubsub] import "../rpc"/[messages] -import "../../.."/[peerid, multiaddress, utility, switch] +import "../../.."/[peerid, multiaddress, utility, switch, routing_record, signed_envelope, utils/heartbeat] declareGauge(libp2p_gossipsub_cache_window_size, "the number of messages in the cache") declareGauge(libp2p_gossipsub_peers_per_topic_mesh, "gossipsub peers per topic in mesh", labels = ["topic"]) @@ -25,6 +25,7 @@ declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no declareGauge(libp2p_gossipsub_low_peers_topics, "number of topics in mesh with at least one but below dlow peers") declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh with at least dlow peers (but below dhigh)") declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"]) +declareSummary(libp2p_gossipsub_mcache_hit, "ratio of successful IWANT message cache lookups") proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} = g.withPeerStats(p.peerId) do (stats: var PeerStats): @@ -78,13 +79,23 @@ proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].} v[].del(peer) proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [Defect].} = + if not g.parameters.enablePX: + return @[] var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq() peers.keepIf do (x: PubSubPeer) -> bool: x.score >= 0.0 # by spec, larger then Dhi, but let's put some hard caps peers.setLen(min(peers.len, g.parameters.dHigh * 2)) + let sprBook = g.switch.peerStore[SPRBook] peers.map do (x: PubSubPeer) -> PeerInfoMsg: - PeerInfoMsg(peerId: x.peerId.getBytes()) + PeerInfoMsg( + peerId: x.peerId, + signedPeerRecord: + if x.peerId in sprBook: + sprBook[x.peerId].encode().get(default(seq[byte])) + else: + default(seq[byte]) + ) proc handleGraft*(g: GossipSub, peer: PubSubPeer, @@ -165,6 +176,29 @@ proc handleGraft*(g: GossipSub, return prunes +proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRecord])] = + var routingRecords: seq[(PeerId, Option[PeerRecord])] + for record in prune.peers: + let peerRecord = + if record.signedPeerRecord.len == 0: + none(PeerRecord) + else: + let signedRecord = SignedPeerRecord.decode(record.signedPeerRecord) + if signedRecord.isErr: + trace "peer sent invalid SPR", peer, error=signedRecord.error + none(PeerRecord) + else: + if record.peerID != signedRecord.get().data.peerId: + trace "peer sent envelope with wrong public key", peer + none(PeerRecord) + else: + some(signedRecord.get().data) + + routingRecords.add((record.peerId, peerRecord)) + + routingRecords + + proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [Defect].} = for prune in prunes: let topic = prune.topicID @@ -190,9 +224,12 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r g.pruned(peer, topic, setBackoff = false) g.mesh.removePeer(topic, peer) - # TODO peer exchange, we miss ambient peer discovery in libp2p, so we are blocked by that - # another option could be to implement signed peer records - ## if peer.score > g.parameters.gossipThreshold and prunes.peers.len > 0: + if peer.score > g.parameters.gossipThreshold and prune.peers.len > 0 and + g.routingRecordsHandler.len > 0: + let routingRecords = prune.getPeers(peer) + + for handler in g.routingRecordsHandler: + handler(peer.peerId, topic, routingRecords) proc handleIHave*(g: 
GossipSub, peer: PubSubPeer, @@ -242,12 +279,15 @@ proc handleIWant*(g: GossipSub, trace "peer sent iwant", peer, messageID = mid let msg = g.mcache.get(mid) if msg.isSome: + libp2p_gossipsub_mcache_hit.observe(1) # avoid spam if peer.iWantBudget > 0: messages.add(msg.get()) dec peer.iWantBudget else: break + else: + libp2p_gossipsub_mcache_hit.observe(0) return messages proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} = @@ -272,22 +312,29 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) var prunes, grafts: seq[PubSubPeer] npeers = g.mesh.peers(topic) + defaultMesh: HashSet[PubSubPeer] + backingOff = g.backingOff.getOrDefault(topic) if npeers < g.parameters.dLow: trace "replenishing mesh", peers = npeers # replenish the mesh if we're below Dlo - var candidates = toSeq( - g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()) - - g.mesh.getOrDefault(topic, initHashSet[PubSubPeer]()) - ).filterIt( - it.connected and - # avoid negative score peers - it.score >= 0.0 and - # don't pick explicit peers - it.peerId notin g.parameters.directPeers and - # and avoid peers we are backing off - it.peerId notin g.backingOff.getOrDefault(topic) - ) + + var + candidates: seq[PubSubPeer] + currentMesh = addr defaultMesh + g.mesh.withValue(topic, v): currentMesh = v + g.gossipSub.withValue(topic, peerList): + for it in peerList[]: + if + it.connected and + # avoid negative score peers + it.score >= 0.0 and + it notin currentMesh[] and + # don't pick explicit peers + it.peerId notin g.parameters.directPeers and + # and avoid peers we are backing off + it.peerId notin backingOff: + candidates.add(it) # shuffle anyway, score might be not used g.rng.shuffle(candidates) @@ -308,39 +355,43 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) grafts &= peer else: - trace "replenishing mesh outbound quota", peers = g.mesh.peers(topic) + trace "replenishing mesh outbound quota", peers = g.mesh.peers(topic) - var candidates = toSeq( - g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()) - - g.mesh.getOrDefault(topic, initHashSet[PubSubPeer]()) - ).filterIt( - it.connected and - # get only outbound ones - it.outbound and - # avoid negative score peers - it.score >= 0.0 and - # don't pick explicit peers - it.peerId notin g.parameters.directPeers and - # and avoid peers we are backing off - it.peerId notin g.backingOff.getOrDefault(topic) - ) + var + candidates: seq[PubSubPeer] + currentMesh = addr defaultMesh + g.mesh.withValue(topic, v): currentMesh = v + g.gossipSub.withValue(topic, peerList): + for it in peerList[]: + if + it.connected and + # get only outbound ones + it.outbound and + it notin currentMesh[] and + # avoid negative score peers + it.score >= 0.0 and + # don't pick explicit peers + it.peerId notin g.parameters.directPeers and + # and avoid peers we are backing off + it.peerId notin backingOff: + candidates.add(it) - # shuffle anyway, score might be not used - g.rng.shuffle(candidates) + # shuffle anyway, score might be not used + g.rng.shuffle(candidates) - # sort peers by score, high score first, we are grafting - candidates.sort(byScore, SortOrder.Descending) + # sort peers by score, high score first, we are grafting + candidates.sort(byScore, SortOrder.Descending) - # Graft peers so we reach a count of D - candidates.setLen(min(candidates.len, g.parameters.dOut)) + # Graft peers so we reach a count of D + candidates.setLen(min(candidates.len, g.parameters.dOut)) - trace "grafting outbound peers", topic, 
peers = candidates.len + trace "grafting outbound peers", topic, peers = candidates.len - for peer in candidates: - if g.mesh.addPeer(topic, peer): - g.grafted(peer, topic) - g.fanout.removePeer(topic, peer) - grafts &= peer + for peer in candidates: + if g.mesh.addPeer(topic, peer): + g.grafted(peer, topic) + g.fanout.removePeer(topic, peer) + grafts &= peer # get again npeers after possible grafts @@ -399,6 +450,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) g.pruned(peer, topic) g.mesh.removePeer(topic, peer) + backingOff = g.backingOff.getOrDefault(topic) + # opportunistic grafting, by spec mesh should not be empty... if g.mesh.peers(topic) > 1: var peers = toSeq(try: g.mesh[topic] except KeyError: raiseAssert "have peers") @@ -408,22 +461,26 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) let median = peers[medianIdx] if median.score < g.parameters.opportunisticGraftThreshold: trace "median score below opportunistic threshold", score = median.score - var avail = toSeq( - g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()) - - g.mesh.getOrDefault(topic, initHashSet[PubSubPeer]()) - ) - avail.keepIf do (x: PubSubPeer) -> bool: - # avoid negative score peers - x.score >= median.score and - # don't pick explicit peers - x.peerId notin g.parameters.directPeers and - # and avoid peers we are backing off - x.peerId notin g.backingOff.getOrDefault(topic) + var + avail: seq[PubSubPeer] + currentMesh = addr defaultMesh + g.mesh.withValue(topic, v): currentMesh = v + g.gossipSub.withValue(topic, peerList): + for it in peerList[]: + if + # avoid negative score peers + it.score >= median.score and + it notin currentMesh[] and + # don't pick explicit peers + it.peerId notin g.parameters.directPeers and + # and avoid peers we are backing off + it.peerId notin backingOff: + avail.add(it) - # by spec, grab only 2 - if avail.len > 2: - avail.setLen(2) + # by spec, grab only 2 + if avail.len > 1: + break for peer in avail: if g.mesh.addPeer(topic, peer): @@ -568,8 +625,6 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} = peer.iWantBudget = IWantPeerBudget peer.iHaveBudget = IHavePeerBudget - g.updateScores() - var meshMetrics = MeshMetrics() for t in toSeq(g.topics.keys): @@ -623,12 +678,10 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} = # {.pop.} # raises [Defect] proc heartbeat*(g: GossipSub) {.async.} = - while g.heartbeatRunning: + heartbeat "GossipSub", g.parameters.heartbeatInterval: trace "running heartbeat", instance = cast[int](g) g.onHeartbeat() for trigger in g.heartbeatEvents: trace "firing heartbeat event", instance = cast[int](g) trigger.fire() - - await sleepAsync(g.parameters.heartbeatInterval) diff --git a/libp2p/protocols/pubsub/gossipsub/scoring.nim b/libp2p/protocols/pubsub/gossipsub/scoring.nim index e1a40c59f..872ad0b6b 100644 --- a/libp2p/protocols/pubsub/gossipsub/scoring.nim +++ b/libp2p/protocols/pubsub/gossipsub/scoring.nim @@ -13,7 +13,7 @@ import std/[tables, sets, options] import chronos, chronicles, metrics import "."/[types] import ".."/[pubsubpeer] -import "../../.."/[peerid, multiaddress, utility, switch] +import "../../.."/[peerid, multiaddress, utility, switch, utils/heartbeat] declareGauge(libp2p_gossipsub_peers_scores, "the scores of the peers in gossipsub", labels = ["agent"]) declareCounter(libp2p_gossipsub_bad_score_disconnection, "the number of peers disconnected by gossipsub", labels = ["agent"]) @@ -242,7 +242,8 @@ proc updateScores*(g: GossipSub) = # avoid async 
trace "updated peer's score", peer, score = peer.score, n_topics, is_grafted - if g.parameters.disconnectBadPeers and stats.score < g.parameters.graylistThreshold: + if g.parameters.disconnectBadPeers and stats.score < g.parameters.graylistThreshold and + peer.peerId notin g.parameters.directPeers: debug "disconnecting bad score peer", peer, score = peer.score asyncSpawn(try: g.disconnectPeer(peer) except Exception as exc: raiseAssert exc.msg) @@ -253,6 +254,11 @@ proc updateScores*(g: GossipSub) = # avoid async trace "updated scores", peers = g.peers.len +proc scoringHeartbeat*(g: GossipSub) {.async.} = + heartbeat "Gossipsub scoring", g.parameters.decayInterval: + trace "running scoring heartbeat", instance = cast[int](g) + g.updateScores() + proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string]) = for tt in topics: let t = tt @@ -268,7 +274,7 @@ proc addCapped*[T](stat: var T, diff, cap: T) = stat += min(diff, cap - stat) proc rewardDelivered*( - g: GossipSub, peer: PubSubPeer, topics: openArray[string], first: bool) = + g: GossipSub, peer: PubSubPeer, topics: openArray[string], first: bool, delay = ZeroDuration) = for tt in topics: let t = tt if t notin g.topics: @@ -278,6 +284,10 @@ proc rewardDelivered*( let topicParams = g.topicParams.mgetOrPut(t, TopicParams.init()) # if in mesh add more delivery score + if delay > topicParams.meshMessageDeliveriesWindow: + # Too old + continue + g.withPeerStats(peer.peerId) do (stats: var PeerStats): stats.topicInfos.withValue(tt, tstats): if tstats[].inMesh: diff --git a/libp2p/protocols/pubsub/gossipsub/types.nim b/libp2p/protocols/pubsub/gossipsub/types.nim index dca37c4c8..11279eacf 100644 --- a/libp2p/protocols/pubsub/gossipsub/types.nim +++ b/libp2p/protocols/pubsub/gossipsub/types.nim @@ -138,10 +138,18 @@ type directPeers*: Table[PeerId, seq[MultiAddress]] disconnectBadPeers*: bool + enablePX*: bool BackoffTable* = Table[string, Table[PeerId, Moment]] ValidationSeenTable* = Table[MessageID, HashSet[PubSubPeer]] + RoutingRecordsPair* = tuple[id: PeerId, record: Option[PeerRecord]] + RoutingRecordsHandler* = + proc(peer: PeerId, + tag: string, # For gossipsub, the topic + peers: seq[RoutingRecordsPair]) + {.gcsafe, raises: [Defect].} + GossipSub* = ref object of FloodSub mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic fanout*: PeerTable # peers that we send messages to when we're not subscribed to the topic @@ -153,7 +161,8 @@ type control*: Table[string, ControlMessage] # pending control messages mcache*: MCache # messages cache validationSeen*: ValidationSeenTable # peers who sent us message in validation - heartbeatFut*: Future[void] # cancellation future for heartbeat interval + heartbeatFut*: Future[void] # cancellation future for heartbeat interval + scoringHeartbeatFut*: Future[void] # cancellation future for scoring heartbeat interval heartbeatRunning*: bool peerStats*: Table[PeerId, PeerStats] @@ -161,6 +170,7 @@ type topicParams*: Table[string, TopicParams] directPeersLoop*: Future[void] peersInIP*: Table[MultiAddress, HashSet[PeerId]] + routingRecordsHandler*: seq[RoutingRecordsHandler] # Callback for peer exchange heartbeatEvents*: seq[AsyncEvent] diff --git a/libp2p/protocols/pubsub/rpc/messages.nim b/libp2p/protocols/pubsub/rpc/messages.nim index 4b7dce1a2..1ebbf959b 100644 --- a/libp2p/protocols/pubsub/rpc/messages.nim +++ b/libp2p/protocols/pubsub/rpc/messages.nim @@ -10,14 +10,17 @@ {.push raises: [Defect].} import options, sequtils -import ../../../utility 
-import ../../../peerid +import "../../.."/[ + peerid, + routing_record, + utility + ] export options type PeerInfoMsg* = object - peerId*: seq[byte] + peerId*: PeerId signedPeerRecord*: seq[byte] SubOpts* = object diff --git a/libp2p/protocols/pubsub/timedcache.nim b/libp2p/protocols/pubsub/timedcache.nim index ab77e3581..ad47846fa 100644 --- a/libp2p/protocols/pubsub/timedcache.nim +++ b/libp2p/protocols/pubsub/timedcache.nim @@ -18,7 +18,7 @@ const Timeout* = 10.seconds # default timeout in ms type TimedEntry*[K] = ref object of RootObj key: K - expiresAt: Moment + addedAt: Moment next, prev: TimedEntry[K] TimedCache*[K] = object of RootObj @@ -27,7 +27,8 @@ type timeout: Duration func expire*(t: var TimedCache, now: Moment = Moment.now()) = - while t.head != nil and t.head.expiresAt < now: + let expirationLimit = now - t.timeout + while t.head != nil and t.head.addedAt < expirationLimit: t.entries.del(t.head.key) t.head.prev = nil t.head = t.head.next @@ -54,7 +55,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool = var res = t.del(k) # Refresh existing item - let node = TimedEntry[K](key: k, expiresAt: now + t.timeout) + let node = TimedEntry[K](key: k, addedAt: now) if t.head == nil: t.tail = node @@ -62,7 +63,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool = else: # search from tail because typically that's where we add when now grows var cur = t.tail - while cur != nil and node.expiresAt < cur.expiresAt: + while cur != nil and node.addedAt < cur.addedAt: cur = cur.prev if cur == nil: @@ -83,6 +84,10 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool = func contains*[K](t: TimedCache[K], k: K): bool = k in t.entries +func addedAt*[K](t: TimedCache[K], k: K): Moment = + t.entries.getOrDefault(k).addedAt + + func init*[K](T: type TimedCache[K], timeout: Duration = Timeout): T = T( timeout: timeout diff --git a/libp2p/protocols/relay.nim b/libp2p/protocols/relay.nim new file mode 100644 index 000000000..acc042b13 --- /dev/null +++ b/libp2p/protocols/relay.nim @@ -0,0 +1,488 @@ +## Nim-LibP2P +## Copyright (c) 2022 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. 
+ +{.push raises: [Defect].} + +import options +import sequtils, strutils, tables +import chronos, chronicles + +import ../peerinfo, + ../switch, + ../multiaddress, + ../stream/connection, + ../protocols/protocol, + ../transports/transport, + ../utility, + ../errors + +const + RelayCodec* = "/libp2p/circuit/relay/0.1.0" + MsgSize* = 4096 + MaxCircuit* = 1024 + MaxCircuitPerPeer* = 64 + +logScope: + topics = "libp2p relay" + +type + RelayType* = enum + Hop = 1 + Stop = 2 + Status = 3 + CanHop = 4 + RelayStatus* = enum + Success = 100 + HopSrcAddrTooLong = 220 + HopDstAddrTooLong = 221 + HopSrcMultiaddrInvalid = 250 + HopDstMultiaddrInvalid = 251 + HopNoConnToDst = 260 + HopCantDialDst = 261 + HopCantOpenDstStream = 262 + HopCantSpeakRelay = 270 + HopCantRelayToSelf = 280 + StopSrcAddrTooLong = 320 + StopDstAddrTooLong = 321 + StopSrcMultiaddrInvalid = 350 + StopDstMultiaddrInvalid = 351 + StopRelayRefused = 390 + MalformedMessage = 400 + + RelayError* = object of LPError + + RelayPeer* = object + peerId*: PeerID + addrs*: seq[MultiAddress] + + AddConn* = proc(conn: Connection): Future[void] {.gcsafe, raises: [Defect].} + + RelayMessage* = object + msgType*: Option[RelayType] + srcPeer*: Option[RelayPeer] + dstPeer*: Option[RelayPeer] + status*: Option[RelayStatus] + + Relay* = ref object of LPProtocol + switch*: Switch + peerId: PeerID + dialer: Dial + canHop: bool + streamCount: int + hopCount: CountTable[PeerId] + + addConn: AddConn + + maxCircuit*: int + maxCircuitPerPeer*: int + msgSize*: int + +proc encodeMsg*(msg: RelayMessage): ProtoBuffer = + result = initProtoBuffer() + + if isSome(msg.msgType): + result.write(1, msg.msgType.get().ord.uint) + if isSome(msg.srcPeer): + var peer = initProtoBuffer() + peer.write(1, msg.srcPeer.get().peerId) + for ma in msg.srcPeer.get().addrs: + peer.write(2, ma.data.buffer) + peer.finish() + result.write(2, peer.buffer) + if isSome(msg.dstPeer): + var peer = initProtoBuffer() + peer.write(1, msg.dstPeer.get().peerId) + for ma in msg.dstPeer.get().addrs: + peer.write(2, ma.data.buffer) + peer.finish() + result.write(3, peer.buffer) + if isSome(msg.status): + result.write(4, msg.status.get().ord.uint) + + result.finish() + +proc decodeMsg*(buf: seq[byte]): Option[RelayMessage] = + var + rMsg: RelayMessage + msgTypeOrd: uint32 + src: RelayPeer + dst: RelayPeer + statusOrd: uint32 + pbSrc: ProtoBuffer + pbDst: ProtoBuffer + + let + pb = initProtoBuffer(buf) + r1 = pb.getField(1, msgTypeOrd) + r2 = pb.getField(2, pbSrc) + r3 = pb.getField(3, pbDst) + r4 = pb.getField(4, statusOrd) + + if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr(): + return none(RelayMessage) + + if r2.get() and + (pbSrc.getField(1, src.peerId).isErr() or + pbSrc.getRepeatedField(2, src.addrs).isErr()): + return none(RelayMessage) + + if r3.get() and + (pbDst.getField(1, dst.peerId).isErr() or + pbDst.getRepeatedField(2, dst.addrs).isErr()): + return none(RelayMessage) + + if r1.get(): rMsg.msgType = some(RelayType(msgTypeOrd)) + if r2.get(): rMsg.srcPeer = some(src) + if r3.get(): rMsg.dstPeer = some(dst) + if r4.get(): rMsg.status = some(RelayStatus(statusOrd)) + some(rMsg) + +proc sendStatus*(conn: Connection, code: RelayStatus) {.async, gcsafe.} = + trace "send status", status = $code & "(" & $ord(code) & ")" + let + msg = RelayMessage( + msgType: some(RelayType.Status), + status: some(code)) + pb = encodeMsg(msg) + + await conn.writeLp(pb.buffer) + +proc handleHopStream(r: Relay, conn: Connection, msg: RelayMessage) {.async, gcsafe.} = + r.streamCount.inc() + defer: + 
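# ----------------------------------------------------------------------------
# Editor's note (hedged sketch): round-tripping a relay message through the
# encodeMsg/decodeMsg helpers defined above, much like the createMsg helper in
# the new tests/testrelay.nim. A CanHop probe carries no peers and no status;
# this assumes it runs in this module or with protocols/relay and options
# imported:
let
  canHop = RelayMessage(
    msgType: some(RelayType.CanHop),
    srcPeer: none(RelayPeer),
    dstPeer: none(RelayPeer),
    status: none(RelayStatus))
  encoded = encodeMsg(canHop)           # ProtoBuffer; sent with conn.writeLp(encoded.buffer)
  decoded = decodeMsg(encoded.buffer)   # Option[RelayMessage]
doAssert decoded.isSome() and decoded.get().msgType == some(RelayType.CanHop)
# ----------------------------------------------------------------------------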
r.streamCount.dec() + + if r.streamCount > r.maxCircuit: + trace "refusing connection; too many active circuits" + await sendStatus(conn, RelayStatus.HopCantSpeakRelay) + return + + proc checkMsg(): Result[RelayMessage, RelayStatus] = + if not r.canHop: + return err(RelayStatus.HopCantSpeakRelay) + if msg.srcPeer.isNone: + return err(RelayStatus.HopSrcMultiaddrInvalid) + let src = msg.srcPeer.get() + if src.peerId != conn.peerId: + return err(RelayStatus.HopSrcMultiaddrInvalid) + if msg.dstPeer.isNone: + return err(RelayStatus.HopDstMultiaddrInvalid) + let dst = msg.dstPeer.get() + if dst.peerId == r.switch.peerInfo.peerId: + return err(RelayStatus.HopCantRelayToSelf) + if not r.switch.isConnected(dst.peerId): + trace "relay not connected to dst", dst + return err(RelayStatus.HopNoConnToDst) + ok(msg) + + let check = checkMsg() + if check.isErr: + await sendStatus(conn, check.error()) + return + let + src = msg.srcPeer.get() + dst = msg.dstPeer.get() + + # TODO: if r.acl # access control list + # and not r.acl.AllowHop(src.peerId, dst.peerId) + # sendStatus(conn, RelayStatus.HopCantSpeakRelay) + + r.hopCount.inc(src.peerId) + r.hopCount.inc(dst.peerId) + defer: + r.hopCount.inc(src.peerId, -1) + r.hopCount.inc(dst.peerId, -1) + + if r.hopCount[src.peerId] > r.maxCircuitPerPeer: + trace "refusing connection; too many connections from src", src, dst + await sendStatus(conn, RelayStatus.HopCantSpeakRelay) + return + + if r.hopCount[dst.peerId] > r.maxCircuitPerPeer: + trace "refusing connection; too many connections to dst", src, dst + await sendStatus(conn, RelayStatus.HopCantSpeakRelay) + return + + let connDst = try: + await r.switch.dial(dst.peerId, @[RelayCodec]) + except CancelledError as exc: + raise exc + except CatchableError as exc: + trace "error opening relay stream", dst, exc=exc.msg + await sendStatus(conn, RelayStatus.HopCantDialDst) + return + defer: + await connDst.close() + + let msgToSend = RelayMessage( + msgType: some(RelayType.Stop), + srcPeer: some(src), + dstPeer: some(dst), + status: none(RelayStatus)) + + let msgRcvFromDstOpt = try: + await connDst.writeLp(encodeMsg(msgToSend).buffer) + decodeMsg(await connDst.readLp(r.msgSize)) + except CancelledError as exc: + raise exc + except CatchableError as exc: + trace "error writing stop handshake or reading stop response", exc=exc.msg + await sendStatus(conn, RelayStatus.HopCantOpenDstStream) + return + + if msgRcvFromDstOpt.isNone: + trace "error reading stop response", msg = msgRcvFromDstOpt + await sendStatus(conn, RelayStatus.HopCantOpenDstStream) + return + + let msgRcvFromDst = msgRcvFromDstOpt.get() + if msgRcvFromDst.msgType.isNone or msgRcvFromDst.msgType.get() != RelayType.Status: + trace "unexpected relay stop response", msgType = msgRcvFromDst.msgType + await sendStatus(conn, RelayStatus.HopCantOpenDstStream) + return + + if msgRcvFromDst.status.isNone or msgRcvFromDst.status.get() != RelayStatus.Success: + trace "relay stop failure", status=msgRcvFromDst.status + await sendStatus(conn, RelayStatus.HopCantOpenDstStream) + return + + await sendStatus(conn, RelayStatus.Success) + + trace "relaying connection", src, dst + + proc bridge(conn: Connection, connDst: Connection) {.async.} = + const bufferSize = 4096 + var + bufSrcToDst: array[bufferSize, byte] + bufDstToSrc: array[bufferSize, byte] + futSrc = conn.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1) + futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1) + bytesSendFromSrcToDst = 0 + bytesSendFromDstToSrc = 0 + bufRead: int + + while not
conn.closed() and not connDst.closed(): + try: + await futSrc or futDst + if futSrc.finished(): + bufRead = await futSrc + bytesSendFromSrcToDst.inc(bufRead) + await connDst.write(@bufSrcToDst[0..= 3: + result = CircuitRelay.match(sma[^2].get()) and + P2PPattern.match(sma[^1].get()) + trace "Handles return", ma, result + +proc new*(T: typedesc[RelayTransport], relay: Relay, upgrader: Upgrade): T = + result = T(relay: relay, upgrader: upgrader) + result.running = true + result.queue = newAsyncQueue[Connection](0) diff --git a/libp2p/protocols/secure/secure.nim b/libp2p/protocols/secure/secure.nim index 3e098767f..5e9a30d1d 100644 --- a/libp2p/protocols/secure/secure.nim +++ b/libp2p/protocols/secure/secure.nim @@ -9,7 +9,7 @@ {.push raises: [Defect].} -import std/[options, strformat] +import std/[strformat] import chronos, chronicles, bearssl import ../protocol, ../../stream/streamseq, diff --git a/libp2p/routing_record.nim b/libp2p/routing_record.nim index da60a4765..b3410581c 100644 --- a/libp2p/routing_record.nim +++ b/libp2p/routing_record.nim @@ -12,7 +12,7 @@ {.push raises: [Defect].} import std/[sequtils, times] -import pkg/stew/[results, byteutils] +import pkg/stew/results import multiaddress, multicodec, @@ -22,11 +22,6 @@ import export peerid, multiaddress, signed_envelope -## Constants relating to signed peer records -const - EnvelopeDomain = multiCodec("libp2p-peer-record") # envelope domain as per RFC0002 - EnvelopePayloadType= @[(byte) 0x03, (byte) 0x01] # payload_type for routing records as spec'ed in RFC0003 - type AddressInfo* = object address*: MultiAddress @@ -76,8 +71,9 @@ proc encode*(record: PeerRecord): seq[byte] = proc init*(T: typedesc[PeerRecord], peerId: PeerId, - seqNo: uint64, - addresses: seq[MultiAddress]): T = + addresses: seq[MultiAddress], + seqNo = getTime().toUnix().uint64 # follows the recommended implementation, using unix epoch as seq no. + ): T = PeerRecord( peerId: peerId, @@ -87,39 +83,13 @@ proc init*(T: typedesc[PeerRecord], ## Functions related to signed peer records +type SignedPeerRecord* = SignedPayload[PeerRecord] -proc init*(T: typedesc[Envelope], - privateKey: PrivateKey, - peerRecord: PeerRecord): Result[Envelope, CryptoError] = - - ## Init a signed envelope wrapping a peer record +proc payloadDomain*(T: typedesc[PeerRecord]): string = $multiCodec("libp2p-peer-record") +proc payloadType*(T: typedesc[PeerRecord]): seq[byte] = @[(byte) 0x03, (byte) 0x01] - let envelope = ? Envelope.init(privateKey, - EnvelopePayloadType, - peerRecord.encode(), - $EnvelopeDomain) - - ok(envelope) - -proc init*(T: typedesc[Envelope], - peerId: PeerId, - addresses: seq[MultiAddress], - privateKey: PrivateKey): Result[Envelope, CryptoError] = - ## Creates a signed peer record for this peer: - ## a peer routing record according to https://github.com/libp2p/specs/blob/500a7906dd7dd8f64e0af38de010ef7551fd61b6/RFC/0003-routing-records.md - ## in a signed envelope according to https://github.com/libp2p/specs/blob/500a7906dd7dd8f64e0af38de010ef7551fd61b6/RFC/0002-signed-envelopes.md - - # First create a peer record from the peer info - let peerRecord = PeerRecord.init(peerId, - getTime().toUnix().uint64, # This currently follows the recommended implementation, using unix epoch as seq no. - addresses) - - let envelope = ? Envelope.init(privateKey, - peerRecord) - - ok(envelope) - -proc getSignedPeerRecord*(pb: ProtoBuffer, field: int, - value: var Envelope): ProtoResult[bool] {. 
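# ----------------------------------------------------------------------------
# Editor's note (hedged sketch): with SignedPeerRecord now defined as
# SignedPayload[PeerRecord] (see the payloadDomain/payloadType helpers above and
# the generic SignedPayload introduced in signed_envelope.nim below), creating
# and verifying a signed routing record follows the pattern used in
# tests/testrouting_record.nim. Import path assumes the installed libp2p layout:
import libp2p/[crypto/crypto, peerid, multiaddress, routing_record]

let
  rng = newRng()
  privKey = PrivateKey.random(rng[]).tryGet()
  peerId = PeerId.init(privKey).tryGet()
  addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet()]
  # seqNo now defaults to the current unix time in PeerRecord.init
  spr = SignedPeerRecord.init(privKey, PeerRecord.init(peerId, addrs)).tryGet()
  wire = spr.envelope.encode().tryGet()
  decoded = SignedPeerRecord.decode(wire).tryGet()  # checks signature and payload type
doAssert decoded.data.peerId == peerId
# ----------------------------------------------------------------------------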
- inline.} = - getField(pb, field, value, $EnvelopeDomain) +proc checkValid*(spr: SignedPeerRecord): Result[void, EnvelopeError] = + if not spr.data.peerId.match(spr.envelope.publicKey): + err(EnvelopeInvalidSignature) + else: + ok() diff --git a/libp2p/signed_envelope.nim b/libp2p/signed_envelope.nim index 6210ca4e2..6568b6029 100644 --- a/libp2p/signed_envelope.nim +++ b/libp2p/signed_envelope.nim @@ -11,6 +11,7 @@ {.push raises: [Defect].} +import std/sugar import pkg/stew/[results, byteutils] import multicodec, crypto/crypto, @@ -23,7 +24,8 @@ type EnvelopeError* = enum EnvelopeInvalidProtobuf, EnvelopeFieldMissing, - EnvelopeInvalidSignature + EnvelopeInvalidSignature, + EnvelopeWrongType Envelope* = object publicKey*: PublicKey @@ -116,3 +118,52 @@ proc getField*(pb: ProtoBuffer, field: int, ok(true) else: err(ProtoError.IncorrectBlob) + +type + SignedPayload*[T] = object + # T needs to have .encode(), .decode(), .payloadType(), .domain() + envelope*: Envelope + data*: T + +proc init*[T](_: typedesc[SignedPayload[T]], + privateKey: PrivateKey, + data: T): Result[SignedPayload[T], CryptoError] = + mixin encode + + let envelope = ? Envelope.init(privateKey, + T.payloadType(), + data.encode(), + T.payloadDomain) + + ok(SignedPayload[T](data: data, envelope: envelope)) + +proc getField*[T](pb: ProtoBuffer, field: int, + value: var SignedPayload[T]): ProtoResult[bool] {. + inline.} = + if not ? getField(pb, field, value.envelope, T.payloadDomain): + ok(false) + else: + mixin decode + value.data = ? T.decode(value.envelope.payload).mapErr(x => ProtoError.IncorrectBlob) + ok(true) + +proc decode*[T]( + _: typedesc[SignedPayload[T]], + buffer: seq[byte] + ): Result[SignedPayload[T], EnvelopeError] = + + let + envelope = ? Envelope.decode(buffer, T.payloadDomain) + data = ? T.decode(envelope.payload).mapErr(x => EnvelopeInvalidProtobuf) + signedPayload = SignedPayload[T](envelope: envelope, data: data) + + if envelope.payloadType != T.payloadType: + return err(EnvelopeWrongType) + + when compiles(? signedPayload.checkValid()): + ? 
signedPayload.checkValid() + + ok(signedPayload) + +proc encode*[T](msg: SignedPayload[T]): Result[seq[byte], CryptoError] = + msg.envelope.encode() diff --git a/libp2p/switch.nim b/libp2p/switch.nim index da68e8608..c8957703d 100644 --- a/libp2p/switch.nim +++ b/libp2p/switch.nim @@ -86,6 +86,11 @@ proc removePeerEventHandler*(s: Switch, kind: PeerEventKind) = s.connManager.removePeerEventHandler(handler, kind) +method addTransport*(s: Switch, + t: Transport) = + s.transports &= t + s.dialer.addTransport(t) + proc isConnected*(s: Switch, peerId: PeerId): bool = ## returns true if the peer has one or more ## associated connections (sockets) @@ -248,7 +253,7 @@ proc start*(s: Switch) {.async, gcsafe.} = it notin addrs ) - if addrs.len > 0: + if addrs.len > 0 or t.running: startFuts.add(t.start(addrs)) await allFutures(startFuts) @@ -261,10 +266,12 @@ proc start*(s: Switch) {.async, gcsafe.} = "Failed to start one transport", s.error) for t in s.transports: # for each transport - if t.addrs.len > 0: + if t.addrs.len > 0 or t.running: s.acceptFuts.add(s.accept(t)) s.peerInfo.addrs &= t.addrs + s.peerInfo.update() + debug "Started libp2p node", peer = s.peerInfo proc newSwitch*(peerInfo: PeerInfo, @@ -274,7 +281,8 @@ proc newSwitch*(peerInfo: PeerInfo, secureManagers: openArray[Secure] = [], connManager: ConnManager, ms: MultistreamSelect, - nameResolver: NameResolver = nil): Switch + nameResolver: NameResolver = nil, + peerStore = PeerStore.new()): Switch {.raises: [Defect, LPError].} = if secureManagers.len == 0: raise newException(LPError, "Provide at least one secure manager") @@ -284,10 +292,10 @@ proc newSwitch*(peerInfo: PeerInfo, ms: ms, transports: transports, connManager: connManager, - peerStore: PeerStore.new(), + peerStore: peerStore, dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver), nameResolver: nameResolver) - switch.connManager.peerStore = switch.peerStore + switch.connManager.peerStore = peerStore switch.mount(identity) return switch diff --git a/libp2p/transports/wstransport.nim b/libp2p/transports/wstransport.nim index 4f6444294..581e61f31 100644 --- a/libp2p/transports/wstransport.nim +++ b/libp2p/transports/wstransport.nim @@ -30,6 +30,8 @@ export transport, websock const WsTransportTrackerName* = "libp2p.wstransport" + DefaultHeadersTimeout = 3.seconds + type WsStream = ref object of Connection session: WSSession @@ -69,12 +71,14 @@ method readOnce*( if res == 0 and s.session.readyState == ReadyState.Closed: raise newLPStreamEOFError() + s.activity = true # reset activity flag return res method write*( s: WsStream, msg: seq[byte]): Future[void] {.async.} = mapExceptions(await s.session.send(msg, Opcode.Binary)) + s.activity = true # reset activity flag method closeImpl*(s: WsStream): Future[void] {.async.} = await s.session.close() @@ -92,6 +96,7 @@ type tlsCertificate: TLSCertificate tlsFlags: set[TLSFlags] flags: set[ServerFlags] + handshakeTimeout: Duration factories: seq[ExtFactory] rng: Rng @@ -131,9 +136,13 @@ method start*( address = ma.initTAddress().tryGet(), tlsPrivateKey = self.tlsPrivateKey, tlsCertificate = self.tlsCertificate, - flags = self.flags) + flags = self.flags, + handshakeTimeout = self.handshakeTimeout) else: - HttpServer.create(ma.initTAddress().tryGet()) + HttpServer.create( + ma.initTAddress().tryGet(), + handshakeTimeout = self.handshakeTimeout + ) self.httpservers &= httpserver @@ -222,19 +231,19 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} = if not self.running: raise 
newTransportClosedError() + if self.acceptFuts.len <= 0: + self.acceptFuts = self.httpservers.mapIt(it.accept()) + + if self.acceptFuts.len <= 0: + return + + let + finished = await one(self.acceptFuts) + index = self.acceptFuts.find(finished) + + self.acceptFuts[index] = self.httpservers[index].accept() + try: - if self.acceptFuts.len <= 0: - self.acceptFuts = self.httpservers.mapIt(it.accept()) - - if self.acceptFuts.len <= 0: - return - - let - finished = await one(self.acceptFuts) - index = self.acceptFuts.find(finished) - - self.acceptFuts[index] = self.httpservers[index].accept() - let req = await finished try: @@ -250,6 +259,8 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} = debug "OS Error", exc = exc.msg except WebSocketError as exc: debug "Websocket Error", exc = exc.msg + except HttpError as exc: + debug "Http Error", exc = exc.msg except AsyncStreamError as exc: debug "AsyncStream Error", exc = exc.msg except TransportTooManyError as exc: @@ -301,7 +312,8 @@ proc new*( tlsFlags: set[TLSFlags] = {}, flags: set[ServerFlags] = {}, factories: openArray[ExtFactory] = [], - rng: Rng = nil): T = + rng: Rng = nil, + handshakeTimeout = DefaultHeadersTimeout): T = T( upgrader: upgrade, @@ -310,14 +322,16 @@ proc new*( tlsFlags: tlsFlags, flags: flags, factories: @factories, - rng: rng) + rng: rng, + handshakeTimeout: handshakeTimeout) proc new*( T: typedesc[WsTransport], upgrade: Upgrade, flags: set[ServerFlags] = {}, factories: openArray[ExtFactory] = [], - rng: Rng = nil): T = + rng: Rng = nil, + handshakeTimeout = DefaultHeadersTimeout): T = T.new( upgrade = upgrade, @@ -325,4 +339,5 @@ proc new*( tlsCertificate = nil, flags = flags, factories = @factories, - rng = rng) + rng = rng, + handshakeTimeout = handshakeTimeout) diff --git a/libp2p/utils/heartbeat.nim b/libp2p/utils/heartbeat.nim new file mode 100644 index 000000000..3fad818a0 --- /dev/null +++ b/libp2p/utils/heartbeat.nim @@ -0,0 +1,27 @@ +# Nim-Libp2p +# Copyright (c) 2022 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. 
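# ----------------------------------------------------------------------------
# Editor's note (hedged sketch): the heartbeat template defined just below runs
# its body once per interval and, when an iteration overruns, logs a
# "Missed heartbeat" and resets its schedule. Minimal usage, mirroring the
# start/cancel pattern of the new tests/testheartbeat.nim; names, durations and
# the import path are illustrative assumptions:
import chronos
import libp2p/utils/heartbeat

proc main() {.async.} =
  var ticks = 0
  proc tickLoop() {.async.} =
    heartbeat "example ticker", 100.milliseconds:
      ticks.inc()
  let loop = tickLoop()
  await sleepAsync(550.milliseconds)
  await loop.cancelAndWait()   # stop the loop cleanly
  doAssert ticks in 4 .. 8     # first tick fires immediately, then ~one per interval

waitFor main()
# ----------------------------------------------------------------------------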
+ +{.push raises: [Defect].} + +import sequtils +import chronos, chronicles + +export chronicles + +template heartbeat*(name: string, interval: Duration, body: untyped): untyped = + var nextHeartbeat = Moment.now() + while true: + body + + nextHeartbeat += interval + let now = Moment.now() + if nextHeartbeat < now: + info "Missed heartbeat", heartbeat = name, delay = now - nextHeartbeat + nextHeartbeat = now + interval + await sleepAsync(nextHeartbeat - now) diff --git a/tests/pubsub/testgossipsub.nim b/tests/pubsub/testgossipsub.nim index 5986d47ab..416368dbf 100644 --- a/tests/pubsub/testgossipsub.nim +++ b/tests/pubsub/testgossipsub.nim @@ -932,3 +932,76 @@ suite "GossipSub": it.switch.stop()))) await allFuturesThrowing(nodesFut) + + asyncTest "e2e - GossipSub peer exchange": + # A, B & C are subscribed to something + # B unsubcribe from it, it should send + # PX to A & C + # + # C sent his SPR, not A + proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = + discard # not used in this test + + let + nodes = generateNodes( + 2, + gossip = true, + enablePX = true) & + generateNodes(1, gossip = true, sendSignedPeerRecord = true) + + # start switches + nodesFut = await allFinished( + nodes[0].switch.start(), + nodes[1].switch.start(), + nodes[2].switch.start(), + ) + + # start pubsub + await allFuturesThrowing( + allFinished( + nodes[0].start(), + nodes[1].start(), + nodes[2].start(), + )) + + var + gossip0 = GossipSub(nodes[0]) + gossip1 = GossipSub(nodes[1]) + gossip2 = GossipSub(nodes[1]) + + await subscribeNodes(nodes) + + nodes[0].subscribe("foobar", handler) + nodes[1].subscribe("foobar", handler) + nodes[2].subscribe("foobar", handler) + for x in 0..<3: + for y in 0..<3: + if x != y: + await waitSub(nodes[x], nodes[y], "foobar") + + var passed: Future[void] = newFuture[void]() + gossip0.routingRecordsHandler.add(proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) = + check: + tag == "foobar" + peers.len == 2 + peers[0].record.isSome() xor peers[1].record.isSome() + passed.complete() + ) + nodes[1].unsubscribe("foobar", handler) + + await passed.wait(5.seconds) + + await allFuturesThrowing( + nodes[0].switch.stop(), + nodes[1].switch.stop(), + nodes[2].switch.stop() + ) + + await allFuturesThrowing( + nodes[0].stop(), + nodes[1].stop(), + nodes[2].stop() + ) + + await allFuturesThrowing(nodesFut.concat()) + diff --git a/tests/pubsub/testgossipsub2.nim b/tests/pubsub/testgossipsub2.nim index 6e9c8913d..70d44b865 100644 --- a/tests/pubsub/testgossipsub2.nim +++ b/tests/pubsub/testgossipsub2.nim @@ -182,10 +182,6 @@ suite "GossipSub": await allFuturesThrowing(nodesFut.concat()) asyncTest "GossipSub test directPeers": - var handlerFut = newFuture[bool]() - proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = - check topic == "foobar" - handlerFut.complete(true) let nodes = generateNodes(2, gossip = true) @@ -221,7 +217,7 @@ suite "GossipSub": # DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN ### await subscribeNodes(nodes) - nodes[0].subscribe("foobar", handler) + proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard nodes[1].subscribe("foobar", handler) await invalidDetected.wait(10.seconds) @@ -238,6 +234,113 @@ suite "GossipSub": await allFuturesThrowing(nodesFut.concat()) + asyncTest "GossipSub directPeers: always forward messages": + let + nodes = generateNodes(2, gossip = true) + + # start switches + nodesFut = await allFinished( + nodes[0].switch.start(), + nodes[1].switch.start(), + ) + + 
GossipSub(nodes[0]).parameters.directPeers[nodes[1].switch.peerInfo.peerId] = nodes[1].switch.peerInfo.addrs + GossipSub(nodes[1]).parameters.directPeers[nodes[0].switch.peerInfo.peerId] = nodes[0].switch.peerInfo.addrs + + # start pubsub + await allFuturesThrowing( + allFinished( + nodes[0].start(), + nodes[1].start(), + )) + + var handlerFut = newFuture[void]() + proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = + check topic == "foobar" + handlerFut.complete() + + nodes[0].subscribe("foobar", handler) + nodes[1].subscribe("foobar", handler) + + tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1 + + await handlerFut + + # peer shouldn't be in our mesh + check "foobar" notin GossipSub(nodes[0]).mesh + check "foobar" notin GossipSub(nodes[1]).mesh + + await allFuturesThrowing( + nodes[0].switch.stop(), + nodes[1].switch.stop() + ) + + await allFuturesThrowing( + nodes[0].stop(), + nodes[1].stop() + ) + + await allFuturesThrowing(nodesFut.concat()) + + asyncTest "GossipSub directPeers: don't kick direct peer with low score": + let + nodes = generateNodes(2, gossip = true) + + # start switches + nodesFut = await allFinished( + nodes[0].switch.start(), + nodes[1].switch.start(), + ) + + GossipSub(nodes[0]).parameters.directPeers[nodes[1].switch.peerInfo.peerId] = nodes[1].switch.peerInfo.addrs + GossipSub(nodes[1]).parameters.directPeers[nodes[0].switch.peerInfo.peerId] = nodes[0].switch.peerInfo.addrs + + GossipSub(nodes[1]).parameters.disconnectBadPeers = true + GossipSub(nodes[1]).parameters.graylistThreshold = 100000 + + # start pubsub + await allFuturesThrowing( + allFinished( + nodes[0].start(), + nodes[1].start(), + )) + + var handlerFut = newFuture[void]() + proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = + check topic == "foobar" + handlerFut.complete() + + nodes[0].subscribe("foobar", handler) + nodes[1].subscribe("foobar", handler) + + tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1 + + await handlerFut + + GossipSub(nodes[1]).updateScores() + # peer shouldn't be in our mesh + check: + GossipSub(nodes[1]).peerStats[nodes[0].switch.peerInfo.peerId].score < GossipSub(nodes[1]).parameters.graylistThreshold + GossipSub(nodes[1]).updateScores() + + handlerFut = newFuture[void]() + tryPublish await nodes[0].publish("foobar", toBytes("hellow2")), 1 + + # Without directPeers, this would fail + await handlerFut.wait(1.seconds) + + await allFuturesThrowing( + nodes[0].switch.stop(), + nodes[1].switch.stop() + ) + + await allFuturesThrowing( + nodes[0].stop(), + nodes[1].stop() + ) + + await allFuturesThrowing(nodesFut.concat()) + asyncTest "GossipsSub peers disconnections mechanics": var runs = 10 @@ -335,3 +438,60 @@ suite "GossipSub": it.switch.stop()))) await allFuturesThrowing(nodesFut) + + asyncTest "GossipSub scoring - decayInterval": + + let + nodes = generateNodes(2, gossip = true) + + # start switches + nodesFut = await allFinished( + nodes[0].switch.start(), + nodes[1].switch.start(), + ) + + var gossip = GossipSub(nodes[0]) + # MacOs has some nasty jitter when sleeping + # (up to 7 ms), so we need some pretty long + # sleeps to be safe here + gossip.parameters.decayInterval = 300.milliseconds + + # start pubsub + await allFuturesThrowing( + allFinished( + nodes[0].start(), + nodes[1].start(), + )) + + var handlerFut = newFuture[void]() + proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = + handlerFut.complete() + + await subscribeNodes(nodes) + + nodes[0].subscribe("foobar", handler) + 
nodes[1].subscribe("foobar", handler) + + tryPublish await nodes[0].publish("foobar", toBytes("hello")), 1 + + await handlerFut + + gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries = 100 + gossip.topicParams["foobar"].meshMessageDeliveriesDecay = 0.9 + await sleepAsync(1500.milliseconds) + + # We should have decayed 5 times, though allowing 4..6 + check: + gossip.peerStats[nodes[1].peerInfo.peerId].topicInfos["foobar"].meshMessageDeliveries in 50.0 .. 66.0 + + await allFuturesThrowing( + nodes[0].switch.stop(), + nodes[1].switch.stop() + ) + + await allFuturesThrowing( + nodes[0].stop(), + nodes[1].stop() + ) + + await allFuturesThrowing(nodesFut.concat()) diff --git a/tests/pubsub/utils.nim b/tests/pubsub/utils.nim index 846c8bb01..993efdd01 100644 --- a/tests/pubsub/utils.nim +++ b/tests/pubsub/utils.nim @@ -40,11 +40,13 @@ proc generateNodes*( verifySignature: bool = libp2p_pubsub_verify, anonymize: bool = libp2p_pubsub_anonymize, sign: bool = libp2p_pubsub_sign, + sendSignedPeerRecord = false, unsubscribeBackoff = 1.seconds, - maxMessageSize: int = 1024 * 1024): seq[PubSub] = + maxMessageSize: int = 1024 * 1024, + enablePX: bool = false): seq[PubSub] = for i in 0..= 4: + period = 75.milliseconds + let hb = t() + await sleepAsync(500.milliseconds) + await hb.cancelAndWait() + + # 4x 30 ms heartbeat = 120ms + # (500 ms - 120 ms) / 75ms = 5x 75ms + # total 9 + check: + i in 8..10 + + asyncTest "catch up on slow heartbeat": + var i = 0 + proc t() {.async.} = + heartbeat "this is normal", 30.milliseconds: + if i < 3: + await sleepAsync(150.milliseconds) + i.inc() + + let hb = t() + await sleepAsync(900.milliseconds) + await hb.cancelAndWait() + # 3x (150ms heartbeat + 30ms interval) = 540ms + # 360ms remaining, / 30ms = 12x + # total 15 + check: + i in 14..16 diff --git a/tests/testidentify.nim b/tests/testidentify.nim index 4b3a0d70e..96ea1c293 100644 --- a/tests/testidentify.nim +++ b/tests/testidentify.nim @@ -77,6 +77,7 @@ suite "Identify": check id.protoVersion.get() == ProtoVersion check id.agentVersion.get() == AgentVersion check id.protos == @["/test/proto1/1.0.0", "/test/proto2/1.0.0"] + check id.signedPeerRecord.isNone() asyncTest "custom agent version": const customAgentVersion = "MY CUSTOM AGENT STRING" @@ -100,6 +101,7 @@ suite "Identify": check id.protoVersion.get() == ProtoVersion check id.agentVersion.get() == customAgentVersion check id.protos == @["/test/proto1/1.0.0", "/test/proto2/1.0.0"] + check id.signedPeerRecord.isNone() asyncTest "handle failed identify": msListen.addHandler(IdentifyCodec, identifyProto1) @@ -123,6 +125,27 @@ suite "Identify": discard await msDial.select(conn, IdentifyCodec) discard await identifyProto2.identify(conn, pi2.peerId) + asyncTest "can send signed peer record": + msListen.addHandler(IdentifyCodec, identifyProto1) + identifyProto1.sendSignedPeerRecord = true + serverFut = transport1.start(ma) + proc acceptHandler(): Future[void] {.async, gcsafe.} = + let c = await transport1.accept() + await msListen.handle(c) + + acceptFut = acceptHandler() + conn = await transport2.dial(transport1.addrs[0]) + + discard await msDial.select(conn, IdentifyCodec) + let id = await identifyProto2.identify(conn, remotePeerInfo.peerId) + + check id.pubkey.get() == remoteSecKey.getPublicKey().get() + check id.addrs == ma + check id.protoVersion.get() == ProtoVersion + check id.agentVersion.get() == AgentVersion + check id.protos == @["/test/proto1/1.0.0", "/test/proto2/1.0.0"] + check id.signedPeerRecord.get() == 
remotePeerInfo.signedPeerRecord.envelope + suite "handle push identify message": var switch1 {.threadvar.}: Switch @@ -154,11 +177,15 @@ suite "Identify": IdentifyPushCodec) check: - switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet() - switch2.peerStore.addressBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.addrs.toHashSet() + switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs + switch2.peerStore[AddressBook][switch1.peerInfo.peerId] == switch1.peerInfo.addrs - switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet() - switch2.peerStore.addressBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.addrs.toHashSet() + switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs + switch2.peerStore[AddressBook][switch1.peerInfo.peerId] == switch1.peerInfo.addrs + + #switch1.peerStore.signedPeerRecordBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.signedPeerRecord.get() + #switch2.peerStore.signedPeerRecordBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.signedPeerRecord.get() + # no longer sent by default proc closeAll() {.async.} = await conn.close() @@ -171,20 +198,20 @@ suite "Identify": switch2.peerInfo.addrs.add(MultiAddress.init("/ip4/127.0.0.1/tcp/5555").tryGet()) check: - switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) != switch2.peerInfo.addrs.toHashSet() - switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) != switch2.peerInfo.protocols.toHashSet() + switch1.peerStore[AddressBook][switch2.peerInfo.peerId] != switch2.peerInfo.addrs + switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] != switch2.peerInfo.protocols await identifyPush2.push(switch2.peerInfo, conn) - check await checkExpiring(switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.protocols.toHashSet()) - check await checkExpiring(switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet()) + check await checkExpiring(switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols) + check await checkExpiring(switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs) await closeAll() # Wait the very end to be sure that the push has been processed check: - switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.protocols.toHashSet() - switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.addrs.toHashSet() + switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols + switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs asyncTest "wrong peer id push identify": @@ -192,8 +219,8 @@ suite "Identify": switch2.peerInfo.addrs.add(MultiAddress.init("/ip4/127.0.0.1/tcp/5555").tryGet()) check: - switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) != switch2.peerInfo.addrs.toHashSet() - switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) != switch2.peerInfo.protocols.toHashSet() + switch1.peerStore[AddressBook][switch2.peerInfo.peerId] != switch2.peerInfo.addrs + switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] != switch2.peerInfo.protocols let oldPeerId = switch2.peerInfo.peerId switch2.peerInfo = PeerInfo.new(PrivateKey.random(newRng()[]).get()) @@ -210,5 +237,5 @@ suite "Identify": # Wait the very end to be sure that the push has been processed check: - switch1.peerStore.protoBook.get(oldPeerId) != switch2.peerInfo.protocols.toHashSet() - 
switch1.peerStore.addressBook.get(oldPeerId) != switch2.peerInfo.addrs.toHashSet() + switch1.peerStore[ProtoBook][oldPeerId] != switch2.peerInfo.protocols + switch1.peerStore[AddressBook][oldPeerId] != switch2.peerInfo.addrs diff --git a/tests/testinterop.nim b/tests/testinterop.nim index 3ab86f6cd..cecace6d0 100644 --- a/tests/testinterop.nim +++ b/tests/testinterop.nim @@ -2,7 +2,7 @@ import options, tables import chronos, chronicles, stew/byteutils import helpers import ../libp2p -import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto] +import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto, protocols/relay ] type DaemonPeerInfo = daemonapi.PeerInfo @@ -471,3 +471,158 @@ suite "Interop": asyncTest "gossipsub: node publish many": await testPubSubNodePublish(gossip = true, count = 10) + + asyncTest "NativeSrc -> NativeRelay -> DaemonDst": + proc daemonHandler(api: DaemonAPI, stream: P2PStream) {.async.} = + check "line1" == string.fromBytes(await stream.transp.readLp()) + discard await stream.transp.writeLp("line2") + check "line3" == string.fromBytes(await stream.transp.readLp()) + discard await stream.transp.writeLp("line4") + await stream.close() + let + maSrc = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + maRel = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + src = SwitchBuilder.new() + .withRng(crypto.newRng()) + .withAddresses(@[ maSrc ]) + .withTcpTransport() + .withMplex() + .withNoise() + .withRelayTransport(false) + .build() + rel = SwitchBuilder.new() + .withRng(crypto.newRng()) + .withAddresses(@[ maRel ]) + .withTcpTransport() + .withMplex() + .withNoise() + .withRelayTransport(true) + .build() + + await src.start() + await rel.start() + let daemonNode = await newDaemonApi() + let daemonPeer = await daemonNode.identity() + let maStr = $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p-circuit/p2p/" & $daemonPeer.peer + let maddr = MultiAddress.init(maStr).tryGet() + await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs) + await rel.connect(daemonPeer.peer, daemonPeer.addresses) + + await daemonNode.addHandler(@[ "/testCustom" ], daemonHandler) + + let conn = await src.dial(daemonPeer.peer, @[ maddr ], @[ "/testCustom" ]) + + await conn.writeLp("line1") + check string.fromBytes(await conn.readLp(1024)) == "line2" + + await conn.writeLp("line3") + check string.fromBytes(await conn.readLp(1024)) == "line4" + + await allFutures(src.stop(), rel.stop()) + await daemonNode.close() + + asyncTest "DaemonSrc -> NativeRelay -> NativeDst": + proc customHandler(conn: Connection, proto: string) {.async.} = + check "line1" == string.fromBytes(await conn.readLp(1024)) + await conn.writeLp("line2") + check "line3" == string.fromBytes(await conn.readLp(1024)) + await conn.writeLp("line4") + await conn.close() + let + protos = @[ "/customProto", RelayCodec ] + var + customProto = new LPProtocol + customProto.handler = customHandler + customProto.codec = protos[0] + let + maRel = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + maDst = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + rel = SwitchBuilder.new() + .withRng(crypto.newRng()) + .withAddresses(@[ maRel ]) + .withTcpTransport() + .withMplex() + .withNoise() + .withRelayTransport(true) + .build() + dst = SwitchBuilder.new() + .withRng(crypto.newRng()) + .withAddresses(@[ maDst ]) + .withTcpTransport() + .withMplex() + .withNoise() + .withRelayTransport(false) + .build() + + dst.mount(customProto) + await rel.start() + await dst.start() + let daemonNode = 
await newDaemonApi() + let daemonPeer = await daemonNode.identity() + let maStr = $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p-circuit/p2p/" & $dst.peerInfo.peerId + let maddr = MultiAddress.init(maStr).tryGet() + await daemonNode.connect(rel.peerInfo.peerId, rel.peerInfo.addrs) + await rel.connect(dst.peerInfo.peerId, dst.peerInfo.addrs) + await daemonNode.connect(dst.peerInfo.peerId, @[ maddr ]) + var stream = await daemonNode.openStream(dst.peerInfo.peerId, protos) + + discard await stream.transp.writeLp("line1") + check string.fromBytes(await stream.transp.readLp()) == "line2" + discard await stream.transp.writeLp("line3") + check string.fromBytes(await stream.transp.readLp()) == "line4" + + await allFutures(dst.stop(), rel.stop()) + await daemonNode.close() + + asyncTest "NativeSrc -> DaemonRelay -> NativeDst": + proc customHandler(conn: Connection, proto: string) {.async.} = + check "line1" == string.fromBytes(await conn.readLp(1024)) + await conn.writeLp("line2") + check "line3" == string.fromBytes(await conn.readLp(1024)) + await conn.writeLp("line4") + await conn.close() + let + protos = @[ "/customProto", RelayCodec ] + var + customProto = new LPProtocol + customProto.handler = customHandler + customProto.codec = protos[0] + let + maSrc = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + maDst = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + src = SwitchBuilder.new() + .withRng(crypto.newRng()) + .withAddresses(@[ maSrc ]) + .withTcpTransport() + .withMplex() + .withNoise() + .withRelayTransport(false) + .build() + dst = SwitchBuilder.new() + .withRng(crypto.newRng()) + .withAddresses(@[ maDst ]) + .withTcpTransport() + .withMplex() + .withNoise() + .withRelayTransport(false) + .build() + + dst.mount(customProto) + await src.start() + await dst.start() + let daemonNode = await newDaemonApi({RelayHop}) + let daemonPeer = await daemonNode.identity() + let maStr = $daemonPeer.addresses[0] & "/p2p/" & $daemonPeer.peer & "/p2p-circuit/p2p/" & $dst.peerInfo.peerId + let maddr = MultiAddress.init(maStr).tryGet() + await src.connect(daemonPeer.peer, daemonPeer.addresses) + await daemonNode.connect(dst.peerInfo.peerId, dst.peerInfo.addrs) + let conn = await src.dial(dst.peerInfo.peerId, @[ maddr ], protos[0]) + + await conn.writeLp("line1") + check string.fromBytes(await conn.readLp(1024)) == "line2" + + await conn.writeLp("line3") + check string.fromBytes(await conn.readLp(1024)) == "line4" + + await allFutures(src.stop(), dst.stop()) + await daemonNode.close() diff --git a/tests/testmplex.nim b/tests/testmplex.nim index be45f07be..bce9e9af4 100644 --- a/tests/testmplex.nim +++ b/tests/testmplex.nim @@ -1,4 +1,4 @@ -import strformat, strformat, random, oids, sequtils +import strformat, random, oids, sequtils import chronos, nimcrypto/utils, chronicles, stew/byteutils import ../libp2p/[errors, stream/connection, diff --git a/tests/testmultistream.nim b/tests/testmultistream.nim index 5690ec0fc..6b07fcc72 100644 --- a/tests/testmultistream.nim +++ b/tests/testmultistream.nim @@ -278,9 +278,6 @@ suite "Multistream select": asyncTest "e2e - ls": let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()] - let - handlerWait = newFuture[void]() - let msListen = MultistreamSelect.new() var protocol: LPProtocol = new LPProtocol protocol.handler = proc(conn: Connection, proto: string) {.async, gcsafe.} = diff --git a/tests/testnative.nim b/tests/testnative.nim index 519e89854..998f104e2 100644 --- a/tests/testnative.nim +++ b/tests/testnative.nim @@ -2,7 +2,8 @@ import 
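# ----------------------------------------------------------------------------
# Editor's note (hedged sketch, condensed from the relay interop tests above): a
# switch built with withRelayTransport(false) dials a peer through a hop-enabled
# relay by targeting a /p2p-circuit address. `dst` and "/customProto" are
# placeholders; the code is assumed to run inside an async proc with the usual
# libp2p imports, and the relay must already be connected to dst:
let
  ma = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
  src = SwitchBuilder.new().withRng(newRng()).withAddresses(@[ ma ])
    .withTcpTransport().withMplex().withNoise()
    .withRelayTransport(canHop = false).build()
  rel = SwitchBuilder.new().withRng(newRng()).withAddresses(@[ ma ])
    .withTcpTransport().withMplex().withNoise()
    .withRelayTransport(canHop = true).build()
await src.start()
await rel.start()
# relayed address: <relay addr>/p2p/<relay id>/p2p-circuit/p2p/<dst id>
let circuitAddr = MultiAddress.init(
  $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId &
  "/p2p-circuit/p2p/" & $dst.peerInfo.peerId).tryGet()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
let conn = await src.dial(dst.peerInfo.peerId, @[ circuitAddr ], "/customProto")
# ----------------------------------------------------------------------------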
testvarint, testconnection, testminprotobuf, teststreamseq, - testsemaphore + testsemaphore, + testheartbeat import testminasn1, testrsa, @@ -31,4 +32,5 @@ import testtcptransport, testpeerinfo, testpeerstore, testping, - testmplex + testmplex, + testrelay diff --git a/tests/testnoise.nim b/tests/testnoise.nim index 8415d6f17..8c9e82e82 100644 --- a/tests/testnoise.nim +++ b/tests/testnoise.nim @@ -12,7 +12,6 @@ import tables, bearssl import chronos, stew/byteutils import chronicles -import ../libp2p/crypto/crypto import ../libp2p/[switch, errors, multistream, diff --git a/tests/testpeerinfo.nim b/tests/testpeerinfo.nim index 5140f8dfa..837657926 100644 --- a/tests/testpeerinfo.nim +++ b/tests/testpeerinfo.nim @@ -1,10 +1,12 @@ {.used.} import options, bearssl -import chronos +import chronos, stew/byteutils import ../libp2p/crypto/crypto, + ../libp2p/multicodec, ../libp2p/peerinfo, - ../libp2p/peerid + ../libp2p/peerid, + ../libp2p/routing_record import ./helpers @@ -16,3 +18,32 @@ suite "PeerInfo": check peerId == peerInfo.peerId check seckey.getPublicKey().get() == peerInfo.publicKey + + test "Signed peer record": + const + ExpectedDomain = $multiCodec("libp2p-peer-record") + ExpectedPayloadType = @[(byte) 0x03, (byte) 0x01] + + let + seckey = PrivateKey.random(rng[]).tryGet() + peerId = PeerID.init(seckey).get() + multiAddresses = @[MultiAddress.init("/ip4/0.0.0.0/tcp/24").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/25").tryGet()] + peerInfo = PeerInfo.new(seckey, multiAddresses) + + let + env = peerInfo.signedPeerRecord.envelope + rec = PeerRecord.decode(env.payload()).tryGet() + + # Check envelope fields + check: + env.publicKey == peerInfo.publicKey + env.domain == ExpectedDomain + env.payloadType == ExpectedPayloadType + + # Check payload (routing record) + check: + rec.peerId == peerId + rec.seqNo > 0 + rec.addresses.len == 2 + rec.addresses[0].address == multiAddresses[0] + rec.addresses[1].address == multiAddresses[1] diff --git a/tests/testpeerstore.nim b/tests/testpeerstore.nim index 948050941..74477319b 100644 --- a/tests/testpeerstore.nim +++ b/tests/testpeerstore.nim @@ -28,170 +28,107 @@ suite "PeerStore": var peerStore = PeerStore.new() - peerStore.addressBook.add(peerId1, multiaddr1) - peerStore.addressBook.add(peerId2, multiaddr2) - peerStore.protoBook.add(peerId1, testcodec1) - peerStore.protoBook.add(peerId2, testcodec2) - peerStore.keyBook.set(peerId1, keyPair1.pubkey) - peerStore.keyBook.set(peerId2, keyPair2.pubkey) + peerStore[AddressBook][peerId1] = @[multiaddr1] + peerStore[AddressBook][peerId2] = @[multiaddr2] + peerStore[ProtoBook][peerId1] = @[testcodec1] + peerStore[ProtoBook][peerId2] = @[testcodec2] + peerStore[KeyBook][peerId1] = keyPair1.pubkey + peerStore[KeyBook][peerId2] = keyPair2.pubkey - # Test PeerStore::delete - check: - # Delete existing peerId - peerStore.delete(peerId1) == true - peerId1 notin peerStore.addressBook + # Test PeerStore::del + # Delete existing peerId + peerStore.del(peerId1) + check peerId1 notin peerStore[AddressBook] + # Now try and del it again + peerStore.del(peerId1) - # Now try and delete it again - peerStore.delete(peerId1) == false test "PeerStore listeners": # Set up peer store with listener var peerStore = PeerStore.new() addrChanged = false - protoChanged = false - keyChanged = false - proc addrChange(peerId: PeerId, addrs: HashSet[MultiAddress]) = + proc addrChange(peerId: PeerId) {.gcsafe.} = addrChanged = true - proc protoChange(peerId: PeerId, protos: HashSet[string]) = - protoChanged = true - - proc 
keyChange(peerId: PeerId, publicKey: PublicKey) = - keyChanged = true - - peerStore.addHandlers(addrChangeHandler = addrChange, - protoChangeHandler = protoChange, - keyChangeHandler = keyChange) + peerStore[AddressBook].addHandler(addrChange) # Test listener triggered on adding multiaddr - peerStore.addressBook.add(peerId1, multiaddr1) - check: - addrChanged == true + peerStore[AddressBook][peerId1] = @[multiaddr1] + check: addrChanged == true - # Test listener triggered on setting addresses addrChanged = false - peerStore.addressBook.set(peerId2, - toHashSet([multiaddr1, multiaddr2])) check: + peerStore[AddressBook].del(peerId1) == true addrChanged == true - # Test listener triggered on adding proto - peerStore.protoBook.add(peerId1, testcodec1) - check: - protoChanged == true - - # Test listener triggered on setting protos - protoChanged = false - peerStore.protoBook.set(peerId2, - toHashSet([testcodec1, testcodec2])) - check: - protoChanged == true - - # Test listener triggered on setting public key - peerStore.keyBook.set(peerId1, - keyPair1.pubkey) - check: - keyChanged == true - - # Test listener triggered on changing public key - keyChanged = false - peerStore.keyBook.set(peerId1, - keyPair2.pubkey) - check: - keyChanged == true - - test "AddressBook API": + test "PeerBook API": # Set up address book - var - addressBook = PeerStore.new().addressBook + var addressBook = PeerStore.new()[AddressBook] # Test AddressBook::add - addressBook.add(peerId1, multiaddr1) + addressBook[peerId1] = @[multiaddr1] check: toSeq(keys(addressBook.book))[0] == peerId1 - toSeq(values(addressBook.book))[0] == toHashSet([multiaddr1]) + toSeq(values(addressBook.book))[0] == @[multiaddr1] # Test AddressBook::get check: - addressBook.get(peerId1) == toHashSet([multiaddr1]) + addressBook[peerId1] == @[multiaddr1] - # Test AddressBook::delete + # Test AddressBook::del check: - # Try to delete peerId that doesn't exist - addressBook.delete(peerId2) == false + # Try to del peerId that doesn't exist + addressBook.del(peerId2) == false # Delete existing peerId addressBook.book.len == 1 # sanity - addressBook.delete(peerId1) == true + addressBook.del(peerId1) == true addressBook.book.len == 0 # Test AddressBook::set # Set peerId2 with multiple multiaddrs - addressBook.set(peerId2, - toHashSet([multiaddr1, multiaddr2])) + addressBook[peerId2] = @[multiaddr1, multiaddr2] check: toSeq(keys(addressBook.book))[0] == peerId2 - toSeq(values(addressBook.book))[0] == toHashSet([multiaddr1, multiaddr2]) + toSeq(values(addressBook.book))[0] == @[multiaddr1, multiaddr2] - test "ProtoBook API": - # Set up protocol book - var - protoBook = PeerStore.new().protoBook + test "Pruner - no capacity": + let peerStore = PeerStore.new(capacity = 0) + peerStore[AgentBook][peerId1] = "gds" - # Test ProtoBook::add - protoBook.add(peerId1, testcodec1) + peerStore.cleanup(peerId1) + check peerId1 notin peerStore[AgentBook] + + test "Pruner - FIFO": + let peerStore = PeerStore.new(capacity = 1) + peerStore[AgentBook][peerId1] = "gds" + peerStore[AgentBook][peerId2] = "gds" + peerStore.cleanup(peerId2) + peerStore.cleanup(peerId1) check: - toSeq(keys(protoBook.book))[0] == peerId1 - toSeq(values(protoBook.book))[0] == toHashSet([testcodec1]) + peerId1 in peerStore[AgentBook] + peerId2 notin peerStore[AgentBook] - # Test ProtoBook::get - check: - protoBook.get(peerId1) == toHashSet([testcodec1]) + test "Pruner - regular capacity": + var peerStore = PeerStore.new(capacity = 20) - # Test ProtoBook::delete - check: - # Try to delete peerId that 
doesn't exist - protoBook.delete(peerId2) == false + for i in 0..<30: + let randomPeerId = PeerId.init(KeyPair.random(ECDSA, rng[]).get().pubkey).get() + peerStore[AgentBook][randomPeerId] = "gds" + peerStore.cleanup(randomPeerId) - # Delete existing peerId - protoBook.book.len == 1 # sanity - protoBook.delete(peerId1) == true - protoBook.book.len == 0 + check peerStore[AgentBook].len == 20 - # Test ProtoBook::set - # Set peerId2 with multiple protocols - protoBook.set(peerId2, - toHashSet([testcodec1, testcodec2])) - check: - toSeq(keys(protoBook.book))[0] == peerId2 - toSeq(values(protoBook.book))[0] == toHashSet([testcodec1, testcodec2]) + test "Pruner - infinite capacity": + var peerStore = PeerStore.new(capacity = -1) - test "KeyBook API": - # Set up key book - var - keyBook = PeerStore.new().keyBook + for i in 0..<30: + let randomPeerId = PeerId.init(KeyPair.random(ECDSA, rng[]).get().pubkey).get() + peerStore[AgentBook][randomPeerId] = "gds" + peerStore.cleanup(randomPeerId) - # Test KeyBook::set - keyBook.set(peerId1, - keyPair1.pubkey) - check: - toSeq(keys(keyBook.book))[0] == peerId1 - toSeq(values(keyBook.book))[0] == keyPair1.pubkey - - # Test KeyBook::get - check: - keyBook.get(peerId1) == keyPair1.pubkey - - # Test KeyBook::delete - check: - # Try to delete peerId that doesn't exist - keyBook.delete(peerId2) == false - - # Delete existing peerId - keyBook.book.len == 1 # sanity - keyBook.delete(peerId1) == true - keyBook.book.len == 0 + check peerStore[AgentBook].len == 30 diff --git a/tests/testrelay.nim b/tests/testrelay.nim new file mode 100644 index 000000000..a9fa514e5 --- /dev/null +++ b/tests/testrelay.nim @@ -0,0 +1,350 @@ +{.used.} + +import options, bearssl, chronos +import stew/byteutils +import ../libp2p/[protocols/relay, + multiaddress, + peerinfo, + peerid, + stream/connection, + multistream, + transports/transport, + switch, + builders, + upgrademngrs/upgrade, + varint, + daemon/daemonapi] +import ./helpers + +proc new(T: typedesc[RelayTransport], relay: Relay): T = + T.new(relay = relay, upgrader = relay.switch.transports[0].upgrader) + +proc writeLp*(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.} = + ## write lenght prefixed + var buf = initVBuffer() + buf.writeSeq(msg) + buf.finish() + result = s.write(buf.buffer) + +proc readLp*(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} = + ## read length prefixed msg + var + size: uint + length: int + res: VarintResult[void] + result = newSeq[byte](10) + + for i in 0.. 
0.uint: + await s.readExactly(addr result[0], int(size)) + +suite "Circuit Relay": + asyncTeardown: + await allFutures(src.stop(), dst.stop(), rel.stop()) + checkTrackers() + + var + protos {.threadvar.}: seq[string] + customProto {.threadvar.}: LPProtocol + ma {.threadvar.}: MultiAddress + src {.threadvar.}: Switch + dst {.threadvar.}: Switch + rel {.threadvar.}: Switch + relaySrc {.threadvar.}: Relay + relayDst {.threadvar.}: Relay + relayRel {.threadvar.}: Relay + conn {.threadVar.}: Connection + msg {.threadVar.}: ProtoBuffer + rcv {.threadVar.}: Option[RelayMessage] + + proc createMsg( + msgType: Option[RelayType] = RelayType.none, + status: Option[RelayStatus] = RelayStatus.none, + src: Option[RelayPeer] = RelayPeer.none, + dst: Option[RelayPeer] = RelayPeer.none): ProtoBuffer = + encodeMsg(RelayMessage(msgType: msgType, srcPeer: src, dstPeer: dst, status: status)) + + proc checkMsg(msg: Option[RelayMessage], + msgType: Option[RelayType] = none[RelayType](), + status: Option[RelayStatus] = none[RelayStatus](), + src: Option[RelayPeer] = none[RelayPeer](), + dst: Option[RelayPeer] = none[RelayPeer]()): bool = + msg.isSome and msg.get == RelayMessage(msgType: msgType, srcPeer: src, dstPeer: dst, status: status) + + proc customHandler(conn: Connection, proto: string) {.async.} = + check "line1" == string.fromBytes(await conn.readLp(1024)) + await conn.writeLp("line2") + check "line3" == string.fromBytes(await conn.readLp(1024)) + await conn.writeLp("line4") + await conn.close() + + asyncSetup: + # Create a custom prototype + protos = @[ "/customProto", RelayCodec ] + customProto = new LPProtocol + customProto.handler = customHandler + customProto.codec = protos[0] + ma = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + + src = newStandardSwitch() + rel = newStandardSwitch() + dst = SwitchBuilder + .new() + .withRng(newRng()) + .withAddresses(@[ ma ]) + .withTcpTransport() + .withMplex() + .withNoise() + .build() + + relaySrc = Relay.new(src, false) + relayDst = Relay.new(dst, false) + relayRel = Relay.new(rel, true) + + src.mount(relaySrc) + dst.mount(relayDst) + dst.mount(customProto) + rel.mount(relayRel) + + src.addTransport(RelayTransport.new(relaySrc)) + dst.addTransport(RelayTransport.new(relayDst)) + + await src.start() + await dst.start() + await rel.start() + + asyncTest "Handle CanHop": + msg = createMsg(some(CanHop)) + conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec) + await conn.writeLp(msg.buffer) + rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize)) + check rcv.checkMsg(some(Status), some(RelayStatus.Success)) + + conn = await src.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayCodec) + await conn.writeLp(msg.buffer) + rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize)) + check rcv.checkMsg(some(Status), some(HopCantSpeakRelay)) + + await conn.close() + + asyncTest "Malformed": + conn = await rel.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayCodec) + msg = createMsg(some(RelayType.Status)) + await conn.writeLp(msg.buffer) + rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize)) + await conn.close() + check rcv.checkMsg(some(Status), some(MalformedMessage)) + + asyncTest "Handle Stop Error": + conn = await rel.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayCodec) + msg = createMsg(some(RelayType.Stop), + none(RelayStatus), + none(RelayPeer), + some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs))) + await conn.writeLp(msg.buffer) + rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize)) + check 
rcv.checkMsg(some(Status), some(StopSrcMultiaddrInvalid))
+
+    conn = await rel.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Stop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+                    none(RelayPeer))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(StopDstMultiaddrInvalid))
+
+    conn = await rel.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Stop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)),
+                    some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    await conn.close()
+    check rcv.checkMsg(some(Status), some(StopDstMultiaddrInvalid))
+
+  asyncTest "Handle Hop Error":
+    conn = await src.dial(dst.peerInfo.peerId, dst.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopCantSpeakRelay))
+
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop),
+                    none(RelayStatus),
+                    none(RelayPeer),
+                    some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopSrcMultiaddrInvalid))
+
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)),
+                    some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopSrcMultiaddrInvalid))
+
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+                    none(RelayPeer))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopDstMultiaddrInvalid))
+
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+                    some(RelayPeer(peerId: rel.peerInfo.peerId, addrs: rel.peerInfo.addrs)))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopCantRelayToSelf))
+
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+                    some(RelayPeer(peerId: rel.peerInfo.peerId, addrs: rel.peerInfo.addrs)))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopCantRelayToSelf))
+
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+                    some(RelayPeer(peerId: dst.peerInfo.peerId, addrs: dst.peerInfo.addrs)))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopNoConnToDst))
+
+    await rel.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
+
+    relayRel.maxCircuit = 0
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopCantSpeakRelay))
+    relayRel.maxCircuit = relay.MaxCircuit
+    await conn.close()
+
+    relayRel.maxCircuitPerPeer = 0
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopCantSpeakRelay))
+    relayRel.maxCircuitPerPeer = relay.MaxCircuitPerPeer
+    await conn.close()
+
+    let dst2 = newStandardSwitch()
+    await dst2.start()
+    await rel.connect(dst2.peerInfo.peerId, dst2.peerInfo.addrs)
+
+    conn = await src.dial(rel.peerInfo.peerId, rel.peerInfo.addrs, RelayCodec)
+    msg = createMsg(some(RelayType.Hop),
+                    none(RelayStatus),
+                    some(RelayPeer(peerId: src.peerInfo.peerId, addrs: src.peerInfo.addrs)),
+                    some(RelayPeer(peerId: dst2.peerInfo.peerId, addrs: dst2.peerInfo.addrs)))
+    await conn.writeLp(msg.buffer)
+    rcv = relay.decodeMsg(await conn.readLp(relay.MsgSize))
+    check rcv.checkMsg(some(Status), some(HopCantDialDst))
+    await allFutures(dst2.stop())
+
+  asyncTest "Dial Peer":
+    let maStr = $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p-circuit/p2p/" & $dst.peerInfo.peerId
+    let maddr = MultiAddress.init(maStr).tryGet()
+    await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
+    await rel.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
+    conn = await src.dial(dst.peerInfo.peerId, @[ maddr ], protos[0])
+
+    await conn.writeLp("line1")
+    check string.fromBytes(await conn.readLp(1024)) == "line2"
+
+    await conn.writeLp("line3")
+    check string.fromBytes(await conn.readLp(1024)) == "line4"
+
+  asyncTest "SwitchBuilder withRelay":
+    let
+      maSrc = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+      maRel = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+      maDst = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+      srcWR = SwitchBuilder.new()
+        .withRng(newRng())
+        .withAddresses(@[ maSrc ])
+        .withTcpTransport()
+        .withMplex()
+        .withNoise()
+        .withRelayTransport(false)
+        .build()
+      relWR = SwitchBuilder.new()
+        .withRng(newRng())
+        .withAddresses(@[ maRel ])
+        .withTcpTransport()
+        .withMplex()
+        .withNoise()
+        .withRelayTransport(true)
+        .build()
+      dstWR = SwitchBuilder.new()
+        .withRng(newRng())
+        .withAddresses(@[ maDst ])
+        .withTcpTransport()
+        .withMplex()
+        .withNoise()
+        .withRelayTransport(false)
+        .build()
+
+    dstWR.mount(customProto)
+
+    await srcWR.start()
+    await dstWR.start()
+    await relWR.start()
+
+    let maStr = $relWR.peerInfo.addrs[0] & "/p2p/" & $relWR.peerInfo.peerId & "/p2p-circuit/p2p/" & $dstWR.peerInfo.peerId
+    let maddr = MultiAddress.init(maStr).tryGet()
+    await srcWR.connect(relWR.peerInfo.peerId, relWR.peerInfo.addrs)
+    await relWR.connect(dstWR.peerInfo.peerId, dstWR.peerInfo.addrs)
+    conn = await srcWR.dial(dstWR.peerInfo.peerId, @[ maddr ], protos[0])
+
+    await conn.writeLp("line1")
+    check string.fromBytes(await conn.readLp(1024)) == "line2"
+
+    await conn.writeLp("line3")
+    check string.fromBytes(await conn.readLp(1024)) == "line4"
+
+    await allFutures(srcWR.stop(), dstWR.stop(), relWR.stop())
+
+  asyncTest "Bad MultiAddress":
+    await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
+    await rel.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
+    expect(CatchableError):
+      let maStr = $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId & "/p2p/" & $dst.peerInfo.peerId
+      let maddr = MultiAddress.init(maStr).tryGet()
+      conn = await src.dial(dst.peerInfo.peerId, @[ maddr ], protos[0])
+
+    expect(CatchableError):
+      let maStr = $rel.peerInfo.addrs[0] & "/p2p/" & $rel.peerInfo.peerId
+      let maddr = MultiAddress.init(maStr).tryGet()
+      conn = await src.dial(dst.peerInfo.peerId, @[ maddr ], protos[0])
+
+    expect(CatchableError):
+      let maStr = "/ip4/127.0.0.1"
+      let maddr = MultiAddress.init(maStr).tryGet()
+      conn = await src.dial(dst.peerInfo.peerId, @[ maddr ], protos[0])
+
+    expect(CatchableError):
+      let maStr = $dst.peerInfo.peerId
+      let maddr = MultiAddress.init(maStr).tryGet()
+      conn = await src.dial(dst.peerInfo.peerId, @[ maddr ], protos[0])
diff --git a/tests/testrouting_record.nim b/tests/testrouting_record.nim
index d10500b5b..112efe442 100644
--- a/tests/testrouting_record.nim
+++ b/tests/testrouting_record.nim
@@ -9,7 +9,7 @@ suite "Routing record":
     privKey = PrivateKey.random(rng[]).tryGet()
     peerId = PeerId.init(privKey).tryGet()
     multiAddresses = @[MultiAddress.init("/ip4/0.0.0.0/tcp/24").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/25").tryGet()]
-    routingRecord = PeerRecord.init(peerId, 42, multiAddresses)
+    routingRecord = PeerRecord.init(peerId, multiAddresses, 42)
 
     buffer = routingRecord.encode()
 
@@ -36,3 +36,33 @@ suite "Routing record":
       $decodedRecord.addresses[0].address == "/ip4/1.2.3.4/tcp/0"
       $decodedRecord.addresses[1].address == "/ip4/1.2.3.4/tcp/1"
 
+suite "Signed Routing Record":
+  test "Encode -> decode test":
+    let
+      rng = newRng()
+      privKey = PrivateKey.random(rng[]).tryGet()
+      peerId = PeerId.init(privKey).tryGet()
+      multiAddresses = @[MultiAddress.init("/ip4/0.0.0.0/tcp/24").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/25").tryGet()]
+      routingRecord = SignedPeerRecord.init(privKey, PeerRecord.init(peerId, multiAddresses, 42)).tryGet()
+      buffer = routingRecord.envelope.encode().tryGet()
+
+      parsedRR = SignedPeerRecord.decode(buffer).tryGet().data
+
+    check:
+      parsedRR.peerId == peerId
+      parsedRR.seqNo == 42
+      parsedRR.addresses.len == 2
+      parsedRR.addresses[0].address == multiAddresses[0]
+      parsedRR.addresses[1].address == multiAddresses[1]
+
+  test "Can't use mismatched public key":
+    let
+      rng = newRng()
+      privKey = PrivateKey.random(rng[]).tryGet()
+      privKey2 = PrivateKey.random(rng[]).tryGet()
+      peerId = PeerId.init(privKey).tryGet()
+      multiAddresses = @[MultiAddress.init("/ip4/0.0.0.0/tcp/24").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/25").tryGet()]
+      routingRecord = SignedPeerRecord.init(privKey2, PeerRecord.init(peerId, multiAddresses, 42)).tryGet()
+      buffer = routingRecord.envelope.encode().tryGet()
+
+    check SignedPeerRecord.decode(buffer).error == EnvelopeInvalidSignature
diff --git a/tests/testsigned_envelope.nim b/tests/testsigned_envelope.nim
index 3c5882232..510db90fb 100644
--- a/tests/testsigned_envelope.nim
+++ b/tests/testsigned_envelope.nim
@@ -3,7 +3,7 @@ import stew/byteutils
 import ../libp2p/[signed_envelope]
 
 suite "Signed envelope":
-  test "Encode -> decode test":
+  test "Encode -> decode -> encode -> decode test":
     let
       rng = newRng()
      privKey = PrivateKey.random(rng[]).tryGet()
@@ -12,10 +12,16 @@ suite "Signed envelope":
      decodedEnvelope = Envelope.decode(buffer, "domain").tryGet()
      wrongDomain =
Envelope.decode(buffer, "wdomain") + reencodedEnvelope = decodedEnvelope.encode().tryGet() + redecodedEnvelope = Envelope.decode(reencodedEnvelope, "domain").tryGet() + check: decodedEnvelope == envelope wrongDomain.error == EnvelopeInvalidSignature + reencodedEnvelope == buffer + redecodedEnvelope == envelope + test "Interop decode test": # from https://github.com/libp2p/go-libp2p-core/blob/b18a4c9c5629870bde2cd85ab3b87a507600d411/record/envelope_test.go#L68 let inputData = "0a24080112206f1581709bb7b1ef030d210db18e3b0ba1c776fba65d8cdaad05415142d189f812102f6c69627032702f74657374646174611a0c68656c6c6f20776f726c64212a401178673b51dfa842aad17e465e25d646ad16628916b964c3fb10c711fee87872bdd4e4646f58c277cdff09704913d8be1aec6322de8d3d0bb852120374aece08".hexToSeqByte() @@ -28,3 +34,56 @@ suite "Signed envelope": # same as above, but payload altered let inputData = "0a24080112206f1581709bb7b1ef030d210db18e3b0ba1c776fba65d8cdaad05415142d189f812102f6c69627032702f74657374646174611a0c00006c6c6f20776f726c64212a401178673b51dfa842aad17e465e25d646ad16628916b964c3fb10c711fee87872bdd4e4646f58c277cdff09704913d8be1aec6322de8d3d0bb852120374aece08".hexToSeqByte() check Envelope.decode(inputData, "libp2p-testing").error == EnvelopeInvalidSignature + +# needs to be exported to work +type + DummyPayload* = object + awesome: byte + SignedDummy = SignedPayload[DummyPayload] + +proc decode*(T: typedesc[DummyPayload], buffer: seq[byte]): Result[DummyPayload, cstring] = + ok(DummyPayload(awesome: buffer[0])) + +proc encode*(pd: DummyPayload): seq[byte] = + @[pd.awesome] + +proc checkValid*(pd: SignedDummy): Result[void, EnvelopeError] = + if pd.data.awesome == 12.byte: ok() + else: err(EnvelopeInvalidSignature) + +proc payloadDomain*(T: typedesc[DummyPayload]): string = "dummy" +proc payloadType*(T: typedesc[DummyPayload]): seq[byte] = @[(byte) 0x00, (byte) 0x00] +suite "Signed payload": + test "Simple encode -> decode": + let + rng = newRng() + privKey = PrivateKey.random(rng[]).tryGet() + + dummyPayload = DummyPayload(awesome: 12.byte) + signed = SignedDummy.init(privKey, dummyPayload).tryGet() + encoded = signed.encode().tryGet() + decoded = SignedDummy.decode(encoded).tryGet() + + check: + dummyPayload.awesome == decoded.data.awesome + decoded.envelope.publicKey == privKey.getPublicKey().tryGet() + + test "Invalid payload": + let + rng = newRng() + privKey = PrivateKey.random(rng[]).tryGet() + + dummyPayload = DummyPayload(awesome: 30.byte) + signed = SignedDummy.init(privKey, dummyPayload).tryGet() + encoded = signed.encode().tryGet() + check SignedDummy.decode(encoded).error == EnvelopeInvalidSignature + + test "Invalid payload type": + let + rng = newRng() + privKey = PrivateKey.random(rng[]).tryGet() + + dummyPayload = DummyPayload(awesome: 30.byte) + signed = Envelope.init(privKey, @[55.byte], dummyPayload.encode(), DummyPayload.payloadDomain).tryGet() + encoded = signed.encode().tryGet() + check SignedDummy.decode(encoded).error == EnvelopeWrongType diff --git a/tests/testswitch.nim b/tests/testswitch.nim index c24c9b89f..23cb6ed0b 100644 --- a/tests/testswitch.nim +++ b/tests/testswitch.nim @@ -811,7 +811,7 @@ suite "Switch": let switch1 = newStandardSwitch() switch1.mount(testProto) - let switch2 = newStandardSwitch() + let switch2 = newStandardSwitch(peerStoreCapacity = 0) await switch1.start() await switch2.start() @@ -834,11 +834,11 @@ suite "Switch": check not switch2.isConnected(switch1.peerInfo.peerId) check: - switch1.peerStore.addressBook.get(switch2.peerInfo.peerId) == 
switch2.peerInfo.addrs.toHashSet() - switch2.peerStore.addressBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.addrs.toHashSet() + switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs + switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols - switch1.peerStore.protoBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.protocols.toHashSet() - switch2.peerStore.protoBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.protocols.toHashSet() + switch1.peerInfo.peerId notin switch2.peerStore[AddressBook] + switch1.peerInfo.peerId notin switch2.peerStore[ProtoBook] asyncTest "e2e should allow multiple local addresses": when defined(windows): diff --git a/tools/grafana/libp2p-metrics.json b/tools/grafana/libp2p-metrics.json new file mode 100644 index 000000000..3c89460d3 --- /dev/null +++ b/tools/grafana/libp2p-metrics.json @@ -0,0 +1,1508 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 4, + "iteration": 1642596851125, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 83, + "panels": [], + "title": "peer metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 1 + }, + "id": 51, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "libp2p_peers{instance=\"${instance}\"}", + "interval": "", + "legendFormat": "libp2p peers", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "libp2p_pubsub_peers{instance=\"${instance}\"}", + "interval": "", + "legendFormat": "libp2p pubsub peers", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "nbc_peers{instance=\"${instance}\"}", + "interval": "", + "legendFormat": "beacon_node peers", + "refId": "B" + } + ], + "title": "peers (${instance})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 3, + "x": 14, + "y": 1 + }, + "id": 12, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.3.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "nbc_peers{instance=\"${instance}\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "peers", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 6 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "libp2p_open_streams{instance=\"${instance}\"}", + "interval": "", + "legendFormat": "{{type}} ({{dir}})", + "refId": "A" + } + ], + "title": "open streams (${instance})", + "type": "timeseries" + }, + { + "cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateSpectral", + "exponent": 0.5, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "gridPos": { + "h": 6, + "w": 14, + "x": 0, + "y": 11 + }, + "heatmap": {}, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 76, + "interval": "", + "legend": { + "show": false + }, + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_mplex_qtime_bucket{instance=\"${instance}\"}[$__rate_interval])", + "format": "heatmap", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "title": "message tx queuing delay (s) (${instance})", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "format": "short", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + }, + { + 
"cards": {}, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateSpectral", + "exponent": 0.5, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "gridPos": { + "h": 6, + "w": 14, + "x": 0, + "y": 17 + }, + "heatmap": {}, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 72, + "interval": "", + "legend": { + "show": false + }, + "reverseYBuckets": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_mplex_qlen_bucket{instance=\"${instance}\"}[$__rate_interval])", + "format": "heatmap", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "title": "message tx queue length (msgs) (${instance})", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "yAxis": { + "format": "short", + "logBase": 1, + "show": true + }, + "yBucketBound": "auto" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 23 + }, + "id": 86, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_mplex_qlenclose_total{instance=\"${instance}\"}[$__rate_interval])", + "interval": "", + "legendFormat": "reason: max queue length reached", + "refId": "A" + } + ], + "title": "Peer connections closed / second (${instance})", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 28 + }, + "id": 68, + "panels": [], + "title": "Gossipsub", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": 
"green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "no_peers" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 29 + }, + "id": 80, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "libp2p_gossipsub_healthy_peers_topics{instance=\"${instance}\"}", + "interval": "", + "legendFormat": "healthy", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "libp2p_gossipsub_low_peers_topics{instance=\"${instance}\"}", + "hide": false, + "interval": "", + "legendFormat": "low", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "libp2p_gossipsub_no_peers_topics{instance=\"${instance}\"}", + "hide": false, + "interval": "", + "legendFormat": "no_peers", + "refId": "B" + } + ], + "title": "Mesh health", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 34 + }, + "id": 73, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_gossipsub_duplicate_during_validation_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "duplicates during validation", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_gossipsub_duplicate_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "duplicates received", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_gossipsub_received_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "received", + "refId": "C" + } + ], + "title": "gossipsub duplicates / second (${instance})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 39 + }, + "id": 74, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_gossipsub_duplicate_total{instance=\"${instance}\"}[$__rate_interval]) / rate(libp2p_gossipsub_received_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "duplicates received ratio", + "refId": "B" + } + ], + "title": "gossipsub duplicates received ratio (${instance})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 44 + }, + "id": 81, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "libp2p_pubsub_topic_handlers{instance=\"${instance}\"}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "topic subscription (${instance})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + 
"stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 49 + }, + "id": 78, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "(rate(libp2p_gossipsub_mcache_hit_count{instance=\"${instance}\"}[$__rate_interval]) - rate(libp2p_gossipsub_mcache_hit_sum{instance=\"${instance}\"}[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "mcache miss", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_gossipsub_mcache_hit_count{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "mcache hit", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_pubsub_received_iwant_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "iwant received", + "refId": "C" + } + ], + "title": "gossipsub iwant and mcache / second (${instance})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 54 + }, + "id": 77, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_gossipsub_mcache_hit_sum{instance=\"${instance}\"}[$__rate_interval]) / rate(libp2p_gossipsub_mcache_hit_count{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "mcache hits", + "refId": "B" + } + ], + "title": "gossipsub mcache hit ratio (${instance})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 59 + }, + "id": 84, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_pubsub_broadcast_iwant_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "iwant msgids sent", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_pubsub_received_iwant_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "iwant msgids received", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (instance) (rate(libp2p_pubsub_broadcast_ihave_total{instance=\"${instance}\"}[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "ihave cmds sent", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (instance) (rate(libp2p_pubsub_received_ihave_total{instance=\"${instance}\"}[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "ihave cmds received", + "refId": "D" + } + ], + "title": "gossipsub iwant, ihave: msgids / second (${instance})", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Requires -d:libp2p_protobuf_metrics", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 14, + "x": 0, + "y": 64 + }, + "id": 85, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + }, + "pluginVersion": "8.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"rate(libp2p_pubsub_rpc_bytes_read_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(libp2p_pubsub_rpc_bytes_write_total{instance=\"${instance}\"}[$__rate_interval])", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "title": "gossipsub bytes / second (${instance})", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 33, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "Prometheus", + "value": "Prometheus" + }, + "description": "The prometheus instance to use as data source", + "hide": 0, + "includeAll": false, + "label": "Datasource", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": true, + "text": "127.0.0.1:8008", + "value": "127.0.0.1:8008" + }, + "definition": "label_values(instance)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "label_values(instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "libp2p metrics", + "uid": "oHpljhTnk", + "version": 17, + "weekStart": "" +} \ No newline at end of file