Gossipsub fixes and Initiator flagging fixes (#539)
* properly propagate initiator information for gossipsub
* fix PubSubPeer lifetime management
* restore old behavior
* fix tests
* clamp the backoff time value received in PRUNE messages
* fix member name collisions
* fix internal tests
* use better names and explain the importance of transport direction
* misc fixes
parent: c1334c6d89
commit: 02ad017107
@@ -95,6 +95,7 @@ proc newStreamInternal*(m: Mplex,
   result.peerInfo = m.connection.peerInfo
   result.observedAddr = m.connection.observedAddr
+  result.transportDir = m.connection.transportDir

   trace "Creating new channel", m, channel = result, id, initiator, name
@@ -173,7 +173,15 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
     # add peer backoff
     if prune.backoff > 0:
       let
-        backoff = Moment.fromNow((prune.backoff + BackoffSlackTime).int64.seconds)
+        # avoid overflows and follow params
+        # worst case if the remote thinks we are wrong we get penalized
+        # but we won't end up with ghost peers
+        backoffSeconds = clamp(
+          prune.backoff + BackoffSlackTime,
+          0'u64,
+          g.parameters.pruneBackoff.seconds.uint64 + BackoffSlackTime
+        )
+        backoff = Moment.fromNow(backoffSeconds.int64.seconds)
         current = g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId)
       if backoff > current:
         g.backingOff
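The clamp above is the heart of this fix: a remote peer can no longer park us in backoff indefinitely by sending an absurd value in its PRUNE. Below is a minimal, self-contained Nim sketch of the same arithmetic; the slack constant and the 60-second cap are illustrative assumptions, standing in for the real BackoffSlackTime and g.parameters.pruneBackoff.

const BackoffSlackTime = 2'u64        # seconds; assumed value for this sketch

let
  remoteBackoff = 3_000_000_000'u64   # absurd value from a misbehaving PRUNE
  localCapSecs  = 60'u64              # stand-in for g.parameters.pruneBackoff

# bound the remote's request by our own parameter plus slack
let backoffSeconds = clamp(
  remoteBackoff + BackoffSlackTime,
  0'u64,
  localCapSecs + BackoffSlackTime)

echo backoffSeconds                   # 62, not ~95 years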
@@ -274,11 +274,21 @@ proc getOrCreatePeer*(
   proc getConn(): Future[Connection] =
     p.switch.dial(peer, protos)

+  proc dropConn(peer: PubSubPeer) =
+    proc dropConnAsync(peer: PubsubPeer) {.async.} =
+      try:
+        await p.switch.disconnect(peer.peerId)
+      except CancelledError:
+        raise
+      except CatchableError as exc:
+        trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
+    asyncSpawn dropConnAsync(peer)
+
   proc onEvent(peer: PubsubPeer, event: PubsubPeerEvent) {.gcsafe.} =
     p.onPubSubPeerEvent(peer, event)

   # create new pubsub peer
-  let pubSubPeer = newPubSubPeer(peer, getConn, onEvent, protos[0])
+  let pubSubPeer = newPubSubPeer(peer, getConn, dropConn, onEvent, protos[0])
   debug "created new pubsub peer", peer

   p.peers[peer] = pubSubPeer
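dropConn wraps the actual disconnect in an inner async proc so the synchronous callback can return immediately. A rough chronos sketch of that fire-and-forget shape, with sleepAsync standing in for switch.disconnect (the real code traces failures instead of discarding them):

import chronos

proc dropAsync() {.async.} =
  try:
    await sleepAsync(10.milliseconds) # stand-in for p.switch.disconnect(...)
  except CancelledError:
    raise                             # cancellation must keep propagating
  except CatchableError:
    discard                           # asyncSpawn futures must not leak errors

asyncSpawn dropAsync()                # returns immediately, no await
waitFor sleepAsync(50.milliseconds)   # let the spawned future complete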
@@ -41,10 +41,12 @@ type
     kind*: PubSubPeerEventKind

   GetConn* = proc(): Future[Connection] {.gcsafe.}
+  DropConn* = proc(peer: PubsubPeer) {.gcsafe.} # have to pass peer as it's unknown during init
   OnEvent* = proc(peer: PubSubPeer, event: PubsubPeerEvent) {.gcsafe.}

   PubSubPeer* = ref object of RootObj
     getConn*: GetConn     # callback to establish a new send connection
+    dropConn*: DropConn   # function pointer to use to drop connections
     onEvent*: OnEvent     # connectivity updates for peer
     codec*: string        # the protocol that this peer joined from
     sendConn*: Connection # cached send connection
@@ -83,7 +85,11 @@ proc hasObservers(p: PubSubPeer): bool =
   p.observers != nil and anyIt(p.observers[], it != nil)

 func outbound*(p: PubSubPeer): bool =
-  if p.connected and p.sendConn.dir == Direction.Out:
+  # gossipsub 1.1 spec requires us to know if the transport is outgoing
+  # in order to give priority to connections we make
+  # https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#outbound-mesh-quotas
+  # This behaviour is prescribed to counter sybil attacks and ensures that a coordinated inbound attack can never fully take over the mesh
+  if not p.sendConn.isNil and p.sendConn.transportDir == Direction.Out:
     true
   else:
     false
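The guard also changed from p.connected to not p.sendConn.isNil because the predicate must be safe to call before any send connection exists. A stand-in sketch of the short-circuit, using simplified types rather than the real nim-libp2p ones:

type
  Direction = enum In, Out
  Conn = ref object
    transportDir: Direction

proc outbound(sendConn: Conn): bool =
  # nil check first: reading transportDir through nil would crash
  not sendConn.isNil and sendConn.transportDir == Out

assert not outbound(nil)                 # no send connection yet
assert outbound(Conn(transportDir: Out)) # we dialed the underlying socket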
@@ -169,18 +175,18 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
   finally:
     if p.sendConn != nil:
       trace "Removing send connection", p, conn = p.sendConn
       await p.sendConn.close()

-      try:
-        if p.onEvent != nil:
-          p.onEvent(p, PubsubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
-      except CancelledError as exc:
-        debug "Errors during disconnection events", error = exc.msg
-
-      # clean up at the end
       p.sendConn = nil
-      # don't cleanup p.address else we leak some gossip stat table
+      try:
+        if p.onEvent != nil:
+          p.onEvent(p, PubsubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
+      except CancelledError:
+        raise
+      except CatchableError as exc:
+        debug "Errors during disconnection events", error = exc.msg
+
+      # don't cleanup p.address else we leak some gossip stat table

 proc connectImpl(p: PubSubPeer) {.async.} =
   try:
@@ -189,9 +195,13 @@ proc connectImpl(p: PubSubPeer) {.async.} =
     # issue so we try to get a new one
     while true:
       await connectOnce(p)
+  except CancelledError:
+    raise
   except CatchableError as exc:
     debug "Could not establish send connection", msg = exc.msg
+  finally:
+    # drop the connection, else we end up with ghost peers
+    if p.dropConn != nil: p.dropConn(p)

 proc connect*(p: PubSubPeer) =
   asyncSpawn connectImpl(p)
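The finally block is what closes the ghost-peer hole: with chronos, cancellation raises CancelledError at the suspended await, and finally still runs, so dropConn fires on every exit path. A minimal sketch under that assumption:

import chronos

proc connectLoop() {.async.} =
  try:
    while true:
      await sleepAsync(1.seconds)       # stand-in for connectOnce
  finally:
    echo "cleanup runs on cancellation" # where dropConn would fire

proc main() {.async.} =
  let fut = connectLoop()
  await sleepAsync(10.milliseconds)
  await fut.cancelAndWait()             # finally executes before this returns

waitFor main()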
@@ -255,10 +265,12 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) =

 proc newPubSubPeer*(peerId: PeerID,
                     getConn: GetConn,
+                    dropConn: DropConn,
                     onEvent: OnEvent,
                     codec: string): PubSubPeer =
   PubSubPeer(
     getConn: getConn,
+    dropConn: dropConn,
     onEvent: onEvent,
     codec: codec,
     peerId: peerId,
@@ -76,6 +76,10 @@ proc handleConn(s: Secure,
                 conn: Connection,
                 initiator: bool): Future[Connection] {.async.} =
   var sconn = await s.handshake(conn, initiator)
+  # mark connection bottom level transport direction
+  # this is the safest place to do this
+  # we require this information in for example gossipsub
+  sconn.transportDir = if initiator: Direction.Out else: Direction.In

   proc cleanup() {.async.} =
     try:
@@ -33,6 +33,7 @@ type
     peerInfo*: PeerInfo
     observedAddr*: Multiaddress
     upgraded*: Future[void]
+    transportDir*: Direction # the bottom level transport (generally the socket) direction

 proc timeoutMonitor(s: Connection) {.async, gcsafe.}
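transportDir is kept separate from a stream's own direction on purpose: a muxed channel can be opened by either side, but it always rides on a socket that exactly one side dialed, and gossipsub's outbound quota counts sockets. A sketch of the distinction with stand-in types:

type
  Direction = enum In, Out
  Chan = ref object
    dir: Direction          # who opened this (muxed) stream
    transportDir: Direction # who dialed the underlying transport

# the remote peer opens a stream to us over a connection *we* dialed:
let chan = Chan(dir: In, transportDir: Out)
assert chan.transportDir == Out # still fills an outbound mesh slot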
@@ -136,6 +136,10 @@ proc dialAndUpgrade(s: Switch,
         # make sure to assign the peer to the connection
         dialed.peerInfo = PeerInfo.init(peerId, addrs)

+        # also keep track of the connection's bottom unsafe transport direction
+        # required by gossipsub scoring
+        dialed.transportDir = Direction.Out
+
         libp2p_successful_dials.inc()

         let conn = try:
@@ -334,6 +338,11 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
         upgrades.release()
         continue

+      # set the direction of this bottom level transport
+      # in order to be able to consume this information in gossipsub if required
+      # gossipsub gives priority to connections we make
+      conn.transportDir = Direction.In
+
       debug "Accepted an incoming connection", conn
       asyncSpawn upgradeMonitor(conn, upgrades)
       asyncSpawn s.upgrade.upgradeIncoming(conn)
@@ -22,7 +22,10 @@ proc getPubSubPeer(p: TestGossipSub, peerId: PeerID): auto =
   proc getConn(): Future[Connection] =
     p.switch.dial(peerId, GossipSubCodec)

-  newPubSubPeer(peerId, getConn, nil, GossipSubCodec)
+  proc dropConn(peer: PubSubPeer) =
+    discard # we don't care about it here yet
+
+  newPubSubPeer(peerId, getConn, dropConn, nil, GossipSubCodec)

 proc randomPeerInfo(): PeerInfo =
   PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
@@ -591,7 +594,7 @@ suite "GossipSub internal":
       gossipSub.gossipsub[topic] = initHashSet[PubSubPeer]()
       for i in 0..<6:
         let conn = newBufferStream(noop)
-        conn.dir = Direction.In
+        conn.transportDir = Direction.In
         conns &= conn
         let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
         conn.peerInfo = peerInfo
@@ -605,7 +608,7 @@ suite "GossipSub internal":

       for i in 0..<7:
         let conn = newBufferStream(noop)
-        conn.dir = Direction.Out
+        conn.transportDir = Direction.Out
         conns &= conn
         let peerInfo = PeerInfo.init(PrivateKey.random(ECDSA, rng[]).get())
         conn.peerInfo = peerInfo
@@ -623,7 +626,7 @@ suite "GossipSub internal":
       check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
       var outbound = 0
       for peer in gossipSub.mesh[topic]:
-        if peer.sendConn.dir == Direction.Out:
+        if peer.sendConn.transportDir == Direction.Out:
           inc outbound
       # ensure we give priority and keep at least dOut outbound peers
       check outbound >= gossipSub.parameters.dOut