Merge branch 'master' into unstable
commit 1b2cdd6aec
@@ -201,14 +201,18 @@ proc verify*[T: byte|char](sig: SkSignature, msg: openarray[T],
   let h = sha256.digest(msg)
   verify(secp256k1.SkSignature(sig), SkMessage(h.data), secp256k1.SkPublicKey(key))

-func clear*(key: var SkPrivateKey) {.borrow.}
+func clear*(key: var SkPrivateKey) = clear(secp256k1.SkSecretKey(key))

-proc `$`*(key: SkPrivateKey): string {.borrow.}
-proc `$`*(key: SkPublicKey): string {.borrow.}
-proc `$`*(key: SkSignature): string {.borrow.}
-proc `$`*(key: SkKeyPair): string {.borrow.}
+func `$`*(key: SkPrivateKey): string = $secp256k1.SkSecretKey(key)
+func `$`*(key: SkPublicKey): string = $secp256k1.SkPublicKey(key)
+func `$`*(key: SkSignature): string = $secp256k1.SkSignature(key)
+func `$`*(key: SkKeyPair): string = $secp256k1.SkKeyPair(key)

-proc `==`*(a, b: SkPrivateKey): bool {.borrow.}
-proc `==`*(a, b: SkPublicKey): bool {.borrow.}
-proc `==`*(a, b: SkSignature): bool {.borrow.}
-proc `==`*(a, b: SkKeyPair): bool {.borrow.}
+func `==`*(a, b: SkPrivateKey): bool =
+  secp256k1.SkSecretKey(a) == secp256k1.SkSecretKey(b)
+func `==`*(a, b: SkPublicKey): bool =
+  secp256k1.SkPublicKey(a) == secp256k1.SkPublicKey(b)
+func `==`*(a, b: SkSignature): bool =
+  secp256k1.SkSignature(a) == secp256k1.SkSignature(b)
+func `==`*(a, b: SkKeyPair): bool =
+  secp256k1.SkKeyPair(a) == secp256k1.SkKeyPair(b)
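The hunk above replaces the {.borrow.} pragma with explicit conversions to the underlying secp256k1 types, so clear, $ and == forward through a plain distinct-type conversion. A minimal, self-contained sketch of that forwarding pattern; Inner and Wrapped below are hypothetical illustration types, not libp2p or secp256k1 API:

# Forwarding `$` and `==` for a distinct type by converting back to the
# underlying type, mirroring the funcs in the hunk above.
type
  Inner = object
    data: array[4, byte]
  Wrapped = distinct Inner        # analogous to SkPrivateKey wrapping SkSecretKey

func `$`(x: Inner): string = $x.data
func `==`(a, b: Inner): bool = a.data == b.data

func `$`(x: Wrapped): string = $Inner(x)
func `==`(a, b: Wrapped): bool = Inner(a) == Inner(b)

when isMainModule:
  let a = Wrapped(Inner(data: [1'u8, 2, 3, 4]))
  let b = Wrapped(Inner(data: [1'u8, 2, 3, 4]))
  doAssert a == b
  echo a   # prints the underlying byte array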
@@ -89,7 +89,7 @@ proc handleGraft*(g: GossipSub,
     # It is an error to GRAFT on an explicit peer
     if peer.peerId in g.parameters.directPeers:
       # receiving a graft from a direct peer should yield a more prominent warning (protocol violation)
-      warn "attempt to graft an explicit peer, peering agreements should be reciprocal",
+      warn "an explicit peer attempted to graft us, peering agreements should be reciprocal",
         peer, topic
       # and such an attempt should be logged and rejected with a PRUNE
       prunes.add(ControlPrune(
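In the hunk above, a GRAFT received from a peer listed in directPeers is treated as a protocol violation and answered with a PRUNE. A toy, self-contained sketch of that decision; PeerId and ControlPrune below are simplified stand-ins, not the real libp2p definitions:

import std/sets

type
  PeerId = string                  # stand-in for libp2p's PeerId
  ControlPrune = object            # trimmed-down stand-in for the pubsub message type
    topicID: string
    backoff: uint64

proc handleGraftFrom(directPeers: HashSet[PeerId], peer: PeerId,
                     topic: string, pruneBackoff: uint64): seq[ControlPrune] =
  # Direct peering agreements are reciprocal: a direct peer must never GRAFT us,
  # so such an attempt is rejected with a PRUNE carrying a backoff.
  if peer in directPeers:
    result.add(ControlPrune(topicID: topic, backoff: pruneBackoff))

when isMainModule:
  let direct = toHashSet(["QmDirectPeer"])
  doAssert handleGraftFrom(direct, "QmDirectPeer", "mytopic", 60).len == 1
  doAssert handleGraftFrom(direct, "QmOtherPeer", "mytopic", 60).len == 0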
@@ -105,10 +105,14 @@ proc handleGraft*(g: GossipSub,
       continue

+    # Check backingOff
+    # Ignore BackoffSlackTime here, since this is only for outbound activity
+    # and subtract a second time to avoid race conditions
+    # (peers may wait to graft us at the exact instant they're allowed to)
     if g.backingOff
       .getOrDefault(topic)
-      .getOrDefault(peer.peerId) > Moment.now():
-      debug "attempt to graft a backingOff peer", peer, topic
+      .getOrDefault(peer.peerId) - (BackoffSlackTime * 2).seconds > Moment.now():
+      debug "a backingOff peer attempted to graft us", peer, topic
       # and such an attempt should be logged and rejected with a PRUNE
       prunes.add(ControlPrune(
         topicID: topic,
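The new comparison subtracts twice BackoffSlackTime from the stored backoff expiry before comparing it with the current time, so a peer that re-grafts right around the instant its backoff runs out is tolerated instead of pruned. A sketch of just that comparison using chronos time types; the BackoffSlackTime value below is only for illustration, the real constant is defined elsewhere in gossipsub:

import chronos

const BackoffSlackTime = 2   # seconds; illustrative value

proc graftViolatesBackoff(expiry: Moment): bool =
  # Reject the GRAFT only if the peer is still backed off even after allowing
  # 2 * BackoffSlackTime of slack for timing races around the expiry instant.
  expiry - (BackoffSlackTime * 2).seconds > Moment.now()

when isMainModule:
  doAssert not graftViolatesBackoff(Moment.now() - 1.seconds)   # just expired: tolerated
  doAssert graftViolatesBackoff(Moment.now() + 10.seconds)      # still backed off: pruned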
@@ -162,13 +166,11 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r
     # add peer backoff
     if prune.backoff > 0:
       let
-        # avoid overflows and follow params
-        # worst case if the remote thinks we are wrong we get penalized
-        # but we won't end up with ghost peers
+        # avoid overflows and clamp to reasonable value
         backoffSeconds = clamp(
           prune.backoff + BackoffSlackTime,
           0'u64,
-          g.parameters.pruneBackoff.seconds.uint64 + BackoffSlackTime
+          1.days.seconds.uint64
         )
         backoff = Moment.fromNow(backoffSeconds.int64.seconds)
         current = g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId)
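With this change the backoff requested by the remote (plus slack) is honoured up to a hard cap of one day instead of being capped at our own pruneBackoff parameter: worst case we sit out a longer backoff than strictly necessary, but we no longer end up with ghost peers. A small sketch of the clamping arithmetic, with an illustrative slack constant:

import chronos

const BackoffSlackTime = 2'u64   # seconds; illustrative value

proc clampedBackoff(remoteBackoff: uint64): uint64 =
  # remote-requested backoff plus slack, capped at one day
  clamp(remoteBackoff + BackoffSlackTime, 0'u64, 1.days.seconds.uint64)

when isMainModule:
  doAssert clampedBackoff(60) == 62              # typical PRUNE: backoff + slack
  doAssert clampedBackoff(1_000_000) == 86_400   # oversized requests are capped at one day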
@@ -205,20 +205,6 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
       await conn.close()
       return

-proc start*(s: Switch): Future[seq[Future[void]]] {.async, gcsafe.} =
-  trace "starting switch for peer", peerInfo = s.peerInfo
-  var startFuts: seq[Future[void]]
-  for t in s.transports: # for each transport
-    for i, a in s.peerInfo.addrs:
-      if t.handles(a): # check if it handles the multiaddr
-        var server = t.start(a)
-        s.peerInfo.addrs[i] = t.ma # update peer's address
-        s.acceptFuts.add(s.accept(t))
-        startFuts.add(server)
-
-  debug "Started libp2p node", peer = s.peerInfo
-  return startFuts # listen for incoming connections
-
 proc stop*(s: Switch) {.async.} =
   trace "Stopping switch"
@@ -247,6 +233,28 @@ proc stop*(s: Switch) {.async.} =
   trace "Switch stopped"

+proc start*(s: Switch): Future[seq[Future[void]]] {.async, gcsafe.} =
+  trace "starting switch for peer", peerInfo = s.peerInfo
+
+  var startFuts: seq[Future[void]]
+  for t in s.transports: # for each transport
+    for i, a in s.peerInfo.addrs:
+      if t.handles(a): # check if it handles the multiaddr
+        let transpStart = t.start(a)
+        startFuts.add(transpStart)
+        try:
+          await transpStart
+          s.peerInfo.addrs[i] = t.ma # update peer's address
+          s.acceptFuts.add(s.accept(t))
+        except CancelledError as exc:
+          await s.stop()
+          raise exc
+        except CatchableError as exc:
+          debug "Failed to start one transport", address = $a, err = exc.msg
+          continue
+
+  debug "Started libp2p node", peer = s.peerInfo
+  return startFuts # listen for incoming connections
+
 proc newSwitch*(peerInfo: PeerInfo,
                 transports: seq[Transport],
                 identity: Identify,
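The start proc is moved below stop so that it can await s.stop() when cancelled, and each transport's start future is now awaited inside a try block, so a single transport that fails to start is logged and skipped instead of aborting the whole switch. A self-contained chronos sketch of that start-up pattern; startListener and startAll are illustrative names, not libp2p APIs:

import chronos

proc startListener(id: int): Future[void] {.async.} =
  # stand-in for Transport.start: listener 1 pretends its address is taken
  if id == 1:
    raise newException(ValueError, "address already in use")

proc startAll(): Future[seq[Future[void]]] {.async.} =
  var startFuts: seq[Future[void]]
  for id in 0 ..< 3:
    let fut = startListener(id)
    startFuts.add(fut)
    try:
      await fut
    except CancelledError as exc:
      # the real code also awaits s.stop() here before re-raising
      raise exc
    except CatchableError as exc:
      echo "failed to start listener ", id, ": ", exc.msg
      continue
  return startFuts

when isMainModule:
  discard waitFor startAll()   # listener 1 fails; 0 and 2 still start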