## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import options
import chronos, chronicles
import ../protobuf/minprotobuf,
       ../peerinfo,
       ../stream/connection,
       ../peerid,
       ../crypto/crypto,
       ../multiaddress,
       ../protocols/protocol,
       ../utility

logScope:
  topics = "identify"

const
  IdentifyCodec* = "/ipfs/id/1.0.0"
  IdentifyPushCodec* = "/ipfs/id/push/1.0.0"
  ProtoVersion* = "ipfs/0.1.0"
  AgentVersion* = "nim-libp2p/0.0.1"

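# The codec strings above are the protocol IDs negotiated via
# multistream-select; "/ipfs/id/1.0.0" and "/ipfs/id/push/1.0.0" are the
# canonical identify and identify-push IDs from the libp2p identify spec.
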
# TODO: implement push identify; leaving it out for now as it is not essential

type
  IdentityNoMatchError* = object of CatchableError
  IdentityInvalidMsgError* = object of CatchableError

  IdentifyInfo* = object
    pubKey*: Option[PublicKey]
    addrs*: seq[MultiAddress]
    observedAddr*: Option[MultiAddress]
    protoVersion*: Option[string]
    agentVersion*: Option[string]
    protos*: seq[string]

  Identify* = ref object of LPProtocol
    peerInfo*: PeerInfo

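# Wire layout of the identify message, as written by `encodeMsg` below
# (field numbers follow the libp2p identify protobuf):
#   1: publicKey        (bytes)
#   2: listenAddrs      (repeated bytes, one per MultiAddress)
#   3: protocols        (repeated string)
#   4: observedAddr     (bytes)
#   5: protocolVersion  (string)
#   6: agentVersion     (string)
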
proc encodeMsg*(peerInfo: PeerInfo, observedAddr: MultiAddress): ProtoBuffer =
  result = initProtoBuffer()

  result.write(1, peerInfo.publicKey.get().getBytes().tryGet())
  for ma in peerInfo.addrs:
    result.write(2, ma.data.buffer)
  for proto in peerInfo.protocols:
    result.write(3, proto)
  result.write(4, observedAddr.data.buffer)
  let protoVersion = ProtoVersion
  result.write(5, protoVersion)
  let agentVersion = AgentVersion
  result.write(6, agentVersion)
  result.finish()

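# Note: `encodeMsg` assumes `peerInfo.publicKey` is set; the unchecked
# `.get()` above will raise if the Option is empty, so callers should only
# encode identify messages for a PeerInfo constructed with a key.
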
proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
  var
    iinfo: IdentifyInfo
    pubKey: PublicKey
    oaddr: MultiAddress
    protoVersion: string
    agentVersion: string

  var pb = initProtoBuffer(buf)

  let r1 = pb.getField(1, pubKey)
  let r2 = pb.getRepeatedField(2, iinfo.addrs)
  let r3 = pb.getRepeatedField(3, iinfo.protos)
  let r4 = pb.getField(4, oaddr)
  let r5 = pb.getField(5, protoVersion)
  let r6 = pb.getField(6, agentVersion)

  let res = r1.isOk() and r2.isOk() and r3.isOk() and
            r4.isOk() and r5.isOk() and r6.isOk()

  if res:
    if r1.get():
      iinfo.pubKey = some(pubKey)
    if r4.get():
      iinfo.observedAddr = some(oaddr)
    if r5.get():
      iinfo.protoVersion = some(protoVersion)
    if r6.get():
      iinfo.agentVersion = some(agentVersion)
    debug "decodeMsg: decoded message", pubkey = ($pubKey).shortLog,
          addresses = $iinfo.addrs, protocols = $iinfo.protos,
          observable_address = $iinfo.observedAddr,
          proto_version = $iinfo.protoVersion,
          agent_version = $iinfo.agentVersion
    some(iinfo)
  else:
    trace "decodeMsg: failed to decode received message"
    none[IdentifyInfo]()

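# A minimal sketch of the encode/decode round trip. `somePeerInfo` and
# `someAddr` are hypothetical stand-ins for a fully populated PeerInfo
# (public key set) and a MultiAddress:
#
#   let pb = encodeMsg(somePeerInfo, someAddr)
#   let decoded = decodeMsg(pb.buffer)
#   assert decoded.isSome()
#   assert decoded.get().protoVersion == some(ProtoVersion)
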
proc newIdentify*(peerInfo: PeerInfo): Identify =
  new result
  result.peerInfo = peerInfo
  result.init()

method init*(p: Identify) =
  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
    try:
      defer:
        trace "exiting identify handler", conn
        await conn.close()

      trace "handling identify request", conn
      var pb = encodeMsg(p.peerInfo, conn.observedAddr)
      await conn.writeLp(pb.buffer)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      trace "exception in identify handler", exc = exc.msg, conn

  p.handler = handle
  p.codec = IdentifyCodec

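# `init` wires up the responder side: once the codec is negotiated, `handle`
# sends this node's identify message and closes the stream. The dialer side
# is `identify` below, which reads and validates the remote's message.
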
proc identify*(p: Identify,
               conn: Connection,
               remotePeerInfo: PeerInfo): Future[IdentifyInfo] {.async, gcsafe.} =
  trace "initiating identify", conn
  var message = await conn.readLp(64 * 1024)
  if len(message) == 0:
    trace "identify: Empty message received!", conn
    raise newException(IdentityInvalidMsgError, "Empty message received!")

  let infoOpt = decodeMsg(message)
  if infoOpt.isNone():
    raise newException(IdentityInvalidMsgError, "Incorrect message received!")
  result = infoOpt.get()

  if not isNil(remotePeerInfo) and result.pubKey.isSome:
    let peer = PeerID.init(result.pubKey.get())
    if peer.isErr:
      raise newException(IdentityInvalidMsgError, $peer.error)
    else:
      # compare the ids, because that is
      # the only thing we have in most cases
      if peer.get() != remotePeerInfo.peerId:
        trace "Peer ids don't match",
          remote = peer,
          local = remotePeerInfo.peerId

        raise newException(IdentityNoMatchError, "Peer ids don't match")

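# Typical dial-side usage (a sketch; `switch.dial` is an assumption about
# the surrounding switch API and may differ between versions):
#
#   let conn = await switch.dial(remotePeerInfo, IdentifyCodec)
#   let info = await identifyProto.identify(conn, remotePeerInfo)
#   echo "remote agent: ", info.agentVersion
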
proc push*(p: Identify, conn: Connection) {.async.} =
  await conn.write(IdentifyPushCodec)
  var pb = encodeMsg(p.peerInfo, conn.observedAddr)
  await conn.writeLp(pb.buffer)
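
# Note: `push` only implements the sending side of identify push: it writes
# the push codec followed by a fresh identify message. Handling incoming
# pushes is still open (see the TODO above).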