mirror of https://github.com/waku-org/nwaku.git
feat: metadata protocol shard subscription (#2149)
Parent: 250e8b983c
Commit: bcf8e9630d
@@ -15,6 +15,7 @@ import
   eth/p2p/discoveryv5/enr
 import
   ../../waku/waku_node,
+  ../../waku/waku_core/topics,
   ../../waku/node/peer_manager,
   ../../waku/waku_discv5,
   ../../waku/waku_metadata,
@@ -23,8 +24,6 @@ import

 procSuite "Waku Metadata Protocol":

-  # TODO: Add tests with shards when ready
-
   asyncTest "request() returns the supported metadata of the peer":
     let clusterId = 10.uint32
     let
@@ -34,6 +33,9 @@ procSuite "Waku Metadata Protocol":
     # Start nodes
     await allFutures([node1.start(), node2.start()])

+    node1.topicSubscriptionQueue.emit((kind: PubsubSub, topic: "/waku/2/rs/10/7"))
+    node1.topicSubscriptionQueue.emit((kind: PubsubSub, topic: "/waku/2/rs/10/6"))
+
     # Create connection
     let connOpt = await node2.peerManager.dialPeer(node1.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec)
     require:
@@ -48,3 +50,5 @@

     check:
       response1.get().clusterId.get() == clusterId
+      response1.get().shards == @[uint32(6), uint32(7)]
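
The test now exercises the shard-subscription path: before dialing, node1 emits two PubsubSub events on its topicSubscriptionQueue for the static-sharding pubsub topics /waku/2/rs/10/7 and /waku/2/rs/10/6 (cluster 10, shards 7 and 6), and the metadata response is expected to advertise both shards. As a rough, illustrative stand-in for what NsPubsubTopic.parse extracts from such a topic (not the real parser, which also validates the format), the last path segment is the shard index:

    import std/strutils

    proc shardOf(topic: string): uint32 =
      ## "/waku/2/rs/<clusterId>/<shardId>" -> shard index.
      ## Demo only: no validation, unlike NsPubsubTopic.parse.
      let parts = topic.split('/')
      uint32(parseUInt(parts[^1]))

    assert shardOf("/waku/2/rs/10/7") == 7'u32
    assert shardOf("/waku/2/rs/10/6") == 6'u32
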
@@ -155,7 +155,7 @@ proc new*(T: type WakuNode,
   )

   # mount metadata protocol
-  let metadata = WakuMetadata.new(netConfig.clusterId)
+  let metadata = WakuMetadata.new(netConfig.clusterId, queue)
   node.switch.mount(metadata, protocolMatcher(WakuMetadataCodec))
   node.wakuMetadata = metadata
   peerManager.wakuMetadata = metadata
@@ -1127,6 +1127,8 @@ proc start*(node: WakuNode) {.async.} =

   node.started = true

+  node.wakuMetadata.start()
+
   info "Node started successfully"

 proc stop*(node: WakuNode) {.async.} =
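
WakuNode.new now hands its topic-subscription event queue (queue above) to WakuMetadata, and WakuNode.start starts the protocol so its listener begins consuming that queue. The sketch below shows the chronos AsyncEventQueue produce/consume cycle this wiring relies on, with illustrative names (ShardEvent, consumer) standing in for SubscriptionEvent and subscriptionsListener; the newAsyncEventQueue constructor is assumed from chronos, while register/waitEvents/emit/unregister appear in the diff itself.

    import chronos

    type ShardEvent = tuple[kind: string, topic: string]  # stand-in for SubscriptionEvent

    proc consumer(queue: AsyncEventQueue[ShardEvent]) {.async.} =
      let key = queue.register()               # start buffering events for this reader
      let events = await queue.waitEvents(key)
      for ev in events:
        echo "got ", ev.kind, " for ", ev.topic
      queue.unregister(key)                    # stop buffering once done

    proc main() {.async.} =
      let queue = newAsyncEventQueue[ShardEvent]()
      let fut = consumer(queue)                # registers, then waits for events
      queue.emit((kind: "sub", topic: "/waku/2/rs/10/7"))  # producer side: the node's subscribe path
      await fut

    waitFor main()
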
@@ -4,7 +4,7 @@ else:
   {.push raises: [].}

 import
-  std/[options, sequtils, random],
+  std/[options, sequtils, random, sets],
   stew/results,
   chronicles,
   chronos,
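
std/sets comes in because the protocol's shards field changes from seq[uint32] to HashSet[uint32] below, so repeated subscribe/unsubscribe events cannot duplicate the advertised shard list. A minimal stdlib-only sketch of that behaviour:

    import std/[sets, sequtils, algorithm]

    var shards = initHashSet[uint32]()
    shards.incl(7)    # subscribe to shard 7
    shards.incl(6)    # subscribe to shard 6
    shards.incl(7)    # duplicate subscription is a no-op
    shards.excl(9)    # removing an unknown shard is also a no-op
    assert shards.len == 2
    assert toSeq(shards).sorted() == @[6'u32, 7'u32]  # toSeq order is unspecified, hence sorted()
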
@@ -27,57 +27,56 @@ const RpcResponseMaxBytes* = 1024
 type
   WakuMetadata* = ref object of LPProtocol
     clusterId*: uint32
-    shards*: seq[uint32]
+    shards*: HashSet[uint32]
+    topicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent]

 proc respond(m: WakuMetadata, conn: Connection): Future[Result[void, string]] {.async, gcsafe.} =
-  try:
-    await conn.writeLP(WakuMetadataResponse(
-      clusterId: some(m.clusterId),
-      shards: m.shards
-    ).encode().buffer)
-  except CatchableError as exc:
-    return err(exc.msg)
+  let response = WakuMetadataResponse(
+    clusterId: some(m.clusterId),
+    shards: toSeq(m.shards)
+  )
+
+  let res = catch: await conn.writeLP(response.encode().buffer)
+  if res.isErr():
+    return err(res.error.msg)

   return ok()

 proc request*(m: WakuMetadata, conn: Connection): Future[Result[WakuMetadataResponse, string]] {.async, gcsafe.} =
-  var buffer: seq[byte]
-  var error: string
-  try:
-    await conn.writeLP(WakuMetadataRequest(
-      clusterId: some(m.clusterId),
-      shards: m.shards,
-    ).encode().buffer)
-    buffer = await conn.readLp(RpcResponseMaxBytes)
-  except CatchableError as exc:
-    error = $exc.msg
-  finally:
-    # close, no more data is expected
-    await conn.closeWithEof()
+  let request = WakuMetadataRequest(clusterId: some(m.clusterId), shards: toSeq(m.shards))

-  if error.len > 0:
-    return err("write/read failed: " & error)
+  let writeRes = catch: await conn.writeLP(request.encode().buffer)
+  let readRes = catch: await conn.readLp(RpcResponseMaxBytes)

-  let decodedBuff = WakuMetadataResponse.decode(buffer)
-  if decodedBuff.isErr():
-    return err("decode failed: " & $decodedBuff.error)
+  # close no matter what
+  let closeRes = catch: await conn.closeWithEof()
+  if closeRes.isErr():
+    return err("close failed: " & closeRes.error.msg)

-  echo decodedBuff.get().clusterId
-  return ok(decodedBuff.get())
+  if writeRes.isErr():
+    return err("write failed: " & writeRes.error.msg)
+
+  let buffer =
+    if readRes.isErr():
+      return err("read failed: " & readRes.error.msg)
+    else: readRes.get()
+
+  let response = WakuMetadataResponse.decode(buffer).valueOr:
+    return err("decode failed: " & $error)
+
+  return ok(response)

 proc initProtocolHandler*(m: WakuMetadata) =
   proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
-    var buffer: seq[byte]
-    try:
-      buffer = await conn.readLp(RpcResponseMaxBytes)
-    except CatchableError as exc:
+    let res = catch: await conn.readLp(RpcResponseMaxBytes)
+    let buffer = res.valueOr:
+      error "Connection reading error", error=error.msg
       return

-    let decBuf = WakuMetadataResponse.decode(buffer)
-    if decBuf.isErr():
+    let response = WakuMetadataResponse.decode(buffer).valueOr:
+      error "Response decoding error", error=error
       return

-    let response = decBuf.get()
     debug "Received WakuMetadata request",
       remoteClusterId=response.clusterId,
       remoteShards=response.shards,
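
Both request() and the inline handler now funnel failures through stew/results instead of try/except: a catch: helper turns a raised exception into a Result, and valueOr unpacks a Result or runs an error branch in which error is available. A small self-contained sketch of the valueOr flow (decodeShard and readShard are made up for illustration; only the Result/valueOr API is assumed from stew/results):

    import std/strutils
    import stew/results

    proc decodeShard(raw: string): Result[uint32, string] =
      try:
        return ok(uint32(parseUInt(raw)))
      except ValueError as exc:
        return err(exc.msg)

    proc readShard(raw: string): uint32 =
      # `valueOr` yields the value, or runs its block with `error` injected,
      # mirroring the "decode failed: " & $error paths above.
      let shard = decodeShard(raw).valueOr:
        echo "decode failed: ", error
        return 0
      shard

    assert readShard("7") == 7'u32
    assert readShard("oops") == 0'u32
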
@@ -92,12 +91,50 @@ proc initProtocolHandler*(m: WakuMetadata) =
   m.handler = handle
   m.codec = WakuMetadataCodec

-proc new*(T: type WakuMetadata, clusterId: uint32): T =
-  let m = WakuMetadata(
-    clusterId: clusterId,
-    # TODO: must be updated real time
-    shards: @[],
-  )
-  m.initProtocolHandler()
+proc new*(T: type WakuMetadata,
+  clusterId: uint32,
+  queue: AsyncEventQueue[SubscriptionEvent],
+  ): T =
+  let wm = WakuMetadata(clusterId: clusterId, topicSubscriptionQueue: queue)
+
+  wm.initProtocolHandler()

   info "Created WakuMetadata protocol", clusterId=clusterId
-  return m
+
+  return wm
+
+proc subscriptionsListener(wm: WakuMetadata) {.async.} =
+  ## Listen for pubsub topics subscriptions changes
+
+  let key = wm.topicSubscriptionQueue.register()
+
+  while wm.started:
+    let events = await wm.topicSubscriptionQueue.waitEvents(key)
+
+    for event in events:
+      let parsedTopic = NsPubsubTopic.parse(event.topic).valueOr:
+        continue
+
+      if parsedTopic.kind != NsPubsubTopicKind.StaticSharding:
+        continue
+
+      if parsedTopic.clusterId != wm.clusterId:
+        continue
+
+      case event.kind:
+        of PubsubSub:
+          wm.shards.incl(parsedTopic.shardId)
+        of PubsubUnsub:
+          wm.shards.excl(parsedTopic.shardId)
+        else:
+          continue
+
+  wm.topicSubscriptionQueue.unregister(key)
+
+proc start*(wm: WakuMetadata) =
+  wm.started = true
+
+  asyncSpawn wm.subscriptionsListener()
+
+proc stop*(wm: WakuMetadata) =
+  wm.started = false
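
start() flips the started flag and spawns subscriptionsListener with asyncSpawn; stop() clears the flag, and the while wm.started loop exits after it processes its next batch of events. A reduced, chronos-only sketch of that flag-guarded background-task pattern (Worker, loop, and the sleeps are illustrative, not from the diff):

    import chronos

    type Worker = ref object
      started: bool

    proc loop(w: Worker) {.async.} =
      while w.started:
        await sleepAsync(10.milliseconds)   # stand-in for waitEvents(key)

    proc start(w: Worker) =
      w.started = true
      asyncSpawn w.loop()                   # fire-and-forget background task

    proc stop(w: Worker) =
      w.started = false                     # loop exits once it next checks the flag

    let w = Worker()
    w.start()
    waitFor sleepAsync(50.milliseconds)
    w.stop()
    waitFor sleepAsync(20.milliseconds)     # give the loop a chance to finish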