Limit cache size on Filter API and Relay API (#317)

Minor improvements to Filter API and Relay API
This commit is contained in:
Hanno Cornelius 2020-12-07 15:26:58 +02:00 committed by GitHub
parent 7d0c1788f8
commit fa4d873ee3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 60 additions and 14 deletions

View File

@ -334,12 +334,28 @@ procSuite "Waku v2 JSON-RPC API":
response.allIt(it.contentTopic == cTopic)
# No new messages
response = await client.get_waku_v2_filter_v1_messages(cTopic)
check:
response.len() == 0
# Now ensure that no more than the preset max messages can be cached
let maxSize = filter_api.maxCache
for x in 1..(maxSize + 1):
# Try to cache 1 more than maximum allowed
filters.notify(WakuMessage(payload: @[byte x], contentTopic: cTopic), requestId)
response = await client.get_waku_v2_filter_v1_messages(cTopic)
check:
# Max messages has not been exceeded
response.len == maxSize
response.allIt(it.contentTopic == cTopic)
# Check that oldest item has been removed
response[0].payload == @[byte 2]
response[maxSize - 1].payload == @[byte (maxSize + 1)]
server.stop()
server.close()
waitfor node.stop()
waitfor node.stop()

View File

@ -7,19 +7,34 @@ import
../../waku_types,
../wakunode2
const futTimeout = 5.seconds
const futTimeout* = 5.seconds # Max time to wait for futures
const maxCache* = 100 # Max number of messages cached per topic @TODO make this configurable
type
MessageCache* = Table[ContentTopic, seq[WakuMessage]]
proc installFilterApiHandlers*(node: WakuNode, rpcsrv: RpcServer) =
## Create a message cache indexed on content topic
## @TODO consider moving message cache elsewhere. Perhaps to node?
var
messageCache = initTable[ContentTopic, seq[WakuMessage]]()
messageCache: MessageCache
proc filterHandler(msg: WakuMessage) {.gcsafe, closure.} =
debug "WakuMessage received", msg=msg
# Add message to current cache
# @TODO limit max content topics and messages
messageCache.mgetOrPut(msg.contentTopic, @[]).add(msg)
trace "WakuMessage received", msg=msg
# Make a copy of msgs for this topic to modify
var msgs = messageCache.getOrDefault(msg.contentTopic, @[])
if msgs.len >= maxCache:
# Message cache on this topic exceeds maximum. Delete oldest.
# @TODO this may become a bottleneck if called as the norm rather than the exception when adding messages. Performance profile needed.
msgs.delete(0,0)
msgs.add(msg)
# Replace indexed entry with copy
# @TODO max number of content topics could be limited in node
messageCache[msg.contentTopic] = msgs
## Filter API version 1 definitions

View File

@ -9,21 +9,36 @@ import
../wakunode2,
./jsonrpc_types, ./jsonrpc_utils
const futTimeout = 5.seconds
const futTimeout* = 5.seconds # Max time to wait for futures
const maxCache* = 100 # Max number of messages cached per topic @TODO make this configurable
type
TopicCache* = Table[string, seq[WakuMessage]]
proc installRelayApiHandlers*(node: WakuNode, rpcsrv: RpcServer) =
## Create a per-topic message cache
var
topicCache = initTable[string, seq[WakuMessage]]()
topicCache: TopicCache
proc topicHandler(topic: string, data: seq[byte]) {.async.} =
debug "Topic handler triggered"
trace "Topic handler triggered"
let msg = WakuMessage.init(data)
if msg.isOk():
debug "WakuMessage received", msg=msg, topic=topic
# Add message to current cache
# @TODO limit max topics and messages
topicCache.mgetOrPut(topic, @[]).add(msg[])
trace "WakuMessage received", msg=msg, topic=topic
# Make a copy of msgs for this topic to modify
var msgs = topicCache.getOrDefault(topic, @[])
if msgs.len >= maxCache:
# Message cache on this topic exceeds maximum. Delete oldest.
# @TODO this may become a bottleneck if called as the norm rather than the exception when adding messages. Performance profile needed.
msgs.delete(0,0)
msgs.add(msg[])
# Replace indexed entry with copy
# @TODO max number of topics could be limited in node
topicCache[topic] = msgs
else:
debug "WakuMessage received but failed to decode", msg=msg, topic=topic
# @TODO handle message decode failure