mirror of https://github.com/waku-org/nwaku.git
feat: archive update for store v3 (#2451)
This commit is contained in:
parent 059cb97518
commit 505479b870
@@ -30,7 +30,8 @@ proc computeArchiveCursor*(
     pubsubTopic: pubsubTopic,
     senderTime: message.timestamp,
     storeTime: message.timestamp,
-    digest: waku_archive.computeDigest(message),
+    digest: computeDigest(message),
+    hash: computeMessageHash(pubsubTopic, message),
   )

 proc put*(
@@ -38,7 +39,7 @@ proc put*(
 ): ArchiveDriver =
   for msg in msgList:
     let
-      msgDigest = waku_archive.computeDigest(msg)
+      msgDigest = computeDigest(msg)
       msgHash = computeMessageHash(pubsubTopic, msg)
       _ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp)
         # discard crashes
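Note: the helper changes above introduce the store v3 identifier. Next to the legacy store v2 digest, every cursor and `put` call now carries the deterministic message hash from `computeMessageHash(pubsubTopic, msg)`. A minimal sketch of the pattern, assuming the test helpers (`fakeWakuMessage`, `DefaultPubsubTopic`) used elsewhere in this diff are in scope:

    # Sketch only: mirrors the helper pattern above.
    let msg = fakeWakuMessage(contentTopic = "test-content-topic")
    let
      digest = computeDigest(msg)                         # store v2 identifier
      hash = computeMessageHash(DefaultPubsubTopic, msg)  # store v3 identifier
    discard waitFor driver.put(DefaultPubsubTopic, msg, digest, hash, msg.timestamp)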
@@ -20,7 +20,8 @@ proc computeTestCursor(pubsubTopic: PubsubTopic,
     pubsubTopic: pubsubTopic,
     senderTime: message.timestamp,
     storeTime: message.timestamp,
-    digest: computeDigest(message)
+    digest: computeDigest(message),
+    hash: computeMessageHash(pubsubTopic, message),
   )

 suite "Postgres driver":
@@ -62,19 +63,21 @@ suite "Postgres driver":
     let msg = fakeWakuMessage(contentTopic=contentTopic)

     let computedDigest = computeDigest(msg)
+    let computedHash = computeMessageHash(DefaultPubsubTopic, msg)

-    let putRes = await driver.put(DefaultPubsubTopic, msg, computedDigest, computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)
+    let putRes = await driver.put(DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp)
     assert putRes.isOk(), putRes.error

     let storedMsg = (await driver.getAllMessages()).tryGet()

     assert storedMsg.len == 1

-    let (pubsubTopic, actualMsg, digest, storeTimestamp) = storedMsg[0]
+    let (pubsubTopic, actualMsg, digest, _, hash) = storedMsg[0]
     assert actualMsg.contentTopic == contentTopic
     assert pubsubTopic == DefaultPubsubTopic
     assert toHex(computedDigest.data) == toHex(digest)
     assert toHex(actualMsg.payload) == toHex(msg.payload)
+    assert toHex(computedHash) == toHex(hash)

   asyncTest "Insert and query message":
     const contentTopic1 = "test-content-topic-1"
@@ -8,7 +8,6 @@ import
 import
   ../../../waku/waku_archive,
   ../../../waku/waku_archive/driver as driver_module,
-  ../../../waku/waku_archive/driver/builder,
   ../../../waku/waku_archive/driver/postgres_driver,
   ../../../waku/waku_core,
   ../../../waku/waku_core/message/digest,
@@ -33,7 +32,8 @@ proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveC
     pubsubTopic: pubsubTopic,
     senderTime: message.timestamp,
     storeTime: message.timestamp,
-    digest: computeDigest(message)
+    digest: computeDigest(message),
+    hash: computeMessageHash(pubsubTopic, message)
   )

 suite "Postgres driver - queries":
@@ -652,6 +652,45 @@ suite "Postgres driver - queries":
     check:
       filteredMessages == expectedMessages[4..5].reversed()

+  asyncTest "only hashes - descending order":
+    ## Given
+    let timeOrigin = now()
+    var expected = @[
+      fakeWakuMessage(@[byte 0], ts=ts(00, timeOrigin)),
+      fakeWakuMessage(@[byte 1], ts=ts(10, timeOrigin)),
+      fakeWakuMessage(@[byte 2], ts=ts(20, timeOrigin)),
+      fakeWakuMessage(@[byte 3], ts=ts(30, timeOrigin)),
+      fakeWakuMessage(@[byte 4], ts=ts(40, timeOrigin)),
+      fakeWakuMessage(@[byte 5], ts=ts(50, timeOrigin)),
+      fakeWakuMessage(@[byte 6], ts=ts(60, timeOrigin)),
+      fakeWakuMessage(@[byte 7], ts=ts(70, timeOrigin)),
+      fakeWakuMessage(@[byte 8], ts=ts(80, timeOrigin)),
+      fakeWakuMessage(@[byte 9], ts=ts(90, timeOrigin)),
+    ]
+    var messages = expected
+
+    shuffle(messages)
+    debug "randomized message insertion sequence", sequence=messages.mapIt(it.payload)
+
+    let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it))
+
+    for (msg, hash) in messages.zip(hashes):
+      require (await driver.put(DefaultPubsubTopic, msg, computeDigest(msg), hash, msg.timestamp)).isOk()
+
+    ## When
+    let res = await driver.getMessages(
+      hashes=hashes,
+      ascendingOrder=false
+    )
+
+    ## Then
+    assert res.isOk(), res.error
+
+    let expectedMessages = expected.reversed()
+    let filteredMessages = res.tryGet().mapIt(it[1])
+    check:
+      filteredMessages == expectedMessages
+
   asyncTest "start time only":
     ## Given
     const contentTopic = "test-content-topic"
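Note: the new test exercises hash-only lookups. Under store v3 a client can request messages purely by content-addressed hash, with no topic or time filter. A reduced sketch of such a lookup against an already-populated driver (msgA/msgB assumed stored):

    let wanted = @[
      computeMessageHash(DefaultPubsubTopic, msgA),
      computeMessageHash(DefaultPubsubTopic, msgB),
    ]
    let rows = (await driver.getMessages(hashes = wanted, ascendingOrder = false)).tryGet()
    let found = rows.mapIt(it[1])  # each row is (topic, message, digest, storedAt, hash)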
@@ -13,8 +13,8 @@ import

 # Helper functions

-proc genIndexedWakuMessage(i: int8): IndexedWakuMessage =
-  ## Use i to generate an IndexedWakuMessage
+proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
+  ## Use i to generate an Index WakuMessage
   var data {.noinit.}: array[32, byte]
   for x in data.mitems: x = i.byte

@@ -27,14 +27,14 @@ proc genIndexedWakuMessage(i: int8): IndexedWakuMessage =
       pubsubTopic: "test-pubsub-topic"
     )

-  IndexedWakuMessage(msg: message, index: cursor)
+  (cursor, message)

 proc getPrepopulatedTestQueue(unsortedSet: auto, capacity: int): QueueDriver =
   let driver = QueueDriver.new(capacity)

   for i in unsortedSet:
-    let message = genIndexedWakuMessage(i.int8)
-    discard driver.add(message)
+    let (index, message) = genIndexedWakuMessage(i.int8)
+    discard driver.add(index, message)

   driver

@@ -49,12 +49,12 @@ procSuite "Sorted driver queue":
     ## When
     # Fill up the queue
     for i in 1..capacity:
-      let message = genIndexedWakuMessage(i.int8)
-      require(driver.add(message).isOk())
+      let (index, message) = genIndexedWakuMessage(i.int8)
+      require(driver.add(index, message).isOk())

     # Add one more. Capacity should not be exceeded
-    let message = genIndexedWakuMessage(capacity.int8 + 1)
-    require(driver.add(message).isOk())
+    let (index, message) = genIndexedWakuMessage(capacity.int8 + 1)
+    require(driver.add(index, message).isOk())

     ## Then
     check:
@@ -68,14 +68,14 @@ procSuite "Sorted driver queue":
     ## When
     # Fill up the queue
     for i in 1..capacity:
-      let message = genIndexedWakuMessage(i.int8)
-      require(driver.add(message).isOk())
+      let (index, message) = genIndexedWakuMessage(i.int8)
+      require(driver.add(index, message).isOk())

     # Attempt to add message with older value than oldest in queue should fail
     let
-      oldestTimestamp = driver.first().get().index.senderTime
-      message = genIndexedWakuMessage(oldestTimestamp.int8 - 1)
-      addRes = driver.add(message)
+      oldestTimestamp = driver.first().get().senderTime
+      (index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1)
+      addRes = driver.add(index, message)

     ## Then
     check:
@@ -93,14 +93,14 @@ procSuite "Sorted driver queue":
     let driver = getPrepopulatedTestQueue(unsortedSet, capacity)

     # Walk forward through the set and verify ascending order
-    var prevSmaller = genIndexedWakuMessage(min(unsortedSet).int8 - 1).index
+    var (prevSmaller, _) = genIndexedWakuMessage(min(unsortedSet).int8 - 1)
     for i in driver.fwdIterator:
       let (index, _) = i
       check cmp(index, prevSmaller) > 0
       prevSmaller = index

     # Walk backward through the set and verify descending order
-    var prevLarger = genIndexedWakuMessage(max(unsortedSet).int8 + 1).index
+    var (prevLarger, _) = genIndexedWakuMessage(max(unsortedSet).int8 + 1)
     for i in driver.bwdIterator:
       let (index, _) = i
       check cmp(index, prevLarger) < 0
@@ -122,7 +122,7 @@ procSuite "Sorted driver queue":

     let first = firstRes.tryGet()
     check:
-      first.msg.timestamp == Timestamp(1)
+      first.senderTime == Timestamp(1)

   test "get first item from empty queue should fail":
     ## Given
@@ -153,7 +153,7 @@ procSuite "Sorted driver queue":

     let last = lastRes.tryGet()
     check:
-      last.msg.timestamp == Timestamp(5)
+      last.senderTime == Timestamp(5)

   test "get last item from empty queue should fail":
     ## Given
@@ -176,8 +176,8 @@ procSuite "Sorted driver queue":
     let driver = getPrepopulatedTestQueue(unsortedSet, capacity)

     let
-      existingIndex = genIndexedWakuMessage(4).index
-      nonExistingIndex = genIndexedWakuMessage(99).index
+      (existingIndex, _) = genIndexedWakuMessage(4)
+      (nonExistingIndex, _) = genIndexedWakuMessage(99)

     ## Then
     check:
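Note: these tests track the removal of the `IndexedWakuMessage` wrapper; the queue driver now takes the `Index` key and the `WakuMessage` value as two arguments, mirroring its internal `SortedSet[Index, WakuMessage]`. The new call shape, as a sketch using the helper above:

    let driver = QueueDriver.new(capacity = 10)
    let (index, message) = genIndexedWakuMessage(1'i8)
    require(driver.add(index, message).isOk())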
@@ -20,15 +20,16 @@ proc getTestQueueDriver(numMessages: int): QueueDriver =
   for x in data.mitems: x = 1

   for i in 0..<numMessages:
-    let msg = IndexedWakuMessage(
-      msg: WakuMessage(payload: @[byte i], timestamp: Timestamp(i)),
-      index: Index(
-        receiverTime: Timestamp(i),
-        senderTime: Timestamp(i),
-        digest: MessageDigest(data: data)
-      )
-    )
-    discard testQueueDriver.add(msg)
+    let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
+
+    let index = Index(
+      receiverTime: Timestamp(i),
+      senderTime: Timestamp(i),
+      digest: MessageDigest(data: data)
+    )
+
+    discard testQueueDriver.add(index, msg)

   return testQueueDriver

@@ -37,7 +38,7 @@ procSuite "Queue driver - pagination":
   let driver = getTestQueueDriver(10)
   let
     indexList: seq[Index] = toSeq(driver.fwdIterator()).mapIt(it[0])
-    msgList: seq[WakuMessage] = toSeq(driver.fwdIterator()).mapIt(it[1].msg)
+    msgList: seq[WakuMessage] = toSeq(driver.fwdIterator()).mapIt(it[1])

   test "Forward pagination - normal pagination":
     ## Given
@@ -211,7 +212,7 @@ procSuite "Queue driver - pagination":
       cursor: Option[Index] = none(Index)
       forward = true

-    proc onlyEvenTimes(i: IndexedWakuMessage): bool = i.msg.timestamp.int64 mod 2 == 0
+    proc onlyEvenTimes(index: Index, msg: WakuMessage): bool = msg.timestamp.int64 mod 2 == 0

     ## When
     let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor, predicate=onlyEvenTimes)
@@ -392,7 +393,7 @@ procSuite "Queue driver - pagination":
       cursor: Option[Index] = none(Index)
       forward = false

-    proc onlyOddTimes(i: IndexedWakuMessage): bool = i.msg.timestamp.int64 mod 2 != 0
+    proc onlyOddTimes(index: Index, msg: WakuMessage): bool = msg.timestamp.int64 mod 2 != 0

     ## When
     let page = driver.getPage(pageSize=pageSize, forward=forward, cursor=cursor, predicate=onlyOddTimes)
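Note: page predicates likewise lose the wrapper object and now receive the index and the message separately, matching the reworked `QueryFilterMatcher` type shown further below. A hypothetical filter in the new shape:

    # Keeps only entries stored under a given pubsub topic.
    proc onlyTestTopic(index: Index, msg: WakuMessage): bool =
      index.pubsubTopic == "test-pubsub-topic"

It can be passed as `driver.getPage(..., predicate=onlyTestTopic)` exactly like the parity filters above.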
@@ -30,7 +30,8 @@ proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveC
     pubsubTopic: pubsubTopic,
     senderTime: message.timestamp,
     storeTime: message.timestamp,
-    digest: computeDigest(message)
+    digest: computeDigest(message),
+    hash: computeMessageHash(pubsubTopic, message),
   )

@@ -41,9 +41,10 @@ suite "SQLite driver":
     let driver = newSqliteArchiveDriver()

     let msg = fakeWakuMessage(contentTopic=contentTopic)
+    let msgHash = computeMessageHash(DefaultPubsubTopic, msg)

     ## When
-    let putRes = waitFor driver.put(DefaultPubsubTopic, msg, computeDigest(msg), computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp)
+    let putRes = waitFor driver.put(DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp)

     ## Then
     check:
@@ -53,9 +54,10 @@ suite "SQLite driver":
     check:
       storedMsg.len == 1
       storedMsg.all do (item: auto) -> bool:
-        let (pubsubTopic, msg, digest, storeTimestamp) = item
+        let (pubsubTopic, msg, _, _, hash) = item
         msg.contentTopic == contentTopic and
-          pubsubTopic == DefaultPubsubTopic
+          pubsubTopic == DefaultPubsubTopic and
+          hash == msgHash

     ## Cleanup
     (waitFor driver.close()).expect("driver to close")
@@ -138,7 +138,7 @@ suite "Waku Archive - Retention policy":
     check:
       storedMsg.len == capacity
       storedMsg.all do (item: auto) -> bool:
-        let (pubsubTopic, msg, digest, storeTimestamp) = item
+        let (pubsubTopic, msg, _, _, _) = item
         msg.contentTopic == contentTopic and
           pubsubTopic == DefaultPubsubTopic
@@ -777,14 +777,14 @@ proc mountArchive*(node: WakuNode,
                    driver: ArchiveDriver,
                    retentionPolicy = none(RetentionPolicy)):
                    Result[void, string] =
-  let wakuArchiveRes = WakuArchive.new(driver,
-                                       retentionPolicy)
-  if wakuArchiveRes.isErr():
-    return err("error in mountArchive: " & wakuArchiveRes.error)
-
-  node.wakuArchive = wakuArchiveRes.get()
-  asyncSpawn node.wakuArchive.start()
+  node.wakuArchive = WakuArchive.new(
+      driver = driver,
+      retentionPolicy = retentionPolicy,
+    ).valueOr:
+    return err("error in mountArchive: " & error)
+
+  node.wakuArchive.start()

   return ok()

 ## Waku store
@@ -1194,7 +1194,7 @@ proc stop*(node: WakuNode) {.async.} =
     error "exception stopping the node", error=getCurrentExceptionMsg()

   if not node.wakuArchive.isNil():
-    await node.wakuArchive.stop()
+    await node.wakuArchive.stopWait()

   node.started = false
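Note: `mountArchive` swaps the manual `isErr`/`get` handling for stew's `valueOr`, which binds the failure branch's `error` symbol, and the archive loops are started synchronously instead of via `asyncSpawn`. The idiom in isolation, as a sketch:

    # valueOr unwraps the Result or runs the else-branch with `error` bound.
    node.wakuArchive = WakuArchive.new(driver = driver).valueOr:
      return err("error in mountArchive: " & error)
    node.wakuArchive.start()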
@@ -4,22 +4,15 @@ else:
   {.push raises: [].}

 import
-  std/[tables, times, sequtils, options, algorithm, strutils],
+  std/[times, options, sequtils, strutils, algorithm],
   stew/[results, byteutils],
   chronicles,
   chronos,
-  regex,
   metrics
 import
-  ../common/[
-    databases/dburl,
-    databases/db_sqlite,
-    paging
-  ],
+  ../common/paging,
   ./driver,
   ./retention_policy,
-  ./retention_policy/retention_policy_capacity,
-  ./retention_policy/retention_policy_time,
   ../waku_core,
   ../waku_core/message/digest,
   ./common,
@@ -32,22 +25,35 @@ const
   DefaultPageSize*: uint = 20
   MaxPageSize*: uint = 100

-## Message validation
-
-type
-  MessageValidator* = ref object of RootObj
-
-  ValidationResult* = Result[void, string]
-
-method validate*(validator: MessageValidator, msg: WakuMessage): ValidationResult {.base.} = discard
-
-# Default message validator
-
-const MaxMessageTimestampVariance* = getNanoSecondTime(20) # 20 seconds maximum allowable sender timestamp "drift"
-
-type DefaultMessageValidator* = ref object of MessageValidator
-
-method validate*(validator: DefaultMessageValidator, msg: WakuMessage): ValidationResult =
+  # Retention policy
+  WakuArchiveDefaultRetentionPolicyInterval* = chronos.minutes(30)
+
+  # Metrics reporting
+  WakuArchiveDefaultMetricsReportInterval* = chronos.minutes(1)
+
+  # Message validation
+  # 20 seconds maximum allowable sender timestamp "drift"
+  MaxMessageTimestampVariance* = getNanoSecondTime(20)
+
+type MessageValidator* = proc(msg: WakuMessage): Result[void, string] {.closure, gcsafe, raises: [].}
+
+## Archive
+
+type WakuArchive* = ref object
+  driver: ArchiveDriver
+
+  validator: MessageValidator
+
+  retentionPolicy: Option[RetentionPolicy]
+
+  retentionPolicyHandle: Future[void]
+  metricsHandle: Future[void]
+
+proc validate*(msg: WakuMessage): Result[void, string] =
+  if msg.ephemeral:
+    # Ephemeral message, do not store
+    return
+
   if msg.timestamp == 0:
     return ok()
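Note: validation moves from a `ref object` hierarchy with a base `validate` method to a plain closure type, so a custom check can be injected at construction time. A sketch of a permissive validator under the new `MessageValidator` shape (hypothetical; the default stays the `validate` proc above):

    proc acceptAll(msg: WakuMessage): Result[void, string] =
      ok()

    let archiveRes = WakuArchive.new(driver, validator = acceptAll)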
@@ -62,188 +68,167 @@ method validate*(validator: DefaultMessageValidator, msg: WakuMessage): Validati
   if upperBound < msg.timestamp:
     return err(invalidMessageFuture)

-  ok()
-
-## Archive
-
-type
-  WakuArchive* = ref object
-    driver*: ArchiveDriver  # TODO: Make this field private. Remove asterisk
-    validator: MessageValidator
-    retentionPolicy: RetentionPolicy
-    retPolicyFut: Future[Result[void, string]]  ## retention policy cancelable future
-    retMetricsRepFut: Future[Result[void, string]]  ## metrics reporting cancelable future
+  return ok()

 proc new*(T: type WakuArchive,
           driver: ArchiveDriver,
+          validator: MessageValidator = validate,
           retentionPolicy = none(RetentionPolicy)):
           Result[T, string] =
-  let retPolicy = if retentionPolicy.isSome():
-                    retentionPolicy.get()
-                  else:
-                    nil
-
-  let wakuArch = WakuArchive(driver: driver,
-                             validator: DefaultMessageValidator(),
-                             retentionPolicy: retPolicy)
-  return ok(wakuArch)
+  if driver.isNil():
+    return err("archive driver is Nil")
+
+  let archive =
+    WakuArchive(
+      driver: driver,
+      validator: validator,
+      retentionPolicy: retentionPolicy,
+    )
+
+  return ok(archive)

-proc handleMessage*(w: WakuArchive,
+proc handleMessage*(self: WakuArchive,
                     pubsubTopic: PubsubTopic,
                     msg: WakuMessage) {.async.} =
-  if msg.ephemeral:
-    # Ephemeral message, do not store
+  self.validator(msg).isOkOr:
+    waku_archive_errors.inc(labelValues = [error])
     return

-  if not w.validator.isNil():
-    let validationRes = w.validator.validate(msg)
-    if validationRes.isErr():
-      waku_archive_errors.inc(labelValues = [validationRes.error])
-      return
+  let
+    msgDigest = computeDigest(msg)
+    msgHash = computeMessageHash(pubsubTopic, msg)
+    msgTimestamp = if msg.timestamp > 0: msg.timestamp
+                   else: getNanosecondTime(getTime().toUnixFloat())
+
+  trace "handling message",
+    pubsubTopic=pubsubTopic,
+    contentTopic=msg.contentTopic,
+    msgTimestamp=msg.timestamp,
+    usedTimestamp=msgTimestamp,
+    digest=toHex(msgDigest.data),
+    messageHash=toHex(msgHash)

   let insertStartTime = getTime().toUnixFloat()

-  block:
-    let
-      msgDigest = computeDigest(msg)
-      msgHash = computeMessageHash(pubsubTopic, msg)
-      msgDigestHex = toHex(msgDigest.data)
-      msgHashHex = toHex(msgHash)
-      msgReceivedTime = if msg.timestamp > 0: msg.timestamp
-                        else: getNanosecondTime(getTime().toUnixFloat())
-
-    trace "handling message", pubsubTopic=pubsubTopic, contentTopic=msg.contentTopic, timestamp=msg.timestamp, digest=msgDigestHex, messageHash=msgHashHex
-
-    let putRes = await w.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgReceivedTime)
-    if putRes.isErr():
-      if "duplicate key value violates unique constraint" in putRes.error:
-        trace "failed to insert message", err=putRes.error
-      else:
-        debug "failed to insert message", err=putRes.error
-      waku_archive_errors.inc(labelValues = [insertFailure])
+  (await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr:
+    waku_archive_errors.inc(labelValues = [insertFailure])
+    # Prevent spamming the logs when multiple nodes are connected to the same database.
+    # In that case, the message cannot be inserted but is an expected "insert error"
+    # and therefore we reduce its visibility by having the log in trace level.
+    if "duplicate key value violates unique constraint" in error:
+      trace "failed to insert message", err=error
+    else:
+      debug "failed to insert message", err=error

   let insertDuration = getTime().toUnixFloat() - insertStartTime
   waku_archive_insert_duration_seconds.observe(insertDuration)

-proc findMessages*(w: WakuArchive, query: ArchiveQuery): Future[ArchiveResult] {.async, gcsafe.} =
+proc findMessages*(self: WakuArchive, query: ArchiveQuery): Future[ArchiveResult] {.async, gcsafe.} =
   ## Search the archive to return a single page of messages matching the query criteria
-  let
-    qContentTopics = query.contentTopics
-    qPubSubTopic = query.pubsubTopic
-    qCursor = query.cursor
-    qStartTime = query.startTime
-    qEndTime = query.endTime
-    qMaxPageSize = if query.pageSize <= 0: DefaultPageSize
-                   else: min(query.pageSize, MaxPageSize)
-    isAscendingOrder = query.direction.into()
-
-  if qContentTopics.len > 10:
+  let maxPageSize =
+    if query.pageSize <= 0:
+      DefaultPageSize
+    else:
+      min(query.pageSize, MaxPageSize)
+
+  let isAscendingOrder = query.direction.into()
+
+  if query.contentTopics.len > 10:
     return err(ArchiveError.invalidQuery("too many content topics"))

   let queryStartTime = getTime().toUnixFloat()

-  let queryRes = await w.driver.getMessages(
-    contentTopic = qContentTopics,
-    pubsubTopic = qPubSubTopic,
-    cursor = qCursor,
-    startTime = qStartTime,
-    endTime = qEndTime,
-    maxPageSize = qMaxPageSize + 1,
-    ascendingOrder = isAscendingOrder
-  )
+  let rows = (await self.driver.getMessages(
+    contentTopic = query.contentTopics,
+    pubsubTopic = query.pubsubTopic,
+    cursor = query.cursor,
+    startTime = query.startTime,
+    endTime = query.endTime,
+    hashes = query.hashes,
+    maxPageSize = maxPageSize + 1,
+    ascendingOrder = isAscendingOrder
+  )).valueOr:
+    return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))

   let queryDuration = getTime().toUnixFloat() - queryStartTime
   waku_archive_query_duration_seconds.observe(queryDuration)

-  # Build response
-  if queryRes.isErr():
-    return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: queryRes.error))
-
-  let rows = queryRes.get()
-
+  var hashes = newSeq[WakuMessageHash]()
   var messages = newSeq[WakuMessage]()
   var cursor = none(ArchiveCursor)

   if rows.len == 0:
-    return ok(ArchiveResponse(messages: messages, cursor: cursor))
+    return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor))

   ## Messages
-  let pageSize = min(rows.len, int(qMaxPageSize))
+  let pageSize = min(rows.len, int(maxPageSize))
+
+  #TODO once store v2 is removed, unzip instead of 2x map
   messages = rows[0..<pageSize].mapIt(it[1])
+  hashes = rows[0..<pageSize].mapIt(it[4])

   ## Cursor
-  if rows.len > int(qMaxPageSize):
+  if rows.len > int(maxPageSize):
     ## Build last message cursor
     ## The cursor is built from the last message INCLUDED in the response
     ## (i.e. the second last message in the rows list)
-    let (pubsubTopic, message, digest, storeTimestamp) = rows[^2]
-
-    # TODO: Improve coherence of MessageDigest type
-    let messageDigest = block:
-      var data: array[32, byte]
-      for i in 0..<min(digest.len, 32):
-        data[i] = digest[i]
-
-      MessageDigest(data: data)
-
+
+    #TODO Once Store v2 is removed keep only message and hash
+    let (pubsubTopic, message, digest, storeTimestamp, hash) = rows[^2]
+
+    #TODO Once Store v2 is removed, the cursor becomes the hash of the last message
     cursor = some(ArchiveCursor(
-      pubsubTopic: pubsubTopic,
-      senderTime: message.timestamp,
+      digest: MessageDigest.fromBytes(digest),
       storeTime: storeTimestamp,
-      digest: messageDigest
+      sendertime: message.timestamp,
+      pubsubTopic: pubsubTopic,
+      hash: hash,
     ))

   # All messages MUST be returned in chronological order
   if not isAscendingOrder:
     reverse(messages)
+    reverse(hashes)

-  return ok(ArchiveResponse(messages: messages, cursor: cursor))
+  return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor))

-# Retention policy
-const WakuArchiveDefaultRetentionPolicyInterval* = chronos.minutes(30)
-
-proc loopApplyRetentionPolicy*(w: WakuArchive):
-                               Future[Result[void, string]] {.async.} =
-  if w.retentionPolicy.isNil():
-    return err("retentionPolicy is Nil in executeMessageRetentionPolicy")
-
-  if w.driver.isNil():
-    return err("driver is Nil in executeMessageRetentionPolicy")
-
+proc periodicRetentionPolicy(self: WakuArchive) {.async.} =
+  debug "executing message retention policy"
+
+  let policy = self.retentionPolicy.get()
+
   while true:
-    debug "executing message retention policy"
-    let retPolicyRes = await w.retentionPolicy.execute(w.driver)
-    if retPolicyRes.isErr():
-      waku_archive_errors.inc(labelValues = [retPolicyFailure])
-      error "failed execution of retention policy", error=retPolicyRes.error
+    (await policy.execute(self.driver)).isOkOr:
+      waku_archive_errors.inc(labelValues = [retPolicyFailure])
+      error "failed execution of retention policy", error=error

     await sleepAsync(WakuArchiveDefaultRetentionPolicyInterval)

-  return ok()
-
-# Metrics reporting
-const WakuArchiveDefaultMetricsReportInterval* = chronos.minutes(1)
-
-proc loopReportStoredMessagesMetric*(w: WakuArchive):
-                                     Future[Result[void, string]] {.async.} =
-  if w.driver.isNil():
-    return err("driver is Nil in loopReportStoredMessagesMetric")
-
+proc periodicMetricReport(self: WakuArchive) {.async.} =
   while true:
-    let resCount = await w.driver.getMessagesCount()
-    if resCount.isErr():
-      return err("loopReportStoredMessagesMetric failed to get messages count: " & resCount.error)
-
-    waku_archive_messages.set(resCount.value, labelValues = ["stored"])
+    let countRes = (await self.driver.getMessagesCount())
+    if countRes.isErr():
+      error "loopReportStoredMessagesMetric failed to get messages count", error=countRes.error
+    else:
+      let count = countRes.get()
+      waku_archive_messages.set(count, labelValues = ["stored"])
+
     await sleepAsync(WakuArchiveDefaultMetricsReportInterval)

-  return ok()
-
-proc start*(self: WakuArchive) {.async.} =
-  ## TODO: better control the Result in case of error. Now it is ignored
-  self.retPolicyFut = self.loopApplyRetentionPolicy()
-  self.retMetricsRepFut = self.loopReportStoredMessagesMetric()
-
-proc stop*(self: WakuArchive) {.async.} =
-  self.retPolicyFut.cancel()
-  self.retMetricsRepFut.cancel()
+proc start*(self: WakuArchive) =
+  if self.retentionPolicy.isSome():
+    self.retentionPolicyHandle = self.periodicRetentionPolicy()
+
+  self.metricsHandle = self.periodicMetricReport()
+
+proc stopWait*(self: WakuArchive) {.async.} =
+  var futures: seq[Future[void]]
+
+  if self.retentionPolicy.isSome() and not self.retentionPolicyHandle.isNil():
+    futures.add(self.retentionPolicyHandle.cancelAndWait())
+
+  if not self.metricsHandle.isNil:
+    futures.add(self.metricsHandle.cancelAndWait())
+
+  await noCancel(allFutures(futures))
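Note: the background loops are now plain `Future[void]` handles stored on the archive; `start` spawns them and `stopWait` cancels and awaits both, so shutdown no longer races a half-cancelled loop. Expected usage, as a sketch:

    archive.start()           # spawns retention-policy and metrics loops
    # ... node runs ...
    await archive.stopWait()  # cancelAndWait + noCancel: clean teardown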
@@ -7,18 +7,26 @@ import
   std/options,
   stew/results,
   stew/byteutils,
+  stew/arrayops,
   nimcrypto/sha2
 import
   ../waku_core,
   ../common/paging


 ## Waku message digest
-# TODO: Move this into the driver implementations. We should be passing
-# here a buffer containing a serialized implementation dependent cursor.

 type MessageDigest* = MDigest[256]

+proc fromBytes*(T: type MessageDigest, src: seq[byte]): T =
+
+  var data: array[32, byte]
+
+  let byteCount = copyFrom[byte](data, src)
+
+  assert byteCount == 32
+
+  return MessageDigest(data: data)
+
 proc computeDigest*(msg: WakuMessage): MessageDigest =
   var ctx: sha256
   ctx.init()
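Note: `MessageDigest.fromBytes` (with `copyFrom` from `stew/arrayops`) converts the raw digest bytes a driver row carries back into the fixed 32-byte digest type. A usage sketch:

    var rawDigest = newSeq[byte](32)  # hypothetical digest column from an ArchiveRow
    let digest = MessageDigest.fromBytes(rawDigest)
    doAssert digest.data.len == 32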
@@ -30,23 +38,16 @@ proc computeDigest*(msg: WakuMessage): MessageDigest =
   # Computes the hash
   return ctx.finish()

-
-# TODO: Move this into the driver implementations. We should be passing
-# here a buffer containing a serialized implementation dependent cursor.
-type DbCursor = object
-  pubsubTopic*: PubsubTopic
-  senderTime*: Timestamp
-  storeTime*: Timestamp
-  digest*: MessageDigest
-
-
 ## Public API types

 type
-  # TODO: We should be passing here a buffer containing a serialized
-  # implementation dependent cursor. Waku archive logic should be independent
-  # of the cursor format.
-  ArchiveCursor* = DbCursor
+  #TODO Once Store v2 is removed, the cursor becomes the hash of the last message
+  ArchiveCursor* = object
+    digest*: MessageDigest
+    storeTime*: Timestamp
+    senderTime*: Timestamp
+    pubsubTopic*: PubsubTopic
+    hash*: WakuMessageHash

   ArchiveQuery* = object
     pubsubTopic*: Option[PubsubTopic]
@@ -54,10 +55,12 @@ type
     cursor*: Option[ArchiveCursor]
     startTime*: Option[Timestamp]
     endTime*: Option[Timestamp]
+    hashes*: seq[WakuMessageHash]
     pageSize*: uint
     direction*: PagingDirection

   ArchiveResponse* = object
+    hashes*: seq[WakuMessageHash]
     messages*: seq[WakuMessage]
     cursor*: Option[ArchiveCursor]
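Note: with the `hashes` field on `ArchiveQuery` and `ArchiveResponse`, a store v3 style request can be expressed directly in the public types. A sketch, assuming `someHashes` was built with `computeMessageHash` and that `PagingDirection` (from `common/paging`, imported above) exposes a `BACKWARD` member:

    let query = ArchiveQuery(
      hashes: someHashes,   # content-addressed lookup
      pageSize: 20,
      direction: PagingDirection.BACKWARD,
    )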
@@ -9,7 +9,6 @@ import
   chronos
 import
   ../waku_core,
-  ../common/error_handling,
   ./common

 const DefaultPageSize*: uint = 25
@@ -18,7 +17,8 @@ type
   ArchiveDriverResult*[T] = Result[T, string]
   ArchiveDriver* = ref object of RootObj

-type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp)
+#TODO Once Store v2 is removed keep only messages and hashes
+type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)

 # ArchiveDriver interface
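Note: `ArchiveRow` grows a fifth element carrying the store v3 hash, and every driver and caller now destructures five fields (the TODO above plans to shrink this once store v2 is dropped). For example:

    # row: an ArchiveRow returned by getAllMessages()/getMessages()
    let (pubsubTopic, message, digest, storedAt, hash) = row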
@@ -34,11 +34,12 @@ method getAllMessages*(driver: ArchiveDriver):
   Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = discard

 method getMessages*(driver: ArchiveDriver,
-                    contentTopic: seq[ContentTopic] = @[],
+                    contentTopic = newSeq[ContentTopic](0),
                     pubsubTopic = none(PubsubTopic),
                     cursor = none(ArchiveCursor),
                     startTime = none(Timestamp),
                     endTime = none(Timestamp),
+                    hashes = newSeq[WakuMessageHash](0),
                     maxPageSize = DefaultPageSize,
                     ascendingOrder = true):
   Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = discard
@@ -4,8 +4,8 @@ else:
   {.push raises: [].}

 import
-  std/[nre,options,sequtils,strutils,strformat,times],
-  stew/[results,byteutils],
+  std/[nre, options, sequtils, strutils, strformat, times],
+  stew/[results, byteutils, arrayops],
   db_postgres,
   postgres,
   chronos,
@@ -36,7 +36,7 @@ const InsertRowStmtDefinition =

 const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc"
 const SelectNoCursorAscStmtDef =
-  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id FROM messages
+  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
     WHERE contentTopic IN ($1) AND
           pubsubTopic = $2 AND
           storedAt >= $3 AND
@@ -45,7 +45,7 @@ const SelectNoCursorAscStmtDef =

 const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc"
 const SelectNoCursorDescStmtDef =
-  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id FROM messages
+  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
     WHERE contentTopic IN ($1) AND
           pubsubTopic = $2 AND
           storedAt >= $3 AND
@@ -54,7 +54,7 @@ const SelectNoCursorDescStmtDef =

 const SelectWithCursorDescStmtName = "SelectWithCursorDesc"
 const SelectWithCursorDescStmtDef =
-  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id FROM messages
+  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
     WHERE contentTopic IN ($1) AND
           pubsubTopic = $2 AND
           (storedAt, id) < ($3,$4) AND
@@ -64,7 +64,7 @@ const SelectWithCursorDescStmtDef =

 const SelectWithCursorAscStmtName = "SelectWithCursorAsc"
 const SelectWithCursorAscStmtDef =
-  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id FROM messages
+  """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
    WHERE contentTopic IN ($1) AND
          pubsubTopic = $2 AND
          (storedAt, id) > ($3,$4) AND
@@ -107,8 +107,10 @@ proc reset*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} =
   let ret = await s.decreaseDatabaseSize(targetSize, forceRemoval)
   return ret

-proc rowCallbackImpl(pqResult: ptr PGresult,
-                     outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp)]) =
+proc rowCallbackImpl(
+  pqResult: ptr PGresult,
+  outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)],
+) =
   ## Proc aimed to contain the logic of the callback passed to the `psasyncpool`.
   ## That callback is used in "SELECT" queries.
   ##
@@ -116,7 +118,7 @@ proc rowCallbackImpl(pqResult: ptr PGresult,
   ## outRows - seq of Store-rows. This is populated from the info contained in pqResult

   let numFields = pqResult.pqnfields()
-  if numFields != 7:
+  if numFields != 8:
     error "Wrong number of fields"
     return

@@ -130,6 +132,8 @@ proc rowCallbackImpl(pqResult: ptr PGresult,
     var storedAt: int64
     var digest: string
     var payload: string
+    var hashHex: string
+    var msgHash: WakuMessageHash

     try:
       storedAt = parseInt( $(pqgetvalue(pqResult, iRow, 0)) )
@@ -139,6 +143,8 @@ proc rowCallbackImpl(pqResult: ptr PGresult,
       version = parseUInt( $(pqgetvalue(pqResult, iRow, 4)) )
       timestamp = parseInt( $(pqgetvalue(pqResult, iRow, 5)) )
       digest = parseHexStr( $(pqgetvalue(pqResult, iRow, 6)) )
+      hashHex = parseHexStr( $(pqgetvalue(pqResult, iRow, 7)) )
+      msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31))
     except ValueError:
       error "could not parse correctly", error = getCurrentExceptionMsg()

@@ -150,7 +156,9 @@ proc rowCallbackImpl(pqResult: ptr PGresult,
     outRows.add((pubSubTopic,
                  wakuMessage,
                  @(digest.toOpenArrayByte(0, digest.high)),
-                 storedAt))
+                 storedAt,
+                 msgHash,
+                 ))

 method put*(s: PostgresDriver,
             pubsubTopic: PubsubTopic,
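Note: the Postgres driver keeps `messageHash` hex-encoded in the table and decodes it while reading rows, symmetric with the hex-encoding on the query side further below. A round-trip sketch using the same helpers (`toHex`, `parseHexStr`, and `fromBytes` as used above), assuming a `msgHash: WakuMessageHash` in scope:

    let hex = toHex(msgHash)        # write/query side
    let bytes = parseHexStr(hex)    # read side
    let decoded = fromBytes(bytes.toOpenArrayByte(0, 31))
    doAssert decoded == msgHash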
@@ -195,13 +203,13 @@ method getAllMessages*(s: PostgresDriver):
   Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
   ## Retrieve all messages from the store.

-  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp)]
+  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
   proc rowCallback(pqResult: ptr PGresult) =
     rowCallbackImpl(pqResult, rows)

   (await s.readConnPool.pgQuery("""SELECT storedAt, contentTopic,
                                    payload, pubsubTopic, version, timestamp,
-                                   id FROM messages ORDER BY storedAt ASC""",
+                                   id, messageHash FROM messages ORDER BY storedAt ASC""",
                                 newSeq[string](0),
                                 rowCallback
   )).isOkOr:
@@ -242,12 +250,13 @@ proc getMessagesArbitraryQuery(s: PostgresDriver,
                                cursor = none(ArchiveCursor),
                                startTime = none(Timestamp),
                                endTime = none(Timestamp),
+                               hexHashes: seq[string] = @[],
                                maxPageSize = DefaultPageSize,
                                ascendingOrder = true):
                               Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
   ## This proc allows to handle atypical queries. We don't use prepared statements for those.

-  var query = """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id FROM messages"""
+  var query = """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages"""
   var statements: seq[string]
   var args: seq[string]

@@ -257,6 +266,12 @@ proc getMessagesArbitraryQuery(s: PostgresDriver,
     for t in contentTopic:
       args.add(t)

+  if hexHashes.len > 0:
+    let cstmt = "messageHash IN (" & "?".repeat(hexHashes.len).join(",") & ")"
+    statements.add(cstmt)
+    for t in hexHashes:
+      args.add(t)
+
   if pubsubTopic.isSome():
     statements.add("pubsubTopic = ?")
     args.add(pubsubTopic.get())
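Note: the hash filter is assembled as a parameterized `IN` clause, one `?` placeholder per value, so the hex hashes are bound as arguments rather than spliced into the SQL string. The same construction in isolation, written with `newSeqWith` for clarity:

    import std/[sequtils, strutils]

    let hexHashes = @["aa", "bb"]  # hypothetical hex-encoded hashes
    let placeholders = newSeqWith(hexHashes.len, "?")
    let clause = "messageHash IN (" & placeholders.join(",") & ")"
    doAssert clause == "messageHash IN (?,?)"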
@@ -289,7 +304,7 @@ proc getMessagesArbitraryQuery(s: PostgresDriver,
     query &= " LIMIT ?"
     args.add($maxPageSize)

-  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp)]
+  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
   proc rowCallback(pqResult: ptr PGresult) =
     rowCallbackImpl(pqResult, rows)

@@ -313,7 +328,7 @@ proc getMessagesPreparedStmt(s: PostgresDriver,
   ##
   ## contentTopic - string with list of conten topics. e.g: "'ctopic1','ctopic2','ctopic3'"

-  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp)]
+  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
   proc rowCallback(pqResult: ptr PGresult) =
     rowCallbackImpl(pqResult, rows)

@@ -354,6 +369,7 @@ proc getMessagesPreparedStmt(s: PostgresDriver,
   else:
     var stmtName = if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName
     var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef

     (await s.readConnPool.runStmt(stmtName,
                                   stmtDef,
                                   @[contentTopic,
@@ -374,14 +390,16 @@ proc getMessagesPreparedStmt(s: PostgresDriver,
   return ok(rows)

 method getMessages*(s: PostgresDriver,
-                    contentTopicSeq: seq[ContentTopic] = @[],
+                    contentTopicSeq = newSeq[ContentTopic](0),
                     pubsubTopic = none(PubsubTopic),
                     cursor = none(ArchiveCursor),
                     startTime = none(Timestamp),
                     endTime = none(Timestamp),
+                    hashes = newSeq[WakuMessageHash](0),
                     maxPageSize = DefaultPageSize,
                     ascendingOrder = true):
                    Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
+  let hexHashes = hashes.mapIt(toHex(it))
+
   if contentTopicSeq.len == 1 and
      pubsubTopic.isSome() and
@@ -399,12 +417,13 @@ method getMessages*(s: PostgresDriver,
   else:
     ## We will run atypical query. In this case we don't use prepared statemets
     return await s.getMessagesArbitraryQuery(contentTopicSeq,
                                              pubsubTopic,
                                              cursor,
                                              startTime,
                                              endTime,
-                                             maxPageSize,
-                                             ascendingOrder)
+                                             hexHashes,
+                                             maxPageSize,
+                                             ascendingOrder)

 proc getStr(s: PostgresDriver,
             query: string):
@@ -10,50 +10,53 @@ import
   ../../../waku_core,
   ../../common


 type Index* = object
   ## This type contains the description of an Index used in the pagination of WakuMessages
   pubsubTopic*: string
   senderTime*: Timestamp # the time at which the message is generated
   receiverTime*: Timestamp
   digest*: MessageDigest # calculated over payload and content topic
+  hash*: WakuMessageHash

 proc compute*(T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic): T =
   ## Takes a WakuMessage with received timestamp and returns its Index.
   let
     digest = computeDigest(msg)
     senderTime = msg.timestamp
+    hash = computeMessageHash(pubsubTopic, msg)

-  Index(
+  return Index(
     pubsubTopic: pubsubTopic,
     senderTime: senderTime,
     receiverTime: receivedTime,
-    digest: digest
+    digest: digest,
+    hash: hash,
   )


 proc tohistoryCursor*(index: Index): ArchiveCursor =
-  ArchiveCursor(
+  return ArchiveCursor(
     pubsubTopic: index.pubsubTopic,
     senderTime: index.senderTime,
     storeTime: index.receiverTime,
-    digest: index.digest
+    digest: index.digest,
+    hash: index.hash,
   )

 proc toIndex*(index: ArchiveCursor): Index =
-  Index(
+  return Index(
     pubsubTopic: index.pubsubTopic,
     senderTime: index.senderTime,
     receiverTime: index.storeTime,
-    digest: index.digest
+    digest: index.digest,
+    hash: index.hash,
   )


 proc `==`*(x, y: Index): bool =
   ## receiverTime plays no role in index equality
-  (x.senderTime == y.senderTime) and
-  (x.digest == y.digest) and
-  (x.pubsubTopic == y.pubsubTopic)
+  return
+    (x.senderTime == y.senderTime) and
+    (x.digest == y.digest) and
+    (x.pubsubTopic == y.pubsubTopic)

 proc cmp*(x, y: Index): int =
   ## compares x and y
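Note: `Index.compute` now derives all identifiers for an incoming message in one place, including the store v3 hash that also rides along on cursors. A sketch, assuming the test helpers seen earlier in this diff:

    let msg = fakeWakuMessage(contentTopic = "test-content-topic")
    let receivedTime = getNanosecondTime(getTime().toUnixFloat())
    let index = Index.compute(msg, receivedTime, DefaultPubsubTopic)
    doAssert index.hash == computeMessageHash(DefaultPubsubTopic, msg)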
@@ -21,16 +21,22 @@ logScope:
 const QueueDriverDefaultMaxCapacity* = 25_000

 type
-  IndexedWakuMessage = object
-    # TODO: may need to rename this object as it holds both the index and the pubsub topic of a waku message
-    ## This type is used to encapsulate a WakuMessage and its Index
-    msg*: WakuMessage
-    index*: Index
-    pubsubTopic*: string
+  QueryFilterMatcher = proc(index: Index, msg: WakuMessage): bool {.gcsafe, closure.}

-  QueryFilterMatcher = proc(indexedWakuMsg: IndexedWakuMessage): bool {.gcsafe, closure.}
+  QueueDriver* = ref object of ArchiveDriver
+    ## Bounded repository for indexed messages
+    ##
+    ## The store queue will keep messages up to its
+    ## configured capacity. As soon as this capacity
+    ## is reached and a new message is added, the oldest
+    ## item will be removed to make space for the new one.
+    ## This implies both a `delete` and `add` operation
+    ## for new items.
+
+    # TODO: a circular/ring buffer may be a more efficient implementation
+    items: SortedSet[Index, WakuMessage] # sorted set of stored messages
+    capacity: int # Maximum amount of messages to keep

-type
   QueueDriverErrorKind {.pure.} = enum
     INVALID_CURSOR

@@ -41,26 +47,11 @@ proc `$`(error: QueueDriverErrorKind): string =
   of INVALID_CURSOR:
     "invalid_cursor"

-type QueueDriver* = ref object of ArchiveDriver
-  ## Bounded repository for indexed messages
-  ##
-  ## The store queue will keep messages up to its
-  ## configured capacity. As soon as this capacity
-  ## is reached and a new message is added, the oldest
-  ## item will be removed to make space for the new one.
-  ## This implies both a `delete` and `add` operation
-  ## for new items.
-  ##
-  ## TODO: a circular/ring buffer may be a more efficient implementation
-  ## TODO: we don't need to store the Index twice (as key and in the value)
-  items: SortedSet[Index, IndexedWakuMessage] # sorted set of stored messages
-  capacity: int # Maximum amount of messages to keep
-
 ### Helpers

-proc walkToCursor(w: SortedSetWalkRef[Index, IndexedWakuMessage],
+proc walkToCursor(w: SortedSetWalkRef[Index, WakuMessage],
                   startCursor: Index,
-                  forward: bool): SortedSetResult[Index, IndexedWakuMessage] =
+                  forward: bool): SortedSetResult[Index, WakuMessage] =
   ## Walk until we find the cursor
   ## TODO: Improve performance here with a binary/tree search

@@ -81,15 +72,15 @@ proc walkToCursor(w: SortedSetWalkRef[Index, IndexedWakuMessage],
 #### API

 proc new*(T: type QueueDriver, capacity: int = QueueDriverDefaultMaxCapacity): T =
-  var items = SortedSet[Index, IndexedWakuMessage].init()
+  var items = SortedSet[Index, WakuMessage].init()
   return QueueDriver(items: items, capacity: capacity)

 proc contains*(driver: QueueDriver, index: Index): bool =
   ## Return `true` if the store queue already contains the `index`, `false` otherwise.
-  driver.items.eq(index).isOk()
+  return driver.items.eq(index).isOk()

 proc len*(driver: QueueDriver): int {.noSideEffect.} =
-  driver.items.len
+  return driver.items.len

 proc getPage(driver: QueueDriver,
              pageSize: uint = 0,
@@ -102,10 +93,10 @@ proc getPage(driver: QueueDriver,
   ## Each entry must match the `pred`
   var outSeq: seq[ArchiveRow]

-  var w = SortedSetWalkRef[Index,IndexedWakuMessage].init(driver.items)
+  var w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
   defer: w.destroy()

-  var currentEntry: SortedSetResult[Index, IndexedWakuMessage]
+  var currentEntry: SortedSetResult[Index, WakuMessage]

   # Find starting entry
   if cursor.isSome():

@@ -131,14 +122,14 @@ proc getPage(driver: QueueDriver,
   while currentEntry.isOk() and numberOfItems < pageSize:
     trace "Continuing page query", currentEntry=currentEntry, numberOfItems=numberOfItems

-    if predicate.isNil() or predicate(currentEntry.value.data):
-      let
-        key = currentEntry.value.key
-        data = currentEntry.value.data
+    let
+      key = currentEntry.value.key
+      data = currentEntry.value.data
+
+    if predicate.isNil() or predicate(key, data):
       numberOfItems += 1

-      outSeq.add((key.pubsubTopic, data.msg, @(key.digest.data), key.receiverTime))
+      outSeq.add((key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash))

     currentEntry = if forward: w.next()
                    else: w.prev()
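The filter callback now receives the sorted-set key and the message separately, so matchers can inspect index metadata (including the new hash) without an IndexedWakuMessage wrapper. A hedged sketch of a custom matcher under the new, module-private QueryFilterMatcher signature; the allow-list is hypothetical:

  let allowedHashes: seq[WakuMessageHash] = @[]  # hypothetical allow-list
  let matcher: QueryFilterMatcher = func(index: Index, msg: WakuMessage): bool =
    index.pubsubTopic == "/waku/2/default-waku/proto" and
      (allowedHashes.len == 0 or index.hash in allowedHashes)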
@@ -150,10 +141,10 @@ proc getPage(driver: QueueDriver,
 ## --- SortedSet accessors ---

-iterator fwdIterator*(driver: QueueDriver): (Index, IndexedWakuMessage) =
+iterator fwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
   ## Forward iterator over the entire store queue
   var
-    w = SortedSetWalkRef[Index,IndexedWakuMessage].init(driver.items)
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
     res = w.first()

   while res.isOk():

@@ -162,10 +153,10 @@ iterator fwdIterator*(driver: QueueDriver): (Index, IndexedWakuMessage) =
   w.destroy()

-iterator bwdIterator*(driver: QueueDriver): (Index, IndexedWakuMessage) =
+iterator bwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
   ## Backwards iterator over the entire store queue
   var
-    w = SortedSetWalkRef[Index,IndexedWakuMessage].init(driver.items)
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
     res = w.last()

   while res.isOk():

@@ -174,45 +165,45 @@ iterator bwdIterator*(driver: QueueDriver): (Index, IndexedWakuMessage) =
   w.destroy()

-proc first*(driver: QueueDriver): ArchiveDriverResult[IndexedWakuMessage] =
+proc first*(driver: QueueDriver): ArchiveDriverResult[Index] =
   var
-    w = SortedSetWalkRef[Index,IndexedWakuMessage].init(driver.items)
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
     res = w.first()
   w.destroy()

   if res.isErr():
     return err("Not found")

-  return ok(res.value.data)
+  return ok(res.value.key)

-proc last*(driver: QueueDriver): ArchiveDriverResult[IndexedWakuMessage] =
+proc last*(driver: QueueDriver): ArchiveDriverResult[Index] =
   var
-    w = SortedSetWalkRef[Index,IndexedWakuMessage].init(driver.items)
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
     res = w.last()
   w.destroy()

   if res.isErr():
     return err("Not found")

-  return ok(res.value.data)
+  return ok(res.value.key)

 ## --- Queue API ---

-proc add*(driver: QueueDriver, msg: IndexedWakuMessage): ArchiveDriverResult[void] =
+proc add*(driver: QueueDriver, index: Index, msg: WakuMessage): ArchiveDriverResult[void] =
   ## Add a message to the queue
   ##
   ## If we're at capacity, we will be removing the oldest (first) item
-  if driver.contains(msg.index):
-    trace "could not add item to store queue. Index already exists", index=msg.index
+  if driver.contains(index):
+    trace "could not add item to store queue. Index already exists", index=index
     return err("duplicate")

   # TODO: the below delete block can be removed if we convert to circular buffer
   if driver.items.len >= driver.capacity:
     var
-      w = SortedSetWalkRef[Index, IndexedWakuMessage].init(driver.items)
+      w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
       firstItem = w.first

-    if cmp(msg.index, firstItem.value.key) < 0:
+    if cmp(index, firstItem.value.key) < 0:
       # When at capacity, we won't add if message index is smaller (older) than our oldest item
       w.destroy # Clean up walker
       return err("too_old")
@@ -220,7 +211,7 @@ proc add*(driver: QueueDriver, msg: IndexedWakuMessage): ArchiveDriverResult[voi
     discard driver.items.delete(firstItem.value.key)
     w.destroy # better to destroy walker after a delete operation

-  driver.items.insert(msg.index).value.data = msg
+  driver.items.insert(index).value.data = msg

   return ok()

@@ -231,9 +222,15 @@ method put*(driver: QueueDriver,
             messageHash: WakuMessageHash,
             receivedTime: Timestamp):
             Future[ArchiveDriverResult[void]] {.async.} =
-  let index = Index(pubsubTopic: pubsubTopic, senderTime: message.timestamp, receiverTime: receivedTime, digest: digest)
-  let message = IndexedWakuMessage(msg: message, index: index, pubsubTopic: pubsubTopic)
-  return driver.add(message)
+  let index = Index(
+    pubsubTopic: pubsubTopic,
+    senderTime: message.timestamp,
+    receiverTime: receivedTime,
+    digest: digest,
+    hash: messageHash,
+  )
+
+  return driver.add(index, message)

 method getAllMessages*(driver: QueueDriver):
     Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
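A short call sketch for the reworked put, assuming an existing QueueDriver instance drv and a message msg on pubsubTopic (names hypothetical), with chronos' waitFor bridging the async call; the digest and hash helpers are the same ones used throughout this change:

  let
    digest = computeDigest(msg)
    msgHash = computeMessageHash(pubsubTopic, msg)
    res = waitFor drv.put(pubsubTopic, msg, digest, msgHash, msg.timestamp)
  assert res.isOk(), res.error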
@@ -244,28 +241,33 @@ method existsTable*(driver: QueueDriver, tableName: string):
     Future[ArchiveDriverResult[bool]] {.async.} =
   return err("interface method not implemented")

-method getMessages*(driver: QueueDriver,
-                    contentTopic: seq[ContentTopic] = @[],
-                    pubsubTopic = none(PubsubTopic),
-                    cursor = none(ArchiveCursor),
-                    startTime = none(Timestamp),
-                    endTime = none(Timestamp),
-                    maxPageSize = DefaultPageSize,
-                    ascendingOrder = true):
-                    Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.}=
+method getMessages*(
+  driver: QueueDriver,
+  contentTopic: seq[ContentTopic] = @[],
+  pubsubTopic = none(PubsubTopic),
+  cursor = none(ArchiveCursor),
+  startTime = none(Timestamp),
+  endTime = none(Timestamp),
+  hashes: seq[WakuMessageHash] = @[],
+  maxPageSize = DefaultPageSize,
+  ascendingOrder = true,
+): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
   let cursor = cursor.map(toIndex)

-  let matchesQuery: QueryFilterMatcher = func(row: IndexedWakuMessage): bool =
-    if pubsubTopic.isSome() and row.pubsubTopic != pubsubTopic.get():
+  let matchesQuery: QueryFilterMatcher = func(index: Index, msg: WakuMessage): bool =
+    if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get():
       return false

-    if contentTopic.len > 0 and row.msg.contentTopic notin contentTopic:
+    if contentTopic.len > 0 and msg.contentTopic notin contentTopic:
       return false

-    if startTime.isSome() and row.msg.timestamp < startTime.get():
+    if startTime.isSome() and msg.timestamp < startTime.get():
       return false

-    if endTime.isSome() and row.msg.timestamp > endTime.get():
+    if endTime.isSome() and msg.timestamp > endTime.get():
+      return false
+
+    if hashes.len > 0 and index.hash notin hashes:
       return false

     return true
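With the new hashes parameter the queue driver can answer store-v3 hash lookups directly, while every other filter stays optional. A hedged sketch (drv and msgHash are hypothetical, carried over from the put example above):

  let rowsRes = waitFor drv.getMessages(hashes = @[msgHash])
  if rowsRes.isOk():
    # ArchiveRow is now a five-element tuple ending in the message hash.
    for (topic, msg, digest, receiverTime, hash) in rowsRes.get():
      doAssert hash == msgHash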
@@ -293,7 +295,7 @@ method getPagesSize*(driver: QueueDriver):
     Future[ArchiveDriverResult[int64]] {.async} =
   return ok(int64(driver.len()))

-method getDatabasesSize*(driver: QueueDriver):
+method getDatabaseSize*(driver: QueueDriver):
     Future[ArchiveDriverResult[int64]] {.async} =
   return ok(int64(driver.len()))

@@ -303,11 +305,11 @@ method performVacuum*(driver: QueueDriver):

 method getOldestMessageTimestamp*(driver: QueueDriver):
     Future[ArchiveDriverResult[Timestamp]] {.async.} =
-  return driver.first().map(proc(msg: IndexedWakuMessage): Timestamp = msg.index.receiverTime)
+  return driver.first().map(proc(index: Index): Timestamp = index.receiverTime)

 method getNewestMessageTimestamp*(driver: QueueDriver):
     Future[ArchiveDriverResult[Timestamp]] {.async.} =
-  return driver.last().map(proc(msg: IndexedWakuMessage): Timestamp = msg.index.receiverTime)
+  return driver.last().map(proc(index: Index): Timestamp = index.receiverTime)

 method deleteMessagesOlderThanTimestamp*(driver: QueueDriver,
                                          ts: Timestamp):
@@ -49,7 +49,7 @@ proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] =

   else:
     info "Not considered schema version 7"
-    ok(false)
+    return ok(false)

 proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult[void] =
   ## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then

@@ -75,4 +75,4 @@ proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult
     return err("failed to execute migration scripts: " & migrationRes.error)

   debug "finished message store's sqlite database migration"
-  ok()
+  return ok()
@@ -5,7 +5,7 @@ else:

 import
   std/[options, sequtils],
-  stew/[results, byteutils],
+  stew/[results, byteutils, arrayops],
   sqlite3_abi
 import
   ../../../common/databases/db_sqlite,

@@ -24,14 +24,15 @@ proc queryRowWakuMessageCallback(s: ptr sqlite3_stmt, contentTopicCol, payloadCo
     topic = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, contentTopicCol))
     topicLength = sqlite3_column_bytes(s, contentTopicCol)
     contentTopic = string.fromBytes(@(toOpenArray(topic, 0, topicLength-1)))
-  let
+
     p = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, payloadCol))
+
     length = sqlite3_column_bytes(s, payloadCol)
     payload = @(toOpenArray(p, 0, length-1))
-  let version = sqlite3_column_int64(s, versionCol)
-  let senderTimestamp = sqlite3_column_int64(s, senderTimestampCol)
+    version = sqlite3_column_int64(s, versionCol)
+    senderTimestamp = sqlite3_column_int64(s, senderTimestampCol)

-  WakuMessage(
+  return WakuMessage(
     contentTopic: ContentTopic(contentTopic),
     payload: payload,
     version: uint32(version),
@@ -40,7 +41,7 @@ proc queryRowWakuMessageCallback(s: ptr sqlite3_stmt, contentTopicCol, payloadCo

 proc queryRowReceiverTimestampCallback(s: ptr sqlite3_stmt, storedAtCol: cint): Timestamp =
   let storedAt = sqlite3_column_int64(s, storedAtCol)
-  Timestamp(storedAt)
+  return Timestamp(storedAt)

 proc queryRowPubsubTopicCallback(s: ptr sqlite3_stmt, pubsubTopicCol: cint): PubsubTopic =
   let

@@ -48,7 +49,7 @@ proc queryRowPubsubTopicCallback(s: ptr sqlite3_stmt, pubsubTopicCol: cint): Pub
     pubsubTopicLength = sqlite3_column_bytes(s, pubsubTopicCol)
     pubsubTopic = string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength-1)))

-  pubsubTopic
+  return pubsubTopic

 proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] =
   let

@@ -56,8 +57,15 @@ proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] =
     digestLength = sqlite3_column_bytes(s, digestCol)
     digest = @(toOpenArray(digestPointer, 0, digestLength-1))

-  digest
+  return digest
+
+proc queryRowWakuMessageHashCallback(s: ptr sqlite3_stmt, hashCol: cint): WakuMessageHash =
+  let
+    hashPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, hashCol))
+    hashLength = sqlite3_column_bytes(s, hashCol)
+    hash = fromBytes(toOpenArray(hashPointer, 0, hashLength-1))
+
+  return hash

 ### SQLite queries
@@ -79,7 +87,7 @@ proc createTableQuery(table: string): SqlQueryStr =
 proc createTable*(db: SqliteDatabase): DatabaseResult[void] =
   let query = createTableQuery(DbTable)
   discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard)
-  ok()
+  return ok()

 ## Create indices

@@ -90,8 +98,7 @@ proc createOldestMessageTimestampIndex*(db: SqliteDatabase):
     DatabaseResult[void] =
   let query = createOldestMessageTimestampIndexQuery(DbTable)
   discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard)
-  ok()
-
+  return ok()

 proc createHistoryQueryIndexQuery(table: string): SqlQueryStr =
   "CREATE INDEX IF NOT EXISTS i_query ON " & table & " (contentTopic, pubsubTopic, storedAt, id);"

@@ -99,24 +106,24 @@ proc createHistoryQueryIndexQuery(table: string): SqlQueryStr =
 proc createHistoryQueryIndex*(db: SqliteDatabase): DatabaseResult[void] =
   let query = createHistoryQueryIndexQuery(DbTable)
   discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard)
-  ok()
+  return ok()

 ## Insert message
 type InsertMessageParams* = (seq[byte], seq[byte], Timestamp, seq[byte], seq[byte], seq[byte], int64, Timestamp)

 proc insertMessageQuery(table: string): SqlQueryStr =
-  "INSERT INTO " & table & "(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp)" &
-  " VALUES (?, ?, ?, ?, ?, ?, ?, ?);"
+  return
+    "INSERT INTO " & table & "(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp)" &
+    " VALUES (?, ?, ?, ?, ?, ?, ?, ?);"

 proc prepareInsertMessageStmt*(db: SqliteDatabase): SqliteStmt[InsertMessageParams, void] =
   let query = insertMessageQuery(DbTable)
-  db.prepareStmt(query, InsertMessageParams, void).expect("this is a valid statement")
+  return db.prepareStmt(query, InsertMessageParams, void).expect("this is a valid statement")

 ## Count table messages

 proc countMessagesQuery(table: string): SqlQueryStr =
-  "SELECT COUNT(*) FROM " & table
+  return "SELECT COUNT(*) FROM " & table

 proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
   var count: int64
@@ -128,12 +135,12 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
   if res.isErr():
     return err("failed to count number of messages in the database")

-  ok(count)
+  return ok(count)

 ## Get oldest message receiver timestamp

 proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr =
-  "SELECT MIN(storedAt) FROM " & table
+  return "SELECT MIN(storedAt) FROM " & table

 proc selectOldestReceiverTimestamp*(db: SqliteDatabase):
     DatabaseResult[Timestamp] {.inline.}=

@@ -146,12 +153,12 @@ proc selectOldestReceiverTimestamp*(db: SqliteDatabase):
   if res.isErr():
     return err("failed to get the oldest receiver timestamp from the database")

-  ok(timestamp)
+  return ok(timestamp)

 ## Get newest message receiver timestamp

 proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr =
-  "SELECT MAX(storedAt) FROM " & table
+  return "SELECT MAX(storedAt) FROM " & table

 proc selectNewestReceiverTimestamp*(db: SqliteDatabase):
     DatabaseResult[Timestamp] {.inline.}=
@@ -164,64 +171,67 @@ proc selectNewestReceiverTimestamp*(db: SqliteDatabase):
   if res.isErr():
     return err("failed to get the newest receiver timestamp from the database")

-  ok(timestamp)
+  return ok(timestamp)

 ## Delete messages older than timestamp

 proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr =
-  "DELETE FROM " & table & " WHERE storedAt < " & $ts
+  return "DELETE FROM " & table & " WHERE storedAt < " & $ts

 proc deleteMessagesOlderThanTimestamp*(db: SqliteDatabase, ts: int64):
     DatabaseResult[void] =
   let query = deleteMessagesOlderThanTimestampQuery(DbTable, ts)
   discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard)
-  ok()
+  return ok()

 ## Delete oldest messages not within limit

 proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr =
-  "DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" &
-  " SELECT storedAt, id, pubsubTopic FROM " & table &
-  " ORDER BY storedAt DESC, id DESC" &
-  " LIMIT " & $limit &
-  ");"
+  return
+    "DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" &
+    " SELECT storedAt, id, pubsubTopic FROM " & table &
+    " ORDER BY storedAt DESC, id DESC" &
+    " LIMIT " & $limit &
+    ");"

 proc deleteOldestMessagesNotWithinLimit*(db: SqliteDatabase, limit: int):
     DatabaseResult[void] =
   # NOTE: The word `limit` here refers to the store capacity/maximum number-of-messages allowed limit
   let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit=limit)
   discard ?db.query(query, proc(s: ptr sqlite3_stmt) = discard)
-  ok()
+  return ok()

 ## Select all messages

 proc selectAllMessagesQuery(table: string): SqlQueryStr =
-  "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id" &
-  " FROM " & table &
-  " ORDER BY storedAt ASC"
+  return
+    "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash" &
+    " FROM " & table &
+    " ORDER BY storedAt ASC"

 proc selectAllMessages*(db: SqliteDatabase): DatabaseResult[seq[(PubsubTopic,
                                                                  WakuMessage,
                                                                  seq[byte],
-                                                                 Timestamp)]] =
+                                                                 Timestamp,
+                                                                 WakuMessageHash)]] =
   ## Retrieve all messages from the store.
-  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp)]
+  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
   proc queryRowCallback(s: ptr sqlite3_stmt) =
     let
       pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol=3)
       wakuMessage = queryRowWakuMessageCallback(s, contentTopicCol=1, payloadCol=2, versionCol=4, senderTimestampCol=5)
       digest = queryRowDigestCallback(s, digestCol=6)
       storedAt = queryRowReceiverTimestampCallback(s, storedAtCol=0)
+      hash = queryRowWakuMessageHashCallback(s, hashCol=7)

-    rows.add((pubsubTopic, wakuMessage, digest, storedAt))
+    rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash))

   let query = selectAllMessagesQuery(DbTable)
   let res = db.query(query, queryRowCallback)
   if res.isErr():
     return err(res.error())

-  ok(rows)
+  return ok(rows)

 ## Select messages by history query with limit
@@ -233,13 +243,14 @@ proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
   var where: string = whereSeq[0]
   for clause in whereSeq[1..^1]:
     where &= " AND " & clause
-  some(where)
+  return some(where)

 proc whereClause(cursor: Option[DbCursor],
                  pubsubTopic: Option[PubsubTopic],
                  contentTopic: seq[ContentTopic],
                  startTime: Option[Timestamp],
                  endTime: Option[Timestamp],
+                 hashes: seq[WakuMessageHash],
                  ascending: bool): Option[string] =

   let cursorClause = if cursor.isNone():

@@ -273,14 +284,36 @@ proc whereClause(cursor: Option[DbCursor],
   else:
     some("storedAt <= (?)")

-  combineClauses(cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause)
+  let hashesClause = if hashes.len <= 0:
+      none(string)
+    else:
+      var where = "messageHash IN ("
+      where &= "?"
+      for _ in 1..<hashes.len:
+        where &= ", ?"
+      where &= ")"
+      some(where)
+
+  return combineClauses(
+    cursorClause,
+    pubsubTopicClause,
+    contentTopicClause,
+    startTimeClause,
+    endTimeClause,
+    hashesClause,
+  )

-proc selectMessagesWithLimitQuery(table: string, where: Option[string], limit: uint, ascending=true): SqlQueryStr =
+proc selectMessagesWithLimitQuery(
+  table: string,
+  where: Option[string],
+  limit: uint,
+  ascending=true
+): SqlQueryStr =
   let order = if ascending: "ASC" else: "DESC"

   var query: string

-  query = "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id"
+  query = "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash"
   query &= " FROM " & table

   if where.isSome():
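For n hashes the new clause expands to messageHash IN (?, ..., ?) with one placeholder per hash, and combineClauses ANDs it with whatever other filters are set. A worked sketch of the module-private whereClause, for three hashes and no other filters (h1, h2, h3 are hypothetical WakuMessageHash values, and the doAssert only compiles inside this module):

  let where = whereClause(
    cursor = none(DbCursor),
    pubsubTopic = none(PubsubTopic),
    contentTopic = newSeq[ContentTopic](0),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes = @[h1, h2, h3],
    ascending = true,
  )
  doAssert where == some("messageHash IN (?, ?, ?)")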
@@ -289,12 +322,12 @@ proc selectMessagesWithLimitQuery(table: string, where: Option[string], limit: u
   query &= " ORDER BY storedAt " & order & ", id " & order
   query &= " LIMIT " & $limit & ";"

-  query
+  return query

 proc prepareSelectMessagesWithlimitStmt(db: SqliteDatabase, stmt: string): DatabaseResult[SqliteStmt[void, void]] =
   var s: RawStmtPtr
   checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
-  ok(SqliteStmt[void, void](s))
+  return ok(SqliteStmt[void, void](s))

 proc execSelectMessagesWithLimitStmt(s: SqliteStmt,
                                      cursor: Option[DbCursor],

@@ -302,6 +335,7 @@ proc execSelectMessagesWithLimitStmt(s: SqliteStmt,
                                      contentTopic: seq[ContentTopic],
                                      startTime: Option[Timestamp],
                                      endTime: Option[Timestamp],
+                                     hashes: seq[WakuMessageHash],
                                      onRowCallback: DataProc): DatabaseResult[void] =
   let s = RawStmtPtr(s)

@@ -324,6 +358,16 @@ proc execSelectMessagesWithLimitStmt(s: SqliteStmt,
     checkErr bindParam(s, paramIndex, topic.toBytes())
     paramIndex += 1

+  for hash in hashes:
+    let bytes: array[32, byte] = hash
+    var byteSeq: seq[byte]
+
+    let byteCount = copyFrom(byteSeq, bytes)
+    assert byteCount == 32
+
+    checkErr bindParam(s, paramIndex, byteSeq)
+    paramIndex += 1
+
   if startTime.isSome():
     let time = startTime.get()
     checkErr bindParam(s, paramIndex, time)
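Note the explicit array-to-seq copy before binding: if copyFrom here follows stew's openArray semantics (copying at most dst.len elements), the destination seq would need its 32-byte length set up front for the assert to hold. A plausible equivalent that sidesteps the length bookkeeping, assuming bindParam accepts any seq[byte] blob as it does for topics above:

  for hash in hashes:
    checkErr bindParam(s, paramIndex, @(hash))  # @ copies the array[32, byte] into a fresh seq[byte]
    paramIndex += 1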
@@ -334,7 +378,6 @@ proc execSelectMessagesWithLimitStmt(s: SqliteStmt,
     checkErr bindParam(s, paramIndex, time)
     paramIndex += 1

-
   try:
     while true:
       let v = sqlite3_step(s)

@@ -356,26 +399,37 @@ proc selectMessagesByHistoryQueryWithLimit*(db: SqliteDatabase,
                                             cursor: Option[DbCursor],
                                             startTime: Option[Timestamp],
                                             endTime: Option[Timestamp],
+                                            hashes: seq[WakuMessageHash],
                                             limit: uint,
                                             ascending: bool):
                                             DatabaseResult[seq[(PubsubTopic,
                                                                 WakuMessage,
                                                                 seq[byte],
-                                                                Timestamp)]] =
+                                                                Timestamp,
+                                                                WakuMessageHash)]] =

-  var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp)] = @[]
+  var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] = @[]
   proc queryRowCallback(s: ptr sqlite3_stmt) =
     let
       pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol=3)
       message = queryRowWakuMessageCallback(s, contentTopicCol=1, payloadCol=2, versionCol=4, senderTimestampCol=5)
       digest = queryRowDigestCallback(s, digestCol=6)
       storedAt = queryRowReceiverTimestampCallback(s, storedAtCol=0)
+      hash = queryRowWakuMessageHashCallback(s, hashCol=7)

-    messages.add((pubsubTopic, message, digest, storedAt))
+    messages.add((pubsubTopic, message, digest, storedAt, hash))

   let query = block:
-    let where = whereClause(cursor, pubsubTopic, contentTopic, startTime, endTime, ascending)
+    let where = whereClause(
+      cursor,
+      pubsubTopic,
+      contentTopic,
+      startTime,
+      endTime,
+      hashes,
+      ascending,
+    )
     selectMessagesWithLimitQuery(DbTable, where, limit, ascending)

   let dbStmt = ?db.prepareSelectMessagesWithlimitStmt(query)

@@ -385,8 +439,9 @@ proc selectMessagesByHistoryQueryWithLimit*(db: SqliteDatabase,
     contentTopic,
     startTime,
     endTime,
+    hashes,
     queryRowCallback
   )
   dbStmt.dispose()

-  ok(messages)
+  return ok(messages)
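End to end, the sqlite query path now produces five-element rows that include the message hash. A hedged call sketch with every filter open except a hash list; it assumes the proc's leading parameters (cut off in the hunk above) are the content and pubsub topic filters, as the driver call below suggests, and uses named arguments so the exact order does not matter (db and someHash are hypothetical):

  let rowsRes = db.selectMessagesByHistoryQueryWithLimit(
    contentTopic = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(DbCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes = @[someHash],
    limit = 100,
    ascending = true,
  )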
@@ -41,7 +41,7 @@ proc init(db: SqliteDatabase): ArchiveDriverResult[void] =
   if resMsgIndex.isErr():
     return err("failed to create i_msg index: " & resMsgIndex.error())

-  ok()
+  return ok()

 type SqliteDriver* = ref object of ArchiveDriver
   db: SqliteDatabase

@@ -56,7 +56,7 @@ proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] =

   # General initialization
   let insertStmt = db.prepareInsertMessageStmt()
-  ok(SqliteDriver(db: db, insertStmt: insertStmt))
+  return ok(SqliteDriver(db: db, insertStmt: insertStmt))

 method put*(s: SqliteDriver,
             pubsubTopic: PubsubTopic,

@@ -85,11 +85,12 @@ method getAllMessages*(s: SqliteDriver):
   return s.db.selectAllMessages()

 method getMessages*(s: SqliteDriver,
-                    contentTopic: seq[ContentTopic] = @[],
+                    contentTopic = newSeq[ContentTopic](0),
                     pubsubTopic = none(PubsubTopic),
                     cursor = none(ArchiveCursor),
                     startTime = none(Timestamp),
                     endTime = none(Timestamp),
+                    hashes = newSeq[WakuMessageHash](0),
                     maxPageSize = DefaultPageSize,
                     ascendingOrder = true):
                     Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =

@@ -102,6 +103,7 @@ method getMessages*(s: SqliteDriver,
     cursor,
     startTime,
     endTime,
+    hashes,
     limit=maxPageSize,
     ascending=ascendingOrder
   )
@@ -6,8 +6,7 @@ else:

 import
   std/sequtils,
-  stew/byteutils,
-  stew/endians2,
+  stew/[byteutils, endians2, arrayops],
   nimcrypto/sha2
 import
   ../topics,

@@ -19,6 +18,11 @@ import

 type WakuMessageHash* = array[32, byte]

+converter fromBytes*(array: openArray[byte]): WakuMessageHash =
+  var hash: WakuMessageHash
+  let copiedBytes = copyFrom(hash, array)
+  assert copiedBytes == 32, "Waku message hash is 32 bytes"
+  hash
+
 converter toBytesArray*(digest: MDigest[256]): WakuMessageHash =
   digest.data
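Because fromBytes is declared as a converter, 32-byte blobs read from the database can flow into WakuMessageHash positions implicitly, and an explicit call also works; inputs shorter than 32 bytes would trip the assert, since fewer bytes get copied. A minimal sketch with a fabricated 32-byte input:

  let raw: seq[byte] = @[byte 0xde, 0xad] & newSeq[byte](30)  # 32 bytes total
  let hash: WakuMessageHash = fromBytes(raw)                  # explicit use of the converter
  doAssert hash[0] == 0xde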
@@ -11,8 +11,7 @@ else:

 import
   ../topics,
-  ../time,
-  ./default_values
+  ../time

 const
   MaxMetaAttrLength* = 64 # 64 bytes