mirror of
https://github.com/logos-messaging/logos-messaging-nim.git
synced 2026-01-03 14:33:12 +00:00
chore(archive): archive and drivers refactor (#2761)
* queue driver refactor (#2753)
* chore(archive): archive refactor (#2752)
* chore(archive): sqlite driver refactor (#2754)
* chore(archive): postgres driver refactor (#2755)
* chore(archive): renaming & copies (#2751)
* postgres legacy: stop using the storedAt field
* migration script 6: we still need the id column. The id column is needed because it contains the message digest, which is used by store v2, and we need to keep supporting store v2 for a while.
* legacy archive: set target migration version to 6
* waku_node: try to use wakuLegacyArchive if wakuArchive is nil
* node_factory, waku_node: mount legacy and future store simultaneously. We want the nwaku node to support store-v2 and store-v3 requests at the same time. Only the legacy archive is in charge of archiving messages, and the archived information is suitable to fulfill both store-v2 and store-v3 needs.
* postgres_driver: add temporary code until store-v2 is removed

Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Co-authored-by: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Co-authored-by: Ivan Folgueira Bande <ivansete@status.im>
This commit is contained in:
parent e269dca9cd
commit f54ba10bc7
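The commit above mounts the legacy (store-v2) and current (store-v3) store protocols side by side, with only the legacy archive persisting messages. A minimal sketch of that dual-mount idea, using proc names that appear in the test and chat2 changes below (the actual node_factory/waku_node wiring may differ), might look like:

  # Sketch only -- illustrates the dual-mount described in the commit message.
  # mountLegacyArchive and mountLegacyStore are taken from this diff;
  # mountStore is assumed by analogy with mountStoreClient.
  proc mountBothStores(node: WakuNode, driver: ArchiveDriver) {.async.} =
    # Only the legacy archive writes messages; it backs both store versions.
    let archiveRes = node.mountLegacyArchive(driver)
    assert archiveRes.isOk()
    await node.mountLegacyStore() # serves store-v2 requests
    await node.mountStore()       # serves store-v3 requests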
@@ -44,6 +44,7 @@ import
    factory/builder,
    common/utils/nat,
    waku_relay,
    waku_store/common,
  ],
  ./config_chat2

@@ -468,22 +469,30 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
    # We have a viable storenode. Let's query it for historical messages.
    echo "Connecting to storenode: " & $(storenode.get())

    node.mountLegacyStoreClient()
    node.peerManager.addServicePeer(storenode.get(), WakuLegacyStoreCodec)
    node.mountStoreClient()
    node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec)

    proc storeHandler(response: HistoryResponse) {.gcsafe.} =
    proc storeHandler(response: StoreQueryResponse) {.gcsafe.} =
      for msg in response.messages:
        let payload =
          if msg.message.isSome():
            msg.message.get().payload
          else:
            newSeq[byte](0)

        let
          pb = Chat2Message.init(msg.payload)
          pb = Chat2Message.init(payload)
          chatLine =
            if pb.isOk:
              pb[].toString()
            else:
              string.fromBytes(msg.payload)
              string.fromBytes(payload)
        echo &"{chatLine}"
        info "Hit store handler"

    let queryRes = await node.query(HistoryQuery(contentTopics: @[chat.contentTopic]))
    let queryRes = await node.query(
      StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get()
    )
    if queryRes.isOk():
      storeHandler(queryRes.value)
@@ -0,0 +1,12 @@
const ContentScriptVersion_6* =
  """
-- we can drop the timestamp column because this data is also kept in the storedAt column
ALTER TABLE messages DROP COLUMN timestamp;

-- from now on we are only interested in the message timestamp
ALTER TABLE messages RENAME COLUMN storedAt TO timestamp;

-- Update to new version
UPDATE version SET version = 6 WHERE version = 5;

"""
@@ -1,6 +1,6 @@
import
  content_script_version_1, content_script_version_2, content_script_version_3,
  content_script_version_4, content_script_version_5
  content_script_version_4, content_script_version_5, content_script_version_6

type MigrationScript* = object
  version*: int

@@ -16,6 +16,7 @@ const PgMigrationScripts* =
  MigrationScript(version: 3, scriptContent: ContentScriptVersion_3),
  MigrationScript(version: 4, scriptContent: ContentScriptVersion_4),
  MigrationScript(version: 5, scriptContent: ContentScriptVersion_5),
  MigrationScript(version: 6, scriptContent: ContentScriptVersion_6),
]

proc getMigrationScripts*(currentVersion: int64, targetVersion: int64): seq[string] =
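The registry above is consumed by getMigrationScripts to pick which scripts to run. A rough, self-contained sketch of the selection logic it implies (illustrative only; field names follow the MigrationScript object shown above, the actual proc lives in the migrations module):

  type MigrationScript = object
    version: int
    scriptContent: string

  # Illustrative: collect the scripts needed to move from currentVersion
  # (exclusive) up to targetVersion (inclusive), in ascending order.
  proc selectMigrationScripts(scripts: seq[MigrationScript],
                              currentVersion, targetVersion: int64): seq[string] =
    for s in scripts:
      if s.version.int64 > currentVersion and s.version.int64 <= targetVersion:
        result.add(s.scriptContent)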
@@ -18,7 +18,15 @@ import
  ./waku_archive/test_driver_sqlite,
  ./waku_archive/test_retention_policy,
  ./waku_archive/test_waku_archive,
  ./waku_archive/test_partition_manager
  ./waku_archive/test_partition_manager,
  ./waku_archive_legacy/test_driver_queue_index,
  ./waku_archive_legacy/test_driver_queue_pagination,
  ./waku_archive_legacy/test_driver_queue_query,
  ./waku_archive_legacy/test_driver_queue,
  ./waku_archive_legacy/test_driver_sqlite_query,
  ./waku_archive_legacy/test_driver_sqlite,
  ./waku_archive_legacy/test_retention_policy,
  ./waku_archive_legacy/test_waku_archive

const os* {.strdefine.} = ""
when os == "Linux" and

@@ -28,6 +36,8 @@ when os == "Linux" and
  import
    ./waku_archive/test_driver_postgres_query,
    ./waku_archive/test_driver_postgres,
    #./waku_archive_legacy/test_driver_postgres_query,
    #./waku_archive_legacy/test_driver_postgres,
    ./factory/test_node_factory,
    ./wakunode_rest/test_rest_store
@@ -15,12 +15,12 @@ import
    waku_core,
    waku_store_legacy,
    waku_store_legacy/client,
    waku_archive,
    waku_archive/driver/sqlite_driver,
    waku_archive_legacy,
    waku_archive_legacy/driver/sqlite_driver,
    common/databases/db_sqlite,
  ],
  ../waku_store_legacy/store_utils,
  ../waku_archive/archive_utils,
  ../waku_archive_legacy/archive_utils,
  ../testlib/[common, wakucore, wakunode, testasync, futures, testutils]
suite "Waku Store - End to End - Sorted Archive":
|
||||
@ -73,7 +73,7 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
|
||||
archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages)
|
||||
let mountArchiveResult = server.mountArchive(archiveDriver)
|
||||
let mountArchiveResult = server.mountLegacyArchive(archiveDriver)
|
||||
assert mountArchiveResult.isOk()
|
||||
|
||||
await server.mountLegacyStore()
|
||||
@ -445,7 +445,7 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
otherServer =
|
||||
newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
mountOtherArchiveResult =
|
||||
otherServer.mountArchive(otherArchiveDriverWithMessages)
|
||||
otherServer.mountLegacyArchive(otherArchiveDriverWithMessages)
|
||||
assert mountOtherArchiveResult.isOk()
|
||||
|
||||
await otherServer.mountLegacyStore()
|
||||
@ -532,7 +532,7 @@ suite "Waku Store - End to End - Unsorted Archive":
|
||||
unsortedArchiveDriverWithMessages =
|
||||
newArchiveDriverWithMessages(pubsubTopic, unsortedArchiveMessages)
|
||||
mountUnsortedArchiveResult =
|
||||
server.mountArchive(unsortedArchiveDriverWithMessages)
|
||||
server.mountLegacyArchive(unsortedArchiveDriverWithMessages)
|
||||
|
||||
assert mountUnsortedArchiveResult.isOk()
|
||||
|
||||
@ -687,7 +687,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let archiveDriver = newSqliteArchiveDriver()
|
||||
.put(pubsubTopic, archiveMessages[0 ..< 6])
|
||||
.put(pubsubTopicB, archiveMessages[6 ..< 10])
|
||||
let mountSortedArchiveResult = server.mountArchive(archiveDriver)
|
||||
let mountSortedArchiveResult = server.mountLegacyArchive(archiveDriver)
|
||||
|
||||
assert mountSortedArchiveResult.isOk()
|
||||
|
||||
@ -932,7 +932,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
ephemeralServer =
|
||||
newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
mountEphemeralArchiveResult =
|
||||
ephemeralServer.mountArchive(ephemeralArchiveDriver)
|
||||
ephemeralServer.mountLegacyArchive(ephemeralArchiveDriver)
|
||||
assert mountEphemeralArchiveResult.isOk()
|
||||
|
||||
await ephemeralServer.mountLegacyStore()
|
||||
@ -974,7 +974,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
mixedServerKey = generateSecp256k1Key()
|
||||
mixedServer =
|
||||
newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
mountMixedArchiveResult = mixedServer.mountArchive(mixedArchiveDriver)
|
||||
mountMixedArchiveResult = mixedServer.mountLegacyArchive(mixedArchiveDriver)
|
||||
assert mountMixedArchiveResult.isOk()
|
||||
|
||||
await mixedServer.mountLegacyStore()
|
||||
@ -1001,7 +1001,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
emptyServerKey = generateSecp256k1Key()
|
||||
emptyServer =
|
||||
newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
mountEmptyArchiveResult = emptyServer.mountArchive(emptyArchiveDriver)
|
||||
mountEmptyArchiveResult = emptyServer.mountLegacyArchive(emptyArchiveDriver)
|
||||
assert mountEmptyArchiveResult.isOk()
|
||||
|
||||
await emptyServer.mountLegacyStore()
|
||||
@ -1033,7 +1033,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
voluminousServer =
|
||||
newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
mountVoluminousArchiveResult =
|
||||
voluminousServer.mountArchive(voluminousArchiveDriverWithMessages)
|
||||
voluminousServer.mountLegacyArchive(voluminousArchiveDriverWithMessages)
|
||||
assert mountVoluminousArchiveResult.isOk()
|
||||
|
||||
await voluminousServer.mountLegacyStore()
|
||||
|
||||
tests/testlib/postgres_legacy.nim (new file, 27 lines)
@@ -0,0 +1,27 @@
import chronicles, chronos
import
  waku/waku_archive_legacy,
  waku/waku_archive_legacy/driver as driver_module,
  waku/waku_archive_legacy/driver/builder,
  waku/waku_archive_legacy/driver/postgres_driver

const storeMessageDbUrl = "postgres://postgres:test123@localhost:5432/postgres"

proc newTestPostgresDriver*(): Future[Result[ArchiveDriver, string]] {.
    async, deprecated
.} =
  proc onErr(errMsg: string) {.gcsafe, closure.} =
    error "error creating ArchiveDriver", error = errMsg
    quit(QuitFailure)

  let
    vacuum = false
    migrate = true
    maxNumConn = 50

  let driverRes =
    await ArchiveDriver.new(storeMessageDbUrl, vacuum, migrate, maxNumConn, onErr)
  if driverRes.isErr():
    onErr("could not create archive driver: " & driverRes.error)

  return ok(driverRes.get())
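A short usage sketch of this helper in a test fixture, mirroring the asyncSetup/asyncTeardown pattern used elsewhere in this diff (assumes a local Postgres instance reachable at the URL above):

  # Sketch: acquire the legacy Postgres driver for a test and release it afterwards.
  asyncSetup:
    let driverRes = await newTestPostgresDriver()
    assert driverRes.isOk(), $driverRes.error
    driver = PostgresDriver(driverRes.get())

  asyncTeardown:
    (await driver.close()).expect("driver to close")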
@@ -23,26 +23,11 @@ proc newSqliteArchiveDriver*(): ArchiveDriver =
proc newWakuArchive*(driver: ArchiveDriver): WakuArchive =
  WakuArchive.new(driver).get()

proc computeArchiveCursor*(
    pubsubTopic: PubsubTopic, message: WakuMessage
): ArchiveCursor =
  ArchiveCursor(
    pubsubTopic: pubsubTopic,
    senderTime: message.timestamp,
    storeTime: message.timestamp,
    digest: computeDigest(message),
    hash: computeMessageHash(pubsubTopic, message),
  )

proc put*(
    driver: ArchiveDriver, pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
): ArchiveDriver =
  for msg in msgList:
    let
      msgDigest = computeDigest(msg)
      msgHash = computeMessageHash(pubsubTopic, msg)
      _ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp)
    # discard crashes
    let _ = waitFor driver.put(computeMessageHash(pubsubTopic, msg), pubsubTopic, msg)
  return driver

proc newArchiveDriverWithMessages*(
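The driver put signature changes throughout this refactor: the explicit digest and storedAt timestamp arguments disappear, and the message hash becomes the row key, which is also what cursors now carry. A hedged before/after sketch using the names that appear in these tests:

  # Old (legacy) call shape: digest, hash and storedAt timestamp passed explicitly.
  #   await driver.put(pubsubTopic, msg, computeDigest(msg),
  #                    computeMessageHash(pubsubTopic, msg), msg.timestamp)

  # New call shape: the message hash identifies the row; the message itself
  # carries its timestamp.
  let msgHash = computeMessageHash(pubsubTopic, msg)
  let putRes = await driver.put(msgHash, pubsubTopic, msg)
  assert putRes.isOk(), putRes.error

  # Cursors are now plain message hashes rather than ArchiveCursor objects:
  let res = await driver.getMessages(contentTopics = @[contentTopic], cursor = some(msgHash))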
@@ -9,5 +9,6 @@ import
  ./test_driver_queue,
  ./test_driver_sqlite_query,
  ./test_driver_sqlite,
  ./test_partition_manager,
  ./test_retention_policy,
  ./test_waku_archive
@@ -12,15 +12,6 @@ import
  ../testlib/testasync,
  ../testlib/postgres

proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
  ArchiveCursor(
    pubsubTopic: pubsubTopic,
    senderTime: message.timestamp,
    storeTime: message.timestamp,
    digest: computeDigest(message),
    hash: computeMessageHash(pubsubTopic, message),
  )

suite "Postgres driver":
  ## Unique driver instance
  var driver {.threadvar.}: PostgresDriver
@@ -60,11 +51,8 @@ suite "Postgres driver":

    let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta)

    let computedDigest = computeDigest(msg)
    let computedHash = computeMessageHash(DefaultPubsubTopic, msg)

    let putRes = await driver.put(
      DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp
      computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
    )
    assert putRes.isOk(), putRes.error

@@ -72,12 +60,10 @@ suite "Postgres driver":

    assert storedMsg.len == 1

    let (pubsubTopic, actualMsg, digest, _, hash) = storedMsg[0]
    let (_, pubsubTopic, actualMsg) = storedMsg[0]
    assert actualMsg.contentTopic == contentTopic
    assert pubsubTopic == DefaultPubsubTopic
    assert toHex(computedDigest.data) == toHex(digest)
    assert toHex(actualMsg.payload) == toHex(msg.payload)
    assert toHex(computedHash) == toHex(hash)
    assert toHex(actualMsg.meta) == toHex(msg.meta)

  asyncTest "Insert and query message":
@ -88,24 +74,14 @@ suite "Postgres driver":
|
||||
|
||||
let msg1 = fakeWakuMessage(contentTopic = contentTopic1)
|
||||
|
||||
var putRes = await driver.put(
|
||||
pubsubTopic1,
|
||||
msg1,
|
||||
computeDigest(msg1),
|
||||
computeMessageHash(pubsubTopic1, msg1),
|
||||
msg1.timestamp,
|
||||
)
|
||||
var putRes =
|
||||
await driver.put(computeMessageHash(pubsubTopic1, msg1), pubsubTopic1, msg1)
|
||||
assert putRes.isOk(), putRes.error
|
||||
|
||||
let msg2 = fakeWakuMessage(contentTopic = contentTopic2)
|
||||
|
||||
putRes = await driver.put(
|
||||
pubsubTopic2,
|
||||
msg2,
|
||||
computeDigest(msg2),
|
||||
computeMessageHash(pubsubTopic2, msg2),
|
||||
msg2.timestamp,
|
||||
)
|
||||
putRes =
|
||||
await driver.put(computeMessageHash(pubsubTopic2, msg2), pubsubTopic2, msg2)
|
||||
assert putRes.isOk(), putRes.error
|
||||
|
||||
let countMessagesRes = await driver.getMessagesCount()
|
||||
@ -113,49 +89,49 @@ suite "Postgres driver":
|
||||
assert countMessagesRes.isOk(), $countMessagesRes.error
|
||||
assert countMessagesRes.get() == 2
|
||||
|
||||
var messagesRes = await driver.getMessages(contentTopic = @[contentTopic1])
|
||||
var messagesRes = await driver.getMessages(contentTopics = @[contentTopic1])
|
||||
|
||||
assert messagesRes.isOk(), $messagesRes.error
|
||||
assert messagesRes.get().len == 1
|
||||
|
||||
# Get both content topics, check ordering
|
||||
messagesRes =
|
||||
await driver.getMessages(contentTopic = @[contentTopic1, contentTopic2])
|
||||
await driver.getMessages(contentTopics = @[contentTopic1, contentTopic2])
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
|
||||
assert messagesRes.get().len == 2
|
||||
assert messagesRes.get()[0][1].contentTopic == contentTopic1
|
||||
assert messagesRes.get()[0][2].contentTopic == contentTopic1
|
||||
|
||||
# Descending order
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2], ascendingOrder = false
|
||||
contentTopics = @[contentTopic1, contentTopic2], ascendingOrder = false
|
||||
)
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
|
||||
assert messagesRes.get().len == 2
|
||||
assert messagesRes.get()[0][1].contentTopic == contentTopic2
|
||||
assert messagesRes.get()[0][2].contentTopic == contentTopic2
|
||||
|
||||
# cursor
|
||||
# Get both content topics
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2],
|
||||
cursor = some(computeTestCursor(pubsubTopic1, messagesRes.get()[1][1])),
|
||||
contentTopics = @[contentTopic1, contentTopic2],
|
||||
cursor = some(computeMessageHash(pubsubTopic1, messagesRes.get()[1][2])),
|
||||
)
|
||||
assert messagesRes.isOk()
|
||||
assert messagesRes.get().len == 1
|
||||
|
||||
# Get both content topics but one pubsub topic
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
|
||||
contentTopics = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
|
||||
)
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
|
||||
assert messagesRes.get().len == 1
|
||||
assert messagesRes.get()[0][1].contentTopic == contentTopic1
|
||||
assert messagesRes.get()[0][2].contentTopic == contentTopic1
|
||||
|
||||
# Limit
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2], maxPageSize = 1
|
||||
contentTopics = @[contentTopic1, contentTopic2], maxPageSize = 1
|
||||
)
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
assert messagesRes.get().len == 1
|
||||
@ -172,11 +148,7 @@ suite "Postgres driver":
|
||||
raiseAssert "could not get num mgs correctly: " & $error
|
||||
|
||||
var putRes = await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg1,
|
||||
computeDigest(msg1),
|
||||
computeMessageHash(DefaultPubsubTopic, msg1),
|
||||
msg1.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg1), DefaultPubsubTopic, msg1
|
||||
)
|
||||
assert putRes.isOk(), putRes.error
|
||||
|
||||
@ -187,11 +159,7 @@ suite "Postgres driver":
|
||||
"wrong number of messages: " & $newNumMsgs
|
||||
|
||||
putRes = await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg2,
|
||||
computeDigest(msg2),
|
||||
computeMessageHash(DefaultPubsubTopic, msg2),
|
||||
msg2.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg2), DefaultPubsubTopic, msg2
|
||||
)
|
||||
|
||||
assert putRes.isOk()
|
||||
|
||||
@ -27,30 +27,21 @@ logScope:
|
||||
# Initialize the random number generator
|
||||
common.randomize()
|
||||
|
||||
proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
|
||||
ArchiveCursor(
|
||||
pubsubTopic: pubsubTopic,
|
||||
senderTime: message.timestamp,
|
||||
storeTime: message.timestamp,
|
||||
digest: computeDigest(message),
|
||||
hash: computeMessageHash(pubsubTopic, message),
|
||||
)
|
||||
|
||||
suite "Postgres driver - queries":
|
||||
## Unique driver instance
|
||||
var driver {.threadvar.}: PostgresDriver
|
||||
|
||||
asyncSetup:
|
||||
let driverRes = await newTestPostgresDriver()
|
||||
if driverRes.isErr():
|
||||
assert false, driverRes.error
|
||||
|
||||
assert driverRes.isOk(), $driverRes.error
|
||||
|
||||
driver = PostgresDriver(driverRes.get())
|
||||
|
||||
asyncTeardown:
|
||||
let resetRes = await driver.reset()
|
||||
if resetRes.isErr():
|
||||
assert false, resetRes.error
|
||||
|
||||
assert resetRes.isOk(), $resetRes.error
|
||||
|
||||
(await driver.close()).expect("driver to close")
|
||||
|
||||
@ -75,15 +66,10 @@ suite "Postgres driver - queries":
|
||||
debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload)
|
||||
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
).isOk()
|
||||
let putRes = await driver.put(
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
assert putRes.isOk(), $putRes.error
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(maxPageSize = 5, ascendingOrder = true)
|
||||
@ -91,7 +77,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[0 .. 4]
|
||||
|
||||
@ -118,23 +104,19 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3]
|
||||
|
||||
@ -173,23 +155,19 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3]
|
||||
|
||||
@ -216,23 +194,19 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false
|
||||
)
|
||||
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[6 .. 7].reversed()
|
||||
|
||||
@ -261,17 +235,13 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
var res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2],
|
||||
contentTopics = @[contentTopic1, contentTopic2],
|
||||
pubsubTopic = some(DefaultPubsubTopic),
|
||||
maxPageSize = 2,
|
||||
ascendingOrder = true,
|
||||
@ -281,14 +251,14 @@ suite "Postgres driver - queries":
|
||||
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
var filteredMessages = res.tryGet().mapIt(it[1])
|
||||
var filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check filteredMessages == expected[2 .. 3]
|
||||
|
||||
## When
|
||||
## This is very similar to the previous one but we enforce to use the prepared
|
||||
## statement by querying one single content topic
|
||||
res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1],
|
||||
contentTopics = @[contentTopic1],
|
||||
pubsubTopic = some(DefaultPubsubTopic),
|
||||
maxPageSize = 2,
|
||||
ascendingOrder = true,
|
||||
@ -298,7 +268,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
filteredMessages = res.tryGet().mapIt(it[1])
|
||||
filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check filteredMessages == @[expected[2]]
|
||||
|
||||
asyncTest "single content topic - no results":
|
||||
@ -321,23 +291,19 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
@ -349,17 +315,13 @@ suite "Postgres driver - queries":
|
||||
let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t))
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[DefaultContentTopic],
|
||||
contentTopics = @[DefaultContentTopic],
|
||||
maxPageSize = pageSize,
|
||||
ascendingOrder = true,
|
||||
)
|
||||
@ -367,7 +329,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 40
|
||||
|
||||
@ -413,11 +375,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -428,7 +386,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -474,11 +432,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true)
|
||||
@ -487,7 +441,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[0 .. 1]
|
||||
|
||||
@ -533,15 +487,11 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
maxPageSize = 2,
|
||||
ascendingOrder = true,
|
||||
@ -551,7 +501,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -579,15 +529,11 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -597,7 +543,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[5 .. 6]
|
||||
|
||||
@ -625,15 +571,11 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -643,7 +585,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3].reversed()
|
||||
|
||||
@@ -669,21 +611,16 @@ suite "Postgres driver - queries":
    for msg in messages:
      require (
        await driver.put(
          DefaultPubsubTopic,
          msg,
          computeDigest(msg),
          computeMessageHash(DefaultPubsubTopic, msg),
          msg.timestamp,
          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()

    let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
    let cursor = ArchiveCursor(hash: fakeCursor)
    let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())

    ## When
    let res = await driver.getMessages(
      includeData = true,
      contentTopicSeq = @[DefaultContentTopic],
      contentTopics = @[DefaultContentTopic],
      pubsubTopic = none(PubsubTopic),
      cursor = some(cursor),
      startTime = none(Timestamp),

@@ -694,10 +631,10 @@ suite "Postgres driver - queries":
    )

    ## Then
    assert res.isOk(), res.error
    assert res.isErr(), $res.value

    check:
      res.value.len == 0
      res.error == "cursor not found"
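With hash-based cursors, an unknown cursor is now surfaced as an error instead of an empty page, as the assertion change above shows. A small hedged sketch of how a caller might handle it (names as used in these tests):

  # Sketch: querying with a cursor that the archive does not know about.
  let unknownCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
  let res = await driver.getMessages(
    contentTopics = @[DefaultContentTopic], cursor = some(unknownCursor)
  )
  if res.isErr():
    echo "store query failed: ", res.error # e.g. "cursor not found"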
asyncTest "content topic and cursor":
|
||||
## Given
|
||||
@ -723,19 +660,15 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = true,
|
||||
@ -744,7 +677,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[5 .. 6]
|
||||
|
||||
@ -772,19 +705,15 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = false,
|
||||
@ -793,7 +722,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 5].reversed()
|
||||
|
||||
@ -864,13 +793,9 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[5][0], expected[5][1])
|
||||
let cursor = computeMessageHash(expected[5][0], expected[5][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -884,7 +809,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[6 .. 7]
|
||||
|
||||
@ -955,13 +880,9 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[6][0], expected[6][1])
|
||||
let cursor = computeMessageHash(expected[6][0], expected[6][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -975,7 +896,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5].reversed()
|
||||
|
||||
@ -1003,11 +924,7 @@ suite "Postgres driver - queries":
|
||||
let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it))
|
||||
|
||||
for (msg, hash) in messages.zip(hashes):
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic, msg, computeDigest(msg), hash, msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(hash, DefaultPubsubTopic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(hashes = hashes, ascendingOrder = false)
|
||||
@ -1016,7 +933,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.reversed()
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages
|
||||
|
||||
@ -1044,11 +961,7 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -1060,7 +973,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6]
|
||||
|
||||
@ -1088,11 +1001,7 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -1104,7 +1013,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[0 .. 4]
|
||||
|
||||
@ -1177,11 +1086,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -1195,7 +1100,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[2 .. 4]
|
||||
|
||||
@ -1224,17 +1129,13 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(45, timeOrigin)),
|
||||
endTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 2,
|
||||
@ -1243,7 +1144,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
@ -1271,17 +1172,13 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = true,
|
||||
@ -1289,7 +1186,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6]
|
||||
|
||||
@ -1320,17 +1217,13 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = false,
|
||||
@ -1338,7 +1231,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6].reversed()
|
||||
|
||||
@ -1370,19 +1263,15 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[3])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[3])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
@ -1391,7 +1280,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[4 .. 9]
|
||||
|
||||
@ -1423,19 +1312,15 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
@ -1444,7 +1329,7 @@ suite "Postgres driver - queries":
|
||||
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[3 .. 4].reversed()
|
||||
|
||||
@ -1508,17 +1393,13 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[1][1])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(0, timeOrigin)),
|
||||
@ -1530,7 +1411,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[3 .. 4]
|
||||
|
||||
@ -1593,17 +1474,13 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[7][0], expected[7][1])
|
||||
let cursor = computeMessageHash(expected[7][0], expected[7][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1615,7 +1492,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5].reversed()
|
||||
|
||||
@ -1679,17 +1556,13 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[1][0], expected[1][1])
|
||||
let cursor = computeMessageHash(expected[1][0], expected[1][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1702,7 +1575,7 @@ suite "Postgres driver - queries":
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -1766,17 +1639,13 @@ suite "Postgres driver - queries":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[1][0], expected[1][1])
|
||||
let cursor = computeMessageHash(expected[1][0], expected[1][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1788,7 +1657,7 @@ suite "Postgres driver - queries":
|
||||
## Then
|
||||
assert res.isOk(), res.error
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
@ -1816,11 +1685,7 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -1867,11 +1732,7 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -1908,11 +1769,7 @@ suite "Postgres driver - queries":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
|
||||
@@ -19,13 +19,11 @@ proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =

  let
    message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
    topic = "test-pubsub-topic"
    pubsubTopic = "test-pubsub-topic"
    cursor = Index(
      receiverTime: Timestamp(i),
      senderTime: Timestamp(i),
      digest: MessageDigest(data: data),
      pubsubTopic: topic,
      hash: computeMessageHash(topic, message),
      time: Timestamp(i),
      hash: computeMessageHash(pubsubTopic, message),
      pubsubTopic: pubsubTopic,
    )

  (cursor, message)

@@ -72,7 +70,7 @@ procSuite "Sorted driver queue":

    # Attempt to add message with older value than oldest in queue should fail
    let
      oldestTimestamp = driver.first().get().senderTime
      oldestTimestamp = driver.first().get().time
      (index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1)
      addRes = driver.add(index, message)

@@ -121,7 +119,7 @@ procSuite "Sorted driver queue":

    let first = firstRes.tryGet()
    check:
      first.senderTime == Timestamp(1)
      first.time == Timestamp(1)

  test "get first item from empty queue should fail":
    ## Given

@@ -152,7 +150,7 @@ procSuite "Sorted driver queue":

    let last = lastRes.tryGet()
    check:
      last.senderTime == Timestamp(5)
      last.time == Timestamp(5)

  test "get last item from empty queue should fail":
    ## Given
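The legacy queue Index is reduced to essentially a (time, hash, pubsubTopic) record, and the rewritten comparison tests below order entries by timestamp first, breaking ties with the message hash. A small self-contained sketch of that ordering rule (illustrative only, not the repository's implementation):

  type
    Timestamp = int64
    WakuMessageHash = array[32, byte]
    Index = object
      time: Timestamp
      hash: WakuMessageHash
      pubsubTopic: string

  # Illustrative comparison: order by time, break ties with the hash bytes.
  proc cmpIndex(x, y: Index): int =
    let byTime = cmp(x.time, y.time)
    if byTime != 0:
      return byTime
    for i in 0 ..< x.hash.len:
      if x.hash[i] != y.hash[i]:
        return cmp(x.hash[i], y.hash[i])
    return 0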
@ -7,20 +7,6 @@ var rng = initRand()
|
||||
|
||||
## Helpers
|
||||
|
||||
proc getTestTimestamp(offset = 0): Timestamp =
|
||||
let now = getNanosecondTime(epochTime() + float(offset))
|
||||
Timestamp(now)
|
||||
|
||||
proc hashFromStr(input: string): MDigest[256] =
|
||||
var ctx: sha256
|
||||
|
||||
ctx.init()
|
||||
ctx.update(input.toBytes())
|
||||
let hashed = ctx.finish()
|
||||
ctx.clear()
|
||||
|
||||
return hashed
|
||||
|
||||
proc randomHash(): WakuMessageHash =
|
||||
var hash: WakuMessageHash
|
||||
|
||||
@ -33,187 +19,29 @@ proc randomHash(): WakuMessageHash =
|
||||
suite "Queue Driver - index":
|
||||
## Test vars
|
||||
let
|
||||
smallIndex1 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
hash: randomHash(),
|
||||
)
|
||||
smallIndex2 = Index(
|
||||
digest: hashFromStr("1234567"), # digest is less significant than senderTime
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
hash: randomHash(),
|
||||
)
|
||||
largeIndex1 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(9000),
|
||||
hash: randomHash(),
|
||||
) # only senderTime differ from smallIndex1
|
||||
largeIndex2 = Index(
|
||||
digest: hashFromStr("12345"), # only digest differs from smallIndex1
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
hash: randomHash(),
|
||||
)
|
||||
eqIndex1 = Index(
|
||||
digest: hashFromStr("0003"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(54321),
|
||||
hash: randomHash(),
|
||||
)
|
||||
eqIndex2 = Index(
|
||||
digest: hashFromStr("0003"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(54321),
|
||||
hash: randomHash(),
|
||||
)
|
||||
eqIndex3 = Index(
|
||||
digest: hashFromStr("0003"),
|
||||
receiverTime: getNanosecondTime(9999),
|
||||
# receiverTime difference should have no effect on comparisons
|
||||
senderTime: getNanosecondTime(54321),
|
||||
hash: randomHash(),
|
||||
)
|
||||
diffPsTopic = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime1 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(1100),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime2 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(10000),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime3 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(1200),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "aaaa",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime4 = Index(
|
||||
digest: hashFromStr("0"),
|
||||
receiverTime: getNanosecondTime(1200),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
hash = randomHash()
|
||||
eqIndex1 = Index(time: getNanosecondTime(54321), hash: hash)
|
||||
eqIndex2 = Index(time: getNanosecondTime(54321), hash: hash)
|
||||
eqIndex3 = Index(time: getNanosecondTime(54321), hash: randomHash())
|
||||
eqIndex4 = Index(time: getNanosecondTime(65432), hash: hash)
|
||||
|
||||
test "Index comparison":
|
||||
# Index comparison with senderTime diff
|
||||
check:
|
||||
cmp(smallIndex1, largeIndex1) < 0
|
||||
cmp(smallIndex2, largeIndex1) < 0
|
||||
|
||||
# Index comparison with digest diff
|
||||
check:
|
||||
cmp(smallIndex1, smallIndex2) < 0
|
||||
cmp(smallIndex1, largeIndex2) < 0
|
||||
cmp(smallIndex2, largeIndex2) > 0
|
||||
cmp(largeIndex1, largeIndex2) > 0
|
||||
|
||||
# Index comparison when equal
|
||||
check:
|
||||
# equality
|
||||
cmp(eqIndex1, eqIndex2) == 0
|
||||
cmp(eqIndex1, eqIndex3) != 0
|
||||
cmp(eqIndex1, eqIndex4) != 0
|
||||
|
||||
# pubsubTopic difference
|
||||
check:
|
||||
cmp(smallIndex1, diffPsTopic) < 0
|
||||
# ordering
|
||||
cmp(eqIndex3, eqIndex4) < 0
|
||||
cmp(eqIndex4, eqIndex3) > 0 # Test symmetry
|
||||
|
||||
# receiverTime diff plays no role when senderTime set
|
||||
check:
|
||||
cmp(eqIndex1, eqIndex3) == 0
|
||||
|
||||
# receiverTime diff plays no role when digest/pubsubTopic equal
|
||||
check:
|
||||
cmp(noSenderTime1, noSenderTime2) == 0
|
||||
|
||||
# sort on receiverTime with no senderTimestamp and unequal pubsubTopic
|
||||
check:
|
||||
cmp(noSenderTime1, noSenderTime3) < 0
|
||||
|
||||
# sort on receiverTime with no senderTimestamp and unequal digest
|
||||
check:
|
||||
cmp(noSenderTime1, noSenderTime4) < 0
|
||||
|
||||
# sort on receiverTime if no senderTimestamp on only one side
|
||||
check:
|
||||
cmp(smallIndex1, noSenderTime1) < 0
|
||||
cmp(noSenderTime1, smallIndex1) > 0 # Test symmetry
|
||||
cmp(noSenderTime2, eqIndex3) < 0
|
||||
cmp(eqIndex3, noSenderTime2) > 0 # Test symmetry
|
||||
cmp(eqIndex2, eqIndex4) < 0
|
||||
cmp(eqIndex4, eqIndex2) > 0 # Test symmetry
|
||||
|
||||
test "Index equality":
|
||||
# Exactly equal
|
||||
check:
|
||||
eqIndex1 == eqIndex2
|
||||
|
||||
# Receiver time plays no role, even without sender time
|
||||
check:
|
||||
eqIndex1 == eqIndex3
|
||||
noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal
|
||||
noSenderTime1 != noSenderTime3 # pubsubTopics differ
|
||||
noSenderTime1 != noSenderTime4 # digests differ
|
||||
|
||||
# Unequal sender time
|
||||
check:
|
||||
smallIndex1 != largeIndex1
|
||||
|
||||
# Unequal digest
|
||||
check:
|
||||
smallIndex1 != smallIndex2
|
||||
|
||||
# Unequal hash and digest
|
||||
check:
|
||||
smallIndex1 != eqIndex1
|
||||
|
||||
# Unequal pubsubTopic
|
||||
check:
|
||||
smallIndex1 != diffPsTopic
|
||||
|
||||
test "Index computation should not be empty":
|
||||
## Given
|
||||
let ts = getTestTimestamp()
|
||||
let wm = WakuMessage(payload: @[byte 1, 2, 3], timestamp: ts)
|
||||
|
||||
## When
|
||||
let ts2 = getTestTimestamp() + 10
|
||||
let index = Index.compute(wm, ts2, DefaultContentTopic)
|
||||
|
||||
## Then
|
||||
check:
|
||||
index.digest.data.len != 0
|
||||
index.digest.data.len == 32 # sha2 output length in bytes
|
||||
index.receiverTime == ts2 # the receiver timestamp should be a non-zero value
|
||||
index.senderTime == ts
|
||||
index.pubsubTopic == DefaultContentTopic
|
||||
|
||||
test "Index digest of two identical messsage should be the same":
|
||||
## Given
|
||||
let topic = ContentTopic("test-content-topic")
|
||||
let
|
||||
wm1 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
|
||||
wm2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
|
||||
|
||||
## When
|
||||
let ts = getTestTimestamp()
|
||||
let
|
||||
index1 = Index.compute(wm1, ts, DefaultPubsubTopic)
|
||||
index2 = Index.compute(wm2, ts, DefaultPubsubTopic)
|
||||
|
||||
## Then
|
||||
check:
|
||||
index1.digest == index2.digest
|
||||
eqIndex1 == eqIndex4
|
||||
eqIndex2 != eqIndex3
|
||||
eqIndex4 != eqIndex3
|
||||
|
||||
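The eqIndex checks above pin down the reduced Index introduced by this refactor: ordering is by message timestamp with the message hash as tie-breaker, while equality looks at the hash alone. A minimal standalone sketch of those semantics (field names and types are abbreviated here; the driver's actual definitions may differ):

type
  Timestamp = int64
  WakuMessageHash = array[32, byte]
  Index = object
    time: Timestamp
    hash: WakuMessageHash

# Order primarily by timestamp, breaking ties byte-wise on the hash,
# consistent with cmp(eqIndex3, eqIndex4) < 0 and cmp(eqIndex1, eqIndex3) != 0 above.
proc cmp(x, y: Index): int =
  if x.time != y.time:
    return cmp(x.time, y.time)
  for i in 0 ..< x.hash.len:
    if x.hash[i] != y.hash[i]:
      return cmp(x.hash[i], y.hash[i])
  0

# Equality considers only the hash, consistent with eqIndex1 == eqIndex4 above.
proc `==`(x, y: Index): bool =
  x.hash == y.hash
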
@ -23,10 +23,9 @@ proc getTestQueueDriver(numMessages: int): QueueDriver =
let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))

let index = Index(
receiverTime: Timestamp(i),
senderTime: Timestamp(i),
digest: MessageDigest(data: data),
time: Timestamp(i),
hash: computeMessageHash(DefaultPubsubTopic, msg),
pubsubTopic: DefaultPubsubTopic,
)

discard testQueueDriver.add(index, msg)
@ -50,7 +49,7 @@ procSuite "Queue driver - pagination":
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

## Then
let data = page.tryGet().mapIt(it[1])
let data = page.tryGet().mapIt(it[2])
check:
data.len == 2
data == msgList[4 .. 5]
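The it[1] → it[2] change in this and the following hunks follows from the driver rows now leading with the message hash, so the WakuMessage sits in the third position. A rough sketch of that row shape (the exact tuple type here is an assumption, not the driver's declared type):

type
  WakuMessageHash = array[32, byte]
  PubsubTopic = string
  WakuMessage = object
    payload: seq[byte]
    timestamp: int64

# Hypothetical row layout after the refactor: hash first, message last.
type ArchiveRow = (WakuMessageHash, PubsubTopic, WakuMessage)

proc messagesOf(rows: seq[ArchiveRow]): seq[WakuMessage] =
  # Equivalent of the tests' mapIt(it[2]): keep only the message column.
  for row in rows:
    result.add row[2]
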
@ -66,7 +65,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 2
|
||||
data == msgList[0 .. 1]
|
||||
@ -82,7 +81,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 10
|
||||
data == msgList[0 .. 9]
|
||||
@ -99,7 +98,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
@ -114,7 +113,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 6
|
||||
data == msgList[4 .. 9]
|
||||
@ -130,7 +129,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
uint(data.len) <= MaxPageSize
|
||||
|
||||
@ -145,19 +144,14 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Forward pagination - invalid cursor":
|
||||
## Given
|
||||
let msg = fakeWakuMessage(payload = @[byte 10])
|
||||
let index = ArchiveCursor(
|
||||
pubsubTopic: DefaultPubsubTopic,
|
||||
senderTime: msg.timestamp,
|
||||
storeTime: msg.timestamp,
|
||||
digest: computeDigest(msg),
|
||||
).toIndex()
|
||||
let index = Index(hash: computeMessageHash(DefaultPubsubTopic, msg))
|
||||
|
||||
let
|
||||
pageSize: uint = 10
|
||||
@ -184,7 +178,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 1
|
||||
|
||||
@ -200,7 +194,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
@ -220,7 +214,7 @@ procSuite "Queue driver - pagination":
|
||||
)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.mapIt(it.timestamp.int) == @[0, 2, 4]
|
||||
|
||||
@ -235,7 +229,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data == msgList[1 .. 2].reversed
|
||||
|
||||
@ -251,7 +245,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
@ -266,7 +260,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 2
|
||||
data == msgList[8 .. 9].reversed
|
||||
@ -282,7 +276,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 10
|
||||
data == msgList[0 .. 9].reversed
|
||||
@ -298,7 +292,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data == msgList[0 .. 2].reversed
|
||||
|
||||
@ -313,7 +307,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
uint(data.len) <= MaxPageSize
|
||||
|
||||
@ -328,19 +322,14 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Backward pagination - invalid cursor":
|
||||
## Given
|
||||
let msg = fakeWakuMessage(payload = @[byte 10])
|
||||
let index = ArchiveCursor(
|
||||
pubsubTopic: DefaultPubsubTopic,
|
||||
senderTime: msg.timestamp,
|
||||
storeTime: msg.timestamp,
|
||||
digest: computeDigest(msg),
|
||||
).toIndex()
|
||||
let index = Index(hash: computeMessageHash(DefaultPubsubTopic, msg))
|
||||
|
||||
let
|
||||
pageSize: uint = 2
|
||||
@ -367,7 +356,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 1
|
||||
|
||||
@ -383,7 +372,7 @@ procSuite "Queue driver - pagination":
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
@ -403,6 +392,6 @@ procSuite "Queue driver - pagination":
|
||||
)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
let data = page.tryGet().mapIt(it[2])
|
||||
check:
|
||||
data.mapIt(it.timestamp.int) == @[5, 7, 9].reversed
|
||||
|
||||
@ -22,15 +22,6 @@ common.randomize()
|
||||
proc newTestSqliteDriver(): ArchiveDriver =
|
||||
QueueDriver.new(capacity = 50)
|
||||
|
||||
proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
|
||||
ArchiveCursor(
|
||||
pubsubTopic: pubsubTopic,
|
||||
senderTime: message.timestamp,
|
||||
storeTime: message.timestamp,
|
||||
digest: computeDigest(message),
|
||||
hash: computeMessageHash(pubsubTopic, message),
|
||||
)
|
||||
|
||||
suite "Queue driver - query by content topic":
|
||||
test "no content topic":
|
||||
## Given
|
||||
@ -56,11 +47,7 @@ suite "Queue driver - query by content topic":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
@ -71,7 +58,7 @@ suite "Queue driver - query by content topic":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[0 .. 4]
|
||||
|
||||
@ -102,24 +89,20 @@ suite "Queue driver - query by content topic":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3]
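Throughout these query suites the getMessages keyword argument is renamed from contentTopic to contentTopics; only the name changes, the value is still a seq of topics. A hedged stand-in to illustrate the call-site difference (getMessages here is a dummy proc, not the real driver API):

# Dummy stand-in with the renamed parameter, just to show the call shape.
proc getMessages(contentTopics: seq[string], maxPageSize: uint, ascendingOrder: bool): int =
  contentTopics.len

# Old call sites passed `contentTopic = @[...]`; after the rename:
discard getMessages(
  contentTopics = @["/waku/2/default-content/proto"], maxPageSize = 2, ascendingOrder = true
)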
|
||||
|
||||
@ -150,24 +133,20 @@ suite "Queue driver - query by content topic":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[6 .. 7].reversed()
|
||||
|
||||
@ -200,17 +179,13 @@ suite "Queue driver - query by content topic":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2],
|
||||
contentTopics = @[contentTopic1, contentTopic2],
|
||||
maxPageSize = 2,
|
||||
ascendingOrder = true,
|
||||
)
|
||||
@ -219,7 +194,7 @@ suite "Queue driver - query by content topic":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3]
|
||||
|
||||
@ -247,24 +222,20 @@ suite "Queue driver - query by content topic":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
@ -280,17 +251,13 @@ suite "Queue driver - query by content topic":
|
||||
for t in 0 ..< 40:
|
||||
let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t))
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[DefaultContentTopic],
|
||||
contentTopics = @[DefaultContentTopic],
|
||||
maxPageSize = pageSize,
|
||||
ascendingOrder = true,
|
||||
)
|
||||
@ -299,7 +266,7 @@ suite "Queue driver - query by content topic":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 40
|
||||
|
||||
@ -351,9 +318,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
@ -366,7 +331,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -417,9 +382,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
@ -430,7 +393,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[0 .. 1]
|
||||
|
||||
@ -481,14 +444,12 @@ suite "SQLite driver - query by pubsub topic":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
maxPageSize = 2,
|
||||
ascendingOrder = true,
|
||||
@ -499,7 +460,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -532,15 +493,11 @@ suite "Queue driver - query by cursor":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
@ -551,7 +508,7 @@ suite "Queue driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[5 .. 6]
|
||||
|
||||
@ -583,15 +540,11 @@ suite "Queue driver - query by cursor":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
@ -602,7 +555,7 @@ suite "Queue driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3].reversed()
|
||||
|
||||
@ -632,21 +585,16 @@ suite "Queue driver - query by cursor":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
|
||||
let cursor = ArchiveCursor(hash: fakeCursor)
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
includeData = true,
|
||||
contentTopic = @[DefaultContentTopic],
|
||||
contentTopics = @[DefaultContentTopic],
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = none(Timestamp),
|
||||
@ -689,19 +637,15 @@ suite "Queue driver - query by cursor":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = true,
|
||||
@ -711,7 +655,7 @@ suite "Queue driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[5 .. 6]
|
||||
|
||||
@ -743,19 +687,15 @@ suite "Queue driver - query by cursor":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = false,
|
||||
@ -765,7 +705,7 @@ suite "Queue driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 5].reversed()
|
||||
|
||||
@ -841,12 +781,10 @@ suite "Queue driver - query by cursor":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[5][0], expected[5][1])
|
||||
let cursor = computeMessageHash(expected[5][0], expected[5][1])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
@ -861,7 +799,7 @@ suite "Queue driver - query by cursor":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[6 .. 7]
|
||||
|
||||
@ -937,12 +875,10 @@ suite "Queue driver - query by cursor":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[6][0], expected[6][1])
|
||||
let cursor = computeMessageHash(expected[6][0], expected[6][1])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
@ -957,7 +893,7 @@ suite "Queue driver - query by cursor":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5].reversed()
|
||||
|
||||
@ -990,11 +926,7 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
@ -1007,7 +939,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6]
|
||||
|
||||
@ -1039,11 +971,7 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
@ -1056,7 +984,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[0 .. 4]
|
||||
|
||||
@ -1134,9 +1062,7 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
@ -1152,7 +1078,7 @@ suite "Queue driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[2 .. 4]
|
||||
|
||||
@ -1185,17 +1111,13 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(45, timeOrigin)),
|
||||
endTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 2,
|
||||
@ -1205,7 +1127,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
@ -1237,17 +1159,13 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = true,
|
||||
@ -1256,7 +1174,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6]
|
||||
|
||||
@ -1291,17 +1209,13 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = false,
|
||||
@ -1310,7 +1224,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6].reversed()
|
||||
|
||||
@ -1346,19 +1260,15 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[3])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[3])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
@ -1368,7 +1278,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[4 .. 9]
|
||||
|
||||
@ -1404,19 +1314,15 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for msg in messages:
|
||||
let retFut = await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
@ -1426,7 +1332,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[3 .. 4].reversed()
|
||||
|
||||
@ -1495,16 +1401,14 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(DefaultPubsubTopic, expected[1][1])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(0, timeOrigin)),
|
||||
@ -1517,7 +1421,7 @@ suite "Queue driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[3 .. 4]
|
||||
|
||||
@ -1585,16 +1489,14 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[7][0], expected[7][1])
|
||||
let cursor = computeMessageHash(expected[7][0], expected[7][1])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1607,7 +1509,7 @@ suite "Queue driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5].reversed()
|
||||
|
||||
@ -1676,16 +1578,14 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[1][0], expected[1][1])
|
||||
let cursor = computeMessageHash(expected[1][0], expected[1][1])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1699,7 +1599,7 @@ suite "Queue driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -1768,16 +1668,14 @@ suite "Queue driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
let retFut = waitFor driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
|
||||
require retFut.isOk()
|
||||
|
||||
let cursor = computeTestCursor(expected[1][0], expected[1][1])
|
||||
let cursor = computeMessageHash(expected[1][0], expected[1][1])
|
||||
|
||||
## When
|
||||
let res = waitFor driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1790,7 +1688,7 @@ suite "Queue driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
|
||||
@ -9,7 +9,6 @@ import
waku_core,
],
../waku_archive/archive_utils,
../testlib/common,
../testlib/wakucore

suite "SQLite driver":
@ -42,9 +41,7 @@ suite "SQLite driver":
let msgHash = computeMessageHash(DefaultPubsubTopic, msg)

## When
let putRes = waitFor driver.put(
DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp
)
let putRes = waitFor driver.put(msgHash, DefaultPubsubTopic, msg)

## Then
check:
@ -54,7 +51,7 @@ suite "SQLite driver":
check:
storedMsg.len == 1
storedMsg.all do(item: auto) -> bool:
let (pubsubTopic, actualMsg, _, _, hash) = item
let (hash, pubsubTopic, actualMsg) = item
actualMsg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and
hash == msgHash and msg.meta == actualMsg.meta
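The hunk above captures the two signature changes in this refactor: ArchiveDriver.put now takes the message hash first (put(hash, pubsubTopic, msg)), and getAllMessages returns slimmer (hash, pubsubTopic, message) rows. A self-contained sketch of that calling pattern, using an in-memory stand-in rather than the real SQLite driver:

type
  WakuMessageHash = array[32, byte]
  PubsubTopic = string
  WakuMessage = object
    payload: seq[byte]
    contentTopic: string
    timestamp: int64

type FakeArchive = object
  rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)]

proc put(a: var FakeArchive, hash: WakuMessageHash, pubsubTopic: PubsubTopic, msg: WakuMessage) =
  # New argument order: the message hash leads, mirroring put(msgHash, DefaultPubsubTopic, msg).
  a.rows.add((hash, pubsubTopic, msg))

proc getAllMessages(a: FakeArchive): seq[(WakuMessageHash, PubsubTopic, WakuMessage)] =
  # Rows come back as (hash, pubsubTopic, message), as unpacked in the check above.
  a.rows

when isMainModule:
  var archive: FakeArchive
  let msg = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: "test", timestamp: 7)
  var msgHash: WakuMessageHash # a real node would derive this with computeMessageHash(pubsubTopic, msg)
  archive.put(msgHash, "/waku/2/rs/0/0", msg)
  assert archive.getAllMessages().len == 1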
|
||||
|
||||
|
||||
@ -47,11 +47,7 @@ suite "SQLite driver - query by content topic":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -62,7 +58,7 @@ suite "SQLite driver - query by content topic":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[0 .. 4]
|
||||
|
||||
@ -94,24 +90,20 @@ suite "SQLite driver - query by content topic":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3]
|
||||
|
||||
@ -155,24 +147,20 @@ suite "SQLite driver - query by content topic":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3]
|
||||
|
||||
@ -204,24 +192,20 @@ suite "SQLite driver - query by content topic":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[6 .. 7].reversed()
|
||||
|
||||
@ -255,17 +239,13 @@ suite "SQLite driver - query by content topic":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2],
|
||||
contentTopics = @[contentTopic1, contentTopic2],
|
||||
maxPageSize = 2,
|
||||
ascendingOrder = true,
|
||||
)
|
||||
@ -274,7 +254,7 @@ suite "SQLite driver - query by content topic":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3]
|
||||
|
||||
@ -303,24 +283,20 @@ suite "SQLite driver - query by content topic":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
@ -337,17 +313,13 @@ suite "SQLite driver - query by content topic":
|
||||
let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t))
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[DefaultContentTopic],
|
||||
contentTopics = @[DefaultContentTopic],
|
||||
maxPageSize = pageSize,
|
||||
ascendingOrder = true,
|
||||
)
|
||||
@ -356,7 +328,7 @@ suite "SQLite driver - query by content topic":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 40
|
||||
|
||||
@ -408,11 +380,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -424,7 +392,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -475,11 +443,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true)
|
||||
@ -489,7 +453,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[0 .. 1]
|
||||
|
||||
@ -540,15 +504,11 @@ suite "SQLite driver - query by pubsub topic":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
maxPageSize = 2,
|
||||
ascendingOrder = true,
|
||||
@ -559,7 +519,7 @@ suite "SQLite driver - query by pubsub topic":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -593,15 +553,11 @@ suite "SQLite driver - query by cursor":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -612,7 +568,7 @@ suite "SQLite driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[5 .. 6]
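In these cursor tests the ArchiveCursor built by computeArchiveCursor is replaced by the bare hash returned by computeMessageHash, so a cursor is now simply the hash of the last message already delivered. An illustrative sketch of paging on such a hash cursor (a plain list scan, not the driver's actual lookup):

type
  WakuMessageHash = array[32, byte]
  Row = (WakuMessageHash, string, int64) # stand-in for (hash, pubsubTopic, timestamp)

proc pageAfter(rows: seq[Row], cursor: WakuMessageHash, pageSize: int): seq[Row] =
  # Skip rows up to and including the one matching the cursor hash,
  # then return at most pageSize of the rows that follow it.
  var started = false
  for row in rows:
    if started and result.len < pageSize:
      result.add row
    if row[0] == cursor:
      started = true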
|
||||
|
||||
@ -645,15 +601,11 @@ suite "SQLite driver - query by cursor":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -664,7 +616,7 @@ suite "SQLite driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 3].reversed()
|
||||
|
||||
@ -695,21 +647,16 @@ suite "SQLite driver - query by cursor":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
|
||||
let cursor = ArchiveCursor(hash: fakeCursor)
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
includeData = true,
|
||||
contentTopic = @[DefaultContentTopic],
|
||||
contentTopics = @[DefaultContentTopic],
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = none(Timestamp),
|
||||
@ -753,19 +700,15 @@ suite "SQLite driver - query by cursor":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = true,
|
||||
@ -775,7 +718,7 @@ suite "SQLite driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[5 .. 6]
|
||||
|
||||
@ -808,19 +751,15 @@ suite "SQLite driver - query by cursor":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = false,
|
||||
@ -830,7 +769,7 @@ suite "SQLite driver - query by cursor":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 5].reversed()
|
||||
|
||||
@ -906,13 +845,9 @@ suite "SQLite driver - query by cursor":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(expected[5][0], expected[5][1])
|
||||
let cursor = computeMessageHash(expected[5][0], expected[5][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -927,7 +862,7 @@ suite "SQLite driver - query by cursor":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[6 .. 7]
|
||||
|
||||
@ -1003,13 +938,9 @@ suite "SQLite driver - query by cursor":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(expected[6][0], expected[6][1])
|
||||
let cursor = computeMessageHash(expected[6][0], expected[6][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -1024,7 +955,7 @@ suite "SQLite driver - query by cursor":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5].reversed()
|
||||
|
||||
@ -1058,11 +989,7 @@ suite "SQLite driver - query by time range":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -1075,7 +1002,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6]
|
||||
|
||||
@ -1108,11 +1035,7 @@ suite "SQLite driver - query by time range":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -1125,7 +1048,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[0 .. 4]
|
||||
|
||||
@ -1203,11 +1126,7 @@ suite "SQLite driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
@ -1222,7 +1141,7 @@ suite "SQLite driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[2 .. 4]
|
||||
|
||||
@ -1256,17 +1175,13 @@ suite "SQLite driver - query by time range":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(45, timeOrigin)),
|
||||
endTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 2,
|
||||
@ -1276,7 +1191,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
@ -1309,17 +1224,13 @@ suite "SQLite driver - query by time range":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = true,
|
||||
@ -1328,7 +1239,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6]
|
||||
|
||||
@ -1364,17 +1275,13 @@ suite "SQLite driver - query by time range":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
ascendingOrder = false,
|
||||
@ -1383,7 +1290,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[2 .. 6].reversed()
|
||||
|
||||
@ -1420,19 +1327,15 @@ suite "SQLite driver - query by time range":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[3])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[3])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
@ -1442,7 +1345,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[4 .. 9]
|
||||
|
||||
@ -1479,19 +1382,15 @@ suite "SQLite driver - query by time range":
|
||||
for msg in messages:
|
||||
require (
|
||||
await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(15, timeOrigin)),
|
||||
maxPageSize = 10,
|
||||
@ -1501,7 +1400,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expected[3 .. 4].reversed()
|
||||
|
||||
@ -1570,17 +1469,13 @@ suite "SQLite driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[1][1])
|
||||
let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(0, timeOrigin)),
|
||||
@ -1593,7 +1488,7 @@ suite "SQLite driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[3 .. 4]
|
||||
|
||||
@ -1661,17 +1556,13 @@ suite "SQLite driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(expected[7][0], expected[7][1])
|
||||
let cursor = computeMessageHash(expected[7][0], expected[7][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1684,7 +1575,7 @@ suite "SQLite driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5].reversed()
|
||||
|
||||
@ -1753,17 +1644,13 @@ suite "SQLite driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(expected[1][0], expected[1][1])
|
||||
let cursor = computeMessageHash(expected[1][0], expected[1][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1777,7 +1664,7 @@ suite "SQLite driver - query by time range":
|
||||
res.isOk()
|
||||
|
||||
let expectedMessages = expected.mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages == expectedMessages[4 .. 5]
|
||||
|
||||
@ -1846,17 +1733,13 @@ suite "SQLite driver - query by time range":
|
||||
|
||||
for row in messages:
|
||||
let (topic, msg) = row
|
||||
require (
|
||||
await driver.put(
|
||||
topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
|
||||
|
||||
let cursor = computeArchiveCursor(expected[1][0], expected[1][1])
|
||||
let cursor = computeMessageHash(expected[1][0], expected[1][1])
|
||||
|
||||
## When
|
||||
let res = await driver.getMessages(
|
||||
contentTopic = @[contentTopic],
|
||||
contentTopics = @[contentTopic],
|
||||
pubsubTopic = some(pubsubTopic),
|
||||
cursor = some(cursor),
|
||||
startTime = some(ts(35, timeOrigin)),
|
||||
@ -1869,7 +1752,7 @@ suite "SQLite driver - query by time range":
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let filteredMessages = res.tryGet().mapIt(it[1])
|
||||
let filteredMessages = res.tryGet().mapIt(it[2])
|
||||
check:
|
||||
filteredMessages.len == 0
|
||||
|
||||
|
||||
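The hunks above all apply the same mechanical migration to the SQLite query tests: the legacy five-argument driver.put(pubsubTopic, msg, digest, hash, timestamp) becomes the hash-first put(hash, pubsubTopic, msg), the getMessages parameter contentTopic is renamed to contentTopics, cursors become plain message hashes (computeMessageHash instead of computeArchiveCursor), and the returned rows now carry the message at index 2, which is why every mapIt(it[1]) becomes mapIt(it[2]). A condensed before/after sketch of the pattern (illustrative, not an exact excerpt from the diff):

# Legacy call shape being removed:
#   discard await driver.put(
#     DefaultPubsubTopic, msg, computeDigest(msg),
#     computeMessageHash(DefaultPubsubTopic, msg), msg.timestamp,
#   )
# New call shape used throughout the updated tests:
discard await driver.put(computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg)

# Rows now carry the message at index 2 (they unpack as (_, pubsubTopic, msg) further below):
let filteredMessages = res.tryGet().mapIt(it[2])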
@ -13,7 +13,6 @@ import
|
||||
waku_archive/retention_policy/retention_policy_size,
|
||||
],
|
||||
../waku_archive/archive_utils,
|
||||
../testlib/common,
|
||||
../testlib/wakucore
|
||||
|
||||
suite "Waku Archive - Retention policy":
|
||||
@ -35,18 +34,13 @@ suite "Waku Archive - Retention policy":
|
||||
payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i)
|
||||
)
|
||||
putFutures.add(
|
||||
driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
driver.put(computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg)
|
||||
)
|
||||
|
||||
discard waitFor allFinished(putFutures)
|
||||
|
||||
require (waitFor retentionPolicy.execute(driver)).isOk()
|
||||
let res = waitFor retentionPolicy.execute(driver)
|
||||
assert res.isOk(), $res.error
|
||||
|
||||
## Then
|
||||
let numMessages = (waitFor driver.getMessagesCount()).tryGet()
|
||||
@ -88,13 +82,7 @@ suite "Waku Archive - Retention policy":
|
||||
payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i)
|
||||
)
|
||||
putFutures.add(
|
||||
driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
driver.put(computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg)
|
||||
)
|
||||
|
||||
# waitFor is used to synchronously wait for the futures to complete.
|
||||
@ -150,11 +138,7 @@ suite "Waku Archive - Retention policy":
|
||||
for msg in messages:
|
||||
require (
|
||||
waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
require (waitFor retentionPolicy.execute(driver)).isOk()
|
||||
@ -164,7 +148,7 @@ suite "Waku Archive - Retention policy":
|
||||
check:
|
||||
storedMsg.len == capacity
|
||||
storedMsg.all do(item: auto) -> bool:
|
||||
let (pubsubTopic, msg, _, _, _) = item
|
||||
let (_, pubsubTopic, msg) = item
|
||||
msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic
|
||||
|
||||
## Cleanup
|
||||
|
||||
@ -1,11 +1,6 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[options, sequtils],
|
||||
testutils/unittests,
|
||||
chronicles,
|
||||
chronos,
|
||||
libp2p/crypto/crypto
|
||||
import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto
|
||||
|
||||
import
|
||||
waku/[
|
||||
@ -17,7 +12,6 @@ import
|
||||
waku_archive,
|
||||
],
|
||||
../waku_archive/archive_utils,
|
||||
../testlib/common,
|
||||
../testlib/wakucore
|
||||
|
||||
suite "Waku Archive - message handling":
|
||||
@ -60,7 +54,7 @@ suite "Waku Archive - message handling":
|
||||
check:
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 2
|
||||
|
||||
test "it should archive a message with no sender timestamp":
|
||||
test "it should not archive a message with no sender timestamp":
|
||||
## Setup
|
||||
let driver = newSqliteArchiveDriver()
|
||||
let archive = newWakuArchive(driver)
|
||||
@ -74,7 +68,7 @@ suite "Waku Archive - message handling":
|
||||
|
||||
## Then
|
||||
check:
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 1
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 0
|
||||
|
||||
test "it should not archive a message with a sender time variance greater than max time variance (future)":
|
||||
## Setup
|
||||
@ -160,11 +154,7 @@ procSuite "Waku Archive - find messages":
|
||||
for msg in msgListA:
|
||||
require (
|
||||
waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
@ -250,13 +240,11 @@ procSuite "Waku Archive - find messages":
|
||||
let queryRes = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
queryRes.isErr()
|
||||
assert queryRes.isOk(), $queryRes.error
|
||||
|
||||
let error = queryRes.tryError()
|
||||
let response = queryRes.tryGet()
|
||||
check:
|
||||
error.kind == ArchiveErrorKind.INVALID_QUERY
|
||||
error.cause == "too many content topics"
|
||||
response.messages.len() == 0
|
||||
|
||||
test "handle query with pubsub topic filter":
|
||||
## Setup
|
||||
@ -394,8 +382,8 @@ procSuite "Waku Archive - find messages":
|
||||
|
||||
## Then
|
||||
check:
|
||||
cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[3]))
|
||||
cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[7]))
|
||||
cursors[0] == some(computeMessageHash(DefaultPubsubTopic, msgListA[3]))
|
||||
cursors[1] == some(computeMessageHash(DefaultPubsubTopic, msgListA[7]))
|
||||
cursors[2] == none(ArchiveCursor)
|
||||
|
||||
check:
|
||||
@ -428,8 +416,8 @@ procSuite "Waku Archive - find messages":
|
||||
|
||||
## Then
|
||||
check:
|
||||
cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[6]))
|
||||
cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[2]))
|
||||
cursors[0] == some(computeMessageHash(DefaultPubsubTopic, msgListA[6]))
|
||||
cursors[1] == some(computeMessageHash(DefaultPubsubTopic, msgListA[2]))
|
||||
cursors[2] == none(ArchiveCursor)
|
||||
|
||||
check:
|
||||
@ -460,11 +448,7 @@ procSuite "Waku Archive - find messages":
|
||||
for msg in msgList:
|
||||
require (
|
||||
waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
|
||||
)
|
||||
).isOk()
|
||||
|
||||
|
||||
tests/waku_archive_legacy/archive_utils.nim (new file, 53 lines)
@ -0,0 +1,53 @@
{.used.}

import std/options, results, chronos, libp2p/crypto/crypto

import
  waku/[
    node/peer_manager,
    waku_core,
    waku_archive_legacy,
    waku_archive_legacy/common,
    waku_archive_legacy/driver/sqlite_driver,
    common/databases/db_sqlite,
  ],
  ../testlib/[wakucore]

proc newSqliteDatabase*(path: Option[string] = string.none()): SqliteDatabase =
  SqliteDatabase.new(path.get(":memory:")).tryGet()

proc newSqliteArchiveDriver*(): ArchiveDriver =
  let database = newSqliteDatabase()
  SqliteDriver.new(database).tryGet()

proc newWakuArchive*(driver: ArchiveDriver): WakuArchive =
  WakuArchive.new(driver).get()

proc computeArchiveCursor*(
    pubsubTopic: PubsubTopic, message: WakuMessage
): ArchiveCursor =
  ArchiveCursor(
    pubsubTopic: pubsubTopic,
    senderTime: message.timestamp,
    storeTime: message.timestamp,
    digest: computeDigest(message),
    hash: computeMessageHash(pubsubTopic, message),
  )

proc put*(
    driver: ArchiveDriver, pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
): ArchiveDriver =
  for msg in msgList:
    let
      msgDigest = computeDigest(msg)
      msgHash = computeMessageHash(pubsubTopic, msg)
      _ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp)
        # discard crashes
  return driver

proc newArchiveDriverWithMessages*(
    pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
): ArchiveDriver =
  var driver = newSqliteArchiveDriver()
  driver = driver.put(pubsubTopic, msgList)
  return driver
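For orientation, here is a minimal usage sketch of the helpers this new archive_utils.nim exposes to the legacy test suites below (a hypothetical snippet, not part of the diff; fakeWakuMessage and DefaultPubsubTopic come from the wakucore testlib used throughout these tests):

# Hypothetical snippet: how the suites below typically use these helpers.
let msgs = @[fakeWakuMessage(payload = "a"), fakeWakuMessage(payload = "b")]
let driver = newArchiveDriverWithMessages(DefaultPubsubTopic, msgs)
let cursor = computeArchiveCursor(DefaultPubsubTopic, msgs[0])
# `driver` now contains both messages; `cursor` can seed a paginated legacy query.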
tests/waku_archive_legacy/test_all.nim (new file, 13 lines)
@ -0,0 +1,13 @@
{.used.}

import
  ./test_driver_postgres_query,
  ./test_driver_postgres,
  ./test_driver_queue_index,
  ./test_driver_queue_pagination,
  ./test_driver_queue_query,
  ./test_driver_queue,
  ./test_driver_sqlite_query,
  ./test_driver_sqlite,
  ./test_retention_policy,
  ./test_waku_archive
tests/waku_archive_legacy/test_driver_postgres.nim (new file, 201 lines)
@ -0,0 +1,201 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils, options], testutils/unittests, chronos
|
||||
import
|
||||
waku/waku_archive_legacy,
|
||||
waku/waku_archive_legacy/driver/postgres_driver,
|
||||
waku/waku_core,
|
||||
waku/waku_core/message/digest,
|
||||
../testlib/wakucore,
|
||||
../testlib/testasync,
|
||||
../testlib/postgres_legacy
|
||||
|
||||
proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
|
||||
ArchiveCursor(
|
||||
pubsubTopic: pubsubTopic,
|
||||
senderTime: message.timestamp,
|
||||
storeTime: message.timestamp,
|
||||
digest: computeDigest(message),
|
||||
hash: computeMessageHash(pubsubTopic, message),
|
||||
)
|
||||
|
||||
suite "Postgres driver":
|
||||
## Unique driver instance
|
||||
var driver {.threadvar.}: PostgresDriver
|
||||
|
||||
asyncSetup:
|
||||
let driverRes = await newTestPostgresDriver()
|
||||
if driverRes.isErr():
|
||||
assert false, driverRes.error
|
||||
|
||||
driver = PostgresDriver(driverRes.get())
|
||||
|
||||
asyncTeardown:
|
||||
let resetRes = await driver.reset()
|
||||
if resetRes.isErr():
|
||||
assert false, resetRes.error
|
||||
|
||||
(await driver.close()).expect("driver to close")
|
||||
|
||||
asyncTest "Asynchronous queries":
|
||||
var futures = newSeq[Future[ArchiveDriverResult[void]]](0)
|
||||
|
||||
let beforeSleep = now()
|
||||
for _ in 1 .. 100:
|
||||
futures.add(driver.sleep(1))
|
||||
|
||||
await allFutures(futures)
|
||||
|
||||
let diff = now() - beforeSleep
|
||||
# Actually, the diff randomly goes between 1 and 2 seconds.
|
||||
# although in theory it should spend 1s because we establish 100
|
||||
# connections and we spawn 100 tasks that spend ~1s each.
|
||||
assert diff < 20_000_000_000
|
||||
|
||||
asyncTest "Insert a message":
|
||||
const contentTopic = "test-content-topic"
|
||||
const meta = "test meta"
|
||||
|
||||
let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta)
|
||||
|
||||
let computedDigest = computeDigest(msg)
|
||||
let computedHash = computeMessageHash(DefaultPubsubTopic, msg)
|
||||
|
||||
let putRes = await driver.put(
|
||||
DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp
|
||||
)
|
||||
assert putRes.isOk(), putRes.error
|
||||
|
||||
let storedMsg = (await driver.getAllMessages()).tryGet()
|
||||
|
||||
assert storedMsg.len == 1
|
||||
|
||||
let (pubsubTopic, actualMsg, digest, _, hash) = storedMsg[0]
|
||||
assert actualMsg.contentTopic == contentTopic
|
||||
assert pubsubTopic == DefaultPubsubTopic
|
||||
assert toHex(computedDigest.data) == toHex(digest)
|
||||
assert toHex(actualMsg.payload) == toHex(msg.payload)
|
||||
assert toHex(computedHash) == toHex(hash)
|
||||
assert toHex(actualMsg.meta) == toHex(msg.meta)
|
||||
|
||||
asyncTest "Insert and query message":
|
||||
const contentTopic1 = "test-content-topic-1"
|
||||
const contentTopic2 = "test-content-topic-2"
|
||||
const pubsubTopic1 = "pubsubtopic-1"
|
||||
const pubsubTopic2 = "pubsubtopic-2"
|
||||
|
||||
let msg1 = fakeWakuMessage(contentTopic = contentTopic1)
|
||||
|
||||
var putRes = await driver.put(
|
||||
pubsubTopic1,
|
||||
msg1,
|
||||
computeDigest(msg1),
|
||||
computeMessageHash(pubsubTopic1, msg1),
|
||||
msg1.timestamp,
|
||||
)
|
||||
assert putRes.isOk(), putRes.error
|
||||
|
||||
let msg2 = fakeWakuMessage(contentTopic = contentTopic2)
|
||||
|
||||
putRes = await driver.put(
|
||||
pubsubTopic2,
|
||||
msg2,
|
||||
computeDigest(msg2),
|
||||
computeMessageHash(pubsubTopic2, msg2),
|
||||
msg2.timestamp,
|
||||
)
|
||||
assert putRes.isOk(), putRes.error
|
||||
|
||||
let countMessagesRes = await driver.getMessagesCount()
|
||||
|
||||
assert countMessagesRes.isOk(), $countMessagesRes.error
|
||||
assert countMessagesRes.get() == 2
|
||||
|
||||
var messagesRes = await driver.getMessages(contentTopic = @[contentTopic1])
|
||||
|
||||
assert messagesRes.isOk(), $messagesRes.error
|
||||
assert messagesRes.get().len == 1
|
||||
|
||||
# Get both content topics, check ordering
|
||||
messagesRes =
|
||||
await driver.getMessages(contentTopic = @[contentTopic1, contentTopic2])
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
|
||||
assert messagesRes.get().len == 2
|
||||
assert messagesRes.get()[0][1].contentTopic == contentTopic1
|
||||
|
||||
# Descending order
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2], ascendingOrder = false
|
||||
)
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
|
||||
assert messagesRes.get().len == 2
|
||||
assert messagesRes.get()[0][1].contentTopic == contentTopic2
|
||||
|
||||
# cursor
|
||||
# Get both content topics
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2],
|
||||
cursor = some(computeTestCursor(pubsubTopic1, messagesRes.get()[1][1])),
|
||||
)
|
||||
assert messagesRes.isOk()
|
||||
assert messagesRes.get().len == 1
|
||||
|
||||
# Get both content topics but one pubsub topic
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
|
||||
)
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
|
||||
assert messagesRes.get().len == 1
|
||||
assert messagesRes.get()[0][1].contentTopic == contentTopic1
|
||||
|
||||
# Limit
|
||||
messagesRes = await driver.getMessages(
|
||||
contentTopic = @[contentTopic1, contentTopic2], maxPageSize = 1
|
||||
)
|
||||
assert messagesRes.isOk(), messagesRes.error
|
||||
assert messagesRes.get().len == 1
|
||||
|
||||
asyncTest "Insert true duplicated messages":
|
||||
# Validates that two completely equal messages cannot be stored.
|
||||
|
||||
let now = now()
|
||||
|
||||
let msg1 = fakeWakuMessage(ts = now)
|
||||
let msg2 = fakeWakuMessage(ts = now)
|
||||
|
||||
let initialNumMsgs = (await driver.getMessagesCount()).valueOr:
|
||||
raiseAssert "could not get num msgs correctly: " & $error
|
||||
|
||||
var putRes = await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg1,
|
||||
computeDigest(msg1),
|
||||
computeMessageHash(DefaultPubsubTopic, msg1),
|
||||
msg1.timestamp,
|
||||
)
|
||||
assert putRes.isOk(), putRes.error
|
||||
|
||||
var newNumMsgs = (await driver.getMessagesCount()).valueOr:
|
||||
raiseAssert "could not get num msgs correctly: " & $error
|
||||
|
||||
assert newNumMsgs == (initialNumMsgs + 1.int64),
|
||||
"wrong number of messages: " & $newNumMsgs
|
||||
|
||||
putRes = await driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg2,
|
||||
computeDigest(msg2),
|
||||
computeMessageHash(DefaultPubsubTopic, msg2),
|
||||
msg2.timestamp,
|
||||
)
|
||||
|
||||
assert putRes.isOk()
|
||||
|
||||
newNumMsgs = (await driver.getMessagesCount()).valueOr:
|
||||
raiseAssert "could not get num msgs correctly: " & $error
|
||||
|
||||
assert newNumMsgs == (initialNumMsgs + 1.int64),
|
||||
"wrong number of messages: " & $newNumMsgs
|
||||
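The "Insert true duplicated messages" test above relies on the digest and hash being deterministic: two messages built with identical fields produce the same digest and hash, so the second put succeeds but does not add a row. A small sketch of that property (same wakucore helpers as used above):

# Sketch: identical inputs produce an identical hash, which is what makes the dedup test work.
let ts = now()
let msgA = fakeWakuMessage(ts = ts)
let msgB = fakeWakuMessage(ts = ts)
doAssert computeMessageHash(DefaultPubsubTopic, msgA) ==
  computeMessageHash(DefaultPubsubTopic, msgB)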
tests/waku_archive_legacy/test_driver_postgres_query.nim (new file, 1931 lines)
File diff suppressed because it is too large
tests/waku_archive_legacy/test_driver_queue.nim (new file, 182 lines)
@ -0,0 +1,182 @@
|
||||
{.used.}
|
||||
|
||||
import std/options, stew/results, testutils/unittests
|
||||
import
|
||||
waku/waku_archive_legacy,
|
||||
waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.},
|
||||
waku/waku_archive_legacy/driver/queue_driver/index,
|
||||
waku/waku_core
|
||||
|
||||
# Helper functions
|
||||
|
||||
proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
|
||||
## Use i to generate an Index WakuMessage
|
||||
var data {.noinit.}: array[32, byte]
|
||||
for x in data.mitems:
|
||||
x = i.byte
|
||||
|
||||
let
|
||||
message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
|
||||
topic = "test-pubsub-topic"
|
||||
cursor = Index(
|
||||
receiverTime: Timestamp(i),
|
||||
senderTime: Timestamp(i),
|
||||
digest: MessageDigest(data: data),
|
||||
pubsubTopic: topic,
|
||||
hash: computeMessageHash(topic, message),
|
||||
)
|
||||
|
||||
(cursor, message)
|
||||
|
||||
proc getPrepopulatedTestQueue(unsortedSet: auto, capacity: int): QueueDriver =
|
||||
let driver = QueueDriver.new(capacity)
|
||||
|
||||
for i in unsortedSet:
|
||||
let (index, message) = genIndexedWakuMessage(i.int8)
|
||||
discard driver.add(index, message)
|
||||
|
||||
driver
|
||||
|
||||
procSuite "Sorted driver queue":
|
||||
test "queue capacity - add a message over the limit":
|
||||
## Given
|
||||
let capacity = 5
|
||||
let driver = QueueDriver.new(capacity)
|
||||
|
||||
## When
|
||||
# Fill up the queue
|
||||
for i in 1 .. capacity:
|
||||
let (index, message) = genIndexedWakuMessage(i.int8)
|
||||
require(driver.add(index, message).isOk())
|
||||
|
||||
# Add one more. Capacity should not be exceeded
|
||||
let (index, message) = genIndexedWakuMessage(capacity.int8 + 1)
|
||||
require(driver.add(index, message).isOk())
|
||||
|
||||
## Then
|
||||
check:
|
||||
driver.len == capacity
|
||||
|
||||
test "queue capacity - add message older than oldest in the queue":
|
||||
## Given
|
||||
let capacity = 5
|
||||
let driver = QueueDriver.new(capacity)
|
||||
|
||||
## When
|
||||
# Fill up the queue
|
||||
for i in 1 .. capacity:
|
||||
let (index, message) = genIndexedWakuMessage(i.int8)
|
||||
require(driver.add(index, message).isOk())
|
||||
|
||||
# Attempt to add message with older value than oldest in queue should fail
|
||||
let
|
||||
oldestTimestamp = driver.first().get().senderTime
|
||||
(index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1)
|
||||
addRes = driver.add(index, message)
|
||||
|
||||
## Then
|
||||
check:
|
||||
addRes.isErr()
|
||||
addRes.error() == "too_old"
|
||||
|
||||
check:
|
||||
driver.len == capacity
|
||||
|
||||
test "queue sort-on-insert":
|
||||
## Given
|
||||
let
|
||||
capacity = 5
|
||||
unsortedSet = [5, 1, 3, 2, 4]
|
||||
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
|
||||
|
||||
# Walk forward through the set and verify ascending order
|
||||
var (prevSmaller, _) = genIndexedWakuMessage(min(unsortedSet).int8 - 1)
|
||||
for i in driver.fwdIterator:
|
||||
let (index, _) = i
|
||||
check cmp(index, prevSmaller) > 0
|
||||
prevSmaller = index
|
||||
|
||||
# Walk backward through the set and verify descending order
|
||||
var (prevLarger, _) = genIndexedWakuMessage(max(unsortedSet).int8 + 1)
|
||||
for i in driver.bwdIterator:
|
||||
let (index, _) = i
|
||||
check cmp(index, prevLarger) < 0
|
||||
prevLarger = index
|
||||
|
||||
test "access first item from queue":
|
||||
## Given
|
||||
let
|
||||
capacity = 5
|
||||
unsortedSet = [5, 1, 3, 2, 4]
|
||||
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
|
||||
|
||||
## When
|
||||
let firstRes = driver.first()
|
||||
|
||||
## Then
|
||||
check:
|
||||
firstRes.isOk()
|
||||
|
||||
let first = firstRes.tryGet()
|
||||
check:
|
||||
first.senderTime == Timestamp(1)
|
||||
|
||||
test "get first item from empty queue should fail":
|
||||
## Given
|
||||
let capacity = 5
|
||||
let driver = QueueDriver.new(capacity)
|
||||
|
||||
## When
|
||||
let firstRes = driver.first()
|
||||
|
||||
## Then
|
||||
check:
|
||||
firstRes.isErr()
|
||||
firstRes.error() == "Not found"
|
||||
|
||||
test "access last item from queue":
|
||||
## Given
|
||||
let
|
||||
capacity = 5
|
||||
unsortedSet = [5, 1, 3, 2, 4]
|
||||
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
|
||||
|
||||
## When
|
||||
let lastRes = driver.last()
|
||||
|
||||
## Then
|
||||
check:
|
||||
lastRes.isOk()
|
||||
|
||||
let last = lastRes.tryGet()
|
||||
check:
|
||||
last.senderTime == Timestamp(5)
|
||||
|
||||
test "get last item from empty queue should fail":
|
||||
## Given
|
||||
let capacity = 5
|
||||
let driver = QueueDriver.new(capacity)
|
||||
|
||||
## When
|
||||
let lastRes = driver.last()
|
||||
|
||||
## Then
|
||||
check:
|
||||
lastRes.isErr()
|
||||
lastRes.error() == "Not found"
|
||||
|
||||
test "verify if queue contains an index":
|
||||
## Given
|
||||
let
|
||||
capacity = 5
|
||||
unsortedSet = [5, 1, 3, 2, 4]
|
||||
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
|
||||
|
||||
let
|
||||
(existingIndex, _) = genIndexedWakuMessage(4)
|
||||
(nonExistingIndex, _) = genIndexedWakuMessage(99)
|
||||
|
||||
## Then
|
||||
check:
|
||||
driver.contains(existingIndex) == true
|
||||
driver.contains(nonExistingIndex) == false
|
||||
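Taken together, the capacity tests above pin down the queue's eviction rule: once the queue is full, a newer entry pushes out the oldest one, while anything older than the current oldest entry is rejected with "too_old". A compact sketch of that interaction, reusing the genIndexedWakuMessage helper defined above:

# Sketch only; mirrors the behaviour asserted in the capacity tests above.
let driver = QueueDriver.new(2)
let (i1, m1) = genIndexedWakuMessage(1)
let (i2, m2) = genIndexedWakuMessage(2)
let (i3, m3) = genIndexedWakuMessage(3)

doAssert driver.add(i1, m1).isOk()
doAssert driver.add(i2, m2).isOk()
doAssert driver.add(i3, m3).isOk()  # over capacity: the entry for timestamp 1 is evicted

let res = driver.add(i1, m1)        # now older than the oldest kept entry (timestamp 2)
doAssert res.isErr() and res.error() == "too_old"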
tests/waku_archive_legacy/test_driver_queue_index.nim (new file, 219 lines)
@ -0,0 +1,219 @@
|
||||
{.used.}
|
||||
|
||||
import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto
|
||||
import waku/waku_core, waku/waku_archive_legacy/driver/queue_driver/index
|
||||
|
||||
var rng = initRand()
|
||||
|
||||
## Helpers
|
||||
|
||||
proc getTestTimestamp(offset = 0): Timestamp =
|
||||
let now = getNanosecondTime(epochTime() + float(offset))
|
||||
Timestamp(now)
|
||||
|
||||
proc hashFromStr(input: string): MDigest[256] =
|
||||
var ctx: sha256
|
||||
|
||||
ctx.init()
|
||||
ctx.update(input.toBytes())
|
||||
let hashed = ctx.finish()
|
||||
ctx.clear()
|
||||
|
||||
return hashed
|
||||
|
||||
proc randomHash(): WakuMessageHash =
|
||||
var hash: WakuMessageHash
|
||||
|
||||
for i in 0 ..< hash.len:
|
||||
let numb: byte = byte(rng.next())
|
||||
hash[i] = numb
|
||||
|
||||
hash
|
||||
|
||||
suite "Queue Driver - index":
|
||||
## Test vars
|
||||
let
|
||||
smallIndex1 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
hash: randomHash(),
|
||||
)
|
||||
smallIndex2 = Index(
|
||||
digest: hashFromStr("1234567"), # digest is less significant than senderTime
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
hash: randomHash(),
|
||||
)
|
||||
largeIndex1 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(9000),
|
||||
hash: randomHash(),
|
||||
) # only senderTime differ from smallIndex1
|
||||
largeIndex2 = Index(
|
||||
digest: hashFromStr("12345"), # only digest differs from smallIndex1
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
hash: randomHash(),
|
||||
)
|
||||
eqIndex1 = Index(
|
||||
digest: hashFromStr("0003"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(54321),
|
||||
hash: randomHash(),
|
||||
)
|
||||
eqIndex2 = Index(
|
||||
digest: hashFromStr("0003"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(54321),
|
||||
hash: randomHash(),
|
||||
)
|
||||
eqIndex3 = Index(
|
||||
digest: hashFromStr("0003"),
|
||||
receiverTime: getNanosecondTime(9999),
|
||||
# receiverTime difference should have no effect on comparisons
|
||||
senderTime: getNanosecondTime(54321),
|
||||
hash: randomHash(),
|
||||
)
|
||||
diffPsTopic = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(0),
|
||||
senderTime: getNanosecondTime(1000),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime1 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(1100),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime2 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(10000),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime3 = Index(
|
||||
digest: hashFromStr("1234"),
|
||||
receiverTime: getNanosecondTime(1200),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "aaaa",
|
||||
hash: randomHash(),
|
||||
)
|
||||
noSenderTime4 = Index(
|
||||
digest: hashFromStr("0"),
|
||||
receiverTime: getNanosecondTime(1200),
|
||||
senderTime: getNanosecondTime(0),
|
||||
pubsubTopic: "zzzz",
|
||||
hash: randomHash(),
|
||||
)
|
||||
|
||||
test "Index comparison":
|
||||
# Index comparison with senderTime diff
|
||||
check:
|
||||
cmp(smallIndex1, largeIndex1) < 0
|
||||
cmp(smallIndex2, largeIndex1) < 0
|
||||
|
||||
# Index comparison with digest diff
|
||||
check:
|
||||
cmp(smallIndex1, smallIndex2) < 0
|
||||
cmp(smallIndex1, largeIndex2) < 0
|
||||
cmp(smallIndex2, largeIndex2) > 0
|
||||
cmp(largeIndex1, largeIndex2) > 0
|
||||
|
||||
# Index comparison when equal
|
||||
check:
|
||||
cmp(eqIndex1, eqIndex2) == 0
|
||||
|
||||
# pubsubTopic difference
|
||||
check:
|
||||
cmp(smallIndex1, diffPsTopic) < 0
|
||||
|
||||
# receiverTime diff plays no role when senderTime set
|
||||
check:
|
||||
cmp(eqIndex1, eqIndex3) == 0
|
||||
|
||||
# receiverTime diff plays no role when digest/pubsubTopic equal
|
||||
check:
|
||||
cmp(noSenderTime1, noSenderTime2) == 0
|
||||
|
||||
# sort on receiverTime with no senderTimestamp and unequal pubsubTopic
|
||||
check:
|
||||
cmp(noSenderTime1, noSenderTime3) < 0
|
||||
|
||||
# sort on receiverTime with no senderTimestamp and unequal digest
|
||||
check:
|
||||
cmp(noSenderTime1, noSenderTime4) < 0
|
||||
|
||||
# sort on receiverTime if no senderTimestamp on only one side
|
||||
check:
|
||||
cmp(smallIndex1, noSenderTime1) < 0
|
||||
cmp(noSenderTime1, smallIndex1) > 0 # Test symmetry
|
||||
cmp(noSenderTime2, eqIndex3) < 0
|
||||
cmp(eqIndex3, noSenderTime2) > 0 # Test symmetry
|
||||
|
||||
test "Index equality":
|
||||
# Exactly equal
|
||||
check:
|
||||
eqIndex1 == eqIndex2
|
||||
|
||||
# Receiver time plays no role, even without sender time
|
||||
check:
|
||||
eqIndex1 == eqIndex3
|
||||
noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal
|
||||
noSenderTime1 != noSenderTime3 # pubsubTopics differ
|
||||
noSenderTime1 != noSenderTime4 # digests differ
|
||||
|
||||
# Unequal sender time
|
||||
check:
|
||||
smallIndex1 != largeIndex1
|
||||
|
||||
# Unequal digest
|
||||
check:
|
||||
smallIndex1 != smallIndex2
|
||||
|
||||
# Unequal hash and digest
|
||||
check:
|
||||
smallIndex1 != eqIndex1
|
||||
|
||||
# Unequal pubsubTopic
|
||||
check:
|
||||
smallIndex1 != diffPsTopic
|
||||
|
||||
test "Index computation should not be empty":
|
||||
## Given
|
||||
let ts = getTestTimestamp()
|
||||
let wm = WakuMessage(payload: @[byte 1, 2, 3], timestamp: ts)
|
||||
|
||||
## When
|
||||
let ts2 = getTestTimestamp() + 10
|
||||
let index = Index.compute(wm, ts2, DefaultContentTopic)
|
||||
|
||||
## Then
|
||||
check:
|
||||
index.digest.data.len != 0
|
||||
index.digest.data.len == 32 # sha2 output length in bytes
|
||||
index.receiverTime == ts2 # the receiver timestamp should be a non-zero value
|
||||
index.senderTime == ts
|
||||
index.pubsubTopic == DefaultContentTopic
|
||||
|
||||
test "Index digest of two identical messsage should be the same":
|
||||
## Given
|
||||
let topic = ContentTopic("test-content-topic")
|
||||
let
|
||||
wm1 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
|
||||
wm2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
|
||||
|
||||
## When
|
||||
let ts = getTestTimestamp()
|
||||
let
|
||||
index1 = Index.compute(wm1, ts, DefaultPubsubTopic)
|
||||
index2 = Index.compute(wm2, ts, DefaultPubsubTopic)
|
||||
|
||||
## Then
|
||||
check:
|
||||
index1.digest == index2.digest
|
||||
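The comparison and equality checks above pin down the ordering rules in prose form: senderTime is compared first, receiverTime only matters when a senderTime is missing, and the digest and pubsub topic act as tie-breakers. As an illustrative summary only (this is not the queue driver's actual cmp implementation, and the exact tie-break order is an assumption), the rule roughly reads:

# Illustrative sketch of the ordering exercised above; NOT the real index.nim code.
proc cmpIndexSketch(a, b: Index): int =
  # Prefer senderTime; fall back to receiverTime when senderTime is unset (0).
  let ta = if a.senderTime == 0: a.receiverTime else: a.senderTime
  let tb = if b.senderTime == 0: b.receiverTime else: b.senderTime
  if ta != tb:
    return cmp(ta, tb)
  # Tie-break on digest, then on pubsub topic; receiverTime plays no further role.
  let digestDiff = cmp(toHex(a.digest.data), toHex(b.digest.data))
  if digestDiff != 0:
    return digestDiff
  cmp(a.pubsubTopic, b.pubsubTopic)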
tests/waku_archive_legacy/test_driver_queue_pagination.nim (new file, 405 lines)
@ -0,0 +1,405 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[options, sequtils, algorithm], testutils/unittests, libp2p/protobuf/minprotobuf
|
||||
import
|
||||
waku/waku_archive_legacy,
|
||||
waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.},
|
||||
waku/waku_archive_legacy/driver/queue_driver/index,
|
||||
waku/waku_core,
|
||||
../testlib/wakucore
|
||||
|
||||
proc getTestQueueDriver(numMessages: int): QueueDriver =
|
||||
let testQueueDriver = QueueDriver.new(numMessages)
|
||||
|
||||
var data {.noinit.}: array[32, byte]
|
||||
for x in data.mitems:
|
||||
x = 1
|
||||
|
||||
for i in 0 ..< numMessages:
|
||||
let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
|
||||
|
||||
let index = Index(
|
||||
receiverTime: Timestamp(i),
|
||||
senderTime: Timestamp(i),
|
||||
digest: MessageDigest(data: data),
|
||||
hash: computeMessageHash(DefaultPubsubTopic, msg),
|
||||
)
|
||||
|
||||
discard testQueueDriver.add(index, msg)
|
||||
|
||||
return testQueueDriver
|
||||
|
||||
procSuite "Queue driver - pagination":
|
||||
let driver = getTestQueueDriver(10)
|
||||
let
|
||||
indexList: seq[Index] = toSeq(driver.fwdIterator()).mapIt(it[0])
|
||||
msgList: seq[WakuMessage] = toSeq(driver.fwdIterator()).mapIt(it[1])
|
||||
|
||||
test "Forward pagination - normal pagination":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 2
|
||||
cursor: Option[Index] = some(indexList[3])
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 2
|
||||
data == msgList[4 .. 5]
|
||||
|
||||
test "Forward pagination - initial pagination request with an empty cursor":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 2
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 2
|
||||
data == msgList[0 .. 1]
|
||||
|
||||
test "Forward pagination - initial pagination request with an empty cursor to fetch the entire history":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 13
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 10
|
||||
data == msgList[0 .. 9]
|
||||
|
||||
test "Forward pagination - empty msgList":
|
||||
## Given
|
||||
let driver = getTestQueueDriver(0)
|
||||
let
|
||||
pageSize: uint = 2
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Forward pagination - page size larger than the remaining messages":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 10
|
||||
cursor: Option[Index] = some(indexList[3])
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 6
|
||||
data == msgList[4 .. 9]
|
||||
|
||||
test "Forward pagination - page size larger than the maximum allowed page size":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = MaxPageSize + 1
|
||||
cursor: Option[Index] = some(indexList[3])
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
uint(data.len) <= MaxPageSize
|
||||
|
||||
test "Forward pagination - cursor pointing to the end of the message list":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 10
|
||||
cursor: Option[Index] = some(indexList[9])
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Forward pagination - invalid cursor":
|
||||
## Given
|
||||
let msg = fakeWakuMessage(payload = @[byte 10])
|
||||
let index = ArchiveCursor(
|
||||
pubsubTopic: DefaultPubsubTopic,
|
||||
senderTime: msg.timestamp,
|
||||
storeTime: msg.timestamp,
|
||||
digest: computeDigest(msg),
|
||||
).toIndex()
|
||||
|
||||
let
|
||||
pageSize: uint = 10
|
||||
cursor: Option[Index] = some(index)
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let error = page.tryError()
|
||||
check:
|
||||
error == QueueDriverErrorKind.INVALID_CURSOR
|
||||
|
||||
test "Forward pagination - initial paging query over a message list with one message":
|
||||
## Given
|
||||
let driver = getTestQueueDriver(1)
|
||||
let
|
||||
pageSize: uint = 10
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 1
|
||||
|
||||
test "Forward pagination - pagination over a message list with one message":
|
||||
## Given
|
||||
let driver = getTestQueueDriver(1)
|
||||
let
|
||||
pageSize: uint = 10
|
||||
cursor: Option[Index] = some(indexList[0])
|
||||
forward: bool = true
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Forward pagination - with pradicate":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 3
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward = true
|
||||
|
||||
proc onlyEvenTimes(index: Index, msg: WakuMessage): bool =
|
||||
msg.timestamp.int64 mod 2 == 0
|
||||
|
||||
## When
|
||||
let page = driver.getPage(
|
||||
pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyEvenTimes
|
||||
)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.mapIt(it.timestamp.int) == @[0, 2, 4]
|
||||
|
||||
test "Backward pagination - normal pagination":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 2
|
||||
cursor: Option[Index] = some(indexList[3])
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data == msgList[1 .. 2].reversed
|
||||
|
||||
test "Backward pagination - empty msgList":
|
||||
## Given
|
||||
let driver = getTestQueueDriver(0)
|
||||
let
|
||||
pageSize: uint = 2
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Backward pagination - initial pagination request with an empty cursor":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 2
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 2
|
||||
data == msgList[8 .. 9].reversed
|
||||
|
||||
test "Backward pagination - initial pagination request with an empty cursor to fetch the entire history":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 13
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 10
|
||||
data == msgList[0 .. 9].reversed
|
||||
|
||||
test "Backward pagination - page size larger than the remaining messages":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 5
|
||||
cursor: Option[Index] = some(indexList[3])
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data == msgList[0 .. 2].reversed
|
||||
|
||||
test "Backward pagination - page size larger than the Maximum allowed page size":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = MaxPageSize + 1
|
||||
cursor: Option[Index] = some(indexList[3])
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
uint(data.len) <= MaxPageSize
|
||||
|
||||
test "Backward pagination - cursor pointing to the begining of the message list":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 5
|
||||
cursor: Option[Index] = some(indexList[0])
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Backward pagination - invalid cursor":
|
||||
## Given
|
||||
let msg = fakeWakuMessage(payload = @[byte 10])
|
||||
let index = ArchiveCursor(
|
||||
pubsubTopic: DefaultPubsubTopic,
|
||||
senderTime: msg.timestamp,
|
||||
storeTime: msg.timestamp,
|
||||
digest: computeDigest(msg),
|
||||
).toIndex()
|
||||
|
||||
let
|
||||
pageSize: uint = 2
|
||||
cursor: Option[Index] = some(index)
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let error = page.tryError()
|
||||
check:
|
||||
error == QueueDriverErrorKind.INVALID_CURSOR
|
||||
|
||||
test "Backward pagination - initial paging query over a message list with one message":
|
||||
## Given
|
||||
let driver = getTestQueueDriver(1)
|
||||
let
|
||||
pageSize: uint = 10
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 1
|
||||
|
||||
test "Backward pagination - paging query over a message list with one message":
|
||||
## Given
|
||||
let driver = getTestQueueDriver(1)
|
||||
let
|
||||
pageSize: uint = 10
|
||||
cursor: Option[Index] = some(indexList[0])
|
||||
forward: bool = false
|
||||
|
||||
## When
|
||||
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.len == 0
|
||||
|
||||
test "Backward pagination - with predicate":
|
||||
## Given
|
||||
let
|
||||
pageSize: uint = 3
|
||||
cursor: Option[Index] = none(Index)
|
||||
forward = false
|
||||
|
||||
proc onlyOddTimes(index: Index, msg: WakuMessage): bool =
|
||||
msg.timestamp.int64 mod 2 != 0
|
||||
|
||||
## When
|
||||
let page = driver.getPage(
|
||||
pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyOddTimes
|
||||
)
|
||||
|
||||
## Then
|
||||
let data = page.tryGet().mapIt(it[1])
|
||||
check:
|
||||
data.mapIt(it.timestamp.int) == @[5, 7, 9].reversed
|
||||
tests/waku_archive_legacy/test_driver_queue_query.nim (new file, 1795 lines)
File diff suppressed because it is too large
tests/waku_archive_legacy/test_driver_sqlite.nim (new file, 60 lines)
@ -0,0 +1,60 @@
|
||||
{.used.}
|
||||
|
||||
import std/sequtils, testutils/unittests, chronos
|
||||
import
|
||||
waku/common/databases/db_sqlite,
|
||||
waku/waku_archive_legacy,
|
||||
waku/waku_archive_legacy/driver/sqlite_driver,
|
||||
waku/waku_core,
|
||||
../waku_archive_legacy/archive_utils,
|
||||
../testlib/common,
|
||||
../testlib/wakucore
|
||||
|
||||
suite "SQLite driver":
|
||||
test "init driver and database":
|
||||
## Given
|
||||
let database = newSqliteDatabase()
|
||||
|
||||
## When
|
||||
let driverRes = SqliteDriver.new(database)
|
||||
|
||||
## Then
|
||||
check:
|
||||
driverRes.isOk()
|
||||
|
||||
let driver: ArchiveDriver = driverRes.tryGet()
|
||||
check:
|
||||
not driver.isNil()
|
||||
|
||||
## Cleanup
|
||||
(waitFor driver.close()).expect("driver to close")
|
||||
|
||||
test "insert a message":
|
||||
## Given
|
||||
const contentTopic = "test-content-topic"
|
||||
const meta = "test meta"
|
||||
|
||||
let driver = newSqliteArchiveDriver()
|
||||
|
||||
let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta)
|
||||
let msgHash = computeMessageHash(DefaultPubsubTopic, msg)
|
||||
|
||||
## When
|
||||
let putRes = waitFor driver.put(
|
||||
DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp
|
||||
)
|
||||
|
||||
## Then
|
||||
check:
|
||||
putRes.isOk()
|
||||
|
||||
let storedMsg = (waitFor driver.getAllMessages()).tryGet()
|
||||
check:
|
||||
storedMsg.len == 1
|
||||
storedMsg.all do(item: auto) -> bool:
|
||||
let (pubsubTopic, actualMsg, _, _, hash) = item
|
||||
actualMsg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and
|
||||
hash == msgHash and msg.meta == actualMsg.meta
|
||||
|
||||
## Cleanup
|
||||
(waitFor driver.close()).expect("driver to close")
|
||||
tests/waku_archive_legacy/test_driver_sqlite_query.nim (new file, 1875 lines)
File diff suppressed because it is too large
tests/waku_archive_legacy/test_retention_policy.nim (new file, 169 lines)
@ -0,0 +1,169 @@
|
||||
{.used.}
|
||||
|
||||
import std/[sequtils, times], stew/results, testutils/unittests, chronos
|
||||
import
|
||||
waku/common/databases/db_sqlite,
|
||||
waku/waku_core,
|
||||
waku/waku_core/message/digest,
|
||||
waku/waku_archive_legacy,
|
||||
waku/waku_archive_legacy/driver/sqlite_driver,
|
||||
waku/waku_archive_legacy/retention_policy,
|
||||
waku/waku_archive_legacy/retention_policy/retention_policy_capacity,
|
||||
waku/waku_archive_legacy/retention_policy/retention_policy_size,
|
||||
../waku_archive_legacy/archive_utils,
|
||||
../testlib/common,
|
||||
../testlib/wakucore
|
||||
|
||||
suite "Waku Archive - Retention policy":
|
||||
test "capacity retention policy - windowed message deletion":
|
||||
## Given
|
||||
let
|
||||
capacity = 100
|
||||
excess = 60
|
||||
|
||||
let driver = newSqliteArchiveDriver()
|
||||
|
||||
let retentionPolicy: RetentionPolicy =
|
||||
CapacityRetentionPolicy.new(capacity = capacity)
|
||||
var putFutures = newSeq[Future[ArchiveDriverResult[void]]]()
|
||||
|
||||
## When
|
||||
for i in 1 .. capacity + excess:
|
||||
let msg = fakeWakuMessage(
|
||||
payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i)
|
||||
)
|
||||
putFutures.add(
|
||||
driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
)
|
||||
|
||||
discard waitFor allFinished(putFutures)
|
||||
|
||||
require (waitFor retentionPolicy.execute(driver)).isOk()
|
||||
|
||||
## Then
|
||||
let numMessages = (waitFor driver.getMessagesCount()).tryGet()
|
||||
check:
|
||||
# Expected number of messages is 120 because
|
||||
# (capacity = 100) + (half of the overflow window = 15) + (5 messages added after the last delete)
|
||||
# the window size changes when changing `const maxStoreOverflow = 1.3` in sqlite_store
|
||||
numMessages == 115
|
||||
|
||||
## Cleanup
|
||||
(waitFor driver.close()).expect("driver to close")
|
||||
|
||||
test "size retention policy - windowed message deletion":
|
||||
## Given
|
||||
let
|
||||
# in bytes
|
||||
sizeLimit: int64 = 52428
|
||||
excess = 325
|
||||
|
||||
let driver = newSqliteArchiveDriver()
|
||||
|
||||
let retentionPolicy: RetentionPolicy = SizeRetentionPolicy.new(size = sizeLimit)
|
||||
var putFutures = newSeq[Future[ArchiveDriverResult[void]]]()
|
||||
|
||||
# make sure that the db is empty before the test begins
|
||||
let storedMsg = (waitFor driver.getAllMessages()).tryGet()
|
||||
# if there are messages in db, empty them
|
||||
if storedMsg.len > 0:
|
||||
let now = getNanosecondTime(getTime().toUnixFloat())
|
||||
require (waitFor driver.deleteMessagesOlderThanTimestamp(ts = now)).isOk()
|
||||
require (waitFor driver.performVacuum()).isOk()
|
||||
|
||||
## When
|
||||
##
|
||||
|
||||
# create a number of messages so that the size of the DB overshoots
|
||||
for i in 1 .. excess:
|
||||
let msg = fakeWakuMessage(
|
||||
payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i)
|
||||
)
|
||||
putFutures.add(
|
||||
driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
)
|
||||
|
||||
# waitFor is used to synchronously wait for the futures to complete.
|
||||
discard waitFor allFinished(putFutures)
|
||||
|
||||
## Then
|
||||
# calculate the current database size
|
||||
let sizeDB = int64((waitFor driver.getDatabaseSize()).tryGet())
|
||||
|
||||
# NOTE: since vacuuming is done manually, this needs to be revisited if vacuuming is done automatically
|
||||
|
||||
# get the rows count pre-deletion
|
||||
let rowsCountBeforeDeletion = (waitFor driver.getMessagesCount()).tryGet()
|
||||
|
||||
# execute the policy; since the current db size overflows the limit, this results in row deletion
|
||||
require (sizeDB >= sizeLimit)
|
||||
require (waitFor retentionPolicy.execute(driver)).isOk()
|
||||
|
||||
# get the number of rows from the database
|
||||
let rowCountAfterDeletion = (waitFor driver.getMessagesCount()).tryGet()
|
||||
|
||||
check:
|
||||
# size of the database is used to check if the storage limit has been preserved
|
||||
# check the current database size with the limitSize provided by the user
|
||||
# it should be lower
|
||||
rowCountAfterDeletion <= rowsCountBeforeDeletion
|
||||
|
||||
## Cleanup
|
||||
(waitFor driver.close()).expect("driver to close")
|
||||
|
||||
test "store capacity should be limited":
|
||||
## Given
|
||||
const capacity = 5
|
||||
const contentTopic = "test-content-topic"
|
||||
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
retentionPolicy: RetentionPolicy =
|
||||
CapacityRetentionPolicy.new(capacity = capacity)
|
||||
|
||||
let messages =
|
||||
@[
|
||||
fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(0)),
|
||||
fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(1)),
|
||||
fakeWakuMessage(contentTopic = contentTopic, ts = ts(2)),
|
||||
fakeWakuMessage(contentTopic = contentTopic, ts = ts(3)),
|
||||
fakeWakuMessage(contentTopic = contentTopic, ts = ts(4)),
|
||||
fakeWakuMessage(contentTopic = contentTopic, ts = ts(5)),
|
||||
fakeWakuMessage(contentTopic = contentTopic, ts = ts(6)),
|
||||
]
|
||||
|
||||
## When
|
||||
for msg in messages:
|
||||
require (
|
||||
waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
).isOk()
|
||||
require (waitFor retentionPolicy.execute(driver)).isOk()
|
||||
|
||||
## Then
|
||||
let storedMsg = (waitFor driver.getAllMessages()).tryGet()
|
||||
check:
|
||||
storedMsg.len == capacity
|
||||
storedMsg.all do(item: auto) -> bool:
|
||||
let (pubsubTopic, msg, _, _, _) = item
|
||||
msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic
|
||||
|
||||
## Cleanup
|
||||
(waitFor driver.close()).expect("driver to close")
|
||||
tests/waku_archive_legacy/test_waku_archive.nim (new file, 543 lines)
@ -0,0 +1,543 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[options, sequtils],
|
||||
testutils/unittests,
|
||||
chronicles,
|
||||
chronos,
|
||||
libp2p/crypto/crypto
|
||||
|
||||
import
|
||||
waku/common/databases/db_sqlite,
|
||||
waku/common/paging,
|
||||
waku/waku_core,
|
||||
waku/waku_core/message/digest,
|
||||
waku/waku_archive_legacy/driver/sqlite_driver,
|
||||
waku/waku_archive_legacy,
|
||||
../waku_archive_legacy/archive_utils,
|
||||
../testlib/common,
|
||||
../testlib/wakucore
|
||||
|
||||
suite "Waku Archive - message handling":
|
||||
test "it should archive a valid and non-ephemeral message":
|
||||
## Setup
|
||||
let driver = newSqliteArchiveDriver()
|
||||
let archive = newWakuArchive(driver)
|
||||
|
||||
## Given
|
||||
let validSenderTime = now()
|
||||
let message = fakeWakuMessage(ephemeral = false, ts = validSenderTime)
|
||||
|
||||
## When
|
||||
waitFor archive.handleMessage(DefaultPubSubTopic, message)
|
||||
|
||||
## Then
|
||||
check:
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 1
|
||||
|
||||
test "it should not archive ephemeral messages":
|
||||
## Setup
|
||||
let driver = newSqliteArchiveDriver()
|
||||
let archive = newWakuArchive(driver)
|
||||
|
||||
## Given
|
||||
let msgList =
|
||||
@[
|
||||
fakeWakuMessage(ephemeral = false, payload = "1"),
|
||||
fakeWakuMessage(ephemeral = true, payload = "2"),
|
||||
fakeWakuMessage(ephemeral = true, payload = "3"),
|
||||
fakeWakuMessage(ephemeral = true, payload = "4"),
|
||||
fakeWakuMessage(ephemeral = false, payload = "5"),
|
||||
]
|
||||
|
||||
## When
|
||||
for msg in msgList:
|
||||
waitFor archive.handleMessage(DefaultPubsubTopic, msg)
|
||||
|
||||
## Then
|
||||
check:
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 2
|
||||
|
||||
test "it should archive a message with no sender timestamp":
|
||||
## Setup
|
||||
let driver = newSqliteArchiveDriver()
|
||||
let archive = newWakuArchive(driver)
|
||||
|
||||
## Given
|
||||
let invalidSenderTime = 0
|
||||
let message = fakeWakuMessage(ts = invalidSenderTime)
|
||||
|
||||
## When
|
||||
waitFor archive.handleMessage(DefaultPubSubTopic, message)
|
||||
|
||||
## Then
|
||||
check:
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 1
|
||||
|
||||
test "it should not archive a message with a sender time variance greater than max time variance (future)":
|
||||
## Setup
|
||||
let driver = newSqliteArchiveDriver()
|
||||
let archive = newWakuArchive(driver)
|
||||
|
||||
## Given
|
||||
let
|
||||
now = now()
|
||||
invalidSenderTime = now + MaxMessageTimestampVariance + 1_000_000_000
|
||||
# 1 second over the max variance
|
||||
|
||||
let message = fakeWakuMessage(ts = invalidSenderTime)
|
||||
|
||||
## When
|
||||
waitFor archive.handleMessage(DefaultPubSubTopic, message)
|
||||
|
||||
## Then
|
||||
check:
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 0
|
||||
|
||||
test "it should not archive a message with a sender time variance greater than max time variance (past)":
|
||||
## Setup
|
||||
let driver = newSqliteArchiveDriver()
|
||||
let archive = newWakuArchive(driver)
|
||||
|
||||
## Given
|
||||
let
|
||||
now = now()
|
||||
invalidSenderTime = now - MaxMessageTimestampVariance - 1
|
||||
|
||||
let message = fakeWakuMessage(ts = invalidSenderTime)
|
||||
|
||||
## When
|
||||
waitFor archive.handleMessage(DefaultPubSubTopic, message)
|
||||
|
||||
## Then
|
||||
check:
|
||||
(waitFor driver.getMessagesCount()).tryGet() == 0
|
||||
|
||||
procSuite "Waku Archive - find messages":
|
||||
## Fixtures
|
||||
let timeOrigin = now()
|
||||
let msgListA =
|
||||
@[
|
||||
fakeWakuMessage(
|
||||
@[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin)
|
||||
),
|
||||
fakeWakuMessage(
|
||||
@[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin)
|
||||
),
|
||||
]
|
||||
|
||||
let archiveA = block:
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
for msg in msgListA:
|
||||
require (
|
||||
waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
).isOk()
|
||||
|
||||
archive
|
||||
|
||||
test "handle query":
|
||||
## Setup
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
let topic = ContentTopic("1")
|
||||
let
|
||||
msg1 = fakeWakuMessage(contentTopic = topic)
|
||||
msg2 = fakeWakuMessage()
|
||||
|
||||
waitFor archive.handleMessage("foo", msg1)
|
||||
waitFor archive.handleMessage("foo", msg2)
|
||||
|
||||
## Given
|
||||
let req = ArchiveQuery(includeData: true, contentTopics: @[topic])
|
||||
|
||||
## When
|
||||
let queryRes = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
queryRes.isOk()
|
||||
|
||||
let response = queryRes.tryGet()
|
||||
check:
|
||||
response.messages.len == 1
|
||||
response.messages == @[msg1]
|
||||
|
||||
test "handle query with multiple content filters":
|
||||
## Setup
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
let
|
||||
topic1 = ContentTopic("1")
|
||||
topic2 = ContentTopic("2")
|
||||
topic3 = ContentTopic("3")
|
||||
|
||||
let
|
||||
msg1 = fakeWakuMessage(contentTopic = topic1)
|
||||
msg2 = fakeWakuMessage(contentTopic = topic2)
|
||||
msg3 = fakeWakuMessage(contentTopic = topic3)
|
||||
|
||||
waitFor archive.handleMessage("foo", msg1)
|
||||
waitFor archive.handleMessage("foo", msg2)
|
||||
waitFor archive.handleMessage("foo", msg3)
|
||||
|
||||
## Given
|
||||
let req = ArchiveQuery(includeData: true, contentTopics: @[topic1, topic3])
|
||||
|
||||
## When
|
||||
let queryRes = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
queryRes.isOk()
|
||||
|
||||
let response = queryRes.tryGet()
|
||||
check:
|
||||
response.messages.len() == 2
|
||||
response.messages.anyIt(it == msg1)
|
||||
response.messages.anyIt(it == msg3)
|
||||
|
||||
test "handle query with more than 10 content filters":
|
||||
## Setup
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
let queryTopics = toSeq(1 .. 15).mapIt(ContentTopic($it))
|
||||
|
||||
## Given
|
||||
let req = ArchiveQuery(contentTopics: queryTopics)
|
||||
|
||||
## When
|
||||
let queryRes = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
queryRes.isErr()
|
||||
|
||||
let error = queryRes.tryError()
|
||||
check:
|
||||
error.kind == ArchiveErrorKind.INVALID_QUERY
|
||||
error.cause == "too many content topics"
|
||||
|
||||
test "handle query with pubsub topic filter":
|
||||
## Setup
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
let
|
||||
pubsubTopic1 = "queried-topic"
|
||||
pubsubTopic2 = "non-queried-topic"
|
||||
|
||||
let
|
||||
contentTopic1 = ContentTopic("1")
|
||||
contentTopic2 = ContentTopic("2")
|
||||
contentTopic3 = ContentTopic("3")
|
||||
|
||||
let
|
||||
msg1 = fakeWakuMessage(contentTopic = contentTopic1)
|
||||
msg2 = fakeWakuMessage(contentTopic = contentTopic2)
|
||||
msg3 = fakeWakuMessage(contentTopic = contentTopic3)
|
||||
|
||||
waitFor archive.handleMessage(pubsubtopic1, msg1)
|
||||
waitFor archive.handleMessage(pubsubtopic2, msg2)
|
||||
waitFor archive.handleMessage(pubsubtopic2, msg3)
|
||||
|
||||
## Given
|
||||
# This query targets: pubsubtopic1 AND (contentTopic1 OR contentTopic3)
|
||||
let req = ArchiveQuery(
|
||||
includeData: true,
|
||||
pubsubTopic: some(pubsubTopic1),
|
||||
contentTopics: @[contentTopic1, contentTopic3],
|
||||
)
|
||||
|
||||
## When
|
||||
let queryRes = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
queryRes.isOk()
|
||||
|
||||
let response = queryRes.tryGet()
|
||||
check:
|
||||
response.messages.len() == 1
|
||||
response.messages.anyIt(it == msg1)
|
||||
|
||||
test "handle query with pubsub topic filter - no match":
|
||||
## Setup
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
let
|
||||
pubsubtopic1 = "queried-topic"
|
||||
pubsubtopic2 = "non-queried-topic"
|
||||
|
||||
let
|
||||
msg1 = fakeWakuMessage()
|
||||
msg2 = fakeWakuMessage()
|
||||
msg3 = fakeWakuMessage()
|
||||
|
||||
waitFor archive.handleMessage(pubsubtopic2, msg1)
|
||||
waitFor archive.handleMessage(pubsubtopic2, msg2)
|
||||
waitFor archive.handleMessage(pubsubtopic2, msg3)
|
||||
|
||||
## Given
|
||||
let req = ArchiveQuery(pubsubTopic: some(pubsubTopic1))
|
||||
|
||||
## When
|
||||
let res = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let response = res.tryGet()
|
||||
check:
|
||||
response.messages.len() == 0
|
||||
|
||||
test "handle query with pubsub topic filter - match the entire stored messages":
|
||||
## Setup
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
let pubsubTopic = "queried-topic"
|
||||
|
||||
let
|
||||
msg1 = fakeWakuMessage(payload = "TEST-1")
|
||||
msg2 = fakeWakuMessage(payload = "TEST-2")
|
||||
msg3 = fakeWakuMessage(payload = "TEST-3")
|
||||
|
||||
waitFor archive.handleMessage(pubsubTopic, msg1)
|
||||
waitFor archive.handleMessage(pubsubTopic, msg2)
|
||||
waitFor archive.handleMessage(pubsubTopic, msg3)
|
||||
|
||||
## Given
|
||||
let req = ArchiveQuery(includeData: true, pubsubTopic: some(pubsubTopic))
|
||||
|
||||
## When
|
||||
let res = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let response = res.tryGet()
|
||||
check:
|
||||
response.messages.len() == 3
|
||||
response.messages.anyIt(it == msg1)
|
||||
response.messages.anyIt(it == msg2)
|
||||
response.messages.anyIt(it == msg3)
|
||||
|
||||
test "handle query with forward pagination":
|
||||
## Given
|
||||
let req =
|
||||
ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.FORWARD)
|
||||
|
||||
## When
|
||||
var nextReq = req # copy
|
||||
|
||||
var pages = newSeq[seq[WakuMessage]](3)
|
||||
var cursors = newSeq[Option[ArchiveCursor]](3)
|
||||
|
||||
for i in 0 ..< 3:
|
||||
let res = waitFor archiveA.findMessages(nextReq)
|
||||
require res.isOk()
|
||||
|
||||
# Keep query response content
|
||||
let response = res.get()
|
||||
pages[i] = response.messages
|
||||
cursors[i] = response.cursor
|
||||
|
||||
# Set/update the request cursor
|
||||
nextReq.cursor = cursors[i]
|
||||
|
||||
## Then
|
||||
check:
|
||||
cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[3]))
|
||||
cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[7]))
|
||||
cursors[2] == none(ArchiveCursor)
|
||||
|
||||
check:
|
||||
pages[0] == msgListA[0 .. 3]
|
||||
pages[1] == msgListA[4 .. 7]
|
||||
pages[2] == msgListA[8 .. 9]
|
||||
|
||||
test "handle query with backward pagination":
|
||||
## Given
|
||||
let req =
|
||||
ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.BACKWARD)
|
||||
|
||||
## When
|
||||
var nextReq = req # copy
|
||||
|
||||
var pages = newSeq[seq[WakuMessage]](3)
|
||||
var cursors = newSeq[Option[ArchiveCursor]](3)
|
||||
|
||||
for i in 0 ..< 3:
|
||||
let res = waitFor archiveA.findMessages(nextReq)
|
||||
require res.isOk()
|
||||
|
||||
# Keep query response content
|
||||
let response = res.get()
|
||||
pages[i] = response.messages
|
||||
cursors[i] = response.cursor
|
||||
|
||||
# Set/update the request cursor
|
||||
nextReq.cursor = cursors[i]
|
||||
|
||||
## Then
|
||||
check:
|
||||
cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[6]))
|
||||
cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[2]))
|
||||
cursors[2] == none(ArchiveCursor)
|
||||
|
||||
check:
|
||||
pages[0] == msgListA[6 .. 9]
|
||||
pages[1] == msgListA[2 .. 5]
|
||||
pages[2] == msgListA[0 .. 1]
|
||||
|
||||
test "handle query with no paging info - auto-pagination":
|
||||
## Setup
|
||||
let
|
||||
driver = newSqliteArchiveDriver()
|
||||
archive = newWakuArchive(driver)
|
||||
|
||||
let msgList =
|
||||
@[
|
||||
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2")),
|
||||
fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 5], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 6], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 7], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 8], contentTopic = DefaultContentTopic),
|
||||
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2")),
|
||||
]
|
||||
|
||||
for msg in msgList:
|
||||
require (
|
||||
waitFor driver.put(
|
||||
DefaultPubsubTopic,
|
||||
msg,
|
||||
computeDigest(msg),
|
||||
computeMessageHash(DefaultPubsubTopic, msg),
|
||||
msg.timestamp,
|
||||
)
|
||||
).isOk()
|
||||
|
||||
## Given
|
||||
let req = ArchiveQuery(includeData: true, contentTopics: @[DefaultContentTopic])
|
||||
|
||||
## When
|
||||
let res = waitFor archive.findMessages(req)
|
||||
|
||||
## Then
|
||||
check:
|
||||
res.isOk()
|
||||
|
||||
let response = res.tryGet()
|
||||
check:
|
||||
## No pagination specified. Response will be auto-paginated with
|
||||
## up to MaxPageSize messages per page.
|
||||
response.messages.len() == 8
|
||||
response.cursor.isNone()
|
||||
|
||||
test "handle temporal history query with a valid time window":
|
||||
## Given
|
||||
let req = ArchiveQuery(
|
||||
includeData: true,
|
||||
contentTopics: @[ContentTopic("1")],
|
||||
startTime: some(ts(15, timeOrigin)),
|
||||
endTime: some(ts(55, timeOrigin)),
|
||||
direction: PagingDirection.FORWARD,
|
||||
)
|
||||
|
||||
## When
|
||||
let res = waitFor archiveA.findMessages(req)
|
||||
|
||||
## Then
|
||||
check res.isOk()
|
||||
|
||||
let response = res.tryGet()
|
||||
check:
|
||||
response.messages.len() == 2
|
||||
response.messages.mapIt(it.timestamp) == @[ts(30, timeOrigin), ts(50, timeOrigin)]
|
||||
|
||||
test "handle temporal history query with a zero-size time window":
|
||||
## A zero-size window results in an empty list of history messages
|
||||
## Given
|
||||
let req = ArchiveQuery(
|
||||
contentTopics: @[ContentTopic("1")],
|
||||
startTime: some(Timestamp(2)),
|
||||
endTime: some(Timestamp(2)),
|
||||
)
|
||||
|
||||
## When
|
||||
let res = waitFor archiveA.findMessages(req)
|
||||
|
||||
## Then
|
||||
check res.isOk()
|
||||
|
||||
let response = res.tryGet()
|
||||
check:
|
||||
response.messages.len == 0
|
||||
|
||||
test "handle temporal history query with an invalid time window":
|
||||
## A history query with an invalid time range results in an empty list of history messages
|
||||
## Given
|
||||
let req = ArchiveQuery(
|
||||
contentTopics: @[ContentTopic("1")],
|
||||
startTime: some(Timestamp(5)),
|
||||
endTime: some(Timestamp(2)),
|
||||
)
|
||||
|
||||
## When
|
||||
let res = waitFor archiveA.findMessages(req)
|
||||
|
||||
## Then
|
||||
check res.isOk()
|
||||
|
||||
let response = res.tryGet()
|
||||
check:
|
||||
response.messages.len == 0
|
||||
@ -62,12 +62,7 @@ procSuite "WakuNode - Store":
|
||||
|
||||
for kv in kvs:
|
||||
let message = kv.message.get()
|
||||
let msg_digest = computeDigest(message)
|
||||
require (
|
||||
waitFor driver.put(
|
||||
DefaultPubsubTopic, message, msg_digest, kv.messageHash, message.timestamp
|
||||
)
|
||||
).isOk()
|
||||
require (waitFor driver.put(kv.messageHash, DefaultPubsubTopic, message)).isOk()
|
||||
|
||||
driver
|
||||
|
||||
|
||||
@ -13,15 +13,15 @@ when defined(waku_exp_store_resume):
|
||||
import
|
||||
waku/[
|
||||
common/databases/db_sqlite,
|
||||
waku_archive/driver,
|
||||
waku_archive/driver/sqlite_driver/sqlite_driver,
|
||||
waku_archive_legacy/driver,
|
||||
waku_archive_legacy/driver/sqlite_driver/sqlite_driver,
|
||||
node/peer_manager,
|
||||
waku_core,
|
||||
waku_core/message/digest,
|
||||
waku_store_legacy,
|
||||
],
|
||||
../waku_store_legacy/store_utils,
|
||||
../waku_archive/archive_utils,
|
||||
../waku_archive_legacy/archive_utils,
|
||||
./testlib/common,
|
||||
./testlib/switch
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
stew/shims/net as stewNet,
|
||||
std/net,
|
||||
testutils/unittests,
|
||||
chronicles,
|
||||
chronos,
|
||||
@ -9,27 +9,22 @@ import
|
||||
libp2p/peerid,
|
||||
libp2p/multiaddress,
|
||||
libp2p/switch,
|
||||
libp2p/protocols/pubsub/rpc/messages,
|
||||
libp2p/protocols/pubsub/pubsub,
|
||||
libp2p/protocols/pubsub/gossipsub
|
||||
import
|
||||
waku/[
|
||||
common/databases/db_sqlite,
|
||||
common/paging,
|
||||
waku_core,
|
||||
waku_core/message/digest,
|
||||
waku_core/subscription,
|
||||
node/peer_manager,
|
||||
waku_archive,
|
||||
waku_archive/driver/sqlite_driver,
|
||||
waku_archive_legacy,
|
||||
waku_filter_v2,
|
||||
waku_filter_v2/client,
|
||||
waku_store_legacy,
|
||||
waku_node,
|
||||
],
|
||||
../waku_store_legacy/store_utils,
|
||||
../waku_archive/archive_utils,
|
||||
../testlib/common,
|
||||
../waku_archive_legacy/archive_utils,
|
||||
../testlib/wakucore,
|
||||
../testlib/wakunode
|
||||
|
||||
@ -54,7 +49,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
let driver = newSqliteArchiveDriver()
|
||||
|
||||
for msg in msgListA:
|
||||
let msg_digest = waku_archive.computeDigest(msg)
|
||||
let msg_digest = waku_archive_legacy.computeDigest(msg)
|
||||
let msg_hash = computeMessageHash(DefaultPubsubTopic, msg)
|
||||
require (
|
||||
waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp)
|
||||
@ -72,7 +67,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
|
||||
waitFor allFutures(client.start(), server.start())
|
||||
|
||||
let mountArchiveRes = server.mountArchive(archiveA)
|
||||
let mountArchiveRes = server.mountLegacyArchive(archiveA)
|
||||
assert mountArchiveRes.isOk(), mountArchiveRes.error
|
||||
|
||||
waitFor server.mountLegacyStore()
|
||||
@ -106,7 +101,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
|
||||
waitFor allFutures(client.start(), server.start())
|
||||
|
||||
let mountArchiveRes = server.mountArchive(archiveA)
|
||||
let mountArchiveRes = server.mountLegacyArchive(archiveA)
|
||||
assert mountArchiveRes.isOk(), mountArchiveRes.error
|
||||
|
||||
waitFor server.mountLegacyStore()
|
||||
@ -161,7 +156,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
|
||||
waitFor allFutures(client.start(), server.start())
|
||||
|
||||
let mountArchiveRes = server.mountArchive(archiveA)
|
||||
let mountArchiveRes = server.mountLegacyArchive(archiveA)
|
||||
assert mountArchiveRes.isOk(), mountArchiveRes.error
|
||||
|
||||
waitFor server.mountLegacyStore()
|
||||
@ -223,7 +218,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
waitFor filterSource.mountFilter()
|
||||
let driver = newSqliteArchiveDriver()
|
||||
|
||||
let mountArchiveRes = server.mountArchive(driver)
|
||||
let mountArchiveRes = server.mountLegacyArchive(driver)
|
||||
assert mountArchiveRes.isOk(), mountArchiveRes.error
|
||||
|
||||
waitFor server.mountLegacyStore()
|
||||
@ -241,7 +236,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
proc filterHandler(
|
||||
pubsubTopic: PubsubTopic, msg: WakuMessage
|
||||
) {.async, gcsafe, closure.} =
|
||||
await server.wakuArchive.handleMessage(pubsubTopic, msg)
|
||||
await server.wakuLegacyArchive.handleMessage(pubsubTopic, msg)
|
||||
filterFut.complete((pubsubTopic, msg))
|
||||
|
||||
server.wakuFilterClient.registerPushHandler(filterHandler)
|
||||
@ -286,7 +281,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
|
||||
waitFor allFutures(client.start(), server.start())
|
||||
|
||||
let mountArchiveRes = server.mountArchive(archiveA)
|
||||
let mountArchiveRes = server.mountLegacyArchive(archiveA)
|
||||
assert mountArchiveRes.isOk(), mountArchiveRes.error
|
||||
|
||||
waitFor server.mountLegacyStore()
|
||||
@ -302,7 +297,7 @@ procSuite "WakuNode - Store Legacy":
|
||||
pubsubTopic: "pubsubTopic",
|
||||
senderTime: now(),
|
||||
storeTime: now(),
|
||||
digest: waku_archive.MessageDigest(data: data),
|
||||
digest: waku_archive_legacy.MessageDigest(data: data),
|
||||
)
|
||||
|
||||
## Given
|
||||
|
||||
@ -40,16 +40,9 @@ logScope:
|
||||
proc put(
|
||||
store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage
|
||||
): Future[Result[void, string]] =
|
||||
let
|
||||
digest = computeDigest(message)
|
||||
msgHash = computeMessageHash(pubsubTopic, message)
|
||||
receivedTime =
|
||||
if message.timestamp > 0:
|
||||
message.timestamp
|
||||
else:
|
||||
getNowInNanosecondTime()
|
||||
let msgHash = computeMessageHash(pubsubTopic, message)
|
||||
|
||||
store.put(pubsubTopic, message, digest, msgHash, receivedTime)
|
||||
store.put(msgHash, pubsubTopic, message)
|
||||
|
||||
# Creates a new WakuNode
|
||||
proc testWakuNode(): WakuNode =
|
||||
|
||||
@ -335,6 +335,12 @@ type WakuNodeConf* = object
|
||||
desc: "Enable/disable waku store protocol", defaultValue: false, name: "store"
|
||||
.}: bool
|
||||
|
||||
legacyStore* {.
|
||||
desc: "Enable/disable waku store legacy mode",
|
||||
defaultValue: true,
|
||||
name: "legacy-store"
|
||||
.}: bool
|
||||
|
||||
storenode* {.
|
||||
desc: "Peer multiaddress to query for storage",
|
||||
defaultValue: "",
|
||||
|
||||
@ -17,7 +17,14 @@ import
|
||||
../waku_core,
|
||||
../waku_rln_relay,
|
||||
../discovery/waku_dnsdisc,
|
||||
../waku_archive,
|
||||
../waku_archive/retention_policy as policy,
|
||||
../waku_archive/retention_policy/builder as policy_builder,
|
||||
../waku_archive/driver as driver,
|
||||
../waku_archive/driver/builder as driver_builder,
|
||||
../waku_archive_legacy/retention_policy as legacy_policy,
|
||||
../waku_archive_legacy/retention_policy/builder as legacy_policy_builder,
|
||||
../waku_archive_legacy/driver as legacy_driver,
|
||||
../waku_archive_legacy/driver/builder as legacy_driver_builder,
|
||||
../waku_store,
|
||||
../waku_store/common as store_common,
|
||||
../waku_store_legacy,
|
||||
@ -28,8 +35,6 @@ import
|
||||
../node/peer_manager/peer_store/waku_peer_storage,
|
||||
../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations,
|
||||
../waku_lightpush/common,
|
||||
../waku_archive/driver/builder,
|
||||
../waku_archive/retention_policy/builder,
|
||||
../common/utils/parse_size_units,
|
||||
../common/ratelimit
|
||||
|
||||
@ -219,15 +224,36 @@ proc setupProtocols(
|
||||
return err("failed to mount waku RLN relay protocol: " & getCurrentExceptionMsg())
|
||||
|
||||
if conf.store:
|
||||
# Archive setup
|
||||
let archiveDriverRes = waitFor ArchiveDriver.new(
|
||||
if conf.legacyStore:
|
||||
let archiveDriverRes = waitFor legacy_driver.ArchiveDriver.new(
|
||||
conf.storeMessageDbUrl, conf.storeMessageDbVacuum, conf.storeMessageDbMigration,
|
||||
conf.storeMaxNumDbConnections, onFatalErrorAction,
|
||||
)
|
||||
if archiveDriverRes.isErr():
|
||||
return err("failed to setup legacy archive driver: " & archiveDriverRes.error)
|
||||
|
||||
let retPolicyRes =
|
||||
legacy_policy.RetentionPolicy.new(conf.storeMessageRetentionPolicy)
|
||||
if retPolicyRes.isErr():
|
||||
return err("failed to create retention policy: " & retPolicyRes.error)
|
||||
|
||||
let mountArcRes =
|
||||
node.mountLegacyArchive(archiveDriverRes.get(), retPolicyRes.get())
|
||||
if mountArcRes.isErr():
|
||||
return err("failed to mount waku legacy archive protocol: " & mountArcRes.error)
|
||||
|
||||
## For now we always mount the future archive driver but if the legacy one is mounted,
|
||||
## then the legacy will be in charge of performing the archiving.
|
||||
## Regarding storage, the only diff between the current/future archive driver and the legacy
|
||||
## one, is that the legacy stores an extra field: the id (message digest.)
|
||||
let archiveDriverRes = waitFor driver.ArchiveDriver.new(
|
||||
conf.storeMessageDbUrl, conf.storeMessageDbVacuum, conf.storeMessageDbMigration,
|
||||
conf.storeMaxNumDbConnections, onFatalErrorAction,
|
||||
)
|
||||
if archiveDriverRes.isErr():
|
||||
return err("failed to setup archive driver: " & archiveDriverRes.error)
|
||||
|
||||
let retPolicyRes = RetentionPolicy.new(conf.storeMessageRetentionPolicy)
|
||||
let retPolicyRes = policy.RetentionPolicy.new(conf.storeMessageRetentionPolicy)
|
||||
if retPolicyRes.isErr():
|
||||
return err("failed to create retention policy: " & retPolicyRes.error)
|
||||
|
||||
@ -235,20 +261,23 @@ proc setupProtocols(
|
||||
if mountArcRes.isErr():
|
||||
return err("failed to mount waku archive protocol: " & mountArcRes.error)
|
||||
|
||||
# Store setup
|
||||
let rateLimitSetting: RateLimitSetting =
|
||||
(conf.requestRateLimit, chronos.seconds(conf.requestRatePeriod))
|
||||
|
||||
if conf.legacyStore:
|
||||
# Store legacy setup
|
||||
try:
|
||||
await mountLegacyStore(node, rateLimitSetting)
|
||||
except CatchableError:
|
||||
return
|
||||
err("failed to mount waku legacy store protocol: " & getCurrentExceptionMsg())
|
||||
|
||||
# Store setup
|
||||
try:
|
||||
await mountStore(node, rateLimitSetting)
|
||||
except CatchableError:
|
||||
return err("failed to mount waku store protocol: " & getCurrentExceptionMsg())
|
||||
|
||||
try:
|
||||
await mountLegacyStore(node, rateLimitSetting)
|
||||
except CatchableError:
|
||||
return
|
||||
err("failed to mount waku legacy store protocol: " & getCurrentExceptionMsg())
|
||||
|
||||
mountStoreClient(node)
|
||||
if conf.storenode != "":
|
||||
let storeNode = parsePeerInfo(conf.storenode)
|
||||
|
||||
@ -27,6 +27,7 @@ import
|
||||
../waku_core/topics/sharding,
|
||||
../waku_relay,
|
||||
../waku_archive,
|
||||
../waku_archive_legacy,
|
||||
../waku_store_legacy/protocol as legacy_store,
|
||||
../waku_store_legacy/client as legacy_store_client,
|
||||
../waku_store_legacy/common as legacy_store_common,
|
||||
@ -87,7 +88,8 @@ type
|
||||
peerManager*: PeerManager
|
||||
switch*: Switch
|
||||
wakuRelay*: WakuRelay
|
||||
wakuArchive*: WakuArchive
|
||||
wakuArchive*: waku_archive.WakuArchive
|
||||
wakuLegacyArchive*: waku_archive_legacy.WakuArchive
|
||||
wakuLegacyStore*: legacy_store.WakuStore
|
||||
wakuLegacyStoreClient*: legacy_store_client.WakuStoreClient
|
||||
wakuStore*: store.WakuStore
|
||||
@ -244,6 +246,11 @@ proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) =
|
||||
await node.wakuFilter.handleMessage(topic, msg)
|
||||
|
||||
proc archiveHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
|
||||
if not node.wakuLegacyArchive.isNil():
|
||||
## we try to store with legacy archive
|
||||
await node.wakuLegacyArchive.handleMessage(topic, msg)
|
||||
return
|
||||
|
||||
if node.wakuArchive.isNil():
|
||||
return
|
||||
|
||||
@ -675,25 +682,45 @@ proc filterUnsubscribeAll*(
|
||||
|
||||
## Waku archive
|
||||
proc mountArchive*(
|
||||
node: WakuNode, driver: ArchiveDriver, retentionPolicy = none(RetentionPolicy)
|
||||
node: WakuNode,
|
||||
driver: waku_archive.ArchiveDriver,
|
||||
retentionPolicy = none(waku_archive.RetentionPolicy),
|
||||
): Result[void, string] =
|
||||
node.wakuArchive = WakuArchive.new(driver = driver, retentionPolicy = retentionPolicy).valueOr:
|
||||
node.wakuArchive = waku_archive.WakuArchive.new(
|
||||
driver = driver, retentionPolicy = retentionPolicy
|
||||
).valueOr:
|
||||
return err("error in mountArchive: " & error)
|
||||
|
||||
node.wakuArchive.start()
|
||||
|
||||
return ok()
|
||||
|
||||
proc mountLegacyArchive*(
|
||||
node: WakuNode,
|
||||
driver: waku_archive_legacy.ArchiveDriver,
|
||||
retentionPolicy = none(waku_archive_legacy.RetentionPolicy),
|
||||
): Result[void, string] =
|
||||
node.wakuLegacyArchive = waku_archive_legacy.WakuArchive.new(
|
||||
driver = driver, retentionPolicy = retentionPolicy
|
||||
).valueOr:
|
||||
return err("error in mountLegacyArchive: " & error)
|
||||
|
||||
node.wakuLegacyArchive.start()
|
||||
|
||||
return ok()
|
||||
|
||||
## Legacy Waku Store
|
||||
|
||||
# TODO: Review this mapping logic. Maybe move it to the application code
|
||||
proc toArchiveQuery(request: legacy_store_common.HistoryQuery): ArchiveQuery =
|
||||
ArchiveQuery(
|
||||
proc toArchiveQuery(
|
||||
request: legacy_store_common.HistoryQuery
|
||||
): waku_archive_legacy.ArchiveQuery =
|
||||
waku_archive_legacy.ArchiveQuery(
|
||||
pubsubTopic: request.pubsubTopic,
|
||||
contentTopics: request.contentTopics,
|
||||
cursor: request.cursor.map(
|
||||
proc(cursor: HistoryCursor): ArchiveCursor =
|
||||
ArchiveCursor(
|
||||
proc(cursor: HistoryCursor): waku_archive_legacy.ArchiveCursor =
|
||||
waku_archive_legacy.ArchiveCursor(
|
||||
pubsubTopic: cursor.pubsubTopic,
|
||||
senderTime: cursor.senderTime,
|
||||
storeTime: cursor.storeTime,
|
||||
@ -707,11 +734,14 @@ proc toArchiveQuery(request: legacy_store_common.HistoryQuery): ArchiveQuery =
|
||||
)
|
||||
|
||||
# TODO: Review this mapping logic. Maybe move it to the application code
|
||||
proc toHistoryResult*(res: ArchiveResult): legacy_store_common.HistoryResult =
|
||||
proc toHistoryResult*(
|
||||
res: waku_archive_legacy.ArchiveResult
|
||||
): legacy_store_common.HistoryResult =
|
||||
if res.isErr():
|
||||
let error = res.error
|
||||
case res.error.kind
|
||||
of ArchiveErrorKind.DRIVER_ERROR, ArchiveErrorKind.INVALID_QUERY:
|
||||
of waku_archive_legacy.ArchiveErrorKind.DRIVER_ERROR,
|
||||
waku_archive_legacy.ArchiveErrorKind.INVALID_QUERY:
|
||||
err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: res.error.cause))
|
||||
else:
|
||||
err(HistoryError(kind: HistoryErrorKind.UNKNOWN))
|
||||
@ -721,7 +751,7 @@ proc toHistoryResult*(res: ArchiveResult): legacy_store_common.HistoryResult =
|
||||
HistoryResponse(
|
||||
messages: response.messages,
|
||||
cursor: response.cursor.map(
|
||||
proc(cursor: ArchiveCursor): HistoryCursor =
|
||||
proc(cursor: waku_archive_legacy.ArchiveCursor): HistoryCursor =
|
||||
HistoryCursor(
|
||||
pubsubTopic: cursor.pubsubTopic,
|
||||
senderTime: cursor.senderTime,
|
||||
@ -737,7 +767,7 @@ proc mountLegacyStore*(
|
||||
) {.async.} =
|
||||
info "mounting waku legacy store protocol"
|
||||
|
||||
if node.wakuArchive.isNil():
|
||||
if node.wakuLegacyArchive.isNil():
|
||||
error "failed to mount waku legacy store protocol", error = "waku archive not set"
|
||||
return
|
||||
|
||||
@ -750,7 +780,7 @@ proc mountLegacyStore*(
|
||||
return err(error)
|
||||
|
||||
let request = request.toArchiveQuery()
|
||||
let response = await node.wakuArchive.findMessagesV2(request)
|
||||
let response = await node.wakuLegacyArchive.findMessagesV2(request)
|
||||
return response.toHistoryResult()
|
||||
|
||||
node.wakuLegacyStore = legacy_store.WakuStore.new(
|
||||
@ -831,8 +861,8 @@ when defined(waku_exp_store_resume):
|
||||
|
||||
## Waku Store
|
||||
|
||||
proc toArchiveQuery(request: StoreQueryRequest): ArchiveQuery =
|
||||
var query = ArchiveQuery()
|
||||
proc toArchiveQuery(request: StoreQueryRequest): waku_archive.ArchiveQuery =
|
||||
var query = waku_archive.ArchiveQuery()
|
||||
|
||||
query.includeData = request.includeData
|
||||
query.pubsubTopic = request.pubsubTopic
|
||||
@ -840,12 +870,7 @@ proc toArchiveQuery(request: StoreQueryRequest): ArchiveQuery =
|
||||
query.startTime = request.startTime
|
||||
query.endTime = request.endTime
|
||||
query.hashes = request.messageHashes
|
||||
|
||||
if request.paginationCursor.isSome():
|
||||
var cursor = ArchiveCursor()
|
||||
cursor.hash = request.paginationCursor.get()
|
||||
query.cursor = some(cursor)
|
||||
|
||||
query.cursor = request.paginationCursor
|
||||
query.direction = request.paginationForward
|
||||
|
||||
if request.paginationLimit.isSome():
|
||||
@ -853,7 +878,7 @@ proc toArchiveQuery(request: StoreQueryRequest): ArchiveQuery =
|
||||
|
||||
return query
|
||||
|
||||
proc toStoreResult(res: ArchiveResult): StoreQueryResult =
|
||||
proc toStoreResult(res: waku_archive.ArchiveResult): StoreQueryResult =
|
||||
let response = res.valueOr:
|
||||
return err(StoreError.new(300, "archive error: " & $error))
|
||||
|
||||
@ -873,8 +898,7 @@ proc toStoreResult(res: ArchiveResult): StoreQueryResult =
|
||||
res.messages[i].message = some(response.messages[i])
|
||||
res.messages[i].pubsubTopic = some(response.topics[i])
|
||||
|
||||
if response.cursor.isSome():
|
||||
res.paginationCursor = some(response.cursor.get().hash)
|
||||
res.paginationCursor = response.cursor
|
||||
|
||||
return ok(res)
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[times, options, sequtils, strutils, algorithm],
|
||||
std/[times, options, sequtils, algorithm],
|
||||
stew/[results, byteutils],
|
||||
chronicles,
|
||||
chronos,
|
||||
@ -52,9 +52,6 @@ proc validate*(msg: WakuMessage): Result[void, string] =
|
||||
# Ephemeral message, do not store
|
||||
return
|
||||
|
||||
if msg.timestamp == 0:
|
||||
return ok()
|
||||
|
||||
let
|
||||
now = getNanosecondTime(getTime().toUnixFloat())
|
||||
lowerBound = now - MaxMessageTimestampVariance
|
||||
@ -89,38 +86,24 @@ proc handleMessage*(
|
||||
waku_archive_errors.inc(labelValues = [error])
|
||||
return
|
||||
|
||||
let
|
||||
msgDigest = computeDigest(msg)
|
||||
msgDigestHex = msgDigest.data.to0xHex()
|
||||
msgHash = computeMessageHash(pubsubTopic, msg)
|
||||
msgHashHex = msgHash.to0xHex()
|
||||
msgTimestamp =
|
||||
if msg.timestamp > 0:
|
||||
msg.timestamp
|
||||
else:
|
||||
getNanosecondTime(getTime().toUnixFloat())
|
||||
|
||||
notice "archive handling message",
|
||||
msg_hash = msgHashHex,
|
||||
pubsubTopic = pubsubTopic,
|
||||
contentTopic = msg.contentTopic,
|
||||
msgTimestamp = msg.timestamp,
|
||||
usedTimestamp = msgTimestamp,
|
||||
digest = msgDigestHex
|
||||
let msgHash = computeMessageHash(pubsubTopic, msg)
|
||||
|
||||
let insertStartTime = getTime().toUnixFloat()
|
||||
|
||||
(await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr:
|
||||
(await self.driver.put(msgHash, pubsubTopic, msg)).isOkOr:
|
||||
waku_archive_errors.inc(labelValues = [insertFailure])
|
||||
error "failed to insert message", error = error
|
||||
trace "failed to insert message",
|
||||
hash_hash = msgHash.to0xHex(),
|
||||
pubsubTopic = pubsubTopic,
|
||||
contentTopic = msg.contentTopic,
|
||||
timestamp = msg.timestamp,
|
||||
error = error
|
||||
|
||||
notice "message archived",
|
||||
msg_hash = msgHashHex,
|
||||
hash_hash = msgHash.to0xHex(),
|
||||
pubsubTopic = pubsubTopic,
|
||||
contentTopic = msg.contentTopic,
|
||||
msgTimestamp = msg.timestamp,
|
||||
usedTimestamp = msgTimestamp,
|
||||
digest = msgDigestHex
|
||||
timestamp = msg.timestamp
|
||||
|
||||
let insertDuration = getTime().toUnixFloat() - insertStartTime
|
||||
waku_archive_insert_duration_seconds.observe(insertDuration)
|
||||
@ -130,6 +113,16 @@ proc findMessages*(
|
||||
): Future[ArchiveResult] {.async, gcsafe.} =
|
||||
## Search the archive to return a single page of messages matching the query criteria
|
||||
|
||||
if query.cursor.isSome():
|
||||
let cursor = query.cursor.get()
|
||||
|
||||
if cursor.len != 32:
|
||||
return
|
||||
err(ArchiveError.invalidQuery("invalid cursor hash length: " & $cursor.len))
|
||||
|
||||
if cursor == EmptyWakuMessageHash:
|
||||
return err(ArchiveError.invalidQuery("all zeroes cursor hash"))
|
||||
|
||||
let maxPageSize =
|
||||
if query.pageSize <= 0:
|
||||
DefaultPageSize
|
||||
@ -138,18 +131,12 @@ proc findMessages*(
|
||||
|
||||
let isAscendingOrder = query.direction.into()
|
||||
|
||||
if query.contentTopics.len > 100:
|
||||
return err(ArchiveError.invalidQuery("too many content topics"))
|
||||
|
||||
if query.cursor.isSome() and query.cursor.get().hash.len != 32:
|
||||
return err(ArchiveError.invalidQuery("invalid cursor hash length"))
|
||||
|
||||
let queryStartTime = getTime().toUnixFloat()
|
||||
|
||||
let rows = (
|
||||
await self.driver.getMessages(
|
||||
includeData = query.includeData,
|
||||
contentTopic = query.contentTopics,
|
||||
contentTopics = query.contentTopics,
|
||||
pubsubTopic = query.pubsubTopic,
|
||||
cursor = query.cursor,
|
||||
startTime = query.startTime,
|
||||
@ -160,7 +147,6 @@ proc findMessages*(
|
||||
)
|
||||
).valueOr:
|
||||
return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))
|
||||
|
||||
let queryDuration = getTime().toUnixFloat() - queryStartTime
|
||||
waku_archive_query_duration_seconds.observe(queryDuration)
|
||||
|
||||
@ -172,115 +158,33 @@ proc findMessages*(
|
||||
if rows.len == 0:
|
||||
return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor))
|
||||
|
||||
## Messages
|
||||
let pageSize = min(rows.len, int(maxPageSize))
|
||||
|
||||
#TODO once store v2 is removed, unzip instead of 2x map
|
||||
#TODO once store v2 is removed, update driver to not return messages when not needed
|
||||
hashes = rows[0 ..< pageSize].mapIt(it[0])
|
||||
|
||||
if query.includeData:
|
||||
topics = rows[0 ..< pageSize].mapIt(it[0])
|
||||
messages = rows[0 ..< pageSize].mapIt(it[1])
|
||||
topics = rows[0 ..< pageSize].mapIt(it[1])
|
||||
messages = rows[0 ..< pageSize].mapIt(it[2])
|
||||
|
||||
hashes = rows[0 ..< pageSize].mapIt(it[4])
|
||||
|
||||
## Cursor
|
||||
if rows.len > int(maxPageSize):
|
||||
## Build last message cursor
|
||||
## The cursor is built from the last message INCLUDED in the response
|
||||
## (i.e. the second last message in the rows list)
|
||||
|
||||
#TODO Once Store v2 is removed keep only message and hash
|
||||
let (pubsubTopic, message, digest, storeTimestamp, hash) = rows[^2]
|
||||
let (hash, _, _) = rows[^2]
|
||||
|
||||
#TODO Once Store v2 is removed, the cursor becomes the hash of the last message
|
||||
cursor = some(
|
||||
ArchiveCursor(
|
||||
digest: MessageDigest.fromBytes(digest),
|
||||
storeTime: storeTimestamp,
|
||||
sendertime: message.timestamp,
|
||||
pubsubTopic: pubsubTopic,
|
||||
hash: hash,
|
||||
)
|
||||
)
|
||||
cursor = some(hash)
|
||||
|
||||
# All messages MUST be returned in chronological order
|
||||
# Messages MUST be returned in chronological order
|
||||
if not isAscendingOrder:
|
||||
reverse(hashes)
|
||||
reverse(messages)
|
||||
reverse(topics)
|
||||
reverse(messages)
|
||||
|
||||
return ok(
|
||||
ArchiveResponse(hashes: hashes, messages: messages, topics: topics, cursor: cursor)
|
||||
ArchiveResponse(cursor: cursor, topics: topics, hashes: hashes, messages: messages)
|
||||
)
|
||||
|
||||
proc findMessagesV2*(
|
||||
self: WakuArchive, query: ArchiveQuery
|
||||
): Future[ArchiveResult] {.async, deprecated, gcsafe.} =
|
||||
## Search the archive to return a single page of messages matching the query criteria
|
||||
|
||||
let maxPageSize =
|
||||
if query.pageSize <= 0:
|
||||
DefaultPageSize
|
||||
else:
|
||||
min(query.pageSize, MaxPageSize)
|
||||
|
||||
let isAscendingOrder = query.direction.into()
|
||||
|
||||
if query.contentTopics.len > 100:
|
||||
return err(ArchiveError.invalidQuery("too many content topics"))
|
||||
|
||||
let queryStartTime = getTime().toUnixFloat()
|
||||
|
||||
let rows = (
|
||||
await self.driver.getMessagesV2(
|
||||
contentTopic = query.contentTopics,
|
||||
pubsubTopic = query.pubsubTopic,
|
||||
cursor = query.cursor,
|
||||
startTime = query.startTime,
|
||||
endTime = query.endTime,
|
||||
maxPageSize = maxPageSize + 1,
|
||||
ascendingOrder = isAscendingOrder,
|
||||
)
|
||||
).valueOr:
|
||||
return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))
|
||||
|
||||
let queryDuration = getTime().toUnixFloat() - queryStartTime
|
||||
waku_archive_query_duration_seconds.observe(queryDuration)
|
||||
|
||||
var messages = newSeq[WakuMessage]()
|
||||
var cursor = none(ArchiveCursor)
|
||||
|
||||
if rows.len == 0:
|
||||
return ok(ArchiveResponse(messages: messages, cursor: cursor))
|
||||
|
||||
## Messages
|
||||
let pageSize = min(rows.len, int(maxPageSize))
|
||||
|
||||
messages = rows[0 ..< pageSize].mapIt(it[1])
|
||||
|
||||
## Cursor
|
||||
if rows.len > int(maxPageSize):
|
||||
## Build last message cursor
|
||||
## The cursor is built from the last message INCLUDED in the response
|
||||
## (i.e. the second last message in the rows list)
|
||||
|
||||
let (pubsubTopic, message, digest, storeTimestamp, _) = rows[^2]
|
||||
|
||||
cursor = some(
|
||||
ArchiveCursor(
|
||||
digest: MessageDigest.fromBytes(digest),
|
||||
storeTime: storeTimestamp,
|
||||
sendertime: message.timestamp,
|
||||
pubsubTopic: pubsubTopic,
|
||||
)
|
||||
)
|
||||
|
||||
# All messages MUST be returned in chronological order
|
||||
if not isAscendingOrder:
|
||||
reverse(messages)
|
||||
|
||||
return ok(ArchiveResponse(messages: messages, cursor: cursor))
|
||||
|
||||
proc periodicRetentionPolicy(self: WakuArchive) {.async.} =
|
||||
debug "executing message retention policy"
|
||||
|
||||
|
||||
@ -3,44 +3,13 @@
|
||||
import std/options, results, stew/byteutils, stew/arrayops, nimcrypto/sha2
|
||||
import ../waku_core, ../common/paging
|
||||
|
||||
## Waku message digest
|
||||
|
||||
type MessageDigest* = MDigest[256]
|
||||
|
||||
proc fromBytes*(T: type MessageDigest, src: seq[byte]): T =
|
||||
var data: array[32, byte]
|
||||
|
||||
let byteCount = copyFrom[byte](data, src)
|
||||
|
||||
assert byteCount == 32
|
||||
|
||||
return MessageDigest(data: data)
|
||||
|
||||
proc computeDigest*(msg: WakuMessage): MessageDigest =
|
||||
var ctx: sha256
|
||||
ctx.init()
|
||||
defer:
|
||||
ctx.clear()
|
||||
|
||||
ctx.update(msg.contentTopic.toBytes())
|
||||
ctx.update(msg.payload)
|
||||
|
||||
# Computes the hash
|
||||
return ctx.finish()
|
||||
|
||||
## Public API types
|
||||
|
||||
type
|
||||
#TODO Once Store v2 is removed, the cursor becomes the hash of the last message
|
||||
ArchiveCursor* = object
|
||||
digest*: MessageDigest
|
||||
storeTime*: Timestamp
|
||||
senderTime*: Timestamp
|
||||
pubsubTopic*: PubsubTopic
|
||||
hash*: WakuMessageHash
|
||||
ArchiveCursor* = WakuMessageHash
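With the cursor reduced to the message hash, paging amounts to: run a query, take the returned cursor (the hash of the last message in the page) and feed it back into the next request. A minimal self-contained sketch of that flow; Hash, Msg and page are simplified stand-ins, not the waku_archive API:

import std/options

type
  Hash = string            # stand-in for WakuMessageHash
  Msg = object
    hash: Hash
    payload: string

proc page(store: seq[Msg], cursor: Option[Hash], pageSize: int): (seq[Msg], Option[Hash]) =
  # Return up to `pageSize` messages following `cursor`, plus the cursor for
  # the next page: the hash of the last message included in this page.
  var start = 0
  if cursor.isSome():
    for i, m in store:
      if m.hash == cursor.get():
        start = i + 1
        break
  let stop = min(start + pageSize, store.len)
  let msgs = store[start ..< stop]
  let next =
    if stop < store.len:
      some(store[stop - 1].hash)
    else:
      none(Hash)
  (msgs, next)

when isMainModule:
  let store = @[
    Msg(hash: "a", payload: "1"), Msg(hash: "b", payload: "2"),
    Msg(hash: "c", payload: "3"), Msg(hash: "d", payload: "4")
  ]
  let (page1, cursor1) = page(store, none(Hash), 2)
  doAssert page1.len == 2 and cursor1 == some("b")
  let (page2, cursor2) = page(store, cursor1, 2)
  doAssert page2[0].hash == "c" and cursor2.isNone()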
|
||||
|
||||
ArchiveQuery* = object
|
||||
includeData*: bool # indicate if messages should be returned in addition to hashes.
|
||||
includeData*: bool
|
||||
pubsubTopic*: Option[PubsubTopic]
|
||||
contentTopics*: seq[ContentTopic]
|
||||
cursor*: Option[ArchiveCursor]
|
||||
|
||||
@ -9,18 +9,15 @@ type
|
||||
ArchiveDriverResult*[T] = Result[T, string]
|
||||
ArchiveDriver* = ref object of RootObj
|
||||
|
||||
#TODO Once Store v2 is removed keep only messages and hashes
|
||||
type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)
|
||||
type ArchiveRow* = (WakuMessageHash, PubsubTopic, WakuMessage)
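The slimmed-down row shape and put signature can be pictured with a toy in-memory driver: rows are keyed by message hash, and the archive no longer hands the driver a digest or a separate received time. Everything below (ToyDriver, ToyHash, ToyRow) is an illustrative stand-in, not the real ArchiveDriver base class:

import std/tables

type
  ToyHash = string                                 # stand-in for WakuMessageHash
  ToyRow = tuple[pubsubTopic: string, payload: string]
  ToyDriver = ref object
    rows: Table[ToyHash, ToyRow]

proc put(driver: ToyDriver, messageHash: ToyHash, pubsubTopic, payload: string) =
  driver.rows[messageHash] = (pubsubTopic, payload)

proc getAllMessages(driver: ToyDriver): seq[(ToyHash, string, string)] =
  for hash, row in driver.rows:
    result.add((hash, row.pubsubTopic, row.payload))

when isMainModule:
  let driver = ToyDriver()
  driver.put("0xabc", "/waku/2/rs/0/0", "hello")
  doAssert driver.getAllMessages().len == 1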
|
||||
|
||||
# ArchiveDriver interface
|
||||
|
||||
method put*(
|
||||
driver: ArchiveDriver,
|
||||
messageHash: WakuMessageHash,
|
||||
pubsubTopic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
digest: MessageDigest,
|
||||
messageHash: WakuMessageHash,
|
||||
receivedTime: Timestamp,
|
||||
): Future[ArchiveDriverResult[void]] {.base, async.} =
|
||||
discard
|
||||
|
||||
@ -29,22 +26,10 @@ method getAllMessages*(
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getMessagesV2*(
|
||||
driver: ArchiveDriver,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
endTime = none(Timestamp),
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, deprecated, async.} =
|
||||
discard
|
||||
|
||||
method getMessages*(
|
||||
driver: ArchiveDriver,
|
||||
includeData = false,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
includeData = true,
|
||||
contentTopics = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
|
||||
@ -9,7 +9,7 @@ import
|
||||
logScope:
|
||||
topics = "waku archive migration"
|
||||
|
||||
const SchemaVersion* = 5 # increase this when there is an update in the database schema
const SchemaVersion* = 6 # increase this when there is an update in the database schema
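The bump means a database currently at schema version 5 gets exactly one more script applied on startup. A rough self-contained sketch of that gating; scriptsToRun is an illustrative name, not the actual migration module API:

proc scriptsToRun(current, target: int, scripts: seq[(int, string)]): seq[string] =
  # Run every script whose version lies in (current, target], in order.
  for (version, script) in scripts:
    if version > current and version <= target:
      result.add script

when isMainModule:
  let scripts = @[(5, "content_script_version_5"), (6, "content_script_version_6")]
  doAssert scriptsToRun(5, 6, scripts) == @["content_script_version_6"]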
|
||||
|
||||
proc breakIntoStatements*(script: string): seq[string] =
|
||||
## Given a full migration script, that can potentially contain a list
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
## This module is aimed to handle the creation and truncation of partition tables
|
||||
## in order to limit the space occupied in disk by the database.
|
||||
##
|
||||
## The created partitions are referenced by the 'storedAt' field.
|
||||
## The created partitions are referenced by the 'timestamp' field.
|
||||
##
|
||||
|
||||
import std/[deques, times]
|
||||
|
||||
File diff suppressed because it is too large
@ -1,58 +1,16 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import stew/byteutils, nimcrypto/sha2
|
||||
import ../../../waku_core, ../../common
|
||||
import stew/byteutils
|
||||
import ../../../waku_core
|
||||
|
||||
type Index* = object
|
||||
## This type contains the description of an Index used in the pagination of WakuMessages
|
||||
pubsubTopic*: string
|
||||
senderTime*: Timestamp # the time at which the message is generated
|
||||
receiverTime*: Timestamp
|
||||
digest*: MessageDigest # calculated over payload and content topic
|
||||
time*: Timestamp # the time at which the message is generated
|
||||
hash*: WakuMessageHash
|
||||
|
||||
proc compute*(
|
||||
T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic
|
||||
): T =
|
||||
## Takes a WakuMessage with received timestamp and returns its Index.
|
||||
let
|
||||
digest = computeDigest(msg)
|
||||
senderTime = msg.timestamp
|
||||
hash = computeMessageHash(pubsubTopic, msg)
|
||||
|
||||
return Index(
|
||||
pubsubTopic: pubsubTopic,
|
||||
senderTime: senderTime,
|
||||
receiverTime: receivedTime,
|
||||
digest: digest,
|
||||
hash: hash,
|
||||
)
|
||||
|
||||
proc tohistoryCursor*(index: Index): ArchiveCursor =
|
||||
return ArchiveCursor(
|
||||
pubsubTopic: index.pubsubTopic,
|
||||
senderTime: index.senderTime,
|
||||
storeTime: index.receiverTime,
|
||||
digest: index.digest,
|
||||
hash: index.hash,
|
||||
)
|
||||
|
||||
proc toIndex*(index: ArchiveCursor): Index =
|
||||
return Index(
|
||||
pubsubTopic: index.pubsubTopic,
|
||||
senderTime: index.senderTime,
|
||||
receiverTime: index.storeTime,
|
||||
digest: index.digest,
|
||||
hash: index.hash,
|
||||
)
|
||||
pubsubTopic*: PubsubTopic
|
||||
|
||||
proc `==`*(x, y: Index): bool =
|
||||
## receiverTime plays no role in index equality
|
||||
return
|
||||
(
|
||||
(x.senderTime == y.senderTime) and (x.digest == y.digest) and
|
||||
(x.pubsubTopic == y.pubsubTopic)
|
||||
) or (x.hash == y.hash) # this applies to store v3 queries only
|
||||
return x.hash == y.hash
|
||||
|
||||
proc cmp*(x, y: Index): int =
|
||||
## compares x and y
|
||||
@ -61,28 +19,11 @@ proc cmp*(x, y: Index): int =
|
||||
## returns 1 if x > y
|
||||
##
|
||||
## Default sorting order priority is:
|
||||
## 1. senderTimestamp
|
||||
## 2. receiverTimestamp (a fallback only if senderTimestamp unset on either side, and all other fields unequal)
|
||||
## 3. message digest
|
||||
## 4. pubsubTopic
|
||||
## 1. time
|
||||
## 2. hash
|
||||
|
||||
if x == y:
|
||||
# Quick exit ensures receiver time does not affect index equality
|
||||
return 0
|
||||
let timeCMP = cmp(x.time, y.time)
|
||||
if timeCMP != 0:
|
||||
return timeCMP
|
||||
|
||||
# Timestamp has a higher priority for comparison
|
||||
let
|
||||
# Use receiverTime where senderTime is unset
|
||||
xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime
|
||||
yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime
|
||||
|
||||
let timecmp = cmp(xTimestamp, yTimestamp)
|
||||
if timecmp != 0:
|
||||
return timecmp
|
||||
|
||||
# Continue only when timestamps are equal
|
||||
let digestcmp = cmp(x.digest.data, y.digest.data)
|
||||
if digestcmp != 0:
|
||||
return digestcmp
|
||||
|
||||
  return cmp(x.pubsubTopic, y.pubsubTopic)
  return cmp(x.hash, y.hash)
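The ordering contract is now just (timestamp, hash): since the message hash already covers the pubsub topic and payload, equal-timestamp ties resolve deterministically without the digest and topic comparisons used before. A self-contained illustration with simplified types; SimpleIndex is a stand-in, not the driver's Index:

import std/algorithm

type SimpleIndex = object
  time: int64
  hash: string   # stand-in for the hex-encoded WakuMessageHash

proc cmpIndex(x, y: SimpleIndex): int =
  let timeCmp = cmp(x.time, y.time)
  if timeCmp != 0:
    return timeCmp
  cmp(x.hash, y.hash)

when isMainModule:
  var indices = @[
    SimpleIndex(time: 20, hash: "0xbb"),
    SimpleIndex(time: 10, hash: "0xff"),
    SimpleIndex(time: 20, hash: "0xaa")
  ]
  indices.sort(cmpIndex)
  doAssert indices[0].time == 10
  doAssert indices[1].hash == "0xaa"   # equal timestamps fall back to the hash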
|
||||
|
||||
@ -133,9 +133,7 @@ proc getPage(
|
||||
if predicate.isNil() or predicate(key, data):
|
||||
numberOfItems += 1
|
||||
|
||||
outSeq.add(
|
||||
(key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash)
|
||||
)
|
||||
outSeq.add((key.hash, key.pubsubTopic, data))
|
||||
|
||||
currentEntry =
|
||||
if forward:
|
||||
@ -227,19 +225,12 @@ proc add*(
|
||||
|
||||
method put*(
|
||||
driver: QueueDriver,
|
||||
messageHash: WakuMessageHash,
|
||||
pubsubTopic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
digest: MessageDigest,
|
||||
messageHash: WakuMessageHash,
|
||||
receivedTime: Timestamp,
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
let index = Index(
|
||||
pubsubTopic: pubsubTopic,
|
||||
senderTime: message.timestamp,
|
||||
receiverTime: receivedTime,
|
||||
digest: digest,
|
||||
hash: messageHash,
|
||||
)
|
||||
let index =
|
||||
Index(time: message.timestamp, hash: messageHash, pubsubTopic: pubsubTopic)
|
||||
|
||||
return driver.add(index, message)
|
||||
|
||||
@ -256,8 +247,8 @@ method existsTable*(
|
||||
|
||||
method getMessages*(
|
||||
driver: QueueDriver,
|
||||
includeData = false,
|
||||
contentTopic: seq[ContentTopic] = @[],
|
||||
includeData = true,
|
||||
contentTopics: seq[ContentTopic] = @[],
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
@ -266,14 +257,17 @@ method getMessages*(
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
|
||||
let cursor = cursor.map(toIndex)
|
||||
var index = none(Index)
|
||||
|
||||
if cursor.isSome():
|
||||
index = some(Index(hash: cursor.get()))
|
||||
|
||||
let matchesQuery: QueryFilterMatcher =
|
||||
func (index: Index, msg: WakuMessage): bool =
|
||||
if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get():
|
||||
return false
|
||||
|
||||
if contentTopic.len > 0 and msg.contentTopic notin contentTopic:
|
||||
if contentTopics.len > 0 and msg.contentTopic notin contentTopics:
|
||||
return false
|
||||
|
||||
if startTime.isSome() and msg.timestamp < startTime.get():
|
||||
@ -287,11 +281,14 @@ method getMessages*(
|
||||
|
||||
return true
|
||||
|
||||
var pageRes: QueueDriverGetPageResult
|
||||
try:
|
||||
pageRes = driver.getPage(maxPageSize, ascendingOrder, cursor, matchesQuery)
|
||||
except CatchableError, Exception:
|
||||
return err(getCurrentExceptionMsg())
|
||||
let catchable = catch:
|
||||
driver.getPage(maxPageSize, ascendingOrder, index, matchesQuery)
|
||||
|
||||
let pageRes: QueueDriverGetPageResult =
|
||||
if catchable.isErr():
|
||||
return err(catchable.error.msg)
|
||||
else:
|
||||
catchable.get()
|
||||
|
||||
if pageRes.isErr():
|
||||
return err($pageRes.error)
|
||||
@ -328,7 +325,7 @@ method getOldestMessageTimestamp*(
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return driver.first().map(
|
||||
proc(index: Index): Timestamp =
|
||||
index.receiverTime
|
||||
index.time
|
||||
)
|
||||
|
||||
method getNewestMessageTimestamp*(
|
||||
@ -336,7 +333,7 @@ method getNewestMessageTimestamp*(
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return driver.last().map(
|
||||
proc(index: Index): Timestamp =
|
||||
index.receiverTime
|
||||
index.time
|
||||
)
|
||||
|
||||
method deleteMessagesOlderThanTimestamp*(
|
||||
|
||||
@ -5,8 +5,7 @@ import chronicles
|
||||
import
|
||||
../../../common/databases/db_sqlite,
|
||||
../../../common/databases/common,
|
||||
../../../waku_core,
|
||||
./cursor
|
||||
../../../waku_core
|
||||
|
||||
const DbTable = "Message"
|
||||
|
||||
@ -16,7 +15,7 @@ type SqlQueryStr = string
|
||||
|
||||
proc queryRowWakuMessageCallback(
|
||||
s: ptr sqlite3_stmt,
|
||||
contentTopicCol, payloadCol, versionCol, senderTimestampCol, metaCol: cint,
|
||||
contentTopicCol, payloadCol, versionCol, timestampCol, metaCol: cint,
|
||||
): WakuMessage =
|
||||
let
|
||||
topic = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, contentTopicCol))
|
||||
@ -30,22 +29,20 @@ proc queryRowWakuMessageCallback(
|
||||
metaLength = sqlite3_column_bytes(s, metaCol)
|
||||
payload = @(toOpenArray(p, 0, payloadLength - 1))
|
||||
version = sqlite3_column_int64(s, versionCol)
|
||||
senderTimestamp = sqlite3_column_int64(s, senderTimestampCol)
|
||||
timestamp = sqlite3_column_int64(s, timestampCol)
|
||||
meta = @(toOpenArray(m, 0, metaLength - 1))
|
||||
|
||||
return WakuMessage(
|
||||
contentTopic: ContentTopic(contentTopic),
|
||||
payload: payload,
|
||||
version: uint32(version),
|
||||
timestamp: Timestamp(senderTimestamp),
|
||||
timestamp: Timestamp(timestamp),
|
||||
meta: meta,
|
||||
)
|
||||
|
||||
proc queryRowReceiverTimestampCallback(
|
||||
s: ptr sqlite3_stmt, storedAtCol: cint
|
||||
): Timestamp =
|
||||
let storedAt = sqlite3_column_int64(s, storedAtCol)
|
||||
return Timestamp(storedAt)
|
||||
proc queryRowTimestampCallback(s: ptr sqlite3_stmt, timestampCol: cint): Timestamp =
|
||||
let timestamp = sqlite3_column_int64(s, timestampCol)
|
||||
return Timestamp(timestamp)
|
||||
|
||||
proc queryRowPubsubTopicCallback(
|
||||
s: ptr sqlite3_stmt, pubsubTopicCol: cint
|
||||
@ -59,14 +56,6 @@ proc queryRowPubsubTopicCallback(
|
||||
|
||||
return pubsubTopic
|
||||
|
||||
proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] =
|
||||
let
|
||||
digestPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, digestCol))
|
||||
digestLength = sqlite3_column_bytes(s, digestCol)
|
||||
digest = @(toOpenArray(digestPointer, 0, digestLength - 1))
|
||||
|
||||
return digest
|
||||
|
||||
proc queryRowWakuMessageHashCallback(
|
||||
s: ptr sqlite3_stmt, hashCol: cint
|
||||
): WakuMessageHash =
|
||||
@ -82,11 +71,10 @@ proc queryRowWakuMessageHashCallback(
|
||||
## Create table
|
||||
|
||||
proc createTableQuery(table: string): SqlQueryStr =
|
||||
"CREATE TABLE IF NOT EXISTS " & table & " (" & " pubsubTopic BLOB NOT NULL," &
|
||||
"CREATE TABLE IF NOT EXISTS " & table & " (" &
|
||||
" messageHash BLOB NOT NULL PRIMARY KEY," & " pubsubTopic BLOB NOT NULL," &
|
||||
" contentTopic BLOB NOT NULL," & " payload BLOB," & " version INTEGER NOT NULL," &
|
||||
" timestamp INTEGER NOT NULL," & " id BLOB," & " messageHash BLOB," &
|
||||
" storedAt INTEGER NOT NULL," & " meta BLOB," &
|
||||
" CONSTRAINT messageIndex PRIMARY KEY (messageHash)" & ") WITHOUT ROWID;"
|
||||
" timestamp INTEGER NOT NULL," & " meta BLOB" & ") WITHOUT ROWID;"
|
||||
|
||||
proc createTable*(db: SqliteDatabase): DatabaseResult[void] =
|
||||
let query = createTableQuery(DbTable)
|
||||
@ -102,7 +90,7 @@ proc createTable*(db: SqliteDatabase): DatabaseResult[void] =
|
||||
## Create indices
|
||||
|
||||
proc createOldestMessageTimestampIndexQuery(table: string): SqlQueryStr =
|
||||
"CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (storedAt);"
|
||||
"CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (timestamp);"
|
||||
|
||||
proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void] =
|
||||
let query = createOldestMessageTimestampIndexQuery(DbTable)
|
||||
@ -115,39 +103,15 @@ proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void
|
||||
)
|
||||
return ok()
|
||||
|
||||
proc createHistoryQueryIndexQuery(table: string): SqlQueryStr =
|
||||
"CREATE INDEX IF NOT EXISTS i_query ON " & table &
|
||||
" (contentTopic, pubsubTopic, storedAt, id);"
|
||||
|
||||
proc createHistoryQueryIndex*(db: SqliteDatabase): DatabaseResult[void] =
|
||||
let query = createHistoryQueryIndexQuery(DbTable)
|
||||
discard
|
||||
?db.query(
|
||||
query,
|
||||
proc(s: ptr sqlite3_stmt) =
|
||||
discard
|
||||
,
|
||||
)
|
||||
return ok()
|
||||
|
||||
## Insert message
|
||||
type InsertMessageParams* = (
|
||||
seq[byte],
|
||||
seq[byte],
|
||||
Timestamp,
|
||||
seq[byte],
|
||||
seq[byte],
|
||||
seq[byte],
|
||||
int64,
|
||||
Timestamp,
|
||||
seq[byte],
|
||||
)
|
||||
type InsertMessageParams* =
|
||||
(seq[byte], seq[byte], seq[byte], seq[byte], int64, Timestamp, seq[byte])
|
||||
|
||||
proc insertMessageQuery(table: string): SqlQueryStr =
|
||||
return
|
||||
"INSERT INTO " & table &
|
||||
"(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp, meta)" &
|
||||
" VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);"
|
||||
"(messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta)" &
|
||||
" VALUES (?, ?, ?, ?, ?, ?, ?);"
|
||||
|
||||
proc prepareInsertMessageStmt*(
|
||||
db: SqliteDatabase
|
||||
@ -176,14 +140,12 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
|
||||
## Get oldest message receiver timestamp
|
||||
|
||||
proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr =
|
||||
return "SELECT MIN(storedAt) FROM " & table
|
||||
return "SELECT MIN(timestamp) FROM " & table
|
||||
|
||||
proc selectOldestReceiverTimestamp*(
|
||||
db: SqliteDatabase
|
||||
): DatabaseResult[Timestamp] {.inline.} =
|
||||
proc selectOldestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inline.} =
|
||||
var timestamp: Timestamp
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
timestamp = queryRowReceiverTimestampCallback(s, 0)
|
||||
timestamp = queryRowTimestampCallback(s, 0)
|
||||
|
||||
let query = selectOldestMessageTimestampQuery(DbTable)
|
||||
let res = db.query(query, queryRowCallback)
|
||||
@ -195,14 +157,12 @@ proc selectOldestReceiverTimestamp*(
|
||||
## Get newest message receiver timestamp
|
||||
|
||||
proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr =
|
||||
return "SELECT MAX(storedAt) FROM " & table
|
||||
return "SELECT MAX(timestamp) FROM " & table
|
||||
|
||||
proc selectNewestReceiverTimestamp*(
|
||||
db: SqliteDatabase
|
||||
): DatabaseResult[Timestamp] {.inline.} =
|
||||
proc selectNewestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inline.} =
|
||||
var timestamp: Timestamp
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
timestamp = queryRowReceiverTimestampCallback(s, 0)
|
||||
timestamp = queryRowTimestampCallback(s, 0)
|
||||
|
||||
let query = selectNewestMessageTimestampQuery(DbTable)
|
||||
let res = db.query(query, queryRowCallback)
|
||||
@ -214,7 +174,7 @@ proc selectNewestReceiverTimestamp*(
|
||||
## Delete messages older than timestamp
|
||||
|
||||
proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr =
|
||||
return "DELETE FROM " & table & " WHERE storedAt < " & $ts
|
||||
return "DELETE FROM " & table & " WHERE timestamp < " & $ts
|
||||
|
||||
proc deleteMessagesOlderThanTimestamp*(
|
||||
db: SqliteDatabase, ts: int64
|
||||
@ -233,9 +193,9 @@ proc deleteMessagesOlderThanTimestamp*(
|
||||
|
||||
proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr =
|
||||
return
|
||||
"DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" &
|
||||
" SELECT storedAt, id, pubsubTopic FROM " & table &
|
||||
" ORDER BY storedAt DESC, id DESC" & " LIMIT " & $limit & ");"
|
||||
"DELETE FROM " & table & " WHERE (timestamp, messageHash) NOT IN (" &
|
||||
" SELECT timestamp, messageHash FROM " & table &
|
||||
" ORDER BY timestamp DESC, messageHash DESC" & " LIMIT " & $limit & ");"
|
||||
|
||||
proc deleteOldestMessagesNotWithinLimit*(
|
||||
db: SqliteDatabase, limit: int
|
||||
@ -255,37 +215,50 @@ proc deleteOldestMessagesNotWithinLimit*(
|
||||
|
||||
proc selectAllMessagesQuery(table: string): SqlQueryStr =
|
||||
return
|
||||
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" &
|
||||
" FROM " & table & " ORDER BY storedAt ASC"
|
||||
"SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta" &
|
||||
" FROM " & table & " ORDER BY timestamp ASC"
|
||||
|
||||
proc selectAllMessages*(
|
||||
db: SqliteDatabase
|
||||
): DatabaseResult[
|
||||
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
] {.gcsafe.} =
|
||||
): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] =
|
||||
## Retrieve all messages from the store.
|
||||
var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)]
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 1)
|
||||
wakuMessage = queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 1,
|
||||
payloadCol = 2,
|
||||
contentTopicCol = 2,
|
||||
payloadCol = 3,
|
||||
versionCol = 4,
|
||||
senderTimestampCol = 5,
|
||||
metaCol = 8,
|
||||
timestampCol = 5,
|
||||
metaCol = 6,
|
||||
)
|
||||
digest = queryRowDigestCallback(s, digestCol = 6)
|
||||
storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
|
||||
|
||||
rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash))
|
||||
rows.add((hash, pubsubTopic, wakuMessage))
|
||||
|
||||
let query = selectAllMessagesQuery(DbTable)
|
||||
let res = db.query(query, queryRowCallback)
|
||||
if res.isErr():
|
||||
return err(res.error())
|
||||
db.query(query, queryRowCallback).isOkOr:
|
||||
return err("select all messages failed: " & $error)
|
||||
|
||||
return ok(rows)
|
||||
|
||||
## Select all messages without data
|
||||
|
||||
proc selectAllMessageHashesQuery(table: string): SqlQueryStr =
|
||||
return "SELECT messageHash" & " FROM " & table & " ORDER BY timestamp ASC"
|
||||
|
||||
proc selectAllMessageHashes*(db: SqliteDatabase): DatabaseResult[seq[WakuMessageHash]] =
|
||||
## Retrieve all messages from the store.
|
||||
var rows: seq[WakuMessageHash]
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
|
||||
rows.add(hash)
|
||||
|
||||
let query = selectAllMessageHashesQuery(DbTable)
|
||||
db.query(query, queryRowCallback).isOkOr:
|
||||
return err("select all message hashes failed: " & $error)
|
||||
|
||||
return ok(rows)
|
||||
|
||||
@ -301,75 +274,6 @@ proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
|
||||
where &= " AND " & clause
|
||||
return some(where)
|
||||
|
||||
proc whereClausev2(
|
||||
cursor: bool,
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
contentTopic: seq[ContentTopic],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
ascending: bool,
|
||||
): Option[string] {.deprecated.} =
|
||||
let cursorClause =
|
||||
if cursor:
|
||||
let comp = if ascending: ">" else: "<"
|
||||
|
||||
some("(storedAt, id) " & comp & " (?, ?)")
|
||||
else:
|
||||
none(string)
|
||||
|
||||
let pubsubTopicClause =
|
||||
if pubsubTopic.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("pubsubTopic = (?)")
|
||||
|
||||
let contentTopicClause =
|
||||
if contentTopic.len <= 0:
|
||||
none(string)
|
||||
else:
|
||||
var where = "contentTopic IN ("
|
||||
where &= "?"
|
||||
for _ in 1 ..< contentTopic.len:
|
||||
where &= ", ?"
|
||||
where &= ")"
|
||||
some(where)
|
||||
|
||||
let startTimeClause =
|
||||
if startTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt >= (?)")
|
||||
|
||||
let endTimeClause =
|
||||
if endTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt <= (?)")
|
||||
|
||||
return combineClauses(
|
||||
cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause
|
||||
)
|
||||
|
||||
proc selectMessagesWithLimitQueryv2(
|
||||
table: string, where: Option[string], limit: uint, ascending = true, v3 = false
|
||||
): SqlQueryStr {.deprecated.} =
|
||||
let order = if ascending: "ASC" else: "DESC"
|
||||
|
||||
var query: string
|
||||
|
||||
query =
|
||||
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta"
|
||||
query &= " FROM " & table
|
||||
|
||||
if where.isSome():
|
||||
query &= " WHERE " & where.get()
|
||||
|
||||
query &= " ORDER BY storedAt " & order & ", id " & order
|
||||
|
||||
query &= " LIMIT " & $limit & ";"
|
||||
|
||||
return query
|
||||
|
||||
proc prepareStmt(
|
||||
db: SqliteDatabase, stmt: string
|
||||
): DatabaseResult[SqliteStmt[void, void]] =
|
||||
@ -377,113 +281,6 @@ proc prepareStmt(
|
||||
checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
|
||||
return ok(SqliteStmt[void, void](s))
|
||||
|
||||
proc execSelectMessagesV2WithLimitStmt(
|
||||
s: SqliteStmt,
|
||||
cursor: Option[DbCursor],
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
contentTopic: seq[ContentTopic],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
onRowCallback: DataProc,
|
||||
): DatabaseResult[void] {.deprecated.} =
|
||||
let s = RawStmtPtr(s)
|
||||
|
||||
# Bind params
|
||||
var paramIndex = 1
|
||||
|
||||
if cursor.isSome():
|
||||
let (storedAt, id, _) = cursor.get()
|
||||
checkErr bindParam(s, paramIndex, storedAt)
|
||||
paramIndex += 1
|
||||
checkErr bindParam(s, paramIndex, id)
|
||||
paramIndex += 1
|
||||
|
||||
if pubsubTopic.isSome():
|
||||
let pubsubTopic = toBytes(pubsubTopic.get())
|
||||
checkErr bindParam(s, paramIndex, pubsubTopic)
|
||||
paramIndex += 1
|
||||
|
||||
for topic in contentTopic:
|
||||
checkErr bindParam(s, paramIndex, topic.toBytes())
|
||||
paramIndex += 1
|
||||
|
||||
if startTime.isSome():
|
||||
let time = startTime.get()
|
||||
checkErr bindParam(s, paramIndex, time)
|
||||
paramIndex += 1
|
||||
|
||||
if endTime.isSome():
|
||||
let time = endTime.get()
|
||||
checkErr bindParam(s, paramIndex, time)
|
||||
paramIndex += 1
|
||||
|
||||
try:
|
||||
while true:
|
||||
let v = sqlite3_step(s)
|
||||
case v
|
||||
of SQLITE_ROW:
|
||||
onRowCallback(s)
|
||||
of SQLITE_DONE:
|
||||
return ok()
|
||||
else:
|
||||
return err($sqlite3_errstr(v))
|
||||
except Exception, CatchableError:
|
||||
error "exception in execSelectMessagesV2WithLimitStmt",
|
||||
error = getCurrentExceptionMsg()
|
||||
|
||||
# release implicit transaction
|
||||
discard sqlite3_reset(s) # same return information as step
|
||||
discard sqlite3_clear_bindings(s) # no errors possible
|
||||
|
||||
proc selectMessagesByHistoryQueryWithLimit*(
|
||||
db: SqliteDatabase,
|
||||
contentTopic: seq[ContentTopic],
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
cursor: Option[DbCursor],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
limit: uint,
|
||||
ascending: bool,
|
||||
): DatabaseResult[
|
||||
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
] {.deprecated.} =
|
||||
var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
|
||||
@[]
|
||||
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
|
||||
message = queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 1,
|
||||
payloadCol = 2,
|
||||
versionCol = 4,
|
||||
senderTimestampCol = 5,
|
||||
metaCol = 8,
|
||||
)
|
||||
digest = queryRowDigestCallback(s, digestCol = 6)
|
||||
storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
|
||||
|
||||
messages.add((pubsubTopic, message, digest, storedAt, hash))
|
||||
|
||||
let query = block:
|
||||
let where = whereClausev2(
|
||||
cursor.isSome(), pubsubTopic, contentTopic, startTime, endTime, ascending
|
||||
)
|
||||
|
||||
selectMessagesWithLimitQueryv2(DbTable, where, limit, ascending)
|
||||
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessagesV2WithLimitStmt(
|
||||
cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback
|
||||
)
|
||||
dbStmt.dispose()
|
||||
|
||||
return ok(messages)
|
||||
|
||||
### Store v3 ###
|
||||
|
||||
proc execSelectMessageByHash(
|
||||
s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc
|
||||
): DatabaseResult[void] =
|
||||
@ -508,14 +305,23 @@ proc execSelectMessageByHash(
|
||||
discard sqlite3_reset(s) # same return information as step
|
||||
discard sqlite3_clear_bindings(s) # no errors possible
|
||||
|
||||
proc selectMessageByHashQuery(): SqlQueryStr =
|
||||
var query: string
|
||||
proc selectTimestampByHashQuery(table: string): SqlQueryStr =
|
||||
return "SELECT timestamp FROM " & table & " WHERE messageHash = (?)"
|
||||
|
||||
query = "SELECT contentTopic, payload, version, timestamp, meta, messageHash"
|
||||
query &= " FROM " & DbTable
|
||||
query &= " WHERE messageHash = (?)"
|
||||
proc getCursorTimestamp(
|
||||
db: SqliteDatabase, hash: WakuMessageHash
|
||||
): DatabaseResult[Option[Timestamp]] =
|
||||
var timestamp = none(Timestamp)
|
||||
|
||||
return query
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
timestamp = some(queryRowTimestampCallback(s, 0))
|
||||
|
||||
let query = selectTimestampByHashQuery(DbTable)
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessageByHash(hash, queryRowCallback)
|
||||
dbStmt.dispose()
|
||||
|
||||
return ok(timestamp)
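To make the cursor translation above concrete: a store-v3 cursor is just a message hash, so before the time-based where-clause can be applied the hash must first be resolved to its timestamp. A minimal sketch of that step, assuming the surrounding module's types and a populated `db: SqliteDatabase` (the helper name `resolveCursor` is hypothetical):

proc resolveCursor(
    db: SqliteDatabase, cursor: WakuMessageHash
): DatabaseResult[(Timestamp, WakuMessageHash)] =
  ## Illustrative only: turn a hash-only cursor into the (timestamp, hash)
  ## pair that the pagination where-clause compares against.
  let timeOpt = ?db.getCursorTimestamp(cursor)
  if timeOpt.isNone():
    return err("cursor not found")

  return ok((timeOpt.get(), cursor))

The two select procs below perform exactly this resolution internally before building their queries.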
|
||||
|
||||
proc whereClause(
|
||||
cursor: bool,
|
||||
@ -555,13 +361,13 @@ proc whereClause(
|
||||
if startTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt >= (?)")
|
||||
some("timestamp >= (?)")
|
||||
|
||||
let endTimeClause =
|
||||
if endTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt <= (?)")
|
||||
some("timestamp <= (?)")
|
||||
|
||||
let hashesClause =
|
||||
if hashes.len <= 0:
|
||||
@ -643,20 +449,36 @@ proc execSelectMessagesWithLimitStmt(
|
||||
discard sqlite3_clear_bindings(s) # no errors possible
|
||||
|
||||
proc selectMessagesWithLimitQuery(
|
||||
table: string, where: Option[string], limit: uint, ascending = true, v3 = false
|
||||
table: string, where: Option[string], limit: uint, ascending = true
|
||||
): SqlQueryStr =
|
||||
let order = if ascending: "ASC" else: "DESC"
|
||||
|
||||
var query: string
|
||||
|
||||
query =
|
||||
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta"
|
||||
"SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta"
|
||||
query &= " FROM " & table
|
||||
|
||||
if where.isSome():
|
||||
query &= " WHERE " & where.get()
|
||||
|
||||
query &= " ORDER BY storedAt " & order & ", messageHash " & order
|
||||
query &= " ORDER BY timestamp " & order & ", messageHash " & order
|
||||
|
||||
query &= " LIMIT " & $limit & ";"
|
||||
|
||||
return query
|
||||
|
||||
proc selectMessageHashesWithLimitQuery(
|
||||
table: string, where: Option[string], limit: uint, ascending = true
|
||||
): SqlQueryStr =
|
||||
let order = if ascending: "ASC" else: "DESC"
|
||||
|
||||
var query = "SELECT messageHash FROM " & table
|
||||
|
||||
if where.isSome():
|
||||
query &= " WHERE " & where.get()
|
||||
|
||||
query &= " ORDER BY timestamp " & order & ", messageHash " & order
|
||||
|
||||
query &= " LIMIT " & $limit & ";"
|
||||
|
||||
@ -672,79 +494,101 @@ proc selectMessagesByStoreQueryWithLimit*(
|
||||
hashes: seq[WakuMessageHash],
|
||||
limit: uint,
|
||||
ascending: bool,
|
||||
): DatabaseResult[
|
||||
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
] =
|
||||
# Must first get the message timestamp before paginating by time
|
||||
let newCursor =
|
||||
if cursor.isSome() and cursor.get() != EmptyWakuMessageHash:
|
||||
let hash: WakuMessageHash = cursor.get()
|
||||
): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] =
|
||||
var timeCursor = none((Timestamp, WakuMessageHash))
|
||||
|
||||
var wakuMessage: Option[WakuMessage]
|
||||
if cursor.isSome():
|
||||
let hash: WakuMessageHash = cursor.get()
|
||||
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
wakuMessage = some(
|
||||
queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 0,
|
||||
payloadCol = 1,
|
||||
versionCol = 2,
|
||||
senderTimestampCol = 3,
|
||||
metaCol = 4,
|
||||
)
|
||||
)
|
||||
let timeOpt = ?getCursorTimestamp(db, hash)
|
||||
|
||||
let query = selectMessageByHashQuery()
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessageByHash(hash, queryRowCallback)
|
||||
dbStmt.dispose()
|
||||
if timeOpt.isNone():
|
||||
return err("cursor not found")
|
||||
|
||||
if wakuMessage.isSome():
|
||||
let time = wakuMessage.get().timestamp
|
||||
timeCursor = some((timeOpt.get(), hash))
|
||||
|
||||
some((time, hash))
|
||||
else:
|
||||
return err("cursor not found")
|
||||
else:
|
||||
none((Timestamp, WakuMessageHash))
|
||||
|
||||
var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
|
||||
@[]
|
||||
var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] = @[]
|
||||
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 1)
|
||||
message = queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 1,
|
||||
payloadCol = 2,
|
||||
contentTopicCol = 2,
|
||||
payloadCol = 3,
|
||||
versionCol = 4,
|
||||
senderTimestampCol = 5,
|
||||
metaCol = 8,
|
||||
timestampCol = 5,
|
||||
metaCol = 6,
|
||||
)
|
||||
digest = queryRowDigestCallback(s, digestCol = 6)
|
||||
storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
|
||||
|
||||
messages.add((pubsubTopic, message, digest, storedAt, hash))
|
||||
rows.add((hash, pubsubTopic, message))
|
||||
|
||||
let query = block:
|
||||
let where = whereClause(
|
||||
newCursor.isSome(),
|
||||
pubsubTopic,
|
||||
contentTopic,
|
||||
startTime,
|
||||
endTime,
|
||||
hashes,
|
||||
ascending,
|
||||
)
|
||||
let where = whereClause(
|
||||
timeCursor.isSome(),
|
||||
pubsubTopic,
|
||||
contentTopic,
|
||||
startTime,
|
||||
endTime,
|
||||
hashes,
|
||||
ascending,
|
||||
)
|
||||
|
||||
selectMessagesWithLimitQuery(DbTable, where, limit, ascending, true)
|
||||
let query = selectMessagesWithLimitQuery(DbTable, where, limit, ascending)
|
||||
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessagesWithLimitStmt(
|
||||
newCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
|
||||
timeCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
|
||||
)
|
||||
dbStmt.dispose()
|
||||
|
||||
return ok(messages)
|
||||
return ok(rows)
|
||||
|
||||
proc selectMessageHashesByStoreQueryWithLimit*(
|
||||
db: SqliteDatabase,
|
||||
contentTopic: seq[ContentTopic],
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
cursor: Option[WakuMessageHash],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
hashes: seq[WakuMessageHash],
|
||||
limit: uint,
|
||||
ascending: bool,
|
||||
): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] =
|
||||
var timeCursor = none((Timestamp, WakuMessageHash))
|
||||
|
||||
if cursor.isSome():
|
||||
let hash: WakuMessageHash = cursor.get()
|
||||
|
||||
let timeOpt = ?getCursorTimestamp(db, hash)
|
||||
|
||||
if timeOpt.isNone():
|
||||
return err("cursor not found")
|
||||
|
||||
timeCursor = some((timeOpt.get(), hash))
|
||||
|
||||
var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] = @[]
|
||||
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
|
||||
rows.add((hash, "", WakuMessage()))
|
||||
|
||||
let where = whereClause(
|
||||
timeCursor.isSome(),
|
||||
pubsubTopic,
|
||||
contentTopic,
|
||||
startTime,
|
||||
endTime,
|
||||
hashes,
|
||||
ascending,
|
||||
)
|
||||
|
||||
let query = selectMessageHashesWithLimitQuery(DbTable, where, limit, ascending)
|
||||
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessagesWithLimitStmt(
|
||||
timeCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
|
||||
)
|
||||
dbStmt.dispose()
|
||||
|
||||
return ok(rows)
|
||||
|
||||
@ -9,7 +9,6 @@ import
|
||||
../../../waku_core/message/digest,
|
||||
../../common,
|
||||
../../driver,
|
||||
./cursor,
|
||||
./queries
|
||||
|
||||
logScope:
|
||||
@ -28,11 +27,7 @@ proc init(db: SqliteDatabase): ArchiveDriverResult[void] =
|
||||
# Create indices, if don't exist
|
||||
let resRtIndex = createOldestMessageTimestampIndex(db)
|
||||
if resRtIndex.isErr():
|
||||
return err("failed to create i_rt index: " & resRtIndex.error())
|
||||
|
||||
let resMsgIndex = createHistoryQueryIndex(db)
|
||||
if resMsgIndex.isErr():
|
||||
return err("failed to create i_query index: " & resMsgIndex.error())
|
||||
return err("failed to create i_ts index: " & resRtIndex.error())
|
||||
|
||||
return ok()
|
||||
|
||||
@ -52,24 +47,20 @@ proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] =
|
||||
|
||||
method put*(
|
||||
s: SqliteDriver,
|
||||
messageHash: WakuMessageHash,
|
||||
pubsubTopic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
digest: MessageDigest,
|
||||
messageHash: WakuMessageHash,
|
||||
receivedTime: Timestamp,
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
## Inserts a message into the store
|
||||
let res = s.insertStmt.exec(
|
||||
(
|
||||
@(digest.data), # id
|
||||
@(messageHash), # messageHash
|
||||
receivedTime, # storedAt
|
||||
toBytes(message.contentTopic), # contentTopic
|
||||
message.payload, # payload
|
||||
toBytes(pubsubTopic), # pubsubTopic
|
||||
int64(message.version), # version
|
||||
message.timestamp, # senderTimestamp
|
||||
message.meta, # meta
|
||||
@(messageHash),
|
||||
toBytes(pubsubTopic),
|
||||
toBytes(message.contentTopic),
|
||||
message.payload,
|
||||
int64(message.version),
|
||||
message.timestamp,
|
||||
message.meta,
|
||||
)
|
||||
)
|
||||
|
||||
@ -81,34 +72,10 @@ method getAllMessages*(
|
||||
## Retrieve all messages from the store.
|
||||
return s.db.selectAllMessages()
|
||||
|
||||
method getMessagesV2*(
|
||||
s: SqliteDriver,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
endTime = none(Timestamp),
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
|
||||
let cursor = cursor.map(toDbCursor)
|
||||
|
||||
let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit(
|
||||
contentTopic,
|
||||
pubsubTopic,
|
||||
cursor,
|
||||
startTime,
|
||||
endTime,
|
||||
limit = maxPageSize,
|
||||
ascending = ascendingOrder,
|
||||
)
|
||||
|
||||
return rowsRes
|
||||
|
||||
method getMessages*(
|
||||
s: SqliteDriver,
|
||||
includeData = false,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
includeData = true,
|
||||
contentTopics = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
@ -117,14 +84,20 @@ method getMessages*(
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
|
||||
let cursor =
|
||||
if cursor.isSome():
|
||||
some(cursor.get().hash)
|
||||
else:
|
||||
none(WakuMessageHash)
|
||||
if not includeData:
|
||||
return s.db.selectMessageHashesByStoreQueryWithLimit(
|
||||
contentTopics,
|
||||
pubsubTopic,
|
||||
cursor,
|
||||
startTime,
|
||||
endTime,
|
||||
hashes,
|
||||
limit = maxPageSize,
|
||||
ascending = ascendingOrder,
|
||||
)
|
||||
|
||||
let rowsRes = s.db.selectMessagesByStoreQueryWithLimit(
|
||||
contentTopic,
|
||||
return s.db.selectMessagesByStoreQueryWithLimit(
|
||||
contentTopics,
|
||||
pubsubTopic,
|
||||
cursor,
|
||||
startTime,
|
||||
@ -134,8 +107,6 @@ method getMessages*(
|
||||
ascending = ascendingOrder,
|
||||
)
|
||||
|
||||
return rowsRes
|
||||
|
||||
method getMessagesCount*(
|
||||
s: SqliteDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
@ -156,12 +127,12 @@ method performVacuum*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.asyn
|
||||
method getOldestMessageTimestamp*(
|
||||
s: SqliteDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return s.db.selectOldestReceiverTimestamp()
|
||||
return s.db.selectOldestTimestamp()
|
||||
|
||||
method getNewestMessageTimestamp*(
|
||||
s: SqliteDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return s.db.selectnewestReceiverTimestamp()
|
||||
return s.db.selectNewestTimestamp()
|
||||
|
||||
method deleteMessagesOlderThanTimestamp*(
|
||||
s: SqliteDriver, ts: Timestamp
|
||||
|
||||
waku/waku_archive_legacy.nim (new file, 7 lines)
@ -0,0 +1,7 @@
|
||||
import
|
||||
./waku_archive_legacy/common,
|
||||
./waku_archive_legacy/archive,
|
||||
./waku_archive_legacy/driver,
|
||||
./waku_archive_legacy/retention_policy
|
||||
|
||||
export common, archive, driver, retention_policy
|
||||
waku/waku_archive_legacy/archive.nim (new file, 323 lines)
@ -0,0 +1,323 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[times, options, sequtils, strutils, algorithm],
|
||||
stew/[results, byteutils],
|
||||
chronicles,
|
||||
chronos,
|
||||
metrics
|
||||
import
|
||||
../common/paging,
|
||||
./driver,
|
||||
./retention_policy,
|
||||
../waku_core,
|
||||
../waku_core/message/digest,
|
||||
./common,
|
||||
./archive_metrics
|
||||
|
||||
logScope:
|
||||
topics = "waku archive"
|
||||
|
||||
const
|
||||
DefaultPageSize*: uint = 20
|
||||
MaxPageSize*: uint = 100
|
||||
|
||||
# Retention policy
|
||||
WakuArchiveDefaultRetentionPolicyInterval* = chronos.minutes(30)
|
||||
|
||||
# Metrics reporting
|
||||
WakuArchiveDefaultMetricsReportInterval* = chronos.minutes(1)
|
||||
|
||||
# Message validation
|
||||
# 20 seconds maximum allowable sender timestamp "drift"
|
||||
MaxMessageTimestampVariance* = getNanoSecondTime(20)
|
||||
|
||||
type MessageValidator* =
|
||||
proc(msg: WakuMessage): Result[void, string] {.closure, gcsafe, raises: [].}
|
||||
|
||||
## Archive
|
||||
|
||||
type WakuArchive* = ref object
|
||||
driver: ArchiveDriver
|
||||
|
||||
validator: MessageValidator
|
||||
|
||||
retentionPolicy: Option[RetentionPolicy]
|
||||
|
||||
retentionPolicyHandle: Future[void]
|
||||
metricsHandle: Future[void]
|
||||
|
||||
proc validate*(msg: WakuMessage): Result[void, string] =
|
||||
if msg.ephemeral:
|
||||
# Ephemeral message, do not store
|
||||
return
|
||||
|
||||
if msg.timestamp == 0:
|
||||
return ok()
|
||||
|
||||
let
|
||||
now = getNanosecondTime(getTime().toUnixFloat())
|
||||
lowerBound = now - MaxMessageTimestampVariance
|
||||
upperBound = now + MaxMessageTimestampVariance
|
||||
|
||||
if msg.timestamp < lowerBound:
|
||||
return err(invalidMessageOld)
|
||||
|
||||
if upperBound < msg.timestamp:
|
||||
return err(invalidMessageFuture)
|
||||
|
||||
return ok()
|
||||
|
||||
proc new*(
|
||||
T: type WakuArchive,
|
||||
driver: ArchiveDriver,
|
||||
validator: MessageValidator = validate,
|
||||
retentionPolicy = none(RetentionPolicy),
|
||||
): Result[T, string] =
|
||||
if driver.isNil():
|
||||
return err("archive driver is Nil")
|
||||
|
||||
let archive =
|
||||
WakuArchive(driver: driver, validator: validator, retentionPolicy: retentionPolicy)
|
||||
|
||||
return ok(archive)
|
||||
|
||||
proc handleMessage*(
|
||||
self: WakuArchive, pubsubTopic: PubsubTopic, msg: WakuMessage
|
||||
) {.async.} =
|
||||
self.validator(msg).isOkOr:
|
||||
waku_legacy_archive_errors.inc(labelValues = [error])
|
||||
return
|
||||
|
||||
let
|
||||
msgDigest = computeDigest(msg)
|
||||
msgDigestHex = msgDigest.data.to0xHex()
|
||||
msgHash = computeMessageHash(pubsubTopic, msg)
|
||||
msgHashHex = msgHash.to0xHex()
|
||||
msgTimestamp =
|
||||
if msg.timestamp > 0:
|
||||
msg.timestamp
|
||||
else:
|
||||
getNanosecondTime(getTime().toUnixFloat())
|
||||
|
||||
trace "handling message",
|
||||
msg_hash = msgHashHex,
|
||||
pubsubTopic = pubsubTopic,
|
||||
contentTopic = msg.contentTopic,
|
||||
msgTimestamp = msg.timestamp,
|
||||
usedTimestamp = msgTimestamp,
|
||||
digest = msgDigestHex
|
||||
|
||||
let insertStartTime = getTime().toUnixFloat()
|
||||
|
||||
(await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr:
|
||||
waku_legacy_archive_errors.inc(labelValues = [insertFailure])
|
||||
error "failed to insert message", error = error
|
||||
return
|
||||
|
||||
debug "message archived",
|
||||
msg_hash = msgHashHex,
|
||||
pubsubTopic = pubsubTopic,
|
||||
contentTopic = msg.contentTopic,
|
||||
msgTimestamp = msg.timestamp,
|
||||
usedTimestamp = msgTimestamp,
|
||||
digest = msgDigestHex
|
||||
|
||||
let insertDuration = getTime().toUnixFloat() - insertStartTime
|
||||
waku_legacy_archive_insert_duration_seconds.observe(insertDuration)
|
||||
|
||||
proc findMessages*(
|
||||
self: WakuArchive, query: ArchiveQuery
|
||||
): Future[ArchiveResult] {.async, gcsafe.} =
|
||||
## Search the archive to return a single page of messages matching the query criteria
|
||||
|
||||
let maxPageSize =
|
||||
if query.pageSize <= 0:
|
||||
DefaultPageSize
|
||||
else:
|
||||
min(query.pageSize, MaxPageSize)
|
||||
|
||||
let isAscendingOrder = query.direction.into()
|
||||
|
||||
if query.contentTopics.len > 10:
|
||||
return err(ArchiveError.invalidQuery("too many content topics"))
|
||||
|
||||
if query.cursor.isSome() and query.cursor.get().hash.len != 32:
|
||||
return err(ArchiveError.invalidQuery("invalid cursor hash length"))
|
||||
|
||||
let queryStartTime = getTime().toUnixFloat()
|
||||
|
||||
let rows = (
|
||||
await self.driver.getMessages(
|
||||
includeData = query.includeData,
|
||||
contentTopic = query.contentTopics,
|
||||
pubsubTopic = query.pubsubTopic,
|
||||
cursor = query.cursor,
|
||||
startTime = query.startTime,
|
||||
endTime = query.endTime,
|
||||
hashes = query.hashes,
|
||||
maxPageSize = maxPageSize + 1,
|
||||
ascendingOrder = isAscendingOrder,
|
||||
)
|
||||
).valueOr:
|
||||
return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))
|
||||
|
||||
let queryDuration = getTime().toUnixFloat() - queryStartTime
|
||||
waku_legacy_archive_query_duration_seconds.observe(queryDuration)
|
||||
|
||||
var hashes = newSeq[WakuMessageHash]()
|
||||
var messages = newSeq[WakuMessage]()
|
||||
var topics = newSeq[PubsubTopic]()
|
||||
var cursor = none(ArchiveCursor)
|
||||
|
||||
if rows.len == 0:
|
||||
return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor))
|
||||
|
||||
## Messages
|
||||
let pageSize = min(rows.len, int(maxPageSize))
|
||||
|
||||
if query.includeData:
|
||||
topics = rows[0 ..< pageSize].mapIt(it[0])
|
||||
messages = rows[0 ..< pageSize].mapIt(it[1])
|
||||
|
||||
hashes = rows[0 ..< pageSize].mapIt(it[4])
|
||||
|
||||
## Cursor
|
||||
if rows.len > int(maxPageSize):
|
||||
## Build last message cursor
|
||||
## The cursor is built from the last message INCLUDED in the response
|
||||
## (i.e. the second last message in the rows list)
|
||||
|
||||
let (pubsubTopic, message, digest, storeTimestamp, hash) = rows[^2]
|
||||
|
||||
cursor = some(
|
||||
ArchiveCursor(
|
||||
digest: MessageDigest.fromBytes(digest),
|
||||
storeTime: storeTimestamp,
|
||||
senderTime: message.timestamp,
|
||||
pubsubTopic: pubsubTopic,
|
||||
hash: hash,
|
||||
)
|
||||
)
|
||||
|
||||
# All messages MUST be returned in chronological order
|
||||
if not isAscendingOrder:
|
||||
reverse(hashes)
|
||||
reverse(messages)
|
||||
reverse(topics)
|
||||
|
||||
return ok(
|
||||
ArchiveResponse(hashes: hashes, messages: messages, topics: topics, cursor: cursor)
|
||||
)
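To make the cursor handling above concrete, a hedged sketch of how a caller might page through the archive; `archive` is assumed to be an already-constructed WakuArchive, the content topic is illustrative, and `PagingDirection.FORWARD` is assumed from `common/paging`:

proc drainArchive(archive: WakuArchive) {.async.} =
  ## Illustrative only: fetch every page for a single content topic.
  var query = ArchiveQuery(
    includeData: true,
    contentTopics: @["/toy-chat/2/example/proto"],
    pageSize: 20,
    direction: PagingDirection.FORWARD,
  )

  while true:
    let response = (await archive.findMessages(query)).valueOr:
      error "archive query failed", error = $error
      return

    for msg in response.messages:
      trace "archived message", contentTopic = msg.contentTopic

    if response.cursor.isNone():
      break # last page reached

    query.cursor = response.cursor # resume from the last returned message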
|
||||
|
||||
proc findMessagesV2*(
|
||||
self: WakuArchive, query: ArchiveQuery
|
||||
): Future[ArchiveResult] {.async, deprecated, gcsafe.} =
|
||||
## Search the archive to return a single page of messages matching the query criteria
|
||||
|
||||
let maxPageSize =
|
||||
if query.pageSize <= 0:
|
||||
DefaultPageSize
|
||||
else:
|
||||
min(query.pageSize, MaxPageSize)
|
||||
|
||||
let isAscendingOrder = query.direction.into()
|
||||
|
||||
if query.contentTopics.len > 10:
|
||||
return err(ArchiveError.invalidQuery("too many content topics"))
|
||||
|
||||
let queryStartTime = getTime().toUnixFloat()
|
||||
|
||||
let rows = (
|
||||
await self.driver.getMessagesV2(
|
||||
contentTopic = query.contentTopics,
|
||||
pubsubTopic = query.pubsubTopic,
|
||||
cursor = query.cursor,
|
||||
startTime = query.startTime,
|
||||
endTime = query.endTime,
|
||||
maxPageSize = maxPageSize + 1,
|
||||
ascendingOrder = isAscendingOrder,
|
||||
)
|
||||
).valueOr:
|
||||
return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))
|
||||
|
||||
let queryDuration = getTime().toUnixFloat() - queryStartTime
|
||||
waku_legacy_archive_query_duration_seconds.observe(queryDuration)
|
||||
|
||||
var messages = newSeq[WakuMessage]()
|
||||
var cursor = none(ArchiveCursor)
|
||||
|
||||
if rows.len == 0:
|
||||
return ok(ArchiveResponse(messages: messages, cursor: cursor))
|
||||
|
||||
## Messages
|
||||
let pageSize = min(rows.len, int(maxPageSize))
|
||||
|
||||
messages = rows[0 ..< pageSize].mapIt(it[1])
|
||||
|
||||
## Cursor
|
||||
if rows.len > int(maxPageSize):
|
||||
## Build last message cursor
|
||||
## The cursor is built from the last message INCLUDED in the response
|
||||
## (i.e. the second last message in the rows list)
|
||||
|
||||
let (pubsubTopic, message, digest, storeTimestamp, _) = rows[^2]
|
||||
|
||||
cursor = some(
|
||||
ArchiveCursor(
|
||||
digest: MessageDigest.fromBytes(digest),
|
||||
storeTime: storeTimestamp,
|
||||
senderTime: message.timestamp,
|
||||
pubsubTopic: pubsubTopic,
|
||||
)
|
||||
)
|
||||
|
||||
# All messages MUST be returned in chronological order
|
||||
if not isAscendingOrder:
|
||||
reverse(messages)
|
||||
|
||||
return ok(ArchiveResponse(messages: messages, cursor: cursor))
|
||||
|
||||
proc periodicRetentionPolicy(self: WakuArchive) {.async.} =
|
||||
debug "executing message retention policy"
|
||||
|
||||
let policy = self.retentionPolicy.get()
|
||||
|
||||
while true:
|
||||
(await policy.execute(self.driver)).isOkOr:
|
||||
waku_legacy_archive_errors.inc(labelValues = [retPolicyFailure])
|
||||
error "failed execution of retention policy", error = error
|
||||
|
||||
await sleepAsync(WakuArchiveDefaultRetentionPolicyInterval)
|
||||
|
||||
proc periodicMetricReport(self: WakuArchive) {.async.} =
|
||||
while true:
|
||||
let countRes = (await self.driver.getMessagesCount())
|
||||
if countRes.isErr():
|
||||
error "loopReportStoredMessagesMetric failed to get messages count",
|
||||
error = countRes.error
|
||||
else:
|
||||
let count = countRes.get()
|
||||
waku_legacy_archive_messages.set(count, labelValues = ["stored"])
|
||||
|
||||
await sleepAsync(WakuArchiveDefaultMetricsReportInterval)
|
||||
|
||||
proc start*(self: WakuArchive) =
|
||||
if self.retentionPolicy.isSome():
|
||||
self.retentionPolicyHandle = self.periodicRetentionPolicy()
|
||||
|
||||
self.metricsHandle = self.periodicMetricReport()
|
||||
|
||||
proc stopWait*(self: WakuArchive) {.async.} =
|
||||
var futures: seq[Future[void]]
|
||||
|
||||
if self.retentionPolicy.isSome() and not self.retentionPolicyHandle.isNil():
|
||||
futures.add(self.retentionPolicyHandle.cancelAndWait())
|
||||
|
||||
if not self.metricsHandle.isNil:
|
||||
futures.add(self.metricsHandle.cancelAndWait())
|
||||
|
||||
await noCancel(allFutures(futures))
|
||||
waku/waku_archive_legacy/archive_metrics.nim (new file, 23 lines)
@ -0,0 +1,23 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import metrics
|
||||
|
||||
declarePublicGauge waku_legacy_archive_messages,
|
||||
"number of historical messages", ["type"]
|
||||
declarePublicGauge waku_legacy_archive_errors,
|
||||
"number of store protocol errors", ["type"]
|
||||
declarePublicGauge waku_legacy_archive_queries, "number of store queries received"
|
||||
declarePublicHistogram waku_legacy_archive_insert_duration_seconds,
|
||||
"message insertion duration"
|
||||
declarePublicHistogram waku_legacy_archive_query_duration_seconds,
|
||||
"history query duration"
|
||||
|
||||
# Error types (metric label values)
|
||||
const
|
||||
invalidMessageOld* = "invalid_message_too_old"
|
||||
invalidMessageFuture* = "invalid_message_future_timestamp"
|
||||
insertFailure* = "insert_failure"
|
||||
retPolicyFailure* = "retpolicy_failure"
|
||||
waku/waku_archive_legacy/common.nim (new file, 87 lines)
@ -0,0 +1,87 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/options, results, stew/byteutils, stew/arrayops, nimcrypto/sha2
|
||||
import ../waku_core, ../common/paging
|
||||
|
||||
## Waku message digest
|
||||
|
||||
type MessageDigest* = MDigest[256]
|
||||
|
||||
proc fromBytes*(T: type MessageDigest, src: seq[byte]): T =
|
||||
var data: array[32, byte]
|
||||
|
||||
let byteCount = copyFrom[byte](data, src)
|
||||
|
||||
assert byteCount == 32
|
||||
|
||||
return MessageDigest(data: data)
|
||||
|
||||
proc computeDigest*(msg: WakuMessage): MessageDigest =
|
||||
var ctx: sha256
|
||||
ctx.init()
|
||||
defer:
|
||||
ctx.clear()
|
||||
|
||||
ctx.update(msg.contentTopic.toBytes())
|
||||
ctx.update(msg.payload)
|
||||
|
||||
# Computes the hash
|
||||
return ctx.finish()
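As an illustration of what the legacy digest covers (content topic and payload only, unlike the store-v3 message hash, which also commits to the pubsub topic), a small sketch with a made-up message:

when isMainModule:
  let msg = WakuMessage(
    contentTopic: "/toy-chat/2/example/proto", # made-up topic
    payload: toBytes("hello"),
    timestamp: getNanosecondTime(1_717_200_000.0),
  )

  # Store-v2 digest: sha256 over content topic and payload only.
  let digest = computeDigest(msg)
  assert digest.data.len == 32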
|
||||
|
||||
## Public API types
|
||||
|
||||
type
|
||||
#TODO Once Store v2 is removed, the cursor becomes the hash of the last message
|
||||
ArchiveCursor* = object
|
||||
digest*: MessageDigest
|
||||
storeTime*: Timestamp
|
||||
senderTime*: Timestamp
|
||||
pubsubTopic*: PubsubTopic
|
||||
hash*: WakuMessageHash
|
||||
|
||||
ArchiveQuery* = object
|
||||
includeData*: bool # indicate if messages should be returned in addition to hashes.
|
||||
pubsubTopic*: Option[PubsubTopic]
|
||||
contentTopics*: seq[ContentTopic]
|
||||
cursor*: Option[ArchiveCursor]
|
||||
startTime*: Option[Timestamp]
|
||||
endTime*: Option[Timestamp]
|
||||
hashes*: seq[WakuMessageHash]
|
||||
pageSize*: uint
|
||||
direction*: PagingDirection
|
||||
|
||||
ArchiveResponse* = object
|
||||
hashes*: seq[WakuMessageHash]
|
||||
messages*: seq[WakuMessage]
|
||||
topics*: seq[PubsubTopic]
|
||||
cursor*: Option[ArchiveCursor]
|
||||
|
||||
ArchiveErrorKind* {.pure.} = enum
|
||||
UNKNOWN = uint32(0)
|
||||
DRIVER_ERROR = uint32(1)
|
||||
INVALID_QUERY = uint32(2)
|
||||
|
||||
ArchiveError* = object
|
||||
case kind*: ArchiveErrorKind
|
||||
of DRIVER_ERROR, INVALID_QUERY:
|
||||
# TODO: Add an enum to be able to distinguish between error causes
|
||||
cause*: string
|
||||
else:
|
||||
discard
|
||||
|
||||
ArchiveResult* = Result[ArchiveResponse, ArchiveError]
|
||||
|
||||
proc `$`*(err: ArchiveError): string =
|
||||
case err.kind
|
||||
of ArchiveErrorKind.DRIVER_ERROR:
|
||||
"DIRVER_ERROR: " & err.cause
|
||||
of ArchiveErrorKind.INVALID_QUERY:
|
||||
"INVALID_QUERY: " & err.cause
|
||||
of ArchiveErrorKind.UNKNOWN:
|
||||
"UNKNOWN"
|
||||
|
||||
proc invalidQuery*(T: type ArchiveError, cause: string): T =
|
||||
ArchiveError(kind: ArchiveErrorKind.INVALID_QUERY, cause: cause)
|
||||
waku/waku_archive_legacy/driver.nim (new file, 119 lines)
@ -0,0 +1,119 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/options, results, chronos
|
||||
import ../waku_core, ./common
|
||||
|
||||
const DefaultPageSize*: uint = 25
|
||||
|
||||
type
|
||||
ArchiveDriverResult*[T] = Result[T, string]
|
||||
ArchiveDriver* = ref object of RootObj
|
||||
|
||||
#TODO Once Store v2 is removed keep only messages and hashes
|
||||
type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)
|
||||
|
||||
# ArchiveDriver interface
|
||||
|
||||
method put*(
|
||||
driver: ArchiveDriver,
|
||||
pubsubTopic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
digest: MessageDigest,
|
||||
messageHash: WakuMessageHash,
|
||||
receivedTime: Timestamp,
|
||||
): Future[ArchiveDriverResult[void]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getAllMessages*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getMessagesV2*(
|
||||
driver: ArchiveDriver,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
endTime = none(Timestamp),
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, deprecated, async.} =
|
||||
discard
|
||||
|
||||
method getMessages*(
|
||||
driver: ArchiveDriver,
|
||||
includeData = true,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
endTime = none(Timestamp),
|
||||
hashes = newSeq[WakuMessageHash](0),
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getMessagesCount*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getPagesCount*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getPagesSize*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getDatabaseSize*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method performVacuum*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[void]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getOldestMessageTimestamp*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method getNewestMessageTimestamp*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method deleteMessagesOlderThanTimestamp*(
|
||||
driver: ArchiveDriver, ts: Timestamp
|
||||
): Future[ArchiveDriverResult[void]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method deleteOldestMessagesNotWithinLimit*(
|
||||
driver: ArchiveDriver, limit: int
|
||||
): Future[ArchiveDriverResult[void]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method decreaseDatabaseSize*(
|
||||
driver: ArchiveDriver, targetSizeInBytes: int64, forceRemoval: bool = false
|
||||
): Future[ArchiveDriverResult[void]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method close*(
|
||||
driver: ArchiveDriver
|
||||
): Future[ArchiveDriverResult[void]] {.base, async.} =
|
||||
discard
|
||||
|
||||
method existsTable*(
|
||||
driver: ArchiveDriver, tableName: string
|
||||
): Future[ArchiveDriverResult[bool]] {.base, async.} =
|
||||
discard
|
||||
waku/waku_archive_legacy/driver/builder.nim (new file, 125 lines)
@ -0,0 +1,125 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import results, chronicles, chronos
|
||||
import
|
||||
../driver,
|
||||
../../common/databases/dburl,
|
||||
../../common/databases/db_sqlite,
|
||||
../../common/error_handling,
|
||||
./sqlite_driver,
|
||||
./sqlite_driver/migrations as archive_driver_sqlite_migrations,
|
||||
./queue_driver
|
||||
|
||||
export sqlite_driver, queue_driver
|
||||
|
||||
when defined(postgres):
|
||||
import ## These imports add dependency with an external libpq library
|
||||
./postgres_driver/migrations as archive_postgres_driver_migrations,
|
||||
./postgres_driver
|
||||
export postgres_driver
|
||||
|
||||
proc new*(
|
||||
T: type ArchiveDriver,
|
||||
url: string,
|
||||
vacuum: bool,
|
||||
migrate: bool,
|
||||
maxNumConn: int,
|
||||
onFatalErrorAction: OnFatalErrorHandler,
|
||||
): Future[Result[T, string]] {.async.} =
|
||||
## url - string that defines the database
|
||||
## vacuum - if true, a cleanup operation will be applied to the database
|
||||
## migrate - if true, the database schema will be updated
|
||||
## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres)
|
||||
## onFatalErrorAction - called if, e.g., the connection with the db is lost
|
||||
|
||||
let dbUrlValidationRes = dburl.validateDbUrl(url)
|
||||
if dbUrlValidationRes.isErr():
|
||||
return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error)
|
||||
|
||||
let engineRes = dburl.getDbEngine(url)
|
||||
if engineRes.isErr():
|
||||
return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error)
|
||||
|
||||
let engine = engineRes.get()
|
||||
|
||||
case engine
|
||||
of "sqlite":
|
||||
let pathRes = dburl.getDbPath(url)
|
||||
if pathRes.isErr():
|
||||
return err("error get path in setupWakuArchiveDriver: " & pathRes.error)
|
||||
|
||||
let dbRes = SqliteDatabase.new(pathRes.get())
|
||||
if dbRes.isErr():
|
||||
return err("error in setupWakuArchiveDriver: " & dbRes.error)
|
||||
|
||||
let db = dbRes.get()
|
||||
|
||||
# SQLite vacuum
|
||||
let sqliteStatsRes = db.gatherSqlitePageStats()
|
||||
if sqliteStatsRes.isErr():
|
||||
return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)
|
||||
|
||||
let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
|
||||
debug "sqlite database page stats",
|
||||
pageSize = pageSize, pages = pageCount, freePages = freelistCount
|
||||
|
||||
if vacuum and (pageCount > 0 and freelistCount > 0):
|
||||
let vacuumRes = db.performSqliteVacuum()
|
||||
if vacuumRes.isErr():
|
||||
return err("error in vacuum sqlite: " & $vacuumRes.error)
|
||||
|
||||
# Database migration
|
||||
if migrate:
|
||||
let migrateRes = archive_driver_sqlite_migrations.migrate(db)
|
||||
if migrateRes.isErr():
|
||||
return err("error in migrate sqlite: " & $migrateRes.error)
|
||||
|
||||
debug "setting up sqlite waku archive driver"
|
||||
let res = SqliteDriver.new(db)
|
||||
if res.isErr():
|
||||
return err("failed to init sqlite archive driver: " & res.error)
|
||||
|
||||
return ok(res.get())
|
||||
of "postgres":
|
||||
when defined(postgres):
|
||||
let res = PostgresDriver.new(
|
||||
dbUrl = url,
|
||||
maxConnections = maxNumConn,
|
||||
onFatalErrorAction = onFatalErrorAction,
|
||||
)
|
||||
if res.isErr():
|
||||
return err("failed to init postgres archive driver: " & res.error)
|
||||
|
||||
let driver = res.get()
|
||||
|
||||
# Database migration
|
||||
if migrate:
|
||||
let migrateRes = await archive_postgres_driver_migrations.migrate(driver)
|
||||
if migrateRes.isErr():
|
||||
return err("ArchiveDriver build failed in migration: " & $migrateRes.error)
|
||||
|
||||
## This should be started once we make sure the 'messages' table exists
|
||||
## Hence, this should be run after the migration is completed.
|
||||
asyncSpawn driver.startPartitionFactory(onFatalErrorAction)
|
||||
|
||||
info "waiting for a partition to be created"
|
||||
for i in 0 ..< 100:
|
||||
if driver.containsAnyPartition():
|
||||
break
|
||||
await sleepAsync(chronos.milliseconds(100))
|
||||
|
||||
if not driver.containsAnyPartition():
|
||||
onFatalErrorAction("a partition could not be created")
|
||||
|
||||
return ok(driver)
|
||||
else:
|
||||
return err(
|
||||
"Postgres has been configured but not been compiled. Check compiler definitions."
|
||||
)
|
||||
else:
|
||||
debug "setting up in-memory waku archive driver"
|
||||
let driver = QueueDriver.new() # Defaults to a capacity of 25,000 messages
|
||||
return ok(driver)
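A hedged example of driving the builder above from application code; the SQLite URL and the error handling are placeholders, not part of this change:

proc setupLegacyDriver() {.async.} =
  ## Illustrative only: build a SQLite-backed legacy archive driver.
  proc onFatalError(errMsg: string) {.gcsafe, raises: [].} =
    quit("legacy archive driver failure: " & errMsg, QuitFailure)

  let driver = (
    await ArchiveDriver.new(
      url = "sqlite://store.sqlite3", # placeholder path
      vacuum = false,
      migrate = true,
      maxNumConn = 1, # only used by the postgres backend
      onFatalErrorAction = onFatalError,
    )
  ).valueOr:
    quit("could not create legacy archive driver: " & error, QuitFailure)

  discard driver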
|
||||
waku/waku_archive_legacy/driver/postgres_driver.nim (new file, 11 lines)
@ -0,0 +1,11 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
./postgres_driver/postgres_driver,
|
||||
./postgres_driver/partitions_manager,
|
||||
./postgres_driver/postgres_healthcheck
|
||||
|
||||
export postgres_driver, partitions_manager, postgres_healthcheck
|
||||
@ -0,0 +1,89 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import std/strutils, results, chronicles, chronos
|
||||
import
|
||||
../../../common/databases/common,
|
||||
../../../../migrations/message_store_postgres/pg_migration_manager,
|
||||
../postgres_driver
|
||||
|
||||
logScope:
|
||||
topics = "waku archive migration"
|
||||
|
||||
const SchemaVersion* = 6 # increase this when there is an update in the database schema
|
||||
|
||||
proc breakIntoStatements*(script: string): seq[string] =
|
||||
## Given a full migration script, which may contain a list of SQL statements,
## this proc splits it into the isolated statements that should be executed
## one after the other.
|
||||
var statements = newSeq[string]()
|
||||
|
||||
let lines = script.split('\n')
|
||||
|
||||
var simpleStmt: string
|
||||
var plSqlStatement: string
|
||||
var insidePlSqlScript = false
|
||||
for line in lines:
|
||||
if line.strip().len == 0:
|
||||
continue
|
||||
|
||||
if insidePlSqlScript:
|
||||
if line.contains("END $$"):
|
||||
## End of the Pl/SQL script
|
||||
plSqlStatement &= line
|
||||
statements.add(plSqlStatement)
|
||||
plSqlStatement = ""
|
||||
insidePlSqlScript = false
|
||||
continue
|
||||
else:
|
||||
plSqlStatement &= line & "\n"
|
||||
|
||||
if line.contains("DO $$"):
|
||||
## Beginning of the Pl/SQL script
|
||||
insidePlSqlScript = true
|
||||
plSqlStatement &= line & "\n"
|
||||
|
||||
if not insidePlSqlScript:
|
||||
if line.contains(';'):
|
||||
## End of simple statement
|
||||
simpleStmt &= line
|
||||
statements.add(simpleStmt)
|
||||
simpleStmt = ""
|
||||
else:
|
||||
simpleStmt &= line & "\n"
|
||||
|
||||
return statements
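For illustration, a rough sketch of how `breakIntoStatements` is expected to behave on a script that mixes simple statements with a Pl/SQL `DO $$ ... END $$` block; the SQL itself is made up:

when isMainModule:
  let script =
    """
CREATE TABLE IF NOT EXISTS example(id INTEGER);
DO $$
BEGIN
  RAISE NOTICE 'running a Pl/SQL block';
END $$;
UPDATE version SET version = 2 WHERE version = 1;
"""

  # Expected: three isolated statements, to be executed one after the other.
  for statement in script.breakIntoStatements():
    echo statement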
|
||||
|
||||
proc migrate*(
|
||||
driver: PostgresDriver, targetVersion = SchemaVersion
|
||||
): Future[DatabaseResult[void]] {.async.} =
|
||||
debug "starting message store's postgres database migration"
|
||||
|
||||
let currentVersion = (await driver.getCurrentVersion()).valueOr:
|
||||
return err("migrate error could not retrieve current version: " & $error)
|
||||
|
||||
if currentVersion == targetVersion:
|
||||
debug "database schema is up to date",
|
||||
currentVersion = currentVersion, targetVersion = targetVersion
|
||||
return ok()
|
||||
|
||||
info "database schema is outdated",
|
||||
currentVersion = currentVersion, targetVersion = targetVersion
|
||||
|
||||
# Load migration scripts
|
||||
let scripts = pg_migration_manager.getMigrationScripts(currentVersion, targetVersion)
|
||||
|
||||
# Run the migration scripts
|
||||
for script in scripts:
|
||||
for statement in script.breakIntoStatements():
|
||||
debug "executing migration statement", statement = statement
|
||||
|
||||
(await driver.performWriteQuery(statement)).isOkOr:
|
||||
error "failed to execute migration statement",
|
||||
statement = statement, error = error
|
||||
return err("failed to execute migration statement")
|
||||
|
||||
debug "migration statement executed succesfully", statement = statement
|
||||
|
||||
debug "finished message store's postgres database migration"
|
||||
|
||||
return ok()
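A hedged usage sketch of the migration entry point; `driver` is assumed to be an already-connected PostgresDriver:

proc migrateToLatest(driver: PostgresDriver) {.async.} =
  ## Illustrative only: bring the legacy message store schema up to SchemaVersion (6).
  (await driver.migrate()).isOkOr:
    quit("legacy store migration failed: " & error, QuitFailure)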
|
||||
@ -0,0 +1,102 @@
|
||||
## This module handles the creation and truncation of partition tables
## in order to limit the disk space occupied by the database.
##
## The created partitions are referenced by the 'storedAt' field.
|
||||
##
|
||||
|
||||
import std/deques
|
||||
import chronos, chronicles
|
||||
|
||||
logScope:
|
||||
topics = "waku archive partitions_manager"
|
||||
|
||||
## The time range has seconds resolution
|
||||
type TimeRange* = tuple[beginning: int64, `end`: int64]
|
||||
|
||||
type
|
||||
Partition = object
|
||||
name: string
|
||||
timeRange: TimeRange
|
||||
|
||||
PartitionManager* = ref object
|
||||
partitions: Deque[Partition]
|
||||
# FIFO of partition table names. The first is the oldest partition
|
||||
|
||||
proc new*(T: type PartitionManager): T =
|
||||
return PartitionManager()
|
||||
|
||||
proc getPartitionFromDateTime*(
|
||||
self: PartitionManager, targetMoment: int64
|
||||
): Result[Partition, string] =
|
||||
## Returns the partition that might store a message containing the passed timestamp.
## In other words, it simply returns the partition whose time range contains the given timestamp.
## targetMoment - the time of interest, measured in seconds since epoch.
|
||||
|
||||
if self.partitions.len == 0:
|
||||
return err("There are no partitions")
|
||||
|
||||
for partition in self.partitions:
|
||||
let timeRange = partition.timeRange
|
||||
|
||||
let beginning = timeRange.beginning
|
||||
let `end` = timeRange.`end`
|
||||
|
||||
if beginning <= targetMoment and targetMoment < `end`:
|
||||
return ok(partition)
|
||||
|
||||
return err("Couldn't find a partition table for given time: " & $targetMoment)
|
||||
|
||||
proc getNewestPartition*(self: PartitionManager): Result[Partition, string] =
|
||||
if self.partitions.len == 0:
|
||||
return err("there are no partitions allocated")
|
||||
|
||||
let newestPartition = self.partitions.peekLast
|
||||
return ok(newestPartition)
|
||||
|
||||
proc getOldestPartition*(self: PartitionManager): Result[Partition, string] =
|
||||
if self.partitions.len == 0:
|
||||
return err("there are no partitions allocated")
|
||||
|
||||
let oldestPartition = self.partitions.peekFirst
|
||||
return ok(oldestPartition)
|
||||
|
||||
proc addPartitionInfo*(
|
||||
self: PartitionManager, partitionName: string, beginning: int64, `end`: int64
|
||||
) =
|
||||
## The given partition range has seconds resolution.
|
||||
## We just store information about the newly added partition to keep track of it.
|
||||
let partitionInfo = Partition(name: partitionName, timeRange: (beginning, `end`))
|
||||
trace "Adding partition info"
|
||||
self.partitions.addLast(partitionInfo)
|
||||
|
||||
proc removeOldestPartitionName*(self: PartitionManager) =
|
||||
## Simply removes the oldest partition from the tracked/known partitions queue.
## Just remove it and ignore it.
|
||||
discard self.partitions.popFirst()
|
||||
|
||||
proc isEmpty*(self: PartitionManager): bool =
|
||||
return self.partitions.len == 0
|
||||
|
||||
proc getLastMoment*(partition: Partition): int64 =
|
||||
## Considering the time range covered by the partition, this
|
||||
## returns the `end` time (number of seconds since epoch) of such range.
|
||||
let lastTimeInSec = partition.timeRange.`end`
|
||||
return lastTimeInSec
|
||||
|
||||
proc getPartitionStartTimeInNanosec*(partition: Partition): int64 =
|
||||
return partition.timeRange.beginning * 1_000_000_000
|
||||
|
||||
proc containsMoment*(partition: Partition, time: int64): bool =
|
||||
## Returns true if the given moment is contained within the partition window,
|
||||
## 'false' otherwise.
|
||||
## time - number of seconds since epoch
|
||||
if partition.timeRange.beginning <= time and time < partition.timeRange.`end`:
|
||||
return true
|
||||
|
||||
return false
|
||||
|
||||
proc getName*(partition: Partition): string =
|
||||
return partition.name
|
||||
|
||||
func `==`*(a, b: Partition): bool {.inline.} =
|
||||
return a.name == b.name
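A minimal usage sketch of the partition bookkeeping above; the partition name and the time bounds are made-up values:

when isMainModule:
  let manager = PartitionManager.new()

  # Track a partition covering one hour, bounds expressed in seconds since epoch.
  manager.addPartitionInfo("messages_1717200000_1717203600", 1_717_200_000, 1_717_203_600)

  # A timestamp inside that window resolves to the tracked partition.
  let partition = manager.getPartitionFromDateTime(1_717_201_234).expect("partition exists")
  assert partition.getName() == "messages_1717200000_1717203600"
  assert partition.containsMoment(1_717_201_234)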
|
||||
waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim (new file, 1159 lines)
File diff suppressed because it is too large.
@ -0,0 +1,41 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos, results
|
||||
import ../../../common/databases/db_postgres, ../../../common/error_handling
|
||||
|
||||
## Simple query to validate that postgres is up and serving requests
|
||||
const HealthCheckQuery = "SELECT version();"
|
||||
const CheckConnectivityInterval = 60.seconds
|
||||
const MaxNumTrials = 20
|
||||
const TrialInterval = 1.seconds
|
||||
|
||||
proc checkConnectivity*(
|
||||
connPool: PgAsyncPool, onFatalErrorAction: OnFatalErrorHandler
|
||||
) {.async.} =
|
||||
while true:
|
||||
(await connPool.pgQuery(HealthCheckQuery)).isOkOr:
|
||||
## The connection failed once. Let's try reconnecting for a while.
|
||||
## Notice that the 'exec' proc tries to establish a new connection.
|
||||
|
||||
block errorBlock:
|
||||
## Force close all the opened connections. No need to close gracefully.
|
||||
(await connPool.resetConnPool()).isOkOr:
|
||||
onFatalErrorAction("checkConnectivity resetConnPool error: " & error)
|
||||
|
||||
var numTrial = 0
|
||||
while numTrial < MaxNumTrials:
|
||||
let res = await connPool.pgQuery(HealthCheckQuery)
|
||||
if res.isOk():
|
||||
## Connection resumed. Let's go back to the normal healthcheck.
|
||||
break errorBlock
|
||||
|
||||
await sleepAsync(TrialInterval)
|
||||
numTrial.inc()
|
||||
|
||||
## The connection couldn't be resumed. Let's inform the upper layers.
|
||||
onFatalErrorAction("postgres health check error: " & error)
|
||||
|
||||
await sleepAsync(CheckConnectivityInterval)
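A hedged sketch of wiring this health check into a driver; it assumes a connected PgAsyncPool and that OnFatalErrorHandler is a `proc(errMsg: string)` callback:

proc startHealthCheck*(connPool: PgAsyncPool) =
  ## Illustrative only: run the connectivity probe in the background.
  proc onFatalError(errMsg: string) {.gcsafe, raises: [].} =
    quit("postgres is unreachable: " & errMsg, QuitFailure)

  # Probes the pool once per minute; on failure it retries up to MaxNumTrials
  # times before reporting a fatal error through the handler.
  asyncSpawn checkConnectivity(connPool, onFatalError)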
|
||||
waku/waku_archive_legacy/driver/queue_driver.nim (new file, 8 lines)
@ -0,0 +1,8 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import ./queue_driver/queue_driver, ./queue_driver/index
|
||||
|
||||
export queue_driver, index
|
||||
91
waku/waku_archive_legacy/driver/queue_driver/index.nim
Normal file
@ -0,0 +1,91 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import stew/byteutils, nimcrypto/sha2
|
||||
import ../../../waku_core, ../../common
|
||||
|
||||
type Index* = object
|
||||
## This type contains the description of an Index used in the pagination of WakuMessages
|
||||
pubsubTopic*: string
|
||||
senderTime*: Timestamp # the time at which the message is generated
|
||||
receiverTime*: Timestamp
|
||||
digest*: MessageDigest # calculated over payload and content topic
|
||||
hash*: WakuMessageHash
|
||||
|
||||
proc compute*(
|
||||
T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic
|
||||
): T =
|
||||
## Takes a WakuMessage with received timestamp and returns its Index.
|
||||
let
|
||||
digest = computeDigest(msg)
|
||||
senderTime = msg.timestamp
|
||||
hash = computeMessageHash(pubsubTopic, msg)
|
||||
|
||||
return Index(
|
||||
pubsubTopic: pubsubTopic,
|
||||
senderTime: senderTime,
|
||||
receiverTime: receivedTime,
|
||||
digest: digest,
|
||||
hash: hash,
|
||||
)
|
||||
|
||||
proc tohistoryCursor*(index: Index): ArchiveCursor =
|
||||
return ArchiveCursor(
|
||||
pubsubTopic: index.pubsubTopic,
|
||||
senderTime: index.senderTime,
|
||||
storeTime: index.receiverTime,
|
||||
digest: index.digest,
|
||||
hash: index.hash,
|
||||
)
|
||||
|
||||
proc toIndex*(index: ArchiveCursor): Index =
|
||||
return Index(
|
||||
pubsubTopic: index.pubsubTopic,
|
||||
senderTime: index.senderTime,
|
||||
receiverTime: index.storeTime,
|
||||
digest: index.digest,
|
||||
hash: index.hash,
|
||||
)
|
||||
|
||||
proc `==`*(x, y: Index): bool =
|
||||
## receiverTime plays no role in index equality
|
||||
return
|
||||
(
|
||||
(x.senderTime == y.senderTime) and (x.digest == y.digest) and
|
||||
(x.pubsubTopic == y.pubsubTopic)
|
||||
) or (x.hash == y.hash) # this applies to store v3 queries only
|
||||
|
||||
proc cmp*(x, y: Index): int =
|
||||
## compares x and y
|
||||
## returns 0 if they are equal
|
||||
## returns -1 if x < y
|
||||
## returns 1 if x > y
|
||||
##
|
||||
## Default sorting order priority is:
|
||||
## 1. senderTimestamp
|
||||
## 2. receiverTimestamp (a fallback only if senderTimestamp unset on either side, and all other fields unequal)
|
||||
## 3. message digest
|
||||
## 4. pubsubTopic
|
||||
|
||||
if x == y:
|
||||
# Quick exit ensures receiver time does not affect index equality
|
||||
return 0
|
||||
|
||||
# Timestamp has a higher priority for comparison
|
||||
let
|
||||
# Use receiverTime where senderTime is unset
|
||||
xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime
|
||||
yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime
|
||||
|
||||
let timecmp = cmp(xTimestamp, yTimestamp)
|
||||
if timecmp != 0:
|
||||
return timecmp
|
||||
|
||||
# Continue only when timestamps are equal
|
||||
let digestcmp = cmp(x.digest.data, y.digest.data)
|
||||
if digestcmp != 0:
|
||||
return digestcmp
|
||||
|
||||
return cmp(x.pubsubTopic, y.pubsubTopic)
|
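# Illustrative ordering (hypothetical values): for indices A and B with
# A.senderTime = 100, B.senderTime = 0 and B.receiverTime = 90, the effective
# timestamps are 100 and 90, so cmp(A, B) > 0 and B sorts first. Only when the
# effective timestamps are equal do the digest and then the pubsubTopic decide.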
||||
363
waku/waku_archive_legacy/driver/queue_driver/queue_driver.nim
Normal file
@ -0,0 +1,363 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/options, results, stew/sorted_set, chronicles, chronos
|
||||
import ../../../waku_core, ../../common, ../../driver, ./index
|
||||
|
||||
logScope:
|
||||
topics = "waku archive queue_store"
|
||||
|
||||
const QueueDriverDefaultMaxCapacity* = 25_000
|
||||
|
||||
type
|
||||
QueryFilterMatcher =
|
||||
proc(index: Index, msg: WakuMessage): bool {.gcsafe, raises: [], closure.}
|
||||
|
||||
QueueDriver* = ref object of ArchiveDriver
|
||||
## Bounded repository for indexed messages
|
||||
##
|
||||
## The store queue will keep messages up to its
|
||||
## configured capacity. As soon as this capacity
|
||||
## is reached and a new message is added, the oldest
|
||||
## item will be removed to make space for the new one.
|
||||
## This implies both a `delete` and `add` operation
|
||||
## for new items.
|
||||
|
||||
# TODO: a circular/ring buffer may be a more efficient implementation
|
||||
items: SortedSet[Index, WakuMessage] # sorted set of stored messages
|
||||
capacity: int # Maximum amount of messages to keep
|
||||
|
||||
QueueDriverErrorKind {.pure.} = enum
|
||||
INVALID_CURSOR
|
||||
|
||||
QueueDriverGetPageResult = Result[seq[ArchiveRow], QueueDriverErrorKind]
|
||||
|
||||
proc `$`(error: QueueDriverErrorKind): string =
|
||||
case error
|
||||
of INVALID_CURSOR: "invalid_cursor"
|
||||
|
||||
### Helpers
|
||||
|
||||
proc walkToCursor(
|
||||
w: SortedSetWalkRef[Index, WakuMessage], startCursor: Index, forward: bool
|
||||
): SortedSetResult[Index, WakuMessage] =
|
||||
## Walk until we find the cursor
|
||||
## TODO: Improve performance here with a binary/tree search
|
||||
|
||||
var nextItem =
|
||||
if forward:
|
||||
w.first()
|
||||
else:
|
||||
w.last()
|
||||
|
||||
## Fast forward until we reach the startCursor
|
||||
while nextItem.isOk():
|
||||
if nextItem.value.key == startCursor:
|
||||
break
|
||||
|
||||
# Not yet at cursor. Continue advancing
|
||||
nextItem =
|
||||
if forward:
|
||||
w.next()
|
||||
else:
|
||||
w.prev()
|
||||
|
||||
return nextItem
|
||||
|
||||
#### API
|
||||
|
||||
proc new*(T: type QueueDriver, capacity: int = QueueDriverDefaultMaxCapacity): T =
|
||||
var items = SortedSet[Index, WakuMessage].init()
|
||||
return QueueDriver(items: items, capacity: capacity)
|
||||
|
||||
proc contains*(driver: QueueDriver, index: Index): bool =
|
||||
## Return `true` if the store queue already contains the `index`, `false` otherwise.
|
||||
return driver.items.eq(index).isOk()
|
||||
|
||||
proc len*(driver: QueueDriver): int {.noSideEffect.} =
|
||||
return driver.items.len
|
||||
|
||||
proc getPage(
|
||||
driver: QueueDriver,
|
||||
pageSize: uint = 0,
|
||||
forward: bool = true,
|
||||
cursor: Option[Index] = none(Index),
|
||||
predicate: QueryFilterMatcher = nil,
|
||||
): QueueDriverGetPageResult {.raises: [].} =
|
||||
## Populate a single page in the direction given by `forward`
|
||||
## Start at the `startCursor` (exclusive), or first entry (inclusive) if not defined.
|
||||
## Page size must not exceed `maxPageSize`
|
||||
## Each entry must match the `predicate`, when one is provided
|
||||
var outSeq: seq[ArchiveRow]
|
||||
|
||||
var w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
|
||||
defer:
|
||||
w.destroy()
|
||||
|
||||
var currentEntry: SortedSetResult[Index, WakuMessage]
|
||||
|
||||
# Find starting entry
|
||||
if cursor.isSome():
|
||||
let cursorEntry = w.walkToCursor(cursor.get(), forward)
|
||||
if cursorEntry.isErr():
|
||||
return err(QueueDriverErrorKind.INVALID_CURSOR)
|
||||
|
||||
# Advance walker once more
|
||||
currentEntry =
|
||||
if forward:
|
||||
w.next()
|
||||
else:
|
||||
w.prev()
|
||||
else:
|
||||
# Start from the beginning of the queue
|
||||
currentEntry =
|
||||
if forward:
|
||||
w.first()
|
||||
else:
|
||||
w.last()
|
||||
|
||||
trace "Starting page query", currentEntry = currentEntry
|
||||
|
||||
## This loop walks forward over the queue:
|
||||
## 1. from the given cursor (or first/last entry, if not provided)
|
||||
## 2. adds entries matching the predicate function to output page
|
||||
## 3. until either the end of the queue or maxPageSize is reached
|
||||
var numberOfItems: uint = 0
|
||||
while currentEntry.isOk() and numberOfItems < pageSize:
|
||||
trace "Continuing page query",
|
||||
currentEntry = currentEntry, numberOfItems = numberOfItems
|
||||
|
||||
let
|
||||
key = currentEntry.value.key
|
||||
data = currentEntry.value.data
|
||||
|
||||
if predicate.isNil() or predicate(key, data):
|
||||
numberOfItems += 1
|
||||
|
||||
outSeq.add(
|
||||
(key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash)
|
||||
)
|
||||
|
||||
currentEntry =
|
||||
if forward:
|
||||
w.next()
|
||||
else:
|
||||
w.prev()
|
||||
|
||||
trace "Successfully retrieved page", len = outSeq.len
|
||||
|
||||
return ok(outSeq)
|
||||
|
||||
## --- SortedSet accessors ---
|
||||
|
||||
iterator fwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
|
||||
## Forward iterator over the entire store queue
|
||||
var
|
||||
w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
|
||||
res = w.first()
|
||||
|
||||
while res.isOk():
|
||||
yield (res.value.key, res.value.data)
|
||||
res = w.next()
|
||||
|
||||
w.destroy()
|
||||
|
||||
iterator bwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
|
||||
## Backwards iterator over the entire store queue
|
||||
var
|
||||
w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
|
||||
res = w.last()
|
||||
|
||||
while res.isOk():
|
||||
yield (res.value.key, res.value.data)
|
||||
res = w.prev()
|
||||
|
||||
w.destroy()
|
||||
|
||||
proc first*(driver: QueueDriver): ArchiveDriverResult[Index] =
|
||||
var
|
||||
w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
|
||||
res = w.first()
|
||||
w.destroy()
|
||||
|
||||
if res.isErr():
|
||||
return err("Not found")
|
||||
|
||||
return ok(res.value.key)
|
||||
|
||||
proc last*(driver: QueueDriver): ArchiveDriverResult[Index] =
|
||||
var
|
||||
w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
|
||||
res = w.last()
|
||||
w.destroy()
|
||||
|
||||
if res.isErr():
|
||||
return err("Not found")
|
||||
|
||||
return ok(res.value.key)
|
||||
|
||||
## --- Queue API ---
|
||||
|
||||
proc add*(
|
||||
driver: QueueDriver, index: Index, msg: WakuMessage
|
||||
): ArchiveDriverResult[void] =
|
||||
## Add a message to the queue
|
||||
##
|
||||
## If we're at capacity, the oldest (first) item is removed to make room for the new one
|
||||
if driver.contains(index):
|
||||
trace "could not add item to store queue. Index already exists", index = index
|
||||
return err("duplicate")
|
||||
|
||||
# TODO: the below delete block can be removed if we convert to circular buffer
|
||||
if driver.items.len >= driver.capacity:
|
||||
var
|
||||
w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
|
||||
firstItem = w.first
|
||||
|
||||
if cmp(index, firstItem.value.key) < 0:
|
||||
# When at capacity, we won't add if message index is smaller (older) than our oldest item
|
||||
w.destroy # Clean up walker
|
||||
return err("too_old")
|
||||
|
||||
discard driver.items.delete(firstItem.value.key)
|
||||
w.destroy # better to destroy walker after a delete operation
|
||||
|
||||
driver.items.insert(index).value.data = msg
|
||||
|
||||
return ok()
|
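# Hedged usage sketch: with a small queue such as QueueDriver.new(capacity = 3),
# adding a fourth message first evicts the oldest index, while a message whose
# index sorts before that oldest entry is rejected with "too_old" and an index
# that is already present is rejected with "duplicate".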
||||
|
||||
method put*(
|
||||
driver: QueueDriver,
|
||||
pubsubTopic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
digest: MessageDigest,
|
||||
messageHash: WakuMessageHash,
|
||||
receivedTime: Timestamp,
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
let index = Index(
|
||||
pubsubTopic: pubsubTopic,
|
||||
senderTime: message.timestamp,
|
||||
receiverTime: receivedTime,
|
||||
digest: digest,
|
||||
hash: messageHash,
|
||||
)
|
||||
|
||||
return driver.add(index, message)
|
||||
|
||||
method getAllMessages*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
|
||||
# TODO: Implement this message_store method
|
||||
return err("interface method not implemented")
|
||||
|
||||
method existsTable*(
|
||||
driver: QueueDriver, tableName: string
|
||||
): Future[ArchiveDriverResult[bool]] {.async.} =
|
||||
return err("interface method not implemented")
|
||||
|
||||
method getMessages*(
|
||||
driver: QueueDriver,
|
||||
includeData = true,
|
||||
contentTopic: seq[ContentTopic] = @[],
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
endTime = none(Timestamp),
|
||||
hashes: seq[WakuMessageHash] = @[],
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
|
||||
let cursor = cursor.map(toIndex)
|
||||
|
||||
let matchesQuery: QueryFilterMatcher =
|
||||
func (index: Index, msg: WakuMessage): bool =
|
||||
if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get():
|
||||
return false
|
||||
|
||||
if contentTopic.len > 0 and msg.contentTopic notin contentTopic:
|
||||
return false
|
||||
|
||||
if startTime.isSome() and msg.timestamp < startTime.get():
|
||||
return false
|
||||
|
||||
if endTime.isSome() and msg.timestamp > endTime.get():
|
||||
return false
|
||||
|
||||
if hashes.len > 0 and index.hash notin hashes:
|
||||
return false
|
||||
|
||||
return true
|
||||
|
||||
var pageRes: QueueDriverGetPageResult
|
||||
try:
|
||||
pageRes = driver.getPage(maxPageSize, ascendingOrder, cursor, matchesQuery)
|
||||
except CatchableError, Exception:
|
||||
return err(getCurrentExceptionMsg())
|
||||
|
||||
if pageRes.isErr():
|
||||
return err($pageRes.error)
|
||||
|
||||
return ok(pageRes.value)
|
||||
|
||||
method getMessagesCount*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return ok(int64(driver.len()))
|
||||
|
||||
method getPagesCount*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return ok(int64(driver.len()))
|
||||
|
||||
method getPagesSize*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return ok(int64(driver.len()))
|
||||
|
||||
method getDatabaseSize*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return ok(int64(driver.len()))
|
||||
|
||||
method performVacuum*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
return err("interface method not implemented")
|
||||
|
||||
method getOldestMessageTimestamp*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return driver.first().map(
|
||||
proc(index: Index): Timestamp =
|
||||
index.receiverTime
|
||||
)
|
||||
|
||||
method getNewestMessageTimestamp*(
|
||||
driver: QueueDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return driver.last().map(
|
||||
proc(index: Index): Timestamp =
|
||||
index.receiverTime
|
||||
)
|
||||
|
||||
method deleteMessagesOlderThanTimestamp*(
|
||||
driver: QueueDriver, ts: Timestamp
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
# TODO: Implement this message_store method
|
||||
return err("interface method not implemented")
|
||||
|
||||
method deleteOldestMessagesNotWithinLimit*(
|
||||
driver: QueueDriver, limit: int
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
# TODO: Implement this message_store method
|
||||
return err("interface method not implemented")
|
||||
|
||||
method decreaseDatabaseSize*(
|
||||
driver: QueueDriver, targetSizeInBytes: int64, forceRemoval: bool = false
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
return err("interface method not implemented")
|
||||
|
||||
method close*(driver: QueueDriver): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
return ok()
|
||||
8
waku/waku_archive_legacy/driver/sqlite_driver.nim
Normal file
@ -0,0 +1,8 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import ./sqlite_driver/sqlite_driver
|
||||
|
||||
export sqlite_driver
|
||||
@ -1,4 +1,7 @@
|
||||
{.push raises: [].}
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import ../../../waku_core, ../../common
|
||||
|
||||
74
waku/waku_archive_legacy/driver/sqlite_driver/migrations.nim
Normal file
@ -0,0 +1,74 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[tables, strutils, os], results, chronicles, sqlite3_abi # sqlite3_column_int64
|
||||
import ../../../common/databases/db_sqlite, ../../../common/databases/common
|
||||
|
||||
logScope:
|
||||
topics = "waku archive migration"
|
||||
|
||||
const SchemaVersion* = 9 # increase this when there is an update in the database schema
|
||||
|
||||
template projectRoot(): string =
|
||||
currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".."
|
||||
|
||||
const MessageStoreMigrationPath: string = projectRoot / "migrations" / "message_store"
|
||||
|
||||
proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] =
|
||||
## Temporary proc created to analyse whether the table actually belongs to SchemaVersion 7.
|
||||
##
|
||||
## During many nwaku versions, 0.14.0 until 0.18.0, the SchemaVersion wasn't set or checked.
|
||||
## Docker `nwaku` nodes that started on any of these versions, 0.14.0 until 0.18.0, ended up
|
||||
## with this discrepancy: `user_version` == 0 (not set) but a Message table with SchemaVersion 7 structure.
|
||||
##
|
||||
## We found issues where `user_version` (SchemaVersion) was set to 0 in the database even though
|
||||
## its schema structure reflected SchemaVersion 7. In those cases, when `nwaku` restarted to
|
||||
## apply the migration scripts (in 0.19.0) the node didn't start properly because it tried to
|
||||
## migrate a database that already had the Schema structure #7, so it failed when changing the PK.
|
||||
##
|
||||
## TODO: This was added in version 0.20.0. We might remove this in version 0.30.0, as we
|
||||
## can assume that most users are already on version 0.20.0 or later.
|
||||
|
||||
var pkColumns = newSeq[string]()
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let colName = cstring sqlite3_column_text(s, 0)
|
||||
pkColumns.add($colName)
|
||||
|
||||
let query =
|
||||
"""SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;"""
|
||||
let res = db.query(query, queryRowCallback)
|
||||
if res.isErr():
|
||||
return err("failed to determine the current SchemaVersion: " & $res.error)
|
||||
|
||||
if pkColumns == @["pubsubTopic", "id", "storedAt"]:
|
||||
return ok(true)
|
||||
else:
|
||||
info "Not considered schema version 7"
|
||||
return ok(false)
|
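# Illustrative outcome: a database created by nwaku 0.14.0 - 0.18.0 keys the Message
# table by (pubsubTopic, id, storedAt), so pkColumns becomes
# @["pubsubTopic", "id", "storedAt"] and the proc reports true even though
# user_version is still 0; any other primary-key layout reports false.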
||||
|
||||
proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult[void] =
|
||||
## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then
|
||||
## it runs migration scripts if the `user_version` is outdated. The `migrationScriptsDir` path
|
||||
## points to the directory holding the migration scripts. Once the db is updated, it sets the
|
||||
## `user_version` to the `targetVersion`.
|
||||
##
|
||||
## If no `targetVersion` is provided, it defaults to `SchemaVersion`.
|
||||
##
|
||||
## NOTE: Down migration is not currently supported
|
||||
debug "starting message store's sqlite database migration"
|
||||
|
||||
let userVersion = ?db.getUserVersion()
|
||||
let isSchemaVersion7 = ?db.isSchemaVersion7()
|
||||
|
||||
if userVersion == 0'i64 and isSchemaVersion7:
|
||||
info "We found user_version 0 but the database schema reflects the user_version 7"
|
||||
## Force the correct schema version
|
||||
?db.setUserVersion(7)
|
||||
|
||||
let migrationRes =
|
||||
migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath)
|
||||
if migrationRes.isErr():
|
||||
return err("failed to execute migration scripts: " & migrationRes.error)
|
||||
|
||||
debug "finished message store's sqlite database migration"
|
||||
return ok()
|
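# Hedged usage sketch: callers typically run this right after opening the database,
#   db.migrate().isOkOr:
#     return err("failed to migrate the message store: " & error)
# which brings user_version up to SchemaVersion (9 here); passing an explicit
# targetVersion stops the migration at that schema version instead.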
||||
744
waku/waku_archive_legacy/driver/sqlite_driver/queries.nim
Normal file
@ -0,0 +1,744 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options, sequtils], stew/byteutils, sqlite3_abi, results
|
||||
import
|
||||
../../../common/databases/db_sqlite,
|
||||
../../../common/databases/common,
|
||||
../../../waku_core,
|
||||
./cursor
|
||||
|
||||
const DbTable = "Message"
|
||||
|
||||
type SqlQueryStr = string
|
||||
|
||||
### SQLite column helper methods
|
||||
|
||||
proc queryRowWakuMessageCallback(
|
||||
s: ptr sqlite3_stmt,
|
||||
contentTopicCol, payloadCol, versionCol, senderTimestampCol, metaCol: cint,
|
||||
): WakuMessage =
|
||||
let
|
||||
topic = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, contentTopicCol))
|
||||
topicLength = sqlite3_column_bytes(s, contentTopicCol)
|
||||
contentTopic = string.fromBytes(@(toOpenArray(topic, 0, topicLength - 1)))
|
||||
|
||||
p = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, payloadCol))
|
||||
m = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, metaCol))
|
||||
|
||||
payloadLength = sqlite3_column_bytes(s, payloadCol)
|
||||
metaLength = sqlite3_column_bytes(s, metaCol)
|
||||
payload = @(toOpenArray(p, 0, payloadLength - 1))
|
||||
version = sqlite3_column_int64(s, versionCol)
|
||||
senderTimestamp = sqlite3_column_int64(s, senderTimestampCol)
|
||||
meta = @(toOpenArray(m, 0, metaLength - 1))
|
||||
|
||||
return WakuMessage(
|
||||
contentTopic: ContentTopic(contentTopic),
|
||||
payload: payload,
|
||||
version: uint32(version),
|
||||
timestamp: Timestamp(senderTimestamp),
|
||||
meta: meta,
|
||||
)
|
||||
|
||||
proc queryRowReceiverTimestampCallback(
|
||||
s: ptr sqlite3_stmt, storedAtCol: cint
|
||||
): Timestamp =
|
||||
let storedAt = sqlite3_column_int64(s, storedAtCol)
|
||||
return Timestamp(storedAt)
|
||||
|
||||
proc queryRowPubsubTopicCallback(
|
||||
s: ptr sqlite3_stmt, pubsubTopicCol: cint
|
||||
): PubsubTopic =
|
||||
let
|
||||
pubsubTopicPointer =
|
||||
cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, pubsubTopicCol))
|
||||
pubsubTopicLength = sqlite3_column_bytes(s, pubsubTopicCol)
|
||||
pubsubTopic =
|
||||
string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength - 1)))
|
||||
|
||||
return pubsubTopic
|
||||
|
||||
proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] =
|
||||
let
|
||||
digestPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, digestCol))
|
||||
digestLength = sqlite3_column_bytes(s, digestCol)
|
||||
digest = @(toOpenArray(digestPointer, 0, digestLength - 1))
|
||||
|
||||
return digest
|
||||
|
||||
proc queryRowWakuMessageHashCallback(
|
||||
s: ptr sqlite3_stmt, hashCol: cint
|
||||
): WakuMessageHash =
|
||||
let
|
||||
hashPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, hashCol))
|
||||
hashLength = sqlite3_column_bytes(s, hashCol)
|
||||
hash = fromBytes(toOpenArray(hashPointer, 0, hashLength - 1))
|
||||
|
||||
return hash
|
||||
|
||||
### SQLite queries
|
||||
|
||||
## Create table
|
||||
|
||||
proc createTableQuery(table: string): SqlQueryStr =
|
||||
"CREATE TABLE IF NOT EXISTS " & table & " (" & " pubsubTopic BLOB NOT NULL," &
|
||||
" contentTopic BLOB NOT NULL," & " payload BLOB," & " version INTEGER NOT NULL," &
|
||||
" timestamp INTEGER NOT NULL," & " id BLOB," & " messageHash BLOB," &
|
||||
" storedAt INTEGER NOT NULL," & " meta BLOB," &
|
||||
" CONSTRAINT messageIndex PRIMARY KEY (messageHash)" & ") WITHOUT ROWID;"
|
||||
|
||||
proc createTable*(db: SqliteDatabase): DatabaseResult[void] =
|
||||
let query = createTableQuery(DbTable)
|
||||
discard
|
||||
?db.query(
|
||||
query,
|
||||
proc(s: ptr sqlite3_stmt) =
|
||||
discard
|
||||
,
|
||||
)
|
||||
return ok()
|
||||
|
||||
## Create indices
|
||||
|
||||
proc createOldestMessageTimestampIndexQuery(table: string): SqlQueryStr =
|
||||
"CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (storedAt);"
|
||||
|
||||
proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void] =
|
||||
let query = createOldestMessageTimestampIndexQuery(DbTable)
|
||||
discard
|
||||
?db.query(
|
||||
query,
|
||||
proc(s: ptr sqlite3_stmt) =
|
||||
discard
|
||||
,
|
||||
)
|
||||
return ok()
|
||||
|
||||
proc createHistoryQueryIndexQuery(table: string): SqlQueryStr =
|
||||
"CREATE INDEX IF NOT EXISTS i_query ON " & table &
|
||||
" (contentTopic, pubsubTopic, storedAt, id);"
|
||||
|
||||
proc createHistoryQueryIndex*(db: SqliteDatabase): DatabaseResult[void] =
|
||||
let query = createHistoryQueryIndexQuery(DbTable)
|
||||
discard
|
||||
?db.query(
|
||||
query,
|
||||
proc(s: ptr sqlite3_stmt) =
|
||||
discard
|
||||
,
|
||||
)
|
||||
return ok()
|
||||
|
||||
## Insert message
|
||||
type InsertMessageParams* = (
|
||||
seq[byte],
|
||||
seq[byte],
|
||||
Timestamp,
|
||||
seq[byte],
|
||||
seq[byte],
|
||||
seq[byte],
|
||||
int64,
|
||||
Timestamp,
|
||||
seq[byte],
|
||||
)
|
||||
|
||||
proc insertMessageQuery(table: string): SqlQueryStr =
|
||||
return
|
||||
"INSERT INTO " & table &
|
||||
"(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp, meta)" &
|
||||
" VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);"
|
||||
|
||||
proc prepareInsertMessageStmt*(
|
||||
db: SqliteDatabase
|
||||
): SqliteStmt[InsertMessageParams, void] =
|
||||
let query = insertMessageQuery(DbTable)
|
||||
return
|
||||
db.prepareStmt(query, InsertMessageParams, void).expect("this is a valid statement")
|
||||
|
||||
## Count table messages
|
||||
|
||||
proc countMessagesQuery(table: string): SqlQueryStr =
|
||||
return "SELECT COUNT(*) FROM " & table
|
||||
|
||||
proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
|
||||
var count: int64
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
count = sqlite3_column_int64(s, 0)
|
||||
|
||||
let query = countMessagesQuery(DbTable)
|
||||
let res = db.query(query, queryRowCallback)
|
||||
if res.isErr():
|
||||
return err("failed to count number of messages in the database")
|
||||
|
||||
return ok(count)
|
||||
|
||||
## Get oldest message receiver timestamp
|
||||
|
||||
proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr =
|
||||
return "SELECT MIN(storedAt) FROM " & table
|
||||
|
||||
proc selectOldestReceiverTimestamp*(
|
||||
db: SqliteDatabase
|
||||
): DatabaseResult[Timestamp] {.inline.} =
|
||||
var timestamp: Timestamp
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
timestamp = queryRowReceiverTimestampCallback(s, 0)
|
||||
|
||||
let query = selectOldestMessageTimestampQuery(DbTable)
|
||||
let res = db.query(query, queryRowCallback)
|
||||
if res.isErr():
|
||||
return err("failed to get the oldest receiver timestamp from the database")
|
||||
|
||||
return ok(timestamp)
|
||||
|
||||
## Get newest message receiver timestamp
|
||||
|
||||
proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr =
|
||||
return "SELECT MAX(storedAt) FROM " & table
|
||||
|
||||
proc selectNewestReceiverTimestamp*(
|
||||
db: SqliteDatabase
|
||||
): DatabaseResult[Timestamp] {.inline.} =
|
||||
var timestamp: Timestamp
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
timestamp = queryRowReceiverTimestampCallback(s, 0)
|
||||
|
||||
let query = selectNewestMessageTimestampQuery(DbTable)
|
||||
let res = db.query(query, queryRowCallback)
|
||||
if res.isErr():
|
||||
return err("failed to get the newest receiver timestamp from the database")
|
||||
|
||||
return ok(timestamp)
|
||||
|
||||
## Delete messages older than timestamp
|
||||
|
||||
proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr =
|
||||
return "DELETE FROM " & table & " WHERE storedAt < " & $ts
|
||||
|
||||
proc deleteMessagesOlderThanTimestamp*(
|
||||
db: SqliteDatabase, ts: int64
|
||||
): DatabaseResult[void] =
|
||||
let query = deleteMessagesOlderThanTimestampQuery(DbTable, ts)
|
||||
discard
|
||||
?db.query(
|
||||
query,
|
||||
proc(s: ptr sqlite3_stmt) =
|
||||
discard
|
||||
,
|
||||
)
|
||||
return ok()
|
||||
|
||||
## Delete oldest messages not within limit
|
||||
|
||||
proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr =
|
||||
return
|
||||
"DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" &
|
||||
" SELECT storedAt, id, pubsubTopic FROM " & table &
|
||||
" ORDER BY storedAt DESC, id DESC" & " LIMIT " & $limit & ");"
|
||||
|
||||
proc deleteOldestMessagesNotWithinLimit*(
|
||||
db: SqliteDatabase, limit: int
|
||||
): DatabaseResult[void] =
|
||||
# NOTE: The word `limit` here refers to the store capacity, i.e. the maximum number of messages allowed
|
||||
let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit = limit)
|
||||
discard
|
||||
?db.query(
|
||||
query,
|
||||
proc(s: ptr sqlite3_stmt) =
|
||||
discard
|
||||
,
|
||||
)
|
||||
return ok()
|
||||
|
||||
## Select all messages
|
||||
|
||||
proc selectAllMessagesQuery(table: string): SqlQueryStr =
|
||||
return
|
||||
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" &
|
||||
" FROM " & table & " ORDER BY storedAt ASC"
|
||||
|
||||
proc selectAllMessages*(
|
||||
db: SqliteDatabase
|
||||
): DatabaseResult[
|
||||
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
] {.gcsafe.} =
|
||||
## Retrieve all messages from the store.
|
||||
var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
|
||||
wakuMessage = queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 1,
|
||||
payloadCol = 2,
|
||||
versionCol = 4,
|
||||
senderTimestampCol = 5,
|
||||
metaCol = 8,
|
||||
)
|
||||
digest = queryRowDigestCallback(s, digestCol = 6)
|
||||
storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
|
||||
|
||||
rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash))
|
||||
|
||||
let query = selectAllMessagesQuery(DbTable)
|
||||
let res = db.query(query, queryRowCallback)
|
||||
if res.isErr():
|
||||
return err(res.error())
|
||||
|
||||
return ok(rows)
|
||||
|
||||
## Select messages by history query with limit
|
||||
|
||||
proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
|
||||
let whereSeq = @clauses.filterIt(it.isSome()).mapIt(it.get())
|
||||
if whereSeq.len <= 0:
|
||||
return none(string)
|
||||
|
||||
var where: string = whereSeq[0]
|
||||
for clause in whereSeq[1 ..^ 1]:
|
||||
where &= " AND " & clause
|
||||
return some(where)
|
||||
|
||||
proc whereClausev2(
|
||||
cursor: bool,
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
contentTopic: seq[ContentTopic],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
ascending: bool,
|
||||
): Option[string] {.deprecated.} =
|
||||
let cursorClause =
|
||||
if cursor:
|
||||
let comp = if ascending: ">" else: "<"
|
||||
|
||||
some("(storedAt, id) " & comp & " (?, ?)")
|
||||
else:
|
||||
none(string)
|
||||
|
||||
let pubsubTopicClause =
|
||||
if pubsubTopic.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("pubsubTopic = (?)")
|
||||
|
||||
let contentTopicClause =
|
||||
if contentTopic.len <= 0:
|
||||
none(string)
|
||||
else:
|
||||
var where = "contentTopic IN ("
|
||||
where &= "?"
|
||||
for _ in 1 ..< contentTopic.len:
|
||||
where &= ", ?"
|
||||
where &= ")"
|
||||
some(where)
|
||||
|
||||
let startTimeClause =
|
||||
if startTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt >= (?)")
|
||||
|
||||
let endTimeClause =
|
||||
if endTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt <= (?)")
|
||||
|
||||
return combineClauses(
|
||||
cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause
|
||||
)
|
||||
|
||||
proc selectMessagesWithLimitQueryv2(
|
||||
table: string, where: Option[string], limit: uint, ascending = true, v3 = false
|
||||
): SqlQueryStr {.deprecated.} =
|
||||
let order = if ascending: "ASC" else: "DESC"
|
||||
|
||||
var query: string
|
||||
|
||||
query =
|
||||
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta"
|
||||
query &= " FROM " & table
|
||||
|
||||
if where.isSome():
|
||||
query &= " WHERE " & where.get()
|
||||
|
||||
query &= " ORDER BY storedAt " & order & ", id " & order
|
||||
|
||||
query &= " LIMIT " & $limit & ";"
|
||||
|
||||
return query
|
||||
|
||||
proc prepareStmt(
|
||||
db: SqliteDatabase, stmt: string
|
||||
): DatabaseResult[SqliteStmt[void, void]] =
|
||||
var s: RawStmtPtr
|
||||
checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
|
||||
return ok(SqliteStmt[void, void](s))
|
||||
|
||||
proc execSelectMessagesV2WithLimitStmt(
|
||||
s: SqliteStmt,
|
||||
cursor: Option[DbCursor],
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
contentTopic: seq[ContentTopic],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
onRowCallback: DataProc,
|
||||
): DatabaseResult[void] {.deprecated.} =
|
||||
let s = RawStmtPtr(s)
|
||||
|
||||
# Bind params
|
||||
var paramIndex = 1
|
||||
|
||||
if cursor.isSome():
|
||||
let (storedAt, id, _) = cursor.get()
|
||||
checkErr bindParam(s, paramIndex, storedAt)
|
||||
paramIndex += 1
|
||||
checkErr bindParam(s, paramIndex, id)
|
||||
paramIndex += 1
|
||||
|
||||
if pubsubTopic.isSome():
|
||||
let pubsubTopic = toBytes(pubsubTopic.get())
|
||||
checkErr bindParam(s, paramIndex, pubsubTopic)
|
||||
paramIndex += 1
|
||||
|
||||
for topic in contentTopic:
|
||||
checkErr bindParam(s, paramIndex, topic.toBytes())
|
||||
paramIndex += 1
|
||||
|
||||
if startTime.isSome():
|
||||
let time = startTime.get()
|
||||
checkErr bindParam(s, paramIndex, time)
|
||||
paramIndex += 1
|
||||
|
||||
if endTime.isSome():
|
||||
let time = endTime.get()
|
||||
checkErr bindParam(s, paramIndex, time)
|
||||
paramIndex += 1
|
||||
|
||||
try:
|
||||
while true:
|
||||
let v = sqlite3_step(s)
|
||||
case v
|
||||
of SQLITE_ROW:
|
||||
onRowCallback(s)
|
||||
of SQLITE_DONE:
|
||||
return ok()
|
||||
else:
|
||||
return err($sqlite3_errstr(v))
|
||||
except Exception, CatchableError:
|
||||
# release implicit transaction
|
||||
discard sqlite3_reset(s) # same return information as step
|
||||
discard sqlite3_clear_bindings(s) # no errors possible
|
||||
|
||||
proc selectMessagesByHistoryQueryWithLimit*(
|
||||
db: SqliteDatabase,
|
||||
contentTopic: seq[ContentTopic],
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
cursor: Option[DbCursor],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
limit: uint,
|
||||
ascending: bool,
|
||||
): DatabaseResult[
|
||||
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
] {.deprecated.} =
|
||||
var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
|
||||
@[]
|
||||
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
|
||||
message = queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 1,
|
||||
payloadCol = 2,
|
||||
versionCol = 4,
|
||||
senderTimestampCol = 5,
|
||||
metaCol = 8,
|
||||
)
|
||||
digest = queryRowDigestCallback(s, digestCol = 6)
|
||||
storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
|
||||
|
||||
messages.add((pubsubTopic, message, digest, storedAt, hash))
|
||||
|
||||
let query = block:
|
||||
let where = whereClausev2(
|
||||
cursor.isSome(), pubsubTopic, contentTopic, startTime, endTime, ascending
|
||||
)
|
||||
|
||||
selectMessagesWithLimitQueryv2(DbTable, where, limit, ascending)
|
||||
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessagesV2WithLimitStmt(
|
||||
cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback
|
||||
)
|
||||
dbStmt.dispose()
|
||||
|
||||
return ok(messages)
|
||||
|
||||
### Store v3 ###
|
||||
|
||||
proc execSelectMessageByHash(
|
||||
s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc
|
||||
): DatabaseResult[void] =
|
||||
let s = RawStmtPtr(s)
|
||||
|
||||
checkErr bindParam(s, 1, toSeq(hash))
|
||||
|
||||
try:
|
||||
while true:
|
||||
let v = sqlite3_step(s)
|
||||
case v
|
||||
of SQLITE_ROW:
|
||||
onRowCallback(s)
|
||||
of SQLITE_DONE:
|
||||
return ok()
|
||||
else:
|
||||
return err($sqlite3_errstr(v))
|
||||
except Exception, CatchableError:
|
||||
# release implicit transaction
|
||||
discard sqlite3_reset(s) # same return information as step
|
||||
discard sqlite3_clear_bindings(s) # no errors possible
|
||||
|
||||
proc selectMessageByHashQuery(): SqlQueryStr =
|
||||
var query: string
|
||||
|
||||
query = "SELECT contentTopic, payload, version, timestamp, meta, messageHash"
|
||||
query &= " FROM " & DbTable
|
||||
query &= " WHERE messageHash = (?)"
|
||||
|
||||
return query
|
||||
|
||||
proc whereClause(
|
||||
cursor: bool,
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
contentTopic: seq[ContentTopic],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
hashes: seq[WakuMessageHash],
|
||||
ascending: bool,
|
||||
): Option[string] =
|
||||
let cursorClause =
|
||||
if cursor:
|
||||
let comp = if ascending: ">" else: "<"
|
||||
|
||||
some("(timestamp, messageHash) " & comp & " (?, ?)")
|
||||
else:
|
||||
none(string)
|
||||
|
||||
let pubsubTopicClause =
|
||||
if pubsubTopic.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("pubsubTopic = (?)")
|
||||
|
||||
let contentTopicClause =
|
||||
if contentTopic.len <= 0:
|
||||
none(string)
|
||||
else:
|
||||
var where = "contentTopic IN ("
|
||||
where &= "?"
|
||||
for _ in 1 ..< contentTopic.len:
|
||||
where &= ", ?"
|
||||
where &= ")"
|
||||
some(where)
|
||||
|
||||
let startTimeClause =
|
||||
if startTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt >= (?)")
|
||||
|
||||
let endTimeClause =
|
||||
if endTime.isNone():
|
||||
none(string)
|
||||
else:
|
||||
some("storedAt <= (?)")
|
||||
|
||||
let hashesClause =
|
||||
if hashes.len <= 0:
|
||||
none(string)
|
||||
else:
|
||||
var where = "messageHash IN ("
|
||||
where &= "?"
|
||||
for _ in 1 ..< hashes.len:
|
||||
where &= ", ?"
|
||||
where &= ")"
|
||||
some(where)
|
||||
|
||||
return combineClauses(
|
||||
cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause,
|
||||
hashesClause,
|
||||
)
|
||||
|
||||
proc execSelectMessagesWithLimitStmt(
|
||||
s: SqliteStmt,
|
||||
cursor: Option[(Timestamp, WakuMessageHash)],
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
contentTopic: seq[ContentTopic],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
hashes: seq[WakuMessageHash],
|
||||
onRowCallback: DataProc,
|
||||
): DatabaseResult[void] =
|
||||
let s = RawStmtPtr(s)
|
||||
|
||||
# Bind params
|
||||
var paramIndex = 1
|
||||
|
||||
if cursor.isSome():
|
||||
let (time, hash) = cursor.get()
|
||||
checkErr bindParam(s, paramIndex, time)
|
||||
paramIndex += 1
|
||||
checkErr bindParam(s, paramIndex, toSeq(hash))
|
||||
paramIndex += 1
|
||||
|
||||
if pubsubTopic.isSome():
|
||||
let pubsubTopic = toBytes(pubsubTopic.get())
|
||||
checkErr bindParam(s, paramIndex, pubsubTopic)
|
||||
paramIndex += 1
|
||||
|
||||
for topic in contentTopic:
|
||||
checkErr bindParam(s, paramIndex, topic.toBytes())
|
||||
paramIndex += 1
|
||||
|
||||
for hash in hashes:
|
||||
checkErr bindParam(s, paramIndex, toSeq(hash))
|
||||
paramIndex += 1
|
||||
|
||||
if startTime.isSome():
|
||||
let time = startTime.get()
|
||||
checkErr bindParam(s, paramIndex, time)
|
||||
paramIndex += 1
|
||||
|
||||
if endTime.isSome():
|
||||
let time = endTime.get()
|
||||
checkErr bindParam(s, paramIndex, time)
|
||||
paramIndex += 1
|
||||
|
||||
try:
|
||||
while true:
|
||||
let v = sqlite3_step(s)
|
||||
case v
|
||||
of SQLITE_ROW:
|
||||
onRowCallback(s)
|
||||
of SQLITE_DONE:
|
||||
return ok()
|
||||
else:
|
||||
return err($sqlite3_errstr(v))
|
||||
except Exception, CatchableError:
|
||||
# release implicit transaction
|
||||
discard sqlite3_reset(s) # same return information as step
|
||||
discard sqlite3_clear_bindings(s) # no errors possible
|
||||
|
||||
proc selectMessagesWithLimitQuery(
|
||||
table: string, where: Option[string], limit: uint, ascending = true, v3 = false
|
||||
): SqlQueryStr =
|
||||
let order = if ascending: "ASC" else: "DESC"
|
||||
|
||||
var query: string
|
||||
|
||||
query =
|
||||
"SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta"
|
||||
query &= " FROM " & table
|
||||
|
||||
if where.isSome():
|
||||
query &= " WHERE " & where.get()
|
||||
|
||||
query &= " ORDER BY storedAt " & order & ", messageHash " & order
|
||||
|
||||
query &= " LIMIT " & $limit & ";"
|
||||
|
||||
return query
|
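# Illustrative query shape (hedged, assuming a cursor, two content topics and a page
# size of 100): whereClause and selectMessagesWithLimitQuery assemble roughly
#   SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta
#   FROM Message
#   WHERE (timestamp, messageHash) > (?, ?) AND contentTopic IN (?, ?)
#   ORDER BY storedAt ASC, messageHash ASC LIMIT 100;
# with the cursor and topic values bound as statement parameters and the limit inlined.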
||||
|
||||
proc selectMessagesByStoreQueryWithLimit*(
|
||||
db: SqliteDatabase,
|
||||
contentTopic: seq[ContentTopic],
|
||||
pubsubTopic: Option[PubsubTopic],
|
||||
cursor: Option[WakuMessageHash],
|
||||
startTime: Option[Timestamp],
|
||||
endTime: Option[Timestamp],
|
||||
hashes: seq[WakuMessageHash],
|
||||
limit: uint,
|
||||
ascending: bool,
|
||||
): DatabaseResult[
|
||||
seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
|
||||
] =
|
||||
# Must first get the message timestamp before paginating by time
|
||||
let newCursor =
|
||||
if cursor.isSome() and cursor.get() != EmptyWakuMessageHash:
|
||||
let hash: WakuMessageHash = cursor.get()
|
||||
|
||||
var wakuMessage: Option[WakuMessage]
|
||||
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
wakuMessage = some(
|
||||
queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 0,
|
||||
payloadCol = 1,
|
||||
versionCol = 2,
|
||||
senderTimestampCol = 3,
|
||||
metaCol = 4,
|
||||
)
|
||||
)
|
||||
|
||||
let query = selectMessageByHashQuery()
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessageByHash(hash, queryRowCallback)
|
||||
dbStmt.dispose()
|
||||
|
||||
if wakuMessage.isSome():
|
||||
let time = wakuMessage.get().timestamp
|
||||
|
||||
some((time, hash))
|
||||
else:
|
||||
return err("cursor not found")
|
||||
else:
|
||||
none((Timestamp, WakuMessageHash))
|
||||
|
||||
var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
|
||||
@[]
|
||||
|
||||
proc queryRowCallback(s: ptr sqlite3_stmt) =
|
||||
let
|
||||
pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
|
||||
message = queryRowWakuMessageCallback(
|
||||
s,
|
||||
contentTopicCol = 1,
|
||||
payloadCol = 2,
|
||||
versionCol = 4,
|
||||
senderTimestampCol = 5,
|
||||
metaCol = 8,
|
||||
)
|
||||
digest = queryRowDigestCallback(s, digestCol = 6)
|
||||
storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
|
||||
hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
|
||||
|
||||
messages.add((pubsubTopic, message, digest, storedAt, hash))
|
||||
|
||||
let query = block:
|
||||
let where = whereClause(
|
||||
newCursor.isSome(),
|
||||
pubsubTopic,
|
||||
contentTopic,
|
||||
startTime,
|
||||
endTime,
|
||||
hashes,
|
||||
ascending,
|
||||
)
|
||||
|
||||
selectMessagesWithLimitQuery(DbTable, where, limit, ascending, true)
|
||||
|
||||
let dbStmt = ?db.prepareStmt(query)
|
||||
?dbStmt.execSelectMessagesWithLimitStmt(
|
||||
newCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
|
||||
)
|
||||
dbStmt.dispose()
|
||||
|
||||
return ok(messages)
|
||||
225
waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim
Normal file
@ -0,0 +1,225 @@
|
||||
# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth.
|
||||
# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/options, stew/byteutils, chronicles, chronos, results
|
||||
import
|
||||
../../../common/databases/db_sqlite,
|
||||
../../../waku_core,
|
||||
../../../waku_core/message/digest,
|
||||
../../common,
|
||||
../../driver,
|
||||
./cursor,
|
||||
./queries
|
||||
|
||||
logScope:
|
||||
topics = "waku archive sqlite"
|
||||
|
||||
proc init(db: SqliteDatabase): ArchiveDriverResult[void] =
|
||||
## Misconfiguration can lead to nil DB
|
||||
if db.isNil():
|
||||
return err("db not initialized")
|
||||
|
||||
# Create table if it doesn't exist
|
||||
let resCreate = createTable(db)
|
||||
if resCreate.isErr():
|
||||
return err("failed to create table: " & resCreate.error())
|
||||
|
||||
# Create indices if they don't exist
|
||||
let resRtIndex = createOldestMessageTimestampIndex(db)
|
||||
if resRtIndex.isErr():
|
||||
return err("failed to create i_rt index: " & resRtIndex.error())
|
||||
|
||||
let resMsgIndex = createHistoryQueryIndex(db)
|
||||
if resMsgIndex.isErr():
|
||||
return err("failed to create i_query index: " & resMsgIndex.error())
|
||||
|
||||
return ok()
|
||||
|
||||
type SqliteDriver* = ref object of ArchiveDriver
|
||||
db: SqliteDatabase
|
||||
insertStmt: SqliteStmt[InsertMessageParams, void]
|
||||
|
||||
proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] =
|
||||
# Database initialization
|
||||
let resInit = init(db)
|
||||
if resInit.isErr():
|
||||
return err(resInit.error())
|
||||
|
||||
# General initialization
|
||||
let insertStmt = db.prepareInsertMessageStmt()
|
||||
return ok(SqliteDriver(db: db, insertStmt: insertStmt))
|
||||
|
||||
method put*(
|
||||
s: SqliteDriver,
|
||||
pubsubTopic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
digest: MessageDigest,
|
||||
messageHash: WakuMessageHash,
|
||||
receivedTime: Timestamp,
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
## Inserts a message into the store
|
||||
let res = s.insertStmt.exec(
|
||||
(
|
||||
@(digest.data), # id
|
||||
@(messageHash), # messageHash
|
||||
receivedTime, # storedAt
|
||||
toBytes(message.contentTopic), # contentTopic
|
||||
message.payload, # payload
|
||||
toBytes(pubsubTopic), # pubsubTopic
|
||||
int64(message.version), # version
|
||||
message.timestamp, # senderTimestamp
|
||||
message.meta, # meta
|
||||
)
|
||||
)
|
||||
|
||||
return res
|
||||
|
||||
method getAllMessages*(
|
||||
s: SqliteDriver
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
|
||||
## Retrieve all messages from the store.
|
||||
return s.db.selectAllMessages()
|
||||
|
||||
method getMessagesV2*(
|
||||
s: SqliteDriver,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
endTime = none(Timestamp),
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
|
||||
echo "here"
|
||||
|
||||
let cursor = cursor.map(toDbCursor)
|
||||
|
||||
let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit(
|
||||
contentTopic,
|
||||
pubsubTopic,
|
||||
cursor,
|
||||
startTime,
|
||||
endTime,
|
||||
limit = maxPageSize,
|
||||
ascending = ascendingOrder,
|
||||
)
|
||||
|
||||
return rowsRes
|
||||
|
||||
method getMessages*(
|
||||
s: SqliteDriver,
|
||||
includeData = true,
|
||||
contentTopic = newSeq[ContentTopic](0),
|
||||
pubsubTopic = none(PubsubTopic),
|
||||
cursor = none(ArchiveCursor),
|
||||
startTime = none(Timestamp),
|
||||
endTime = none(Timestamp),
|
||||
hashes = newSeq[WakuMessageHash](0),
|
||||
maxPageSize = DefaultPageSize,
|
||||
ascendingOrder = true,
|
||||
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
|
||||
let cursor =
|
||||
if cursor.isSome():
|
||||
some(cursor.get().hash)
|
||||
else:
|
||||
none(WakuMessageHash)
|
||||
|
||||
let rowsRes = s.db.selectMessagesByStoreQueryWithLimit(
|
||||
contentTopic,
|
||||
pubsubTopic,
|
||||
cursor,
|
||||
startTime,
|
||||
endTime,
|
||||
hashes,
|
||||
limit = maxPageSize,
|
||||
ascending = ascendingOrder,
|
||||
)
|
||||
|
||||
return rowsRes
|
||||
|
||||
method getMessagesCount*(
|
||||
s: SqliteDriver
|
||||
): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return s.db.getMessageCount()
|
||||
|
||||
method getPagesCount*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return s.db.getPageCount()
|
||||
|
||||
method getPagesSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return s.db.getPageSize()
|
||||
|
||||
method getDatabaseSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} =
|
||||
return s.db.getDatabaseSize()
|
||||
|
||||
method performVacuum*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
return s.db.performSqliteVacuum()
|
||||
|
||||
method getOldestMessageTimestamp*(
|
||||
s: SqliteDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return s.db.selectOldestReceiverTimestamp()
|
||||
|
||||
method getNewestMessageTimestamp*(
|
||||
s: SqliteDriver
|
||||
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
|
||||
return s.db.selectNewestReceiverTimestamp()
|
||||
|
||||
method deleteMessagesOlderThanTimestamp*(
|
||||
s: SqliteDriver, ts: Timestamp
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
return s.db.deleteMessagesOlderThanTimestamp(ts)
|
||||
|
||||
method deleteOldestMessagesNotWithinLimit*(
|
||||
s: SqliteDriver, limit: int
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
return s.db.deleteOldestMessagesNotWithinLimit(limit)
|
||||
|
||||
method decreaseDatabaseSize*(
|
||||
driver: SqliteDriver, targetSizeInBytes: int64, forceRemoval: bool = false
|
||||
): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
## Remove the oldest 20% of the data when the database grows past the target size
|
||||
const DeleteLimit = 0.80
|
||||
|
||||
## when the db size overshoots the database limit, shred the oldest 20% of messages
|
||||
## get size of database
|
||||
let dbSize = (await driver.getDatabaseSize()).valueOr:
|
||||
return err("failed to get database size: " & $error)
|
||||
|
||||
## database size in bytes
|
||||
let totalSizeOfDB: int64 = int64(dbSize)
|
||||
|
||||
if totalSizeOfDB < targetSizeInBytes:
|
||||
return ok()
|
||||
|
||||
## to shred/delete messages, get the total row/message count
|
||||
let numMessages = (await driver.getMessagesCount()).valueOr:
|
||||
return err("failed to get messages count: " & error)
|
||||
|
||||
## NOTE: SQLite vacuuming is triggered manually, so here we only delete a percentage of rows.
|
||||
## If vacuuming were automatic, we would instead aim to check the DB size periodically for an
|
||||
## efficient retention policy implementation.
|
||||
|
||||
## 80% of the total messages are to be kept, delete others
|
||||
let pageDeleteWindow = int(float(numMessages) * DeleteLimit)
|
||||
|
||||
(await driver.deleteOldestMessagesNotWithinLimit(limit = pageDeleteWindow)).isOkOr:
|
||||
return err("deleting oldest messages failed: " & error)
|
||||
|
||||
return ok()
|
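# Illustrative run (hypothetical numbers): with targetSizeInBytes = 1 GiB, a 1.2 GiB
# database and 100_000 stored rows, the early return is skipped, so
# pageDeleteWindow = int(100_000 * 0.80) = 80_000 and the call keeps the newest
# 80_000 messages while the oldest 20_000 are deleted.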
||||
|
||||
method close*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} =
|
||||
## Close the database connection
|
||||
# Dispose statements
|
||||
s.insertStmt.dispose()
|
||||
# Close connection
|
||||
s.db.close()
|
||||
return ok()
|
||||
|
||||
method existsTable*(
|
||||
s: SqliteDriver, tableName: string
|
||||
): Future[ArchiveDriverResult[bool]] {.async.} =
|
||||
return err("existsTable method not implemented in sqlite_driver")
|
||||
16
waku/waku_archive_legacy/retention_policy.nim
Normal file
@ -0,0 +1,16 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import results, chronos
|
||||
import ./driver
|
||||
|
||||
type RetentionPolicyResult*[T] = Result[T, string]
|
||||
|
||||
type RetentionPolicy* = ref object of RootObj
|
||||
|
||||
method execute*(
|
||||
p: RetentionPolicy, store: ArchiveDriver
|
||||
): Future[RetentionPolicyResult[void]] {.base, async.} =
|
||||
discard
|
||||
88
waku/waku_archive_legacy/retention_policy/builder.nim
Normal file
@ -0,0 +1,88 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[strutils, options], regex, results
|
||||
import
|
||||
../retention_policy,
|
||||
./retention_policy_time,
|
||||
./retention_policy_capacity,
|
||||
./retention_policy_size
|
||||
|
||||
proc new*(
|
||||
T: type RetentionPolicy, retPolicy: string
|
||||
): RetentionPolicyResult[Option[RetentionPolicy]] =
|
||||
let retPolicy = retPolicy.toLower
|
||||
|
||||
# Validate the retention policy format
|
||||
if retPolicy == "" or retPolicy == "none":
|
||||
return ok(none(RetentionPolicy))
|
||||
|
||||
const StoreMessageRetentionPolicyRegex = re2"^\w+:\d*\.?\d+((g|m)b)?$"
|
||||
if not retPolicy.match(StoreMessageRetentionPolicyRegex):
|
||||
return err("invalid 'store message retention policy' format: " & retPolicy)
|
||||
|
||||
# Apply the retention policy, if any
|
||||
let retentionPolicyParts = retPolicy.split(":", 1)
|
||||
let
|
||||
policy = retentionPolicyParts[0]
|
||||
policyArgs = retentionPolicyParts[1]
|
||||
|
||||
if policy == "time":
|
||||
var retentionTimeSeconds: int64
|
||||
try:
|
||||
retentionTimeSeconds = parseInt(policyArgs)
|
||||
except ValueError:
|
||||
return err("invalid time retention policy argument")
|
||||
|
||||
let retPolicy: RetentionPolicy = TimeRetentionPolicy.new(retentionTimeSeconds)
|
||||
return ok(some(retPolicy))
|
||||
elif policy == "capacity":
|
||||
var retentionCapacity: int
|
||||
try:
|
||||
retentionCapacity = parseInt(policyArgs)
|
||||
except ValueError:
|
||||
return err("invalid capacity retention policy argument")
|
||||
|
||||
let retPolicy: RetentionPolicy = CapacityRetentionPolicy.new(retentionCapacity)
|
||||
return ok(some(retPolicy))
|
||||
elif policy == "size":
|
||||
var retentionSize: string
|
||||
retentionSize = policyArgs
|
||||
|
||||
# captures the size unit such as GB or MB
|
||||
let sizeUnit = retentionSize.substr(retentionSize.len - 2)
|
||||
# captures the string type number data of the size provided
|
||||
let sizeQuantityStr = retentionSize.substr(0, retentionSize.len - 3)
|
||||
# to hold the numeric value data of size
|
||||
var inptSizeQuantity: float
|
||||
var sizeQuantity: int64
|
||||
var sizeMultiplier: float
|
||||
|
||||
try:
|
||||
inptSizeQuantity = parseFloat(sizeQuantityStr)
|
||||
except ValueError:
|
||||
return err("invalid size retention policy argument: " & getCurrentExceptionMsg())
|
||||
|
||||
case sizeUnit
|
||||
of "gb":
|
||||
sizeMultiplier = 1024.0 * 1024.0 * 1024.0
|
||||
of "mb":
|
||||
sizeMultiplier = 1024.0 * 1024.0
|
||||
else:
|
||||
return err (
|
||||
"""invalid size retention value unit: expected "Mb" or "Gb" but got """ &
|
||||
sizeUnit
|
||||
)
|
||||
|
||||
# quantity is converted into bytes for uniform processing
|
||||
sizeQuantity = int64(inptSizeQuantity * sizeMultiplier)
|
||||
|
||||
if sizeQuantity <= 0:
|
||||
return err("invalid size retention policy argument: a non-zero value is required")
|
||||
|
||||
let retPolicy: RetentionPolicy = SizeRetentionPolicy.new(sizeQuantity)
|
||||
return ok(some(retPolicy))
|
||||
else:
|
||||
return err("unknown retention policy")
|
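# Illustrative parse (hedged): RetentionPolicy.new("size:30gb") splits into
# policy = "size" and policyArgs = "30gb"; sizeUnit = "gb" and sizeQuantityStr = "30",
# so sizeQuantity = int64(30 * 1024 * 1024 * 1024) = 32_212_254_720 bytes and a
# SizeRetentionPolicy with that limit is returned.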
||||
@ -0,0 +1,68 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import results, chronicles, chronos
|
||||
import ../driver, ../retention_policy
|
||||
|
||||
logScope:
|
||||
topics = "waku archive retention_policy"
|
||||
|
||||
const DefaultCapacity*: int = 25_000
|
||||
|
||||
const MaxOverflow = 1.3
|
||||
|
||||
type
|
||||
# CapacityRetentionPolicy implements auto deletion as follows:
|
||||
# - The sqlite DB will store up to `totalCapacity = capacity * MaxOverflow` messages,
|
||||
# giving an overflowWindow of `capacity * (MaxOverflow - 1) = overflowWindow`.
|
||||
#
|
||||
# - In case of an overflow, messages are sorted by `receiverTimestamp` and the oldest ones are
|
||||
# deleted. The number of messages that get deleted is `(overflowWindow / 2) = deleteWindow`,
|
||||
# bringing the total number of stored messages back to `capacity + (overflowWindow / 2)`.
|
||||
#
|
||||
# The rationale for batch deleting is efficiency. We keep half of the overflow window in addition
|
||||
# to `capacity` because we delete the oldest messages with respect to `receiverTimestamp` instead of
|
||||
# `senderTimestamp`. `ReceiverTimestamp` is guaranteed to be set, while senders could omit setting
|
||||
# `senderTimestamp`. However, `receiverTimestamp` can differ from node to node for the same message.
|
||||
# So sorting by `receiverTimestamp` might (slightly) prioritize some actually older messages and we
|
||||
# compensate that by keeping half of the overflow window.
|
||||
CapacityRetentionPolicy* = ref object of RetentionPolicy
|
||||
capacity: int
|
||||
# represents both the number of messages that are persisted in the sqlite DB (excl. the overflow window explained above), and the number of messages that get loaded via `getAll`.
|
||||
totalCapacity: int # = capacity * MaxOverflow
|
||||
deleteWindow: int
|
||||
# = capacity * (MaxOverflow - 1) / 2; half of the overflow window, the amount of messages deleted when overflow occurs
|
||||
|
||||
proc calculateTotalCapacity(capacity: int, overflow: float): int =
|
||||
int(float(capacity) * overflow)
|
||||
|
||||
proc calculateOverflowWindow(capacity: int, overflow: float): int =
|
||||
int(float(capacity) * (overflow - 1))
|
||||
|
||||
proc calculateDeleteWindow(capacity: int, overflow: float): int =
|
||||
calculateOverflowWindow(capacity, overflow) div 2
|
||||
|
||||
proc new*(T: type CapacityRetentionPolicy, capacity = DefaultCapacity): T =
|
||||
let
|
||||
totalCapacity = calculateTotalCapacity(capacity, MaxOverflow)
|
||||
deleteWindow = calculateDeleteWindow(capacity, MaxOverflow)
|
||||
|
||||
CapacityRetentionPolicy(
|
||||
capacity: capacity, totalCapacity: totalCapacity, deleteWindow: deleteWindow
|
||||
)
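
# Worked example (sketch) using the defaults declared above:
#
#   capacity      = DefaultCapacity = 25_000
#   totalCapacity = calculateTotalCapacity(25_000, 1.3)  # int(25_000.0 * 1.3)        = 32_500
#   deleteWindow  = calculateDeleteWindow(25_000, 1.3)   # int(25_000.0 * 0.3) div 2  ~= 3_750
#
# Pruning is a no-op while the driver reports fewer than 32_500 messages; once
# that count is reached, the oldest messages are deleted so that roughly
# 25_000 + 3_750 = 28_750 of the newest messages remain.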

method execute*(
    p: CapacityRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
  let numMessages = (await driver.getMessagesCount()).valueOr:
    return err("failed to get messages count: " & error)

  if numMessages < p.totalCapacity:
    return ok()

  (await driver.deleteOldestMessagesNotWithinLimit(limit = p.capacity + p.deleteWindow)).isOkOr:
    return err("deleting oldest messages failed: " & error)

  return ok()
@ -0,0 +1,27 @@
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import results, chronicles, chronos
import ../driver, ../retention_policy

logScope:
  topics = "waku archive retention_policy"

# default size is 30 GiB, i.e. 32212254720 bytes
const DefaultRetentionSize*: int64 = 32212254720

type SizeRetentionPolicy* = ref object of RetentionPolicy
  sizeLimit: int64

proc new*(T: type SizeRetentionPolicy, size = DefaultRetentionSize): T =
  SizeRetentionPolicy(sizeLimit: size)

method execute*(
    p: SizeRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
  (await driver.decreaseDatabaseSize(p.sizeLimit)).isOkOr:
    return err("decreaseDatabaseSize failed: " & $error)

  return ok()
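
# Minimal usage sketch (the `driver` value and the surrounding maintenance
# loop are assumed, not shown in this file):
#
#   let policy: RetentionPolicy = SizeRetentionPolicy.new(5'i64 * 1024 * 1024 * 1024)  # 5 GiB
#   (await policy.execute(driver)).isOkOr:
#     return err("retention failed: " & error)
#
# `execute` simply delegates to the driver's `decreaseDatabaseSize`, so how the
# space is actually reclaimed (deletes, vacuum, ...) is up to each ArchiveDriver.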
@ -0,0 +1,40 @@
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import std/times, results, chronicles, chronos
import ../../waku_core, ../driver, ../retention_policy

logScope:
  topics = "waku archive retention_policy"

const DefaultRetentionTime*: int64 = 30.days.seconds

type TimeRetentionPolicy* = ref object of RetentionPolicy
  retentionTime: chronos.Duration

proc new*(T: type TimeRetentionPolicy, retentionTime = DefaultRetentionTime): T =
  TimeRetentionPolicy(retentionTime: retentionTime.seconds)

method execute*(
    p: TimeRetentionPolicy, driver: ArchiveDriver
): Future[RetentionPolicyResult[void]] {.async.} =
  ## Delete messages that exceed the retention time by 10% or more (batch delete for efficiency)

  let omtRes = await driver.getOldestMessageTimestamp()
  if omtRes.isErr():
    return err("failed to get oldest message timestamp: " & omtRes.error)

  let now = getNanosecondTime(getTime().toUnixFloat())
  let retentionTimestamp = now - p.retentionTime.nanoseconds
  let thresholdTimestamp = retentionTimestamp - p.retentionTime.nanoseconds div 10

  if thresholdTimestamp <= omtRes.value:
    return ok()

  let res = await driver.deleteMessagesOlderThanTimestamp(ts = retentionTimestamp)
  if res.isErr():
    return err("failed to delete oldest messages: " & res.error)

  return ok()
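
# Worked example (sketch) with the default 30-day retention, timestamps in
# nanoseconds as produced by `getNanosecondTime` above:
#
#   retentionTimestamp = now - 30 days
#   thresholdTimestamp = now - 33 days   # retention time plus 10% of it
#
# While the oldest stored message is newer than ~33 days, nothing is deleted;
# once it is older than that, everything past the 30-day mark is dropped in a
# single pass, which is the "batch delete for efficiency" mentioned in the
# doc comment.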