Simon-Pierre Vivier f54ba10bc7
chore(archive): archive and drivers refactor (#2761)
* queue driver refactor (#2753)
* chore(archive): archive refactor (#2752)
* chore(archive): sqlite driver refactor (#2754)
* chore(archive): postgres driver refactor (#2755)
* chore(archive): renaming & copies (#2751)
* postgres legacy: stop using the storedAt field
* migration script 6: we still need the id column
  The id column is needed because it contains the message digest,
  which is used by store v2, and we need to keep supporting
  store v2 for a while
* legacy archive: set target migration version to 6
* waku_node: try to use wakuLegacyArchive if wakuArchive is nil
* node_factory, waku_node: mount legacy and future store simultaneously
  We want the nwaku node to simultaneously support store-v2
  requests and store-v3 requests.
  Only the legacy archive is in charge of archiving messages, and the
  archived information is suitable to fulfill both store-v2 and
  store-v3 needs (see the sketch below).
* postgres_driver: adding temporary code until store-v2 is removed
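
A minimal Nim sketch of the mounting decision described above, using
illustrative stand-in names (StoreMounts, planMounts); this is not the
actual waku_node API:

    type StoreMounts = object
      storeV2: bool # legacy store protocol
      storeV3: bool # current store protocol

    proc planMounts(hasArchive, hasLegacyArchive: bool): StoreMounts =
      ## Both store versions are answered from the same archived data;
      ## store-v2 stays available only while the legacy archive is mounted.
      StoreMounts(storeV2: hasLegacyArchive, storeV3: hasArchive or hasLegacyArchive)

    when isMainModule:
      # A legacy archive alone is enough to serve both store-v2 and store-v3.
      let mounts = planMounts(hasArchive = false, hasLegacyArchive = true)
      assert mounts.storeV2 and mounts.storeV3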

---------

Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Co-authored-by: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Co-authored-by: Ivan Folgueira Bande <ivansete@status.im>
2024-07-12 18:19:12 +02:00


when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import stew/byteutils, nimcrypto/sha2
import ../../../waku_core, ../../common

type Index* = object
  ## This type contains the description of an Index used in the pagination of WakuMessages
  pubsubTopic*: string
  senderTime*: Timestamp # the time at which the message is generated
  receiverTime*: Timestamp
  digest*: MessageDigest # calculated over payload and content topic
  hash*: WakuMessageHash

proc compute*(
    T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic
): T =
  ## Takes a WakuMessage with received timestamp and returns its Index.
  let
    digest = computeDigest(msg)
    senderTime = msg.timestamp
    hash = computeMessageHash(pubsubTopic, msg)

  return Index(
    pubsubTopic: pubsubTopic,
    senderTime: senderTime,
    receiverTime: receivedTime,
    digest: digest,
    hash: hash,
  )

proc tohistoryCursor*(index: Index): ArchiveCursor =
  ## Converts an Index into an ArchiveCursor, field for field;
  ## the index's receiverTime maps to the cursor's storeTime.
  return ArchiveCursor(
    pubsubTopic: index.pubsubTopic,
    senderTime: index.senderTime,
    storeTime: index.receiverTime,
    digest: index.digest,
    hash: index.hash,
  )

proc toIndex*(index: ArchiveCursor): Index =
  ## Converts an ArchiveCursor back into an Index;
  ## the cursor's storeTime maps to the index's receiverTime.
  return Index(
    pubsubTopic: index.pubsubTopic,
    senderTime: index.senderTime,
    receiverTime: index.storeTime,
    digest: index.digest,
    hash: index.hash,
  )

proc `==`*(x, y: Index): bool =
  ## receiverTime plays no role in index equality
  return
    (
      (x.senderTime == y.senderTime) and (x.digest == y.digest) and
      (x.pubsubTopic == y.pubsubTopic)
    ) or (x.hash == y.hash) # this applies to store v3 queries only
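
# Illustrative check (hypothetical, not part of the original module): two
# indices computed from the same message with different receive times still
# compare equal, since `==` ignores receiverTime. DefaultPubsubTopic is
# assumed to be exported by waku_core.
when isMainModule:
  let
    msg = WakuMessage(
      payload: @[byte 1, 2, 3], contentTopic: "/app/1/test/proto", timestamp: Timestamp(7)
    )
    early = Index.compute(msg, receivedTime = Timestamp(10), pubsubTopic = DefaultPubsubTopic)
    late = Index.compute(msg, receivedTime = Timestamp(99), pubsubTopic = DefaultPubsubTopic)

  assert early == late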

proc cmp*(x, y: Index): int =
  ## compares x and y
  ## returns 0 if they are equal
  ## returns -1 if x < y
  ## returns 1 if x > y
  ##
  ## Default sorting order priority is:
  ## 1. senderTimestamp
  ## 2. receiverTimestamp (a fallback only if senderTimestamp unset on either side, and all other fields unequal)
  ## 3. message digest
  ## 4. pubsubTopic

  if x == y:
    # Quick exit ensures receiver time does not affect index equality
    return 0

  # Timestamp has a higher priority for comparison
  let
    # Use receiverTime where senderTime is unset
    xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime
    yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime

  let timecmp = cmp(xTimestamp, yTimestamp)
  if timecmp != 0:
    return timecmp

  # Continue only when timestamps are equal
  let digestcmp = cmp(x.digest.data, y.digest.data)
  if digestcmp != 0:
    return digestcmp

  return cmp(x.pubsubTopic, y.pubsubTopic)
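
# Illustrative ordering check (hypothetical, not part of the original module):
# cmp sorts by sender timestamp first and falls back to the receiver timestamp
# only when the sender timestamp is unset. DefaultPubsubTopic is assumed to be
# exported by waku_core.
when isMainModule:
  let
    older = WakuMessage(
      payload: @[byte 1], contentTopic: "/app/1/test/proto", timestamp: Timestamp(50)
    )
    newer = WakuMessage(
      payload: @[byte 2], contentTopic: "/app/1/test/proto", timestamp: Timestamp(100)
    )
    noSenderTime = WakuMessage(payload: @[byte 3], contentTopic: "/app/1/test/proto")

    a = Index.compute(older, receivedTime = Timestamp(60), pubsubTopic = DefaultPubsubTopic)
    b = Index.compute(newer, receivedTime = Timestamp(110), pubsubTopic = DefaultPubsubTopic)
    c = Index.compute(noSenderTime, receivedTime = Timestamp(75), pubsubTopic = DefaultPubsubTopic)

  assert cmp(a, b) < 0 # sender time 50 sorts before 100
  assert cmp(c, b) < 0 # no sender time, so receiver time 75 is used and sorts before 100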