Mirror of https://github.com/logos-messaging/logos-messaging-nim.git (synced 2026-01-08 00:43:06 +00:00).
* queue driver refactor (#2753)
* chore(archive): archive refactor (#2752)
* chore(archive): sqlite driver refactor (#2754)
* chore(archive): postgres driver refactor (#2755)
* chore(archive): renaming & copies (#2751)
* postgres legacy: stop using the storedAt field
* migration script 6: keep the id column — it contains the message digest used by store-v2, which must remain supported for a while
* legacy archive: set target migration version to 6
* waku_node: fall back to wakuLegacyArchive when wakuArchive is nil
* node_factory, waku_node: mount the legacy and future store simultaneously, so the nwaku node serves both store-v2 and store-v3 requests; only the legacy archive archives messages, and the archived data satisfies both protocols
* postgres_driver: temporary code until store-v2 is removed

Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Co-authored-by: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Co-authored-by: Ivan Folgueira Bande <ivansete@status.im>
126 lines · 4.1 KiB · Nim
when (NimMajor, NimMinor) < (1, 4):
|
|
{.push raises: [Defect].}
|
|
else:
|
|
{.push raises: [].}
|
|
|
|
import results, chronicles, chronos
|
|
import
|
|
../driver,
|
|
../../common/databases/dburl,
|
|
../../common/databases/db_sqlite,
|
|
../../common/error_handling,
|
|
./sqlite_driver,
|
|
./sqlite_driver/migrations as archive_driver_sqlite_migrations,
|
|
./queue_driver
|
|
|
|
export sqlite_driver, queue_driver
|
|
|
|
when defined(postgres):
|
|
import ## These imports add dependency with an external libpq library
|
|
./postgres_driver/migrations as archive_postgres_driver_migrations,
|
|
./postgres_driver
|
|
export postgres_driver
|
|
|
|
proc new*(
    T: type ArchiveDriver,
    url: string,
    vacuum: bool,
    migrate: bool,
    maxNumConn: int,
    onFatalErrorAction: OnFatalErrorHandler,
): Future[Result[T, string]] {.async.} =
  ## Builds the archive driver selected by the `url` scheme:
  ## "sqlite" -> SqliteDriver, "postgres" -> PostgresDriver (only when
  ## compiled with -d:postgres), anything else -> in-memory QueueDriver.
  ##
  ## url - string that defines the database
  ## vacuum - if true, a cleanup operation will be applied to the database
  ## migrate - if true, the database schema will be updated
  ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres)
  ## onFatalErrorAction - called if, e.g., the connection with db got lost

  let dbUrlValidationRes = dburl.validateDbUrl(url)
  if dbUrlValidationRes.isErr():
    return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error)

  let engineRes = dburl.getDbEngine(url)
  if engineRes.isErr():
    # Error text unified to this proc's name (was "setupWakuArchiveDriver",
    # a stale reference to a previous function name).
    return err("error getting db engine in ArchiveDriver.new: " & engineRes.error)

  let engine = engineRes.get()

  case engine
  of "sqlite":
    let pathRes = dburl.getDbPath(url)
    if pathRes.isErr():
      return err("error getting db path in ArchiveDriver.new: " & pathRes.error)

    let dbRes = SqliteDatabase.new(pathRes.get())
    if dbRes.isErr():
      return err("error in ArchiveDriver.new: " & dbRes.error)

    let db = dbRes.get()

    # SQLite vacuum: gather page stats first so we only vacuum when there is
    # actually something to reclaim (free pages on a non-empty database).
    let sqliteStatsRes = db.gatherSqlitePageStats()
    if sqliteStatsRes.isErr():
      return err("error while gathering sqlite stats: " & $sqliteStatsRes.error)

    let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get()
    debug "sqlite database page stats",
      pageSize = pageSize, pages = pageCount, freePages = freelistCount

    if vacuum and (pageCount > 0 and freelistCount > 0):
      let vacuumRes = db.performSqliteVacuum()
      if vacuumRes.isErr():
        return err("error in vacuum sqlite: " & $vacuumRes.error)

    # Database migration (schema upgrade), opt-in via the `migrate` flag.
    if migrate:
      let migrateRes = archive_driver_sqlite_migrations.migrate(db)
      if migrateRes.isErr():
        return err("error in migrate sqlite: " & $migrateRes.error)

    debug "setting up sqlite waku archive driver"
    let res = SqliteDriver.new(db)
    if res.isErr():
      return err("failed to init sqlite archive driver: " & res.error)

    return ok(res.get())
  of "postgres":
    when defined(postgres):
      let res = PostgresDriver.new(
        dbUrl = url,
        maxConnections = maxNumConn,
        onFatalErrorAction = onFatalErrorAction,
      )
      if res.isErr():
        return err("failed to init postgres archive driver: " & res.error)

      let driver = res.get()

      # Database migration must run before the partition factory below.
      if migrate:
        let migrateRes = await archive_postgres_driver_migrations.migrate(driver)
        if migrateRes.isErr():
          return err("ArchiveDriver build failed in migration: " & $migrateRes.error)

      ## This should be started once we make sure the 'messages' table exists
      ## Hence, this should be run after the migration is completed.
      asyncSpawn driver.startPartitionFactory(onFatalErrorAction)

      info "waiting for a partition to be created"
      # Poll for up to ~10 s (100 iterations x 100 ms) until the partition
      # factory has created the first partition.
      for _ in 0 ..< 100:
        if driver.containsAnyPartition():
          break
        await sleepAsync(chronos.milliseconds(100))

      if not driver.containsAnyPartition():
        # NOTE(review): we still return ok(driver) after invoking the fatal
        # handler — presumably onFatalErrorAction shuts the node down; confirm.
        onFatalErrorAction("a partition could not be created")

      return ok(driver)
    else:
      return err(
        "Postgres has been configured but not been compiled. Check compiler definitions."
      )
  else:
    # Unknown engine: fall back to the volatile in-memory driver.
    debug "setting up in-memory waku archive driver"
    let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
    return ok(driver)