From 6030983a83479eaa93f216bd43e8afa316e7b558 Mon Sep 17 00:00:00 2001 From: darshankabariya Date: Mon, 16 Mar 2026 14:27:43 +0530 Subject: [PATCH 1/8] chore: add v0.38.0 changelog --- CHANGELOG.md | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index edc4a705c..bc5155b6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,73 @@ +## v0.38.0 (2026-03-16) + +### Notes + +- **liblogosdelivery**: Major new FFI API with debug API, health status events, message received events, stateful SubscriptionService, and improved resource management. +- Waku Kademlia discovery integrated with Mix protocol. +- Context-aware and event-driven broker architecture introduced. +- REST Store API now defaults to page size 20 with max 100. +- Lightpush no longer mounts without relay enabled. +- Repository renamed from `logos-messaging-nim` to `logos-delivery`. + +### Features + +- liblogosdelivery: FFI library of new API ([#3714](https://github.com/logos-messaging/logos-delivery/pull/3714)) ([3603b838](https://github.com/logos-messaging/logos-delivery/commit/3603b838)) +- liblogosdelivery: health status event support ([#3737](https://github.com/logos-messaging/logos-delivery/pull/3737)) ([ba85873f](https://github.com/logos-messaging/logos-delivery/commit/ba85873f)) +- liblogosdelivery: MessageReceivedEvent propagation over FFI ([#3747](https://github.com/logos-messaging/logos-delivery/pull/3747)) ([0ad55159](https://github.com/logos-messaging/logos-delivery/commit/0ad55159)) +- liblogosdelivery: add debug API ([#3742](https://github.com/logos-messaging/logos-delivery/pull/3742)) ([09618a26](https://github.com/logos-messaging/logos-delivery/commit/09618a26)) +- liblogosdelivery: implement stateful SubscriptionService for Core mode ([#3732](https://github.com/logos-messaging/logos-delivery/pull/3732)) ([51ec09c3](https://github.com/logos-messaging/logos-delivery/commit/51ec09c3)) +- Waku Kademlia 
integration and Mix protocol updates ([#3722](https://github.com/logos-messaging/logos-delivery/pull/3722)) ([335600eb](https://github.com/logos-messaging/logos-delivery/commit/335600eb)) +- Waku API: implement Health spec ([#3689](https://github.com/logos-messaging/logos-delivery/pull/3689)) ([1fb4d1ea](https://github.com/logos-messaging/logos-delivery/commit/1fb4d1ea)) +- Waku API: send ([#3669](https://github.com/logos-messaging/logos-delivery/pull/3669)) ([1fd25355](https://github.com/logos-messaging/logos-delivery/commit/1fd25355)) +- iOS compilation support (WIP) ([#3668](https://github.com/logos-messaging/logos-delivery/pull/3668)) ([96196ab8](https://github.com/logos-messaging/logos-delivery/commit/96196ab8)) +- Distribute libwaku binaries ([#3612](https://github.com/logos-messaging/logos-delivery/pull/3612)) ([9e2b3830](https://github.com/logos-messaging/logos-delivery/commit/9e2b3830)) +- Rendezvous: broadcast and discover WakuPeerRecords ([#3617](https://github.com/logos-messaging/logos-delivery/pull/3617)) ([b0cd75f4](https://github.com/logos-messaging/logos-delivery/commit/b0cd75f4)) +- New postgres metric to estimate payload stats ([#3596](https://github.com/logos-messaging/logos-delivery/pull/3596)) ([454b098a](https://github.com/logos-messaging/logos-delivery/commit/454b098a)) + +### Bug Fixes + +- Fix NodeHealthMonitor logspam ([#3743](https://github.com/logos-messaging/logos-delivery/pull/3743)) ([7e36e268](https://github.com/logos-messaging/logos-delivery/commit/7e36e268)) +- Fix peer selection by shard and rendezvous/metadata sharding initialization ([#3718](https://github.com/logos-messaging/logos-delivery/pull/3718)) ([84f79110](https://github.com/logos-messaging/logos-delivery/commit/84f79110)) +- Correct dynamic library extension on mac and update OS detection ([#3754](https://github.com/logos-messaging/logos-delivery/pull/3754)) ([1ace0154](https://github.com/logos-messaging/logos-delivery/commit/1ace0154)) +- Force FINALIZE partition 
detach after detecting shorter error ([#3728](https://github.com/logos-messaging/logos-delivery/pull/3728)) ([b38b5aae](https://github.com/logos-messaging/logos-delivery/commit/b38b5aae)) +- Fix store protocol issue in v0.37.0 ([#3657](https://github.com/logos-messaging/logos-delivery/pull/3657)) ([91b4c5f5](https://github.com/logos-messaging/logos-delivery/commit/91b4c5f5)) +- Fix hash inputs for external nullifier, remove length prefix for sha256 ([#3660](https://github.com/logos-messaging/logos-delivery/pull/3660)) ([2d40cb9d](https://github.com/logos-messaging/logos-delivery/commit/2d40cb9d)) +- Fix admin API peer shards field from metadata protocol ([#3594](https://github.com/logos-messaging/logos-delivery/pull/3594)) ([e54851d9](https://github.com/logos-messaging/logos-delivery/commit/e54851d9)) +- Wakucanary exits with error if ping fails ([#3595](https://github.com/logos-messaging/logos-delivery/pull/3595), [#3711](https://github.com/logos-messaging/logos-delivery/pull/3711)) +- Force epoll in chronos for Android ([#3705](https://github.com/logos-messaging/logos-delivery/pull/3705)) ([beb1dde1](https://github.com/logos-messaging/logos-delivery/commit/beb1dde1)) +- Fix build_rln.sh script ([#3704](https://github.com/logos-messaging/logos-delivery/pull/3704)) ([09034837](https://github.com/logos-messaging/logos-delivery/commit/09034837)) +- liblogosdelivery: move destroy API to node_api, add security checks and fix possible resource leak ([#3736](https://github.com/logos-messaging/logos-delivery/pull/3736)) ([db19da92](https://github.com/logos-messaging/logos-delivery/commit/db19da92)) + +### Changes + +- Context-aware brokers architecture ([#3674](https://github.com/logos-messaging/logos-delivery/pull/3674)) ([c27405b1](https://github.com/logos-messaging/logos-delivery/commit/c27405b1)) +- Introduce EventBroker, RequestBroker and MultiRequestBroker ([#3644](https://github.com/logos-messaging/logos-delivery/pull/3644)) 
([ae74b901](https://github.com/logos-messaging/logos-delivery/commit/ae74b901)) +- Use chronos' TokenBucket ([#3670](https://github.com/logos-messaging/logos-delivery/pull/3670)) ([284a0816](https://github.com/logos-messaging/logos-delivery/commit/284a0816)) +- REST Store API constraints: default page size 20, max 100 ([#3602](https://github.com/logos-messaging/logos-delivery/pull/3602)) ([8c30a8e1](https://github.com/logos-messaging/logos-delivery/commit/8c30a8e1)) +- Do not mount lightpush without relay ([#3540](https://github.com/logos-messaging/logos-delivery/pull/3540)) ([7d1c6aba](https://github.com/logos-messaging/logos-delivery/commit/7d1c6aba)) +- Mix: use exit==dest approach ([#3642](https://github.com/logos-messaging/logos-delivery/pull/3642)) ([088e3108](https://github.com/logos-messaging/logos-delivery/commit/088e3108)) +- Mix: simple refactor to reduce duplicated logs ([#3752](https://github.com/logos-messaging/logos-delivery/pull/3752)) ([96f1c40a](https://github.com/logos-messaging/logos-delivery/commit/96f1c40a)) +- Simplify NodeHealthMonitor creation ([#3716](https://github.com/logos-messaging/logos-delivery/pull/3716)) ([a8bdbca9](https://github.com/logos-messaging/logos-delivery/commit/a8bdbca9)) +- Adapt CLI args for delivery API ([#3744](https://github.com/logos-messaging/logos-delivery/pull/3744)) ([1f9c4cb8](https://github.com/logos-messaging/logos-delivery/commit/1f9c4cb8)) +- Adapt debugapi to WakoNodeConf ([#3745](https://github.com/logos-messaging/logos-delivery/pull/3745)) ([4a6ad732](https://github.com/logos-messaging/logos-delivery/commit/4a6ad732)) +- Bump nim-ffi to v0.1.3 ([#3696](https://github.com/logos-messaging/logos-delivery/pull/3696)) ([a02aaab5](https://github.com/logos-messaging/logos-delivery/commit/a02aaab5)) +- Bump nim-metrics to v0.2.1 ([#3734](https://github.com/logos-messaging/logos-delivery/pull/3734)) ([c7e0cc0e](https://github.com/logos-messaging/logos-delivery/commit/c7e0cc0e)) +- Add gasprice overflow check 
([#3636](https://github.com/logos-messaging/logos-delivery/pull/3636)) ([a8590a0a](https://github.com/logos-messaging/logos-delivery/commit/a8590a0a)) +- Pin RLN dependencies to specific version ([#3649](https://github.com/logos-messaging/logos-delivery/pull/3649)) ([834eea94](https://github.com/logos-messaging/logos-delivery/commit/834eea94)) +- Update CI/README references after repository rename to logos-delivery ([#3729](https://github.com/logos-messaging/logos-delivery/pull/3729)) ([895f3e2d](https://github.com/logos-messaging/logos-delivery/commit/895f3e2d)) + +### This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): + +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`WAKU2-LIGHTPUSH v3`](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) | `draft` | `/vac/waku/lightpush/3.0.0` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/master/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + ## v0.37.1 (2026-03-12) ### Bug Fixes From d2fdd6ff36b198a51fd1f084f6c394c905569ec9 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 19 Mar 2026 21:37:04 +0100 Subject: [PATCH 2/8] allow union of several retention policies (#3766) * refactor retention policy to allow union of several retention policies * bug fix time retention policy * add removal of orphan partitions if any * use nim-http-utils 0.4.1 --- tests/wakunode2/test_cli_args.nim | 81 +++++++++++++++- tools/confutils/cli_args.nim | 4 +- vendor/nim-http-utils | 2 +- .../store_service_conf_builder.nim | 50 +++++++++- waku/factory/node_factory.nim | 4 +- waku/factory/waku_conf.nim | 2 +- waku/node/kernel_api/store.nim | 4 +- waku/waku_archive/archive.nim | 33 ++++--- .../postgres_driver/postgres_driver.nim | 95 ++++++++++++++++--- waku/waku_archive/retention_policy.nim | 3 + .../waku_archive/retention_policy/builder.nim | 13 ++- .../retention_policy_capacity.nim | 3 + .../retention_policy_size.nim | 3 + .../retention_policy_time.nim | 16 +--- 14 files changed, 260 insertions(+), 53 deletions(-) diff --git a/tests/wakunode2/test_cli_args.nim b/tests/wakunode2/test_cli_args.nim index dabc78083..5108b4a9d 100644 --- 
a/tests/wakunode2/test_cli_args.nim +++ b/tests/wakunode2/test_cli_args.nim @@ -1,7 +1,7 @@ {.used.} import - std/options, + std/[options, strutils], testutils/unittests, chronos, libp2p/crypto/[crypto, secp], @@ -261,6 +261,85 @@ suite "Waku external config - Shards": ## Then assert res.isErr(), "Invalid shard was accepted" +suite "Waku external config - store retention policy": + test "Default retention policy": + ## Given + var conf = defaultWakuNodeConf().get() + conf.store = true + conf.storeMessageDbUrl = "sqlite://test.db" + # storeMessageRetentionPolicy keeps its default: "time:<2 days in seconds>" + + ## When + let res = conf.toWakuConf() + + ## Then + assert res.isOk(), $res.error + let wakuConf = res.get() + require wakuConf.storeServiceConf.isSome() + check wakuConf.storeServiceConf.get().retentionPolicies == + @["time:" & $2.days.seconds] + + test "Single custom retention policy": + ## Given + var conf = defaultWakuNodeConf().get() + conf.store = true + conf.storeMessageDbUrl = "sqlite://test.db" + conf.storeMessageRetentionPolicy = "capacity:50000" + + ## When + let res = conf.toWakuConf() + + ## Then + assert res.isOk(), $res.error + let wakuConf = res.get() + require wakuConf.storeServiceConf.isSome() + check wakuConf.storeServiceConf.get().retentionPolicies == @["capacity:50000"] + + test "Retention policies with whitespace around semicolons and colons": + ## Given + var conf = defaultWakuNodeConf().get() + conf.store = true + conf.storeMessageDbUrl = "sqlite://test.db" + conf.storeMessageRetentionPolicy = "time:3600 ; capacity:10000 ; size : 30GB" + + ## When + let res = conf.toWakuConf() + + ## Then + assert res.isOk(), $res.error + let wakuConf = res.get() + require wakuConf.storeServiceConf.isSome() + check wakuConf.storeServiceConf.get().retentionPolicies == + @["time:3600", "capacity:10000", "size:30GB"] + + test "Invalid retention policy type returns error": + ## Given + var conf = defaultWakuNodeConf().get() + conf.store = true + 
conf.storeMessageDbUrl = "sqlite://test.db" + conf.storeMessageRetentionPolicy = "foo:1234" + + ## When + let res = conf.toWakuConf() + + ## Then + check res.isErr() + check res.error.contains("unknown retention policy type") + + test "Duplicated retention policy type returns error": + ## Given + var conf = defaultWakuNodeConf().get() + conf.store = true + conf.storeMessageDbUrl = "sqlite://test.db" + conf.storeMessageRetentionPolicy = "time:3600;time:7200;capacity:10000" + + ## When + let res = conf.toWakuConf() + + ## Then + check res.isErr() + check res.error.contains("duplicated retention policy type") + suite "Waku external config - http url parsing": test "Basic HTTP URLs without authentication": check string(parseCmdArg(EthRpcUrl, "https://example.com/path")) == diff --git a/tools/confutils/cli_args.nim b/tools/confutils/cli_args.nim index 4a6e8c618..74e3c66bd 100644 --- a/tools/confutils/cli_args.nim +++ b/tools/confutils/cli_args.nim @@ -363,7 +363,7 @@ hence would have reachability issues.""", storeMessageRetentionPolicy* {. desc: - "Message store retention policy. Time retention policy: 'time:'. Capacity retention policy: 'capacity:'. Size retention policy: 'size:'. Set to 'none' to disable.", + "Message store retention policy. Multiple policies may be provided as a semicolon-separated string and are applied as a union. Time retention policy: 'time:'. Capacity retention policy: 'capacity:'. Size retention policy: 'size:'. Set to 'none' to disable. 
Example: 'time:3600;size:1GB;capacity:100'.", defaultValue: "time:" & $2.days.seconds, name: "store-message-retention-policy" .}: string @@ -1047,7 +1047,7 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = b.storeServiceConf.withEnabled(n.store) b.storeServiceConf.withSupportV2(n.legacyStore) - b.storeServiceConf.withRetentionPolicy(n.storeMessageRetentionPolicy) + b.storeServiceConf.withRetentionPolicies(n.storeMessageRetentionPolicy) b.storeServiceConf.withDbUrl(n.storeMessageDbUrl) b.storeServiceConf.withDbVacuum(n.storeMessageDbVacuum) b.storeServiceConf.withDbMigration(n.storeMessageDbMigration) diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils index c53852d9e..f142cb2e8 160000 --- a/vendor/nim-http-utils +++ b/vendor/nim-http-utils @@ -1 +1 @@ -Subproject commit c53852d9e24205b6363bba517fa8ee7bde823691 +Subproject commit f142cb2e8bd812dd002a6493b6082827bb248592 diff --git a/waku/factory/conf_builder/store_service_conf_builder.nim b/waku/factory/conf_builder/store_service_conf_builder.nim index d5d48c34d..30c743e01 100644 --- a/waku/factory/conf_builder/store_service_conf_builder.nim +++ b/waku/factory/conf_builder/store_service_conf_builder.nim @@ -1,4 +1,5 @@ -import chronicles, std/options, results, chronos +import std/[options, strutils, sequtils] +import chronicles, results, chronos import ../waku_conf, ./store_sync_conf_builder logScope: @@ -15,7 +16,7 @@ type StoreServiceConfBuilder* = object dbVacuum*: Option[bool] supportV2*: Option[bool] maxNumDbConnections*: Option[int] - retentionPolicy*: Option[string] + retentionPolicies*: seq[string] resume*: Option[bool] storeSyncConf*: StoreSyncConfBuilder @@ -42,12 +43,43 @@ proc withMaxNumDbConnections*( ) = b.maxNumDbConnections = some(maxNumDbConnections) -proc withRetentionPolicy*(b: var StoreServiceConfBuilder, retentionPolicy: string) = - b.retentionPolicy = some(retentionPolicy) +proc withRetentionPolicies*(b: var StoreServiceConfBuilder, retentionPolicies: string) = + 
b.retentionPolicies = retentionPolicies + .multiReplace((" ", ""), ("\t", "")) + .split(";") + .mapIt(it.strip()) + .filterIt(it.len > 0) proc withResume*(b: var StoreServiceConfBuilder, resume: bool) = b.resume = some(resume) +const ValidRetentionPolicyTypes = ["time", "capacity", "size"] + +proc validateRetentionPolicies(policies: seq[string]): Result[void, string] = + var seen: seq[string] + + for p in policies: + let policy = p.multiReplace((" ", ""), ("\t", "")) + let parts = policy.split(":", 1) + if parts.len != 2 or parts[1] == "": + return err( + "invalid retention policy format: '" & policy & "', expected ':'" + ) + + let policyType = parts[0].toLowerAscii() + if policyType notin ValidRetentionPolicyTypes: + return err( + "unknown retention policy type: '" & policyType & + "', valid types are: time, capacity, size" + ) + + if policyType in seen: + return err("duplicated retention policy type: '" & policyType & "'") + + seen.add(policyType) + + return ok() + proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string] = if not b.enabled.get(false): return ok(none(StoreServiceConf)) @@ -58,6 +90,14 @@ proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string let storeSyncConf = b.storeSyncConf.build().valueOr: return err("Store Sync Conf failed to build") + let retentionPolicies = + if b.retentionPolicies.len == 0: + @["time:" & $2.days.seconds] + else: + validateRetentionPolicies(b.retentionPolicies).isOkOr: + return err("invalid retention policies: " & error) + b.retentionPolicies + return ok( some( StoreServiceConf( @@ -66,7 +106,7 @@ proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string dbVacuum: b.dbVacuum.get(false), supportV2: b.supportV2.get(false), maxNumDbConnections: b.maxNumDbConnections.get(50), - retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds), + retentionPolicies: retentionPolicies, resume: b.resume.get(false), storeSyncConf: storeSyncConf, ) diff --git 
a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index 2f82440f6..f6b39b93f 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -240,10 +240,10 @@ proc setupProtocols( ).valueOr: return err("failed to setup archive driver: " & error) - let retPolicy = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy).valueOr: + let retPolicies = policy.RetentionPolicy.new(storeServiceConf.retentionPolicies).valueOr: return err("failed to create retention policy: " & error) - node.mountArchive(archiveDriver, retPolicy).isOkOr: + node.mountArchive(archiveDriver, retPolicies).isOkOr: return err("failed to mount waku archive protocol: " & error) if storeServiceConf.supportV2: diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim index 01574d067..6ed34e131 100644 --- a/waku/factory/waku_conf.nim +++ b/waku/factory/waku_conf.nim @@ -62,7 +62,7 @@ type StoreServiceConf* {.requiresInit.} = object dbVacuum*: bool supportV2*: bool maxNumDbConnections*: int - retentionPolicy*: string + retentionPolicies*: seq[string] resume*: bool storeSyncConf*: Option[StoreSyncConf] diff --git a/waku/node/kernel_api/store.nim b/waku/node/kernel_api/store.nim index 7edae7966..ca9917163 100644 --- a/waku/node/kernel_api/store.nim +++ b/waku/node/kernel_api/store.nim @@ -39,10 +39,10 @@ logScope: proc mountArchive*( node: WakuNode, driver: waku_archive.ArchiveDriver, - retentionPolicy = none(waku_archive.RetentionPolicy), + retentionPolicies = newSeq[waku_archive.RetentionPolicy](), ): Result[void, string] = node.wakuArchive = waku_archive.WakuArchive.new( - driver = driver, retentionPolicy = retentionPolicy + driver = driver, retentionPolicies = retentionPolicies ).valueOr: return err("error in mountArchive: " & error) diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim index 8eb1fc051..95c1a905d 100644 --- a/waku/waku_archive/archive.nim +++ b/waku/waku_archive/archive.nim @@ -45,7 +45,7 @@ type WakuArchive* = ref 
object validator: MessageValidator - retentionPolicy: Option[RetentionPolicy] + retentionPolicies: seq[RetentionPolicy] retentionPolicyHandle: Future[void] metricsHandle: Future[void] @@ -82,13 +82,17 @@ proc new*( T: type WakuArchive, driver: ArchiveDriver, validator: MessageValidator = validate, - retentionPolicy = none(RetentionPolicy), + retentionPolicies = newSeq[RetentionPolicy](0), ): Result[T, string] = if driver.isNil(): return err("archive driver is Nil") - let archive = - WakuArchive(driver: driver, validator: validator, retentionPolicy: retentionPolicy) + if retentionPolicies.len == 0: + return err("at least one retention policy must be provided") + + let archive = WakuArchive( + driver: driver, validator: validator, retentionPolicies: retentionPolicies + ) return ok(archive) @@ -263,16 +267,15 @@ proc findMessages*( ) proc periodicRetentionPolicy(self: WakuArchive) {.async.} = - let policy = self.retentionPolicy.get() - while true: - info "executing message retention policy" - (await policy.execute(self.driver)).isOkOr: - waku_archive_errors.inc(labelValues = [retPolicyFailure]) - error "failed execution of retention policy", error = error - await sleepAsync(WakuArchiveDefaultRetentionPolicyIntervalWhenError) - ## in case of error, let's try again faster - continue + for policy in self.retentionPolicies: + info "executing message retention policy", policy = $policy + (await policy.execute(self.driver)).isOkOr: + waku_archive_errors.inc(labelValues = [retPolicyFailure]) + error "failed execution of retention policy", policy = $policy, error = error + await sleepAsync(WakuArchiveDefaultRetentionPolicyIntervalWhenError) + ## in case of error, let's try again faster + continue await sleepAsync(WakuArchiveDefaultRetentionPolicyInterval) @@ -289,7 +292,7 @@ proc periodicMetricReport(self: WakuArchive) {.async.} = await sleepAsync(WakuArchiveDefaultMetricsReportInterval) proc start*(self: WakuArchive) = - if self.retentionPolicy.isSome(): + if 
self.retentionPolicies.len > 0: self.retentionPolicyHandle = self.periodicRetentionPolicy() self.metricsHandle = self.periodicMetricReport() @@ -297,7 +300,7 @@ proc start*(self: WakuArchive) = proc stopWait*(self: WakuArchive) {.async.} = var futures: seq[Future[void]] - if self.retentionPolicy.isSome() and not self.retentionPolicyHandle.isNil(): + if not self.retentionPolicyHandle.isNil(): futures.add(self.retentionPolicyHandle.cancelAndWait()) if not self.metricsHandle.isNil: diff --git a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim index 2f495ba5d..f632513bc 100644 --- a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim @@ -20,6 +20,9 @@ import declarePublicGauge postgres_payload_size_bytes, "Payload size in bytes of correctly stored messages" +logScope: + topics = "postgres driver" + type PostgresDriver* = ref object of ArchiveDriver ## Establish a separate pools for read/write operations writeConnPool: PgAsyncPool @@ -375,6 +378,7 @@ proc getPartitionsList( ): Future[ArchiveDriverResult[seq[string]]] {.async.} = ## Retrieves the seq of partition table names. ## e.g: @["messages_1708534333_1708534393", "messages_1708534273_1708534333"] + ## This returns the partitions that are attached to the main messages table. var partitions: seq[string] proc rowCallback(pqResult: ptr PGresult) = for iRow in 0 ..< pqResult.pqNtuples(): @@ -401,6 +405,49 @@ proc getPartitionsList( return ok(partitions) +## fwd declaration. The implementation is below. +proc dropPartition( + self: PostgresDriver, partitionName: string +): Future[ArchiveDriverResult[void]] {.async.} + +proc dropOrphanPartitions( + s: PostgresDriver +): Future[ArchiveDriverResult[void]] {.async.} = + ## Tries to remove partitions that weren't correctly removed during retention policy execution. 
+ ## Orphan partition is a partition that is not attached to the main messages table. + ## Therefore, it is not used for queries and can be safely removed. + var partitions: seq[string] + proc rowCallback(pqResult: ptr PGresult) = + for iRow in 0 ..< pqResult.pqNtuples(): + let partitionName = $(pqgetvalue(pqResult, iRow, 0)) + partitions.add(partitionName) + + ( + await s.readConnPool.pgQuery( + """ + SELECT c.relname AS partition_name + FROM pg_class c + LEFT JOIN pg_inherits i ON i.inhrelid = c.oid + WHERE c.relname LIKE 'messages_%' + AND c.relname != 'messages_lookup' + AND c.relkind = 'r' -- only regular tables + AND i.inhrelid IS NULL -- detached partition + ORDER BY partition_name + """, + newSeq[string](0), + rowCallback, + ) + ).isOkOr: + return err("dropOrphanPartitions failed in query: " & $error) + + for partition in partitions: + info "orphan partition found", partitionName = partition + (await s.dropPartition(partition)).isOkOr: + error "failed to drop orphan partition", partitionName = partition, error = $error + continue + + return ok() + proc getTimeCursor( s: PostgresDriver, hashHex: string ): Future[ArchiveDriverResult[Option[Timestamp]]] {.async.} = @@ -1267,11 +1314,18 @@ proc loopPartitionFactory( self: PostgresDriver, onFatalError: OnFatalErrorHandler ) {.async.} = ## Loop proc that continuously checks whether we need to create a new partition. - ## Notice that the deletion of partitions is handled by the retention policy modules. + ## Notice that the deletion of partitions is mostly handled by the retention policy modules. + ## This loop only removes orphan partitions which were detached but not properly removed by the + ## retention policy module due to some error. However, the main task of this loop is to create + ## new partitions when needed. 
info "starting loopPartitionFactory" while true: + trace "loopPartitionFactory iteration started" + (await self.dropOrphanPartitions()).isOkOr: + onFatalError("error when dropping orphan partitions: " & $error) + trace "Check if a new partition is needed" ## Let's make the 'partition_manager' aware of the current partitions @@ -1329,14 +1383,24 @@ proc getTableSize*( return ok(tableSize) -proc removePartition( +proc dropPartition( + self: PostgresDriver, partitionName: string +): Future[ArchiveDriverResult[void]] {.async.} = + let dropPartitionQuery = "DROP TABLE " & partitionName + info "drop partition", query = dropPartitionQuery + (await self.performWriteQuery(dropPartitionQuery)).isOkOr: + return err(fmt"error in dropPartition: {dropPartitionQuery}: " & $error) + + return ok() + +proc detachAndDropPartition( self: PostgresDriver, partition: Partition ): Future[ArchiveDriverResult[void]] {.async.} = - ## Removes the desired partition and also removes the rows from messages_lookup table + ## Detaches and drops the desired partition and also removes the rows from messages_lookup table ## whose rows belong to the partition time range let partitionName = partition.getName() - info "beginning of removePartition", partitionName + info "beginning of detachAndDropPartition", partitionName let partSize = (await self.getTableSize(partitionName)).valueOr("") @@ -1361,11 +1425,8 @@ proc removePartition( else: return err(fmt"error in {detachPartitionQuery}: " & $error) - ## Drop the partition - let dropPartitionQuery = "DROP TABLE " & partitionName - info "removeOldestPartition drop partition", query = dropPartitionQuery - (await self.performWriteQuery(dropPartitionQuery)).isOkOr: - return err(fmt"error in {dropPartitionQuery}: " & $error) + ## Drop partition + ?(await self.dropPartition(partitionName)) info "removed partition", partition_name = partitionName, partition_size = partSize self.partitionMngr.removeOldestPartitionName() @@ -1390,8 +1451,18 @@ proc 
removePartitionsOlderThan( var oldestPartition = self.partitionMngr.getOldestPartition().valueOr: return err("could not get oldest partition in removePartitionOlderThan: " & $error) - while not oldestPartition.containsMoment(tsInSec): - (await self.removePartition(oldestPartition)).isOkOr: + debug "oldest partition info", + partitionName = oldestPartition.getName(), + partitionLastMoment = oldestPartition.getLastMoment(), + tsInSec + + while oldestPartition.getLastMoment() < tsInSec: + info "start removing partition whose first record is older than the specified timestamp", + partitionName = oldestPartition.getName(), + partitionFirstMoment = oldestPartition.getLastMoment(), + tsInSec + + (await self.detachAndDropPartition(oldestPartition)).isOkOr: return err("issue in removePartitionsOlderThan: " & $error) oldestPartition = self.partitionMngr.getOldestPartition().valueOr: @@ -1419,7 +1490,7 @@ proc removeOldestPartition( info "Skipping to remove the current partition" return ok() - return await self.removePartition(oldestPartition) + return await self.detachAndDropPartition(oldestPartition) proc containsAnyPartition*(self: PostgresDriver): bool = return not self.partitionMngr.isEmpty() diff --git a/waku/waku_archive/retention_policy.nim b/waku/waku_archive/retention_policy.nim index d4b75ee1f..c2663fb66 100644 --- a/waku/waku_archive/retention_policy.nim +++ b/waku/waku_archive/retention_policy.nim @@ -11,3 +11,6 @@ method execute*( p: RetentionPolicy, store: ArchiveDriver ): Future[RetentionPolicyResult[void]] {.base, async.} = discard + +method `$`*(p: RetentionPolicy): string {.base, gcsafe.} = + "unknown retention policy" diff --git a/waku/waku_archive/retention_policy/builder.nim b/waku/waku_archive/retention_policy/builder.nim index 6cb131bbc..7e777f4a0 100644 --- a/waku/waku_archive/retention_policy/builder.nim +++ b/waku/waku_archive/retention_policy/builder.nim @@ -7,7 +7,7 @@ import ./retention_policy_capacity, ./retention_policy_size -proc new*( +proc 
new( T: type RetentionPolicy, retPolicy: string ): RetentionPolicyResult[Option[RetentionPolicy]] = let retPolicy = retPolicy.toLower @@ -83,3 +83,14 @@ proc new*( return ok(some(retPolicy)) else: return err("unknown retention policy") + +proc new*( + T: typedesc[RetentionPolicy], retPolicies: seq[string] +): RetentionPolicyResult[seq[RetentionPolicy]] = + var policies: seq[RetentionPolicy] + for retPolicy in retPolicies: + let policy = RetentionPolicy.new(retPolicy).valueOr: + return err(error) + if policy.isSome(): + policies.add(policy.get()) + return ok(policies) diff --git a/waku/waku_archive/retention_policy/retention_policy_capacity.nim b/waku/waku_archive/retention_policy/retention_policy_capacity.nim index ed4dd2339..ff4da6861 100644 --- a/waku/waku_archive/retention_policy/retention_policy_capacity.nim +++ b/waku/waku_archive/retention_policy/retention_policy_capacity.nim @@ -50,6 +50,9 @@ proc new*(T: type CapacityRetentionPolicy, capacity = DefaultCapacity): T = capacity: capacity, totalCapacity: totalCapacity, deleteWindow: deleteWindow ) +method `$`*(p: CapacityRetentionPolicy): string = + "capacity:" & $p.capacity + method execute*( p: CapacityRetentionPolicy, driver: ArchiveDriver ): Future[RetentionPolicyResult[void]] {.async.} = diff --git a/waku/waku_archive/retention_policy/retention_policy_size.nim b/waku/waku_archive/retention_policy/retention_policy_size.nim index e60aba303..416d95ec0 100644 --- a/waku/waku_archive/retention_policy/retention_policy_size.nim +++ b/waku/waku_archive/retention_policy/retention_policy_size.nim @@ -15,6 +15,9 @@ type SizeRetentionPolicy* = ref object of RetentionPolicy proc new*(T: type SizeRetentionPolicy, size = DefaultRetentionSize): T = SizeRetentionPolicy(sizeLimit: size) +method `$`*(p: SizeRetentionPolicy): string = + "size:" & $p.sizeLimit & "b" + method execute*( p: SizeRetentionPolicy, driver: ArchiveDriver ): Future[RetentionPolicyResult[void]] {.async.} = diff --git 
a/waku/waku_archive/retention_policy/retention_policy_time.nim b/waku/waku_archive/retention_policy/retention_policy_time.nim index 6d4c0815a..12f056c7b 100644 --- a/waku/waku_archive/retention_policy/retention_policy_time.nim +++ b/waku/waku_archive/retention_policy/retention_policy_time.nim @@ -6,29 +6,23 @@ import ../../waku_core, ../driver, ../retention_policy logScope: topics = "waku archive retention_policy" -const DefaultRetentionTime*: int64 = 30.days.seconds - type TimeRetentionPolicy* = ref object of RetentionPolicy retentionTime: chronos.Duration -proc new*(T: type TimeRetentionPolicy, retentionTime = DefaultRetentionTime): T = +proc new*(T: type TimeRetentionPolicy, retentionTime: int64): T = TimeRetentionPolicy(retentionTime: retentionTime.seconds) +method `$`*(p: TimeRetentionPolicy): string = + "time:" & $p.retentionTime.seconds + method execute*( p: TimeRetentionPolicy, driver: ArchiveDriver ): Future[RetentionPolicyResult[void]] {.async.} = - ## Delete messages that exceed the retention time by 10% and more (batch delete for efficiency) + ## Delete messages that exceed the retention time info "beginning of executing message retention policy - time" - let omt = (await driver.getOldestMessageTimestamp()).valueOr: - return err("failed to get oldest message timestamp: " & error) - let now = getNanosecondTime(getTime().toUnixFloat()) let retentionTimestamp = now - p.retentionTime.nanoseconds - let thresholdTimestamp = retentionTimestamp - p.retentionTime.nanoseconds div 10 - - if thresholdTimestamp <= omt: - return ok() (await driver.deleteMessagesOlderThanTimestamp(ts = retentionTimestamp)).isOkOr: return err("failed to delete oldest messages: " & error) From a0f134aadb9fcb61801afb9e6235ac18808a1109 Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Thu, 19 Mar 2026 23:09:22 +0100 Subject: [PATCH 3/8] update changelog for v0.37.2 --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
bc5155b6e..e2de7307f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,16 @@ | [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | | [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/master/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | +## v0.37.2 (2026-03-19) + +### Features + +- Allow union of several retention policies ([#3766](https://github.com/logos-messaging/logos-delivery/pull/3766)) + +### Bug Fixes + +- Bump nim-http-utils to v0.4.1 to allow accepting <:><(> as a valid header and tests to validate html rfc7230 ([#43](https://github.com/status-im/nim-http-utils/pull/43)) + ## v0.37.1 (2026-03-12) ### Bug Fixes From 4b5f91c0ce76e34394481513adfa9f2895ff89cd Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Fri, 20 Mar 2026 16:54:42 +0100 Subject: [PATCH 4/8] fix compilation issue in test_node_conf.nim --- tests/api/test_node_conf.nim | 74 ++++++++++++++++++++++++++++++++---- 1 file changed, 67 insertions(+), 7 deletions(-) diff --git a/tests/api/test_node_conf.nim b/tests/api/test_node_conf.nim index d0b3d433c..b19739393 100644 --- a/tests/api/test_node_conf.nim +++ b/tests/api/test_node_conf.nim @@ -1,11 +1,14 @@ {.used.} import std/[options, json, strutils], results, stint, testutils/unittests -import json_serialization -import confutils, confutils/std/net -import tools/confutils/cli_args -import waku/factory/waku_conf, waku/factory/networks_config -import waku/common/logging +import json_serialization, confutils, confutils/std/net +import + tools/confutils/cli_args, + waku/api/api_conf, + waku/factory/waku_conf, + waku/factory/networks_config, + waku/factory/conf_builder/conf_builder, + waku/common/logging # Helper: parse JSON into WakuNodeConf using fieldPairs (same as liblogosdelivery) proc parseWakuNodeConfFromJson(jsonStr: string): Result[WakuNodeConf, string] = @@ -75,7 +78,7 @@ suite "WakuNodeConf - mode-driven toWakuConf": ## Given 
var conf = defaultWakuNodeConf().valueOr: raiseAssert error - conf.mode = WakuMode.noMode + conf.mode = cli_args.WakuMode.noMode conf.relay = true conf.lightpush = false conf.clusterId = 5 @@ -118,7 +121,7 @@ suite "WakuNodeConf - JSON parsing with fieldPairs": require confRes.isOk() let conf = confRes.get() check: - conf.mode == WakuMode.noMode + conf.mode == cli_args.WakuMode.noMode conf.clusterId == 0 conf.logLevel == logging.LogLevel.INFO @@ -368,3 +371,60 @@ suite "NodeConfig (deprecated) - toWakuConf": wakuConf.peerExchangeService == true {.pop.} + +suite "WakuConfBuilder - store retention policies": + test "Multiple retention policies": + ## Given + var b = WakuConfBuilder.init() + b.storeServiceConf.withEnabled(true) + b.storeServiceConf.withDbUrl("sqlite://test.db") + b.storeServiceConf.withRetentionPolicies( + "time:86400 ; capacity:10000; size : 50GB" + ) + + ## When + let wakuConf = b.build().valueOr: + raiseAssert error + + ## Then + require wakuConf.storeServiceConf.isSome() + let storeConf = wakuConf.storeServiceConf.get() + check storeConf.retentionPolicies == @["time:86400", "capacity:10000", "size:50GB"] + + test "Duplicated retention policies returns error": + ## Given + var b = WakuConfBuilder.init() + b.storeServiceConf.withEnabled(true) + b.storeServiceConf.withDbUrl("sqlite://test.db") + b.storeServiceConf.withRetentionPolicies("time:86400;time:800;capacity:10000") + + ## When + let wakuConfRes = b.build() + check wakuConfRes.isErr() + check wakuConfRes.error.contains("duplicated retention policy type") + + test "Incorrect retention policy type returns error": + ## Given + var b = WakuConfBuilder.init() + b.storeServiceConf.withEnabled(true) + b.storeServiceConf.withDbUrl("sqlite://test.db") + b.storeServiceConf.withRetentionPolicies("capaity:10000") + + ## When + let wakuConfRes = b.build() + + ## Then + check wakuConfRes.isErr() + check wakuConfRes.error.contains("unknown retention policy type") + + test "Store disabled - no retention 
policy applied": + ## Given + var b = WakuConfBuilder.init() + # storeServiceConf not enabled + + ## When + let wakuConf = b.build().valueOr: + raiseAssert error + + ## Then + check wakuConf.storeServiceConf.isNone() From 37f587f057cb3f94a73db2ba4fc687175b46686d Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Fri, 20 Mar 2026 21:05:42 +0100 Subject: [PATCH 5/8] set default retention policy in archive.nim --- waku/waku_archive/archive.nim | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim index 95c1a905d..976d7d035 100644 --- a/waku/waku_archive/archive.nim +++ b/waku/waku_archive/archive.nim @@ -14,7 +14,8 @@ import ../waku_core, ../waku_core/message/digest, ./common, - ./archive_metrics + ./archive_metrics, + waku/waku_archive/retention_policy/retention_policy_time logScope: topics = "waku archive" @@ -82,14 +83,11 @@ proc new*( T: type WakuArchive, driver: ArchiveDriver, validator: MessageValidator = validate, - retentionPolicies = newSeq[RetentionPolicy](0), + retentionPolicies = @[RetentionPolicy(TimeRetentionPolicy.new(2.days.seconds))], ): Result[T, string] = if driver.isNil(): return err("archive driver is Nil") - if retentionPolicies.len == 0: - return err("at least one retention policy must be provided") - let archive = WakuArchive( driver: driver, validator: validator, retentionPolicies: retentionPolicies ) From 67491447396fb9408f0d9f5fcb88c71f5c5dca07 Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Fri, 20 Mar 2026 00:18:26 +0100 Subject: [PATCH 6/8] update change log for v0.37.2 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2de7307f..48ead0a63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ ### Bug Fixes - Bump nim-http-utils to v0.4.1 to allow accepting <:><(> as a valid header and tests to validate html rfc7230 ([#43](https://github.com/status-im/nim-http-utils/pull/43)) +- 
Force FINALIZE partition detach after detecting shorter error ([#3728](https://github.com/logos-messaging/logos-delivery/pull/3728)) ## v0.37.1 (2026-03-12) ### Bug Fixes From 4d314b376d95e210e938894027bb9267e834e81d Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Wed, 8 Apr 2026 15:33:16 +0200 Subject: [PATCH 7/8] setting num-shards-in-network to 0 by default (#3748) Co-authored-by: darshankabariya --- tests/wakunode2/test_cli_args.nim | 12 ++++++------ tools/confutils/cli_args.nim | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/wakunode2/test_cli_args.nim b/tests/wakunode2/test_cli_args.nim index 9197afe02..d08544c2c 100644 --- a/tests/wakunode2/test_cli_args.nim +++ b/tests/wakunode2/test_cli_args.nim @@ -23,9 +23,8 @@ import suite "Waku external config - default values": test "Default sharding value": ## Setup - let defaultShardingMode = AutoSharding - let defaultNumShardsInCluster = 1.uint16 - let defaultSubscribeShards = @[0.uint16] + let defaultShardingMode = StaticSharding + let defaultSubscribeShards: seq[uint16] = @[] ## Given let preConfig = defaultWakuNodeConf().get() @@ -37,7 +36,6 @@ suite "Waku external config - default values": ## Then let conf = res.get() check conf.shardingConf.kind == defaultShardingMode - check conf.shardingConf.numShardsInCluster == defaultNumShardsInCluster check conf.subscribeShards == defaultSubscribeShards test "Default shards value in static sharding": @@ -212,7 +210,7 @@ suite "Waku external config - Shards": let vRes = wakuConf.validate() assert vRes.isOk(), $vRes.error - test "Imvalid shard is passed without num shards": + test "Any shard is valid without num shards in static sharding mode": ## Setup ## Given @@ -222,7 +220,9 @@ suite "Waku external config - Shards": let res = wakuNodeConf.toWakuConf() ## Then - assert res.isErr(), "Invalid shard was accepted" + let wakuConf = res.get() + let vRes = wakuConf.validate() + assert vRes.isOk(), $vRes.error suite 
"Waku external config - store retention policy": test "Default retention policy": diff --git a/tools/confutils/cli_args.nim b/tools/confutils/cli_args.nim index df2bd306c..90a349d9d 100644 --- a/tools/confutils/cli_args.nim +++ b/tools/confutils/cli_args.nim @@ -333,7 +333,7 @@ hence would have reachability issues.""", numShardsInNetwork* {. desc: "Enables autosharding and set number of shards in the cluster, set to `0` to use static sharding", - defaultValue: 1, + defaultValue: 0, name: "num-shards-in-network" .}: uint16 From a4db8895e47164fcdcfc014962ddbccd1a023b1f Mon Sep 17 00:00:00 2001 From: darshankabariya Date: Fri, 10 Apr 2026 17:03:25 +0530 Subject: [PATCH 8/8] chore: resolving lint --- tools/confutils/cli_args.nim | 39 +++++++++++++++--------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/tools/confutils/cli_args.nim b/tools/confutils/cli_args.nim index 90a349d9d..7d531159b 100644 --- a/tools/confutils/cli_args.nim +++ b/tools/confutils/cli_args.nim @@ -50,13 +50,13 @@ type ConfResult*[T] = Result[T, string] type EthRpcUrl* = distinct string type StartUpCommand* = enum - noCommand # default, runs waku + noCommand # default, runs waku generateRlnKeystore # generates a new RLN keystore type WakuMode* {.pure.} = enum noMode # default - use explicit CLI flags as-is - Core # full service node - Edge # client-only node + Core # full service node + Edge # client-only node type WakuNodeConf* = object configFile* {. @@ -183,8 +183,7 @@ type WakuNodeConf* = object name: "agent-string" .}: string - nodekey* {.desc: "P2P node private key as 64 char hex string.", - name: "nodekey".}: + nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}: Option[PrivateKey] listenAddress* {. 
@@ -193,13 +192,11 @@ type WakuNodeConf* = object name: "listen-address" .}: IpAddress - tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, - name: "tcp-port".}: + tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}: Port portsShift* {. - desc: "Add a shift to all port numbers.", defaultValue: 0, - name: "ports-shift" + desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift" .}: uint16 nat* {. @@ -243,13 +240,11 @@ type WakuNodeConf* = object .}: int peerStoreCapacity* {. - desc: "Maximum stored peers in the peerstore.", - name: "peer-store-capacity" + desc: "Maximum stored peers in the peerstore.", name: "peer-store-capacity" .}: Option[int] peerPersistence* {. - desc: "Enable peer persistence.", defaultValue: false, - name: "peer-persistence" + desc: "Enable peer persistence.", defaultValue: false, name: "peer-persistence" .}: bool ## DNS addrs config @@ -406,7 +401,7 @@ hence would have reachability issues.""", storeSyncInterval* {. desc: "Interval between store sync attempts. In seconds.", - defaultValue: 300, # 5 minutes + defaultValue: 300, # 5 minutes name: "store-sync-interval" .}: uint32 @@ -437,7 +432,7 @@ hence would have reachability issues.""", filterSubscriptionTimeout* {. desc: "Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.", - defaultValue: 300, # 5 minutes + defaultValue: 300, # 5 minutes name: "filter-subscription-timeout" .}: uint16 @@ -664,8 +659,7 @@ with the drawback of consuming some more bandwidth.""", .}: bool websocketPort* {. - desc: "WebSocket listening port.", defaultValue: 8000, - name: "websocket-port" + desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port" .}: Port websocketSecureSupport* {. 
@@ -762,8 +756,7 @@ proc parseCmdArg*(T: type ProtectedShard, p: string): T = raise newException(ValueError, "Invalid public key") if isNumber(elements[0]): - return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), - key: publicKey) + return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), key: publicKey) # TODO: Remove when removing protected-topic configuration let shard = RelayShard.parse(elements[0]).valueOr: @@ -891,11 +884,11 @@ proc load*(T: type WakuNodeConf, version = ""): ConfResult[T] = secondarySources = proc( conf: WakuNodeConf, sources: auto ) {.gcsafe, raises: [ConfigurationError].} = - sources.addConfigFile(Envvar, InputFile("wakunode2")) + sources.addConfigFile(Envvar, InputFile("wakunode2")) - if conf.configFile.isSome(): - sources.addConfigFile(Toml, conf.configFile.get()) - , + if conf.configFile.isSome(): + sources.addConfigFile(Toml, conf.configFile.get()) + , ) ok(conf)