# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  std/[hashes, sets],
  chronos,
  eth/[common, p2p],
  stew/[interval_set, keyed_queue, sorted_set],
  ../../db/select_backend,
  ../misc/[best_pivot, block_queue],
  ../sync_desc,
  ./worker/com/com_error,
  ./worker/db/[snapdb_desc, snapdb_pivot],
  ./worker/ticker,
  ./range_desc

type
  SnapAccountsList* = SortedSet[NodeTag,Hash256]
    ## Sorted pair of `(account,state-root)` entries

  SnapSlotsQueue* = KeyedQueue[Hash256,SnapSlotsQueueItemRef]
    ## Handles list of storage slots data to fetch, indexed by storage root.
    ##
    ## Typically, storage data requests cover the full storage slots trie. If
    ## there is only a partial list of slots to fetch, the queue entry is
    ## stored left-most for easy access.

  SnapSlotsQueueItemRef* = ref object
    ## Storage slots request data. This entry is similar to `AccountSlotsHeader`
    ## where the optional `subRange` interval has been replaced by an interval
    ## range + healing support.
    accKey*: NodeKey                   ## Owner account
    slots*: SnapRangeBatchRef          ## Slots to fetch, nil => all slots

  SnapTodoRanges* = array[2,NodeTagRangeSet]
    ## Pair of sets of ``unprocessed`` node ranges that need to be fetched and
    ## integrated. The ranges in the first set must be handled with priority.
    ##
    ## This data structure is used for coordinating peers that run quasi
    ## parallel.

  SnapRangeBatchRef* = ref object
    ## `NodeTag` ranges to fetch, healing support
    unprocessed*: SnapTodoRanges       ## Range of slots to be fetched
    processed*: NodeTagRangeSet        ## Node ranges definitely processed

  SnapPivotRef* = ref object
    ## Per-state root cache for particular snap data environment
    stateHeader*: BlockHeader          ## Pivot state, containing state root

    # Accounts download coverage
    fetchAccounts*: SnapRangeBatchRef  ## Set of accounts ranges to fetch

    # Storage slots download
    fetchStorageFull*: SnapSlotsQueue  ## Fetch storage trie for these accounts
    fetchStoragePart*: SnapSlotsQueue  ## Partial storage trie to complete
    parkedStorage*: HashSet[NodeKey]   ## Storage batch items in use

    # Info
    nAccounts*: uint64                 ## Imported # of accounts
    nSlotLists*: uint64                ## Imported # of account storage tries

    # Checkpointing
    savedFullPivotOk*: bool            ## This fully completed pivot was saved

    # Mothballing, ready to be swapped into newer pivot record
    storageAccounts*: SnapAccountsList ## Accounts with missing storage slots
    archived*: bool                    ## Not latest pivot, anymore

  SnapPivotTable* = KeyedQueue[Hash256,SnapPivotRef]
    ## LRU table, indexed by state root

  SnapRecoveryRef* = ref object
    ## Recovery context
    state*: SnapDbPivotRegistry        ## Saved recovery context state
    level*: int                        ## Top level is zero

  SnapBuddyData* = object
    ## Per-worker local descriptor data extension
    errors*: ComErrorStatsRef          ## For error handling

    # Full sync continuation parameters
    bPivot*: BestPivotWorkerRef        ## Local pivot worker descriptor
    bQueue*: BlockQueueWorkerRef       ## Block queue worker

  SnapSyncModeType* = enum
    ## Current sync mode. After a snapshot has been downloaded, the system
    ## proceeds with full sync.
    SnapSyncMode = 0                   ## Start mode
    FullSyncMode

  SnapSyncSpecs* = object
    ## Full specs for all sync modes. This table must be held in the main
    ## descriptor and initialised at run time. The table values are opaque
    ## and will be specified in the worker module(s).
    active*: SnapSyncModeType
    tab*: array[SnapSyncModeType,RootRef]

  SnapCtxData* = object
    ## Globally shared data extension
    rng*: ref HmacDrbgContext          ## Random generator
    dbBackend*: ChainDB                ## Low level DB driver access (if any)
    snapDb*: SnapDbRef                 ## Accounts snapshot DB

    # Pivot table
    pivotTable*: SnapPivotTable        ## Per state root environment
    beaconHeader*: BlockHeader         ## Running on beacon chain
    coveredAccounts*: NodeTagRangeSet  ## Derived from all available accounts
    covAccTimesFull*: uint             ## # of 100% coverages
    recovery*: SnapRecoveryRef         ## Current recovery checkpoint/context

    # Info
    ticker*: TickerRef                 ## Ticker, logger

    # Snap/full mode multiplexing
    syncMode*: SnapSyncSpecs           ## Sync mode methods & data
    fullPivot*: SnapPivotRef           ## Start full sync from here

    # Full sync continuation parameters
    bPivot*: BestPivotCtxRef           ## Global pivot descriptor
    bCtx*: BlockQueueCtxRef            ## Global block queue descriptor

  SnapBuddyRef* = BuddyRef[SnapCtxData,SnapBuddyData]
    ## Extended worker peer descriptor

  SnapCtxRef* = CtxRef[SnapCtxData]
    ## Extended global descriptor

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc hash*(a: SnapSlotsQueueItemRef): Hash =
  ## Table/KeyedQueue mixin
  cast[pointer](a).hash

proc hash*(a: Hash256): Hash =
  ## Table/KeyedQueue mixin
  a.data.hash

# ------------------------------------------------------------------------------
# Public helpers: coverage
# ------------------------------------------------------------------------------

proc pivotAccountsCoverage*(ctx: SnapCtxRef): float =
  ## Returns the accounts coverage factor
  ctx.pool.coveredAccounts.fullFactor + ctx.pool.covAccTimesFull.float
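
# Illustration only (not from the original module): the coverage factor is
# cumulative over `coveredAccounts` roll-overs, so it may exceed 1.0. For
# example, assuming two completed roll-overs and a current 30% coverage:
#
#   ctx.pool.covAccTimesFull == 2
#   ctx.pool.coveredAccounts.fullFactor == 0.3
#   ctx.pivotAccountsCoverage() == 2.3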

proc pivotAccountsCoverage100PcRollOver*(ctx: SnapCtxRef) =
  ## Roll over `coveredAccounts` registry when it reaches 100%.
  if ctx.pool.coveredAccounts.isFull:
    # All account hashes are covered by completed range fetch processes for
    # all pivot environments. So reset the covering and record the full-ness
    # level.
    ctx.pool.covAccTimesFull.inc
    ctx.pool.coveredAccounts.clear()
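
# Sketch of an assumed call pattern (illustration only, `fetchedRange` is a
# hypothetical variable): after a worker merges a freshly fetched accounts
# range into the registry, the roll-over check keeps `coveredAccounts`
# bounded while `pivotAccountsCoverage()` remains non-decreasing.
#
#   discard ctx.pool.coveredAccounts.merge(fetchedRange)
#   ctx.pivotAccountsCoverage100PcRollOver()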

# ------------------------------------------------------------------------------
# Public helpers: SnapTodoRanges
# ------------------------------------------------------------------------------

proc init*(q: var SnapTodoRanges; clear = false) =
  ## Populate node range sets with maximal range in the first range set. This
  ## kind of pair of interval sets is managed as follows:
  ## * As long as possible, fetch and merge back intervals on the first set.
  ## * If the first set is empty and some intervals are to be fetched, swap
  ##   first and second interval lists.
  ## That way, intervals from the first set are prioritised while the rest is
  ## considered after the prioritised intervals are exhausted.
  q[0] = NodeTagRangeSet.init()
  q[1] = NodeTagRangeSet.init()
  if not clear:
    discard q[0].merge FullNodeTagRange
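
# Illustration only (a simplified sketch of the life cycle described above,
# error handling omitted):
#
#   var q: SnapTodoRanges
#   q.init()                            # q[0] holds the full range, q[1] empty
#   let rc = q.fetch(maxLen = 1024.u256)
#   if rc.isOk:
#     # ... try to process rc.value; if that fails, the interval can be
#     # handed back, either with priority via `q.merge rc.value` or partially
#     # de-prioritised via `q.mergeSplit rc.value`.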

proc clear*(q: var SnapTodoRanges) =
  ## Reset argument range sets empty.
  q[0].clear()
  q[1].clear()

proc merge*(q: var SnapTodoRanges; iv: NodeTagRange) =
  ## Unconditionally merge the node range into the account ranges list.
  discard q[0].merge(iv)
  discard q[1].reduce(iv)

proc mergeSplit*(q: var SnapTodoRanges; iv: NodeTagRange) =
  ## Ditto with priorities partially reversed
  if iv.len == 1:
    discard q[0].reduce iv
    discard q[1].merge iv
  else:
    let
      # Note that (`iv.len` == 0) => (`iv` == `FullNodeTagRange`)
      midPt = iv.minPt + ((iv.maxPt - iv.minPt) shr 1)
      iv1 = NodeTagRange.new(iv.minPt, midPt)
      iv2 = NodeTagRange.new(midPt + 1.u256, iv.maxPt)
    discard q[0].reduce iv1
    discard q[1].merge iv1
    discard q[0].merge iv2
    discard q[1].reduce iv2
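
# Worked example (illustration only, small numbers for readability): splitting
# the closed interval [0,9] at its midpoint moves the lower half to the second
# (lower priority) set and the upper half to the first set:
#
#   midPt = 0 + ((9 - 0) shr 1) = 4
#   iv1   = [0,4]  ->  removed from q[0], merged into q[1]
#   iv2   = [5,9]  ->  merged into q[0], removed from q[1]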

proc reduce*(q: var SnapTodoRanges; iv: NodeTagRange) =
  ## Unconditionally remove the node range from the account ranges list
  discard q[0].reduce(iv)
  discard q[1].reduce(iv)

iterator ivItems*(q: var SnapTodoRanges): NodeTagRange =
  ## Iterator over all list entries
  for ivSet in q:
    for iv in ivSet.increasing:
      yield iv

proc fetch*(q: var SnapTodoRanges; maxLen = 0.u256): Result[NodeTagRange,void] =
  ## Fetch interval from node ranges with maximal size `maxLen`, where
  ## `0.u256` is interpreted as `2^256`.

  # Swap batch queues if the first one is empty
  if q[0].isEmpty:
    swap(q[0], q[1])

  # Fetch from first range list
  let rc = q[0].ge()
  if rc.isErr:
    return err()

  let
    jv = rc.value
    iv = block:
      if maxLen == 0 or (0 < jv.len and jv.len <= maxLen):
        jv
      else:
        # Note that either
        #   (`jv.len` == 0) => (`jv` == `FullNodeTagRange`) => `jv.minPt` == 0
        # or
        #   (`maxLen` < `jv.len`) => (`jv.minPt` + `maxLen` <= `jv.maxPt`)
        NodeTagRange.new(jv.minPt, jv.minPt + maxLen)

  discard q[0].reduce(iv)
  ok(iv)
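
# Illustration only: with the full range queued, a size-limited request hands
# out the left-most part and removes it from the batch (assumed closed
# interval semantics, small number for readability):
#
#   var q: SnapTodoRanges
#   q.init()                            # q[0] = full node tag range
#   let rc = q.fetch(maxLen = 99.u256)
#   # rc.value now spans the node tags 0 .. 99 and that sub-range has been
#   # reduced away from q[0].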

proc verify*(q: var SnapTodoRanges): bool =
  ## Verify consistency, i.e. that the two sets of ranges have no overlap.
  if q[0].chunks == 0 or q[1].chunks == 0:
    # At least one set is empty
    return true
  # So neither set is empty
  if q[0].total == 0 or q[1].total == 0:
    # At least one set is maximal and the other non-empty
    return false
  # So neither set is empty nor full
  let (a,b) = if q[0].chunks < q[1].chunks: (0,1) else: (1,0)
  for iv in q[a].increasing:
    if 0 < q[b].covered(iv):
      return false
  true
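
# Note (illustration only): the helpers above maintain the no-overlap
# invariant by construction, e.g. `merge` adds an interval to q[0] while
# removing it from q[1]. A minimal self-check sketch:
#
#   var q: SnapTodoRanges
#   q.init(clear = true)                # both sets empty
#   q.merge FullNodeTagRange            # queued in q[0] only
#   doAssert q.verify                   # q[1] stays disjoint from q[0]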

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------