Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`

  why:
    Functions `getCommittedStorage()` and `updateOriginalRoot()` from the
    `state_db` module are nowhere used. The emulation of a legacy
    `TransactionID` type functionality is administratively expensive to
    provide by `Aristo` (the legacy DB version is only partially implemented,
    anyway). As there is no other place where `TransactionID`s are used, they
    will not be provided by the `Aristo` variant of the `CoreDb`. For the
    legacy DB API, nothing will change.

* Fix copyright headers in source code

* Get rid of compiler warning

* Update Aristo code, remove unused `merge()` variant, export `hashify()`

  why:
    Adapt to upcoming `CoreDb` wrapper

* Remove synced tx feature from `Aristo`

  why:
    + This feature allowed to synchronise transaction methods like begin,
      commit, and rollback for a group of descriptors.
    + The feature is over-engineered and not needed for `CoreDb`, neither is
      it complete (some convergence features missing.)

* Add debugging helpers to `Kvt`

  also:
    Update database iterator, add count variable yield argument similar
    to `Aristo`.

* Provide optional destructors for `CoreDb` API

  why:
    For the upcoming Aristo wrapper, this allows to control when certain
    smart destruction and update can take place. The auto destructor works
    fine in general when the storage/cache strategy is known and acceptable
    when creating descriptors.

* Add update option for `CoreDb` API function `hash()`

  why:
    The hash function is typically used to get the state root of the MPT.
    Due to lazy hashing, this might not be available on the `Aristo` DB. So
    the `update` option asks for re-hashing the current state changes if
    needed.

* Update API tracking log mode: `info` => `debug`

* Use shared `Kvt` descriptor in new Ledger API

  why:
    No need to create a new descriptor all the time
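Taken together, the caller-facing changes look roughly as follows. A minimal sketch, assuming a `CoreDbRef` instance `db`, a `CoreDbVidRef` instance `vid`, and byte-blob bindings `key`/`val`; the save-mode flags (`Cached`, `AutoSave`, `Companion`) are the `CoreDbSaveFlags` values introduced by this commit:

```nim
# Optional destructor: pick the save strategy when constructing the descriptor
let kvt = db.newKvt(saveMode = AutoSave)         # saved to backend on destruction
kvt.put(key, val).expect "put"
kvt.destroy(saveMode = Cached).expect "destroy"  # override: keep changes cached only

# With lazy hashing on `Aristo`, ask for a re-hash when fetching a state root:
let stateRoot = vid.hash(update = true).valueOr: EMPTY_ROOT_HASH
```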
Parent: 6338969dd5
Commit: c47f021596
@@ -27,6 +27,11 @@ import
 export
+  getKeyRc

+import
+  aristo/aristo_hashify
+export
+  hashify

 import
   aristo/aristo_path
 export
@@ -617,6 +617,12 @@ proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string =
 proc pp*(pAmk: VidsByLabel; db: AristoDbRef; indent = 4): string =
   db.ppXMap(db.top.kMap, pAmk, indent)

+proc pp*(tx: AristoTxRef): string =
+  result = "(uid=" & $tx.txUid & ",lvl=" & $tx.level
+  if not tx.parent.isNil:
+    result &= ", par=" & $tx.parent.txUid
+  result &= ")"
+
 # ---------------------

 proc pp*(
@@ -37,7 +37,7 @@ export

 type
   AristoDudes* = HashSet[AristoDbRef]
-    ## Descriptor peers asharing the same backend
+    ## Descriptor peers sharing the same backend

   AristoTxRef* = ref object
     ## Transaction descriptor
@@ -58,7 +58,6 @@ type
     case rwOk: bool
     of true:
       roDudes: AristoDudes      ## Read-only peers
-      txDudes: AristoDudes      ## Other transaction peers
     else:
       rwDb: AristoDbRef         ## Link to writable descriptor
@@ -168,10 +167,7 @@ func getCentre*(db: AristoDbRef): AristoDbRef =
   else:
     db.dudes.rwDb

-proc reCentre*(
-    db: AristoDbRef;
-    force = false;
-      ): Result[void,AristoError] =
+proc reCentre*(db: AristoDbRef): Result[void,AristoError] =
   ## Re-focus the `db` argument descriptor so that it becomes the centre.
   ## Nothing is done if the `db` descriptor is the centre, already.
   ##
@@ -185,23 +181,9 @@ proc reCentre*(
   ## accessing the same backend database. Descriptors where `isCentre()`
   ## returns `false` must be single destructed with `forget()`.
   ##
-  ## If there is an open transaction spanning several descriptors, the `force`
-  ## flag must be set `true` (unless the argument `db` is centre, already.) The
-  ## argument `db` must be covered by the transaction span. Then the re-centred
-  ## descriptor will also be the centre of the transaction span.
-  ##
   if not db.isCentre:
     let parent = db.dudes.rwDb

-    # Check for multi-transactions
-    if 0 < parent.dudes.txDudes.len:
-      if not force:
-        return err(CentreTxLocked)
-      if db notin parent.dudes.txDudes:
-        return err(OutsideTxSpan)
-      if db.txRef.isNil or parent.txRef.isNil:
-        return err(GarbledTxSpan)
-
     # Steal dudes list from parent, make the rw-parent a read-only dude
     db.dudes = parent.dudes
     parent.dudes = DudesRef(rwOk: false, rwDb: db)
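With the transaction-span machinery gone, the descriptor-sharing workflow reduces to `fork()`/`reCentre()`/`forget()`. A hedged lifecycle sketch using the procs in this file (`db` is assumed to be an `AristoDbRef` centre descriptor; error handling via `expect` for brevity):

```nim
let clone = db.fork().expect "fork"   # read-only peer on the same backend
# ... use `clone` for lookups ...
clone.reCentre().expect "reCentre"    # make `clone` writable; `db` becomes read-only
db.reCentre().expect "reCentre"       # hand the centre back
clone.forget().expect "forget"        # non-centre descriptors must be destructed singly
```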
@@ -217,67 +199,12 @@ proc reCentre*(
       # Update dudes list (parent was alredy updated)
       db.dudes.roDudes.incl parent

-      # Update transaction span
-      if 0 < db.dudes.txDudes.len:
-        db.dudes.txDudes.excl db
-        db.dudes.txDudes.incl parent
-
   ok()


-iterator txSpan*(db: AristoDbRef): AristoDbRef =
-  ## Interate over all descriptors belonging to the transaction span if there
-  ## is any. Note that the centre descriptor is aways part of the transaction
-  ## if there is any.
-  ##
-  if not db.dudes.isNil:
-    let parent = db.getCentre
-    if 0 < parent.dudes.txDudes.len:
-      yield parent
-      for dude in parent.dudes.txDudes.items:
-        yield dude
-
-func nTxSpan*(db: AristoDbRef): int =
-  ## Returns the number of descriptors belonging to the transaction span. This
-  ## function is a fast version of `db.txSpan.toSeq.len`. Note that the
-  ## returned numbe is never `1` (either `0` or at least `2`.)
-  ##
-  if not db.dudes.isNil:
-    let parent = db.getCentre
-    if 0 < parent.dudes.txDudes.len:
-      return 1 + db.getCentre.dudes.txDudes.len
-
-func inTxSpan*(db: AristoDbRef): bool =
-  ## Returns `true` if the argument descriptor `db` belongs to the transaction
-  ## span if there is any. Note that the centre descriptor is aways part of
-  ## the transaction if there is any.
-  ##
-  if not db.isCentre:
-    return db in db.dudes.rwDb.dudes.txDudes
-  elif not db.dudes.isNil:
-    return 0 < db.dudes.txDudes.len
-  false
-
-proc txSpanSet*(dudes: openArray[AristoDbRef]) =
-  ## Define the set of argument descriptors as transaction span.
-  ##
-  if 0 < dudes.len:
-    let parent = dudes[0].getCentre
-    if not parent.dudes.isNil:
-      parent.dudes.txDudes = dudes.toHashSet - [parent].toHashSet
-
-proc txSpanClear*(db: AristoDbRef) =
-  ## Remove all descriptors from the transaction span.
-  ##
-  if not db.isCentre:
-    db.dudes.rwDb.dudes.txDudes.clear
-  elif not db.dudes.isNil:
-    db.dudes.txDudes.clear
-

 proc fork*(
     db: AristoDbRef;
-    rawToplayer = false;
+    rawTopLayer = false;
       ): Result[AristoDbRef,AristoError] =
   ## This function creates a new empty descriptor accessing the same backend
   ## (if any) database as the argument `db`. This new descriptor joins the
@@ -288,7 +215,7 @@ proc fork*(
   ## also cost computing ressources for maintaining and updating backend
   ## filters when writing to the backend database .
   ##
-  ## If the argument `rawToplayer` is set `true` the function will provide an
+  ## If the argument `rawTopLayer` is set `true` the function will provide an
   ## uninitalised and inconsistent (!) top layer. This setting avoids some
   ## database lookup for cases where the top layer is redefined anyway.
   ##
@@ -296,7 +223,7 @@ proc fork*(
     top: LayerRef(),
     backend: db.backend)

-  if not rawToplayer:
+  if not rawTopLayer:
     let rc = clone.backend.getIdgFn()
     if rc.isOk:
       clone.top.vGen = rc.value
@@ -345,7 +272,6 @@ proc forget*(db: AristoDbRef): Result[void,AristoError] =
       parent.dudes = DudesRef(nil)
     else:
       parent.dudes.roDudes.excl db
-      parent.dudes.txDudes.excl db # might be empty, anyway

   # Clear descriptor so it would not do harm if used wrongly
   db[] = AristoDbObj(top: LayerRef())
@@ -250,15 +250,11 @@ type
     TxNotTopTx
     TxStackGarbled
     TxStackUnderflow
-    TxSpanOffCentre
-    TxGarbledSpan

     # Functions from `aristo_desc`
-    CentreTxLocked
     MustBeOnCentre
     NotAllowedOnCentre
-    GarbledTxSpan
-    OutsideTxSpan

     # Miscelaneous handy helpers
     PayloadTypeUnsupported
@@ -230,7 +230,7 @@ proc `==`*(a, b: NodeRef): bool =
 # Public helpers, miscellaneous functions
 # ------------------------------------------------------------------------------

-proc dup*(pld: PayloadRef): PayloadRef =
+func dup*(pld: PayloadRef): PayloadRef =
   ## Duplicate payload.
   case pld.pType:
   of RawData:

@@ -246,7 +246,7 @@ proc dup*(pld: PayloadRef): PayloadRef =
       pType: AccountData,
       account: pld.account)

-proc dup*(vtx: VertexRef): VertexRef =
+func dup*(vtx: VertexRef): VertexRef =
   ## Duplicate vertex.
   # Not using `deepCopy()` here (some `gc` needs `--deepcopy:on`.)
   if vtx.isNil:

@@ -268,7 +268,7 @@ proc dup*(vtx: VertexRef): VertexRef =
         vType: Branch,
         bVid:  vtx.bVid)

-proc dup*(node: NodeRef): NodeRef =
+func dup*(node: NodeRef): NodeRef =
   ## Duplicate node.
   # Not using `deepCopy()` here (some `gc` needs `--deepcopy:on`.)
   if node.isNil:

@@ -293,7 +293,7 @@ proc dup*(node: NodeRef): NodeRef =
         bVid: node.bVid,
         key:  node.key)

-proc dup*(layer: LayerRef): LayerRef =
+func dup*(layer: LayerRef): LayerRef =
   ## Duplicate layer.
   result = LayerRef(
     lTab: layer.lTab,
@@ -710,22 +710,6 @@ proc merge*(

   (merged, dups, AristoError(0))

-proc merge*(
-    db: AristoDbRef;                   # Database, top layer
-    path: PathID;                      # Path into database
-    rlpData: openArray[byte];          # RLP encoded payload data
-      ): Result[bool,AristoError] =
-  ## Variant of `merge()` for storing a single item with implicit state root
-  ## argument `VertexID(1)`.
-  ##
-  db.merge(
-    LeafTie(
-      root: VertexID(1),
-      path: path.normal),
-    PayloadRef(
-      pType:   RlpData,
-      rlpBlob: @rlpData)).to(typeof result)
-
 # ---------------------

 proc merge*(
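The deleted variant was a thin convenience wrapper; a caller can express the same operation through the retained general `merge()`. A sketch mirroring the removed body (`path`/`rlpData` as in the old signature):

```nim
# Equivalent of the removed `merge(db, path, rlpData)` convenience wrapper:
let rc = db.merge(
  LeafTie(
    root: VertexID(1),           # the implicit state root of the removed variant
    path: path.normal),
  PayloadRef(
    pType:   RlpData,
    rlpBlob: @rlpData))
```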
@@ -14,17 +14,9 @@
 {.push raises: [].}

 import
   std/[sequtils, tables],
   results,
   "."/[aristo_desc, aristo_filter, aristo_get, aristo_hashify]

-type
-  DoSpanPrepFn =
-    proc(db: AristoDbRef; flg: bool): Result[void,AristoError] {.gcsafe.}
-
-  DoSpanExecFn =
-    proc(db: AristoDbRef) {.gcsafe.}
-
 func isTop*(tx: AristoTxRef): bool
 func level*(db: AristoDbRef): int
|
@ -50,149 +42,6 @@ proc getTxUid(db: AristoDbRef): uint =
|
|||
db.txUidGen.inc
|
||||
db.txUidGen
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions: Single descriptor transaction frame
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc txBeginPrepImpl(db: AristoDbRef): Result[void,AristoError] =
|
||||
## Starts a new transaction.
|
||||
##
|
||||
if db.level != db.stack.len:
|
||||
return err(TxStackGarbled)
|
||||
ok()
|
||||
|
||||
proc txBeginExecImpl(db: AristoDbRef) =
|
||||
## Starts a new transaction.
|
||||
##
|
||||
db.stack.add db.top.dup # push (save and use top later)
|
||||
db.top.txUid = db.getTxUid()
|
||||
|
||||
db.txRef = AristoTxRef(
|
||||
db: db,
|
||||
txUid: db.top.txUid,
|
||||
parent: db.txRef,
|
||||
level: db.stack.len)
|
||||
|
||||
# ---------------
|
||||
|
||||
proc rollbackImpl(db: AristoDbRef) =
|
||||
## Roll back to previous layer.
|
||||
##
|
||||
db.top = db.stack[^1]
|
||||
db.stack.setLen(db.stack.len-1)
|
||||
db.txRef = db.txRef.parent # `db.txRef` needs to be checked by caller
|
||||
|
||||
# ---------------
|
||||
|
||||
proc commitPrepImpl(
|
||||
db: AristoDbRef; # Top transaction on database
|
||||
dontHashify: bool; # Process/fix MPT hashes
|
||||
): Result[void,AristoError] =
|
||||
## Commit transaction layer.
|
||||
##
|
||||
if db.top.dirty and not dontHashify:
|
||||
discard ? db.hashify().mapErr fromVae
|
||||
ok()
|
||||
|
||||
proc commitExecImpl(db: AristoDbRef) =
|
||||
## Commit transaction layer.
|
||||
##
|
||||
# Keep top and discard layer below
|
||||
db.top.txUid = db.stack[^1].txUid
|
||||
db.stack.setLen(db.stack.len-1)
|
||||
db.txRef = db.txRef.parent # `db.txRef` needs to be checked by caller
|
||||
|
||||
# ---------------
|
||||
|
||||
proc collapseCommitPrepImpl(
|
||||
db: AristoDbRef;
|
||||
dontHashify = false; # Process/fix MPT hashes
|
||||
): Result[void,AristoError] =
|
||||
# For commit, hashify the current layer, otherwise the stack bottom layer.
|
||||
# install the stack bottom.
|
||||
if db.top.dirty and not dontHashify:
|
||||
discard ? db.hashify().mapErr fromVae
|
||||
ok()
|
||||
|
||||
proc collapseRollbackPrepImpl(
|
||||
db: AristoDbRef;
|
||||
dontHashify = false; # Process/fix MPT hashes
|
||||
): Result[void,AristoError] =
|
||||
# Rollback hashify the current layer, otherwise the stack bottom layer.
|
||||
# install the stack bottom.
|
||||
if db.top.dirty and not dontHashify:
|
||||
db.stack[0].swap db.top
|
||||
defer: db.stack[0].swap db.top
|
||||
discard ? db.hashify().mapErr fromVae
|
||||
ok()
|
||||
|
||||
|
||||
proc collapseCommitExecImpl(db: AristoDbRef) =
|
||||
# If commit, then leave the current layer and clear the stack, oterwise
|
||||
# install the stack bottom.
|
||||
db.top.txUid = 0
|
||||
db.stack.setLen(0)
|
||||
db.txRef = AristoTxRef(nil)
|
||||
|
||||
proc collapseRollbackExecImpl(db: AristoDbRef) =
|
||||
db.stack[0].swap db.top
|
||||
db.top.txUid = 0
|
||||
db.stack.setLen(0)
|
||||
db.txRef = AristoTxRef(nil)
|
||||
|
||||
# ---------------
|
||||
|
||||
proc doSpan(
|
||||
db: AristoDbRef; # Top transaction on database
|
||||
prepFn = DoSpanPrepFn(nil); # Optional preparation layer
|
||||
prepFlag = false; # `prepFn` argument
|
||||
execFn: DoSpanExecFn; # Mandatory execution layer
|
||||
): Result[void,AristoError]
|
||||
{.gcsafe.} =
|
||||
## Common execution framework for `rollbackImpl()` or `commitImpl()` over
|
||||
## all descriptors in the transaction span.
|
||||
##
|
||||
if not prepFn.isNil:
|
||||
var
|
||||
revert: Table[AristoDbRef,LayerRef]
|
||||
defer:
|
||||
# Restore previous layer
|
||||
for (dude,top) in revert.pairs:
|
||||
dude.top = top
|
||||
|
||||
for dude in db.txSpan:
|
||||
if dude.stack.len == 0 or
|
||||
dude.stack.len != dude.txRef.level or
|
||||
dude.top.txUid != dude.txRef.txUid:
|
||||
return err(TxStackGarbled)
|
||||
let keep = db.top
|
||||
? dude.prepFn prepFlag # Preparation function
|
||||
revert[dude] = keep
|
||||
revert.clear # Done, no restoring
|
||||
|
||||
for dude in db.txSpan:
|
||||
dude.execFn() # Commit function
|
||||
|
||||
if db.level == 0:
|
||||
db.txSpanClear()
|
||||
|
||||
ok()
|
||||
|
||||
proc doThisPrep(
|
||||
db: AristoDbRef; # Top transaction on database
|
||||
prepFn = DoSpanPrepFn(nil); # Mandatory preparation layer function
|
||||
prepFlag = false; # `prepFn` argument
|
||||
): Result[void,AristoError]
|
||||
{.gcsafe.} =
|
||||
## ..
|
||||
let
|
||||
keep = db.top
|
||||
rc = db.prepFn prepFlag
|
||||
if rc.isErr:
|
||||
db.top = keep
|
||||
return err(rc.error)
|
||||
ok()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, getters
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@@ -226,16 +75,28 @@ func to*(tx: AristoTxRef; T: type[AristoDbRef]): T =
   ## Getter, retrieves the parent database descriptor from argument `tx`
   tx.db


 proc forkTx*(
     tx: AristoTxRef;                   # Transaction descriptor
     dontHashify = false;               # Process/fix MPT hashes
       ): Result[AristoDbRef,AristoError] =
   ## Clone a transaction into a new DB descriptor accessing the same backend
-  ## (if any) database as the argument `db`. The new descriptor is linked to
+  ## database (if any) as the argument `db`. The new descriptor is linked to
   ## the transaction parent and is fully functional as a forked instance (see
   ## comments on `aristo_desc.reCentre()` for details.)
   ##
+  ## Input situation:
+  ## ::
+  ##   tx -> db0   with tx is top transaction, tx.level > 0
+  ##
+  ## Output situation:
+  ## ::
+  ##   tx  -> db0 \
+  ##               > share the same backend
+  ##   tx1 -> db1 /
+  ##
+  ## where `tx.level > 0`, `db1.level == 1` and `db1` is returned. The
+  ## transaction `tx1` can be retrieved via `db1.txTop()`.
+  ##
   ## The new DB descriptor will contain a copy of the argument transaction
   ## `tx` as top layer of level 1 (i.e. this is he only transaction.) Rolling
   ## back will end up at the backend layer (incl. backend filter.)
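A short usage sketch matching the doc comment above (error handling via `expect` for brevity; `tx` is an open `AristoTxRef`):

```nim
let db1 = tx.forkTx().expect "forkTx"   # clone `tx` into its own descriptor
let tx1 = db1.txTop().expect "txTop"    # the copied transaction, level 1
# ... inspect the state as of `tx` via db1/tx1 ...
tx1.rollback().expect "rollback"        # drops back to the backend layer
db1.forget().expect "forget"            # dispose of the forked descriptor
```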
@@ -259,7 +120,7 @@ proc forkTx*(
     return err(TxArgStaleTx)
   topLayer.txUid = 1

-  # Empty stack
+  # Provide new empty stack layer
   let stackLayer = block:
     let rc = db.getIdgBE()
     if rc.isOk:
@@ -351,53 +212,20 @@ proc txBegin*(db: AristoDbRef): Result[AristoTxRef,AristoError] =
   ##     ... continue using db ...
   ##     tx.commit()
   ##
-  if not db.inTxSpan:
-    ? db.txBeginPrepImpl()
-    db.txBeginExecImpl()
+  if db.level != db.stack.len:
+    return err(TxStackGarbled)

-  elif not db.isCentre:
-    return err(TxSpanOffCentre)
+  db.stack.add db.top.dup # push (save and use top later)
+  db.top.txUid = db.getTxUid()

-  else:
-    for dude in db.txSpan:
-      ? dude.txBeginPrepImpl()         # Only check, no need to restore
-    for dude in db.txSpan:
-      dude.txBeginExecImpl()
+  db.txRef = AristoTxRef(
+    db:     db,
+    txUid:  db.top.txUid,
+    parent: db.txRef,
+    level:  db.stack.len)

   ok db.txRef

-proc txBeginSpan*(db: AristoDbRef): Result[AristoTxRef,AristoError] =
-  ## Start a new transaction simultaneously on all descriptors accessing the
-  ## same backend.
-  ##
-  ## This function must be run on the centre argument descriptor `db` (see
-  ## comments on `aristo_desc.reCentre()` for details.) This function is
-  ## effective only when there is no transaction opened, yet. Sub-transactions
-  ## are handled by `txBegin()` accordingly.
-  ##
-  ## When starting sub-transactions outside a transaction span, these
-  ## transactions are handled independently.
-  ##
-  ## Example:
-  ## ::
-  ##   let
-  ##     tx = db.txBeginSpan           # includes all forked descriptors
-  ##     ty = db.txBegin               # includes all forked descriptors
-  ##
-  ##     tmpDb = tx.forkTx             # outside transaction span
-  ##     tz = tmpDb.txBegin            # outside transaction span
-  ##
-  if not db.isCentre:
-    return err(TxSpanOffCentre)
-
-  if 0 < db.nForked:
-    if db.level == 0:
-      if 0 < db.nTxSpan:
-        return err(TxGarbledSpan)
-      db.forked.toSeq.txSpanSet
-
-  db.txBegin
-

 proc rollback*(
     tx: AristoTxRef;                   # Top transaction on database
@@ -407,14 +235,13 @@ proc rollback*(
   ## there was any.
   ##
   let db = ? tx.getDbDescFromTopTx()
-  if not db.inTxSpan:
-    db.rollbackImpl()
-    return ok()

-  if not db.isCentre:
-    return err(TxSpanOffCentre)
+  # Roll back to previous layer.
+  db.top = db.stack[^1]
+  db.stack.setLen(db.stack.len-1)

-  db.doSpan(execFn = rollbackImpl)
+  db.txRef = db.txRef.parent
+  ok()


 proc commit*(
@@ -430,18 +257,16 @@ proc commit*(
   ## This may produce additional errors (see `hashify()`.)
   ##
   let db = ? tx.getDbDescFromTopTx()
-  if not db.inTxSpan:
-    ? db.doThisPrep(commitPrepImpl, dontHashify)
-    db.commitExecImpl()
-    return ok()

-  if not db.isCentre:
-    return err(TxSpanOffCentre)
+  if db.top.dirty and not dontHashify:
+    discard ? db.hashify().mapErr fromVae

-  db.doSpan(
-    prepFn   = commitPrepImpl,
-    prepFlag = dontHashify,
-    execFn   = commitExecImpl)
+  # Keep top and discard layer below
+  db.top.txUid = db.stack[^1].txUid
+  db.stack.setLen(db.stack.len-1)

+  db.txRef = db.txRef.parent
+  ok()


 proc collapse*(
@@ -460,28 +285,26 @@ proc collapse*(
   ## The `dontHashify` flag is treated as described for `commit()`
   ##
   let db = ? tx.getDbDescFromTopTx()
-  if not db.inTxSpan:
-    if commit:
-      ? db.doThisPrep(collapseCommitPrepImpl, dontHashify)
-      db.collapseCommitExecImpl()
-    else:
-      ? db.doThisPrep(collapseRollbackPrepImpl, dontHashify)
-      db.collapseRollbackExecImpl()
-    return ok()

-  if not db.isCentre:
-    return err(TxSpanOffCentre)

   if commit:
-    db.doSpan(
-      prepFn   = collapseCommitPrepImpl,
-      prepFlag = dontHashify,
-      execFn   = collapseCommitExecImpl)
+    # For commit, hashify the current layer if requested and install it
+    if db.top.dirty and not dontHashify:
+      discard ? db.hashify().mapErr fromVae

   else:
-    db.doSpan(
-      prepFn   = collapseRollbackPrepImpl,
-      prepFlag = dontHashify,
-      execFn   = collapseRollbackExecImpl)
+    # For rollback hashify the stack bottom layer if requested and install it
+    if db.top.dirty and not dontHashify:
+      db.stack[0].swap db.top
+
+      var restore = true
+      defer:
+        if restore: db.stack[0].swap db.top
+      discard ? db.hashify().mapErr fromVae
+      restore = false
+
+  db.top.txUid = 0
+  db.stack.setLen(0)
+  db.txRef = AristoTxRef(nil)
+  ok()

 # ------------------------------------------------------------------------------
 # Public functions: save database
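After this change the transaction logic is self-contained per descriptor again; the whole cycle runs against a single `AristoDbRef`. A minimal sketch (parameter names per the procs above):

```nim
let tx = db.txBegin().expect "txBegin"  # push a new top layer
# ... mutate the MPT ...
tx.commit().expect "commit"             # hashify (unless dontHashify) and merge down
# or: tx.rollback()                     # restore the previous layer
# or: tx.collapse(commit = true)        # squash the whole stack onto the backend layer
```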
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -198,6 +198,9 @@ proc kvtMethods(db: LegacyDbRef): CoreDbKvtFns =
     hasKeyFn: proc(k: openArray[byte]): CoreDbRc[bool] =
       ok(tdb.contains(k)),

+    destroyFn: proc(saveMode: CoreDbSaveFlags): CoreDbRc[void] =
+      ok(),
+
     pairsIt: iterator(): (Blob, Blob) =
       for k,v in tdb.pairsInMemoryDB:
         yield (k,v))

@@ -235,6 +238,9 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
     isPruningFn: proc(): bool =
       mpt.trie.isPruning,

+    destroyFn: proc(saveMode: CoreDbSaveFlags): CoreDbRc[void] =
+      ok(),
+
     pairsIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
       reraiseRlpException("pairsIt()"):
         for k,v in mpt.trie.pairs():

@@ -276,7 +282,10 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
       db.bless(LegacyCoreDbVid(vHash: mpt.trie.rootHash)),

     isPruningFn: proc(): bool =
-      mpt.trie.isPruning)
+      mpt.trie.isPruning,
+
+    destroyFn: proc(saveMode: CoreDbSaveFlags): CoreDbRc[void] =
+      ok())

 proc txMethods(tx: DbTransaction): CoreDbTxFns =
   CoreDbTxFns(
@@ -331,7 +340,7 @@ proc baseMethods(
       if not closeDb.isNil:
         closeDb(),

-    vidHashFn: proc(vid: CoreDbVidRef): Result[Hash256,void] =
+    vidHashFn: proc(vid: CoreDbVidRef; update: bool): CoreDbRc[Hash256] =
       ok(vid.lvHash),

     errorPrintFn: proc(e: CoreDbErrorRef): string =

@@ -352,14 +361,22 @@ proc baseMethods(

       err(db.bless(RootNotFound, LegacyCoreDbError(ctx: "getRoot()"))),

-    newKvtFn: proc(): CoreDxKvtRef =
-      db.kvt,
+    newKvtFn: proc(saveMode: CoreDbSaveFlags): CoreDbRc[CoreDxKvtRef] =
+      ok(db.kvt),

-    newMptFn: proc(root: CoreDbVidRef, prune: bool): CoreDbRc[CoreDxMptRef] =
+    newMptFn: proc(
+        root: CoreDbVidRef,
+        prune: bool;
+        saveMode: CoreDbSaveFlags;
+          ): CoreDbRc[CoreDxMptRef] =
       let mpt = HexaryChildDbRef(trie: initHexaryTrie(tdb, root.lvHash, prune))
       ok(db.bless CoreDxMptRef(methods: mpt.mptMethods db)),

-    newAccFn: proc(root: CoreDbVidRef, prune: bool): CoreDbRc[CoreDxAccRef] =
+    newAccFn: proc(
+        root: CoreDbVidRef,
+        prune: bool;
+        saveMode: CoreDbSaveFlags;
+          ): CoreDbRc[CoreDxAccRef] =
       let mpt = HexaryChildDbRef(trie: initHexaryTrie(tdb, root.lvHash, prune))
       ok(db.bless CoreDxAccRef(methods: mpt.accMethods db)),
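For the legacy backend the new `destroyFn` slots are no-ops and `newKvtFn` hands out the one shared descriptor, which is what the updated Ledger API relies on. A hedged sketch of how a non-legacy backend would be expected to fill the same slot (the `saveToBackend` helper is illustrative, not part of this commit):

```nim
destroyFn: proc(saveMode: CoreDbSaveFlags): CoreDbRc[void] =
  # A persistent backend would honour the save mode here:
  if saveMode == AutoSave:
    ? saveToBackend()   # hypothetical helper: flush cached changes to disk
  ok(),
```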
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -20,16 +20,18 @@ import
   ./base/[base_desc, validate]

 export
+  CoreDbAccBackendRef,
   CoreDbAccount,
   CoreDbApiError,
   CoreDbBackendRef,
   CoreDbCaptFlags,
   CoreDbErrorCode,
   CoreDbErrorRef,
-  CoreDbAccBackendRef,
   CoreDbKvtBackendRef,
   CoreDbMptBackendRef,
+  CoreDbPersistentTypes,
   CoreDbRef,
+  CoreDbSaveFlags,
   CoreDbType,
   CoreDbVidRef,
   CoreDxAccRef,

@@ -37,7 +39,6 @@ export
   CoreDxKvtRef,
   CoreDxMptRef,
   CoreDxPhkRef,
-  CoreDxTxID,
   CoreDxTxRef

 when defined(release):
@@ -48,7 +49,7 @@ else:
 const
   ProvideCoreDbLegacyAPI* = true # and false

-  EnableApiTracking = true and false
+  EnableApiTracking = true # and false
     ## When enabled, functions using this tracking facility need to import
     ## `chronicles`, as well. Tracking is enabled by setting the `trackLegaApi`
     ## and/or the `trackNewApi` flags to `true`.
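Note that `EnableApiTracking` only compiles the logging hooks in; per the doc comment above, emission is still gated at run time by the `trackLegaApi`/`trackNewApi` flags. A sketch, assuming the flags are plain fields on the descriptor:

```nim
when EnableApiTracking:
  db.trackNewApi = true   # now every new-API call emits a `debug` record
```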
@@ -92,6 +93,8 @@ type
     CoreDbBackends | CoreDbErrorRef
       ## Shortcut, all descriptors with a `parent` entry.

+proc `$$`*(e: CoreDbErrorRef): string {.gcsafe.}
+
 # ------------------------------------------------------------------------------
 # Private helpers
 # ------------------------------------------------------------------------------

@@ -106,6 +109,7 @@ template itNotImplemented(db: CoreDbRef, name: string) =

 when EnableApiTracking:
   import std/[sequtils, strutils], stew/byteutils
+  {.warning: "*** Provided API logging for CoreDB (disabled by default)".}

   func getParent(w: CoreDxChldRefs): auto =
     ## Avoida inifinite call to `parent()` in `ifTrack*Api()` tmplates

@@ -134,6 +138,11 @@ when EnableApiTracking:
       if db.trackLegaApi:
         code

+    proc toStr(w: CoreDbKvtRef): string =
+      if w.distinctBase.isNil: "kvtRef(nil)" else: "kvtRef"
+
   # End LegacyAPI

   template newApiTxt(info: static[string]): static[string] =
     logTxt "new API " & info
||||
|
@ -154,7 +163,16 @@ when EnableApiTracking:
|
|||
if w == EMPTY_ROOT_HASH: "EMPTY_ROOT_HASH" else: w.data.oaToStr
|
||||
|
||||
proc toStr(p: CoreDbVidRef): string =
|
||||
if p.isNil: "vidRef(nil)" else: "vidRef"
|
||||
if p.isNil:
|
||||
"vidRef(nil)"
|
||||
elif not p.ready:
|
||||
"vidRef(not-ready)"
|
||||
else:
|
||||
let val = p.parent.methods.vidHashFn(p,false).valueOr: EMPTY_ROOT_HASH
|
||||
if val != EMPTY_ROOT_HASH:
|
||||
"vidRef(some-hash)"
|
||||
else:
|
||||
"vidRef(empty-hash)"
|
||||
|
||||
proc toStr(w: Blob): string =
|
||||
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
|
||||
|
@ -167,22 +185,23 @@ when EnableApiTracking:
|
|||
"Flags[" & $w.len & "]"
|
||||
|
||||
proc toStr(rc: CoreDbRc[bool]): string =
|
||||
if rc.isOk: "ok(" & $rc.value & ")" else: "err(..)"
|
||||
if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & $$rc.error & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[void]): string =
|
||||
if rc.isOk: "ok()" else:"err()"
|
||||
if rc.isOk: "ok()" else: "err(" & $$rc.error & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[Blob]): string =
|
||||
if rc.isOk: "ok(Blob[" & $rc.value.len & "])" else: "err(..)"
|
||||
if rc.isOk: "ok(Blob[" & $rc.value.len & "])"
|
||||
else: "err(" & $$rc.error & ")"
|
||||
|
||||
proc toStr(rc: Result[Hash256,void]): string =
|
||||
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err()"
|
||||
proc toStr(rc: CoreDbRc[Hash256]): string =
|
||||
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & $$rc.error & ")"
|
||||
|
||||
proc toStr(rc: Result[Account,void]): string =
|
||||
if rc.isOk: "ok(Account)" else: "err()"
|
||||
proc toStr(rc: CoreDbRc[Account]): string =
|
||||
if rc.isOk: "ok(Account)" else: "err(" & $$rc.error & ")"
|
||||
|
||||
proc toStr[T](rc: CoreDbRc[T]; ifOk: static[string]): string =
|
||||
if rc.isOk: "ok(" & ifOk & ")" else: "err(..)"
|
||||
if rc.isOk: "ok(" & ifOk & ")" else: "err(" & $$rc.error & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "dbRef"
|
||||
proc toStr(rc: CoreDbRc[CoreDbVidRef]): string = rc.toStr "vidRef"
|
||||
|
@@ -244,7 +263,6 @@ proc bless*(db: CoreDbRef): CoreDbRef =
   ## Verify descriptor
   when AutoValidateDescriptors:
     db.validate
-  db.ifTrackNewApi: info newApiTxt "CoreDbRef.init()", dbType=db.dbType
   db
@@ -301,14 +319,14 @@ proc bless*(
 proc dbType*(db: CoreDbRef): CoreDbType =
   ## Getter
   result = db.dbType
-  db.ifTrackNewApi: info newApiTxt "dbType()", result
+  db.ifTrackNewApi: debug newApiTxt "dbType()", result

 proc compensateLegacySetup*(db: CoreDbRef) =
   ## On the persistent legacy hexary trie, this function is needed for
   ## bootstrapping and Genesis setup when the `purge` flag is activated.
   ## Otherwise the database backend may defect on an internal inconsistency.
   db.methods.legacySetupFn()
-  db.ifTrackNewApi: info newApiTxt "compensateLegacySetup()"
+  db.ifTrackNewApi: debug newApiTxt "compensateLegacySetup()"

 proc parent*(cld: CoreDxChldRefs): CoreDbRef =
   ## Getter, common method for all sub-modules

@@ -317,7 +335,7 @@ proc parent*(cld: CoreDxChldRefs): CoreDbRef =
 proc backend*(dsc: CoreDxKvtRef | CoreDxTrieRelated | CoreDbRef): auto =
   ## Getter, retrieves the *raw* backend object for special/localised support.
   result = dsc.methods.backendFn()
-  dsc.ifTrackNewApi: info newApiTxt "backend()"
+  dsc.ifTrackNewApi: debug newApiTxt "backend()"

 proc finish*(db: CoreDbRef; flush = false) =
   ## Database destructor. If the argument `flush` is set `false`, the database

@@ -327,15 +345,15 @@ proc finish*(db: CoreDbRef; flush = false) =
   ## depends on the backend database. Currently, only the `AristoDbRocks` type
   ## backend removes the database on `true`.
   db.methods.destroyFn flush
-  db.ifTrackNewApi: info newApiTxt "finish()"
+  db.ifTrackNewApi: debug newApiTxt "finish()"

 proc `$$`*(e: CoreDbErrorRef): string =
   ## Pretty print error symbol, note that this directive may have side effects
   ## as it calls a backend function.
   result = $e.error & "(" & e.parent.methods.errorPrintFn(e) & ")"
-  e.ifTrackNewApi: info newApiTxt "$$()", result
+  e.ifTrackNewApi: debug newApiTxt "$$()", result

-proc hash*(vid: CoreDbVidRef): Result[Hash256,void] =
+proc hash*(vid: CoreDbVidRef; update: bool): CoreDbRc[Hash256] =
   ## Getter (well, sort of), retrieves the hash for a `vid` argument. The
   ## function might fail if there is currently no hash available (e.g. on
   ## `Aristo`.) Note that this is different from succeeding with an
|
@ -346,17 +364,18 @@ proc hash*(vid: CoreDbVidRef): Result[Hash256,void] =
|
|||
##
|
||||
result = block:
|
||||
if not vid.isNil and vid.ready:
|
||||
vid.parent.methods.vidHashFn vid
|
||||
vid.parent.methods.vidHashFn(vid, update)
|
||||
else:
|
||||
ok EMPTY_ROOT_HASH
|
||||
# Note: tracker will be silent if `vid` is NIL
|
||||
vid.ifTrackNewApi: info newApiTxt "hash()", result=result.toStr
|
||||
vid.ifTrackNewApi:
|
||||
debug newApiTxt "hash()", vid=vid.toStr, result=result.toStr
|
||||
|
||||
proc hashOrEmpty*(vid: CoreDbVidRef): Hash256 =
|
||||
## Convenience wrapper, returns `EMPTY_ROOT_HASH` where `hash()` would fail.
|
||||
vid.hash.valueOr: EMPTY_ROOT_HASH
|
||||
vid.hash(update = true).valueOr: EMPTY_ROOT_HASH
|
||||
|
||||
proc recast*(account: CoreDbAccount): Result[Account,void] =
|
||||
proc recast*(account: CoreDbAccount; update: bool): CoreDbRc[Account] =
|
||||
## Convert the argument `account` to the portable Ethereum representation
|
||||
## of an account. This conversion may fail if the storage root hash (see
|
||||
## `hash()` above) is currently unavailable.
|
||||
|
@ -365,7 +384,7 @@ proc recast*(account: CoreDbAccount): Result[Account,void] =
|
|||
##
|
||||
let vid = account.storageVid
|
||||
result = block:
|
||||
let rc = vid.hash
|
||||
let rc = vid.hash(update)
|
||||
if rc.isOk:
|
||||
ok Account(
|
||||
nonce: account.nonce,
|
||||
|
@ -373,8 +392,8 @@ proc recast*(account: CoreDbAccount): Result[Account,void] =
|
|||
codeHash: account.codeHash,
|
||||
storageRoot: rc.value)
|
||||
else:
|
||||
err()
|
||||
vid.ifTrackNewApi: info newApiTxt "recast()", result=result.toStr
|
||||
err(rc.error)
|
||||
vid.ifTrackNewApi: debug newApiTxt "recast()", result=result.toStr
|
||||
|
||||
proc getRoot*(
|
||||
db: CoreDbRef;
|
||||
|
@ -396,22 +415,52 @@ proc getRoot*(
|
|||
##
|
||||
result = db.methods.getRootFn(root, createOk)
|
||||
db.ifTrackNewApi:
|
||||
info newApiTxt "getRoot()", root=root.toStr, result=result.toStr
|
||||
debug newApiTxt "getRoot()", root=root.toStr, result=result.toStr
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public key-value table methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc newKvt*(db: CoreDbRef): CoreDxKvtRef =
|
||||
## Getter (pseudo constructor)
|
||||
result = db.methods.newKvtFn()
|
||||
db.ifTrackNewApi: info newApiTxt "newKvt()"
|
||||
proc newKvt*(db: CoreDbRef; saveMode = AutoSave): CoreDxKvtRef =
|
||||
## Constructor, will defect on failure.
|
||||
##
|
||||
## Depending on the argument `saveMode`, the contructed object will have
|
||||
## the following properties.
|
||||
##
|
||||
## * `Cached`
|
||||
## Subscribe to the common base object shared with other subscribed
|
||||
## `AutoSave` or `Cached` descriptors. So any changes are immediately
|
||||
## visible among subscribers. On automatic destruction (when the
|
||||
## constructed object gets out of scope), changes are not saved to the
|
||||
## backend database but are still available to subscribers.
|
||||
##
|
||||
## * `AutoSave`
|
||||
## This mode works similar to `Cached` with the difference that changes
|
||||
## are saved to the backend database on automatic destruction when this
|
||||
## is permissible, i.e. there is a backend available and there is no
|
||||
## pending transaction on the common base object.
|
||||
##
|
||||
## * `Companion`
|
||||
## The contructed object will be a new descriptor separate from the common
|
||||
## base object. It will be a copy of the current state of the common
|
||||
## base object available to subscribers. On automatic destruction, changes
|
||||
## will be discarded.
|
||||
##
|
||||
## The constructed object can be manually descructed (see `destroy()`) where
|
||||
## the `saveMode` behaviour can be overridden.
|
||||
##
|
||||
## The legacy backend always assumes `AutoSave` mode regardless of the
|
||||
## function argument.
|
||||
##
|
||||
result = db.methods.newKvtFn(saveMode).valueOr:
|
||||
raiseAssert $$error
|
||||
db.ifTrackNewApi: debug newApiTxt "newKvt()", saveMode
|
||||
|
||||
proc get*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||
## This function always returns a non-empty `Blob` or an error code.
|
||||
result = kvt.methods.getFn key
|
||||
kvt.ifTrackNewApi:
|
||||
info newApiTxt "kvt/get()", key=key.toStr, result=result.toStr
|
||||
debug newApiTxt "get()", key=key.toStr, result=result.toStr
|
||||
|
||||
proc getOrEmpty*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||
## This function sort of mimics the behaviour of the legacy database
|
||||
|
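A usage sketch of the constructor/destructor pair introduced here (assuming a backend that distinguishes the save modes; the legacy backend treats everything as `AutoSave`, and `key`/`val` are assumed byte blobs):

```nim
let scratch = db.newKvt(saveMode = Companion)  # private copy of the current state
scratch.put(key, val).expect "put"             # invisible to other subscribers
scratch.destroy(saveMode = AutoSave).isOkOr:   # override: try to persist after all
  echo "not saved: ", $$error                  # e.g. pending tx on the base object
```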
@@ -421,12 +470,12 @@ proc getOrEmpty*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
   if result.isErr and result.error.error == KvtNotFound:
     result = CoreDbRc[Blob].ok(EmptyBlob)
   kvt.ifTrackNewApi:
-    info newApiTxt "kvt/getOrEmpty()", key=key.toStr, result=result.toStr
+    debug newApiTxt "getOrEmpty()", key=key.toStr, result=result.toStr

 proc del*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[void] =
   result = kvt.methods.delFn key
   kvt.ifTrackNewApi:
-    info newApiTxt "kvt/del()", key=key.toStr, result=result.toStr
+    debug newApiTxt "del()", key=key.toStr, result=result.toStr

 proc put*(
     kvt: CoreDxKvtRef;
@@ -434,59 +483,122 @@ proc put*(
     val: openArray[byte];
       ): CoreDbRc[void] =
   result = kvt.methods.putFn(key, val)
-  kvt.ifTrackNewApi: info newApiTxt "kvt/put()",
+  kvt.ifTrackNewApi: debug newApiTxt "put()",
     key=key.toStr, val=val.toSeq.toStr, result=result.toStr

 proc hasKey*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[bool] =
   ## Would be named `contains` if it returned `bool` rather than `Result[]`.
   result = kvt.methods.hasKeyFn key
   kvt.ifTrackNewApi:
-    info newApiTxt "kvt/hasKey()", key=key.toStr, result=result.toStr
+    debug newApiTxt "kvt/hasKey()", key=key.toStr, result=result.toStr

+proc destroy*(dsc: CoreDxKvtRef; saveMode = AutoSave): CoreDbRc[void] =
+  ## For the legacy database, this function has no effect and succeeds always.
+  ##
+  ## The function explicitely destructs the descriptor `dsc`. If the function
+  ## argument `saveMode` is not `AutoSave` the data object behind the argument
+  ## descriptor `dsc` is just discarded and the function returns success.
+  ##
+  ## Otherwise, the state of the descriptor object is saved to the database
+  ## backend if that is possible, or an error is returned.
+  ##
+  ## Subject to change
+  ## -----------------
+  ## * Saving an object which was created with the `Companion` flag (see
+  ##   `newKvt()`), the common base object will not reveal any change although
+  ##   the backend database will have persistently stored the data.
+  ## * Subsequent saving of the common base object may override that.
+  ##
+  ## When returnng an error, the argument descriptor `dsc` will have been
+  ## disposed nevertheless.
+  ##
+  result = dsc.methods.destroyFn saveMode
+  dsc.ifTrackNewApi: debug newApiTxt "destroy()", saveMode, result=result.toStr
+
 iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
   ## Iterator supported on memory DB (otherwise implementation dependent)
   for k,v in kvt.methods.pairsIt():
     yield (k,v)
-  kvt.ifTrackNewApi: info newApiTxt "kvt/pairs()"
+  kvt.ifTrackNewApi: debug newApiTxt "kvt/pairs()"

 # ------------------------------------------------------------------------------
 # Public Merkle Patricia Tree, hexary trie constructors
 # ------------------------------------------------------------------------------

-proc newMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxMptRef =
-  ## Constructor, will defect on failure (note that the legacy backend
-  ## always succeeds)
-  result = db.methods.newMptFn(root, prune).valueOr:
+proc newMpt*(
+    db: CoreDbRef;
+    root: CoreDbVidRef;
+    prune = true;
+    saveMode = AutoSave;
+      ): CoreDxMptRef =
+  ## Constructor, will defect on failure. The argument `prune` is currently
+  ## effective only for the legacy backend.
+  ##
+  ## See the discussion at `newKvt()` for an explanation of the `saveMode`
+  ## argument.
+  ##
+  ## The constructed object can be manually descructed (see `destroy()`) where
+  ## the `saveMode` behaviour can be overridden.
+  ##
+  ## The legacy backend always assumes `AutoSave` mode regardless of the
+  ## function argument.
+  ##
+  result = db.methods.newMptFn(root, prune, saveMode).valueOr:
     raiseAssert $$error
-  db.ifTrackNewApi: info newApiTxt "newMpt", root=root.toStr, prune
+  db.ifTrackNewApi:
+    debug newApiTxt "newMpt()", root=root.toStr, prune, saveMode

-proc newMpt*(db: CoreDbRef; prune = true): CoreDxMptRef =
+proc newMpt*(
+    db: CoreDbRef;
+    prune = true;
+    saveMode = AutoSave;
+      ): CoreDxMptRef =
   ## Shortcut for `db.newMpt CoreDbVidRef()`
-  result = db.methods.newMptFn(CoreDbVidRef(), prune).valueOr:
+  let root = CoreDbVidRef()
+  result = db.methods.newMptFn(root, prune, saveMode).valueOr:
     raiseAssert $$error
-  db.ifTrackNewApi: info newApiTxt "newMpt", prune
+  db.ifTrackNewApi: debug newApiTxt "newMpt()", root=root.toStr, prune, saveMode

-proc newAccMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxAccRef =
-  ## Similar to `newMpt()` for handling accounts. Although this sub-trie can
-  ## be emulated by means of `newMpt(..).toPhk()`, it is recommended using
-  ## this constructor which implies its own subset of methods to handle that
-  ## trie.
-  result = db.methods.newAccFn(root, prune).valueOr: raiseAssert $$error
-  db.ifTrackNewApi: info newApiTxt "newAccMpt", root=root.toStr, prune
+proc newAccMpt*(
+    db: CoreDbRef;
+    root: CoreDbVidRef;
+    prune = true;
+    saveMode = AutoSave;
+      ): CoreDxAccRef =
+  ## This function works similar to `newMpt()` for handling accounts. Although
+  ## this sub-trie can be emulated by means of `newMpt(..).toPhk()`, it is
+  ## recommended using this particular constructor for accounts because it
+  ## provides its own subset of methods to handle accounts.
+  ##
+  ## The argument `prune` is currently effective only for the legacy backend.
+  ##
+  ## See the discussion at `newKvt()` for an explanation of the `saveMode`
+  ## argument.
+  ##
+  ## The constructed object can be manually descructed (see `destroy()`) where
+  ## the `saveMode` behaviour can be overridden.
+  ##
+  ## The legacy backend always assumes `AutoSave` mode regardless of the
+  ## function argument.
+  ##
+  result = db.methods.newAccFn(root, prune, saveMode).valueOr:
+    raiseAssert $$error
+  db.ifTrackNewApi:
+    debug newApiTxt "newAccMpt()", root=root.toStr, prune, saveMode

 proc toMpt*(phk: CoreDxPhkRef): CoreDxMptRef =
   ## Replaces the pre-hashed argument trie `phk` by the non pre-hashed *MPT*.
   ## Note that this does not apply to an accounts trie that was created by
   ## `newAccMpt()`.
   result = phk.fromMpt
-  phk.ifTrackNewApi: info newApiTxt "phk/toMpt()"
+  phk.ifTrackNewApi: debug newApiTxt "phk/toMpt()"

 proc toPhk*(mpt: CoreDxMptRef): CoreDxPhkRef =
   ## Replaces argument `mpt` by a pre-hashed *MPT*.
   ## Note that this does not apply to an accounts trie that was created by
   ## `newAaccMpt()`.
   result = mpt.toCoreDxPhkRef
-  mpt.ifTrackNewApi: info newApiTxt "mpt/toPhk()"
+  mpt.ifTrackNewApi: debug newApiTxt "mpt/toPhk()"

 # ------------------------------------------------------------------------------
 # Public common methods for all hexary trie databases (`mpt`, `phk`, or `acc`)
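And the corresponding trie constructors, sketched for an accounts trie (`stateRoot` and `address` are assumed bindings; the `destroy()` result is `{.discardable.}`, so the explicit check below is optional):

```nim
let vid = db.getRoot(stateRoot, createOk = false).expect "root"
let acc = db.newAccMpt(vid, prune = false, saveMode = Cached)
let entry = acc.fetch(address).expect "fetch"    # CoreDbAccount for `address`
acc.destroy(saveMode = Cached).expect "destroy"  # drop the descriptor, keep the cache
```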
@@ -495,12 +607,25 @@ proc toPhk*(mpt: CoreDxMptRef): CoreDxPhkRef =
 proc isPruning*(dsc: CoreDxTrieRefs | CoreDxAccRef): bool =
   ## Getter
   result = dsc.methods.isPruningFn()
-  dsc.ifTrackNewApi: info newApiTxt "isPruning()", result
+  dsc.ifTrackNewApi: debug newApiTxt "isPruning()", result

 proc rootVid*(dsc: CoreDxTrieRefs | CoreDxAccRef): CoreDbVidRef =
   ## Getter, result is not `nil`
   result = dsc.methods.rootVidFn()
-  dsc.ifTrackNewApi: info newApiTxt "rootVid()", result=result.toStr
+  dsc.ifTrackNewApi: debug newApiTxt "rootVid()", result=result.toStr

+proc destroy*(
+    dsc: CoreDxTrieRefs | CoreDxAccRef;
+    saveMode = AutoSave;
+      ): CoreDbRc[void]
+      {.discardable.} =
+  ## For the legacy database, this function has no effect and succeeds always.
+  ##
+  ## See the discussion at `destroy()` for `CoreDxKvtRef` for an explanation
+  ## of the `saveMode` argument.
+  ##
+  result = dsc.methods.destroyFn saveMode
+  dsc.ifTrackNewApi: debug newApiTxt "destroy()", result=result.toStr
+
 # ------------------------------------------------------------------------------
 # Public generic hexary trie database methods (`mpt` or `phk`)
@@ -511,7 +636,7 @@ proc fetch*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] =
   ## non-empty `Blob` or an error code.
   result = trie.methods.fetchFn(key)
   trie.ifTrackNewApi:
-    info newApiTxt "trie/fetch()", key=key.toStr, result=result.toStr
+    debug newApiTxt "trie/fetch()", key=key.toStr, result=result.toStr

 proc fetchOrEmpty*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] =
   ## This function returns an empty `Blob` if the argument `key` is not found

@@ -520,12 +645,12 @@ proc fetchOrEmpty*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] =
   if result.isErr and result.error.error == MptNotFound:
     result = ok(EmptyBlob)
   trie.ifTrackNewApi:
-    info newApiTxt "trie/fetch()", key=key.toStr, result=result.toStr
+    debug newApiTxt "trie/fetchOrEmpty()", key=key.toStr, result=result.toStr

 proc delete*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[void] =
   result = trie.methods.deleteFn key
   trie.ifTrackNewApi:
-    info newApiTxt "trie/delete()", key=key.toStr, result=result.toStr
+    debug newApiTxt "trie/delete()", key=key.toStr, result=result.toStr

 proc merge*(
     trie: CoreDxTrieRefs;

@@ -537,26 +662,26 @@ proc merge*(
   else:
     const info = "phk/merge()"
   result = trie.methods.mergeFn(key, val)
-  trie.ifTrackNewApi: info newApiTxt info,
+  trie.ifTrackNewApi: debug newApiTxt info,
     key=key.toStr, val=val.toSeq.toStr, result=result.toStr

 proc hasPath*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[bool] =
   ## Would be named `contains` if it returned `bool` rather than `Result[]`.
   result = trie.methods.hasPathFn key
   trie.ifTrackNewApi:
-    info newApiTxt "trie/hasKey()", key=key.toStr, result=result.toStr
+    debug newApiTxt "trie/hasKey()", key=key.toStr, result=result.toStr

 iterator pairs*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
   ## Trie traversal, only supported for `CoreDxMptRef`
   for k,v in mpt.methods.pairsIt():
     yield (k,v)
-  mpt.ifTrackNewApi: info newApiTxt "mpt/pairs()"
+  mpt.ifTrackNewApi: debug newApiTxt "mpt/pairs()"

 iterator replicate*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
   ## Low level trie dump, only supported for `CoreDxMptRef`
   for k,v in mpt.methods.replicateIt():
     yield (k,v)
-  mpt.ifTrackNewApi: info newApiTxt "mpt/replicate()"
+  mpt.ifTrackNewApi: debug newApiTxt "mpt/replicate()"

 # ------------------------------------------------------------------------------
 # Public trie database methods for accounts
@@ -566,12 +691,12 @@ proc fetch*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[CoreDbAccount] =
   ## Fetch data from the argument `trie`.
   result = acc.methods.fetchFn address
   acc.ifTrackNewApi:
-    info newApiTxt "acc/fetch()", address=address.toStr, result=result.toStr
+    debug newApiTxt "acc/fetch()", address=address.toStr, result=result.toStr

 proc delete*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
   result = acc.methods.deleteFn address
   acc.ifTrackNewApi:
-    info newApiTxt "acc/delete()", address=address.toStr, result=result.toStr
+    debug newApiTxt "acc/delete()", address=address.toStr, result=result.toStr

 proc merge*(
     acc: CoreDxAccRef;

@@ -580,51 +705,38 @@ proc merge*(
       ): CoreDbRc[void] =
   result = acc.methods.mergeFn(address, account)
   acc.ifTrackNewApi:
-    info newApiTxt "acc/merge()", address=address.toStr, result=result.toStr
+    debug newApiTxt "acc/merge()", address=address.toStr, result=result.toStr

 proc hasPath*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[bool] =
   ## Would be named `contains` if it returned `bool` rather than `Result[]`.
   result = acc.methods.hasPathFn address
   acc.ifTrackNewApi:
-    info newApiTxt "acc/hasKey()", address=address.toStr, result=result.toStr
+    debug newApiTxt "acc/hasKey()", address=address.toStr, result=result.toStr

 # ------------------------------------------------------------------------------
 # Public transaction related methods
 # ------------------------------------------------------------------------------

-proc toTransactionID*(db: CoreDbRef): CoreDbRc[CoreDxTxID] =
-  ## Getter, current transaction state
-  result = db.methods.getIdFn()
-  db.ifTrackNewApi: info newApiTxt "toTransactionID()", result=result.toStr
-
-proc shortTimeReadOnly*(
-    id: CoreDxTxID;
-    action: proc() {.noRaise.};
-      ): CoreDbRc[void] =
-  ## Run `action()` in an earlier transaction environment.
-  result = id.methods.roWrapperFn action
-  id.ifTrackNewApi: info newApiTxt "shortTimeReadOnly()", result=result.toStr
-
 proc newTransaction*(db: CoreDbRef): CoreDbRc[CoreDxTxRef] =
   ## Constructor
   result = db.methods.beginFn()
-  db.ifTrackNewApi: info newApiTxt "newTransaction()", result=result.toStr
+  db.ifTrackNewApi: debug newApiTxt "newTransaction()", result=result.toStr

 proc commit*(tx: CoreDxTxRef, applyDeletes = true): CoreDbRc[void] =
   result = tx.methods.commitFn applyDeletes
-  tx.ifTrackNewApi: info newApiTxt "tx/commit()", result=result.toStr
+  tx.ifTrackNewApi: debug newApiTxt "tx/commit()", result=result.toStr

 proc rollback*(tx: CoreDxTxRef): CoreDbRc[void] =
   result = tx.methods.rollbackFn()
-  tx.ifTrackNewApi: info newApiTxt "tx/rollback()", result=result.toStr
+  tx.ifTrackNewApi: debug newApiTxt "tx/rollback()", result=result.toStr

 proc dispose*(tx: CoreDxTxRef): CoreDbRc[void] =
   result = tx.methods.disposeFn()
-  tx.ifTrackNewApi: info newApiTxt "tx/dispose()", result=result.toStr
+  tx.ifTrackNewApi: debug newApiTxt "tx/dispose()", result=result.toStr

 proc safeDispose*(tx: CoreDxTxRef): CoreDbRc[void] =
   result = tx.methods.safeDisposeFn()
-  tx.ifTrackNewApi: info newApiTxt "tx/safeDispose()", result=result.toStr
+  tx.ifTrackNewApi: debug newApiTxt "tx/safeDispose()", result=result.toStr

 # ------------------------------------------------------------------------------
 # Public tracer methods

@@ -636,21 +748,21 @@ proc newCapture*(
       ): CoreDbRc[CoreDxCaptRef] =
   ## Constructor
   result = db.methods.captureFn flags
-  db.ifTrackNewApi: info newApiTxt "db/capture()", result=result.toStr
+  db.ifTrackNewApi: debug newApiTxt "db/capture()", result=result.toStr

 proc recorder*(cp: CoreDxCaptRef): CoreDbRc[CoreDbRef] =
   ## Getter
   result = cp.methods.recorderFn()
-  cp.ifTrackNewApi: info newApiTxt "capt/recorder()", result=result.toStr
+  cp.ifTrackNewApi: debug newApiTxt "capt/recorder()", result=result.toStr

 proc logDb*(cp: CoreDxCaptRef): CoreDbRc[CoreDbRef] =
   result = cp.methods.logDbFn()
-  cp.ifTrackNewApi: info newApiTxt "capt/logDb()", result=result.toStr
+  cp.ifTrackNewApi: debug newApiTxt "capt/logDb()", result=result.toStr

 proc flags*(cp: CoreDxCaptRef): set[CoreDbCaptFlags] =
   ## Getter
   result = cp.methods.getFlagsFn()
-  cp.ifTrackNewApi: info newApiTxt "capt/flags()", result=result.toStr
+  cp.ifTrackNewApi: debug newApiTxt "capt/flags()", result=result.toStr

 # ------------------------------------------------------------------------------
 # Public methods, legacy API
@@ -665,7 +777,7 @@ when ProvideCoreDbLegacyAPI:
   proc backend*(dsc: CoreDbChldRefs): auto =
     dsc.setTrackLegaApiOnly
     result = dsc.distinctBase.backend
-    dsc.ifTrackLegaApi: info legaApiTxt "parent()"
+    dsc.ifTrackLegaApi: debug legaApiTxt "parent()"

   # ----------------

@@ -673,53 +785,55 @@ when ProvideCoreDbLegacyAPI:
     ## Legacy pseudo constructor, see `toKvt()` for production constructor
     db.setTrackLegaApiOnly
     result = db.newKvt().CoreDbKvtRef
-    db.ifTrackLegaApi: info legaApiTxt "kvt()"
+    db.ifTrackLegaApi: debug legaApiTxt "kvt()", result=result.toStr

   proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): Blob =
     kvt.setTrackLegaApiOnly
     const info = "kvt/get()"
     result = kvt.distinctBase.getOrEmpty(key).expect info
     kvt.ifTrackLegaApi:
-      info legaApiTxt info, key=key.toStr, result=result.toStr
+      debug legaApiTxt info, key=key.toStr, result=result.toStr

   proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): void =
     kvt.setTrackLegaApiOnly
     const info = "kvt/del()"
     kvt.distinctBase.del(key).expect info
-    kvt.ifTrackLegaApi: info legaApiTxt info, key=key.toStr
+    kvt.ifTrackLegaApi: debug legaApiTxt info, key=key.toStr

   proc put*(kvt: CoreDbKvtRef; key: openArray[byte]; val: openArray[byte]) =
     kvt.setTrackLegaApiOnly
     const info = "kvt/put()"
-    kvt.distinctBase.put(key, val).expect info
+    let w = kvt.distinctBase.parent.newKvt()
+    w.put(key, val).expect info
+    #kvt.distinctBase.put(key, val).expect info
     kvt.ifTrackLegaApi:
-      info legaApiTxt info, key=key.toStr, val=val.toSeq.toStr
+      debug legaApiTxt info, key=key.toStr, val=val.toSeq.toStr

   proc contains*(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
     kvt.setTrackLegaApiOnly
     const info = "kvt/contains()"
     result = kvt.distinctBase.hasKey(key).expect info
-    kvt.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result
+    kvt.ifTrackLegaApi: debug legaApiTxt info, key=key.toStr, result

   iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} =
     kvt.setTrackLegaApiOnly
     for k,v in kvt.distinctBase.pairs():
       yield (k,v)
-    kvt.ifTrackLegaApi: info legaApiTxt "kvt/pairs()"
+    kvt.ifTrackLegaApi: debug legaApiTxt "kvt/pairs()"

   # ----------------

   proc toMpt*(phk: CoreDbPhkRef): CoreDbMptRef =
     phk.setTrackLegaApiOnly
     result = phk.distinctBase.toMpt.CoreDbMptRef
-    phk.ifTrackLegaApi: info legaApiTxt "phk/toMpt()"
+    phk.ifTrackLegaApi: debug legaApiTxt "phk/toMpt()"

   proc mptPrune*(db: CoreDbRef; root: Hash256; prune = true): CoreDbMptRef =
     db.setTrackLegaApiOnly
     const info = "mptPrune()"
     let vid = db.getRoot(root, createOk=true).expect info
     result = db.newMpt(vid, prune).CoreDbMptRef
-    db.ifTrackLegaApi: info legaApiTxt info, root=root.toStr, prune
+    db.ifTrackLegaApi: debug legaApiTxt info, root=root.toStr, prune

   proc mptPrune*(db: CoreDbRef; prune = true): CoreDbMptRef =
     db.newMpt(CoreDbVidRef(nil), prune).CoreDbMptRef

@@ -729,14 +843,14 @@ when ProvideCoreDbLegacyAPI:
   proc toPhk*(mpt: CoreDbMptRef): CoreDbPhkRef =
     mpt.setTrackLegaApiOnly
     result = mpt.distinctBase.toPhk.CoreDbPhkRef
-    mpt.ifTrackLegaApi: info legaApiTxt "mpt/toMpt()"
+    mpt.ifTrackLegaApi: debug legaApiTxt "mpt/toMpt()"

   proc phkPrune*(db: CoreDbRef; root: Hash256; prune = true): CoreDbPhkRef =
     db.setTrackLegaApiOnly
     const info = "phkPrune()"
     let vid = db.getRoot(root, createOk=true).expect info
     result = db.newMpt(vid, prune).toCoreDxPhkRef.CoreDbPhkRef
-    db.ifTrackLegaApi: info legaApiTxt info, root=root.toStr, prune
+    db.ifTrackLegaApi: debug legaApiTxt info, root=root.toStr, prune

   proc phkPrune*(db: CoreDbRef; prune = true): CoreDbPhkRef =
     db.newMpt(CoreDbVidRef(nil), prune).toCoreDxPhkRef.CoreDbPhkRef

@@ -746,20 +860,20 @@ when ProvideCoreDbLegacyAPI:
   proc isPruning*(trie: CoreDbTrieRefs): bool =
     trie.setTrackLegaApiOnly
     result = trie.distinctBase.isPruning()
-    trie.ifTrackLegaApi: info legaApiTxt "trie/isPruning()", result
+    trie.ifTrackLegaApi: debug legaApiTxt "trie/isPruning()", result

   proc get*(trie: CoreDbTrieRefs; key: openArray[byte]): Blob =
     trie.setTrackLegaApiOnly
     const info = "trie/get()"
-    result = trie.distinctBase.fetchOrEmpty(key).expect "trie/get()"
+    result = trie.distinctBase.fetchOrEmpty(key).expect info
    trie.ifTrackLegaApi:
-      info legaApiTxt info, key=key.toStr, result=result.toStr
+      debug legaApiTxt info, key=key.toStr, result=result.toStr

   proc del*(trie: CoreDbTrieRefs; key: openArray[byte]) =
     trie.setTrackLegaApiOnly
     const info = "trie/del()"
     trie.distinctBase.delete(key).expect info
-    trie.ifTrackLegaApi: info legaApiTxt info, key=key.toStr
+    trie.ifTrackLegaApi: debug legaApiTxt info, key=key.toStr

   proc put*(trie: CoreDbTrieRefs; key: openArray[byte]; val: openArray[byte]) =
     trie.setTrackLegaApiOnly

@@ -769,41 +883,41 @@ when ProvideCoreDbLegacyAPI:
     const info = "phk/put()"
     trie.distinctBase.merge(key, val).expect info
     trie.ifTrackLegaApi:
-      info legaApiTxt info, key=key.toStr, val=val.toSeq.toStr
+      debug legaApiTxt info, key=key.toStr, val=val.toSeq.toStr

   proc contains*(trie: CoreDbTrieRefs; key: openArray[byte]): bool =
     trie.setTrackLegaApiOnly
     const info = "trie/contains()"
     result = trie.distinctBase.hasPath(key).expect info
-    trie.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result
+    trie.ifTrackLegaApi: debug legaApiTxt info, key=key.toStr, result

   proc rootHash*(trie: CoreDbTrieRefs): Hash256 =
     trie.setTrackLegaApiOnly
|
||||
const info = "trie/rootHash()"
|
||||
result = trie.distinctBase.rootVid().hash().expect info
|
||||
trie.ifTrackLegaApi: info legaApiTxt info, result=result.toStr
|
||||
result = trie.distinctBase.rootVid().hash(update=true).expect info
|
||||
trie.ifTrackLegaApi: debug legaApiTxt info, result=result.toStr
|
||||
|
||||
iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
|
||||
## Trie traversal, not supported for `CoreDbPhkRef`
|
||||
mpt.setTrackLegaApiOnly
|
||||
for k,v in mpt.distinctBase.pairs():
|
||||
yield (k,v)
|
||||
mpt.ifTrackLegaApi: info legaApiTxt "mpt/pairs()"
|
||||
mpt.ifTrackLegaApi: debug legaApiTxt "mpt/pairs()"
|
||||
|
||||
iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
|
||||
## Low level trie dump, not supported for `CoreDbPhkRef`
|
||||
mpt.setTrackLegaApiOnly
|
||||
for k,v in mpt.distinctBase.replicate():
|
||||
yield (k,v)
|
||||
mpt.ifTrackLegaApi: info legaApiTxt "mpt/replicate()"
|
||||
mpt.ifTrackLegaApi: debug legaApiTxt "mpt/replicate()"
|
||||
|
||||
# ----------------
|
||||
|
||||
proc getTransactionID*(db: CoreDbRef): CoreDbTxID =
|
||||
db.setTrackLegaApiOnly
|
||||
const info = "getTransactionID()"
|
||||
result = (db.toTransactionID().expect info).CoreDbTxID
|
||||
db.ifTrackLegaApi: info legaApiTxt info
|
||||
result = db.methods.getIdFn().expect(info).CoreDbTxID
|
||||
db.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc shortTimeReadOnly*(
|
||||
id: CoreDbTxID;
|
||||
|
@ -819,7 +933,7 @@ when ProvideCoreDbLegacyAPI:
|
|||
oops = some(e)
|
||||
# Action has finished now
|
||||
|
||||
id.distinctBase.shortTimeReadOnly(safeFn).expect info
|
||||
id.distinctBase.methods.roWrapperFn(safeFn).expect info
|
||||
|
||||
# Delayed exception
|
||||
if oops.isSome:
|
||||
|
@ -828,37 +942,37 @@ when ProvideCoreDbLegacyAPI:
|
|||
msg = "delayed and reraised" &
|
||||
", name=\"" & $e.name & "\", msg=\"" & e.msg & "\""
|
||||
raise (ref TxWrapperApiError)(msg: msg)
|
||||
id.ifTrackLegaApi: info legaApiTxt info
|
||||
id.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc beginTransaction*(db: CoreDbRef): CoreDbTxRef =
|
||||
db.setTrackLegaApiOnly
|
||||
const info = "newTransaction()"
|
||||
result = (db.distinctBase.newTransaction().expect info).CoreDbTxRef
|
||||
db.ifTrackLegaApi: info legaApiTxt info
|
||||
db.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc commit*(tx: CoreDbTxRef, applyDeletes = true) =
|
||||
tx.setTrackLegaApiOnly
|
||||
const info = "tx/commit()"
|
||||
tx.distinctBase.commit(applyDeletes).expect info
|
||||
tx.ifTrackLegaApi: info legaApiTxt info
|
||||
tx.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc rollback*(tx: CoreDbTxRef) =
|
||||
tx.setTrackLegaApiOnly
|
||||
const info = "tx/rollback()"
|
||||
tx.distinctBase.rollback().expect info
|
||||
tx.ifTrackLegaApi: info legaApiTxt info
|
||||
tx.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc dispose*(tx: CoreDbTxRef) =
|
||||
tx.setTrackLegaApiOnly
|
||||
const info = "tx/dispose()"
|
||||
tx.distinctBase.dispose().expect info
|
||||
tx.ifTrackLegaApi: info legaApiTxt info
|
||||
tx.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc safeDispose*(tx: CoreDbTxRef) =
|
||||
tx.setTrackLegaApiOnly
|
||||
const info = "tx/safeDispose()"
|
||||
tx.distinctBase.safeDispose().expect info
|
||||
tx.ifTrackLegaApi: info legaApiTxt info
|
||||
tx.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
# ----------------
|
||||
|
||||
|
@ -869,24 +983,24 @@ when ProvideCoreDbLegacyAPI:
|
|||
db.setTrackLegaApiOnly
|
||||
const info = "db/capture()"
|
||||
result = db.newCapture(flags).expect(info).CoreDbCaptRef
|
||||
db.ifTrackLegaApi: info legaApiTxt info
|
||||
db.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc recorder*(cp: CoreDbCaptRef): CoreDbRef =
|
||||
cp.setTrackLegaApiOnly
|
||||
const info = "capt/recorder()"
|
||||
result = cp.distinctBase.recorder().expect info
|
||||
cp.ifTrackLegaApi: info legaApiTxt info
|
||||
cp.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc logDb*(cp: CoreDbCaptRef): CoreDbRef =
|
||||
cp.setTrackLegaApiOnly
|
||||
const info = "capt/logDb()"
|
||||
result = cp.distinctBase.logDb().expect info
|
||||
cp.ifTrackLegaApi: info legaApiTxt info
|
||||
cp.ifTrackLegaApi: debug legaApiTxt info
|
||||
|
||||
proc flags*(cp: CoreDbCaptRef): set[CoreDbCaptFlags] =
|
||||
cp.setTrackLegaApiOnly
|
||||
result = cp.distinctBase.flags()
|
||||
cp.ifTrackLegaApi: info legaApiTxt "capt/flags()", result=result.toStr
|
||||
cp.ifTrackLegaApi: debug legaApiTxt "capt/flags()", result=result.toStr
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
|
@ -46,6 +46,13 @@ type
|
|||
MptNotFound
|
||||
AccNotFound
|
||||
RootNotFound
|
||||
HashNotAvailable
|
||||
StorageFailed
|
||||
|
||||
CoreDbSaveFlags* = enum
|
||||
Cached ## Shared, leaves changes in memory cache
|
||||
AutoSave ## Shared, save changes on destruction
|
||||
Companion ## Separate, leaves changes in memory cache
|
||||
|
||||
CoreDbCaptFlags* {.pure.} = enum
|
||||
PersistPut
|
||||
|
@ -57,16 +64,19 @@ type
|
|||
CoreDbBaseBackendFn* = proc(): CoreDbBackendRef {.noRaise.}
|
||||
CoreDbBaseDestroyFn* = proc(flush = true) {.noRaise.}
|
||||
CoreDbBaseVidHashFn* =
|
||||
proc(vid: CoreDbVidRef): Result[Hash256,void] {.noRaise.}
|
||||
proc(vid: CoreDbVidRef; update: bool): CoreDbRc[Hash256] {.noRaise.}
|
||||
CoreDbBaseErrorPrintFn* = proc(e: CoreDbErrorRef): string {.noRaise.}
|
||||
CoreDbBaseInitLegaSetupFn* = proc() {.noRaise.}
|
||||
CoreDbBaseRootFn* =
|
||||
proc(root: Hash256; createOk: bool): CoreDbRc[CoreDbVidRef] {.noRaise.}
|
||||
CoreDbBaseKvtFn* = proc(): CoreDxKvtRef {.noRaise.}
|
||||
CoreDbBaseMptFn* =
|
||||
proc(root: CoreDbVidRef; prune: bool): CoreDbRc[CoreDxMptRef] {.noRaise.}
|
||||
CoreDbBaseAccFn* =
|
||||
proc(root: CoreDbVidRef; prune: bool): CoreDbRc[CoreDxAccRef] {.noRaise.}
|
||||
CoreDbBaseKvtFn* = proc(
|
||||
saveMode: CoreDbSaveFlags): CoreDbRc[CoreDxKvtRef] {.noRaise.}
|
||||
CoreDbBaseMptFn* = proc(
|
||||
root: CoreDbVidRef; prune: bool; saveMode: CoreDbSaveFlags;
|
||||
): CoreDbRc[CoreDxMptRef] {.noRaise.}
|
||||
CoreDbBaseAccFn* = proc(
|
||||
root: CoreDbVidRef; prune: bool; saveMode: CoreDbSaveFlags;
|
||||
): CoreDbRc[CoreDxAccRef] {.noRaise.}
|
||||
CoreDbBaseTxGetIdFn* = proc(): CoreDbRc[CoreDxTxID] {.noRaise.}
|
||||
CoreDbBaseTxBeginFn* = proc(): CoreDbRc[CoreDxTxRef] {.noRaise.}
|
||||
CoreDbBaseCaptFn* =
|
||||
|
@ -103,6 +113,8 @@ type
|
|||
CoreDbKvtDelFn* = proc(k: openArray[byte]): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbKvtPutFn* =
|
||||
proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbKvtDestroyFn* = proc(
|
||||
saveMode: CoreDbSaveFlags): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbKvtHasKeyFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
|
||||
CoreDbKvtPairsIt* = iterator(): (Blob,Blob) {.apiRaise.}
|
||||
|
||||
|
@ -113,6 +125,7 @@ type
|
|||
delFn*: CoreDbKvtDelFn
|
||||
putFn*: CoreDbKvtPutFn
|
||||
hasKeyFn*: CoreDbKvtHasKeyFn
|
||||
destroyFn*: CoreDbKvtDestroyFn
|
||||
pairsIt*: CoreDbKvtPairsIt
|
||||
|
||||
|
||||
|
@ -133,6 +146,8 @@ type
|
|||
CoreDbMptHasPathFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
|
||||
CoreDbMptRootVidFn* = proc(): CoreDbVidRef {.noRaise.}
|
||||
CoreDbMptIsPruningFn* = proc(): bool {.noRaise.}
|
||||
CoreDbMptDestroyFn* = proc(
|
||||
saveMode: CoreDbSaveFlags): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbMptPairsIt* = iterator(): (Blob,Blob) {.apiRaise.}
|
||||
CoreDbMptReplicateIt* = iterator(): (Blob,Blob) {.apiRaise.}
|
||||
|
||||
|
@ -144,9 +159,10 @@ type
|
|||
mergeFn*: CoreDbMptMergeFn
|
||||
hasPathFn*: CoreDbMptHasPathFn
|
||||
rootVidFn*: CoreDbMptRootVidFn
|
||||
isPruningFn*: CoreDbMptIsPruningFn
|
||||
destroyFn*: CoreDbMptDestroyFn
|
||||
pairsIt*: CoreDbMptPairsIt
|
||||
replicateIt*: CoreDbMptReplicateIt
|
||||
isPruningFn*: CoreDbMptIsPruningFn
|
||||
|
||||
|
||||
# ----------------------------------------------------
|
||||
|
@ -160,6 +176,8 @@ type
|
|||
CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
|
||||
CoreDbAccRootVidFn* = proc(): CoreDbVidRef {.noRaise.}
|
||||
CoreDbAccIsPruningFn* = proc(): bool {.noRaise.}
|
||||
CoreDbAccDestroyFn* = proc(
|
||||
saveMode: CoreDbSaveFlags): CoreDbRc[void] {.noRaise.}
|
||||
|
||||
CoreDbAccFns* = object
|
||||
## Methods for trie objects
|
||||
|
@ -170,7 +188,7 @@ type
|
|||
hasPathFn*: CoreDbAccHasPathFn
|
||||
rootVidFn*: CoreDbAccRootVidFn
|
||||
isPruningFn*: CoreDbAccIsPruningFn
|
||||
|
||||
destroyFn*: CoreDbAccDestroyFn
|
||||
|
||||
# --------------------------------------------------
|
||||
# Sub-descriptor: Transaction frame management
|
||||
|
@ -242,18 +260,18 @@ type
|
|||
## Backend wrapper for direct backend access
|
||||
parent*: CoreDbRef
|
||||
|
||||
CoreDxKvtRef* = ref object
|
||||
CoreDxKvtRef* = ref object of RootRef
|
||||
## Statically initialised Key-Value pair table living in `CoreDbRef`
|
||||
parent*: CoreDbRef
|
||||
methods*: CoreDbKvtFns
|
||||
|
||||
CoreDxMptRef* = ref object
|
||||
CoreDxMptRef* = ref object of RootRef
|
||||
## Hexary/Merkle-Patricia tree derived from `CoreDbRef`, will be
|
||||
## initialised on-the-fly.
|
||||
parent*: CoreDbRef
|
||||
methods*: CoreDbMptFns
|
||||
|
||||
CoreDxAccRef* = ref object
|
||||
CoreDxAccRef* = ref object of RootRef
|
||||
## Similar to `CoreDxKvtRef`, only dealing with `CoreDbAccount` data
|
||||
## rather than `Blob` values.
|
||||
parent*: CoreDbRef
|
||||
|
|
|
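
The `saveMode` argument threaded through the constructor and destructor callbacks above is what the optional `CoreDb` destructors hang on. A minimal usage sketch, assuming a `CoreDbRef` named `db`; the public `newKvt(saveMode)` and `destroy(saveMode)` shapes are inferred from the `CoreDbBaseKvtFn` and `CoreDbKvtDestroyFn` signatures, not quoted from the wrapper sources:

# Sketch only: wrapper names inferred from the method tables above.
let kvt = db.newKvt(saveMode = AutoSave).valueOr:  # Cached|AutoSave|Companion
  raiseAssert "newKvt() failed: " & $$error
kvt.put(@[1.byte], @[2.byte]).expect "kvt/put()"
kvt.destroy(saveMode = AutoSave).expect "kvt/destroy()"  # AutoSave: flush here
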
@@ -1,4 +1,4 @@
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -48,6 +48,7 @@ proc validateMethodsDesc(kvt: CoreDbKvtFns) =
doAssert not kvt.getFn.isNil
doAssert not kvt.delFn.isNil
doAssert not kvt.putFn.isNil
doAssert not kvt.destroyFn.isNil
doAssert not kvt.hasKeyFn.isNil
doAssert not kvt.pairsIt.isNil

@@ -59,6 +60,7 @@ proc validateMethodsDesc(fns: CoreDbMptFns) =
doAssert not fns.hasPathFn.isNil
doAssert not fns.rootVidFn.isNil
doAssert not fns.isPruningFn.isNil
doAssert not fns.destroyFn.isNil
doAssert not fns.pairsIt.isNil
doAssert not fns.replicateIt.isNil

@@ -70,6 +72,7 @@ proc validateMethodsDesc(fns: CoreDbAccFns) =
doAssert not fns.hasPathFn.isNil
doAssert not fns.rootVidFn.isNil
doAssert not fns.isPruningFn.isNil
doAssert not fns.destroyFn.isNil

# ------------

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -532,7 +532,7 @@ proc persistTransactions*(
kvt.put(blockKey.toOpenArray, rlp.encode(txKey)).isOkOr:
warn logTxt info, blockKey, action="put()", error=($$error)
return EMPTY_ROOT_HASH
mpt.rootVid.hash.valueOr:
mpt.rootVid.hash(update=true).valueOr:
warn logTxt info, action="hash()"
return EMPTY_ROOT_HASH

@@ -623,7 +623,7 @@ proc persistWithdrawals*(
mpt.merge(rlp.encode(idx), rlp.encode(wd)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
return EMPTY_ROOT_HASH
mpt.rootVid.hash.valueOr:
mpt.rootVid.hash(update=true).valueOr:
warn logTxt info, action="hash()"
return EMPTY_ROOT_HASH

@@ -769,7 +769,7 @@ proc persistReceipts*(
for idx, rec in receipts:
mpt.merge(rlp.encode(idx), rlp.encode(rec)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
mpt.rootVid.hash.valueOr:
mpt.rootVid.hash(update=true).valueOr:
warn logTxt info, action="hash()"
return EMPTY_ROOT_HASH
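
All three `persist*()` call sites above move to the explicit `update=true` form. The reason is lazy hashing on `Aristo`: a sketch of the contract, with the descriptor setup assumed:

# Sketch: with update=false the state root is only returned when it is
# already hashed; update=true asks the backend to re-hash pending changes
# first (a no-op on the legacy DB, `hashify()` on Aristo).
let root = mpt.rootVid.hash(update=true).valueOr:
  return EMPTY_ROOT_HASH  # e.g. HashNotAvailable if re-hashing failed
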
@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -36,7 +36,10 @@ export
CoreDbApiError,
CoreDbErrorCode,
CoreDbErrorRef,
CoreDbPersistentTypes,
CoreDbRef,
CoreDbSaveFlags,
CoreDbTxID,
CoreDbType,
CoreDbVidRef,
CoreDxAccRef,

@@ -44,7 +47,6 @@ export
CoreDxKvtRef,
CoreDxMptRef,
CoreDxPhkRef,
CoreDxTxID,
CoreDxTxRef,
`$$`,
backend,

@@ -86,12 +88,9 @@ export
setTransactionID,
toLegacy,
toMpt,
toPhk,
toTransactionID
toPhk

when ProvideCoreDbLegacyAPI:
type
CoreDyTxID = CoreDxTxID|CoreDbTxID
export
CoreDbCaptFlags,
CoreDbCaptRef,

@@ -137,13 +136,13 @@ proc newCoreDbRef*(
newLegacyMemoryCoreDbRef()

else:
{.error: "Unsupported dbType for memory-only newCoreDbRef()".}
{.error: "Unsupported constructor " & $dbType & ".newCoreDbRef()".}

# ------------------------------------------------------------------------------
# Public template wrappers
# ------------------------------------------------------------------------------

template shortTimeReadOnly*(id: CoreDyTxID; body: untyped) =
template shortTimeReadOnly*(id: CoreDbTxID; body: untyped) =
proc action() =
body
id.shortTimeReadOnly action
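
The template is narrowed to the legacy `CoreDbTxID`, matching the removal of `CoreDxTxID` from the exports above. Call sites are unchanged; a sketch with an arbitrary key:

# Sketch: run a read-only body against the state pinned by `id`.
let id = db.getTransactionID()
id.shortTimeReadOnly:
  discard db.kvt.get(@[0.byte])
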
@@ -0,0 +1,173 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
std/[algorithm, sequtils, strutils, tables],
eth/common,
results,
stew/byteutils,
./kvt_desc,
./kvt_desc/desc_backend,
./kvt_init/[memory_db, memory_only, rocks_db]

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc squeeze(s: string; hex = false; ignLen = false): string =
## For long strings print `begin..end` only
if hex:
let n = (s.len + 1) div 2
result = if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. ^1]
if not ignLen:
result &= "[" & (if 0 < n: "#" & $n else: "") & "]"
elif s.len <= 30:
result = s
else:
result = if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]
if not ignLen:
result &= "..(" & $s.len & ")"
result &= ".." & s[s.len-16 .. ^1]

proc stripZeros(a: string): string =
a.strip(leading=true, trailing=false, chars={'0'})

proc toPfx(indent: int; offset = 0): string =
if 0 < indent+offset: "\n" & " ".repeat(indent+offset) else: ""

func getOrVoid*(tab: Table[Blob,uint64]; w: Blob): uint64 =
tab.getOrDefault(w, 0u64)

func getOrVoid*(tab: Table[uint64,Blob]; w: uint64): Blob =
tab.getOrDefault(w, EmptyBlob)

func isValid*(id: uint64): bool =
0 < id

proc keyID(key: Blob; db = KvtDbRef(nil)): uint64 =
if key.len == 0:
return 0
elif db.isNil:
return high(uint64)
else:
let
ctr = db.getCentre
id = ctr.xMap.getOrVoid key
if id.isValid:
id
else:
# Save new ID
ctr.xIdGen.inc
ctr.xMap[key] = db.xIdGen
ctr.pAmx[db.xIdGen] = key
ctr.xIdGen

proc keyBlob(id: uint64; db = KvtDbRef(nil)): Blob =
if 0 < id and not db.isNil:
result = db.getCentre.pAmx.getOrVoid id

proc ppID(id: uint64): string =
"$" & (if id == 0: "ø" else: $id)

proc ppKey(key: Blob; db = KvtDbRef(nil)): string =
if key.len == 0:
0.ppID
elif db.isNil:
key[0 .. 0].toHex & "-" &
key[1 ..< key.len].toHex.squeeze(hex=true,ignLen=(key.len==33))
else:
key.keyID(db).ppID

proc ppValue(data: Blob): string =
data.toHex.squeeze(hex=true)

proc ppTab(tab: Table[Blob,Blob]; db = KvtDbRef(nil); indent = 4): string =
result = "{"
if db.isNil:
let keys = tab.keys.toSeq.sorted
result &= keys.mapIt((it, tab.getOrVoid it))
.mapIt("(" & it[0].ppKey & "," & it[1].ppValue & ")")
.join(indent.toPfx(1))
else:
let keys = tab.keys.toSeq.mapIt(it.keyID db).sorted
result &= keys.mapIt((it, tab.getOrVoid db.pAmx.getOrVoid(it)))
.mapIt("(" & it[0].ppID & "," & it[1].ppValue & ")")
.join(indent.toPfx(1))
result &= "}"

proc ppMap(tab: Table[uint64,Blob]; indent = 4): string =
let keys = tab.keys.toSeq.sorted
"{" &
keys.mapIt((it, tab.getOrVoid it))
.mapIt("(" & it[0].ppID & "," & it[1].ppKey & ")")
.join(indent.toPfx(1)) &
"}"

proc ppBe[T](be: T; db: KvtDbRef; indent: int): string =
## Walk over backend table
let
pfx1 = indent.toPfx(1)
pfx2 = indent.toPfx(2)
pfx3 = indent.toPfx(3)
data = be.walk.toSeq.mapIt(
$(1+it[0]) & "(" & it[1].ppKey(db) & "," & it[2].ppValue & ")"
).join(pfx3)
spc = if 0 < data.len: pfx2 else: " "
"<" & $be.kind & ">" & pfx1 & "tab" & spc & "{" & data & "}"

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc pp*(layer: LayerRef; db: KvtDbRef; indent = 4): string =
let
tLen = layer.tab.len
info = "tab(" & $tLen & ")"
pfx1 = indent.toPfx(1)
pfx2 = if 0 < tLen: indent.toPfx(2) else: " "
"<layer>" & pfx1 & info & pfx2 & layer.tab.ppTab(db,indent+2)

proc pp*(
be: BackendRef;
db: KvtDbRef;
indent = 4;
): string =
case be.kind:
of BackendMemory:
result &= be.MemBackendRef.ppBe(db, indent)
of BackendRocksDB:
result &= be.RdbBackendRef.ppBe(db, indent)
of BackendVoid:
result &= "<NoBackend>"

proc pp*(
db: KvtDbRef;
backendOk = false;
keysOk = false;
indent = 4;
): string =
let
pfx = indent.toPfx
pfx1 = indent.toPfx(1)
result = db.top.pp(db, indent=indent)
if backendOk:
result &= pfx & db.backend.pp(db, indent=indent)
if keysOk:
result &= pfx & "<keys>" & pfx1 & db.getCentre.pAmx.ppMap(indent=indent+1)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
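
A quick sketch of the new `Kvt` dump helpers; the constructor shape and the direct top-layer access are assumptions modelled on the `Aristo` debugging workflow, not taken from this diff:

# Sketch, assumed constructor and field access for illustration only.
let db = KvtDbRef.init(MemBackendRef)
db.top.tab[@[1.byte]] = @[2.byte, 3]     # seed the top layer directly
echo db.pp(backendOk=true, keysOk=true)  # layer, backend and key-ID map
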
@@ -56,6 +56,11 @@ type
txUidGen*: uint ## Tx-relative unique number generator
dudes: DudesRef ## Related DB descriptors

# Debugging data below, might go away in future
xIdGen*: uint64
xMap*: Table[Blob,uint64] ## For pretty printing
pAmx*: Table[uint64,Blob] ## For pretty printing

KvtDbAction* = proc(db: KvtDbRef) {.gcsafe, raises: [].}
## Generic call back function/closure.

@@ -97,10 +102,7 @@ func getCentre*(db: KvtDbRef): KvtDbRef =
else:
db.dudes.rwDb

proc reCentre*(
db: KvtDbRef;
force = false;
): Result[void,KvtError] =
proc reCentre*(db: KvtDbRef) =
## Re-focus the `db` argument descriptor so that it becomes the centre.
## Nothing is done if the `db` descriptor is the centre, already.
##

@@ -114,11 +116,6 @@ proc reCentre*(
## accessing the same backend database. Descriptors where `isCentre()`
## returns `false` must be single destructed with `forget()`.
##
## If there is an open transaction spanning several descriptors, the `force`
## flag must be set `true` (unless the argument `db` is centre, already.) The
## argument `db` must be covered by the transaction span. Then the re-centred
## descriptor will also be the centre of the transaction span.
##
if not db.isCentre:
let parent = db.dudes.rwDb

@@ -137,8 +134,6 @@ proc reCentre*(
# Update dudes list (parent was already updated)
db.dudes.roDudes.incl parent

ok()

proc fork*(
db: KvtDbRef;

@@ -27,10 +27,11 @@
{.push raises: [].}

import
std/tables,
std/[algorithm, sequtils, tables],
chronicles,
eth/common,
results,
stew/byteutils,
../kvt_desc,
../kvt_desc/desc_backend,
./init_common

@@ -139,10 +140,14 @@ proc memoryBackend*: BackendRef =

iterator walk*(
be: MemBackendRef;
): tuple[key: Blob, data: Blob] =
): tuple[n: int, key: Blob, data: Blob] =
## Walk over all key-value pairs of the database.
for (k,v) in be.tab.pairs:
yield (k,v)
for n,key in be.tab.keys.toSeq.sorted:
let data = be.tab.getOrVoid key
if data.len == 0:
debug logTxt "walk() skip empty", n, key
else:
yield (n, key, data)

# ------------------------------------------------------------------------------
# End
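
Both backends now yield a running index with each pair, mirroring the `Aristo` iterators; consuming code destructures a three-tuple (sketch, backend setup assumed):

# Sketch: `be` is a MemBackendRef or RdbBackendRef set up elsewhere.
for (n, key, data) in be.walk:
  echo n, ": ", key.toHex, " -> ", data.toHex
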
@@ -169,11 +169,13 @@ proc rocksDbBackend*(

iterator walk*(
be: RdbBackendRef;
): tuple[key: Blob, data: Blob] =
): tuple[n: int, key: Blob, data: Blob] =
## Walk over all key-value pairs of the database.
##
var n = 0
for (k,v) in be.rdb.walk:
yield (k,v)
yield (n, k,v)
n.inc

# ------------------------------------------------------------------------------
# End

@@ -221,7 +221,7 @@ proc collapse*(
##
let db = ? tx.getDbDescFromTopTx()

# If commit, then leave the current layer and clear the stack, oterwise
# If commit, then leave the current layer and clear the stack, otherwise
# install the stack bottom.
if not commit:
db.stack[0].swap db.top

@@ -102,7 +102,7 @@ proc hasKey*(
let data = db.top.tab.getOrVoid @key
if data.isValid:
return ok(true)
let rc = db.getBE(key)
let rc = db.getBE key
if rc.isOk:
return ok(true)
if rc.error == GetNotFound:

@@ -29,10 +29,10 @@ export
iterator walkPairs*[T: MemBackendRef|VoidBackendRef](
_: type T;
db: KvtDbRef;
): tuple[key: Blob, data: Blob] =
): tuple[n: int; key: Blob, data: Blob] =
## Iterate over backend filters.
for (vid,vtx) in walkPairsImpl[T](db):
yield (vid,vtx)
for (n, vid,vtx) in walkPairsImpl[T](db):
yield (n, vid,vtx)

# ------------------------------------------------------------------------------
# End

@@ -35,10 +35,10 @@ export
iterator walkPairs*(
T: type RdbBackendRef;
db: KvtDbRef;
): tuple[key: Blob, data: Blob] =
): tuple[n: int, key: Blob, data: Blob] =
## Iterate over backend filters.
for (vid,vtx) in walkPairsImpl[T](db):
yield (vid,vtx)
for (n, vid,vtx) in walkPairsImpl[T](db):
yield (n, vid,vtx)

# ------------------------------------------------------------------------------
# End

@@ -20,20 +20,22 @@ import

iterator walkPairsImpl*[T](
db: KvtDbRef; # Database with top layer & backend filter
): tuple[key: Blob, data: Blob] =
): tuple[n: int, key: Blob, data: Blob] =
## Walk over all `(key,data)` pairs in the database. Note that entries
## are unsorted.

var i = 0
for (key,data) in db.top.tab.pairs:
if data.isValid:
yield (key,data)
yield (i,key,data)
inc i

when T isnot VoidBackendRef:
mixin walk

for (key,data) in db.backend.T.walk:
for (n,key,data) in db.backend.T.walk:
if key notin db.top.tab and data.isValid:
yield (key,data)
yield (n+i,key,data)

# ------------------------------------------------------------------------------
# End
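
The numbering scheme in `walkPairsImpl` is: top-layer entries are counted locally in `i`, then backend entries reuse the backend's own counter shifted by `i`. Indices therefore increase monotonically, with gaps only where a backend key is shadowed by the top layer; a schematic check (sketch):

# Sketch: indices from the combined walk are strictly increasing.
var highest = -1
for (n, key, data) in walkPairsImpl[MemBackendRef](db):
  doAssert highest < n
  highest = n
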
@@ -56,6 +56,7 @@ type

AccountsLedgerRef* = ref object
ledger: AccountLedger
kvt: CoreDxKvtRef
savePoint: LedgerSavePoint
witnessCache: Table[EthAddress, WitnessData]
isDirty: bool

@@ -91,11 +92,6 @@ const
NewlyCreated
}

ripemdAddr* = block:
proc initAddress(x: int): EthAddress {.compileTime.} =
result[19] = x.byte
initAddress(3)

when debugAccountsLedgerRef:
import
stew/byteutils

@@ -108,15 +104,15 @@ when debugAccountsLedgerRef:
debugEcho address.toHex, " ", acc.flags
sp = sp.parentSavepoint

template logTxt(info: static[string]): static[string] =
"AccountsLedgerRef " & info

proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint {.gcsafe.}

# FIXME-Adam: this is only necessary because of my sanity checks on the latest rootHash;
# take this out once those are gone.
proc rawTrie*(ac: AccountsLedgerRef): AccountLedger = ac.ledger

proc db(ac: AccountsLedgerRef): CoreDbRef = ac.ledger.db
proc kvt(ac: AccountsLedgerRef): CoreDbKvtRef = ac.db.kvt

func newCoreDbAccount: CoreDbAccount =
CoreDbAccount(
nonce: emptyAcc.nonce,

@@ -135,6 +131,7 @@ proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
root: KeccakHash, pruneTrie = true): AccountsLedgerRef =
new result
result.ledger = AccountLedger.init(db, root, pruneTrie)
result.kvt = db.newKvt()
result.witnessCache = initTable[EthAddress, WitnessData]()
discard result.beginSavepoint

@@ -300,9 +297,14 @@ proc persistMode(acc: RefAccount): PersistMode =
proc persistCode(acc: RefAccount, ac: AccountsLedgerRef) =
if acc.code.len != 0:
when defined(geth):
ac.kvt.put(acc.account.codeHash.data, acc.code)
let rc = ac.kvt.put(
acc.account.codeHash.data, acc.code)
else:
ac.kvt.put(contractHashKey(acc.account.codeHash).toOpenArray, acc.code)
let rc = ac.kvt.put(
contractHashKey(acc.account.codeHash).toOpenArray, acc.code)
if rc.isErr:
warn logTxt "persistCode()",
codeHash=acc.account.codeHash, error=($$rc.error)

proc persistStorage(acc: RefAccount, ac: AccountsLedgerRef, clearCache: bool) =
if acc.overlayStorage.len == 0:

@@ -322,8 +324,12 @@ proc persistStorage(acc: RefAccount, ac: AccountsLedgerRef, clearCache: bool) =
else:
storageLedger.delete(slot)

let key = slot.toBytesBE.keccakHash.data.slotHashToSlotKey
ac.kvt.put(key.toOpenArray, rlp.encode(slot))
let
key = slot.toBytesBE.keccakHash.data.slotHashToSlotKey
rc = ac.kvt.put(key.toOpenArray, rlp.encode(slot))
if rc.isErr:
warn logTxt "persistStorage()", slot, error=($$rc.error)

if not clearCache:
# if we preserve cache, move the overlayStorage

@@ -373,12 +379,15 @@ proc getCode*(ac: AccountsLedgerRef, address: EthAddress): seq[byte] =
if CodeLoaded in acc.flags or CodeChanged in acc.flags:
result = acc.code
else:
let rc = block:
when defined(geth):
let data = ac.kvt.get(acc.account.codeHash.data)
ac.kvt.get(acc.account.codeHash.data)
else:
let data = ac.kvt.get(contractHashKey(acc.account.codeHash).toOpenArray)

acc.code = data
ac.kvt.get(contractHashKey(acc.account.codeHash).toOpenArray)
if rc.isErr:
warn logTxt "getCode()", codeHash=acc.account.codeHash, error=($$rc.error)
else:
acc.code = rc.value
acc.flags.incl CodeLoaded
result = acc.code

@@ -482,7 +491,7 @@ proc clearStorage*(ac: AccountsLedgerRef, address: EthAddress) =

let acc = ac.getAccount(address)
acc.flags.incl {Alive, NewlyCreated}
let accHash = acc.account.storageVid.hash.valueOr: return
let accHash = acc.account.storageVid.hash(update=true).valueOr: return
if accHash != EMPTY_ROOT_HASH:
# there is no point to clone the storage since we want to remove it
let acc = ac.makeDirty(address, cloneStorage = false)

@@ -545,7 +554,7 @@ proc clearEmptyAccounts(ac: AccountsLedgerRef) =

# https://github.com/ethereum/EIPs/issues/716
if ac.ripemdSpecial:
ac.deleteEmptyAccount(ripemdAddr)
ac.deleteEmptyAccount(RIPEMD_ADDR)
ac.ripemdSpecial = false

proc persist*(ac: AccountsLedgerRef,

@@ -606,13 +615,13 @@ iterator accounts*(ac: AccountsLedgerRef): Account =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
for _, account in ac.savePoint.cache:
yield account.account.recast.value
yield account.account.recast(update=true).value

iterator pairs*(ac: AccountsLedgerRef): (EthAddress, Account) =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
for address, account in ac.savePoint.cache:
yield (address, account.account.recast.value)
yield (address, account.account.recast(update=true).value)

iterator storage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) {.gcsafe, raises: [CoreDbApiError].} =
# beware that if the account is not persisted,

@@ -622,9 +631,11 @@ iterator storage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256
noRlpException "storage()":
for slotHash, value in ac.ledger.storage acc.account:
if slotHash.len == 0: continue
let keyData = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
if keyData.len == 0: continue
yield (rlp.decode(keyData, UInt256), rlp.decode(value, UInt256))
let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
if rc.isErr:
warn logTxt "storage()", slotHash, error=($$rc.error)
else:
yield (rlp.decode(rc.value, UInt256), rlp.decode(value, UInt256))

iterator cachedStorage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) =
let acc = ac.getAccount(address, false)

@@ -638,7 +649,7 @@ proc getStorageRoot*(ac: AccountsLedgerRef, address: EthAddress): Hash256 =
# the storage root will not be updated
let acc = ac.getAccount(address, false)
if acc.isNil: EMPTY_ROOT_HASH
else: acc.account.storageVid.hash.valueOr: EMPTY_ROOT_HASH
else: acc.account.storageVid.hash(update=true).valueOr: EMPTY_ROOT_HASH

func update(wd: var WitnessData, acc: RefAccount) =
wd.codeTouched = CodeChanged in acc.flags

@@ -42,7 +42,8 @@ const
# ------------------------------------------------------------------------------

when EnableApiTracking:
import std/strutils, stew/byteutils
import std/strutils, chronicles, stew/byteutils
{.warning: "*** Provided API logging for Ledger (disabled by default)".}

template apiTxt(info: static[string]): static[string] =
"Ledger API " & info

@@ -81,7 +82,7 @@ proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =
when EnableApiTracking:
ldg.trackApi = db.trackLedgerApi
if ldg.trackApi:
info apiTxt "LedgerRef.init()", ldgType=ldg.ldgType
debug apiTxt "LedgerRef.init()", ldgType=ldg.ldgType
ldg

# ------------------------------------------------------------------------------
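
With the flag propagated by `bless()` above, every Ledger API wrapper below now logs at `debug` rather than `info` level. A sketch of switching it on; the `trackLedgerApi` switch is from the diff, while the constructor call is an assumption:

# Sketch, assuming the ledger was compiled with EnableApiTracking.
db.trackLedgerApi = true      # picked up by bless()
let ldg = LedgerRef.init(db)  # assumed constructor shape
ldg.persist()                 # logs "Ledger API persist()" at debug level
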
@@ -90,189 +91,189 @@ proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =

proc accessList*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.methods.accessListFn(eAddr)
ldg.ifTrackApi: info apiTxt "accessList()", eAddr=eAddr.toStr
ldg.ifTrackApi: debug apiTxt "accessList()", eAddr=eAddr.toStr

proc accessList*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256) =
ldg.methods.accessList2Fn(eAddr, slot)
ldg.ifTrackApi: info apiTxt "accessList()", eAddr=eAddr.toStr, slot
ldg.ifTrackApi: debug apiTxt "accessList()", eAddr=eAddr.toStr, slot

proc accountExists*(ldg: LedgerRef, eAddr: EthAddress): bool =
result = ldg.methods.accountExistsFn(eAddr)
ldg.ifTrackApi: info apiTxt "accountExists()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "accountExists()", eAddr=eAddr.toStr, result

proc addBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
ldg.methods.addBalanceFn(eAddr, delta)
ldg.ifTrackApi: info apiTxt "addBalance()", eAddr=eAddr.toStr, delta
ldg.ifTrackApi: debug apiTxt "addBalance()", eAddr=eAddr.toStr, delta

proc addLogEntry*(ldg: LedgerRef, log: Log) =
ldg.methods.addLogEntryFn(log)
ldg.ifTrackApi: info apiTxt "addLogEntry()"
ldg.ifTrackApi: debug apiTxt "addLogEntry()"

proc beginSavepoint*(ldg: LedgerRef): LedgerSpRef =
result = ldg.methods.beginSavepointFn()
ldg.ifTrackApi: info apiTxt "beginSavepoint()"
ldg.ifTrackApi: debug apiTxt "beginSavepoint()"

proc clearStorage*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.methods.clearStorageFn(eAddr)
ldg.ifTrackApi: info apiTxt "clearStorage()", eAddr=eAddr.toStr
ldg.ifTrackApi: debug apiTxt "clearStorage()", eAddr=eAddr.toStr

proc clearTransientStorage*(ldg: LedgerRef) =
ldg.methods.clearTransientStorageFn()
ldg.ifTrackApi: info apiTxt "clearTransientStorage()"
ldg.ifTrackApi: debug apiTxt "clearTransientStorage()"

proc collectWitnessData*(ldg: LedgerRef) =
ldg.methods.collectWitnessDataFn()
ldg.ifTrackApi: info apiTxt "collectWitnessData()"
ldg.ifTrackApi: debug apiTxt "collectWitnessData()"

proc commit*(ldg: LedgerRef, sp: LedgerSpRef) =
ldg.methods.commitFn(sp)
ldg.ifTrackApi: info apiTxt "commit()"
ldg.ifTrackApi: debug apiTxt "commit()"

proc deleteAccount*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.methods.deleteAccountFn(eAddr)
ldg.ifTrackApi: info apiTxt "deleteAccount()", eAddr=eAddr.toStr
ldg.ifTrackApi: debug apiTxt "deleteAccount()", eAddr=eAddr.toStr

proc dispose*(ldg: LedgerRef, sp: LedgerSpRef) =
ldg.methods.disposeFn(sp)
ldg.ifTrackApi: info apiTxt "dispose()"
ldg.ifTrackApi: debug apiTxt "dispose()"

proc getAndClearLogEntries*(ldg: LedgerRef): seq[Log] =
result = ldg.methods.getAndClearLogEntriesFn()
ldg.ifTrackApi: info apiTxt "getAndClearLogEntries()"
ldg.ifTrackApi: debug apiTxt "getAndClearLogEntries()"

proc getBalance*(ldg: LedgerRef, eAddr: EthAddress): UInt256 =
result = ldg.methods.getBalanceFn(eAddr)
ldg.ifTrackApi: info apiTxt "getBalance()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "getBalance()", eAddr=eAddr.toStr, result

proc getCode*(ldg: LedgerRef, eAddr: EthAddress): Blob =
result = ldg.methods.getCodeFn(eAddr)
ldg.ifTrackApi:
info apiTxt "getCode()", eAddr=eAddr.toStr, result=result.toStr
debug apiTxt "getCode()", eAddr=eAddr.toStr, result=result.toStr

proc getCodeHash*(ldg: LedgerRef, eAddr: EthAddress): Hash256 =
result = ldg.methods.getCodeHashFn(eAddr)
ldg.ifTrackApi:
info apiTxt "getCodeHash()", eAddr=eAddr.toStr, result=result.toStr
debug apiTxt "getCodeHash()", eAddr=eAddr.toStr, result=result.toStr

proc getCodeSize*(ldg: LedgerRef, eAddr: EthAddress): int =
result = ldg.methods.getCodeSizeFn(eAddr)
ldg.ifTrackApi: info apiTxt "getCodeSize()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "getCodeSize()", eAddr=eAddr.toStr, result

proc getCommittedStorage*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): UInt256 =
result = ldg.methods.getCommittedStorageFn(eAddr, slot)
ldg.ifTrackApi:
info apiTxt "getCommittedStorage()", eAddr=eAddr.toStr, slot, result
debug apiTxt "getCommittedStorage()", eAddr=eAddr.toStr, slot, result

proc getNonce*(ldg: LedgerRef, eAddr: EthAddress): AccountNonce =
result = ldg.methods.getNonceFn(eAddr)
ldg.ifTrackApi: info apiTxt "getNonce()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "getNonce()", eAddr=eAddr.toStr, result

proc getStorage*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): UInt256 =
result = ldg.methods.getStorageFn(eAddr, slot)
ldg.ifTrackApi: info apiTxt "getStorage()", eAddr=eAddr.toStr, slot, result
ldg.ifTrackApi: debug apiTxt "getStorage()", eAddr=eAddr.toStr, slot, result

proc getStorageRoot*(ldg: LedgerRef, eAddr: EthAddress): Hash256 =
result = ldg.methods.getStorageRootFn(eAddr)
ldg.ifTrackApi:
info apiTxt "getStorageRoot()", eAddr=eAddr.toStr, result=result.toStr
debug apiTxt "getStorageRoot()", eAddr=eAddr.toStr, result=result.toStr

proc getTransientStorage*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): UInt256 =
result = ldg.methods.getTransientStorageFn(eAddr, slot)
ldg.ifTrackApi:
info apiTxt "getTransientStorage()", eAddr=eAddr.toStr, slot, result
debug apiTxt "getTransientStorage()", eAddr=eAddr.toStr, slot, result

proc hasCodeOrNonce*(ldg: LedgerRef, eAddr: EthAddress): bool =
result = ldg.methods.hasCodeOrNonceFn(eAddr)
ldg.ifTrackApi: info apiTxt "hasCodeOrNonce()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "hasCodeOrNonce()", eAddr=eAddr.toStr, result

proc inAccessList*(ldg: LedgerRef, eAddr: EthAddress): bool =
result = ldg.methods.inAccessListFn(eAddr)
ldg.ifTrackApi: info apiTxt "inAccessList()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "inAccessList()", eAddr=eAddr.toStr, result

proc inAccessList*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): bool =
result = ldg.methods.inAccessList2Fn(eAddr, slot)
ldg.ifTrackApi: info apiTxt "inAccessList()", eAddr=eAddr.toStr, slot, result
ldg.ifTrackApi: debug apiTxt "inAccessList()", eAddr=eAddr.toStr, slot, result

proc incNonce*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.methods.incNonceFn(eAddr)
ldg.ifTrackApi: info apiTxt "incNonce()", eAddr=eAddr.toStr
ldg.ifTrackApi: debug apiTxt "incNonce()", eAddr=eAddr.toStr

proc isDeadAccount*(ldg: LedgerRef, eAddr: EthAddress): bool =
result = ldg.methods.isDeadAccountFn(eAddr)
ldg.ifTrackApi: info apiTxt "isDeadAccount()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "isDeadAccount()", eAddr=eAddr.toStr, result

proc isEmptyAccount*(ldg: LedgerRef, eAddr: EthAddress): bool =
result = ldg.methods.isEmptyAccountFn(eAddr)
ldg.ifTrackApi: info apiTxt "isEmptyAccount()", eAddr=eAddr.toStr, result
ldg.ifTrackApi: debug apiTxt "isEmptyAccount()", eAddr=eAddr.toStr, result

proc isTopLevelClean*(ldg: LedgerRef): bool =
result = ldg.methods.isTopLevelCleanFn()
ldg.ifTrackApi: info apiTxt "isTopLevelClean()", result
ldg.ifTrackApi: debug apiTxt "isTopLevelClean()", result

proc logEntries*(ldg: LedgerRef): seq[Log] =
result = ldg.methods.logEntriesFn()
ldg.ifTrackApi: info apiTxt "logEntries()", result=result.toStr
ldg.ifTrackApi: debug apiTxt "logEntries()", result=result.toStr

proc makeMultiKeys*(ldg: LedgerRef): MultikeysRef =
result = ldg.methods.makeMultiKeysFn()
ldg.ifTrackApi: info apiTxt "makeMultiKeys()"
ldg.ifTrackApi: debug apiTxt "makeMultiKeys()"

proc persist*(ldg: LedgerRef, clearEmptyAccount = false, clearCache = true) =
ldg.methods.persistFn(clearEmptyAccount, clearCache)
ldg.ifTrackApi: info apiTxt "persist()", clearEmptyAccount, clearCache
ldg.ifTrackApi: debug apiTxt "persist()", clearEmptyAccount, clearCache

proc ripemdSpecial*(ldg: LedgerRef) =
ldg.methods.ripemdSpecialFn()
ldg.ifTrackApi: info apiTxt "ripemdSpecial()"
ldg.ifTrackApi: debug apiTxt "ripemdSpecial()"

proc rollback*(ldg: LedgerRef, sp: LedgerSpRef) =
ldg.methods.rollbackFn(sp)
ldg.ifTrackApi: info apiTxt "rollback()"
ldg.ifTrackApi: debug apiTxt "rollback()"

proc rootHash*(ldg: LedgerRef): Hash256 =
result = ldg.methods.rootHashFn()
ldg.ifTrackApi: info apiTxt "rootHash()", result=result.toStr
ldg.ifTrackApi: debug apiTxt "rootHash()", result=result.toStr

proc safeDispose*(ldg: LedgerRef, sp: LedgerSpRef) =
ldg.methods.safeDisposeFn(sp)
ldg.ifTrackApi: info apiTxt "safeDispose()"
ldg.ifTrackApi: debug apiTxt "safeDispose()"

proc selfDestruct*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.methods.selfDestructFn(eAddr)
ldg.ifTrackApi: info apiTxt "selfDestruct()"
ldg.ifTrackApi: debug apiTxt "selfDestruct()"

proc selfDestruct6780*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.methods.selfDestruct6780Fn(eAddr)
ldg.ifTrackApi: info apiTxt "selfDestruct6780()"
ldg.ifTrackApi: debug apiTxt "selfDestruct6780()"

proc selfDestructLen*(ldg: LedgerRef): int =
result = ldg.methods.selfDestructLenFn()
ldg.ifTrackApi: info apiTxt "selfDestructLen()", result
ldg.ifTrackApi: debug apiTxt "selfDestructLen()", result

proc setBalance*(ldg: LedgerRef, eAddr: EthAddress, balance: UInt256) =
ldg.methods.setBalanceFn(eAddr, balance)
ldg.ifTrackApi: info apiTxt "setBalance()", eAddr=eAddr.toStr, balance
ldg.ifTrackApi: debug apiTxt "setBalance()", eAddr=eAddr.toStr, balance

proc setCode*(ldg: LedgerRef, eAddr: EthAddress, code: Blob) =
ldg.methods.setCodeFn(eAddr, code)
ldg.ifTrackApi: info apiTxt "setCode()", eAddr=eAddr.toStr, code=code.toStr
ldg.ifTrackApi: debug apiTxt "setCode()", eAddr=eAddr.toStr, code=code.toStr

proc setNonce*(ldg: LedgerRef, eAddr: EthAddress, nonce: AccountNonce) =
ldg.methods.setNonceFn(eAddr, nonce)
ldg.ifTrackApi: info apiTxt "setNonce()", eAddr=eAddr.toStr, nonce
ldg.ifTrackApi: debug apiTxt "setNonce()", eAddr=eAddr.toStr, nonce

proc setStorage*(ldg: LedgerRef, eAddr: EthAddress, slot, val: UInt256) =
ldg.methods.setStorageFn(eAddr, slot, val)
ldg.ifTrackApi: info apiTxt "setStorage()", eAddr=eAddr.toStr, slot, val
ldg.ifTrackApi: debug apiTxt "setStorage()", eAddr=eAddr.toStr, slot, val

proc setTransientStorage*(ldg: LedgerRef, eAddr: EthAddress, slot, val: UInt256) =
ldg.methods.setTransientStorageFn(eAddr, slot, val)
ldg.ifTrackApi:
info apiTxt "setTransientStorage()", eAddr=eAddr.toStr, slot, val
debug apiTxt "setTransientStorage()", eAddr=eAddr.toStr, slot, val

proc subBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
ldg.methods.subBalanceFn(eAddr, delta)
ldg.ifTrackApi: info apiTxt "subBalance()", eAddr=eAddr.toStr, delta
ldg.ifTrackApi: debug apiTxt "subBalance()", eAddr=eAddr.toStr, delta

# ------------------------------------------------------------------------------
|
||||
# Public methods, extensions to go away
|
||||
|
@ -280,7 +281,7 @@ proc subBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
|
|||
|
||||
proc rawRootHash*(ldg: LedgerRef): Hash256 =
|
||||
result = ldg.extras.rawRootHashFn()
|
||||
ldg.ifTrackApi: info apiTxt "rawRootHash()", result=result.toStr
|
||||
ldg.ifTrackApi: debug apiTxt "rawRootHash()", result=result.toStr
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public virtual read-only methods
|
||||
|
|
|
@ -71,7 +71,7 @@ proc db*(t: SomeLedger): CoreDbRef =
|
|||
t.distinctBase.parent
|
||||
|
||||
proc rootHash*(t: SomeLedger): Hash256 =
|
||||
t.distinctBase.rootVid().hash().expect "SomeLedger/rootHash()"
|
||||
t.distinctBase.rootVid().hash(update=true).expect "SomeLedger/rootHash()"
|
||||
|
||||
proc rootVid*(t: SomeLedger): CoreDbVidRef =
|
||||
t.distinctBase.rootVid
|
||||
|
|
|
@ -51,7 +51,7 @@ type
|
|||
AccountStateDB* = ref object
|
||||
trie: AccountsTrie
|
||||
originalRoot: KeccakHash # will be updated for every transaction
|
||||
transactionID: CoreDbTxID
|
||||
#transactionID: CoreDbTxID
|
||||
when aleth_compat:
|
||||
cleared: HashSet[EthAddress]
|
||||
|
||||
|
@ -77,7 +77,7 @@ proc newAccountStateDB*(backingStore: CoreDbRef,
|
|||
result.new()
|
||||
result.trie = initAccountsTrie(backingStore, root, pruneTrie)
|
||||
result.originalRoot = root
|
||||
result.transactionID = backingStore.getTransactionID()
|
||||
#result.transactionID = backingStore.getTransactionID()
|
||||
when aleth_compat:
|
||||
result.cleared = initHashSet[EthAddress]()
|
||||
|
||||
|
@ -251,29 +251,33 @@ proc isDeadAccount*(db: AccountStateDB, address: EthAddress): bool =
|
|||
else:
|
||||
result = true
|
||||
|
||||
proc getCommittedStorage*(db: AccountStateDB, address: EthAddress, slot: UInt256): UInt256 =
|
||||
let tmpHash = db.rootHash
|
||||
db.rootHash = db.originalRoot
|
||||
db.transactionID.shortTimeReadOnly():
|
||||
when aleth_compat:
|
||||
if address in db.cleared:
|
||||
debug "Forced contract creation on existing account detected", address
|
||||
result = 0.u256
|
||||
else:
|
||||
result = db.getStorage(address, slot)[0]
|
||||
else:
|
||||
result = db.getStorage(address, slot)[0]
|
||||
db.rootHash = tmpHash
|
||||
# Note: `state_db.getCommittedStorage()` is nowhere used.
|
||||
#
|
||||
#proc getCommittedStorage*(db: AccountStateDB, address: EthAddress, slot: UInt256): UInt256 =
|
||||
# let tmpHash = db.rootHash
|
||||
# db.rootHash = db.originalRoot
|
||||
# db.transactionID.shortTimeReadOnly():
|
||||
# when aleth_compat:
|
||||
# if address in db.cleared:
|
||||
# debug "Forced contract creation on existing account detected", address
|
||||
# result = 0.u256
|
||||
# else:
|
||||
# result = db.getStorage(address, slot)[0]
|
||||
# else:
|
||||
# result = db.getStorage(address, slot)[0]
|
||||
# db.rootHash = tmpHash
|
||||
|
||||
proc updateOriginalRoot*(db: AccountStateDB) =
|
||||
## this proc will be called for every transaction
|
||||
db.originalRoot = db.rootHash
|
||||
# no need to rollback or dispose
|
||||
# transactionID, it will be handled elsewhere
|
||||
db.transactionID = db.db.getTransactionID()
|
||||
|
||||
when aleth_compat:
|
||||
db.cleared.clear()
|
||||
# Note: `state_db.updateOriginalRoot()` is nowhere used.
|
||||
#
|
||||
#proc updateOriginalRoot*(db: AccountStateDB) =
|
||||
# ## this proc will be called for every transaction
|
||||
# db.originalRoot = db.rootHash
|
||||
# # no need to rollback or dispose
|
||||
# # transactionID, it will be handled elsewhere
|
||||
# db.transactionID = db.db.getTransactionID()
|
||||
#
|
||||
# when aleth_compat:
|
||||
# db.cleared.clear()
|
||||
|
||||
proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
|
||||
proc getAccount*(db: ReadOnlyStateDB, address: EthAddress): Account {.borrow.}
|
||||
|
@ -287,4 +291,4 @@ proc hasCodeOrNonce*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
|||
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|
||||
#proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|
||||
|
|
|
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 ## nim-ws
-## Copyright (c) 2021 Status Research & Development GmbH
+## Copyright (c) 2021-2023 Status Research & Development GmbH
 ## Licensed under either of
 ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 ## * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-
 # Nimbus
-# Copyright (c) 2018-2022 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -61,7 +61,7 @@ proc dumpBlocksBegin*(headers: openArray[BlockHeader]): string =
     & "transaction #{headers[0].blockNumber} {headers.len}"

 proc dumpBlocksList*(header: BlockHeader; body: BlockBody): string =
-  &"block {rlp.encode(header).toHex} {rlp.encode(body).toHex}"
+  & "block {rlp.encode(header).toHex} {rlp.encode(body).toHex}"

 proc dumpBlocksEnd*: string =
   "commit"
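Both dump helpers build their output with `std/strformat`'s `&` interpolation macro; the change above merely switches to the spaced spelling (`& "…"`) that the surrounding code already uses. A standalone sketch of the spaced form, with an illustrative proc name and value:

    # Sketch only: demonstrates the spaced strformat spelling used above.
    import std/strformat

    proc dumpBegin(blockNumber: int): string =
      & "transaction #{blockNumber}"   # interpolates blockNumber into the literal

    echo dumpBegin(7)                  # => transaction #7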
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -89,9 +88,6 @@ proc miscRunner(
   test &"High level cascaded fifos API (sample size: {qidSampleSize})":
     check noisy.testFilterFifo(sampleSize = qidSampleSize)

-  test "Multi instances transactions":
-    check noisy.testTxSpanMultiInstances()
-
   test "Short keys and other patholgical cases":
     check noisy.testShortKeys()
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -18,8 +17,7 @@ import
   unittest2,
   stew/endians2,
   ../../nimbus/db/aristo/[
-    aristo_check, aristo_debug, aristo_delete, aristo_desc, aristo_get,
-    aristo_merge],
+    aristo_check, aristo_delete, aristo_desc, aristo_get, aristo_merge],
   ../../nimbus/db/[aristo, aristo/aristo_init/persistent],
   ../replay/xcheck,
   ./test_helpers
@@ -279,6 +277,24 @@ proc revWalkVerify(

   true

+proc mergeRlpData*(
+    db: AristoDbRef;                   # Database, top layer
+    path: PathID;                      # Path into database
+    rlpData: openArray[byte];          # RLP encoded payload data
+      ): Result[void,AristoError] =
+  block body:
+    discard db.merge(
+      LeafTie(
+        root: VertexID(1),
+        path: path.normal),
+      PayloadRef(
+        pType: RlpData,
+        rlpBlob: @rlpData)).valueOr:
+      if error == MergeLeafPathCachedAlready:
+        break body
+      return err(error)
+  ok()
+
 # ------------------------------------------------------------------------------
 # Public test function
 # ------------------------------------------------------------------------------
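For orientation, a minimal usage sketch of the `mergeRlpData` helper added above, assuming the in-memory Aristo setup used elsewhere in these tests; the path prefix and payload below are illustrative:

    # Sketch only: illustrative values, error handling abbreviated.
    import
      stint,
      eth/rlp,
      ../../nimbus/db/aristo

    let db = AristoDbRef.init()              # in-memory instance, no backend
    let rc = db.mergeRlpData(
      PathID(pfx: 1.u256, length: 64),       # illustrative leaf path
      rlp.encode(42'u64))                    # any RLP encoded payload
    if rc.isErr:
      echo "merge failed: ", rc.error        # AristoError code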
@@ -456,242 +472,6 @@ proc testTxMergeProofAndKvpList*(
     " groups=", count, " proved=", proved.pp, " merged=", merged.pp
   true

-
-proc testTxSpanMultiInstances*(
-    noisy: bool;
-    genBase = 42;
-      ): bool =
-  ## Test multi tx behaviour with span synchronisation
-  ##
-  let
-    db = AristoDbRef.init() # no backend needed
-  var
-    dx: seq[AristoDbRef]
-
-  var genID = genBase
-  proc newPathID(): PathID =
-    result = PathID(pfx: genID.u256, length: 64)
-    genID.inc
-  proc newPayload(): Blob =
-    result = @[genID].encode
-    genID.inc
-
-  proc show(serial = -42) =
-    var s = ""
-    if 0 <= serial:
-      s &= "n=" & $serial
-    s &= "\n   db level=" & $db.level
-    s &= " inTxSpan=" & $db.inTxSpan
-    s &= " nForked=" & $db.nForked
-    s &= " nTxSpan=" & $db.nTxSpan
-    s &= "\n    " & db.pp
-    for n,w in dx:
-      s &= "\n"
-      s &= "\n   dx[" & $n & "]"
-      s &= " level=" & $w.level
-      s &= " inTxSpan=" & $w.inTxSpan
-      s &= "\n    " & w.pp
-    noisy.say "***", s, "\n"
-
-  # Add some data and first transaction
-  block:
-    let rc = db.merge(newPathID(), newPayload())
-    xCheckRc rc.error == 0
-  block:
-    let rc = db.checkTop(relax=true)
-    xCheckRc rc.error == (0,0)
-  xCheck not db.inTxSpan
-
-  # Fork and populate two more instances
-  for _ in 1 .. 2:
-    block:
-      let rc = db.forkTop
-      xCheckRc rc.error == 0
-      dx.add rc.value
-    block:
-      let rc = dx[^1].merge(newPathID(), newPayload())
-      xCheckRc rc.error == 0
-    block:
-      let rc = db.checkTop(relax=true)
-      xCheckRc rc.error == (0,0)
-    xCheck not dx[^1].inTxSpan
-
-  #show(1)
-
-  # Span transaction on a non-centre instance fails but succeeds on centre
-  block:
-    let rc = dx[0].txBeginSpan
-    xCheck rc.isErr
-    xCheck rc.error == TxSpanOffCentre
-  block:
-    let rc = db.txBeginSpan
-    xCheckRc rc.error == 0
-
-  # Now all instances have transactions level 1
-  xCheck db.level == 1
-  xCheck db.inTxSpan
-  xCheck db.nForked == dx.len
-  xCheck db.nTxSpan == dx.len + 1
-  for n in 0 ..< dx.len:
-    xCheck dx[n].level == 1
-    xCheck dx[n].inTxSpan
-
-  #show(2)
-
-  # Add more data ..
-  block:
-    let rc = db.merge(newPathID(), newPayload())
-    xCheckRc rc.error == 0
-  for n in 0 ..< dx.len:
-    let rc = dx[n].merge(newPathID(), newPayload())
-    xCheckRc rc.error == 0
-
-  #show(3)
-
-  # Span transaction on a non-centre instance fails but succeeds on centre
-  block:
-    let rc = dx[0].txBeginSpan
-    xCheck rc.isErr
-    xCheck rc.error == TxSpanOffCentre
-  block:
-    let rc = db.txBegin
-    xCheckRc rc.error == 0
-
-  # Now all instances have transactions level 2
-  xCheck db.level == 2
-  xCheck db.inTxSpan
-  xCheck db.nForked == dx.len
-  xCheck db.nTxSpan == dx.len + 1
-  for n in 0 ..< dx.len:
-    xCheck dx[n].level == 2
-    xCheck dx[n].inTxSpan
-
-  #show(4)
-
-  # Fork first transaction from a forked instance
-  block:
-    let rc = dx[0].txTop.value.parent.forkTx
-    xCheckRc rc.error == 0
-    dx.add rc.value
-
-  # No change for the other instances
-  xCheck db.level == 2
-  xCheck db.inTxSpan
-  xCheck db.nForked == dx.len
-  for n in 0 ..< dx.len - 1:
-    xCheck dx[n].level == 2
-    xCheck dx[n].inTxSpan
-
-  # This here has changed
-  xCheck db.nTxSpan == dx.len
-  xCheck not dx[^1].inTxSpan
-  xCheck dx[^1].level == 1
-
-  # Add transaction outside tx span
-  block:
-    let rc = dx[^1].txBegin
-    xCheckRc rc.error == 0
-  xCheck not dx[^1].inTxSpan
-  xCheck dx[^1].level == 2
-
-  # No change for the other instances
-  xCheck db.level == 2
-  xCheck db.inTxSpan
-  xCheck db.nForked == dx.len
-  xCheck db.nTxSpan == dx.len
-  for n in 0 ..< dx.len - 1:
-    xCheck dx[n].level == 2
-    xCheck dx[n].inTxSpan
-
-  #show(5)
-
-  # Commit on a non-centre span instance fails but succeeds on centre
-  block:
-    let rc = dx[0].txTop.value.commit
-    xCheck rc.isErr
-    xCheck rc.error == TxSpanOffCentre
-  block:
-    let rc = db.txTop.value.commit
-    xCheckRc rc.error == 0
-  block:
-    let rc = db.check() # full check as commit hashifies
-    xCheckRc rc.error == (0,0)
-  for n in 0 ..< dx.len - 1:
-    let rc = dx[n].check()
-    xCheckRc rc.error == (0,0)
-
-  # Verify changes for the span instances
-  xCheck db.level == 1
-  xCheck db.inTxSpan
-  xCheck db.nForked == dx.len
-  xCheck db.nTxSpan == dx.len
-  for n in 0 ..< dx.len - 1:
-    xCheck dx[n].level == 1
-    xCheck dx[n].inTxSpan
-
-  # No changes for the instance outside tx span
-  xCheck not dx[^1].inTxSpan
-  xCheck dx[^1].level == 2
-
-  #show(6)
-
-  # Destroy one instance from the span instances
-  block:
-    let
-      dxTop = dx.pop
-      rc = dx[^1].forget
-    xCheckRc rc.error == 0
-    dx[^1] = dxTop
-
-  # Verify changes for the span instances
-  xCheck db.level == 1
-  xCheck db.inTxSpan
-  xCheck db.nForked == dx.len
-  xCheck db.nTxSpan == dx.len
-  for n in 0 ..< dx.len - 1:
-    xCheck dx[n].level == 1
-    xCheck dx[n].inTxSpan
-
-  # No changes for the instance outside tx span
-  xCheck not dx[^1].inTxSpan
-  xCheck dx[^1].level == 2
-
-  # Finish up span instances
-  block:
-    let rc = db.txTop.value.collapse(commit = true)
-    xCheckRc rc.error == 0
-  block:
-    let rc = db.check() # full check as commit hashifies
-    xCheckRc rc.error == (0,0)
-  for n in 0 ..< dx.len - 1:
-    let rc = dx[n].check()
-    xCheckRc rc.error == (0,0)
-
-  # No span instances anymore
-  xCheck db.level == 0
-  xCheck not db.inTxSpan
-  xCheck db.nForked == dx.len
-  xCheck db.nTxSpan == 0
-  for n in 0 ..< dx.len - 1:
-    xCheck dx[n].level == 0
-    xCheck not dx[n].inTxSpan
-
-  #show(7)
-
-  # Clean up
-  block:
-    let rc = db.forgetOthers()
-    xCheckRc rc.error == 0
-    dx.setLen(0)
-  xCheck db.level == 0
-  xCheck not db.inTxSpan
-  xCheck db.nForked == 0
-  xCheck db.nTxSpan == 0
-
-  #show(8)
-
-  true
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
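With the span feature gone, transactions are managed per descriptor only. A rough sketch of the flow that remains, using the same calls and check helpers as this test file (setup abbreviated; assertions illustrative):

    # Sketch only: per-descriptor transaction cycle, no cross-instance sync.
    let db = AristoDbRef.init()
    block:
      let rc = db.txBegin                # starts level 1 on `db` alone
      xCheckRc rc.error == 0
    block:
      let rc = db.txTop.value.commit     # commits `db`'s top transaction;
      xCheckRc rc.error == 0             # forked peers are unaffected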
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 # * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,6 +1,6 @@
 # Nimbus - Types, data structures and shared utilities used in network sync
 #
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -150,7 +150,7 @@ when isMainModule:
   #   dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
   # For specs see `tests/test_coredb/bulk_test_xx.nim`.
   var testList = @[bulkTest0] # This test is superseded by `bulkTest1` and `2`
-  testList = @[failSample0]
+  #testList = @[failSample0]
   when true and false:
     testList = @[bulkTest2, bulkTest3]

@@ -1,5 +1,5 @@
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,6 @@
 # Nimbus - Types, data structures and shared utilities used in network sync
 #
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 # * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # nim-graphql
-# Copyright (c) 2021 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 # * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2021-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 # * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,6 +1,5 @@
-# Nimbus - Types, data structures and shared utilities used in network sync
-#
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Nimbus
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -1,5 +1,5 @@
 # nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license: [LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT
 # * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2019 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)