Added delete functionality (#1596)

Jordan Hrycaj 2023-06-02 20:21:46 +01:00 committed by GitHub
parent 099444ab3f
commit 11bb33d0bc
6 changed files with 349 additions and 19 deletions


@@ -0,0 +1,135 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie delete functionality
## ===============================================
##
## Delete by `Hike` type chain of vertices.

{.push raises: [].}

import
  std/[sets, tables],
  chronicles,
  eth/[common, trie/nibbles],
  stew/results,
  "."/[aristo_constants, aristo_desc, aristo_error, aristo_get, aristo_hike,
       aristo_path, aristo_vid]

logScope:
  topics = "aristo-delete"

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc branchStillNeeded(vtx: VertexRef): bool =
  for n in 0 .. 15:
    if vtx.bVid[n] != VertexID(0):
      return true

proc clearKey(db: AristoDbRef; vid: VertexID) =
  let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
  if key != EMPTY_ROOT_KEY:
    db.kMap.del vid
    db.pAmk.del key

proc doneWith(db: AristoDbRef; vid: VertexID) =
  # Remove entry
  db.vidDispose vid
  db.sTab.del vid
  db.clearKey vid # Update Merkle hash

proc deleteImpl(
    hike: Hike;                        # Fully expanded path
    pathTag: NodeTag;                  # `Patricia Trie` path root-to-leaf
    db: AristoDbRef;                   # Database, top layer
      ): Result[void,(VertexID,AristoError)] =
  ## Implementation of *delete* functionality.
  if hike.error != AristoError(0):
    if 0 < hike.legs.len:
      return err((hike.legs[^1].wp.vid,hike.error))
    return err((VertexID(0),hike.error))

  # doAssert 0 < hike.legs.len and hike.tail.len == 0 # as assured by `hikeUp()`
  var inx = hike.legs.len - 1

  # Remove leaf entry on the top
  let lf = hike.legs[inx].wp
  if lf.vtx.vType != Leaf:
    return err((lf.vid,DelLeafExpexted))
  if lf.vid in db.pPrf:
    return err((lf.vid, DelLeafLocked))
  db.doneWith lf.vid
  inx.dec

  while 0 <= inx:
    # Unlink child node
    let br = hike.legs[inx].wp
    if br.vtx.vType != Branch:
      return err((br.vid,DelBranchExpexted))
    if br.vid in db.pPrf:
      return err((br.vid, DelBranchLocked))
    br.vtx.bVid[hike.legs[inx].nibble] = VertexID(0)

    if br.vtx.branchStillNeeded:
      db.clearKey br.vid
      break

    # Remove this `Branch` entry
    db.doneWith br.vid
    inx.dec

    if inx < 0:
      break

    # There might be an optional `Extension` to remove
    let ext = hike.legs[inx].wp
    if ext.vtx.vType == Extension:
      if ext.vid in db.pPrf:
        return err((ext.vid, DelExtLocked))
      db.doneWith ext.vid
      inx.dec

  # Delete leaf entry
  db.lTab.del pathTag
  if db.lTab.len == 0:
    db.lRoot = VertexID(0)

  ok()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc delete*(
    hike: Hike;                        # Fully expanded chain of vertices
    db: AristoDbRef;                   # Database, top layer
      ): Result[void,(VertexID,AristoError)] =
  ## Delete argument `hike` chain of vertices from the database
  # Need path in order to remove it from `lTab[]`
  let pathTag = block:
    let rc = hike.to(NibblesSeq).pathToTag()
    if rc.isErr:
      return err((VertexID(0),DelPathTagError))
    rc.value
  hike.deleteImpl(pathTag, db)

proc delete*(
    pathTag: NodeTag;                  # `Patricia Trie` path root-to-leaf
    db: AristoDbRef;                   # Database, top layer
      ): Result[void,(VertexID,AristoError)] =
  ## Variant of `delete()`
  pathTag.hikeUp(db.lRoot, db).deleteImpl(pathTag, db)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

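Note (not part of the commit): a minimal usage sketch of the new `delete()` entry points. The import paths and the `wipeLeaf` helper name are illustrative assumptions; the API calls themselves come from the module above.

import
  stew/results,
  ./aristo_delete, ./aristo_desc   # paths are illustrative

proc wipeLeaf(db: AristoDbRef; pathTag: NodeTag): bool =
  ## Remove the leaf addressed by `pathTag` from the top database layer.
  ## The `NodeTag` variant of `delete()` runs `hikeUp()` from `db.lRoot`
  ## itself, so no pre-expanded `Hike` argument is needed here.
  let rc = pathTag.delete(db)
  if rc.isErr:
    # `rc.error` is a `(VertexID,AristoError)` pair naming the offending vertex
    return false
  true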

@@ -118,4 +118,12 @@ type
     NearbyPathTailInxOverflow
     NearbyUnexpectedVtx
 
+    # Deletion of vertices, `delete()`
+    DelPathTagError
+    DelLeafExpexted
+    DelLeafLocked
+    DelBranchExpexted
+    DelBranchLocked
+    DelExtLocked
+
 # End


@@ -64,6 +64,9 @@ proc `xPfx=`(vtx: VertexRef, val: NibblesSeq) =
   of Branch:
     doAssert vtx.vType != Branch # Ooops
 
+# ------------------------------------------------------------------------------
+# Private helpers
+# ------------------------------------------------------------------------------
 
 proc clearMerkleKeys(
     db: AristoDbRef;                 # Database, top layer


@@ -75,7 +75,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =
 # ------------------------------------------------------------------------------

 proc complete(
-    hike: Hike;                      # Partially expanded path
+    hike: Hike;                      # Partially expanded chain of vertices
     vid: VertexID;                   # Start ID
     db: AristoDbRef;                 # Database layer
     hikeLenMax: static[int];         # Beware of loops (if any)
@@ -123,7 +123,7 @@ proc complete(

 proc zeroAdjust(
-    hike: Hike;                      # Partially expanded path
+    hike: Hike;                      # Partially expanded chain of vertices
     db: AristoDbRef;                 # Database layer
     doLeast: static[bool];           # Direction: *least* or *most*
       ): Hike =
@@ -204,9 +204,9 @@ proc zeroAdjust(

 proc finalise(
-    hike: Hike;                      # Partially expanded path
+    hike: Hike;                      # Partially expanded chain of vertices
     db: AristoDbRef;                 # Database layer
-    moveRight: static[bool];         # Direction of next node
+    moveRight: static[bool];         # Direction of next vertex
       ): Hike =
   ## Handle some pathological cases after main processing failed
   proc beyond(p: Hike; pfx: NibblesSeq): bool =
@@ -258,10 +258,10 @@ proc finalise(

 proc nearbyNext(
-    hike: Hike;                      # Partially expanded path
+    hike: Hike;                      # Partially expanded chain of vertices
     db: AristoDbRef;                 # Database layer
     hikeLenMax: static[int];         # Beware of loops (if any)
-    moveRight: static[bool];         # Direction of next node
+    moveRight: static[bool];         # Direction of next vertex
       ): Hike =
   ## Unified implementation of `nearbyRight()` and `nearbyLeft()`.
   proc accept(nibble: int8): bool =
@@ -364,11 +364,11 @@ proc nearbyNext(

 proc nearbyNext(
-    baseTag: NodeTag;                # Some node
+    baseTag: NodeTag;                # Some `Patricia Trie` path
     root: VertexID;                  # State root
     db: AristoDbRef;                 # Database layer
     hikeLenMax: static[int];         # Beware of loops (if any)
-    moveRight: static[bool];         # Direction of next node
+    moveRight: static[bool];         # Direction of next vertex
       ): Result[NodeTag,AristoError] =
   ## Variant of `nearbyNext()`, convenience wrapper
   let hike = baseTag.hikeUp(root,db).nearbyNext(db, hikeLenMax, moveRight)
@@ -388,7 +388,7 @@ proc nearbyNext(
 # ------------------------------------------------------------------------------

 proc nearbyRight*(
-    hike: Hike;                      # Partially expanded path
+    hike: Hike;                      # Partially expanded chain of vertices
     db: AristoDbRef;                 # Database layer
       ): Hike =
   ## Extends the maximally extended argument nodes `hike` to the right (i.e.
@@ -403,7 +403,7 @@ proc nearbyRight*(
   hike.nearbyNext(db, 64, moveRight=true)

 proc nearbyRight*(
-    nodeTag: NodeTag;                # Some node
+    nodeTag: NodeTag;                # Some `Patricia Trie` path
     root: VertexID;                  # State root
     db: AristoDbRef;                 # Database layer
       ): Result[NodeTag,AristoError] =
@@ -412,7 +412,7 @@ proc nearbyRight*(
   nodeTag.nearbyNext(root, db, 64, moveRight=true)

 proc nearbyLeft*(
-    hike: Hike;                      # Partially expanded path
+    hike: Hike;                      # Partially expanded chain of vertices
     db: AristoDbRef;                 # Database layer
       ): Hike =
   ## Similar to `nearbyRight()`.
@@ -422,7 +422,7 @@ proc nearbyLeft*(
   hike.nearbyNext(db, 64, moveRight=false)

 proc nearbyLeft*(
-    nodeTag: NodeTag;                # Some node
+    nodeTag: NodeTag;                # Some `Patricia Trie` path
     root: VertexID;                  # State root
     db: AristoDbRef;                 # Database layer
       ): Result[NodeTag,AristoError] =
@@ -435,7 +435,7 @@ proc nearbyLeft*(
 # ------------------------------------------------------------------------------

 proc nearbyRightMissing*(
-    hike: Hike;                      # Partially expanded path
+    hike: Hike;                      # Partially expanded chain of vertices
     db: AristoDbRef;                 # Database layer
       ): Result[bool,AristoError] =
   ## Returns `true` if the maximally extended argument nodes `hike` is the


@@ -23,7 +23,8 @@ import
   ../nimbus/sync/snap/worker/db/[rocky_bulk_load, snapdb_accounts, snapdb_desc],
   ./replay/[pp, undump_accounts, undump_storages],
   ./test_sync_snap/[snap_test_xx, test_accounts, test_types],
-  ./test_aristo/[test_helpers, test_merge, test_nearby, test_transcode]
+  ./test_aristo/[
+    test_delete, test_helpers, test_merge, test_nearby, test_transcode]

 const
   baseDir = [".", "..", ".."/"..", $DirSep]
@@ -200,6 +201,9 @@ proc accountsRunner(noisy=true; sample=accSample, resetDb=false) =
     test &"Traverse accounts database w/{accLst.len} account lists":
       noisy.test_nearbyKvpList(accLst, resetDb)

+    test &"Delete accounts database, successively {accLst.len} entries":
+      noisy.test_delete accLst
+
 proc storagesRunner(
     noisy = true;
@@ -223,6 +227,9 @@ proc storagesRunner(
     test &"Traverse storage slots database w/{stoLst.len} account lists":
       noisy.test_nearbyKvpList(stoLst, resetDb)

+    test &"Delete storage database, successively {stoLst.len} entries":
+      noisy.test_delete stoLst
+
 # ------------------------------------------------------------------------------
 # Main function(s)
 # ------------------------------------------------------------------------------
@@ -237,7 +244,7 @@ when isMainModule:
     noisy = defined(debug) or true

   # Borrowed from `test_sync_snap.nim`
-  when true and false:
+  when true: # and false:
     for n,sam in snapTestList:
       noisy.transcodeRunner(sam)
     for n,sam in snapTestStorageList:
@@ -248,18 +255,18 @@ when isMainModule:
     import ./test_sync_snap/snap_other_xx
     noisy.showElapsed("@snap_other_xx"):
       for n,sam in snapOtherList:
-        noisy.accountsRunner(sam)
+        noisy.accountsRunner(sam, resetDb=true)

   # This one uses dumps from the external `nimbus-eth1-blob` repo
-  when true: # and false:
+  when true and false:
     import ./test_sync_snap/snap_storage_xx
     let knownFailures: KnownHasherFailure = @[
       ("storages5__34__41_dump#10.20512",(VertexID(1),HashifyRootHashMismatch)),
     ]
     noisy.showElapsed("@snap_storage_xx"):
       for n,sam in snapStorageList:
-        noisy.accountsRunner(sam)
-        noisy.storagesRunner(sam,oops=knownFailures)
+        noisy.accountsRunner(sam, resetDb=true)
+        noisy.storagesRunner(sam, resetDb=true, oops=knownFailures)

   when true: # and false:
     for n,sam in snapTestList:

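Aside (not from the commit): the `when true and false:` vs `when true: # and false:` lines flipped above are the compile-time on/off switch used throughout this test runner. A tiny stand-alone illustration of the idiom:

# Hypothetical stand-alone illustration of the toggle idiom used above.
when true: # and false:
  echo "block enabled: it is compiled and runs"   # re-join "and false" to disable

when true and false:
  echo "block disabled: statically false, never compiled into the binary"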

@@ -0,0 +1,177 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Aristo (aka Patricia) DB records delete test

import
  std/[algorithm, bitops, sequtils],
  eth/common,
  stew/results,
  unittest2,
  ../../nimbus/db/aristo/[
    aristo_desc, aristo_delete, aristo_error,
    aristo_hashify, aristo_nearby, aristo_merge],
  ../../nimbus/sync/snap/range_desc,
  ./test_helpers

type
  TesterDesc = object
    prng: uint32                       ## random state

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc posixPrngRand(state: var uint32): byte =
  ## POSIX.1-2001 example of a rand() implementation, see manual page rand(3).
  state = state * 1103515245 + 12345;
  let val = (state shr 16) and 32767    # mod 2^31
  (val shr 8).byte                      # Extract second byte

proc rand[W: SomeInteger|VertexID](ap: var TesterDesc; T: type W): T =
  var a: array[sizeof T,byte]
  for n in 0 ..< sizeof T:
    a[n] = ap.prng.posixPrngRand().byte
  when sizeof(T) == 1:
    let w = uint8.fromBytesBE(a).T
  when sizeof(T) == 2:
    let w = uint16.fromBytesBE(a).T
  when sizeof(T) == 4:
    let w = uint32.fromBytesBE(a).T
  else:
    let w = uint64.fromBytesBE(a).T
  when T is SomeUnsignedInt:
    # That way, `fromBytesBE()` can be applied to `uint`
    result = w
  else:
    # That way the result is independent of endianness
    (addr result).copyMem(unsafeAddr w, sizeof w)

proc init(T: type TesterDesc; seed: int): TesterDesc =
  result.prng = (seed and 0x7fffffff).uint32

proc rand(td: var TesterDesc; top: int): int =
  if 0 < top:
    let mask = (1 shl (8 * sizeof(int) - top.countLeadingZeroBits)) - 1
    for _ in 0 ..< 100:
      let w = mask and td.rand(typeof(result))
      if w < top:
        return w
    raiseAssert "Not here (!)"

# -----------------------

proc fwdWalkVerify(
    db: AristoDbRef;
    noisy: bool;
      ): tuple[visited: int, error: AristoError] =
  let
    lTabLen = db.lTab.len
  var
    error = AristoError(0)
    tag: NodeTag
    n = 0
  while n < lTabLen + 1:
    let rc = tag.nearbyRight(db.lRoot, db) # , noisy)
    #noisy.say "=================== ", n
    if rc.isErr:
      if rc.error != NearbyBeyondRange:
        noisy.say "***", "<", n, "/", lTabLen-1, "> fwd-walk error=", rc.error
        error = rc.error
        check rc.error == AristoError(0)
      break
    if rc.value < high(NodeTag):
      tag = (rc.value.u256 + 1).NodeTag
    n.inc

  if error != AristoError(0):
    return (n,error)
  if n != lTabLen:
    check n == lTabLen
    return (-1, AristoError(1))

  (0, AristoError(0))

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------

proc test_delete*(
    noisy: bool;
    list: openArray[ProofTrieData];
      ) =
  var td = TesterDesc.init 42
  for n,w in list:
    let
      db = AristoDbRef()
      lstLen = list.len
      added = db.merge w.kvpLst
    if added.error != AristoError(0):
      check added.error == AristoError(0)
      return
    let rc = db.hashify
    if rc.isErr:
      check rc.error == (VertexID(0),AristoError(0))
      return
    # Now `db` represents a (fully labelled) `Merkle Patricia Tree`

    # Provide a (reproducible) pseudo-random copy of the leafs list
    var leafs = db.lTab.keys.toSeq.mapIt(it.UInt256).sorted.mapIt(it.NodeTag)
    if 2 < leafs.len:
      for n in 0 ..< leafs.len-1:
        let r = n + td.rand(leafs.len - n)
        leafs[n].swap leafs[r]

    let uMax = leafs.len - 1
    for u,pathTag in leafs:
      let rc = pathTag.delete(db) # , noisy=(tags.len < 2))
      if rc.isErr:
        check rc.error == (VertexID(0),AristoError(0))
        return
      if pathTag in db.lTab:
        check pathTag notin db.lTab
        return
      if uMax != db.lTab.len + u:
        check uMax == db.lTab.len + u
        return

      # Walking the database is too slow for large tables. So the hope is that
      # potential errors will not go away and rather pop up later, as well.
      const tailCheck = 999
      if uMax < u + tailCheck:
        if u < uMax:
          let vfy = db.fwdWalkVerify(noisy)
          if vfy.error != AristoError(0):
            check vfy == (0, AristoError(0))
            return
        elif 0 < db.sTab.len:
          check db.sTab.len == 0
          return
        let rc = db.hashifyCheck(relax=true)
        if rc.isErr:
          check rc.error == (VertexID(0),AristoError(0))
          return

      when true and false:
        if uMax < u + tailCheck or (u mod 777) == 3:
          noisy.say "***", "step lTab=", db.lTab.len

    when true and false:
      noisy.say "***", "sample <", n, "/", list.len-1, ">",
        " lstLen=", w.kvpLst.len

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
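
Closing note (not part of the commit): the deletion order in the test above comes from a seeded POSIX-style PRNG driving an in-place shuffle, so every run deletes the leafs in the same pseudo-random order. A stand-alone sketch of that pattern, with hypothetical sample data:

# Reproducible shuffle sketch: same seed => same permutation on every run.
proc posixPrngRand(state: var uint32): byte =
  ## POSIX.1-2001 rand() example, as in the test module above.
  state = state * 1103515245 + 12345
  let val = (state shr 16) and 32767
  (val shr 8).byte

when isMainModule:
  var
    prng = 42'u32                      # fixed seed, as in `TesterDesc.init 42`
    items = @[10, 20, 30, 40, 50]      # hypothetical stand-in for the leaf list
  for n in 0 ..< items.len - 1:
    # pick a pseudo-random position in items[n .. ^1]
    let r = n + int(prng.posixPrngRand()) mod (items.len - n)
    swap(items[n], items[r])
  echo items                           # deterministic order across runs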