Aristo db update for short nodes key edge cases (#1887)
* Aristo: Provide key-value list signature calculator

  detail: Simple wrappers around `Aristo` core functionality

* Update new API for `CoreDb`

  details:
  + Renamed new API functions `contains()` => `hasKey()` or `hasPath()`
    which disables the `in` operator on non-boolean `contains()` functions
  + The functions `get()` and `fetch()` always return a not-found error if
    there is no item available. The new functions `getOrEmpty()` and
    `mergeOrEmpty()` return an empty `Blob` if there is no such key found.

* Rewrite `core_apps.nim` using new API from `CoreDb`

* Use `Aristo` functionality for calculating Merkle signatures

  details: For debugging, `VerifyAristoForMerkleRootCalc` can be set so
  that `Aristo` results will be verified against the legacy versions.

* Provide general interface for Merkle signing key-value tables

  details: Export `Aristo` wrappers

* Activate `CoreDb` tests

  why: Now the API seems to be stable enough for general tests.

* Update `toHex()` usage

  why: Byteutils' `toHex()` is superior to `toSeq.mapIt(it.toHex(2)).join`

* Split `aristo_transcode` => `aristo_serialise` + `aristo_blobify`

  why:
  + Different modules for different purposes
  + `aristo_serialise`: RLP encoding/decoding
  + `aristo_blobify`: Aristo database encoding/decoding

* Compacted representation of small nodes' links instead of Keccak hashes
  (a short sketch follows the commit metadata below)

  why: Ethereum MPTs use Keccak hashes as node links if the size of an RLP
  encoded node is at least 32 bytes. Otherwise, the RLP encoded node value
  is used as a pseudo node link (rather than a hash.) Such a node is not
  stored on the key-value database. Rather, the RLP encoded node value is
  stored in the parent node instead of a node link. Only for the root hash,
  the top level node is always referred to by the hash.

  This feature needed an abstraction of the `HashKey` object which is now
  either a hash or a blob of length at most 31 bytes. This leaves two ways
  of representing an empty/void `HashKey` type: either as an empty blob of
  zero length, or as the hash of an empty blob.

* Update `CoreDb` interface (mainly reducing logger noise)

* Fix copyright years (to make `Lint` happy)
This commit is contained in:
parent 03a739ff1b
commit 4feaa2cfab
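The compaction rule from the commit message can be made concrete with a small
stand-alone sketch. This is an illustration only; `toLink` is a hypothetical
helper name and `nimcrypto` an assumed dependency, neither is part of the
commit itself:

  # Hedged sketch of the node-link rule described in the commit message.
  import nimcrypto

  proc toLink(rlpNode: seq[byte]): seq[byte] =
    ## Nodes whose RLP encoding is shorter than 32 bytes are embedded
    ## verbatim in the parent node; larger nodes are referenced by hash.
    if rlpNode.len < 32:
      rlpNode                               # pseudo link: the encoding itself
    else:
      @(keccak256.digest(rlpNode).data)     # real link: 32 byte Keccak hash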
@@ -14,16 +14,13 @@
{.push raises: [].}

import aristo/[
  aristo_constants, aristo_delete, aristo_fetch, aristo_init,
  aristo_merge, aristo_nearby, aristo_tx, aristo_utils, aristo_walk]
  aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
  aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
  aristo_walk]
export
  aristo_constants, aristo_delete, aristo_fetch, aristo_init,
  aristo_merge, aristo_nearby, aristo_tx, aristo_utils, aristo_walk

import
  aristo/aristo_transcode
export
  append, read, serialise
  aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
  aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
  aristo_walk

import
  aristo/aristo_get

@@ -50,6 +47,7 @@ export
  AristoDbRef,
  AristoError,
  AristoTxRef,
  MerkleSignRef,
  forget,
  isValid

@@ -388,7 +388,7 @@ assumed, i.e. the list with the single vertex ID *1*.
        88 +--+--+--+--+--+ .. --+
           ...                          -- more unused vertex ID
        N1 +--+--+--+--+
           || |                         -- flg(3) + vtxLen(29), 1st triplet
           || |                         -- flg(2) + vtxLen(30), 1st triplet
           +--+--+--+--+--+ .. --+
           |  |                         -- vertex ID of first triplet
           +--+--+--+--+--+ .. --+--+ .. --+

@@ -396,7 +396,7 @@ assumed, i.e. the list with the single vertex ID *1*.
           +--+--+--+--+--+ .. --+--+ .. --+
           ...                          -- optional vertex record
        N2 +--+--+--+--+
           || |                         -- flg(3) + vtxLen(29), 2nd triplet
           || |                         -- flg(2) + vtxLen(30), 2nd triplet
           +--+--+--+--+
           ...
           +--+

@@ -404,23 +404,22 @@ assumed, i.e. the list with the single vertex ID *1*.
           +--+

        where
          + minimum size of an empty filer is 72 bytes
          + minimum size of an empty filter is 72 bytes

          + the flg(3) represents the tuple (key-mode,vertex-mode) encoding
            the serialised storage states
          + the flg(2) represents a bit tuple encoding the serialised storage
            modes for the optional 32 bytes hash key:

            0 -- encoded and present
            0 -- not encoded, to be ignored
            1 -- not encoded, void => considered deleted
            2 -- not encoded, to be ignored
            2 -- present, encoded as-is (32 bytes)
            3 -- present, encoded as (len(1),data,zero-padding)

            so, when encoded as
          + the vtxLen(30) is the number of bytes of the optional vertex record
            which has maximum size 2^30-2 which is short of 1 GiB. The value
            2^30-1 (i.e. 0x3fffffff) is reserved for indicating that there is
            no vertex record following and it should be considered deleted.

            flg(3) = key-mode * 3 + vertex-mode

            the tuple (2,2) will never occur and flg(3) < 9

          + the vtxLen(29) is the number of bytes of the optional vertex record
            which has maximum size 2^29-1 which is short of 512 MiB
          + there is no blind entry, i.e. either flg(2) != 0 or vtxLen(30) != 0.

          + the marker(8) is the eight bit array *0111-1101*

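To make the new triplet prefix concrete, here is a hedged sketch of the
flg(2)/vtxLen(30) packing described above. The constant and helper names are
illustrative only, they do not appear in the repository:

  # Illustrative packing of one triplet prefix: two key-mode bits plus a
  # 30 bit vertex record length, with 2^30-1 doubling as "deleted" marker.
  const
    KeyModeShift = 30
    VtxLenMask   = 0x3fff_ffff'u32

  func packPrefix(keyMode, vtxLen: uint32): uint32 =
    ## keyMode: 0 ignore, 1 void/deleted, 2 hash as-is, 3 (len,data,padding)
    assert keyMode < 4 and vtxLen <= VtxLenMask
    (keyMode shl KeyModeShift) or vtxLen

  func unpackPrefix(pfx: uint32): tuple[keyMode, vtxLen: uint32] =
    (pfx shr KeyModeShift, pfx and VtxLenMask)

  assert packPrefix(2, 0).unpackPrefix == (2u32, 0u32)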
@@ -12,26 +12,15 @@
import
  std/[bitops, sequtils, sets],
  eth/[common, rlp, trie/nibbles],
  eth/[common, trie/nibbles],
  results,
  stew/endians2,
  "."/[aristo_constants, aristo_desc, aristo_get]

# Annotation helper
{.pragma: noRaise, gcsafe, raises: [].}

type
  ResolveVidFn = proc(vid: VertexID): Result[HashKey,AristoError] {.noRaise.}
    ## Resolve storage root vertex ID
  ./aristo_desc

# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------

proc aristoError(error: AristoError): NodeRef =
  ## Allows returning de
  NodeRef(vType: Leaf, error: error)

proc load64(data: Blob; start: var int): Result[uint64,AristoError] =
  if data.len < start + 9:
    return err(DeblobPayloadTooShortInt64)

@@ -46,154 +35,6 @@ proc load256(data: Blob; start: var int): Result[UInt256,AristoError] =
  start += 32
  ok val

proc serialise(
    pyl: PayloadRef;
    getKey: ResolveVidFn;
      ): Result[Blob,(VertexID,AristoError)] =
  ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
  ## account type, otherwise pass the data as is.
  ##
  case pyl.pType:
  of RawData:
    ok pyl.rawBlob
  of RlpData:
    ok pyl.rlpBlob
  of AccountData:
    let
      vid = pyl.account.storageID
      key = block:
        if not vid.isValid:
          VOID_HASH_KEY
        else:
          let rc = vid.getKey
          if rc.isErr:
            return err((vid,rc.error))
          rc.value
    ok rlp.encode Account(
      nonce: pyl.account.nonce,
      balance: pyl.account.balance,
      storageRoot: key.to(Hash256),
      codeHash: pyl.account.codeHash)

# ------------------------------------------------------------------------------
# Public RLP transcoder mixins
# ------------------------------------------------------------------------------

proc read*(rlp: var Rlp; T: type NodeRef): T {.gcsafe, raises: [RlpError].} =
  ## Mixin for RLP writer, see `fromRlpRecord()` for an encoder with detailed
  ## error return code (if needed.) This reader is a jazzed up version which
  ## reports some particular errors in the `Dummy` type node.
  if not rlp.isList:
    # Otherwise `rlp.items` would raise a `Defect`
    return aristoError(Rlp2Or17ListEntries)

  var
    blobs = newSeq[Blob](2)         # temporary, cache
    links: array[16,HashKey]        # reconstruct branch node
    top = 0                         # count entries and positions

  # Collect lists of either 2 or 17 blob entries.
  for w in rlp.items:
    case top
    of 0, 1:
      if not w.isBlob:
        return aristoError(RlpBlobExpected)
      blobs[top] = rlp.read(Blob)
    of 2 .. 15:
      if not links[top].init(rlp.read(Blob)):
        return aristoError(RlpBranchLinkExpected)
    of 16:
      if not w.isBlob:
        return aristoError(RlpBlobExpected)
      if 0 < rlp.read(Blob).len:
        return aristoError(RlpEmptyBlobExpected)
    else:
      return aristoError(Rlp2Or17ListEntries)
    top.inc

  # Verify extension data
  case top
  of 2:
    if blobs[0].len == 0:
      return aristoError(RlpNonEmptyBlobExpected)
    let (isLeaf, pathSegment) = hexPrefixDecode blobs[0]
    if isLeaf:
      return NodeRef(
        vType: Leaf,
        lPfx: pathSegment,
        lData: PayloadRef(
          pType: RawData,
          rawBlob: blobs[1]))
    else:
      var node = NodeRef(
        vType: Extension,
        ePfx: pathSegment)
      if not node.key[0].init(blobs[1]):
        return aristoError(RlpExtPathEncoding)
      return node
  of 17:
    for n in [0,1]:
      if not links[n].init(blobs[n]):
        return aristoError(RlpBranchLinkExpected)
    return NodeRef(
      vType: Branch,
      key: links)
  else:
    discard

  aristoError(Rlp2Or17ListEntries)


proc append*(writer: var RlpWriter; node: NodeRef) =
  ## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
  ## list.
  proc addHashKey(writer: var RlpWriter; key: HashKey) =
    if not key.isValid:
      writer.append EmptyBlob
    else:
      writer.append key.to(Hash256)

  if node.error != AristoError(0):
    writer.startList(0)
  else:
    case node.vType:
    of Branch:
      writer.startList(17)
      for n in 0..15:
        writer.addHashKey node.key[n]
      writer.append EmptyBlob

    of Extension:
      writer.startList(2)
      writer.append node.ePfx.hexPrefixEncode(isleaf = false)
      writer.addHashKey node.key[0]

    of Leaf:
      proc getKey0(vid: VertexID): Result[HashKey,AristoError] {.noRaise.} =
        ok(node.key[0]) # always succeeds

      writer.startList(2)
      writer.append node.lPfx.hexPrefixEncode(isleaf = true)
      writer.append node.lData.serialise(getKey0).value

# ---------------------

proc to*(node: NodeRef; T: type HashKey): T =
  ## Convert the argument `node` to the corresponding Merkle hash key
  node.encode.digestTo T

proc serialise*(
    db: AristoDbRef;
    pyl: PayloadRef;
      ): Result[Blob,(VertexID,AristoError)] =
  ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
  ## account type, otherwise pass the data as is.
  ##
  proc getKey(vid: VertexID): Result[HashKey,AristoError] =
    db.getKeyRc(vid)

  pyl.serialise getKey

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

@@ -331,12 +172,16 @@ proc blobify*(filter: FilterRef; data: var Blob): Result[void,AristoError] =
  ##   ...          -- more triplets
  ##   0x7d         -- marker(8)
  ##
  func blobify(lid: HashKey): Blob =
    let n = lid.len
    if n < 32: @[n.byte] & @lid & 0u8.repeat(31 - n) else: @lid

  if not filter.isValid:
    return err(BlobifyNilFilter)
  data.setLen(0)
  data &= filter.fid.uint64.toBytesBE.toSeq
  data &= filter.src.ByteArray32.toSeq
  data &= filter.trg.ByteArray32.toSeq
  data &= @(filter.src.data)
  data &= @(filter.trg.data)

  data &= filter.vGen.len.uint32.toBytesBE.toSeq
  data &= newSeq[byte](4) # place holder

@@ -355,30 +200,28 @@ proc blobify*(filter: FilterRef; data: var Blob): Result[void,AristoError] =
    leftOver.excl vid

    var
      keyMode = 0u                 # present and usable
      vtxMode = 0u                 # present and usable
      keyMode = 0u                 # default: ignore that key
      vtxLen = 0u                  # default: ignore that vertex
      keyBlob: Blob
      vtxBlob: Blob

    let key = filter.kMap.getOrVoid vid
    if key.isValid:
      keyBlob = key.ByteArray32.toSeq
      keyBlob = key.blobify
      keyMode = if key.len < 32: 0xc000_0000u else: 0x8000_0000u
    elif filter.kMap.hasKey vid:
      keyMode = 1u                 # void hash key => considered deleted
    else:
      keyMode = 2u                 # ignore that hash key
      keyMode = 0x4000_0000u       # void hash key => considered deleted

    if vtx.isValid:
      ? vtx.blobify vtxBlob
    else:
      vtxMode = 1u                 # nil vertex => considered deleted

    if (vtxBlob.len and not 0x1fffffff) != 0:
      vtxLen = vtxBlob.len.uint
      if 0x3fff_ffff <= vtxLen:
        return err(BlobifyFilterRecordOverflow)
    else:
      vtxLen = 0x3fff_ffff         # nil vertex => considered deleted

    let pfx = ((keyMode * 3 + vtxMode) shl 29) or vtxBlob.len.uint
    data &=
      pfx.uint32.toBytesBE.toSeq &
      (keyMode or vtxLen).uint32.toBytesBE.toSeq &
      vid.uint64.toBytesBE.toSeq &
      keyBlob &
      vtxBlob

@@ -387,18 +230,18 @@ proc blobify*(filter: FilterRef; data: var Blob): Result[void,AristoError] =
  for vid in leftOver:
    n.inc
    var
      mode = 2u                    # key present and usable, ignore vtx
      keyMode = 0u                 # present and usable
      keyBlob: Blob

    let key = filter.kMap.getOrVoid vid
    if key.isValid:
      keyBlob = key.ByteArray32.toSeq
      keyBlob = key.blobify
      keyMode = if key.len < 32: 0xc000_0000u else: 0x8000_0000u
    else:
      mode = 5u                    # 1 * 3 + 2: void key, ignore vtx
      keyMode = 0x4000_0000u       # void hash key => considered deleted

    let pfx = (mode shl 29)
    data &=
      pfx.uint32.toBytesBE.toSeq &
      keyMode.uint32.toBytesBE.toSeq &
      vid.uint64.toBytesBE.toSeq &
      keyBlob
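The short-key compaction in the local `blobify(lid: HashKey)` above pads every
record to a fixed 32 bytes as (len(1),data,zero-padding). A hedged stand-alone
sketch of the same layout, with a hypothetical name to avoid clashing with the
real helper:

  # Hedged sketch of the fixed-width short-key record layout shown above.
  import std/sequtils

  func blobifyDemo(lid: seq[byte]): seq[byte] =
    let n = lid.len
    if n < 32: @[n.byte] & lid & 0u8.repeat(31 - n) else: lid

  doAssert blobifyDemo(@[0xc2'u8, 0x01, 0x02]).len == 32   # padded record
  doAssert blobifyDemo(@[0xc2'u8, 0x01, 0x02])[0] == 3'u8  # leading length byte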
@@ -491,7 +334,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): Result[void,AristoError] =
  ## De-serialise a data record encoded with `blobify()`. The second
  ## argument `vtx` can be `nil`.
  if record.len < 3:                                  # minimum `Leaf` record
    return err(DeblobTooShort)
    return err(DeblobVtxTooShort)

  case record[^1] shr 6:
  of 0: # `Branch` vertex

@@ -593,10 +436,16 @@ proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
  if data[^1] != 0x7d:
    return err(DeblobWrongType)

  func deblob(data: openArray[byte]; shortKey: bool): Result[HashKey,void] =
    if shortKey:
      HashKey.fromBytes data[1 .. min(data[0],31)]
    else:
      HashKey.fromBytes data

  let f = FilterRef()
  f.fid = (uint64.fromBytesBE data[0 ..< 8]).FilterID
  (addr f.src.ByteArray32[0]).copyMem(unsafeAddr data[8], 32)
  (addr f.trg.ByteArray32[0]).copyMem(unsafeAddr data[40], 32)
  (addr f.src.data[0]).copyMem(unsafeAddr data[8], 32)
  (addr f.trg.data[0]).copyMem(unsafeAddr data[40], 32)

  let
    nVids = uint32.fromBytesBE data[72 ..< 76]

@@ -615,33 +464,33 @@ proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
      return err(DeblobFilterTrpTooShort)

    let
      flag = data[offs] shr 5          # double triplets: {0,1,2} x {0,1,2}
      vLen = ((uint32.fromBytesBE data[offs ..< offs + 4]) and 0x1fffffff).int
    if (vLen == 0) != ((flag mod 3) > 0):
      return err(DeblobFilterTrpVtxSizeGarbled) # contradiction
      keyFlag = data[offs] shr 6
      vtxFlag = ((uint32.fromBytesBE data[offs ..< offs+4]) and 0x3fff_ffff).int
      vLen = if vtxFlag == 0x3fff_ffff: 0 else: vtxFlag
    if keyFlag == 0 and vtxFlag == 0:
      return err(DeblobFilterTrpVtxSizeGarbled) # no blind records
    offs = offs + 4

    let vid = (uint64.fromBytesBE data[offs ..< offs + 8]).VertexID
    offs = offs + 8

    if data.len < offs + (flag < 3).ord * 32 + vLen:
    if data.len < offs + (1 < keyFlag).ord * 32 + vLen:
      return err(DeblobFilterTrpTooShort)

    if flag < 3:                       # {0} x {0,1,2}
      var key: HashKey
      (addr key.ByteArray32[0]).copyMem(unsafeAddr data[offs], 32)
      f.kMap[vid] = key
    if 1 < keyFlag:
      f.kMap[vid] = data[offs ..< offs + 32].deblob(keyFlag == 3).valueOr:
        return err(DeblobHashKeyExpected)
      offs = offs + 32
    elif flag < 6:                     # {0,1} x {0,1,2}
    elif keyFlag == 1:
      f.kMap[vid] = VOID_HASH_KEY

    if 0 < vLen:
    if vtxFlag == 0x3fff_ffff:
      f.sTab[vid] = VertexRef(nil)
    elif 0 < vLen:
      var vtx: VertexRef
      ? data[offs ..< offs + vLen].deblobify vtx
      f.sTab[vid] = vtx
      offs = offs + vLen
    elif (flag mod 3) == 1:            # {0,1,2} x {1}
      f.sTab[vid] = VertexRef(nil)

  if data.len != offs + 1:
    return err(DeblobFilterSizeGarbled)
@@ -12,11 +12,11 @@
import
  std/[algorithm, sequtils, sets, tables],
  eth/common,
  eth/[common, trie/nibbles],
  stew/interval_set,
  ../../aristo,
  ../aristo_walk/persistent,
  ".."/[aristo_desc, aristo_get, aristo_vid, aristo_transcode]
  ".."/[aristo_desc, aristo_get, aristo_vid]

const
  Vid2 = @[VertexID(2)].toHashSet

@@ -98,6 +98,21 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
    let rc = db.getKeyBE vid
    if rc.isErr or not rc.value.isValid:
      return err((vid,CheckBeKeyMissing))
    case vtx.vType:
    of Leaf:
      discard
    of Branch:
      block check42Links:
        var seen = false
        for n in 0 .. 15:
          if vtx.bVid[n].isValid:
            if seen:
              break check42Links
            seen = true
        return err((vid,CheckBeVtxBranchLinksMissing))
    of Extension:
      if vtx.ePfx.len == 0:
        return err((vid,CheckBeVtxExtPfxMissing))

  for (_,vid,key) in T.walkKeyBE db:
    if not key.isValid:

@@ -109,7 +124,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
    if rx.isErr:
      return err((vid,CheckBeKeyCantCompile))
    if not relax:
      let expected = rx.value.to(HashKey)
      let expected = rx.value.digestTo(HashKey)
      if expected != key:
        return err((vid,CheckBeKeyMismatch))
    discard vids.reduce Interval[VertexID,uint64].new(vid,vid)

@@ -162,10 +177,11 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
      not db.backend.isNil and
      not db.backend.filters.isNil:
    var lastTrg = db.getKeyUBE(VertexID(1)).get(otherwise = VOID_HASH_KEY)
                    .to(Hash256)
    for (qid,filter) in db.backend.T.walkFifoBe: # walk in fifo order
      if filter.src != lastTrg:
        return err((VertexID(0),CheckBeFifoSrcTrgMismatch))
      if filter.trg != filter.kMap.getOrVoid VertexID(1):
      if filter.trg != filter.kMap.getOrVoid(VertexID 1).to(Hash256):
        return err((VertexID(1),CheckBeFifoTrgNotStateRoot))
      lastTrg = filter.trg

@@ -180,7 +196,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
      let rc = vtx.toNode db # compile cache first
      if rc.isErr:
        return err((vid,CheckBeCacheKeyCantCompile))
      let expected = rc.value.to(HashKey)
      let expected = rc.value.digestTo(HashKey)
      if expected != lbl.key:
        return err((vid,CheckBeCacheKeyMismatch))

@@ -192,7 +208,10 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
    if 0 < delta.len:
      # Exclude fringe case when there is a single root vertex only
      if vGenExpected != Vid2 or 0 < vGen.len:
        return err((delta.toSeq.sorted[^1],CheckBeCacheGarbledVGen))
      let delta = delta.toSeq
      # As happens with Merkle signature calculator: `root=VertexID(2)`
      if delta.len != 1 or delta[0] != VertexID(1) or VertexID(1) in vGen:
        return err((delta.sorted[^1],CheckBeCacheGarbledVGen))

  ok()

@@ -12,9 +12,9 @@
import
  std/[sequtils, sets, tables],
  eth/common,
  eth/[common, trie/nibbles],
  results,
  ".."/[aristo_desc, aristo_get, aristo_transcode, aristo_utils]
  ".."/[aristo_desc, aristo_get, aristo_serialise, aristo_utils]

# ------------------------------------------------------------------------------
# Public functions

@@ -32,16 +32,17 @@ proc checkTopStrict*(
      let lbl = db.top.kMap.getOrVoid vid
      if not lbl.isValid:
        return err((vid,CheckStkVtxKeyMissing))
      if lbl.key != rc.value.to(HashKey):
      if lbl.key != rc.value.digestTo(HashKey):
        return err((vid,CheckStkVtxKeyMismatch))

      let revVid = db.top.pAmk.getOrVoid lbl
      if not revVid.isValid:
      let revVids = db.top.pAmk.getOrVoid lbl
      if not revVids.isValid:
        return err((vid,CheckStkRevKeyMissing))
      if revVid != vid:
      if vid notin revVids:
        return err((vid,CheckStkRevKeyMismatch))

  if 0 < db.top.pAmk.len and db.top.pAmk.len < db.top.sTab.len:
  let pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
  if 0 < pAmkVtxCount and pAmkVtxCount < db.top.sTab.len:
    # Cannot have less changes than cached entries
    return err((VertexID(0),CheckStkVtxCountMismatch))

@@ -62,13 +63,13 @@ proc checkTopRelaxed*(
      let lbl = db.top.kMap.getOrVoid vid
      if not lbl.isValid:
        return err((vid,CheckRlxVtxKeyMissing))
      if lbl.key != rc.value.to(HashKey):
      if lbl.key != rc.value.digestTo(HashKey):
        return err((vid,CheckRlxVtxKeyMismatch))

      let revVid = db.top.pAmk.getOrVoid lbl
      if not revVid.isValid:
      let revVids = db.top.pAmk.getOrVoid lbl
      if not revVids.isValid:
        return err((vid,CheckRlxRevKeyMissing))
      if revVid != vid:
      if vid notin revVids:
        return err((vid,CheckRlxRevKeyMismatch))
  else:
    for (vid,lbl) in db.top.kMap.pairs:

@@ -77,15 +78,13 @@ proc checkTopRelaxed*(
      if vtx.isValid:
        let rc = vtx.toNode db
        if rc.isOk:
          if lbl.key != rc.value.to(HashKey):
          if lbl.key != rc.value.digestTo(HashKey):
            return err((vid,CheckRlxVtxKeyMismatch))

          let revVid = db.top.pAmk.getOrVoid lbl
          if not revVid.isValid:
          let revVids = db.top.pAmk.getOrVoid lbl
          if not revVids.isValid:
            return err((vid,CheckRlxRevKeyMissing))
          if revVid != vid:
            return err((vid,CheckRlxRevKeyMissing))
          if revVid != vid:
          if vid notin revVids:
            return err((vid,CheckRlxRevKeyMismatch))
  ok()

@@ -101,7 +100,23 @@ proc checkTopCommon*(
  # Check deleted entries
  var nNilVtx = 0
  for (vid,vtx) in db.top.sTab.pairs:
    if not vtx.isValid:
    if vtx.isValid:
      case vtx.vType:
      of Leaf:
        discard
      of Branch:
        block check42Links:
          var seen = false
          for n in 0 .. 15:
            if vtx.bVid[n].isValid:
              if seen:
                break check42Links
              seen = true
          return err((vid,CheckAnyVtxBranchLinksMissing))
      of Extension:
        if vtx.ePfx.len == 0:
          return err((vid,CheckAnyVtxExtPfxMissing))
    else:
      nNilVtx.inc
      let rc = db.getVtxBE vid
      if rc.isErr:

@@ -116,9 +131,11 @@ proc checkTopCommon*(
  if kMapNilCount != 0 and kMapNilCount < nNilVtx:
    return err((VertexID(0),CheckAnyVtxEmptyKeyMismatch))

  if db.top.pAmk.len != kMapCount:
  let pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
  if pAmkVtxCount != kMapCount:
    var knownKeys: HashSet[VertexID]
    for (key,vid) in db.top.pAmk.pairs:
    for (key,vids) in db.top.pAmk.pairs:
      for vid in vids:
        if not db.top.kMap.hasKey(vid):
          return err((vid,CheckAnyRevVtxMissing))
        if vid in knownKeys:

@@ -11,6 +11,7 @@
{.push raises: [].}

import
  std/sets,
  eth/[common, trie/nibbles],
  ./aristo_desc/desc_identifiers

@@ -24,18 +25,21 @@ const
  EmptyVidSeq* = seq[VertexID].default
    ## Useful shortcut

  EmptyQidPairSeq* = seq[(QueueID,QueueID)].default
  EmptyVidSet* = EmptyVidSeq.toHashSet
    ## Useful shortcut

  VOID_CODE_HASH* = EMPTY_CODE_HASH
    ## Equivalent of `nil` for `Account` object code hash

  VOID_HASH_KEY* = EMPTY_ROOT_HASH.to(HashKey)
  VOID_HASH_KEY* = HashKey()
    ## Void equivalent for Merkle hash value

  VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_HASH_KEY)
  VOID_HASH_LABEL* = HashLabel()
    ## Void equivalent for Merkle hash value

  EmptyQidPairSeq* = seq[(QueueID,QueueID)].default
    ## Useful shortcut

  DEFAULT_QID_QUEUES* = [
    (128,   0), ## Consecutive list of 128 filter slots
    ( 64,  63), ## Overflow list, 64 filters, skipping 63 filters in-between

@@ -25,10 +25,7 @@ import
# ------------------------------------------------------------------------------

proc toHex(w: VertexID): string =
  w.uint64.toHex.toLowerAscii

proc toHex(w: HashKey): string =
  w.ByteArray32.toHex.toLowerAscii
  w.uint64.toHex

proc toHexLsb(w: int8): string =
  $"0123456789abcdef"[w and 15]

@@ -51,21 +48,27 @@ proc sortedKeys(pPrf: HashSet[VertexID]): seq[VertexID] =
proc toPfx(indent: int; offset = 0): string =
  if 0 < indent+offset: "\n" & " ".repeat(indent+offset) else: ""

proc labelVidUpdate(db: AristoDbRef, lbl: HashLabel, vid: VertexID): string =
  if lbl.key.isValid and vid.isValid:
proc lidVidUpdate(
    db: AristoDbRef;
    root: VertexID;
    lid: HashKey;
    vid: VertexID;
      ): string =
  if lid.isValid and vid.isValid:
    let lbl = HashLabel(root: root, key: lid)
    if not db.top.isNil:
      let lblVid = db.top.pAmk.getOrVoid lbl
      if lblVid.isValid:
        if lblVid != vid:
      let vids = db.top.pAmk.getOrVoid lbl
      if vids.isValid:
        if vid notin vids:
          result = "(!)"
        return
    block:
      let lblVid = db.xMap.getOrVoid lbl
      if lblVid.isValid:
        if lblVid != vid:
      let vids = db.xMap.getOrVoid lbl
      if vids.isValid:
        if vid notin vids:
          result = "(!)"
        return
    db.xMap[lbl] = vid
    db.xMap.append(lbl, vid)

proc squeeze(s: string; hex = false; ignLen = false): string =
  ## For long strings print `begin..end` only

@@ -83,7 +86,7 @@ proc squeeze(s: string; hex = false; ignLen = false): string =
      result &= ".." & s[s.len-16 .. ^1]

proc stripZeros(a: string): string =
  a.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii
  a.strip(leading=true, trailing=false, chars={'0'})

proc ppVid(vid: VertexID; pfx = true): string =
  if pfx:

@@ -119,55 +122,61 @@ proc ppQid(qid: QueueID): string =
    else:
      break here
    return
  result &= qid.toHex.stripZeros.toLowerAscii
  result &= qid.toHex.stripZeros

proc ppVidList(vGen: openArray[VertexID]): string =
  "[" & vGen.mapIt(it.ppVid).join(",") & "]"

proc ppVidList(vGen: HashSet[VertexID]): string =
  "{" & vGen.sortedKeys.mapIt(it.ppVid).join(",") & "}"

proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 =
  if lbl.isValid:
    if not db.top.isNil:
      let vid = db.top.pAmk.getOrVoid lbl
      if vid.isValid:
        return vid.uint64
      let vids = db.top.pAmk.getOrVoid lbl
      if vids.isValid:
        return vids.sortedKeys[0].uint64
    block:
      let vid = db.xMap.getOrVoid lbl
      if vid.isValid:
        return vid.uint64
      let vids = db.xMap.getOrVoid lbl
      if vids.isValid:
        return vids.sortedKeys[0].uint64

proc ppKey(key: HashKey): string =
  if key == HashKey.default:
    return "£ø"
proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
  proc getVids: HashSet[VertexID] =
    if not db.top.isNil:
      let vids = db.top.pAmk.getOrVoid HashLabel(root: root, key: key)
      if vids.isValid:
        return vids
    block:
      let vids = db.xMap.getOrVoid HashLabel(root: root, key: key)
      if vids.isValid:
        return vids
  if pfx:
    result = "£"
  if key == VOID_HASH_KEY:
    return "£r"

  "%" & key.toHex.squeeze(hex=true,ignLen=true)
    result &= "ø"
  elif not key.isValid:
    result &= "r"
  else:
    let
      tag = if key.len < 32: "[#" & $key.len & "]" else: ""
      vids = getVids()
    if vids.isValid:
      if not pfx and 0 < tag.len:
        result &= "$"
      if 1 < vids.len: result &= "{"
      result &= vids.sortedKeys.mapIt(it.ppVid(pfx=false)).join(",")
      if 1 < vids.len: result &= "}"
      result &= tag
      return
    result &= @key.toHex.squeeze(hex=true,ignLen=true) & tag

proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
  if lbl.key == HashKey.default:
    return "£ø"
  if lbl.key == VOID_HASH_KEY:
    return "£r"

  let rid = if not lbl.root.isValid: "ø:"
            else: ($lbl.root.toHex).stripZeros & ":"
  if not db.top.isNil:
    let vid = db.top.pAmk.getOrVoid lbl
    if vid.isValid:
      return "£" & rid & vid.ppVid(pfx=false)
  block:
    let vid = db.xMap.getOrVoid lbl
    if vid.isValid:
      return "£" & rid & vid.ppVid(pfx=false)

  "%" & rid & lbl.key.toHex.squeeze(hex=true,ignLen=true)

proc ppRootKey(a: HashKey): string =
  if a.isValid:
    return a.ppKey

proc ppCodeKey(a: HashKey): string =
  a.ppKey
  if lbl.isValid:
    "%" & ($lbl.root.toHex).stripZeros &
      ":" & lbl.key.ppKey(db, lbl.root, pfx=false)
  else:
    "%ø"

proc ppLeafTie(lty: LeafTie, db: AristoDbRef): string =
  if not db.top.isNil:

@@ -191,13 +200,13 @@ proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
  of RawData:
    result &= p.rawBlob.toHex.squeeze(hex=true)
  of RlpData:
    result &= "(" & p.rlpBlob.toHex.squeeze(hex=true) & ")"
    result &= "[#" & p.rlpBlob.toHex.squeeze(hex=true) & "]"
  of AccountData:
    result = "("
    result &= $p.account.nonce & ","
    result &= $p.account.balance & ","
    result &= p.account.storageID.ppVid & ","
    result &= p.account.codeHash.to(HashKey).ppCodeKey() & ")"
    result &= $p.account.codeHash & ")"

proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
  if not nd.isValid:

@@ -230,7 +239,7 @@ proc ppSTab(
  "{" & sTab.sortedKeys
    .mapIt((it, sTab.getOrVoid it))
    .mapIt("(" & it[0].ppVid & "," & it[1].ppVtx(db,it[0]) & ")")
    .join(indent.toPfx(2)) & "}"
    .join(indent.toPfx(1)) & "}"

proc ppLTab(
    lTab: Table[LeafTie,VertexID];

@@ -240,7 +249,7 @@ proc ppLTab(
  "{" & lTab.sortedKeys
    .mapIt((it, lTab.getOrVoid it))
    .mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")")
    .join(indent.toPfx(2)) & "}"
    .join(indent.toPfx(1)) & "}"

proc ppPPrf(pPrf: HashSet[VertexID]): string =
  "{" & pPrf.sortedKeys.mapIt(it.ppVid).join(",") & "}"

@@ -248,31 +257,35 @@ proc ppPPrf(pPrf: HashSet[VertexID]): string =
proc ppXMap*(
    db: AristoDbRef;
    kMap: Table[VertexID,HashLabel];
    pAmk: Table[HashLabel,VertexID];
    pAmk: Table[HashLabel,HashSet[VertexID]];
    indent: int;
      ): string =

  let
    pfx = indent.toPfx(1)
    dups = pAmk.values.toSeq.toCountTable.pairs.toSeq
      .filterIt(1 < it[1]).toTable
    revOnly = pAmk.pairs.toSeq.filterIt(not kMap.hasKey it[1])
      .mapIt((it[1],it[0])).toTable
  let pfx = indent.toPfx(1)

  var dups: HashSet[VertexID]
  for vids in pAmk.values:
    if 1 < vids.len:
      dups = dups + vids

  # Vertex IDs without forward mapping `kMap: VertexID -> HashLabel`
  var revOnly: Table[VertexID,HashLabel]
  for (lbl,vids) in pAmk.pairs:
    for vid in vids:
      if not kMap.hasKey vid:
        revOnly[vid] = lbl
  let revKeys =
    revOnly.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)

  proc ppNtry(n: uint64): string =
    var s = VertexID(n).ppVid
    let lbl = kMap.getOrVoid VertexID(n)
    if lbl.isValid:
      let vid = pAmk.getOrVoid lbl
      if not vid.isValid:
        s = "(" & s & "," & lbl.ppLabel(db) & ",ø"
      elif vid != VertexID(n):
        s = "(" & s & "," & lbl.ppLabel(db) & "," & vid.ppVid
      let count = dups.getOrDefault(VertexID(n), 0)
      if 0 < count:
        if s[0] != '(':
          s &= "(" & s
        s &= ",*" & $count
      let vids = pAmk.getOrVoid lbl
      if VertexID(n) notin vids or 1 < vids.len:
        s = "(" & s & "," & lbl.key.ppKey(db,lbl.root)
      elif lbl.key.len < 32:
        s &= "[#" & $lbl.key.len & "]"
    else:
      s &= "£ø"
    if s[0] == '(':

@@ -281,7 +294,6 @@ proc ppXMap*(
  result = "{"
  # Extra reverse lookups
  let revKeys = revOnly.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
  if 0 < revKeys.len:
    proc ppRevlabel(vid: VertexID): string =
      "(ø," & revOnly.getOrVoid(vid).ppLabel(db) & ")"

@@ -311,9 +323,9 @@ proc ppXMap*(
    for vid in kMap.sortedKeys:
      let lbl = kMap.getOrVoid vid
      if lbl.isValid:
        cache.add (vid.uint64, lbl.vidCode(db), 0 < dups.getOrDefault(vid, 0))
        let lblVid = pAmk.getOrDefault(lbl, VertexID(0))
        if lblVid != VertexID(0) and lblVid != vid:
        cache.add (vid.uint64, lbl.vidCode(db), vid in dups)
        let vids = pAmk.getOrVoid lbl
        if (0 < vids.len and vid notin vids) or lbl.key.len < 32:
          cache[^1][2] = true
      else:
        cache.add (vid.uint64, 0u64, true)

@@ -347,7 +359,12 @@ proc ppXMap*(
  else:
    result &= "}"

proc ppFilter(fl: FilterRef; db: AristoDbRef; indent: int): string =
proc ppFilter(
    fl: FilterRef;
    db: AristoDbRef;
    root: VertexID;
    indent: int;
      ): string =
  ## Walk over filter tables
  let
    pfx = indent.toPfx

@@ -358,8 +375,8 @@ proc ppFilter(fl: FilterRef; db: AristoDbRef; indent: int): string =
    result &= " n/a"
    return
  result &= pfx & "fid=" & fl.fid.ppFid
  result &= pfx & "src=" & fl.src.ppKey
  result &= pfx & "trg=" & fl.trg.ppKey
  result &= pfx & "src=" & fl.src.to(HashKey).ppKey(db,root)
  result &= pfx & "trg=" & fl.trg.to(HashKey).ppKey(db,root)
  result &= pfx & "vGen" & pfx1 & "[" &
    fl.vGen.mapIt(it.ppVid).join(",") & "]"
  result &= pfx & "sTab" & pfx1 & "{"

@@ -371,10 +388,10 @@ proc ppFilter(fl: FilterRef; db: AristoDbRef; indent: int): string =
  for n,vid in fl.kMap.sortedKeys:
    let key = fl.kMap.getOrVoid vid
    if 0 < n: result &= pfx2
    result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey & ")"
    result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey(db,root) & ")"
  result &= "}"

proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
proc ppBe[T](be: T; db: AristoDbRef; root: VertexID; indent: int): string =
  ## Walk over backend tables
  let
    pfx = indent.toPfx

@@ -387,7 +404,7 @@ proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
    $(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppVtx(db,it[1]) & ")"
  ).join(pfx2) & "}"
  result &= pfx & "kMap" & pfx1 & "{" & be.walkKey.toSeq.mapIt(
    $(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppKey & ")"
    $(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppKey(db,root) & ")"
  ).join(pfx2) & "}"

proc ppLayer(

@@ -430,12 +447,12 @@ proc ppLayer(
      let
        tLen = layer.sTab.len
        info = "sTab(" & $tLen & ")"
      result &= info.doPrefix(0 < tLen) & layer.sTab.ppSTab(db,indent+1)
      result &= info.doPrefix(0 < tLen) & layer.sTab.ppSTab(db,indent+2)
    if lTabOk:
      let
        tlen = layer.lTab.len
        info = "lTab(" & $tLen & ")"
      result &= info.doPrefix(0 < tLen) & layer.lTab.ppLTab(indent+1)
      result &= info.doPrefix(0 < tLen) & layer.lTab.ppLTab(indent+2)
    if kMapOk:
      let
        tLen = layer.kMap.len

@@ -443,7 +460,7 @@ proc ppLayer(
        lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen
        info = "kMap(" & lInf & ")"
      result &= info.doPrefix(0 < tLen + uLen)
      result &= db.ppXMap(layer.kMap, layer.pAmk,indent+1)
      result &= db.ppXMap(layer.kMap, layer.pAmk, indent+2)
    if pPrfOk:
      let
        tLen = layer.pPrf.len

@@ -458,8 +475,14 @@ proc ppLayer(
# Public functions
# ------------------------------------------------------------------------------

proc pp*(key: HashKey): string =
  key.ppKey
proc pp*(w: Hash256): string =
  w.data.toHex.squeeze(hex=true,ignLen=true)

proc pp*(w: HashKey; sig: MerkleSignRef): string =
  w.ppKey(sig.db, sig.root)

proc pp*(w: HashKey; db = AristoDbRef(); root = VertexID(1)): string =
  w.ppKey(db, root)

proc pp*(lbl: HashLabel, db = AristoDbRef()): string =
  lbl.ppLabel(db)

@@ -506,23 +529,22 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
    result &= $nd.lPfx.ppPathPfx & "," & nd.lData.pp(db)

  of Extension:
    let lbl = HashLabel(root: root, key: nd.key[0])
    result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & ","
    result &= lbl.ppLabel(db) & db.labelVidUpdate(lbl, nd.eVid)
    result &= nd.key[0].ppKey(db,root)
    result &= db.lidVidUpdate(root, nd.key[0], nd.eVid)

  of Branch:
    result &= "["
    for n in 0..15:
      if nd.bVid[n].isValid or nd.key[n].isValid:
        result &= nd.bVid[n].ppVid
      let lbl = HashLabel(root: root, key: nd.key[n])
      result &= db.labelVidUpdate(lbl, nd.bVid[n]) & ","
      result &= db.lidVidUpdate(root, nd.key[n], nd.bVid[n]) & ","
    result[^1] = ']'

    result &= ",["
    for n in 0..15:
      if nd.bVid[n].isValid or nd.key[n].isValid:
        result &= HashLabel(root: root, key: nd.key[n]).ppLabel(db)
        result &= nd.key[n].ppKey(db,root)
      result &= ","
    result[^1] = ']'
  result &= ")"

@@ -550,7 +572,7 @@ proc pp*(leg: Leg; db = AristoDbRef()): string =
    let lbl = db.top.kMap.getOrVoid leg.wp.vid
    if not lbl.isValid:
      result &= "ø"
    elif leg.wp.vid != db.top.pAmk.getOrVoid lbl:
    elif leg.wp.vid notin db.top.pAmk.getOrVoid lbl:
      result &= lbl.ppLabel(db)
  result &= ","
  if leg.backend:

@@ -592,7 +614,7 @@ proc pp*(pAmk: Table[Hashlabel,VertexID]; indent = 4): string =
proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string =
  db.ppXMap(kMap, db.top.pAmk, indent)

proc pp*(pAmk: Table[Hashlabel,VertexID]; db: AristoDbRef; indent = 4): string =
proc pp*(pAmk: VidsByLabel; db: AristoDbRef; indent = 4): string =
  db.ppXMap(db.top.kMap, pAmk, indent)

# ---------------------

@@ -645,34 +667,43 @@ proc pp*(
proc pp*(
    filter: FilterRef;
    db = AristoDbRef();
    root = VertexID(1);
    indent = 4;
      ): string =
  filter.ppFilter(db, indent)
  filter.ppFilter(db, root, indent)

proc pp*(
    be: BackendRef;
    db: AristoDbRef;
    root = VertexID(1);
    indent = 4;
      ): string =
  result = db.roFilter.ppFilter(db, indent+1) & indent.toPfx
  result = db.roFilter.ppFilter(db, root, indent+1) & indent.toPfx
  case be.kind:
  of BackendMemory:
    result &= be.MemBackendRef.ppBe(db, indent)
    result &= be.MemBackendRef.ppBe(db, root, indent)
  of BackendRocksDB:
    result &= be.RdbBackendRef.ppBe(db, indent)
    result &= be.RdbBackendRef.ppBe(db, root, indent)
  of BackendVoid:
    result &= "<NoBackend>"

proc pp*(
    db: AristoDbRef;
    backendOk = false;
    root = VertexID(1);
    indent = 4;
      ): string =
  result = db.top.pp(db, indent=indent) & indent.toPfx
  if backendOk:
    result &= db.backend.pp(db)
  else:
    result &= db.roFilter.ppFilter(db, indent+1)
    result &= db.roFilter.ppFilter(db, root, indent+1)

proc pp*(sdb: MerkleSignRef; indent = 4): string =
  "count=" & $sdb.count &
    " root=" & sdb.root.pp &
    " error=" & $sdb.error &
    "\n db\n " & sdb.db.pp(root=sdb.root, indent=indent+1)

# ------------------------------------------------------------------------------
# End

@@ -299,7 +299,10 @@ proc deleteImpl(
  if 1 < hike.legs.len:

    # Get current `Branch` vertex `br`
    let br = hike.legs[^2].wp
    let br = block:
      var wp = hike.legs[^2].wp
      wp.vtx = wp.vtx.dup # make sure that layers are not implicitly modified
      wp
    if br.vtx.vType != Branch:
      return err((br.vid,DelBranchExpexted))

@@ -46,6 +46,14 @@ type
    txUid*: uint                      ## Unique ID among transactions
    level*: int                       ## Stack index for this transaction

  MerkleSignRef* = ref object
    ## Simple Merkle signature calculator for key-value lists
    root*: VertexID
    db*: AristoDbRef
    count*: uint
    error*: AristoError
    errKey*: Blob

  DudesRef = ref object
    case rwOk: bool
    of true:

@@ -67,7 +75,7 @@ type
    dudes: DudesRef                   ## Related DB descriptors

    # Debugging data below, might go away in future
    xMap*: Table[HashLabel,VertexID]  ## For pretty printing, extends `pAmk`
    xMap*: VidsByLabel                ## For pretty printing, extends `pAmk`

  AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].}
    ## Generic call back function/closure.

@@ -82,12 +90,18 @@ func getOrVoid*[W](tab: Table[W,VertexRef]; w: W): VertexRef =
func getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
  tab.getOrDefault(w, VOID_HASH_LABEL)

func getOrVoid*[W](tab: Table[W,NodeRef]; w: W): NodeRef =
  tab.getOrDefault(w, NodeRef(nil))

func getOrVoid*[W](tab: Table[W,HashKey]; w: W): HashKey =
  tab.getOrDefault(w, VOID_HASH_KEY)

func getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID =
  tab.getOrDefault(w, VertexID(0))

func getOrVoid*[W](tab: Table[W,HashSet[VertexID]]; w: W): HashSet[VertexID] =
  tab.getOrDefault(w, EmptyVidSet)

# --------

func isValid*(vtx: VertexRef): bool =

@@ -102,15 +116,24 @@ func isValid*(pld: PayloadRef): bool =
func isValid*(filter: FilterRef): bool =
  filter != FilterRef(nil)

func isValid*(key: HashKey): bool =
  key != VOID_HASH_KEY
func isValid*(root: Hash256): bool =
  root != EMPTY_ROOT_HASH

func isValid*(lbl: HashLabel): bool =
  lbl != VOID_HASH_LABEL
func isValid*(key: HashKey): bool =
  if key.len == 32:
    key.to(Hash256).isValid
  else:
    0 < key.len

func isValid*(vid: VertexID): bool =
  vid != VertexID(0)

func isValid*(lbl: HashLabel): bool =
  lbl.root.isValid and lbl.key.isValid

func isValid*(sqv: HashSet[VertexID]): bool =
  sqv != EmptyVidSet

func isValid*(qid: QueueID): bool =
  qid != QueueID(0)

@@ -126,18 +149,6 @@ func hash*(db: AristoDbRef): Hash =
  ## Table/KeyedQueue/HashSet mixin
  cast[pointer](db).hash

# Note that the below `init()` function cannot go into `desc_identifiers` as
# this would result in a circular import.
func init*(key: var HashKey; data: openArray[byte]): bool =
  ## Import argument `data` into `key` which must have length either `32`, or
  ## `0`. The latter case is equivalent to an all zero byte array of size `32`.
  if data.len == 32:
    (addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
    return true
  if data.len == 0:
    key = VOID_HASH_KEY
    return true

# ------------------------------------------------------------------------------
# Public functions, `dude` related
# ------------------------------------------------------------------------------

@@ -16,12 +16,16 @@ type
    # Rlp decoder, `read()`
    Rlp2Or17ListEntries
    RlpBlobExpected
    RlpBranchLinkExpected
    RlpExtPathEncoding
    RlpNonEmptyBlobExpected
    RlpBranchHashKeyExpected
    RlpEmptyBlobExpected
    RlpRlpException
    RlpExtHashKeyExpected
    RlpHashKeyExpected
    RlpNonEmptyBlobExpected
    RlpOtherException
    RlpRlpException

    # Serialise decoder
    SerCantResolveStorageRoot

    # Data record transcoders, `deblobify()` and `blobify()`
    BlobifyNilFilter

@@ -34,7 +38,8 @@ type
    DeblobNilArgument
    DeblobUnknown
    DeblobTooShort
    DeblobVtxTooShort
    DeblobHashKeyExpected
    DeblobBranchTooShort
    DeblobBranchSizeGarbled
    DeblobBranchInxOutOfRange

@@ -90,13 +95,15 @@ type
    MergeAssemblyFailed # Ooops, internal error

    MergeHashKeyInvalid
    MergeHashKeyCachedAlready
    MergeHashKeyDiffersFromCached
    MergeHashKeyRevLookUpGarbled
    MergeRootVidInvalid
    MergeRootKeyInvalid
    MergeRevVidMustHaveBeenCached
    MergeHashKeyCachedAlready
    MergeHashKeyDiffersFromCached
    MergeNodeVtxDiffersFromExisting
    MergeRootKeyDiffersForVid
    MergeNodeVtxDuplicates

    # Update `Merkle` hashes `hashify()`
    HashifyCannotComplete

@@ -128,15 +135,19 @@ type
    CheckAnyVtxEmptyKeyMissing
    CheckAnyVtxEmptyKeyExpected
    CheckAnyVtxEmptyKeyMismatch
    CheckAnyVtxBranchLinksMissing
    CheckAnyVtxExtPfxMissing
    CheckAnyVtxLockWithoutKey
    CheckAnyRevVtxMissing
    CheckAnyRevVtxDup
    CheckAnyRevCountMismatch
    CheckAnyVtxLockWithoutKey

    # Backend structural check `checkBE()`
    CheckBeVtxInvalid
    CheckBeKeyInvalid
    CheckBeVtxMissing
    CheckBeVtxBranchLinksMissing
    CheckBeVtxExtPfxMissing
    CheckBeKeyInvalid
    CheckBeKeyMissing
    CheckBeKeyCantCompile
    CheckBeKeyMismatch

@@ -229,6 +240,7 @@ type
    RdbBeAddSstWriter
    RdbBeFinishSstWriter
    RdbBeIngestSstWriter
    RdbHashKeyExpected

    # Transaction wrappers
    TxArgStaleTx

@@ -17,12 +17,10 @@
import
  std/[sequtils, strutils, hashes],
  eth/[common, trie/nibbles],
  results,
  stint

type
  ByteArray32* = array[32,byte]
    ## Used for 32 byte hash components repurposed as Merkle hash labels.

  QueueID* = distinct uint64
    ## Identifier used to tag filter logs stored on the backend.

@@ -37,10 +35,27 @@ type
    ## backend of the database, there is no other reference to the node than
    ## the very same `VertexID`.

  HashKey* = distinct ByteArray32
    ## Dedicated `Hash256` object variant that is used for labelling the
    ## vertices of the `Patricia Trie` in order to make it a
    ## `Merkle Patricia Tree`.
  HashKey* = object
    ## Ethereum MPTs use Keccak hashes as node links if the size of an RLP
    ## encoded node is of size at least 32 bytes. Otherwise, the RLP encoded
    ## node value is used as a pseudo node link (rather than a hash.) Such a
    ## node is not stored on the key-value database. Rather, the RLP encoded
    ## node value is stored in a parent node instead of a node link. Only
    ## for the root hash, the top level node is always referred to by the
    ## hash.
    ##
    ## This compaction feature needed an abstraction of the `HashKey` object
    ## which is either a `Hash256` or a `Blob` of length at most 31 bytes.
    ## This leaves two ways of representing an empty/void `HashKey` type.
    ## It may be available as an empty `Blob` of zero length, or the
    ## `Hash256` type of the Keccak hash of an empty `Blob` (see constant
    ## `EMPTY_ROOT_HASH`.)
    ##
    case isHash: bool
    of true:
      key: Hash256                    ## Merkle hash tacked to a vertex
    else:
      blob: Blob                      ## Optionally encoded small node data

  PathID* = object
    ## Path into the `Patricia Trie`. This is a chain of maximal 64 nibbles

@@ -79,11 +94,23 @@ type
    ## `Aristo Trie`. They are used temporarily and in caches or backlog
    ## tables.
    root*: VertexID                   ## Root ID for the sub-trie.
    key*: HashKey                     ## Merkle hash tacked to a vertex.
    key*: HashKey                     ## Merkle hash or encoded small node data

static:
  # Not that there is no doubt about this ...
  doAssert HashKey.default.ByteArray32.initNibbleRange.len == 64
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func to(lid: HashKey; T: type PathID): T =
  ## Helper to borrow certain properties from `PathID`
  if lid.isHash:
    PathID(pfx: UInt256.fromBytesBE lid.key.data, length: 64)
  elif 0 < lid.blob.len:
    doAssert lid.blob.len < 32
    var a32: array[32,byte]
    (addr a32[0]).copyMem(unsafeAddr lid.blob[0], lid.blob.len)
    PathID(pfx: UInt256.fromBytesBE a32, length: 2 * lid.blob.len.uint8)
  else:
    PathID()

# ------------------------------------------------------------------------------
# Public helpers: `VertexID` scalar data model
@@ -184,6 +211,52 @@ func `==`*(a, b: PathID): bool =
  ## (see `normal()`.)
  a.pfx == b.pfx and a.length == b.length

func cmp*(a, b: PathID): int =
  if a < b: -1 elif b < a: 1 else: 0

# ------------------------------------------------------------------------------
# Public helpers: `HashKey` ordered scalar data model
# ------------------------------------------------------------------------------

func len*(lid: HashKey): int =
  if lid.isHash: 32 else: lid.blob.len

func fromBytes*(T: type HashKey; data: openArray[byte]): Result[T,void] =
  ## Write argument `data` of length 0 or between 2 and 32 bytes as a `HashKey`.
  ##
  ## A function argument `data` of length 32 is used as-is.
  ##
  ## For a function argument `data` of length between 2 and 31, the first
  ## byte must be the start of an RLP encoded list, i.e. `0xc0 + len` where
  ## `len` is one less than the `data` length.
  ##
  if data.len == 32:
    var lid: T
    lid.isHash = true
    (addr lid.key.data[0]).copyMem(unsafeAddr data[0], data.len)
    return ok lid
  if data.len == 0:
    return ok HashKey()
  if 1 < data.len and data.len < 32 and data[0].int == 0xbf + data.len:
    return ok T(isHash: false, blob: @data)
  err()

func `<`*(a, b: HashKey): bool =
  ## Slow, but useful for debug sorting
  a.to(PathID) < b.to(PathID)

func `==`*(a, b: HashKey): bool =
  if a.isHash != b.isHash:
    false
  elif a.isHash:
    a.key == b.key
  else:
    a.blob == b.blob

func cmp*(a, b: HashKey): int =
  ## Slow, but useful for debug sorting
  if a < b: -1 elif b < a: 1 else: 0

# ------------------------------------------------------------------------------
# Public helpers: `LeafTie` ordered scalar data model
# ------------------------------------------------------------------------------
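A quick check of the `fromBytes` length rule documented above. This is a
hedged usage sketch against the API as shown in the diff; a 3 byte RLP list
starts with 0xc2 (= 0xbf + 3), so it passes, while a short blob with a wrong
first byte is rejected:

  # Hedged usage sketch for `HashKey.fromBytes` as documented above.
  let short = @[0xc2'u8, 0x01, 0x02]            # valid: data[0] == 0xbf + len
  doAssert HashKey.fromBytes(short).isOk
  doAssert HashKey.fromBytes(@[0x00'u8, 0x01, 0x02]).isErr
  doAssert HashKey.fromBytes(newSeq[byte](32)).isOk   # 32 bytes: used as-is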
@@ -223,35 +296,42 @@ func cmp*(a, b: LeafTie): int =
# Public helpers: Reversible conversions between `PathID`, `HashKey`, etc.
# ------------------------------------------------------------------------------

proc to*(key: HashKey; T: type UInt256): T =
  T.fromBytesBE key.ByteArray32

func to*(key: HashKey; T: type Hash256): T =
  T(data: ByteArray32(key))

func to*(key: HashKey; T: type PathID): T =
  ## Not necessarily reversible for shorter lengths `PathID` values
  T(pfx: UInt256.fromBytesBE key.ByteArray32, length: 64)

func to*(hash: Hash256; T: type HashKey): T =
  hash.data.T

func to*(key: HashKey; T: type Blob): T =
  ## Representation of a `HashKey` as `Blob` (preserving full information)
  key.ByteArray32.toSeq
  ## Rewrite `HashKey` argument as `Blob` type of length between 0 and 32. A
  ## blob of length 32 is taken as a representation of a `HashKey` type while
  ## smaller blobs are expected to represent an RLP encoded small node.
  if key.isHash:
    @(key.key.data)
  else:
    key.blob

func to*(key: HashKey; T: type NibblesSeq): T =
  ## Representation of a `HashKey` as `NibbleSeq` (preserving full information)
  key.ByteArray32.initNibbleRange()
func `@`*(lid: HashKey): Blob =
  ## Variant of `to(Blob)`
  lid.to(Blob)

func to*(pid: PathID; T: type NibblesSeq): T =
  ## Representation of a `HashKey` as `NibbleSeq` (preserving full information)
  ## Representation of a `PathID` as `NibbleSeq` (preserving full information)
  let nibbles = pid.pfx.UInt256.toBytesBE.toSeq.initNibbleRange()
  if pid.length < 64:
    nibbles.slice(0, pid.length.int)
  else:
    nibbles

func to*(lid: HashKey; T: type Hash256): T =
  ## Returns the `Hash256` key if available, otherwise the Keccak hash of
  ## the `Blob` version.
  if lid.isHash:
    lid.key
  elif 0 < lid.blob.len:
    lid.blob.keccakHash
  else:
    EMPTY_ROOT_HASH

func to*(key: Hash256; T: type HashKey): T =
  ## This is an efficient version of `HashKey.fromBytes(key.data).value`, not
  ## to be confused with `digestTo(HashKey)`.
  T(isHash: true, key: key)

func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
  ## Representation of a scalar as `PathID` (preserving full information)
  T(pfx: n.u256, length: 64)
@@ -261,8 +341,13 @@ func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
# ------------------------------------------------------------------------------

func digestTo*(data: openArray[byte]; T: type HashKey): T =
  ## Keccak hash of a `Blob` like argument, represented as a `HashKey`
  keccakHash(data).data.T
  ## For argument `data` with length smaller than 32, import them as-is into
  ## the result. Otherwise import the Keccak hash of the argument `data`.
  if data.len < 32:
    result.blob = @data
  else:
    result.isHash = true
    result.key = data.keccakHash

func normal*(a: PathID): PathID =
  ## Normalise path ID representation
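The short-node rule and the two void representations mentioned in the commit
message can be exercised together. A hedged sketch, assuming this module's
API exactly as shown above (`digestTo`, `to(Hash256)`, `EMPTY_ROOT_HASH`):

  # Hedged sketch: both void representations of a `HashKey` collapse to
  # EMPTY_ROOT_HASH when converted to `Hash256`.
  let
    inline = @[0xc2'u8, 0x01, 0x02].digestTo(HashKey) # < 32 bytes: kept as-is
    hashed = newSeq[byte](64).digestTo(HashKey)       # >= 32 bytes: Keccak
  doAssert inline.len == 3 and hashed.len == 32
  doAssert HashKey().to(Hash256) == EMPTY_ROOT_HASH   # empty blob variant
  doAssert EMPTY_ROOT_HASH.to(HashKey).to(Hash256) == EMPTY_ROOT_HASH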
@@ -283,22 +368,28 @@ func hash*(a: PathID): Hash =
    h = h !& a.length.hash
  !$h

func hash*(a: HashKey): Hash {.borrow.}
func hash*(a: HashKey): Hash =
  ## Table/KeyedQueue mixin
  var h: Hash = 0
  if a.isHash:
    h = h !& a.key.hash
  else:
    h = h !& a.blob.hash
  !$h

func `==`*(a, b: HashKey): bool {.borrow.}

func read*(rlp: var Rlp; T: type HashKey;): T {.gcsafe, raises: [RlpError].} =
  rlp.read(Hash256).to(T)

func append*(writer: var RlpWriter, val: HashKey) =
  writer.append(val.to(Hash256))
func hash*(lbl: HashLabel): Hash =
  ## Table/KeyedQueue/HashSet mixin
  var h: Hash = 0
  h = h !& lbl.root.hash
  h = h !& lbl.key.hash
  !$h

# ------------------------------------------------------------------------------
# Miscellaneous helpers
# ------------------------------------------------------------------------------

func `$`*(key: HashKey): string =
  let w = UInt256.fromBytesBE key.ByteArray32
func `$`*(key: Hash256): string =
  let w = UInt256.fromBytesBE key.data
  if w == high(UInt256):
    "2^256-1"
  elif w == 0.u256:
@ -316,8 +407,11 @@ func `$`*(key: HashKey): string =
|
|||
|
||||
func `$`*(a: PathID): string =
|
||||
if a.pfx != 0:
|
||||
result = ($a.pfx.toHex).strip(
|
||||
leading=true, trailing=false, chars={'0'}).toLowerAscii
|
||||
var dgts = $a.pfx.toHex
|
||||
if a.length < 64:
|
||||
dgts = dgts[0 ..< a.length]
|
||||
result = dgts.strip(
|
||||
leading=true, trailing=false, chars={'0'})
|
||||
elif a.length != 0:
|
||||
result = "0"
|
||||
if a.length < 64:
|
||||
|
@ -326,7 +420,7 @@ func `$`*(a: PathID): string =
|
|||
func `$`*(a: LeafTie): string =
|
||||
if a.root != 0:
|
||||
result = ($a.root.uint64.toHex).strip(
|
||||
leading=true, trailing=false, chars={'0'}).toLowerAscii
|
||||
leading=true, trailing=false, chars={'0'})
|
||||
else:
|
||||
result = "0"
|
||||
result &= ":" & $a.path
|
||||
|
|
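The conversions above make the dual `HashKey` representation concrete: a key is either a full 32 byte Keccak hash, or, for nodes whose RLP encoding is shorter than 32 bytes, the encoding itself. Below is a minimal self-contained sketch of the same rule with a simplified stand-in type (the `MiniKey` name and layout are illustrative; only `keccakHash` from `eth/common` is the real helper used in the diff):

import eth/common

type MiniKey = object
  isHash: bool    # true => hash reference, false => embedded node
  key: Hash256    # valid when `isHash` is set
  blob: seq[byte] # RLP encoded small node, at most 31 bytes

func miniDigest(data: openArray[byte]): MiniKey =
  if data.len < 32:
    result.blob = @data           # keep the short encoding verbatim
  else:
    result.isHash = true
    result.key = data.keccakHash  # otherwise link by Keccak hash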
@@ -15,7 +15,7 @@
{.push raises: [].}

import
  std/[sets, tables],
  std/[hashes, sets, tables],
  eth/[common, trie/nibbles],
  "."/[desc_error, desc_identifiers]

@@ -75,7 +75,7 @@ type
  NodeRef* = ref object of VertexRef
    ## Combined record for a *traditional* `Merkle Patricia Tree` node merged
    ## with a structural `VertexRef` type object.
    error*: AristoError                ## Can be used for error signalling
    error*: AristoError                ## Used for error signalling in RLP decoder
    key*: array[16,HashKey]            ## Merkle hash/es for vertices

  # ----------------------

@@ -83,19 +83,22 @@ type
  FilterRef* = ref object
    ## Delta layer with expanded sequences for quick access
    fid*: FilterID                     ## Filter identifier
    src*: HashKey                      ## Applicable to this state root
    trg*: HashKey                      ## Resulting state root (i.e. `kMap[1]`)
    src*: Hash256                      ## Applicable to this state root
    trg*: Hash256                      ## Resulting state root (i.e. `kMap[1]`)
    sTab*: Table[VertexID,VertexRef]   ## Filter structural vertex table
    kMap*: Table[VertexID,HashKey]     ## Filter Merkle hash key mapping
    vGen*: seq[VertexID]               ## Filter unique vertex ID generator

  VidsByLabel* = Table[HashLabel,HashSet[VertexID]]
    ## Reverse lookup searching `VertexID` by the hash key/label.

  LayerRef* = ref object
    ## Hexary trie database layer structures. Any layer holds the full
    ## change relative to the backend.
    sTab*: Table[VertexID,VertexRef]   ## Structural vertex table
    lTab*: Table[LeafTie,VertexID]     ## Direct access, path to leaf vertex
    kMap*: Table[VertexID,HashLabel]   ## Merkle hash key mapping
    pAmk*: Table[HashLabel,VertexID]   ## Reverse `kMap` entries, hash key lookup
    pAmk*: VidsByLabel                 ## Reverse `kMap` entries, hash key lookup
    pPrf*: HashSet[VertexID]           ## Locked vertices (proof nodes)
    vGen*: seq[VertexID]               ## Unique vertex ID generator
    txUid*: uint                       ## Transaction identifier if positive

@@ -135,10 +138,35 @@ const
func max(a, b, c: int): int =
  max(max(a,b),c)

# ------------------------------------------------------------------------------
# Public helpers: `Table[HashLabel,seq[VertexID]]`
# ------------------------------------------------------------------------------

proc append*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
  pAmk.withValue(lbl,value):
    value[].incl vid
  do: # else if not found
    pAmk[lbl] = @[vid].toHashSet

proc delete*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
  var deleteItem = false
  pAmk.withValue(lbl,value):
    value[].excl vid
    if value[].len == 0:
      deleteItem = true
  if deleteItem:
    pAmk.del lbl

# ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef`
# ------------------------------------------------------------------------------

func hash*(node: NodeRef): Hash =
  ## Table/KeyedQueue/HashSet mixin
  cast[pointer](node).hash

# ---------------

proc `==`*(a, b: PayloadRef): bool =
  ## Beware, potential deep comparison
  if a.isNil:
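The `append`/`delete` helpers above implement a plain multimap over `std/tables`: `withValue` mutates an existing entry in place and the `do:` branch handles the missing-key case. A self-contained sketch of the same pattern, with integers standing in for `HashLabel` and `VertexID`:

import std/[tables, sets]

var pAmk: Table[int, HashSet[int]]

proc append(t: var Table[int, HashSet[int]]; lbl, vid: int) =
  t.withValue(lbl, value):
    value[].incl vid
  do: # label not seen yet
    t[lbl] = [vid].toHashSet

append(pAmk, 7, 1)
append(pAmk, 7, 2) # two vertices may now share one label
assert pAmk[7].len == 2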
@@ -14,6 +14,7 @@

import
  std/[sequtils, tables],
  eth/common,
  results,
  "."/[aristo_desc, aristo_get, aristo_vid],
  ./aristo_desc/desc_backend,

@@ -89,16 +90,16 @@ proc merge*(
  ## Merge the argument `filter` into the read-only filter layer. Note that
  ## this function has no control of the filter source. Having merged the
  ## argument `filter`, all the `top` and `stack` layers should be cleared.
  let ubeRootKey = block:
  let ubeRoot = block:
    let rc = db.getKeyUBE VertexID(1)
    if rc.isOk:
      rc.value
      rc.value.to(Hash256)
    elif rc.error == GetKeyNotFound:
      VOID_HASH_KEY
      EMPTY_ROOT_HASH
    else:
      return err((VertexID(1),rc.error))

  db.roFilter = ? db.merge(filter, db.roFilter, ubeRootKey)
  db.roFilter = ? db.merge(filter, db.roFilter, ubeRoot)
  ok()
@@ -9,7 +9,8 @@
# except according to those terms.

import
  std/tables,
  std/[sets, tables],
  eth/common,
  results,
  ".."/[aristo_desc, aristo_desc/desc_backend, aristo_get],
  ./filter_scheduler

@@ -17,8 +18,8 @@ import
type
  StateRootPair* = object
    ## Helper structure for analysing state roots.
    be*: HashKey                       ## Backend state root
    fg*: HashKey                       ## Layer or filter implied state root
    be*: Hash256                       ## Backend state root
    fg*: Hash256                       ## Layer or filter implied state root

  FilterIndexPair* = object
    ## Helper structure for fetching filters from cascaded fifo

@@ -39,7 +40,7 @@ proc getLayerStateRoots*(
  ##
  var spr: StateRootPair

  spr.be = block:
  let sprBeKey = block:
    let rc = db.getKeyBE VertexID(1)
    if rc.isOk:
      rc.value

@@ -47,15 +48,20 @@ proc getLayerStateRoots*(
      VOID_HASH_KEY
    else:
      return err(rc.error)
  spr.be = sprBeKey.to(Hash256)

  block:
    spr.fg = layer.kMap.getOrVoid(VertexID 1).key
  spr.fg = block:
    let lbl = layer.kMap.getOrVoid VertexID(1)
    if lbl.isValid:
      lbl.key.to(Hash256)
    else:
      EMPTY_ROOT_HASH
  if spr.fg.isValid:
    return ok(spr)

  if chunkedMpt:
    let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be)
    if vid == VertexID(1):
    let vids = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: sprBeKey)
    if VertexID(1) in vids:
      spr.fg = spr.be
      return ok(spr)
@@ -10,6 +10,7 @@

import
  std/tables,
  eth/common,
  results,
  ".."/[aristo_desc, aristo_get]

@@ -21,7 +22,7 @@ proc merge*(
    db: AristoDbRef;
    upper: FilterRef;                  # Src filter, `nil` is ok
    lower: FilterRef;                  # Trg filter, `nil` is ok
    beStateRoot: HashKey;              # Merkle hash key
    beStateRoot: Hash256;              # Merkle hash key
      ): Result[FilterRef,(VertexID,AristoError)] =
  ## Merge argument `upper` into the `lower` filter instance.
  ##

@@ -88,7 +89,7 @@ proc merge*(
      elif newFilter.kMap.getOrVoid(vid).isValid:
        let rc = db.getKeyUBE vid
        if rc.isOk:
          newFilter.kMap[vid] = key # VOID_HASH_KEY
          newFilter.kMap[vid] = key
        elif rc.error == GetKeyNotFound:
          newFilter.kMap.del vid
        else:

@@ -113,9 +114,6 @@ proc merge*(
  ##       |  (src1==trg0) --> newFilter --> trg2
  ##       (src1==trg0) --> lower --> trg1 |
  ##                                       |
  const
    noisy = false

  if upper.isNil or lower.isNil:
    return err((VertexID(0),FilNilFilterRejected))
@@ -164,9 +164,9 @@ proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
  if db.top.kMap.hasKey vid:
    # If the key is to be deleted on the backend, a `VOID_HASH_LABEL` entry
    # is kept on the local table in which case it is OK to return this value.
    let key = db.top.kMap.getOrVoid(vid).key
    if key.isValid:
      return ok(key)
    let lbl = db.top.kMap.getOrVoid vid
    if lbl.isValid:
      return ok lbl.key
    return err(GetKeyTempLocked)
  db.getKeyBE vid

@@ -174,10 +174,8 @@ proc getKey*(db: AristoDbRef; vid: VertexID): HashKey =
  ## Cascaded attempt to fetch a vertex from the top layer or the backend.
  ## The function returns `nil` on error or failure.
  ##
  let rc = db.getKeyRc vid
  if rc.isOk:
    return rc.value
  VOID_HASH_KEY
  db.getKeyRc(vid).valueOr:
    return VOID_HASH_KEY

# ------------------------------------------------------------------------------
# End
@@ -47,7 +47,7 @@ import
  eth/common,
  results,
  stew/interval_set,
  "."/[aristo_desc, aristo_get, aristo_hike, aristo_transcode, aristo_utils,
  "."/[aristo_desc, aristo_get, aristo_hike, aristo_serialise, aristo_utils,
    aristo_vid]

type

@@ -144,6 +144,7 @@ proc updateHashKey(
      # Proceed `vidAttach()`, below

  # Otherwise there is no Merkle hash, so create one with the `expected` key
  # and write it to the top level `pAmk[]` and `kMap[]` tables.
  db.vidAttach(HashLabel(root: root, key: expected), vid)
  ok()

@@ -166,11 +167,9 @@ proc leafToRootHasher(
      continue

    # Check against existing key, or store new key
    let
      key = rc.value.to(HashKey)
      rx = db.updateHashKey(hike.root, wp.vid, key, bg)
    if rx.isErr:
      return err((wp.vid,rx.error))
    let key = rc.value.digestTo(HashKey)
    db.updateHashKey(hike.root, wp.vid, key, bg).isOkOr:
      return err((wp.vid,error))

  ok -1 # all could be hashed

@@ -197,7 +196,7 @@ proc deletedLeafHasher(
      let rc = wp.vtx.toNode(db, stopEarly=false)
      if rc.isOk:
        let
          expected = rc.value.to(HashKey)
          expected = rc.value.digestTo(HashKey)
          key = db.getKey wp.vid
        if key.isValid:
          if key != expected:

@@ -301,11 +300,9 @@ proc resolveStateRoots(
    let rc = fVal.vtx.toNode db
    if rc.isOk:
      # Update Merkle hash
      let
        key = rc.value.to(HashKey)
        rx = db.updateHashKey(fVal.w.root, fVid, key, fVal.w.onBe)
      if rx.isErr:
        return err((fVid, rx.error))
      let key = rc.value.digestTo(HashKey)
      db.updateHashKey(fVal.w.root, fVid, key, fVal.w.onBe).isOkOr:
        return err((fVid, error))
      changes = true
    else:
      # Cannot complete with this vertex, so dig deeper and do it later

@@ -440,11 +437,9 @@ proc hashify*(

      else:
        # Update Merkle hash
        let
          key = rc.value.to(HashKey)
          rx = db.updateHashKey(val.root, vid, key, val.onBe)
        if rx.isErr:
          return err((vid,rx.error))
        let key = rc.value.digestTo(HashKey)
        db.updateHashKey(val.root, vid, key, val.onBe).isOkOr:
          return err((vid,error))

      done.incl vid
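The hashify rework above consistently replaces hand-rolled `rc.isOk`/`rc.isErr` branching with the `valueOr` and `isOkOr` idioms from the `results` package (both are used verbatim in the diff). A small stand-alone illustration with made-up types:

import results

proc fetchAge(name: string): Result[int, string] =
  if name == "alice":
    ok 42
  else:
    err "unknown name"

proc demo(): Result[void, string] =
  let age = fetchAge("alice").valueOr:
    return err(error)  # `error` is bound inside the branch
  doAssert age == 42
  fetchAge("bob").isOkOr:
    return err(error)  # only reached in the error case
  ok()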
@@ -34,7 +34,7 @@ import
  ../aristo_constants,
  ../aristo_desc,
  ../aristo_desc/desc_backend,
  ../aristo_transcode,
  ../aristo_blobify,
  ./init_common

type

@@ -93,7 +93,7 @@ proc getVtxFn(db: MemBackendRef): GetVtxFn =
proc getKeyFn(db: MemBackendRef): GetKeyFn =
  result =
    proc(vid: VertexID): Result[HashKey,AristoError] =
      let key = db.kMap.getOrDefault(vid, VOID_HASH_KEY)
      let key = db.kMap.getOrVoid vid
      if key.isValid:
        return ok key
      err(GetKeyNotFound)

@@ -327,7 +327,7 @@ iterator walkKey*(
    ): tuple[n: int, vid: VertexID, key: HashKey] =
  ## Iteration over the Merkle hash sub-table.
  for n,vid in be.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
    let key = be.kMap.getOrDefault(vid, VOID_HASH_KEY)
    let key = be.kMap.getOrVoid vid
    if key.isValid:
      yield (n, vid, key)

@@ -371,7 +371,7 @@ iterator walk*(
      n.inc

  for (_,vid,key) in be.walkKey:
    yield (n, KeyPfx, vid.uint64, key.to(Blob))
    yield (n, KeyPfx, vid.uint64, @key)
    n.inc

  if not be.noFq:
@@ -34,7 +34,7 @@ import
  ../aristo_constants,
  ../aristo_desc,
  ../aristo_desc/desc_backend,
  ../aristo_transcode,
  ../aristo_blobify,
  ./init_common,
  ./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk]

@@ -124,9 +124,9 @@ proc getKeyFn(db: RdbBackendRef): GetKeyFn =

      # Decode data record
      if 0 < rc.value.len:
        var key: HashKey
        if key.init rc.value:
          return ok key
        let lid = HashKey.fromBytes(rc.value).valueOr:
          return err(RdbHashKeyExpected)
        return ok lid

      err(GetKeyNotFound)

@@ -224,7 +224,7 @@ proc putKeyFn(db: RdbBackendRef): PutKeyFn =
      if hdl.error.isNil:
        for (vid,key) in vkps:
          if key.isValid:
            hdl.keyCache = (vid, key.to(Blob))
            hdl.keyCache = (vid, @key)
          else:
            hdl.keyCache = (vid, EmptyBlob)

@@ -402,9 +402,9 @@ iterator walkKey*(
    ): tuple[n: int, vid: VertexID, key: HashKey] =
  ## Variant of `walk()` iteration over the Merkle hash sub-table.
  for (n, xid, data) in be.rdb.walk KeyPfx:
    var hashKey: HashKey
    if hashKey.init data:
      yield (n, VertexID(xid), hashKey)
    let lid = HashKey.fromBytes(data).valueOr:
      continue
    yield (n, VertexID(xid), lid)

iterator walkFil*(
    be: RdbBackendRef;
@@ -29,8 +29,9 @@ import
  chronicles,
  eth/[common, trie/nibbles],
  results,
  stew/keyed_queue,
  ../../sync/protocol/snap/snap_types,
  "."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_transcode,
  "."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_serialise,
    aristo_vid]

logScope:

@@ -484,11 +485,11 @@ proc updatePayload(

proc mergeNodeImpl(
    db: AristoDbRef;                   # Database, top layer
    hashKey: HashKey;                  # Merkle hash of node
    hashKey: HashKey;                  # Merkle hash of node (or so)
    node: NodeRef;                     # Node derived from RLP representation
    rootVid: VertexID;                 # Current sub-trie
      ): Result[VertexID,AristoError] =
  ## The function merges the argument hash key `hashKey` as expanded from the
      ): Result[void,AristoError] =
  ## The function merges the argument hash key `hashKey` as expanded from the
  ## node RLP representation into the `Aristo Trie` database. The vertex is
  ## split off from the node and stored separately. So are the Merkle hashes.
  ## The vertex is labelled `locked`.

@@ -497,8 +498,19 @@ proc mergeNodeImpl(
  ## allocated, already. If the node comes straight from the `decode()` RLP
  ## decoder as expected, these vertex IDs will be all zero.
  ##
  if node.error != AristoError(0):
    return err(node.error)
  ## This function expects that the parent for the argument node has already
  ## been installed, i.e. the top layer cache mapping
  ##
  ##     pAmk: {HashKey} -> {{VertexID}}
  ##
  ## has a result for the argument `node`. Also, the inverse top layer cache
  ## mapping
  ##
  ##     sTab: {VertexID} -> {VertexRef}
  ##
  ## has no result for all images of the argument `node` under `pAmk`:
  ##
  doAssert node.error == AristoError(0)
  if not rootVid.isValid:
    return err(MergeRootKeyInvalid)

@@ -511,13 +523,21 @@ proc mergeNodeImpl(
  # order `root->.. ->leaf`.
  let
    hashLbl = HashLabel(root: rootVid, key: hashKey)
    vid = db.top.pAmk.getOrVoid hashLbl
  if not vid.isValid:
    vids = db.top.pAmk.getOrVoid(hashLbl).toSeq
    isRoot = rootVid in vids
  if vids.len == 0:
    return err(MergeRevVidMustHaveBeenCached)
  if isRoot and 1 < vids.len:
    # There can only be one root.
    return err(MergeHashKeyRevLookUpGarbled)

  let lbl = db.top.kMap.getOrVoid vid
  # Use the first vertex ID from the `vids` list as representative for all others
  let lbl = db.top.kMap.getOrVoid vids[0]
  if lbl == hashLbl:
    if db.top.sTab.hasKey vid:
    if db.top.sTab.hasKey vids[0]:
      for n in 1 ..< vids.len:
        if not db.top.sTab.hasKey vids[n]:
          return err(MergeHashKeyRevLookUpGarbled)
      # This is typically considered OK
      return err(MergeHashKeyCachedAlready)
    # Otherwise proceed

@@ -525,13 +545,27 @@ proc mergeNodeImpl(
    # Different key assigned => error
    return err(MergeHashKeyDiffersFromCached)

  let (vtx, hasVtx) = block:
    let vty = db.getVtx vid
  # While the vertex referred to by `vids[0]` does not exist in the top layer
  # cache it may well be in some lower layers or the backend. This typically
  # happens for the root node.
  var (vtx, hasVtx) = block:
    let vty = db.getVtx vids[0]
    if vty.isValid:
      (vty, true)
    else:
      (node.to(VertexRef), false)

  # Verify that all `vids` entries are similar
  for n in 1 ..< vids.len:
    let w = vids[n]
    if lbl != db.top.kMap.getOrVoid(w) or db.top.sTab.hasKey(w):
      return err(MergeHashKeyRevLookUpGarbled)
    if not hasVtx:
      # Prefer an existing node which has all links available, already.
      let u = db.getVtx w
      if u.isValid:
        (vtx, hasVtx) = (u, true)

  # The `vertexID <-> hashLabel` mappings need to be set up now (if any)
  case node.vType:
  of Leaf:

@@ -539,37 +573,30 @@ proc mergeNodeImpl(
  of Extension:
    if node.key[0].isValid:
      let eLbl = HashLabel(root: rootVid, key: node.key[0])
      if hasVtx:
        if not vtx.eVid.isValid:
          return err(MergeNodeVtxDiffersFromExisting)
        db.top.pAmk[eLbl] = vtx.eVid
      else:
        let eVid = db.top.pAmk.getOrVoid eLbl
        if eVid.isValid:
          vtx.eVid = eVid
        else:
      if not hasVtx:
        # Brand new reverse lookup link for this vertex
        vtx.eVid = db.vidAttach eLbl
      elif not vtx.eVid.isValid:
        return err(MergeNodeVtxDiffersFromExisting)
      db.top.pAmk.append(eLbl, vtx.eVid)
  of Branch:
    for n in 0..15:
      if node.key[n].isValid:
        let bLbl = HashLabel(root: rootVid, key: node.key[n])
        if hasVtx:
          if not vtx.bVid[n].isValid:
            return err(MergeNodeVtxDiffersFromExisting)
          db.top.pAmk[bLbl] = vtx.bVid[n]
        else:
          let bVid = db.top.pAmk.getOrVoid bLbl
          if bVid.isValid:
            vtx.bVid[n] = bVid
          else:
        if not hasVtx:
          # Brand new reverse lookup link for this vertex
          vtx.bVid[n] = db.vidAttach bLbl
        elif not vtx.bVid[n].isValid:
          return err(MergeNodeVtxDiffersFromExisting)
        db.top.pAmk.append(bLbl, vtx.bVid[n])

  db.top.pPrf.incl vid
  if not hasVtx or db.getKey(vid) != hashKey:
    db.top.sTab[vid] = vtx
  for w in vids:
    db.top.pPrf.incl w
    if not hasVtx or db.getKey(w) != hashKey:
      db.top.sTab[w] = vtx.dup
  db.top.dirty = true # Modified top level cache

  ok vid
  ok()

# ------------------------------------------------------------------------------
# Public functions

@@ -645,25 +672,22 @@ proc merge*(
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
  ## object.
  let lty = LeafTie(root: root, path: ? path.initNibbleRange.pathToTag)
  let lty = LeafTie(root: root, path: ? path.pathToTag)
  db.merge(lty, payload).to(typeof result)

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Leaf item to add to the database
    data: openArray[byte];             # Payload value
    data: openArray[byte];             # Raw data payload value
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
  ## object. The payload argument `data` will be stored as `RlpData` if
  ## the `root` argument is `VertexID(1)`, and as `RawData` otherwise.
  let pyl = if root == VertexID(1): PayloadRef(pType: RlpData, rlpBlob: @data)
            else: PayloadRef(pType: RawData, rawBlob: @data)
  db.merge(root, path, pyl)
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`.
  ## The argument `data` is stored as-is as a `RawData` payload value.
  db.merge(root, path, PayloadRef(pType: RawData, rawBlob: @data))

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leaf: LeafTiePayload               # Leaf item to add to the database
    leaf: LeafTiePayload;              # Leaf item to add to the database
      ): Result[bool,AristoError] =
  ## Variant of `merge()`. This function will not indicate if the leaf
  ## was cached, already.

@@ -691,7 +715,7 @@ proc merge*(
    path: PathID;                      # Path into database
    rlpData: openArray[byte];          # RLP encoded payload data
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for storing a single item with implicte state root
  ## Variant of `merge()` for storing a single item with implicit state root
  ## argument `VertexID(1)`.
  ##
  db.merge(

@@ -714,6 +738,19 @@ proc merge*(
  ## into the `Aristo Trie` database. This function is intended to be used with
  ## the proof nodes as returned by `snap/1` messages.
  ##
  proc update(
      seen: var Table[HashKey,NodeRef];
      todo: var KeyedQueueNV[NodeRef];
      key: HashKey;
        ) {.gcsafe, raises: [RlpError].} =
    ## Check for embedded nodes, i.e. fully encoded node instead of a hash
    if key.isValid and key.len < 32:
      let lid = @key.digestTo(HashKey)
      if not seen.hasKey lid:
        let node = @key.decode(NodeRef)
        discard todo.append node
        seen[lid] = node

  if not rootVid.isValid:
    return (0,0,MergeRootVidInvalid)
  let rootKey = db.getKey rootVid

@@ -725,9 +762,25 @@ proc merge*(
  for w in proof:
    let
      key = w.Blob.digestTo(HashKey)
      node = w.Blob.decode(NodeRef)
      node = rlp.decode(w.Blob,NodeRef)
    if node.error != AristoError(0):
      return (0,0,node.error)
    nodeTab[key] = node

    # Check for embedded nodes, i.e. fully encoded node instead of a hash
    var embNodes: KeyedQueueNV[NodeRef]
    discard embNodes.append node
    while true:
      let node = embNodes.shift.valueOr: break
      case node.vType:
      of Leaf:
        discard
      of Branch:
        for n in 0 .. 15:
          nodeTab.update(embNodes, node.key[n])
      of Extension:
        nodeTab.update(embNodes, node.key[0])

  # Create a table with back links
  var
    backLink: Table[HashKey,HashKey]

@@ -761,7 +814,7 @@ proc merge*(
      nodeKey = w
    while nodeKey.isValid and nodeTab.hasKey nodeKey:
      chain.add nodeKey
      nodeKey = backLink.getOrDefault(nodeKey, VOID_HASH_KEY)
      nodeKey = backLink.getOrVoid nodeKey
    if 0 < chain.len and chain[^1] == rootKey:
      chains.add chain

@@ -769,9 +822,9 @@ proc merge*(
  block:
    let
      lbl = HashLabel(root: rootVid, key: rootKey)
      vid = db.top.pAmk.getOrVoid lbl
    if not vid.isValid:
      db.top.pAmk[lbl] = rootVid
      vids = db.top.pAmk.getOrVoid lbl
    if not vids.isValid:
      db.top.pAmk.append(lbl, rootVid)
      db.top.dirty = true # Modified top level cache

  # Process over chains in reverse mode starting with the root node. This

@@ -782,13 +835,9 @@ proc merge*(
  # Process the root ID which is common to all chains
  for chain in chains:
    for key in chain.reversed:
      if key in seen:
        discard
      else:
      if key notin seen:
        seen.incl key
        let
          node = nodeTab.getOrDefault(key, NodeRef(nil))
          rc = db.mergeNodeImpl(key, node, rootVid)
        let rc = db.mergeNodeImpl(key, nodeTab.getOrVoid key, rootVid)
        if rc.isOk:
          merged.inc
        elif rc.error == MergeHashKeyCachedAlready:

@@ -800,7 +849,7 @@ proc merge*(

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    rootKey: HashKey;                  # Merkle hash for root
    rootKey: Hash256;                  # Merkle hash for root
    rootVid = VertexID(0)              # Optionally, force root vertex ID
      ): Result[VertexID,AristoError] =
  ## Set up a `rootKey` associated with a vertex ID.

@@ -820,28 +869,30 @@ proc merge*(
  if not rootKey.isValid:
    return err(MergeRootKeyInvalid)

  let rootLink = rootKey.to(HashKey)

  if rootVid.isValid and rootVid != VertexID(1):
    let key = db.getKey rootVid
    if key == rootKey:
    if key.to(Hash256) == rootKey:
      return ok rootVid

    if not key.isValid:
      db.vidAttach(HashLabel(root: rootVid, key: rootKey), rootVid)
      db.vidAttach(HashLabel(root: rootVid, key: rootLink), rootVid)
      return ok rootVid
  else:
    let key = db.getKey VertexID(1)
    if key == rootKey:
    if key.to(Hash256) == rootKey:
      return ok VertexID(1)

    # Otherwise assign unless valid
    if not key.isValid:
      db.vidAttach(HashLabel(root: VertexID(1), key: rootKey), VertexID(1))
      db.vidAttach(HashLabel(root: VertexID(1), key: rootLink), VertexID(1))
      return ok VertexID(1)

  # Create and assign a new root key
  if not rootVid.isValid:
    let vid = db.vidFetch
    db.vidAttach(HashLabel(root: vid, key: rootKey), vid)
    db.vidAttach(HashLabel(root: vid, key: rootLink), vid)
    return ok vid

  err(MergeRootKeyDiffersForVid)
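A useful way to read `mergeNodeImpl` above is via its two top-layer caches: `kMap` maps a vertex ID to its hash label, and `pAmk` is the reverse multimap, which after this change may hold several vertex IDs per label because short nodes are linked by their RLP encoding rather than a unique hash. A toy model of the invariant (integers stand in for `VertexID` and `HashLabel`):

import std/[tables, sets]

var
  kMap: Table[int, int]          # vertex -> label
  pAmk: Table[int, HashSet[int]] # label  -> set of vertices

proc attach(vid, lbl: int) =
  kMap[vid] = lbl
  pAmk.mgetOrPut(lbl, initHashSet[int]()).incl vid

attach(1, 100)
attach(2, 100) # small nodes may share one label
for vid in pAmk[100]:
  assert kMap[vid] == 100 # the two caches stay mutually consistent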
@@ -375,9 +375,9 @@ proc nearbyNextLeafTie(
  if 0 < hike.legs.len:
    if hike.legs[^1].wp.vtx.vType != Leaf:
      return err((hike.legs[^1].wp.vid,NearbyLeafExpected))
    let rc = hike.legsTo(NibblesSeq).pathToKey
    let rc = hike.legsTo(NibblesSeq).pathToTag
    if rc.isOk:
      return ok rc.value.to(PathID)
      return ok rc.value
    return err((VertexID(0),rc.error))

  err((VertexID(0),NearbyLeafExpected))
@@ -36,34 +36,51 @@ func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq
# Public functions
# ------------------------------------------------------------------------------

func pathAsBlob*(keyOrTag: HashKey|PathID): Blob =
  keyOrTag.to(NibblesSeq).hexPrefixEncode(isLeaf=true)
func pathAsBlob*(tag: PathID): Blob =
  ## Convert the `tag` argument to a sequence of an even number of nibbles
  ## represented by a `Blob`. If the argument `tag` represents an odd number
  ## of nibbles, a zero nibble is appended.
  ##
  ## This function is useful only if there is a tacit agreement that all paths
  ## used to index database leaf values can be represented as `Blob`, i.e.
  ## `PathID` type paths with an even number of nibbles.
  if 0 < tag.length:
    let key = @(tag.pfx.UInt256.toBytesBE)
    if 64 <= tag.length:
      return key
    else:
      return key[0 .. (tag.length + 1) div 2]

func pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
  var key: ByteArray32
  if partPath.len == 64:
    # Trailing dummy nibbles (aka no nibbles) force a nibble seq reorg
    let path = (partPath & EmptyNibbleSeq).getBytes()
    (addr key[0]).copyMem(unsafeAddr path[0], 32)
    return ok(key.HashKey)
  err(PathExpected64Nibbles)

func pathToKey*(
    partPath: openArray[byte];
      ): Result[HashKey,AristoError] =
  let (isLeaf,pathSegment) = partPath.hexPrefixDecode
  if isLeaf:
    return pathSegment.pathToKey()
  err(PathExpectedLeaf)
func pathAsHEP*(tag: PathID; isLeaf = false): Blob =
  ## Convert the `tag` argument to a hex encoded partial path as used in `eth`
  ## or `snap` protocol where full paths of nibble length 64 are encoded as 32
  ## byte `Blob` and non-leaf partial paths are *compact encoded* (i.e. per
  ## the Ethereum wire protocol.)
  if 64 <= tag.length:
    @(tag.pfx.UInt256.toBytesBE)
  else:
    tag.to(NibblesSeq).hexPrefixEncode(isLeaf=true)

func pathToTag*(partPath: NibblesSeq): Result[PathID,AristoError] =
  ## Nickname `tag` for `PathID`
  ## Convert the argument `partPath` to a `PathID` type value.
  if partPath.len == 0:
    return ok PathID()
  if partPath.len <= 64:
    return ok PathID(
      pfx: UInt256.fromBytesBE partPath.pathPfxPad(0).getBytes(),
      length: partPath.len.uint8)
  err(PathAtMost64Nibbles)

func pathToTag*(partPath: openArray[byte]): Result[PathID,AristoError] =
  ## Variant of `pathToTag()`
  if partPath.len == 0:
    return ok PathID()
  if partPath.len <= 32:
    return ok PathID(
      pfx: UInt256.fromBytesBE @partPath & 0u8.repeat(32-partPath.len),
      length: 2 * partPath.len.uint8)
  err(PathAtMost64Nibbles)

# --------------------

func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =

@@ -85,14 +102,6 @@ func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
  let nope = seq[byte].default.initNibbleRange
  result = pfx.slice(0,64) & nope # nope forces re-alignment

func pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
  ## Variant of `pathPfxPad()`.
  ##
  ## Extend (or cut) the argument nibbles sequence `pfx` for generating a
  ## `HashKey`.
  let bytes = pfx.pathPfxPad(dblNibble).getBytes
  (addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
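The `pathToTag()` variants above encode a simple length rule: a byte path of n bytes becomes a `PathID` of 2*n nibbles, right-padded with zeros to 32 bytes before the big-endian conversion. A back-of-envelope check of that arithmetic (plain sequences only; the values are made up):

import std/sequtils

let partPath = @[0x12'u8, 0x34, 0x56] # 3 bytes = 6 nibbles
let padded = partPath & 0'u8.repeat(32 - partPath.len)
assert padded.len == 32 # what feeds `UInt256.fromBytesBE`
assert 2 * partPath.len == 6 # value of the `length` field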
@@ -0,0 +1,183 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  std/sequtils,
  eth/[common, rlp, trie/nibbles],
  results,
  "."/[aristo_constants, aristo_desc, aristo_get]

# Annotation helper
{.pragma: noRaise, gcsafe, raises: [].}

type
  ResolveVidFn = proc(vid: VertexID): Result[HashKey,AristoError] {.noRaise.}
    ## Resolve storage root vertex ID

# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------

proc aristoError(error: AristoError): NodeRef =
  ## Allows returning de
  NodeRef(vType: Leaf, error: error)

proc serialise(
    pyl: PayloadRef;
    getKey: ResolveVidFn;
      ): Result[Blob,(VertexID,AristoError)] =
  ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
  ## account type, otherwise pass the data as is.
  ##
  case pyl.pType:
  of RawData:
    ok pyl.rawBlob
  of RlpData:
    ok pyl.rlpBlob
  of AccountData:
    let
      vid = pyl.account.storageID
      key = block:
        if vid.isValid:
          vid.getKey.valueOr:
            let w = (vid,error)
            return err(w)
        else:
          VOID_HASH_KEY

    ok rlp.encode Account(
      nonce: pyl.account.nonce,
      balance: pyl.account.balance,
      storageRoot: key.to(Hash256),
      codeHash: pyl.account.codeHash)

# ------------------------------------------------------------------------------
# Public RLP transcoder mixins
# ------------------------------------------------------------------------------

proc read*(rlp: var Rlp; T: type NodeRef): T {.gcsafe, raises: [RlpError].} =
  ## Mixin for RLP reader, see `fromRlpRecord()` for an encoder with detailed
  ## error return code (if needed.) This reader is a jazzed up version which
  ## reports some particular errors in the `Dummy` type node.
  if not rlp.isList:
    # Otherwise `rlp.items` would raise a `Defect`
    return aristoError(Rlp2Or17ListEntries)

  var
    blobs = newSeq[Blob](2)  # temporary, cache
    links: array[16,HashKey] # reconstruct branch node
    top = 0                  # count entries and positions

  # Collect lists of either 2 or 17 blob entries.
  for w in rlp.items:
    case top
    of 0, 1:
      if not w.isBlob:
        return aristoError(RlpBlobExpected)
      blobs[top] = rlp.read(Blob)
    of 2 .. 15:
      let blob = rlp.read(Blob)
      links[top] = HashKey.fromBytes(blob).valueOr:
        return aristoError(RlpBranchHashKeyExpected)
    of 16:
      if not w.isBlob or 0 < rlp.read(Blob).len:
        return aristoError(RlpEmptyBlobExpected)
    else:
      return aristoError(Rlp2Or17ListEntries)
    top.inc

  # Verify extension data
  case top
  of 2:
    if blobs[0].len == 0:
      return aristoError(RlpNonEmptyBlobExpected)
    let (isLeaf, pathSegment) = hexPrefixDecode blobs[0]
    if isLeaf:
      return NodeRef(
        vType: Leaf,
        lPfx: pathSegment,
        lData: PayloadRef(
          pType: RawData,
          rawBlob: blobs[1]))
    else:
      var node = NodeRef(
        vType: Extension,
        ePfx: pathSegment)
      node.key[0] = HashKey.fromBytes(blobs[1]).valueOr:
        return aristoError(RlpExtHashKeyExpected)
      return node
  of 17:
    for n in [0,1]:
      links[n] = HashKey.fromBytes(blobs[n]).valueOr:
        return aristoError(RlpBranchHashKeyExpected)
    return NodeRef(
      vType: Branch,
      key: links)
  else:
    discard

  aristoError(Rlp2Or17ListEntries)


proc append*(writer: var RlpWriter; node: NodeRef) =
  ## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
  ## list.
  func addHashKey(w: var RlpWriter; key: HashKey) =
    if 1 < key.len and key.len < 32:
      w.appendRawBytes @key
    else:
      w.append @key

  if node.error != AristoError(0):
    writer.startList(0)
  else:
    case node.vType:
    of Branch:
      writer.startList(17)
      for n in 0..15:
        writer.addHashKey node.key[n]
      writer.append EmptyBlob

    of Extension:
      writer.startList(2)
      writer.append node.ePfx.hexPrefixEncode(isleaf = false)
      writer.addHashKey node.key[0]

    of Leaf:
      proc getKey0(vid: VertexID): Result[HashKey,AristoError] {.noRaise.} =
        ok(node.key[0]) # always succeeds

      writer.startList(2)
      writer.append node.lPfx.hexPrefixEncode(isleaf = true)
      writer.append node.lData.serialise(getKey0).value

# ---------------------

proc digestTo*(node: NodeRef; T: type HashKey): T =
  ## Convert the argument `node` to the corresponding Merkle hash key
  rlp.encode(node).digestTo(HashKey)

proc serialise*(
    db: AristoDbRef;
    pyl: PayloadRef;
      ): Result[Blob,(VertexID,AristoError)] =
  ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
  ## account type, otherwise pass the data as is.
  ##
  proc getKey(vid: VertexID): Result[HashKey,AristoError] =
    db.getKeyRc(vid)

  pyl.serialise getKey

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
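The nested `addHashKey` helper in `append()` above is where the short-node rule from the commit message reaches the RLP layer: a key of length 2..31 is itself an RLP encoded node and is spliced in verbatim, while 32 byte hashes (and empty keys) get ordinary RLP string framing. A sketch of the distinction using the same `eth/rlp` writer calls the diff uses (the byte values are made up):

import eth/rlp

var w = initRlpWriter()
w.startList(2)
w.append @[byte 0xde, 0xad]               # framed as an RLP string
w.appendRawBytes @[byte 0xc2, 0x01, 0x02] # already valid RLP, emitted as-is
let blob = w.finish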
@@ -0,0 +1,67 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Sign Helper
## ========================
##
{.push raises: [].}

import
  eth/common,
  results,
  "."/[aristo_constants, aristo_desc, aristo_get, aristo_hashify, aristo_init,
    aristo_merge, aristo_vid]

# ------------------------------------------------------------------------------
# Public functions, signature generator
# ------------------------------------------------------------------------------

proc merkleSignBegin*(): MerkleSignRef =
  ## Start signature calculator for a list of key-value items.
  let
    db = AristoDbRef.init VoidBackendRef
    vid = db.vidFetch # => 2
  MerkleSignRef(
    root: vid,
    db: db)

proc merkleSignAdd*(
    sdb: MerkleSignRef;
    key: openArray[byte];
    val: openArray[byte];
      ) =
  ## Add key-value item to the signature list. The order of the items to add
  ## is irrelevant.
  if sdb.error == AristoError(0):
    sdb.count.inc
    discard sdb.db.merge(sdb.root, key, val).valueOr:
      sdb.`error` = error
      sdb.errKey = @key
      return

proc merkleSignCommit*(
    sdb: MerkleSignRef;
      ): Result[HashKey,(Blob,AristoError)] =
  ## Finish with the list, calculate signature and return it.
  if sdb.count == 0:
    return ok VOID_HASH_KEY
  if sdb.error != AristoError(0):
    return err((sdb.errKey, sdb.error))
  discard sdb.db.hashify().valueOr:
    let w = (EmptyBlob, error[1])
    return err(w)
  let hash = sdb.db.getKeyRc(sdb.root).valueOr:
    let w = (EmptyBlob, error)
    return err(w)
  ok hash

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
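A possible use of the signature calculator above (illustrative only, not lifted from the test suite):

let sdb = merkleSignBegin()
sdb.merkleSignAdd(@[byte 1, 2, 3], @[byte 10]) # insertion order is irrelevant
sdb.merkleSignAdd(@[byte 4, 5], @[byte 20, 21])
let sig = sdb.merkleSignCommit().expect "merkle signature"
echo sig.to(Hash256) # Merkle root over the key-value list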
@@ -92,10 +92,15 @@ proc toNode*(
    vtx: VertexRef;                    # Vertex to convert
    db: AristoDbRef;                   # Database, top layer
    stopEarly = true;                  # Full list of missing links if `false`
    beKeyOk = false;                   # Allow fetching DB backend keys
      ): Result[NodeRef,seq[VertexID]] =
  ## Convert the argument vertex `vtx` to a node type. Missing Merkle hash
  ## keys are searched for on the argument database `db`.
  ##
  ## If backend keys are allowed by passing `beKeyOk` as `true`, small nodes
  ## are not compactly embedded into the parent node; the hash reference is
  ## always used instead.
  ##
  ## On error, at least the vertex ID of the first missing Merkle hash key is
  ## returned. If the argument `stopEarly` is set `false`, all missing Merkle
  ## hash keys are returned.

@@ -108,10 +113,13 @@ proc toNode*(
      let vid = vtx.lData.account.storageID
      if vid.isValid:
        let key = db.getKey vid
        if not key.isValid:
        if key.isValid:
          node.key[0] = key
        else:
          return err(@[vid])
        node.key[0] = key
    return ok node

  of Branch:
    let node = NodeRef(vType: Branch, bVid: vtx.bVid)
    var missing: seq[VertexID]

@@ -121,24 +129,23 @@ proc toNode*(
        let key = db.getKey vid
        if key.isValid:
          node.key[n] = key
        elif stopEarly:
          return err(@[vid])
        else:
          missing.add vid
          if stopEarly:
            break
      else:
        node.key[n] = VOID_HASH_KEY
    if 0 < missing.len:
      return err(missing)
    return ok node

  of Extension:
    let
      vid = vtx.eVid
      key = db.getKey vid
    if key.isValid:
    if not key.isValid:
      return err(@[vid])
    let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
    node.key[0] = key
    return ok node
    return err(@[vid])

# ------------------------------------------------------------------------------
# End
@@ -97,7 +97,7 @@ proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =

proc vidAttach*(db: AristoDbRef; lbl: HashLabel; vid: VertexID) =
  ## Attach (i.e. register) a Merkle hash key to a vertex ID.
  db.top.pAmk[lbl] = vid
  db.top.pAmk.append(lbl, vid)
  db.top.kMap[vid] = lbl
  db.top.dirty = true # Modified top level cache
@@ -77,11 +77,10 @@ template mapRlpException(db: LegacyDbRef; info: static[string]; code: untyped) =
  try:
    code
  except RlpError as e:
    return err(db.bless LegacyCoreDbError(
      error: RlpException,
    return err(db.bless(RlpException, LegacyCoreDbError(
      ctx: info,
      name: $e.name,
      msg: e.msg))
      msg: e.msg)))

template reraiseRlpException(info: static[string]; code: untyped) =
  try:

@@ -183,7 +182,10 @@ proc kvtMethods(db: LegacyDbRef): CoreDbKvtFns =
    db.bless(LegacyCoreDbKvtBE(tdb: tdb)),

    getFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
      ok(tdb.get(k)),
      let data = tdb.get(k)
      if 0 < data.len:
        return ok(data)
      err(db.bless(KvtNotFound, LegacyCoreDbError(ctx: "getFn()"))),

    delFn: proc(k: openArray[byte]): CoreDbRc[void] =
      tdb.del(k)

@@ -193,7 +195,7 @@ proc kvtMethods(db: LegacyDbRef): CoreDbKvtFns =
      tdb.put(k,v)
      ok(),

    containsFn: proc(k: openArray[byte]): CoreDbRc[bool] =
    hasKeyFn: proc(k: openArray[byte]): CoreDbRc[bool] =
      ok(tdb.contains(k)),

    pairsIt: iterator(): (Blob, Blob) =

@@ -207,21 +209,24 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
    db.bless(LegacyCoreDbMptBE(mpt: mpt.trie)),

    fetchFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
      db.mapRlpException("legacy/mpt/get()"):
        return ok(mpt.trie.get(k)),
      db.mapRlpException("fetchFn()"):
        let data = mpt.trie.get(k)
        if 0 < data.len:
          return ok(data)
      err(db.bless(MptNotFound, LegacyCoreDbError(ctx: "fetchFn()"))),

    deleteFn: proc(k: openArray[byte]): CoreDbRc[void] =
      db.mapRlpException("legacy/mpt/del()"):
      db.mapRlpException("deleteFn()"):
        mpt.trie.del(k)
      ok(),

    mergeFn: proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
      db.mapRlpException("legacy/mpt/put()"):
      db.mapRlpException("mergeFn()"):
        mpt.trie.put(k,v)
      ok(),

    containsFn: proc(k: openArray[byte]): CoreDbRc[bool] =
      db.mapRlpException("legacy/mpt/put()"):
    hasPathFn: proc(k: openArray[byte]): CoreDbRc[bool] =
      db.mapRlpException("hasPathFn()"):
        return ok(mpt.trie.contains(k)),

    rootVidFn: proc(): CoreDbVidRef =

@@ -231,15 +236,14 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
      mpt.trie.isPruning,

    pairsIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
      reraiseRlpException("legacy/mpt/pairs()"):
      reraiseRlpException("pairsIt()"):
        for k,v in mpt.trie.pairs():
          yield (k,v),

    replicateIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
      reraiseRlpException("legacy/mpt/replicate()"):
      reraiseRlpException("replicateIt()"):
        for k,v in mpt.trie.replicate():
          yield (k,v)
  )
          yield (k,v))

proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
  ## Hexary trie database handlers

@@ -248,22 +252,24 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
    db.bless(LegacyCoreDbAccBE(mpt: mpt.trie)),

    fetchFn: proc(k: EthAddress): CoreDbRc[CoreDbAccount] =
      const info = "legacy/mpt/getAccount()"
      db.mapRlpException info:
        return ok mpt.trie.get(k.keccakHash.data).toCoreDbAccount(db),
      db.mapRlpException "fetchFn()":
        let data = mpt.trie.get(k.keccakHash.data)
        if 0 < data.len:
          return ok data.toCoreDbAccount(db)
      err(db.bless(AccNotFound, LegacyCoreDbError(ctx: "fetchFn()"))),

    deleteFn: proc(k: EthAddress): CoreDbRc[void] =
      db.mapRlpException("legacy/mpt/del()"):
      db.mapRlpException("deleteFn()"):
        mpt.trie.del(k.keccakHash.data)
      ok(),

    mergeFn: proc(k: EthAddress; v: CoreDbAccount): CoreDbRc[void] =
      db.mapRlpException("legacy/mpt/put()"):
      db.mapRlpException("mergeFn()"):
        mpt.trie.put(k.keccakHash.data, rlp.encode v.toAccount)
      ok(),

    containsFn: proc(k: EthAddress): CoreDbRc[bool] =
      db.mapRlpException("legacy/mpt/put()"):
    hasPathFn: proc(k: EthAddress): CoreDbRc[bool] =
      db.mapRlpException("hasPath()"):
        return ok(mpt.trie.contains k.keccakHash.data),

    rootVidFn: proc(): CoreDbVidRef =

@@ -344,7 +350,7 @@ proc baseMethods(
      if createOk or tdb.contains(root.data):
        return ok(db.bless LegacyCoreDbVid(vHash: root))

      err(db.bless LegacyCoreDbError(error: RootNotFound, ctx: "getRoot()")),
      err(db.bless(RootNotFound, LegacyCoreDbError(ctx: "getRoot()"))),

    newKvtFn: proc(): CoreDxKvtRef =
      db.kvt,
@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)

@@ -16,6 +16,7 @@ import
  eth/common,
  results,
  "../.."/[constants, errors],
  ../aristo/aristo_constants, # `EmptyBlob`
  ./base/[base_desc, validate]

export

@@ -45,7 +46,7 @@ else:
  const AutoValidateDescriptors = true

const
  ProvideCoreDbLegacyAPI = true
  ProvideCoreDbLegacyAPI* = true # and false

  EnableApiTracking = true and false
    ## When enabled, functions using this tracking facility need to import

@@ -106,16 +107,14 @@ template itNotImplemented(db: CoreDbRef, name: string) =
when EnableApiTracking:
  import std/[sequtils, strutils], stew/byteutils

  template newApiTxt(info: static[string]): static[string] =
    logTxt "new API " & info

  template legaApiTxt(info: static[string]): static[string] =
    logTxt "legacy API " & info

  func getParent(w: CoreDxChldRefs): auto =
    ## Avoid an infinite call to `parent()` in `ifTrack*Api()` templates
    w.parent

  when ProvideCoreDbLegacyAPI:
    template legaApiTxt(info: static[string]): static[string] =
      logTxt "legacy API " & info

    template setTrackLegaApiOnly(w: CoreDbChldRefs|CoreDbRef) =
      when typeof(w) is CoreDbRef:
        let db = w

@@ -135,6 +134,9 @@ when EnableApiTracking:
      if db.trackLegaApi:
        code

  template newApiTxt(info: static[string]): static[string] =
    logTxt "new API " & info

  template ifTrackNewApi(w: CoreDxChldRefs|CoreDbRef; code: untyped) =
    block:
      when typeof(w) is CoreDbRef:

@@ -190,8 +192,9 @@ when EnableApiTracking:
  proc toStr(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "captRef"

else:
  when ProvideCoreDbLegacyAPI:
    template setTrackLegaApiOnly(w: CoreDbChldRefs|CoreDbRef) = discard
  template ifTrackLegaApi(w: CoreDbChldRefs|CoreDbRef; code: untyped) = discard
    template ifTrackLegaApi(w: CoreDbChldRefs|CoreDbRef; c: untyped) = discard
  template ifTrackNewApi(w: CoreDxChldRefs|CoreDbRef; code: untyped) = discard

# ---------

@@ -214,9 +217,9 @@ func toCoreDxPhkRef(mpt: CoreDxMptRef): CoreDxPhkRef =
    proc(k:openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
      mpt.methods.mergeFn(k.keccakHash.data, v)

  result.methods.containsFn =
  result.methods.hasPathFn =
    proc(k: openArray[byte]): CoreDbRc[bool] =
      mpt.methods.containsFn(k.keccakHash.data)
      mpt.methods.hasPathFn(k.keccakHash.data)

  result.methods.pairsIt =
    iterator(): (Blob, Blob) {.apiRaise.} =

@@ -244,6 +247,7 @@ proc bless*(db: CoreDbRef): CoreDbRef =
  db.ifTrackNewApi: info newApiTxt "CoreDbRef.init()", dbType=db.dbType
  db

proc bless*(db: CoreDbRef; child: CoreDbVidRef): CoreDbVidRef =
  ## Complete sub-module descriptor, fill in `parent` and activate it.
  child.parent = db

@@ -252,6 +256,7 @@ proc bless*(db: CoreDbRef; child: CoreDbVidRef): CoreDbVidRef =
    child.validate
  child

proc bless*(db: CoreDbRef; child: CoreDxKvtRef): CoreDxKvtRef =
  ## Complete sub-module descriptor, fill in `parent` and de-activate
  ## iterator for persistent database.

@@ -267,7 +272,7 @@ proc bless*(db: CoreDbRef; child: CoreDxKvtRef): CoreDxKvtRef =
  child

proc bless*[T: CoreDxTrieRelated | CoreDbErrorRef | CoreDbBackends](
proc bless*[T: CoreDxTrieRelated | CoreDbBackends](
    db: CoreDbRef;
    child: T;
      ): auto =

@@ -277,6 +282,18 @@ proc bless*[T: CoreDxTrieRelated | CoreDbErrorRef | CoreDbBackends](
    child.validate
  child

proc bless*(
    db: CoreDbRef;
    error: CoreDbErrorCode;
    child: CoreDbErrorRef;
      ): CoreDbErrorRef =
  child.parent = db
  child.error = error
  when AutoValidateDescriptors:
    child.validate
  child

# ------------------------------------------------------------------------------
# Public main descriptor methods
# ------------------------------------------------------------------------------

@@ -315,7 +332,7 @@ proc finish*(db: CoreDbRef; flush = false) =
proc `$$`*(e: CoreDbErrorRef): string =
  ## Pretty print error symbol, note that this directive may have side effects
  ## as it calls a backend function.
  result = e.parent.methods.errorPrintFn(e)
  result = $e.error & "(" & e.parent.methods.errorPrintFn(e) & ")"
  e.ifTrackNewApi: info newApiTxt "$$()", result

proc hash*(vid: CoreDbVidRef): Result[Hash256,void] =

@@ -372,7 +389,7 @@ proc getRoot*(
  ## This function is intended to open a virtual accounts trie database as in:
  ## ::
  ##   proc openAccountLedger(db: CoreDbRef, rootHash: Hash256): CoreDxMptRef =
  ##     let root = db.getRoot(rootHash).isOkOr:
  ##     let root = db.getRoot(rootHash).valueOr:
  ##       # some error handling
  ##       return
  ##     db.newAccMpt root

@@ -391,10 +408,21 @@ proc newKvt*(db: CoreDbRef): CoreDxKvtRef =
  db.ifTrackNewApi: info newApiTxt "newKvt()"

proc get*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
  ## This function always returns a non-empty `Blob` or an error code.
  result = kvt.methods.getFn key
  kvt.ifTrackNewApi:
    info newApiTxt "kvt/get()", key=key.toStr, result=result.toStr

proc getOrEmpty*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
  ## This function sort of mimics the behaviour of the legacy database
  ## returning an empty `Blob` if the argument `key` is not found on the
  ## database.
  result = kvt.methods.getFn key
  if result.isErr and result.error.error == KvtNotFound:
    result = CoreDbRc[Blob].ok(EmptyBlob)
  kvt.ifTrackNewApi:
    info newApiTxt "kvt/getOrEmpty()", key=key.toStr, result=result.toStr

proc del*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[void] =
  result = kvt.methods.delFn key
  kvt.ifTrackNewApi:
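Taken together, the lookup flavours above give callers an explicit choice. A minimal sketch of the intended call pattern (illustrative only; it assumes some `db: CoreDbRef` instance and uses the `get`, `getOrEmpty`, `hasKey` and `del` functions defined in this diff):

proc showKey(db: CoreDbRef; key: openArray[byte]) =
  let kvt = db.newKvt()
  # strict variant: a missing key is an error ...
  let strict = kvt.get(key)
  if strict.isErr:
    echo "not found: ", $$strict.error
  # ... legacy-style variant: a missing key yields an empty Blob
  let lax = kvt.getOrEmpty(key).expect "valid kvt handle"
  echo "value size ", lax.len
  # membership test without fetching the value
  discard kvt.hasKey(key).expect "valid kvt handle"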
|
@ -409,10 +437,11 @@ proc put*(
|
|||
kvt.ifTrackNewApi: info newApiTxt "kvt/put()",
|
||||
key=key.toStr, val=val.toSeq.toStr, result=result.toStr
|
||||
|
||||
proc contains*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[bool] =
|
||||
result = kvt.methods.containsFn key
|
||||
proc hasKey*(kvt: CoreDxKvtRef; key: openArray[byte]): CoreDbRc[bool] =
|
||||
## Would be named `contains` if it returned `bool` rather than `Result[]`.
|
||||
result = kvt.methods.hasKeyFn key
|
||||
kvt.ifTrackNewApi:
|
||||
info newApiTxt "kvt/contains()", key=key.toStr, result=result.toStr
|
||||
info newApiTxt "kvt/hasKey()", key=key.toStr, result=result.toStr
|
||||
|
||||
iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
|
||||
## Iterator supported on memory DB (otherwise implementation dependent)
|
||||
|
@ -427,9 +456,16 @@ iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
|
|||
proc newMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxMptRef =
|
||||
## Constructor, will defect on failure (note that the legacy backend
|
||||
## always succeeds)
|
||||
result = db.methods.newMptFn(root, prune).valueOr: raiseAssert $$error
|
||||
result = db.methods.newMptFn(root, prune).valueOr:
|
||||
raiseAssert $$error
|
||||
db.ifTrackNewApi: info newApiTxt "newMpt", root=root.toStr, prune
|
||||
|
||||
proc newMpt*(db: CoreDbRef; prune = true): CoreDxMptRef =
|
||||
## Shortcut for `db.newMpt CoreDbVidRef()`
|
||||
result = db.methods.newMptFn(CoreDbVidRef(), prune).valueOr:
|
||||
raiseAssert $$error
|
||||
db.ifTrackNewApi: info newApiTxt "newMpt", prune
|
||||
|
||||
proc newAccMpt*(db: CoreDbRef; root: CoreDbVidRef; prune = true): CoreDxAccRef =
|
||||
## Similar to `newMpt()` for handling accounts. Although this sub-trie can
|
||||
## be emulated by means of `newMpt(..).toPhk()`, it is recommended using
|
||||
|
@ -471,11 +507,21 @@ proc rootVid*(dsc: CoreDxTrieRefs | CoreDxAccRef): CoreDbVidRef =
# ------------------------------------------------------------------------------

proc fetch*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] =
  ## Fetch data from the argument `trie`
  ## Fetch data from the argument `trie`. The function always returns a
  ## non-empty `Blob` or an error code.
  result = trie.methods.fetchFn(key)
  trie.ifTrackNewApi:
    info newApiTxt "trie/fetch()", key=key.toStr, result=result.toStr

proc fetchOrEmpty*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[Blob] =
  ## This function returns an empty `Blob` if the argument `key` is not found
  ## on the database.
  result = trie.methods.fetchFn(key)
  if result.isErr and result.error.error == MptNotFound:
    result = ok(EmptyBlob)
  trie.ifTrackNewApi:
    info newApiTxt "trie/fetchOrEmpty()", key=key.toStr, result=result.toStr

proc delete*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[void] =
  result = trie.methods.deleteFn key
  trie.ifTrackNewApi:

@ -486,7 +532,7 @@ proc merge*(
    key: openArray[byte];
    val: openArray[byte];
      ): CoreDbRc[void] =
  when trie is CoreDbMptRef:
  when trie is CoreDxMptRef:
    const info = "mpt/merge()"
  else:
    const info = "phk/merge()"

@ -494,10 +540,11 @@ proc merge*(
  trie.ifTrackNewApi: info newApiTxt info,
    key=key.toStr, val=val.toSeq.toStr, result=result.toStr

proc contains*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[bool] =
  result = trie.methods.containsFn key
proc hasPath*(trie: CoreDxTrieRefs; key: openArray[byte]): CoreDbRc[bool] =
  ## Would be named `contains` if it returned `bool` rather than `Result[]`.
  result = trie.methods.hasPathFn key
  trie.ifTrackNewApi:
    info newApiTxt "trie/contains()", key=key.toStr, result=result.toStr
    info newApiTxt "trie/hasPath()", key=key.toStr, result=result.toStr
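A short sketch (not from this commit; `db` assumed as above) of the trie-level pair, where `fetch()` errors out on a missing path while `fetchOrEmpty()` falls back to the legacy empty reply:

  let mpt = db.newMpt()
  mpt.merge(@[0.byte], @[7.byte]).expect "merge() failed"
  doAssert mpt.fetch(@[0.byte]).value == @[7.byte]
  doAssert mpt.fetch(@[9.byte]).isErr
  doAssert mpt.fetchOrEmpty(@[9.byte]).value == EmptyBlob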
iterator pairs*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
  ## Trie traversal, only supported for `CoreDxMptRef`

@ -516,7 +563,7 @@ iterator replicate*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
# ------------------------------------------------------------------------------

proc fetch*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[CoreDbAccount] =
  ## Fetch data from the argument `trie`
  ## Fetch the account data for the argument `address` from the `acc` trie.
  result = acc.methods.fetchFn address
  acc.ifTrackNewApi:
    info newApiTxt "acc/fetch()", address=address.toStr, result=result.toStr

@ -535,10 +582,11 @@ proc merge*(
  acc.ifTrackNewApi:
    info newApiTxt "acc/merge()", address=address.toStr, result=result.toStr

proc contains*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[bool] =
  result = acc.methods.containsFn address
proc hasPath*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[bool] =
  ## Would be named `contains` if it returned `bool` rather than `Result[]`.
  result = acc.methods.hasPathFn address
  acc.ifTrackNewApi:
    info newApiTxt "acc/contains()", address=address.toStr, result=result.toStr
    info newApiTxt "acc/hasPath()", address=address.toStr, result=result.toStr

# ------------------------------------------------------------------------------
# Public transaction related methods
@ -630,7 +678,7 @@ when ProvideCoreDbLegacyAPI:
  proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): Blob =
    kvt.setTrackLegaApiOnly
    const info = "kvt/get()"
    result = kvt.distinctBase.get(key).expect info
    result = kvt.distinctBase.getOrEmpty(key).expect info
    kvt.ifTrackLegaApi:
      info legaApiTxt info, key=key.toStr, result=result.toStr

@ -650,7 +698,7 @@ when ProvideCoreDbLegacyAPI:
  proc contains*(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
    kvt.setTrackLegaApiOnly
    const info = "kvt/contains()"
    result = kvt.distinctBase.contains(key).expect info
    result = kvt.distinctBase.hasKey(key).expect info
    kvt.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result

  iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} =

@ -703,7 +751,7 @@ when ProvideCoreDbLegacyAPI:
  proc get*(trie: CoreDbTrieRefs; key: openArray[byte]): Blob =
    trie.setTrackLegaApiOnly
    const info = "trie/get()"
    result = trie.distinctBase.fetch(key).expect "trie/get()"
    result = trie.distinctBase.fetchOrEmpty(key).expect "trie/get()"
    trie.ifTrackLegaApi:
      info legaApiTxt info, key=key.toStr, result=result.toStr

@ -726,7 +774,7 @@ when ProvideCoreDbLegacyAPI:
  proc contains*(trie: CoreDbTrieRefs; key: openArray[byte]): bool =
    trie.setTrackLegaApiOnly
    const info = "trie/contains()"
    result = trie.distinctBase.contains(key).expect info
    result = trie.distinctBase.hasPath(key).expect info
    trie.ifTrackLegaApi: info legaApiTxt info, key=key.toStr, result

  proc rootHash*(trie: CoreDbTrieRefs): Hash256 =
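A sketch (not from this commit; `db` assumed as above) of how these legacy shims keep plain-`bool`/`Blob` call sites working by unwrapping the new `Result[]` API internally:

  when ProvideCoreDbLegacyAPI:
    let kvt = db.kvt                  # legacy `CoreDbKvtRef` handle
    if kvt.contains(@[1.byte]):       # internally `hasKey(..).expect ..`
      doAssert 0 < kvt.get(@[1.byte]).len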
@ -39,10 +39,12 @@ type
    codeHash*: Hash256

  CoreDbErrorCode* = enum
    Unspecified = 0
    Unset = 0
    Unspecified
    RlpException
    KvtNotFound
    MptNotFound
    AccNotFound
    RootNotFound

  CoreDbCaptFlags* {.pure.} = enum

@ -101,7 +103,7 @@ type
  CoreDbKvtDelFn* = proc(k: openArray[byte]): CoreDbRc[void] {.noRaise.}
  CoreDbKvtPutFn* =
    proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
  CoreDbKvtContainsFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
  CoreDbKvtHasKeyFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
  CoreDbKvtPairsIt* = iterator(): (Blob,Blob) {.apiRaise.}

  CoreDbKvtFns* = object

@ -110,7 +112,7 @@ type
    getFn*: CoreDbKvtGetFn
    delFn*: CoreDbKvtDelFn
    putFn*: CoreDbKvtPutFn
    containsFn*: CoreDbKvtContainsFn
    hasKeyFn*: CoreDbKvtHasKeyFn
    pairsIt*: CoreDbKvtPairsIt

@ -128,7 +130,7 @@ type
    proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
  CoreDbMptMergeAccountFn* =
    proc(k: openArray[byte]; v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
  CoreDbMptContainsFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
  CoreDbMptHasPathFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
  CoreDbMptRootVidFn* = proc(): CoreDbVidRef {.noRaise.}
  CoreDbMptIsPruningFn* = proc(): bool {.noRaise.}
  CoreDbMptPairsIt* = iterator(): (Blob,Blob) {.apiRaise.}

@ -140,7 +142,7 @@ type
    fetchFn*: CoreDbMptFetchFn
    deleteFn*: CoreDbMptDeleteFn
    mergeFn*: CoreDbMptMergeFn
    containsFn*: CoreDbMptContainsFn
    hasPathFn*: CoreDbMptHasPathFn
    rootVidFn*: CoreDbMptRootVidFn
    pairsIt*: CoreDbMptPairsIt
    replicateIt*: CoreDbMptReplicateIt

@ -155,7 +157,7 @@ type
  CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
  CoreDbAccMergeFn* =
    proc(k: EthAddress; v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
  CoreDbAccContainsFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
  CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
  CoreDbAccRootVidFn* = proc(): CoreDbVidRef {.noRaise.}
  CoreDbAccIsPruningFn* = proc(): bool {.noRaise.}

@ -165,7 +167,7 @@ type
    fetchFn*: CoreDbAccFetchFn
    deleteFn*: CoreDbAccDeleteFn
    mergeFn*: CoreDbAccMergeFn
    containsFn*: CoreDbAccContainsFn
    hasPathFn*: CoreDbAccHasPathFn
    rootVidFn*: CoreDbAccRootVidFn
    isPruningFn*: CoreDbAccIsPruningFn
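A sketch (not from this commit) of what the new `Unset = 0` enum slot buys: a default-initialised error code is now distinguishable from every real error, which is what the `CoreDbErrorCode(0)` assertion in the validator below relies on.

  var code: CoreDbErrorCode        # zero-initialised by default
  doAssert code == Unset
  doAssert KvtNotFound != CoreDbErrorCode(0)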
@ -48,7 +48,7 @@ proc validateMethodsDesc(kvt: CoreDbKvtFns) =
  doAssert not kvt.getFn.isNil
  doAssert not kvt.delFn.isNil
  doAssert not kvt.putFn.isNil
  doAssert not kvt.containsFn.isNil
  doAssert not kvt.hasKeyFn.isNil
  doAssert not kvt.pairsIt.isNil

proc validateMethodsDesc(fns: CoreDbMptFns) =

@ -56,7 +56,7 @@ proc validateMethodsDesc(fns: CoreDbMptFns) =
  doAssert not fns.fetchFn.isNil
  doAssert not fns.deleteFn.isNil
  doAssert not fns.mergeFn.isNil
  doAssert not fns.containsFn.isNil
  doAssert not fns.hasPathFn.isNil
  doAssert not fns.rootVidFn.isNil
  doAssert not fns.isPruningFn.isNil
  doAssert not fns.pairsIt.isNil

@ -67,7 +67,7 @@ proc validateMethodsDesc(fns: CoreDbAccFns) =
  doAssert not fns.fetchFn.isNil
  doAssert not fns.deleteFn.isNil
  doAssert not fns.mergeFn.isNil
  doAssert not fns.containsFn.isNil
  doAssert not fns.hasPathFn.isNil
  doAssert not fns.rootVidFn.isNil
  doAssert not fns.isPruningFn.isNil

@ -79,6 +79,7 @@ proc validateMethodsDesc(vid: CoreDbVidRef) =
  doAssert vid.ready == true

proc validateMethodsDesc(e: CoreDbErrorRef) =
  doAssert not e.isNil
  doAssert not e.parent.isNil
  doAssert e.error != CoreDbErrorCode(0)
@ -8,6 +8,8 @@
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

## This file was renamed from `core_apps.nim`.

{.push raises: [].}

import
@ -0,0 +1,914 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

## Rewrite of `core_apps.nim` using the new `CoreDb` API. The original
## `core_apps.nim` was renamed `core_apps_legacy.nim`.

{.push raises: [].}

import
  std/[algorithm, options, sequtils],
  chronicles,
  eth/[common, rlp],
  results,
  stew/byteutils,
  "../.."/[errors, constants],
  ".."/[aristo, storage_types],
  "."/base

logScope:
  topics = "core_db-apps"

type
  TransactionKey = tuple
    blockNumber: BlockNumber
    index: int

# ------------------------------------------------------------------------------
# Forward declarations
# ------------------------------------------------------------------------------

proc getBlockHeader*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var BlockHeader;
      ): bool
      {.gcsafe, raises: [RlpError].}

proc getBlockHeader*(
    db: CoreDbRef,
    blockHash: Hash256;
      ): BlockHeader
      {.gcsafe, raises: [BlockNotFound].}

proc getBlockHash*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].}

proc addBlockNumberToHashLookup*(
    db: CoreDbRef;
    header: BlockHeader;
      ) {.gcsafe.}

proc getBlockHeader*(
    db: CoreDbRef;
    blockHash: Hash256;
    output: var BlockHeader;
      ): bool
      {.gcsafe.}

# Copied from `utils/utils` which cannot be imported here in order to
# avoid circular imports.
func hash(b: BlockHeader): Hash256

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

template logTxt(info: static[string]): static[string] =
  "Core apps " & info

template discardRlpException(info: static[string]; code: untyped) =
  try:
    code
  except RlpError as e:
    warn logTxt info, error=($e.name), msg=e.msg
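A sketch (not from this commit; `tryDecodeHeader` is a hypothetical helper) of how `discardRlpException` lets decoding code keep a `raises: []` signature by logging and swallowing `RlpError`:

  proc tryDecodeHeader(data: Blob; output: var BlockHeader): bool =
    discardRlpException "tryDecodeHeader()":
      output = rlp.decode(data, BlockHeader)
      return true
    # execution falls through here (returning false) if decoding raised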
# ------------------------------------------------------------------------------
# Private iterators
# ------------------------------------------------------------------------------

iterator findNewAncestors(
    db: CoreDbRef;
    header: BlockHeader;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Returns the chain leading up from the given header until the first
  ## ancestor it has in common with our canonical chain.
  var h = header
  var orig: BlockHeader
  while true:
    if db.getBlockHeader(h.blockNumber, orig) and orig.hash == h.hash:
      break

    yield h

    if h.parentHash == GENESIS_PARENT_HASH:
      break
    else:
      h = db.getBlockHeader(h.parentHash)

# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------

iterator getBlockTransactionData*(
    db: CoreDbRef;
    transactionRoot: Hash256;
      ): seq[byte]
      {.gcsafe, raises: [RlpError].} =
  block body:
    let root = db.getRoot(transactionRoot).valueOr:
      warn logTxt "getBlockTransactionData()",
        transactionRoot, action="getRoot()", `error`=($$error)
      break body
    let transactionDb = db.newMpt root
    var transactionIdx = 0
    while true:
      let transactionKey = rlp.encode(transactionIdx)
      let data = transactionDb.fetch(transactionKey).valueOr:
        if error.error != MptNotFound:
          warn logTxt "getBlockTransactionData()", transactionRoot,
            transactionKey, action="fetch()", error=($$error)
        break body
      yield data
      inc transactionIdx


iterator getBlockTransactions*(
    db: CoreDbRef;
    header: BlockHeader;
      ): Transaction
      {.gcsafe, raises: [RlpError].} =
  for encodedTx in db.getBlockTransactionData(header.txRoot):
    yield rlp.decode(encodedTx, Transaction)


iterator getBlockTransactionHashes*(
    db: CoreDbRef;
    blockHeader: BlockHeader;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Returns an iterable of the transaction hashes from the block specified
  ## by the given block header.
  for encodedTx in db.getBlockTransactionData(blockHeader.txRoot):
    let tx = rlp.decode(encodedTx, Transaction)
    yield rlpHash(tx) # beware EIP-4844


iterator getWithdrawalsData*(
    db: CoreDbRef;
    withdrawalsRoot: Hash256;
      ): seq[byte]
      {.gcsafe, raises: [RlpError].} =
  block body:
    let root = db.getRoot(withdrawalsRoot).valueOr:
      warn logTxt "getWithdrawalsData()",
        withdrawalsRoot, action="getRoot()", error=($$error)
      break body
    let wddb = db.newMpt root
    var idx = 0
    while true:
      let wdKey = rlp.encode(idx)
      let data = wddb.fetch(wdKey).valueOr:
        if error.error != MptNotFound:
          warn logTxt "getWithdrawalsData()",
            withdrawalsRoot, wdKey, action="fetch()", error=($$error)
        break body
      yield data
      inc idx


iterator getReceipts*(
    db: CoreDbRef;
    receiptRoot: Hash256;
      ): Receipt
      {.gcsafe, raises: [RlpError].} =
  block body:
    let root = db.getRoot(receiptRoot).valueOr:
      warn logTxt "getReceipts()",
        receiptRoot, action="getRoot()", error=($$error)
      break body
    var receiptDb = db.newMpt root
    var receiptIdx = 0
    while true:
      let receiptKey = rlp.encode(receiptIdx)
      let receiptData = receiptDb.fetch(receiptKey).valueOr:
        if error.error != MptNotFound:
          warn logTxt "getReceipts()",
            receiptRoot, receiptKey, action="fetch()", error=($$error)
        break body
      yield rlp.decode(receiptData, Receipt)
      inc receiptIdx
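A sketch (not from this commit; `db` and `header` assumed available) of consuming one of these iterators, where the first `MptNotFound` on an index key acts as the natural end-of-list marker:

  var txs: seq[Transaction]
  for tx in db.getBlockTransactions(header):
    txs.add tx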
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func hash(b: BlockHeader): Hash256 =
  rlpHash(b)

proc removeTransactionFromCanonicalChain(
    db: CoreDbRef;
    transactionHash: Hash256;
      ) =
  ## Removes the transaction specified by the given hash from the canonical
  ## chain.
  db.newKvt.del(transactionHashToBlockKey(transactionHash).toOpenArray).isOkOr:
    warn logTxt "removeTransactionFromCanonicalChain()",
      transactionHash, action="del()", error=($$error)

proc setAsCanonicalChainHead(
    db: CoreDbRef;
    headerHash: Hash256;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Sets the header as the canonical chain HEAD.
  let header = db.getBlockHeader(headerHash)

  var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header))
  reverse(newCanonicalHeaders)
  for h in newCanonicalHeaders:
    var oldHash: Hash256
    if not db.getBlockHash(h.blockNumber, oldHash):
      break

    let oldHeader = db.getBlockHeader(oldHash)
    for txHash in db.getBlockTransactionHashes(oldHeader):
      db.removeTransactionFromCanonicalChain(txHash)
      # TODO re-add txn to internal pending pool (only if local sender)

  for h in newCanonicalHeaders:
    db.addBlockNumberToHashLookup(h)

  let canonicalHeadHash = canonicalHeadHashKey()
  db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
    warn logTxt "setAsCanonicalChainHead()",
      canonicalHeadHash, action="put()", error=($$error)

  return newCanonicalHeaders

proc markCanonicalChain(
    db: CoreDbRef;
    header: BlockHeader;
    headerHash: Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## mark this chain as canonical by adding block number to hash lookup
  ## down to forking point
  var
    currHash = headerHash
    currHeader = header

  # mark current header as canonical
  let
    kvt = db.newKvt()
    key = blockNumberToHashKey(currHeader.blockNumber)
  kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
    warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
    return false

  # it is a genesis block, done
  if currHeader.parentHash == Hash256():
    return true

  # mark ancestor blocks as canonical too
  currHash = currHeader.parentHash
  if not db.getBlockHeader(currHeader.parentHash, currHeader):
    return false

  while currHash != Hash256():
    let key = blockNumberToHashKey(currHeader.blockNumber)
    let data = kvt.getOrEmpty(key.toOpenArray).valueOr:
      warn logTxt "markCanonicalChain()", key, action="get()", error=($$error)
      return false
    if data.len == 0:
      # not marked, mark it
      kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
        warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
    elif rlp.decode(data, Hash256) != currHash:
      # replace prev chain
      kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr:
        warn logTxt "markCanonicalChain()", key, action="put()", error=($$error)
    else:
      # forking point, done
      break

    if currHeader.parentHash == Hash256():
      break

    currHash = currHeader.parentHash
    if not db.getBlockHeader(currHeader.parentHash, currHeader):
      return false

  return true
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc exists*(db: CoreDbRef, hash: Hash256): bool =
  db.newKvt.hasKey(hash.data).valueOr:
    warn logTxt "exists()", hash, action="hasKey()", error=($$error)
    return false

proc getBlockHeader*(
    db: CoreDbRef;
    blockHash: Hash256;
    output: var BlockHeader;
      ): bool =
  const info = "getBlockHeader()"
  let data = db.newKvt.get(genericHashKey(blockHash).toOpenArray).valueOr:
    if error.error != KvtNotFound:
      warn logTxt info, blockHash, action="get()", error=($$error)
    return false

  discardRlpException info:
    output = rlp.decode(data, BlockHeader)
    return true

proc getBlockHeader*(
    db: CoreDbRef,
    blockHash: Hash256;
      ): BlockHeader =
  ## Returns the requested block header as specified by block hash.
  ##
  ## Raises BlockNotFound if it is not present in the db.
  if not db.getBlockHeader(blockHash, result):
    raise newException(
      BlockNotFound, "No block with hash " & blockHash.data.toHex)

proc getHash(
    db: CoreDbRef;
    key: DbKey;
    output: var Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  let data = db.newKvt.get(key.toOpenArray).valueOr:
    if error.error != KvtNotFound:
      warn logTxt "getHash()", key, action="get()", error=($$error)
    return false
  output = rlp.decode(data, Hash256)
  true

proc getCanonicalHead*(
    db: CoreDbRef;
    output: var BlockHeader;
      ): bool =
  discardRlpException "getCanonicalHead()":
    var headHash: Hash256
    if db.getHash(canonicalHeadHashKey(), headHash) and
       db.getBlockHeader(headHash, output):
      return true

proc getCanonicalHead*(
    db: CoreDbRef;
      ): BlockHeader
      {.gcsafe, raises: [EVMError].} =
  if not db.getCanonicalHead result:
    raise newException(
      CanonicalHeadNotFound, "No canonical head set for this chain")

proc getCanonicalHeaderHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  discard db.getHash(canonicalHeadHashKey(), result)

proc getBlockHash*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var Hash256;
      ): bool =
  ## Return the block hash for the given block number.
  db.getHash(blockNumberToHashKey(n), output)

proc getBlockHash*(
    db: CoreDbRef;
    n: BlockNumber;
      ): Hash256
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Return the block hash for the given block number.
  if not db.getHash(blockNumberToHashKey(n), result):
    raise newException(BlockNotFound, "No block hash for number " & $n)

proc getHeadBlockHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  if not db.getHash(canonicalHeadHashKey(), result):
    result = Hash256()

proc getBlockHeader*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var BlockHeader;
      ): bool =
  ## Returns the block header with the given number in the canonical chain.
  var blockHash: Hash256
  if db.getBlockHash(n, blockHash):
    result = db.getBlockHeader(blockHash, output)

proc getBlockHeaderWithHash*(
    db: CoreDbRef;
    n: BlockNumber;
      ): Option[(BlockHeader, Hash256)]
      {.gcsafe, raises: [RlpError].} =
  ## Returns the block header and its hash, with the given number in the
  ## canonical chain. Hash is returned to avoid recomputing it
  var hash: Hash256
  if db.getBlockHash(n, hash):
    # Note: this will throw if header is not present.
    var header: BlockHeader
    if db.getBlockHeader(hash, header):
      return some((header, hash))
    else:
      # this should not happen, but if it does, let's fail loudly as this
      # means something is super wrong
      raiseAssert("Corrupted database. Mapping number->hash present, without header in database")
  else:
    return none[(BlockHeader, Hash256)]()

proc getBlockHeader*(
    db: CoreDbRef;
    n: BlockNumber;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Returns the block header with the given number in the canonical chain.
  ## Raises BlockNotFound error if the block is not in the DB.
  db.getBlockHeader(db.getBlockHash(n))

proc getScore*(
    db: CoreDbRef;
    blockHash: Hash256;
      ): UInt256
      {.gcsafe, raises: [RlpError].} =
  let data = db.newKvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
    if error.error != KvtNotFound:
      warn logTxt "getScore()", blockHash, action="get()", error=($$error)
    return
  rlp.decode(data, UInt256)

proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
  ## for testing purpose
  let scoreKey = blockHashToScoreKey blockHash
  db.newKvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
    warn logTxt "setScore()", scoreKey, action="put()", error=($$error)
    return

proc getTd*(db: CoreDbRef; blockHash: Hash256, td: var UInt256): bool =
  const info = "getTd()"
  let bytes = db.newKvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
    if error.error != KvtNotFound:
      warn logTxt info, blockHash, action="get()", error=($$error)
    return false
  discardRlpException info:
    td = rlp.decode(bytes, UInt256)
    return true

proc headTotalDifficulty*(
    db: CoreDbRef;
      ): UInt256
      {.gcsafe, raises: [RlpError].} =
  # this is actually a combination of `getHash` and `getScore`
  const
    info = "headTotalDifficulty()"
    key = canonicalHeadHashKey()
  let
    kvt = db.newKvt
    data = kvt.get(key.toOpenArray).valueOr:
      if error.error != KvtNotFound:
        warn logTxt info, key, action="get()", error=($$error)
      return 0.u256
    blockHash = rlp.decode(data, Hash256)
    numData = kvt.get(blockHashToScoreKey(blockHash).toOpenArray).valueOr:
      warn logTxt info, blockHash, action="get()", error=($$error)
      return 0.u256

  rlp.decode(numData, UInt256)

proc getAncestorsHashes*(
    db: CoreDbRef;
    limit: UInt256;
    header: BlockHeader;
      ): seq[Hash256]
      {.gcsafe, raises: [BlockNotFound].} =
  var ancestorCount = min(header.blockNumber, limit).truncate(int)
  var h = header

  result = newSeq[Hash256](ancestorCount)
  while ancestorCount > 0:
    h = db.getBlockHeader(h.parentHash)
    result[ancestorCount - 1] = h.hash
    dec ancestorCount

proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) =
  let blockNumberKey = blockNumberToHashKey(header.blockNumber)
  db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr:
    warn logTxt "addBlockNumberToHashLookup()",
      blockNumberKey, action="put()", error=($$error)
proc persistTransactions*(
    db: CoreDbRef;
    blockNumber: BlockNumber;
    transactions: openArray[Transaction];
      ): Hash256
      {.gcsafe, raises: [CatchableError].} =
  const
    info = "persistTransactions()"
  let
    mpt = db.newMpt()
    kvt = db.newKvt()
  for idx, tx in transactions:
    let
      encodedTx = rlp.encode(tx.removeNetworkPayload)
      txHash = rlpHash(tx) # beware EIP-4844
      blockKey = transactionHashToBlockKey(txHash)
      txKey: TransactionKey = (blockNumber, idx)
    mpt.merge(rlp.encode(idx), encodedTx).isOkOr:
      warn logTxt info, idx, action="merge()", error=($$error)
      return EMPTY_ROOT_HASH
    kvt.put(blockKey.toOpenArray, rlp.encode(txKey)).isOkOr:
      warn logTxt info, blockKey, action="put()", error=($$error)
      return EMPTY_ROOT_HASH
  mpt.rootVid.hash.valueOr:
    warn logTxt info, action="hash()"
    return EMPTY_ROOT_HASH

proc getTransaction*(
    db: CoreDbRef;
    txRoot: Hash256;
    txIndex: int;
    res: var Transaction;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  const
    info = "getTransaction()"
  let
    mpt = block:
      let root = db.getRoot(txRoot).valueOr:
        warn logTxt info, txRoot, action="getRoot()", `error`=($$error)
        return false
      db.newMpt root
    txData = mpt.fetch(rlp.encode(txIndex)).valueOr:
      if error.error != MptNotFound:
        warn logTxt info, txIndex, action="fetch()", `error`=($$error)
      return false
  res = rlp.decode(txData, Transaction)
  true

proc getTransactionCount*(
    db: CoreDbRef;
    txRoot: Hash256;
      ): int
      {.gcsafe, raises: [RlpError].} =
  const
    info = "getTransactionCount()"
  let mpt = block:
    let root = db.getRoot(txRoot).valueOr:
      warn logTxt info, txRoot, action="getRoot()", `error`=($$error)
      return 0
    db.newMpt root
  var txCount = 0
  while true:
    let hasPath = mpt.hasPath(rlp.encode(txCount)).valueOr:
      warn logTxt info, txCount, action="hasPath()", `error`=($$error)
      return 0
    if hasPath:
      inc txCount
    else:
      return txCount

  doAssert(false, "unreachable")
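A round-trip sketch (not from this commit; `txs` and block number `bn` assumed) tying the three procs above together:

  let txRoot = db.persistTransactions(bn, txs)
  var tx0: Transaction
  doAssert db.getTransaction(txRoot, 0, tx0)
  doAssert db.getTransactionCount(txRoot) == txs.len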
proc getUnclesCount*(
    db: CoreDbRef;
    ommersHash: Hash256;
      ): int
      {.gcsafe, raises: [RlpError].} =
  const info = "getUnclesCount()"
  if ommersHash != EMPTY_UNCLE_HASH:
    let encodedUncles = block:
      let key = genericHashKey(ommersHash)
      db.newKvt.get(key.toOpenArray).valueOr:
        if error.error == KvtNotFound:
          warn logTxt info, ommersHash, action="get()", `error`=($$error)
        return 0
    return rlpFromBytes(encodedUncles).listLen

proc getUncles*(
    db: CoreDbRef;
    ommersHash: Hash256;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError].} =
  const info = "getUncles()"
  if ommersHash != EMPTY_UNCLE_HASH:
    let encodedUncles = block:
      let key = genericHashKey(ommersHash)
      db.newKvt.get(key.toOpenArray).valueOr:
        if error.error == KvtNotFound:
          warn logTxt info, ommersHash, action="get()", `error`=($$error)
        return @[]
    return rlp.decode(encodedUncles, seq[BlockHeader])

proc persistWithdrawals*(
    db: CoreDbRef;
    withdrawals: openArray[Withdrawal];
      ): Hash256
      {.gcsafe, raises: [CatchableError].} =
  const info = "persistWithdrawals()"
  let mpt = db.newMpt()
  for idx, wd in withdrawals:
    mpt.merge(rlp.encode(idx), rlp.encode(wd)).isOkOr:
      warn logTxt info, idx, action="merge()", error=($$error)
      return EMPTY_ROOT_HASH
  mpt.rootVid.hash.valueOr:
    warn logTxt info, action="hash()"
    return EMPTY_ROOT_HASH

proc getWithdrawals*(
    db: CoreDbRef;
    withdrawalsRoot: Hash256;
      ): seq[Withdrawal]
      {.gcsafe, raises: [RlpError].} =
  for encodedWd in db.getWithdrawalsData(withdrawalsRoot):
    result.add(rlp.decode(encodedWd, Withdrawal))
proc getBlockBody*(
    db: CoreDbRef;
    header: BlockHeader;
    output: var BlockBody;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  output.transactions = @[]
  output.uncles = @[]
  for encodedTx in db.getBlockTransactionData(header.txRoot):
    output.transactions.add(rlp.decode(encodedTx, Transaction))

  if header.withdrawalsRoot.isSome:
    output.withdrawals = some(db.getWithdrawals(header.withdrawalsRoot.get))

  if header.ommersHash != EMPTY_UNCLE_HASH:
    let
      key = genericHashKey(header.ommersHash)
      encodedUncles = db.newKvt.get(key.toOpenArray).valueOr:
        if error.error == KvtNotFound:
          warn logTxt "getBlockBody()",
            ommersHash=header.ommersHash, action="get()", `error`=($$error)
        return false
    output.uncles = rlp.decode(encodedUncles, seq[BlockHeader])
  true

proc getBlockBody*(
    db: CoreDbRef;
    blockHash: Hash256;
    output: var BlockBody;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  var header: BlockHeader
  if db.getBlockHeader(blockHash, header):
    return db.getBlockBody(header, output)

proc getBlockBody*(
    db: CoreDbRef;
    hash: Hash256;
      ): BlockBody
      {.gcsafe, raises: [RlpError,ValueError].} =
  if not db.getBlockBody(hash, result):
    raise newException(ValueError, "Error when retrieving block body")

proc getUncleHashes*(
    db: CoreDbRef;
    blockHashes: openArray[Hash256];
      ): seq[Hash256]
      {.gcsafe, raises: [RlpError,ValueError].} =
  for blockHash in blockHashes:
    result &= db.getBlockBody(blockHash).uncles.mapIt(it.hash)

proc getUncleHashes*(
    db: CoreDbRef;
    header: BlockHeader;
      ): seq[Hash256]
      {.gcsafe, raises: [RlpError].} =
  if header.ommersHash != EMPTY_UNCLE_HASH:
    let
      key = genericHashKey(header.ommersHash)
      encodedUncles = db.newKvt.get(key.toOpenArray).valueOr:
        if error.error == KvtNotFound:
          warn logTxt "getUncleHashes()",
            ommersHash=header.ommersHash, action="get()", `error`=($$error)
        return @[]
    return rlp.decode(encodedUncles, seq[BlockHeader]).mapIt(it.hash)

proc getTransactionKey*(
    db: CoreDbRef;
    transactionHash: Hash256;
      ): tuple[blockNumber: BlockNumber, index: int]
      {.gcsafe, raises: [RlpError].} =
  let
    txKey = transactionHashToBlockKey(transactionHash)
    tx = db.newKvt.get(txKey.toOpenArray).valueOr:
      if error.error == KvtNotFound:
        warn logTxt "getTransactionKey()",
          transactionHash, action="get()", `error`=($$error)
      return (0.toBlockNumber, -1)
  let key = rlp.decode(tx, TransactionKey)
  (key.blockNumber, key.index)

proc headerExists*(db: CoreDbRef; blockHash: Hash256): bool =
  ## Returns True if the header with the given block hash is in our DB.
  db.newKvt.hasKey(genericHashKey(blockHash).toOpenArray).valueOr:
    warn logTxt "headerExists()", blockHash, action="hasKey()", `error`=($$error)
    return false

proc setHead*(
    db: CoreDbRef;
    blockHash: Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  var header: BlockHeader
  if not db.getBlockHeader(blockHash, header):
    return false

  if not db.markCanonicalChain(header, blockHash):
    return false

  let canonicalHeadHash = canonicalHeadHashKey()
  db.newKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr:
    warn logTxt "setHead()", canonicalHeadHash, action="put()", error=($$error)
  return true

proc setHead*(
    db: CoreDbRef;
    header: BlockHeader;
    writeHeader = false;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  var headerHash = rlpHash(header)
  let kvt = db.newKvt()
  if writeHeader:
    kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
      warn logTxt "setHead()", headerHash, action="put()", error=($$error)
      return false
  if not db.markCanonicalChain(header, headerHash):
    return false
  let canonicalHeadHash = canonicalHeadHashKey()
  kvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr:
    warn logTxt "setHead()", canonicalHeadHash, action="put()", error=($$error)
    return false
  true
proc persistReceipts*(
    db: CoreDbRef;
    receipts: openArray[Receipt];
      ): Hash256
      {.gcsafe, raises: [CatchableError].} =
  const info = "persistReceipts()"
  let mpt = db.newMpt()
  for idx, rec in receipts:
    mpt.merge(rlp.encode(idx), rlp.encode(rec)).isOkOr:
      warn logTxt info, idx, action="merge()", error=($$error)
  mpt.rootVid.hash.valueOr:
    warn logTxt info, action="hash()"
    return EMPTY_ROOT_HASH

proc getReceipts*(
    db: CoreDbRef;
    receiptRoot: Hash256;
      ): seq[Receipt]
      {.gcsafe, raises: [RlpError].} =
  var receipts = newSeq[Receipt]()
  for r in db.getReceipts(receiptRoot):
    receipts.add(r)
  return receipts

proc persistHeaderToDb*(
    db: CoreDbRef;
    header: BlockHeader;
    forceCanonical: bool;
    startOfHistory = GENESIS_PARENT_HASH;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError,EVMError].} =
  let isStartOfHistory = header.parentHash == startOfHistory
  let headerHash = header.blockHash
  if not isStartOfHistory and not db.headerExists(header.parentHash):
    raise newException(ParentNotFound, "Cannot persist block header " &
      $headerHash & " with unknown parent " & $header.parentHash)
  let kvt = db.newKvt()
  kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
    warn logTxt "persistHeaderToDb()",
      headerHash, action="put()", `error`=($$error)
    return @[]

  let score = if isStartOfHistory: header.difficulty
              else: db.getScore(header.parentHash) + header.difficulty
  let scoreKey = blockHashToScoreKey(headerHash)
  kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
    warn logTxt "persistHeaderToDb()",
      scoreKey, action="put()", `error`=($$error)
    return @[]

  db.addBlockNumberToHashLookup(header)

  var canonHeader: BlockHeader
  if not db.getCanonicalHead canonHeader:
    return db.setAsCanonicalChainHead(headerHash)

  let headScore = db.getScore(canonHeader.hash)
  if score > headScore or forceCanonical:
    return db.setAsCanonicalChainHead(headerHash)

proc persistHeaderToDbWithoutSetHead*(
    db: CoreDbRef;
    header: BlockHeader;
    startOfHistory = GENESIS_PARENT_HASH;
      ) {.gcsafe, raises: [RlpError].} =
  let isStartOfHistory = header.parentHash == startOfHistory
  let headerHash = header.blockHash
  let score = if isStartOfHistory: header.difficulty
              else: db.getScore(header.parentHash) + header.difficulty
  let
    kvt = db.newKvt()
    scoreKey = blockHashToScoreKey(headerHash)
  kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
    warn logTxt "persistHeaderToDbWithoutSetHead()",
      scoreKey, action="put()", `error`=($$error)
    return
  kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
    warn logTxt "persistHeaderToDbWithoutSetHead()",
      headerHash, action="put()", `error`=($$error)
    return

# FIXME-Adam: This seems like a bad idea. I don't see a way to get the score
# in stateless mode, but it seems dangerous to just shove the header into
# the DB *without* also storing the score.
proc persistHeaderToDbWithoutSetHeadOrScore*(db: CoreDbRef; header: BlockHeader) =
  db.addBlockNumberToHashLookup(header)
  let
    kvt = db.newKvt()
    blockHash = header.blockHash
  kvt.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr:
    warn logTxt "persistHeaderToDbWithoutSetHeadOrScore()",
      blockHash, action="put()", `error`=($$error)
    return

proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
  ## Persists the list of uncles to the database.
  ## Returns the uncles hash.
  let enc = rlp.encode(uncles)
  result = keccakHash(enc)
  db.newKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr:
    warn logTxt "persistUncles()",
      unclesHash=result, action="put()", `error`=($$error)
    return EMPTY_ROOT_HASH


proc safeHeaderHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  discard db.getHash(safeHashKey(), result)

proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
  let safeHashKey = safeHashKey()
  db.newKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
    warn logTxt "safeHeaderHash()",
      safeHashKey, action="put()", `error`=($$error)
    return

proc finalizedHeaderHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  discard db.getHash(finalizedHashKey(), result)

proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
  let finalizedHashKey = finalizedHashKey()
  db.newKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr:
    warn logTxt "finalizedHeaderHash()",
      finalizedHashKey, action="put()", `error`=($$error)
    return

proc safeHeader*(
    db: CoreDbRef;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  db.getBlockHeader(db.safeHeaderHash)

proc finalizedHeader*(
    db: CoreDbRef;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  db.getBlockHeader(db.finalizedHeaderHash)

proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
  var header: BlockHeader
  if not db.getBlockHeader(headerHash, header):
    return false
  # see if stateRoot exists
  db.exists(header.stateRoot)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
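A sketch (not from this commit; `header` assumed to extend the current chain) of the usual import sequence built from the procs above:

  discard db.persistHeaderToDb(header, forceCanonical = false)
  doAssert db.headerExists(header.blockHash)
  doAssert db.setHead(header.blockHash)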
@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@ -13,26 +13,30 @@
import
  std/options,
  eth/[common, trie/db],
  ./backend/[legacy_db],
  "."/[base, core_apps]
  ../aristo,
  ./backend/legacy_db,
  ./base,
  #./core_apps_legacy as core_apps
  ./core_apps_newapi as core_apps

export
  common,
  core_apps,

  # Provide a standard interface for calculating merkle hash signatures,
  # here by quoting `Aristo` functions.
  MerkleSignRef,
  merkleSignBegin,
  merkleSignAdd,
  merkleSignCommit,
  to,

  # Not all symbols from the object sources will be exported by default
  CoreDbAccount,
  CoreDbApiError,
  CoreDbCaptFlags,
  CoreDbErrorCode,
  CoreDbErrorRef,
  CoreDbCaptRef,
  CoreDbKvtRef,
  CoreDbMptRef,
  CoreDbPhkRef,
  CoreDbRef,
  CoreDbTxID,
  CoreDbTxRef,
  CoreDbType,
  CoreDbVidRef,
  CoreDxAccRef,

@ -45,26 +49,26 @@ export
  `$$`,
  backend,
  beginTransaction,
  capture,
  commit,
  compensateLegacySetup,
  contains,
  del,
  delete,
  dispose,
  fetch,
  fetchOrEmpty,
  finish,
  get,
  getOrEmpty,
  getRoot,
  getTransactionID,
  hash,
  hasKey,
  hashOrEmpty,
  hasPath,
  isLegacy,
  isPruning,
  kvt,
  logDb,
  merge,
  mptPrune,
  newAccMpt,
  newCapture,
  newKvt,

@ -72,13 +76,11 @@ export
  newTransaction,
  pairs,
  parent,
  phkPrune,
  put,
  recast,
  recorder,
  replicate,
  rollback,
  rootHash,
  rootVid,
  safeDispose,
  setTransactionID,

@ -87,6 +89,27 @@ export
  toPhk,
  toTransactionID

when ProvideCoreDbLegacyAPI:
  type
    CoreDyTxID = CoreDxTxID|CoreDbTxID
  export
    CoreDbCaptFlags,
    CoreDbCaptRef,
    CoreDbKvtRef,
    CoreDbMptRef,
    CoreDbPhkRef,
    CoreDbTxID,
    CoreDbTxRef,
    capture,
    contains,
    kvt,
    mptPrune,
    phkPrune,
    rootHash
else:
  type
    CoreDyTxID = CoreDxTxID

# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------

@ -120,7 +143,7 @@ proc newCoreDbRef*(
# Public template wrappers
# ------------------------------------------------------------------------------

template shortTimeReadOnly*(id: CoreDxTxID|CoreDbTxID; body: untyped) =
template shortTimeReadOnly*(id: CoreDyTxID; body: untyped) =
  proc action() =
    body
  id.shortTimeReadOnly action
@ -16,7 +16,8 @@ import
  std/[algorithm, sequtils, sets, strutils, tables, times],
  chronos,
  eth/[common, trie/nibbles],
  stew/results,
  stew/byteutils,
  results,
  "../.."/[constants, range_desc],
  "."/[hexary_desc, hexary_error]

@ -69,7 +70,7 @@ proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string =
    return db.keyPp(key)
  except CatchableError:
    discard
  key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii
  key.ByteArray33.toSeq.toHex.toLowerAscii

proc ppImpl(key: NodeKey; db: HexaryTreeDbRef): string =
  key.to(RepairKey).ppImpl(db)

@ -563,6 +564,9 @@ proc pp*(db: HexaryTreeDbRef; root: NodeKey; indent=4): string =
  ## Dump the entries from a generic repair tree.
  db.pp(root, indent.toPfx)

proc pp*(db: HexaryTreeDbRef; root: Hash256; indent=4): string =
  ## Dump the entries from a generic repair tree.
  db.pp(root.to(NodeKey), indent.toPfx)

proc pp*(m: Moment): string =
  ## Prints a moment in time similar to *chronicles* time format.
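A sketch (not from this commit) of why the `mapIt` chains can be dropped: byteutils' `toHex` already renders a byte sequence as lower-case hex in one call.

  import std/[sequtils, strutils], stew/byteutils
  let blob = @[0xde.byte, 0xad, 0xbe, 0xef]
  # strutils.toHex(int, len) is upper-case per byte; byteutils.toHex over the
  # whole blob is lower-case, so the two spellings agree:
  doAssert blob.mapIt(it.toHex(2)).join.toLowerAscii == blob.toHex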
@ -8,23 +8,24 @@
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  std/[math, times, strutils],
  eth/[rlp, common/eth_types_rlp],
  stew/byteutils,
  nimcrypto,
  results,
  ../db/core_db,
  ../constants

export eth_types_rlp

{.push raises: [].}

proc calcRootHash[T](items: openArray[T]): Hash256 {.gcsafe.} =
  var tr = newCoreDbRef(LegacyDbMemory).mptPrune
  let sig = merkleSignBegin()
  for i, t in items:
    tr.put(rlp.encode(i), rlp.encode(t))
  return tr.rootHash
    sig.merkleSignAdd(rlp.encode(i), rlp.encode(t))
  sig.merkleSignCommit.value.to(Hash256)

template calcTxRoot*(transactions: openArray[Transaction]): Hash256 =
  calcRootHash(transactions)
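The same key-value signing pattern as a stand-alone sketch (not from this commit), using the `Aristo` wrappers re-exported via `core_db`:

  let sig = merkleSignBegin()
  for i, blob in [@[1.byte], @[2.byte, 3]]:
    sig.merkleSignAdd(rlp.encode(i), blob)
  let root = sig.merkleSignCommit.value.to(Hash256)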
@ -13,6 +13,7 @@ cliBuilder:
  import ./test_code_stream,
    ./test_accounts_cache,
    ./test_aristo,
    ./test_coredb,
    ./test_custom_network,
    ./test_sync_snap,
    ./test_rocksdb_timing,
@ -13,10 +13,11 @@
## ----------------------------------------------------

import
  std/[tables],
  ./pp_light,
  std/tables,
  eth/common,
  stew/byteutils,
  ../../nimbus/common/chain_config,
  eth/common
  ./pp_light

export
  pp_light

@ -26,16 +27,16 @@ export
# ------------------------------------------------------------------------------

proc pp*(b: Blob): string =
  b.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)
  b.toHex.pp(hex = true)

proc pp*(a: EthAddress): string =
  a.mapIt(it.toHex(2)).join[32 .. 39].toLowerAscii
  a.toHex[32 .. 39]

proc pp*(a: openArray[EthAddress]): string =
  "[" & a.mapIt(it.pp).join(" ") & "]"

proc pp*(a: BlockNonce): string =
  a.mapIt(it.toHex(2)).join.toLowerAscii
  a.toHex

proc pp*(h: BlockHeader; sep = " "): string =
  "" &

@ -107,7 +107,7 @@ proc pp*(q: openArray[int]; itemsPerLine: int; lineSep: string): string =

proc pp*(a: MDigest[256]; collapse = true): string =
  if not collapse:
    a.data.toHex.toLowerAscii
    a.data.toHex
  elif a == ZERO_HASH256:
    "ZERO_HASH256"
  elif a == EMPTY_ROOT_HASH:

@ -119,7 +119,7 @@ proc pp*(a: MDigest[256]; collapse = true): string =
  elif a == ZERO_HASH256:
    "ZERO_HASH256"
  else:
    a.data.toHex.join[56 .. 63].toLowerAscii
    a.data.toHex.join[56 .. 63]

proc pp*(a: openArray[MDigest[256]]; collapse = true): string =
  "@[" & a.toSeq.mapIt(it.pp).join(" ") & "]"

@ -133,7 +133,7 @@ proc pp*(q: openArray[byte]; noHash = false): string =
    for n in 0..31: a[n] = q[n]
    MDigest[256](data: a).pp
  else:
    q.toHex.toLowerAscii.pp(hex = true)
    q.toHex.pp(hex = true)

# ------------------------------------------------------------------------------
# Elapsed time pretty printer
@ -9,13 +9,15 @@
# according to those terms.

import
  std/[os, sequtils, strformat, strutils],
  std/[os, strformat, strutils],
  eth/common,
  nimcrypto/utils,
  stew/byteutils,
  ../../nimbus/sync/[protocol, snap/range_desc],
  ./gunzip

import
  nimcrypto/utils except toHex

type
  UndumpState = enum
    UndumpHeader

@ -66,16 +68,16 @@ proc dumpAccounts*(
      ): string =
  ## Dump accounts data in parseable Ascii text
  proc ppStr(blob: Blob): string =
    blob.mapIt(it.toHex(2)).join.toLowerAscii
    blob.toHex

  proc ppStr(proof: SnapProof): string =
    proof.to(Blob).ppStr

  proc ppStr(hash: Hash256): string =
    hash.data.mapIt(it.toHex(2)).join.toLowerAscii
    hash.data.toHex

  proc ppStr(key: NodeKey): string =
    key.ByteArray32.mapIt(it.toHex(2)).join.toLowerAscii
    key.ByteArray32.toHex

  result = "accounts " & $data.accounts.len & " " & $data.proof.len & "\n"
@ -9,13 +9,15 @@
# according to those terms.

import
  std/[os, sequtils, strformat, strutils],
  std/[os, strformat, strutils],
  eth/common,
  nimcrypto/utils,
  stew/byteutils,
  ../../nimbus/sync/[protocol, snap/range_desc],
  ./gunzip

import
  nimcrypto/utils except toHex

type
  UndumpState = enum
    UndumpStoragesHeader

@ -66,16 +68,16 @@ proc dumpStorages*(
      ): string =
  ## Dump account and storage data in parseable Ascii text
  proc ppStr(blob: Blob): string =
    blob.mapIt(it.toHex(2)).join.toLowerAscii
    blob.toHex

  proc ppStr(proof: SnapProof): string =
    proof.to(Blob).ppStr

  proc ppStr(hash: Hash256): string =
    hash.data.mapIt(it.toHex(2)).join.toLowerAscii
    hash.data.toHex

  proc ppStr(key: NodeKey): string =
    key.ByteArray32.mapIt(it.toHex(2)).join.toLowerAscii
    key.ByteArray32.toHex

  result = "storages " & $data.storages.len & " " & $data.proof.len & "\n"
  result &= root.ppStr & "\n"
@ -92,6 +92,9 @@ proc miscRunner(
  test "Multi instances transactions":
    check noisy.testTxSpanMultiInstances()

  test "Short keys and other pathological cases":
    check noisy.testShortKeys()


proc accountsRunner(
    noisy = true;
@ -26,7 +26,7 @@ import
    aristo_init/memory_db,
    aristo_init/rocks_db,
    aristo_persistent,
    aristo_transcode,
    aristo_blobify,
    aristo_vid],
  ../replay/xcheck,
  ./test_helpers

@ -45,8 +45,8 @@ func hash(filter: FilterRef): Hash =
  ##
  var h = BlindHash
  if not filter.isNil:
    h = h !& filter.src.ByteArray32.hash
    h = h !& filter.trg.ByteArray32.hash
    h = h !& filter.src.hash
    h = h !& filter.trg.hash

    for w in filter.vGen.vidReorg:
      h = h !& w.uint64.hash

@ -56,7 +56,7 @@ func hash(filter: FilterRef): Hash =
      h = h !& (w.uint64.toBytesBE.toSeq & data).hash

    for w in filter.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
      let data = filter.kMap.getOrVoid(w).ByteArray32.toSeq
      let data = @(filter.kMap.getOrVoid(w))
      h = h !& (w.uint64.toBytesBE.toSeq & data).hash

    !$h
@ -67,7 +67,7 @@ func hash(filter: FilterRef): Hash =

proc mergeData(
    db: AristoDbRef;
    rootKey: HashKey;
    rootKey: Hash256;
    rootVid: VertexID;
    proof: openArray[SnapProof];
    leafs: openArray[LeafTiePayload];

@ -205,7 +205,7 @@ proc testBackendConsistency*(
    ndb = AristoDbRef() # Reference cache
    mdb = AristoDbRef() # Memory backend database
    rdb = AristoDbRef() # Rocks DB backend database
    rootKey = HashKey.default
    rootKey = Hash256() # Root key
    count = 0

  defer:
@ -19,7 +19,7 @@ import
  unittest2,
  ../../nimbus/db/aristo/[
    aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
    aristo_merge, aristo_persistent, aristo_transcode],
    aristo_merge, aristo_persistent, aristo_blobify],
  ../../nimbus/db/aristo,
  ../../nimbus/db/aristo/aristo_desc/desc_backend,
  ../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler],

@ -72,12 +72,13 @@ proc fList(be: BackendRef): seq[(QueueID,FilterRef)] =
  check be.kind == BackendMemory or be.kind == BackendRocksDB

func ppFil(w: FilterRef; db = AristoDbRef(nil)): string =
  proc qq(key: HashKey; db: AristoDbRef): string =
  proc qq(key: Hash256; db: AristoDbRef): string =
    if db.isNil:
      let n = key.to(UInt256)
      if n == 0: "£ø" else: "£" & $n
    else:
      HashLabel(root: VertexID(1), key: key).pp(db)
      let keyLink = HashKey.fromBytes(key.data).value
      HashLabel(root: VertexID(1), key: keyLink).pp(db)
  "(" & w.fid.pp & "," & w.src.qq(db) & "->" & w.trg.qq(db) & ")"

func pp(qf: (QueueID,FilterRef); db = AristoDbRef(nil)): string =

@ -376,9 +377,6 @@ proc checkFilterTrancoderOk(

# -------------------------

func to(fid: FilterID; T: type HashKey): T =
  fid.uint64.u256.toBytesBE.T

proc qid2fidFn(be: BackendRef): QuFilMap =
  result = proc(qid: QueueID): FilterID =
    let rc = be.getFilFn qid

@ -414,8 +412,8 @@ proc storeFilter(
  let fid = FilterID(serial)
  be.storeFilter FilterRef(
    fid: fid,
    src: fid.to(HashKey),
    trg: (fid-1).to(HashKey))
    src: fid.to(Hash256),
    trg: (fid-1).to(Hash256))

proc fetchDelete(
    be: BackendRef;

@ -496,7 +494,7 @@ proc validateFifo(
    lastFid = FilterID(serial+1)

  if hashesOk:
    lastTrg = be.getKeyFn(VertexID(1)).get(otherwise=VOID_HASH_KEY).to(UInt256)
    lastTrg = be.getKeyFn(VertexID(1)).get(otherwise=HashKey()).to(UInt256)

  for chn,fifo in be.fifos:
    for (qid,filter) in fifo:

@ -750,8 +748,7 @@ proc testFilterBacklog*(
    s &= " n=" & $serial
    s &= " len=" & $be.filters.len
    s &= "" &
      " root=" & be.getKeyFn(VertexID(1))
        .get(otherwise = VOID_HASH_KEY).pp &
    s &= " root=" & be.getKeyFn(VertexID(1)).get(otherwise=VOID_HASH_KEY).pp &
      "\n state=" & be.filters.state.pp &
      "\n fifo=" & be.fifos.pp(db) &
      "\n"
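A sketch (not from this commit) of the `HashKey.fromBytes()` conversion used in `ppFil()` above; it is assumed here to accept both a full 32-byte digest and a short byte string as input:

  let key = HashKey.fromBytes(Hash256().data).value   # 32 bytes: a proper hash key
  doAssert HashKey.fromBytes(@[0x80.byte]).isOk       # assumed: short blob accepted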
@ -14,19 +14,18 @@ import
|
|||
eth/common,
|
||||
rocksdb,
|
||||
../../nimbus/db/aristo/[
|
||||
aristo_constants, aristo_debug, aristo_desc,
|
||||
aristo_filter/filter_scheduler, aristo_merge],
|
||||
aristo_debug, aristo_desc, aristo_filter/filter_scheduler, aristo_merge],
|
||||
../../nimbus/db/kvstore_rocksdb,
|
||||
../../nimbus/sync/protocol/snap/snap_types,
|
||||
../test_sync_snap/test_types,
|
||||
../replay/[pp, undump_accounts, undump_storages]
|
||||
|
||||
from ../../nimbus/sync/snap/range_desc
|
||||
import NodeKey
|
||||
import NodeKey, ByteArray32
|
||||
|
||||
type
|
||||
ProofTrieData* = object
|
||||
root*: HashKey
|
||||
root*: Hash256
|
||||
id*: int
|
||||
proof*: seq[SnapProof]
|
||||
kvpLst*: seq[LeafTiePayload]
|
||||
|
@@ -39,24 +38,29 @@ const
 # Private helpers
 # ------------------------------------------------------------------------------

-proc toPfx(indent: int): string =
+func toPfx(indent: int): string =
   "\n" & " ".repeat(indent)

-proc to(a: NodeKey; T: type HashKey): T =
-  a.T
+func to(a: NodeKey; T: type UInt256): T =
+  T.fromBytesBE ByteArray32(a)
+
+func to(a: NodeKey; T: type PathID): T =
+  a.to(UInt256).to(T)

 # ------------------------------------------------------------------------------
 # Public pretty printing
 # ------------------------------------------------------------------------------

-proc pp*(
+func pp*(
     w: ProofTrieData;
     rootID: VertexID;
     db: AristoDbRef;
     indent = 4;
       ): string =
-  let pfx = indent.toPfx
-  result = "(" & HashLabel(root: rootID, key: w.root).pp(db)
+  let
+    pfx = indent.toPfx
+    rootLink = w.root.to(HashKey)
+  result = "(" & HashLabel(root: rootID, key: rootLink).pp(db)
   result &= "," & $w.id & ",[" & $w.proof.len & "],"
   result &= pfx & " ["
   for n,kvp in w.kvpLst:
@@ -99,24 +103,36 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
 # Public helpers
 # ------------------------------------------------------------------------------

-proc `==`*[T: AristoError|VertexID](a: T, b: int): bool =
+func `==`*[T: AristoError|VertexID](a: T, b: int): bool =
   a == T(b)

-proc `==`*(a: (VertexID,AristoError), b: (int,int)): bool =
+func `==`*(a: (VertexID,AristoError), b: (int,int)): bool =
   (a[0].int,a[1].int) == b

-proc `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool =
+func `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool =
   (a[0].int,a[1]) == b

-proc `==`*(a: (int,AristoError), b: (int,int)): bool =
+func `==`*(a: (int,AristoError), b: (int,int)): bool =
   (a[0],a[1].int) == b

-proc `==`*(a: (int,VertexID,AristoError), b: (int,int,int)): bool =
+func `==`*(a: (int,VertexID,AristoError), b: (int,int,int)): bool =
   (a[0], a[1].int, a[2].int) == b

-proc `==`*(a: (QueueID,Hash), b: (int,Hash)): bool =
+func `==`*(a: (QueueID,Hash), b: (int,Hash)): bool =
   (a[0].int,a[1]) == b

+func to*(a: Hash256; T: type UInt256): T =
+  T.fromBytesBE a.data
+
+func to*(a: Hash256; T: type PathID): T =
+  a.to(UInt256).to(T)
+
+func to*(a: HashKey; T: type UInt256): T =
+  T.fromBytesBE 0u8.repeat(32 - a.len) & @a
+
+func to*(fid: FilterID; T: type Hash256): T =
+  result.data = fid.uint64.u256.toBytesBE
+
 proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
   ## Convert test data into usable in-memory format
   let file = sample.file.findFilePath.value
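
The `to*(a: HashKey; T: type UInt256)` helper above spells out the commit's
key abstraction: a `HashKey` is either a 32 byte hash or an RLP blob of at
most 31 bytes, so short keys are left-padded with zero bytes before the
big-endian conversion. A standalone sketch of that padding rule, assuming
only std/sequtils, stew/byteutils and stint (the `padTo32` name is
hypothetical):

    import std/sequtils, stew/byteutils, stint

    func padTo32(a: seq[byte]): seq[byte] =
      # `HashKey` guarantees a.len <= 32 (hash, or RLP blob of <= 31 bytes)
      0u8.repeat(32 - a.len) & a

    let short = "da80".hexToSeqByte             # 2 byte pseudo key
    doAssert padTo32(short).len == 32
    doAssert UInt256.fromBytesBE(padTo32(short)) == 0xda80.u256
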
@@ -149,10 +165,10 @@ proc to*(sample: AccountsSample; T: type seq[UndumpStorages]): T =
       break
     result.add w

-proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
-  var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0))
+func to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
+  var (rootKey, rootVid) = (Hash256(), VertexID(0))
   for w in ua:
-    let thisRoot = w.root.to(HashKey)
+    let thisRoot = w.root
     if rootKey != thisRoot:
       (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
     if 0 < w.data.accounts.len:
@@ -162,14 +178,14 @@ proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
         kvpLst: w.data.accounts.mapIt(LeafTiePayload(
           leafTie: LeafTie(
             root: rootVid,
-            path: it.accKey.to(HashKey).to(PathID)),
+            path: it.accKey.to(PathID)),
           payload: PayloadRef(pType: RawData, rawBlob: it.accBlob))))

-proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
-  var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0))
+func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
+  var (rootKey, rootVid) = (Hash256(), VertexID(0))
   for n,s in us:
     for w in s.data.storages:
-      let thisRoot = w.account.storageRoot.to(HashKey)
+      let thisRoot = w.account.storageRoot
      if rootKey != thisRoot:
         (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
       if 0 < w.data.len:

@@ -179,12 +195,12 @@ proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
         kvpLst: w.data.mapIt(LeafTiePayload(
           leafTie: LeafTie(
             root: rootVid,
-            path: it.slotHash.to(HashKey).to(PathID)),
+            path: it.slotHash.to(PathID)),
           payload: PayloadRef(pType: RawData, rawBlob: it.slotData))))
     if 0 < result.len:
       result[^1].proof = s.data.proof

-proc mapRootVid*(
+func mapRootVid*(
     a: openArray[LeafTiePayload];
     toVid: VertexID;
   ): seq[LeafTiePayload] =
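
With `ProofTrieData.root` now a `Hash256`, the path helpers above reduce to
one conversion chain: 32 bytes -> `UInt256` -> `PathID`, the former `HashKey`
hop being dropped since that key object was only built to be converted away
again. A sketch of the shared first step, assuming `ByteArray32` is
`array[32, byte]` as re-exported from range_desc (the `toU256` name is
hypothetical; the final `to(UInt256, PathID)` step lives on the aristo side
and is not shown in this diff):

    import stint

    type ByteArray32 = array[32, byte]   # stand-in for the range_desc export

    # Mirrors `to(a: NodeKey; T: type UInt256)` above.
    func toU256(a: ByteArray32): UInt256 =
      UInt256.fromBytesBE a
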
@@ -20,7 +20,7 @@ import
   unittest2,
   ../../nimbus/db/aristo,
   ../../nimbus/db/aristo/[
-    aristo_debug, aristo_desc, aristo_transcode, aristo_vid],
+    aristo_check, aristo_debug, aristo_desc, aristo_blobify, aristo_vid],
   ../../nimbus/db/aristo/aristo_filter/filter_scheduler,
   ../replay/xcheck,
   ./test_helpers
@@ -457,6 +457,55 @@ proc testQidScheduler*(

   true

+
+proc testShortKeys*(
+    noisy = true;
+      ): bool =
+  ## Check for some pathological cases
+  func x(s: string): Blob = s.hexToSeqByte
+  func k(s: string): HashKey = HashKey.fromBytes(s.x).value
+
+  let samples = [
+    # From InvalidBlocks/bc4895-withdrawals/twoIdenticalIndex.json
+    [("80".x,
+      "da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
+      "27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973".k),
+     ("01".x,
+      "da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
+      "81eac5f476f48feb289af40ee764015f6b49036760438ea45df90d5342b6ae61".k),
+     ("02".x,
+      "da018094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
+      "463769ae507fcc6d6231c8888425191c5622f330fdd4b78a7b24c4521137b573".k),
+     ("03".x,
+      "da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
+      "a95b9a7b58a6b3cb4001eb0be67951c5517141cb0183a255b5cae027a7b10b36".k)]]
+
+  for n,sample in samples:
+    let sig = merkleSignBegin()
+    var inx = -1
+    for (k,v,r) in sample:
+      inx.inc
+      sig.merkleSignAdd(k,v)
+      false.say "*** testShortkeys (1)", "n=", n, " inx=", inx,
+        "\n k=", k.toHex, " v=", v.toHex,
+        "\n r=", r.pp(sig),
+        "\n ", sig.pp(),
+        "\n"
+      let w = sig.merkleSignCommit().value
+      false.say "*** testShortkeys (2)", "n=", n, " inx=", inx,
+        "\n k=", k.toHex, " v=", v.toHex,
+        "\n r=", r.pp(sig),
+        "\n R=", w.pp(sig),
+        "\n ", sig.pp(),
+        "\n",
+        "\n ----------------",
+        "\n"
+      let rc = sig.db.check
+      xCheckRc rc.error == (0,0)
+      xCheck r == w
+
+  true
+
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
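
The new `testShortKeys` case above drives the signer interface
(`merkleSignBegin`, `merkleSignAdd`, `merkleSignCommit`) against withdrawal
encodings whose RLP is shorter than 32 bytes, i.e. nodes that are linked by
value rather than by Keccak hash. As a usage sketch, with the import path
assumed and `pp`/`say` left out for brevity:

    import stew/byteutils
    import ../../nimbus/db/aristo   # assumed export point of the signer API

    # Sign a one-entry key-value list and read back the Merkle root.
    let sig = merkleSignBegin()
    sig.merkleSignAdd(
      "80".hexToSeqByte,
      "da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".hexToSeqByte)
    # Per the first sample above, this commits to
    # 27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973.
    let root = sig.merkleSignCommit().value   # a HashKey wrapping the root hash
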
@@ -388,7 +388,7 @@ proc testTxMergeProofAndKvpList*(
   var
     db = AristoDbRef()
     tx = AristoTxRef(nil)
-    rootKey: HashKey
+    rootKey: Hash256
     count = 0
   defer:
     db.finish(flush=true)
@@ -17,7 +17,7 @@ import
   eth/common,
   results,
   unittest2,
-  ../../nimbus/db/[core_db/persistent, ledger],
+  ../../nimbus/db/core_db/persistent,
   ../../nimbus/core/chain,
   ./replay/pp,
   ./test_coredb/[coredb_test_xx, test_chainsync]
@@ -102,7 +102,7 @@ proc openLegacyDB(
 # Test Runners: accounts and accounts storages
 # ------------------------------------------------------------------------------

-proc legacyRunner(
+proc chainSyncRunner(
     noisy = true;
     capture = bChainCapture;
     persistent = true;
@@ -120,7 +120,7 @@ proc legacyRunner(
   defer:
     if persistent: baseDir.flushDbDir

-  suite "Legacy DB: test Core API interfaces"&
+  suite "CoreDB and LedgerRef API"&
       &", capture={fileInfo}, {sayPersistent}":

    test &"Ledger API, {numBlocksInfo} blocks":
@@ -137,7 +137,7 @@ proc legacyRunner(
 # ------------------------------------------------------------------------------

 proc coreDbMain*(noisy = defined(debug)) =
-  noisy.legacyRunner()
+  noisy.chainSyncRunner()

 when isMainModule:
   const
@@ -155,7 +155,7 @@ when isMainModule:
     testList = @[bulkTest2, bulkTest3]

   for n,capture in testList:
-    noisy.legacyRunner(capture=capture, persistent=persDb)
+    noisy.chainSyncRunner(capture=capture, persistent=persDb)

 # ------------------------------------------------------------------------------
 # End