Core db+aristo updates n fixes (#2298)

* Fix `blobify()` for `SavedState` object

why:
  Serialisation has to handle varying sizes for `HashKey`, in particular
  an empty key, which has zero size.

* Store correct block number in `SavedState` record

why:
  The record previously stored `block-number - 1` for some obscure reason.

* Cosmetics, docs
Jordan Hrycaj, 2024-06-05 18:17:50 +00:00, committed by GitHub
parent f275805198, commit 8985535ab2
9 changed files with 69 additions and 32 deletions


@@ -174,7 +174,7 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
     dbTx.commit()
     # Save and record the block number before the last saved block state.
-    c.db.persistent(headers[^1].blockNumber - 1)
+    c.db.persistent(headers[^1].blockNumber)
     if c.com.pruneHistory:
       # There is a feature for test systems to regularly clean up older blocks


@@ -24,7 +24,7 @@ Contents
     + [4.4 Leaf record payload serialisation for account data](#ch4x4)
     + [4.5 Leaf record payload serialisation for RLP encoded data](#ch4x5)
     + [4.6 Leaf record payload serialisation for unstructured data](#ch4x6)
-    + [4.7 Serialisation of the list of unused vertex IDs](#ch4x7)
+    + [4.7 Serialisation of the top used vertex ID](#ch4x7)
     + [4.8 Serialisation of a last saved state record](#ch4x8)
     + [4.9 Serialisation record identifier identification](#ch4x9)
@@ -351,13 +351,11 @@ maximum value of that byte is 0x99.
         marker(8) is the eight bit array *0110-1011*
 
 <a name="ch4x7"></a>
-### 4.7 Serialisation of the list of unused vertex IDs
+### 4.7 Serialisation of the top used vertex ID
 
-        0 +-- ..
-          ...                       -- recycled vertexIDs
-          +--+--+--+--+--+--+--+--+
-          |                       | -- last unused vertex IDs
-          +--+--+--+--+--+--+--+--+
+        0 +--+--+--+--+--+--+--+--+
+          |                       | -- last used vertex IDs
+        8 +--+--+--+--+--+--+--+--+
           |  |                      -- marker(8), 0x7c
           +--+
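
A worked example of this layout (our sketch, not part of the commit;
`toBytesBE` is the stew/endians2 helper the serialiser itself uses):

    import stew/endians2

    # Sketch of the 9-byte "top used vertex ID" record described above:
    # an 8 byte big endian ID followed by the 0x7c marker byte.
    let tuv = 0x1234'u64            # hypothetical top used vertex ID
    var data: seq[byte]
    data.add tuv.toBytesBE          # 00 00 00 00 00 00 12 34
    data.add 0x7c'u8                # marker(8)
    doAssert data.len == 9
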
@@ -399,7 +397,7 @@ i.e. the last byte of a serialised record.
 | 0xxx 0yyy | (x(3)<<4) + y(3) | Account payload      | [4.4](#ch4x4) |
 | 0110 1010 |       0x6a       | RLP encoded payload  | [4.5](#ch4x5) |
 | 0110 1011 |       0x6b       | Unstructured payload | [4.6](#ch4x6) |
-| 0111 1100 |       0x7c       | List of vertex IDs   | [4.7](#ch4x7) |
+| 0111 1100 |       0x7c       | Last used vertex ID  | [4.7](#ch4x7) |
 | 0111 1111 |       0x7f       | Last saved state     | [4.8](#ch4x8) |
 
 <a name="ch5"></a>


@@ -153,18 +153,32 @@ proc blobify*(tuv: VertexID): Blob =
   ## Variant of `blobifyTo()`
   tuv.blobifyTo result
 
-proc blobifyTo*(lSst: SavedState; data: var Blob) =
+proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
   ## Serialise a last saved state record
-  data.setLen(0)
-  data.add lSst.src.data
-  data.add lSst.trg.data
+  case lSst.src.len:
+  of 0:
+    data.setLen(32)
+  of 32:
+    data.setLen(0)
+    data.add lSst.src.data
+  else:
+    return err(BlobifyStateSrcLenGarbled)
+  case lSst.trg.len:
+  of 0:
+    data.setLen(64)
+  of 32:
+    data.add lSst.trg.data
+  else:
+    return err(BlobifyStateTrgLenGarbled)
   data.add lSst.serial.toBytesBE
   data.add @[0x7fu8]
+  ok()
 
-proc blobify*(lSst: SavedState): Blob =
+proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
   ## Variant of `blobify()`
-  lSst.blobifyTo result
+  var data: Blob
+  ? lSst.blobifyTo data
+  ok(move(data))
 
 # -------------
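
With both variants returning a `Result`, the empty-key case from the
commit message now encodes to the fixed 73-byte layout. A hedged usage
sketch (import paths assumed; relies on a default-initialised
`SavedState` carrying empty `src`/`trg` keys):

    import ./nimbus/db/aristo/[aristo_desc, aristo_blobify]  # paths assumed

    let lSst = SavedState(serial: 42u64)  # src/trg left as empty keys
    let rc = lSst.blobify
    doAssert rc.isOk
    doAssert rc.value.len == 73           # 32 (src) + 32 (trg) + 8 (serial) + 1 (marker)
    doAssert rc.value[^1] == 0x7fu8       # last-saved-state marker, see README 4.8
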
@@ -343,6 +357,7 @@ proc deblobifyTo*(
   ## De-serialise the last saved state data record previously encoded with
   ## `blobify()`.
   if data.len != 73:
+    echo ">>> deblobifyTo got size=", data.len
     return err(DeblobWrongSize)
   if data[^1] != 0x7f:
     return err(DeblobWrongType)


@@ -28,13 +28,14 @@ type
     SerCantResolveStorageRoot
 
     # Data record transcoders, `deblobify()` and `blobify()`
-    BlobifyNilFilter
-    BlobifyNilVertex
     BlobifyBranchMissingRefs
     BlobifyExtMissingRefs
     BlobifyExtPathOverflow
     BlobifyLeafPathOverflow
     BlobifyFilterRecordOverflow
+    BlobifyNilFilter
+    BlobifyNilVertex
+    BlobifyStateSrcLenGarbled
+    BlobifyStateTrgLenGarbled
 
     DeblobNilArgument
     DeblobUnknown


@@ -29,6 +29,19 @@ type
     ## keys, the vertex component will be called a node. On the persistent
     ## backend of the database, there is no other reference to the node than
     ## the very same `VertexID`.
+    ##
+    ## Vertex IDs are generated on the fly and thrown away when not needed
+    ## anymore. They are not recycled. A quick estimate
+    ##
+    ##   (2^64) / (100 * 365.25 * 24 * 3600) / 1000 / 1000 / 1000 = 5.86
+    ##
+    ## shows that the `uint64` scalar space is not exhausted in 100 years
+    ## if the database consumes somewhat less than 6 IDs per nanosecond.
+    ##
+    ## A simple recycling mechanism was tested which slowed down the system
+    ## considerably because large swaths of database vertices were regularly
+    ## freed, so recycling had to deal with extensive lists of non-consecutive
+    ## IDs.
 
   HashKey* = object
     ## Ethereum MPTs use Keccak hashes as node links if the size of an RLP
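
The estimate in the new comment can be reproduced directly (our check,
not part of the commit):

    import std/math

    # IDs per nanosecond needed to exhaust a 64-bit ID space in 100 years
    let
      idSpace    = pow(2.0, 64.0)                   # 2^64 ≈ 1.84e19
      centurySec = 100.0 * 365.25 * 24.0 * 3600.0   # ≈ 3.16e9 seconds
    echo idSpace / centurySec / 1e9                 # ≈ 5.8 IDs per nanosecond
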


@@ -168,7 +168,14 @@ proc putLstFn(db: MemBackendRef): PutLstFn =
     proc(hdl: PutHdlRef; lst: SavedState) =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
-        hdl.lSst = some(lst)
+        let rc = lst.blobify # test
+        if rc.isOk:
+          hdl.lSst = some(lst)
+        else:
+          hdl.error = TypedPutHdlErrRef(
+            pfx: AdmPfx,
+            aid: AdmTabIdLst,
+            code: rc.error)
 
 proc putEndFn(db: MemBackendRef): PutEndFn =
   result =
@@ -287,7 +294,7 @@ iterator walk*(
     if be.mdb.tUvi.isSome:
       yield(AdmPfx, AdmTabIdTuv.uint64, be.mdb.tUvi.unsafeGet.blobify)
     if be.mdb.lSst.isSome:
-      yield(AdmPfx, AdmTabIdLst.uint64, be.mdb.lSst.unsafeGet.blobify)
+      yield(AdmPfx, AdmTabIdLst.uint64, be.mdb.lSst.unsafeGet.blobify.value)
 
   for vid in be.mdb.sTab.keys.toSeq.mapIt(it).sorted:
     let data = be.mdb.sTab.getOrDefault(vid, EmptyBlob)


@@ -215,7 +215,13 @@ proc putLstFn(db: RdbBackendRef): PutLstFn =
     proc(hdl: PutHdlRef; lst: SavedState) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
-        db.rdb.putByPfx(AdmPfx, @[(AdmTabIdLst.uint64, lst.blobify)]).isOkOr:
+        let data = lst.blobify.valueOr:
+          hdl.error = TypedPutHdlErrRef(
+            pfx: AdmPfx,
+            aid: AdmTabIdLst,
+            code: error)
+          return
+        db.rdb.putByPfx(AdmPfx, @[(AdmTabIdLst.uint64, data)]).isOkOr:
           hdl.error = TypedPutHdlErrRef(
             pfx: AdmPfx,
             aid: AdmTabIdLst,
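
Both backends lean on the nim-results combinators seen above: `valueOr`
unwraps an `ok` value or runs its block with `error` injected, and
`isOkOr` runs its block only on failure. A standalone sketch (ours),
with a hypothetical `parse` helper:

    import results

    proc parse(s: string): Result[int, string] =
      if s.len > 0: ok(s.len) else: err("empty input")

    proc demo() =
      let n = parse("abc").valueOr:
        echo "parse failed: ", error   # `error` is injected by `valueOr`
        return
      echo n                           # prints 3

      parse("").isOkOr:
        echo "as expected: ", error    # runs only on the error branch

    demo()
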


@@ -11,7 +11,7 @@
 {.push raises: [].}
 
 import
-  std/[tables, typetraits],
+  std/tables,
   eth/common,
   results,
   ../../aristo as use_ari,


@@ -353,18 +353,15 @@ proc getSavedStateBlockNumber*(
   ## the `relax` argument can be set `true` so this function also returns
   ## zero if the state consistency check fails.
   ##
-  var
-    header: BlockHeader
-  let
-    st = db.ctx.getMpt(CtGeneric).backend.toAristoSavedStateBlockNumber()
-    # The correct block number is one step ahead of the journal block number
-    bn = st.blockNumber + 1
-  if db.getBlockHeader(bn, header):
+  var header: BlockHeader
+  let st = db.ctx.getMpt(CtGeneric).backend.toAristoSavedStateBlockNumber()
+  if db.getBlockHeader(st.blockNumber, header):
     discard db.ctx.newColumn(CtAccounts,header.stateRoot).valueOr:
       if relax:
         return
-      raiseAssert "getSavedStateBlockNumber(): state mismatch at #" & $bn
-    return bn
+      raiseAssert "getSavedStateBlockNumber(): state mismatch at " &
+        "#" & $st.blockNumber
+    return st.blockNumber
 
 proc getBlockHeader*(
     db: CoreDbRef;