nimbus-eth1/nimbus/db/aristo/aristo_blobify.nim

# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  std/bitops,
  eth/[common, trie/nibbles],
  results,
  stew/endians2,
  ./aristo_desc

# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------

proc load64(data: openArray[byte]; start: var int): Result[uint64,AristoError] =
  if data.len < start + 9:
    return err(DeblobPayloadTooShortInt64)
  let val = uint64.fromBytesBE(data.toOpenArray(start, start + 7))
  start += 8
  ok val

proc load256(data: openArray[byte]; start: var int): Result[UInt256,AristoError] =
  if data.len < start + 33:
    return err(DeblobPayloadTooShortInt256)
  let val = UInt256.fromBytesBE(data.toOpenArray(start, start + 31))
  start += 32
  ok val

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc blobifyTo*(pyl: PayloadRef, data: var Blob) =
  if pyl.isNil:
    return
  case pyl.pType
  of RawData:
    data &= pyl.rawBlob
    data &= [0x6b.byte]
  of AccountData:
    var mask: byte
    if 0 < pyl.account.nonce:
      mask = mask or 0x01
      data &= pyl.account.nonce.uint64.toBytesBE

    if high(uint64).u256 < pyl.account.balance:
      mask = mask or 0x08
      data &= pyl.account.balance.toBytesBE
    elif 0 < pyl.account.balance:
      mask = mask or 0x04
      data &= pyl.account.balance.truncate(uint64).uint64.toBytesBE

    if VertexID(0) < pyl.account.storageID:
      mask = mask or 0x10
      data &= pyl.account.storageID.uint64.toBytesBE

    if pyl.account.codeHash != VOID_CODE_HASH:
      mask = mask or 0x80
      data &= pyl.account.codeHash.data

    data &= [mask]
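
# A minimal usage sketch for the payload codec above, assuming `Blob` is
# `seq[byte]` as re-exported via `eth/common`.
when isMainModule:
  block:
    var data: Blob
    PayloadRef(pType: RawData, rawBlob: @[byte 1, 2, 3]).blobifyTo(data)
    # raw bytes come first, then the 0x6b "unstructured payload" marker
    doAssert data == @[byte 1, 2, 3, 0x6b]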

proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
  ## This function serialises the vertex argument to a database record.
  ## Contrary to RLP based serialisation, these records aim to align on
  ## fixed byte boundaries.
  ## ::
  ##   Branch:
  ##     uint64, ...    -- list of up to 16 child vertices lookup keys
  ##     uint16         -- index bitmap
  ##     0x08           -- marker(8)
  ##
  ##   Extension:
  ##     uint64         -- child vertex lookup key
  ##     Blob           -- hex encoded partial path (at least one byte)
  ##     0x80 + xx      -- marker(2) + pathSegmentLen(6)
  ##
  ##   Leaf:
  ##     Blob           -- opaque leaf data payload (might be zero length)
  ##     Blob           -- hex encoded partial path (at least one byte)
  ##     0xc0 + yy      -- marker(2) + partialPathLen(6)
  ##
  ## For a branch record, only the lookup keys of valid child vertices are
  ## stored, in ascending index order. Bit `n` of the `access` bitmap is set
  ## if the child with index `n` is present, so its lookup key starts at byte
  ## offset
  ## ::
  ##   8 * popCount(access and ((1u16 shl n) - 1))
  ##
  if not vtx.isValid:
    return err(BlobifyNilVertex)
  case vtx.vType:
  of Branch:
    var
      access = 0u16
      pos = data.len
    for n in 0..15:
      if vtx.bVid[n].isValid:
        access = access or (1u16 shl n)
        data &= vtx.bVid[n].uint64.toBytesBE
    if data.len - pos < 16:
      return err(BlobifyBranchMissingRefs)
    data &= access.toBytesBE
    data &= [0x08u8]
  of Extension:
    let
      pSegm = vtx.ePfx.hexPrefixEncode(isleaf = false)
      psLen = pSegm.len.byte
    if psLen == 0 or 33 < psLen:
      return err(BlobifyExtPathOverflow)
    if not vtx.eVid.isValid:
      return err(BlobifyExtMissingRefs)
    data &= vtx.eVid.uint64.toBytesBE
    data &= pSegm
    data &= [0x80u8 or psLen]
  of Leaf:
    let
      pSegm = vtx.lPfx.hexPrefixEncode(isleaf = true)
      psLen = pSegm.len.byte
    if psLen == 0 or 33 < psLen:
      return err(BlobifyLeafPathOverflow)
    vtx.lData.blobifyTo(data)
    data &= pSegm
    data &= [0xC0u8 or psLen]
  ok()

proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
  ## Variant of `blobifyTo()`
  var data: Blob
  ? vtx.blobifyTo data
  ok(move(data))
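
# A short sketch of the `Leaf` layout documented above; `initNibbleRange` is
# from `eth/trie/nibbles` (already imported) and the two nibble path `0x12`
# is an arbitrary example value.
when isMainModule:
  block:
    let leaf = VertexRef(
      vType: Leaf,
      lPfx:  initNibbleRange(@[byte 0x12]),
      lData: PayloadRef(pType: RawData, rawBlob: @[byte 0xaa]))
    let rec = leaf.blobify().expect("leaf record")
    # the marker bits of the trailing byte identify a `Leaf` record
    doAssert (rec[^1] shr 6) == 3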

proc blobifyTo*(tuv: VertexID; data: var Blob) =
  ## This function serialises a top used vertex ID.
  data.setLen(9)
  let w = tuv.uint64.toBytesBE
  (addr data[0]).copyMem(unsafeAddr w[0], 8)
  data[8] = 0x7Cu8

proc blobify*(tuv: VertexID): Blob =
  ## Variant of `blobifyTo()`
  tuv.blobifyTo result
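
# Sketch: a top used vertex ID always serialises to 9 bytes, the big-endian
# `uint64` value followed by the 0x7C marker.
when isMainModule:
  block:
    let rec = VertexID(1).blobify()
    doAssert rec.len == 9 and rec[8] == 0x7C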

proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
  ## Serialise a last saved state record
  case lSst.src.len:
  of 0:
    data.setLen(32)
  of 32:
    data.setLen(0)
    data.add lSst.src.data
  else:
    return err(BlobifyStateSrcLenGarbled)
  case lSst.trg.len:
  of 0:
    data.setLen(64)
  of 32:
    data.add lSst.trg.data
  else:
    return err(BlobifyStateTrgLenGarbled)
  data.add lSst.serial.toBytesBE
  data.add @[0x7fu8]
  ok()

proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
  ## Variant of `blobifyTo()`
  var data: Blob
  ? lSst.blobifyTo data
  ok(move(data))
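
# Sketch of the fixed 73 byte saved state layout (two 32 byte hash slots, an
# 8 byte serial, one marker byte), assuming a default `SavedState` holds void
# (zero length) `HashKey` fields.
when isMainModule:
  block:
    var lSst: SavedState
    lSst.serial = 42
    let rec = lSst.blobify().expect("fixed size record")
    doAssert rec.len == 73 and rec[^1] == 0x7f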
# -------------

proc deblobifyTo(
    data: openArray[byte];
    pyl: var PayloadRef;
): Result[void,AristoError] =
  if data.len == 0:
    pyl = PayloadRef(pType: RawData)
    return ok()

  let mask = data[^1]
  if mask == 0x6b: # unstructured payload
    pyl = PayloadRef(pType: RawData, rawBlob: data[0 .. ^2])
    return ok()

  var
    pAcc = PayloadRef(pType: AccountData)
    start = 0

  case mask and 0x03:
  of 0x00:
    discard
  of 0x01:
    pAcc.account.nonce = (? data.load64 start).AccountNonce
  else:
    return err(DeblobNonceLenUnsupported)

  case mask and 0x0c:
  of 0x00:
    discard
  of 0x04:
    pAcc.account.balance = (? data.load64 start).u256
  of 0x08:
    pAcc.account.balance = (? data.load256 start)
  else:
    return err(DeblobBalanceLenUnsupported)

  case mask and 0x30:
  of 0x00:
    discard
  of 0x10:
    pAcc.account.storageID = (? data.load64 start).VertexID
  else:
    return err(DeblobStorageLenUnsupported)

  case mask and 0xc0:
  of 0x00:
    pAcc.account.codeHash = VOID_CODE_HASH
  of 0x80:
    if data.len < start + 33:
      return err(DeblobPayloadTooShortInt256)
    (addr pAcc.account.codeHash.data[0]).copyMem(unsafeAddr data[start], 32)
  else:
    return err(DeblobCodeLenUnsupported)

  pyl = pAcc
  ok()

proc deblobifyTo*(
    record: openArray[byte];
    vtx: var VertexRef;
): Result[void,AristoError] =
  ## De-serialise a data record encoded with `blobify()`. The second
  ## argument `vtx` can be `nil`.
  if record.len < 3:                                  # minimum `Leaf` record
    return err(DeblobVtxTooShort)

  case record[^1] shr 6:
  of 0: # `Branch` vertex
    if record[^1] != 0x08u8:
      return err(DeblobUnknown)
    if record.len < 19:                               # at least two edges
      return err(DeblobBranchTooShort)
    if (record.len mod 8) != 3:
      return err(DeblobBranchSizeGarbled)
    let
      maxOffset = record.len - 11
      aInx = record.len - 3
      aIny = record.len - 2
    var
      offs = 0
      access = uint16.fromBytesBE record.toOpenArray(aInx, aIny) # bitmap
      vtxList: array[16,VertexID]
    while access != 0:
      if maxOffset < offs:
        return err(DeblobBranchInxOutOfRange)
      let n = access.firstSetBit - 1
      access.clearBit n
      vtxList[n] = (uint64.fromBytesBE record.toOpenArray(offs, offs + 7)).VertexID
      offs += 8
      # End `while`
    vtx = VertexRef(
      vType: Branch,
      bVid:  vtxList)

  of 2: # `Extension` vertex
    let
      sLen = record[^1].int and 0x3f                  # length of path segment
      rLen = record.len - 1                           # `vertexID` + path segm
    if record.len < 10:
      return err(DeblobExtTooShort)
    if 8 + sLen != rLen:                              # => sLen is at least 1
      return err(DeblobExtSizeGarbled)
    let (isLeaf, pathSegment) = hexPrefixDecode record.toOpenArray(8, rLen - 1)
    if isLeaf:
      return err(DeblobExtGotLeafPrefix)
    vtx = VertexRef(
      vType: Extension,
      eVid:  (uint64.fromBytesBE record.toOpenArray(0, 7)).VertexID,
      ePfx:  pathSegment)

  of 3: # `Leaf` vertex
    let
      sLen = record[^1].int and 0x3f                  # length of path segment
      rLen = record.len - 1                           # payload + path segment
      pLen = rLen - sLen                              # payload length
    if rLen < sLen:
      return err(DeblobLeafSizeGarbled)
    let (isLeaf, pathSegment) = hexPrefixDecode record.toOpenArray(pLen, rLen-1)
    if not isLeaf:
      return err(DeblobLeafGotExtPrefix)
    var pyl: PayloadRef
    ? record.toOpenArray(0, pLen - 1).deblobifyTo(pyl)
    vtx = VertexRef(
      vType: Leaf,
      lPfx:  pathSegment,
      lData: pyl)

  else:
    return err(DeblobUnknown)
  ok()

proc deblobify*(
    data: openArray[byte];
    T: type VertexRef;
): Result[T,AristoError] =
  ## Variant of `deblobifyTo()` for vertex deserialisation.
  var vtx = T(nil) # will be auto-initialised
  ? data.deblobifyTo vtx
  ok vtx
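
# Round trip sketch: an `Extension` vertex survives `blobify()` followed by
# `deblobify()`; the two nibble path `0x34` is an arbitrary example value.
when isMainModule:
  block:
    let ext = VertexRef(
      vType: Extension,
      eVid:  VertexID(7),
      ePfx:  initNibbleRange(@[byte 0x34]))
    let rec = ext.blobify().expect("ext record")
    let vtx = rec.deblobify(VertexRef).expect("ext vertex")
    doAssert vtx.vType == Extension and vtx.eVid == VertexID(7)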

proc deblobifyTo*(
    data: openArray[byte];
    tuv: var VertexID;
): Result[void,AristoError] =
  ## De-serialise a top used vertex ID.
  if data.len == 0:
    tuv = VertexID(0)
  elif data.len != 9:
    return err(DeblobSizeGarbled)
  elif data[^1] != 0x7c:
    return err(DeblobWrongType)
  else:
    tuv = (uint64.fromBytesBE data.toOpenArray(0, 7)).VertexID
  ok()

proc deblobify*(
    data: openArray[byte];
    T: type VertexID;
): Result[T,AristoError] =
  ## Variant of `deblobifyTo()` for deserialising a top used vertex ID.
  var vTop: T
  ? data.deblobifyTo vTop
  ok move(vTop)
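
# Sketch: an empty blob decodes to the void `VertexID(0)` and a serialised
# ID round trips through the codec.
when isMainModule:
  block:
    var empty: seq[byte]
    doAssert empty.deblobify(VertexID).expect("void ID") == VertexID(0)
    doAssert VertexID(9).blobify().deblobify(VertexID).expect("same ID") == VertexID(9)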

proc deblobifyTo*(
    data: openArray[byte];
    lSst: var SavedState;
): Result[void,AristoError] =
  ## De-serialise the last saved state data record previously encoded with
  ## `blobify()`.
  if data.len != 73:
    return err(DeblobWrongSize)
  if data[^1] != 0x7f:
    return err(DeblobWrongType)

  func loadHashKey(data: openArray[byte]): Result[HashKey,AristoError] =
    var w = HashKey.fromBytes(data).valueOr:
      return err(DeblobHashKeyExpected)
    ok move(w)

  lSst.src = ? data.toOpenArray(0, 31).loadHashKey()
  lSst.trg = ? data.toOpenArray(32, 63).loadHashKey()
  lSst.serial = uint64.fromBytesBE data.toOpenArray(64, 71)
  ok()

proc deblobify*(
    data: openArray[byte];
    T: type SavedState;
): Result[T,AristoError] =
  ## Variant of `deblobifyTo()` for deserialising a last saved state record.
  var lSst: T
  ? data.deblobifyTo lSst
  ok move(lSst)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------