nimbus-eth1/nimbus/db/aristo/aristo_blobify.nim


# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
eth/common,
results,
stew/[arrayops, endians2],
./aristo_desc
# Allocation-free short big-endian encoding that skips the leading zeroes
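# For example, a `VertexID` with value 0x1234 keeps only its two significant
# bytes and encodes as `[0x12, 0x34]`.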
type
SbeBuf*[I] = object
buf*: array[sizeof(I), byte]
len*: byte
RVidBuf* = object
buf*: array[sizeof(SbeBuf[VertexID]) * 2, byte]
len*: byte
func significantBytesBE(val: openArray[byte]): byte =
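  # An all-zero value still counts as one significant byte so that it
  # round-trips as a single zero byte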
for i in 0 ..< val.len:
if val[i] != 0:
return byte(val.len - i)
return 1
func blobify*(v: VertexID|uint64): SbeBuf[typeof(v)] =
let b = v.uint64.toBytesBE()
SbeBuf[typeof(v)](buf: b, len: significantBytesBE(b))
func blobify*(v: StUint): SbeBuf[typeof(v)] =
let b = v.toBytesBE()
SbeBuf[typeof(v)](buf: b, len: significantBytesBE(b))
template data*(v: SbeBuf): openArray[byte] =
let vv = v
vv.buf.toOpenArray(vv.buf.len - int(vv.len), vv.buf.high)
func blobify*(rvid: RootedVertexID): RVidBuf =
# Length-prefixed root encoding creates a unique and common prefix for all
  # vertices sharing the same root
# TODO evaluate an encoding that colocates short roots (like VertexID(1)) with
# the length
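  # For example (sketch): `(root: VertexID(1), vid: VertexID(1))` encodes as the
  # two bytes `[0x01, 0x01]`, while `(root: VertexID(1), vid: VertexID(0x0203))`
  # encodes as `[0x01, 0x01, 0x02, 0x03]`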
let root = rvid.root.blobify()
result.buf[0] = root.len
assign(result.buf.toOpenArray(1, root.len), root.data())
if rvid.root == rvid.vid:
result.len = root.len + 1
else:
# We can derive the length of the `vid` from the total length
let vid = rvid.vid.blobify()
assign(result.buf.toOpenArray(root.len + 1, root.len + vid.len), vid.data())
result.len = root.len + 1 + vid.len
proc deblobify*[T: uint64|VertexID](data: openArray[byte], _: type T): Result[T,AristoError] =
if data.len < 1 or data.len > 8:
return err(Deblob64LenUnsupported)
var tmp: array[8, byte]
discard tmp.toOpenArray(8 - data.len, 7).copyFrom(data)
ok T(uint64.fromBytesBE(tmp))
proc deblobify*(data: openArray[byte], _: type UInt256): Result[UInt256,AristoError] =
if data.len < 1 or data.len > 32:
return err(Deblob256LenUnsupported)
ok UInt256.fromBytesBE(data)
func deblobify*(data: openArray[byte], T: type RootedVertexID): Result[T, AristoError] =
  if data.len < 2:
    return err(DeblobRVidLenUnsupported)
  let rlen = int(data[0])
  if data.len < rlen + 1:
    return err(DeblobRVidLenUnsupported)
let
root = ?deblobify(data.toOpenArray(1, rlen), VertexID)
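    # When only the root was stored, the vertex ID equals the root (mirrors the
    # encoding in `blobify(RootedVertexID)` above)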
vid = if data.len > rlen + 1:
?deblobify(data.toOpenArray(rlen + 1, data.high()), VertexID)
else:
root
ok (root, vid)
template data*(v: RVidBuf): openArray[byte] =
let vv = v
vv.buf.toOpenArray(0, vv.len - 1)
# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------
proc load64(data: openArray[byte]; start: var int, len: int): Result[uint64,AristoError] =
if data.len < start + len:
return err(Deblob256LenUnsupported)
let val = ?deblobify(data.toOpenArray(start, start + len - 1), uint64)
start += len
ok val
proc load256(data: openArray[byte]; start: var int, len: int): Result[UInt256,AristoError] =
if data.len < start + len:
return err(Deblob256LenUnsupported)
let val = ?deblobify(data.toOpenArray(start, start + len - 1), UInt256)
start += len
ok val
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc blobifyTo*(pyl: PayloadRef, data: var Blob) =
if pyl.isNil:
return
case pyl.pType
of RawData:
data &= pyl.rawBlob
data &= [0x10.byte]
of AccountData:
# `lens` holds `len-1` since `mask` filters out the zero-length case (which
# allows saving 1 bit per length)
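    # Field order: optional nonce, balance, storage ID and code hash (as
    # flagged by `mask`), then the 2-byte big-endian `lens` word and the
    # 1-byte `mask` itself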
var lens: uint16
var mask: byte
if 0 < pyl.account.nonce:
mask = mask or 0x01
let tmp = pyl.account.nonce.blobify()
lens += tmp.len - 1 # 3 bits
data &= tmp.data()
if 0 < pyl.account.balance:
mask = mask or 0x02
let tmp = pyl.account.balance.blobify()
lens += uint16(tmp.len - 1) shl 3 # 5 bits
data &= tmp.data()
if VertexID(0) < pyl.stoID:
mask = mask or 0x04
let tmp = pyl.stoID.blobify()
lens += uint16(tmp.len - 1) shl 8 # 3 bits
data &= tmp.data()
if pyl.account.codeHash != EMPTY_CODE_HASH:
mask = mask or 0x08
data &= pyl.account.codeHash.data
data &= lens.toBytesBE()
data &= [mask]
of StoData:
data &= pyl.stoData.blobify().data
data &= [0x20.byte]
proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
## This function serialises the vertex argument to a database record.
## Contrary to RLP based serialisation, these records aim to align on
## fixed byte boundaries.
## ::
  ##   Branch:
  ##     [VertexID, ...]  -- list of up to 16 child vertex lookup keys
  ##     uint64           -- lengths of each child vertex, each taking 4 bits
  ##     0x08             -- marker(8)
  ##
  ##   Extension:
  ##     VertexID         -- child vertex lookup key
  ##     Blob             -- hex-prefix encoded partial path (at least one byte)
  ##     0x80 + xx        -- marker(2) + pathSegmentLen(6)
  ##
  ##   Leaf:
  ##     Blob             -- opaque leaf data payload (might be zero length)
  ##     Blob             -- hex-prefix encoded partial path (at least one byte)
  ##     0xc0 + yy        -- marker(2) + partialPathLen(6)
##
  ## For a branch record, the nibbles of the `lens` word map to the child
  ## slots: nibble `n` holds the number of bytes used to encode the vertex ID
  ## in slot `n` (zero when the slot is empty), and the offset of that ID in
  ## the record is the sum of the lengths of slots `0 ..< n`.
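  ## For example, a branch with children only in slots 0 and 3, encoded in one
  ## and two bytes respectively, yields `lens = 0x2001`; the slot 3 vertex ID
  ## then starts at record offset 1.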
##
if not vtx.isValid:
return err(BlobifyNilVertex)
case vtx.vType:
of Branch:
var
lens = 0u64
pos = data.len
for n in 0..15:
if vtx.bVid[n].isValid:
let tmp = vtx.bVid[n].blobify()
lens += uint64(tmp.len) shl (n * 4)
data &= tmp.data()
if data.len == pos:
return err(BlobifyBranchMissingRefs)
data &= lens.toBytesBE
data &= [0x08u8]
of Extension:
let
pSegm = vtx.ePfx.toHexPrefix(isleaf = false)
psLen = pSegm.len.byte
if psLen == 0 or 33 < psLen:
return err(BlobifyExtPathOverflow)
if not vtx.eVid.isValid:
return err(BlobifyExtMissingRefs)
data &= vtx.eVid.blobify().data()
data &= pSegm
data &= [0x80u8 or psLen]
of Leaf:
let
pSegm = vtx.lPfx.toHexPrefix(isleaf = true)
psLen = pSegm.len.byte
if psLen == 0 or 33 < psLen:
return err(BlobifyLeafPathOverflow)
vtx.lData.blobifyTo(data)
data &= pSegm
data &= [0xC0u8 or psLen]
ok()
proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
## Variant of `blobify()`
var data: Blob
? vtx.blobifyTo data
ok(move(data))
proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
  ## Serialise the last saved state record
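  ## Layout: 32 byte state key, 8 byte big-endian serial number and a 0x7f
  ## marker byte, i.e. 41 bytes in total.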
data.add lSst.key.data
data.add lSst.serial.toBytesBE
data.add @[0x7fu8]
ok()
proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
## Variant of `blobify()`
var data: Blob
? lSst.blobifyTo data
ok(move(data))
# -------------
proc deblobify(
data: openArray[byte];
T: type PayloadRef;
): Result[PayloadRef,AristoError] =
if data.len == 0:
return ok PayloadRef(pType: RawData)
let mask = data[^1]
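  # The last byte doubles as a type marker: 0x10 flags a raw blob, 0x20 a
  # storage slot value, anything else is an account field mask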
if (mask and 0x10) > 0: # unstructured payload
return ok PayloadRef(pType: RawData, rawBlob: data[0 .. ^2])
if (mask and 0x20) > 0: # Slot storage data
return ok PayloadRef(
pType: StoData,
stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
var
pAcc = PayloadRef(pType: AccountData)
start = 0
lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))
if (mask and 0x01) > 0:
let len = lens and 0b111
pAcc.account.nonce = ? load64(data, start, int(len + 1))
if (mask and 0x02) > 0:
let len = (lens shr 3) and 0b11111
pAcc.account.balance = ? load256(data, start, int(len + 1))
if (mask and 0x04) > 0:
let len = (lens shr 8) and 0b111
pAcc.stoID = VertexID(? load64(data, start, int(len + 1)))
if (mask and 0x08) > 0:
if data.len() < start + 32:
return err(DeblobCodeLenUnsupported)
discard pAcc.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
else:
pAcc.account.codeHash = EMPTY_CODE_HASH
ok(pAcc)
proc deblobify*(
record: openArray[byte];
T: type VertexRef;
): Result[T,AristoError] =
  ## De-serialise a vertex record previously encoded with `blobify()`.
if record.len < 3: # minimum `Leaf` record
return err(DeblobVtxTooShort)
ok case record[^1] shr 6:
of 0: # `Branch` vertex
if record[^1] != 0x08u8:
return err(DeblobUnknown)
if record.len < 11: # at least two edges
return err(DeblobBranchTooShort)
let
aInx = record.len - 9
aIny = record.len - 2
var
offs = 0
lens = uint64.fromBytesBE record.toOpenArray(aInx, aIny) # bitmap
vtxList: array[16,VertexID]
n = 0
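      # Each 4-bit nibble of `lens` holds the encoded byte length of the child
      # vertex ID in the corresponding slot; a zero nibble leaves the slot empty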
while lens != 0:
let len = lens and 0b1111
if len > 0:
vtxList[n] = VertexID(? load64(record, offs, int(len)))
inc n
lens = lens shr 4
# End `while`
VertexRef(
vType: Branch,
bVid: vtxList)
of 2: # `Extension` vertex
let
sLen = record[^1].int and 0x3f # length of path segment
rLen = record.len - 1 # `vertexID` + path segm
pLen = rLen - sLen # payload length
if rLen < sLen or pLen < 1:
return err(DeblobLeafSizeGarbled)
let (isLeaf, pathSegment) =
NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen - 1)
if isLeaf:
return err(DeblobExtGotLeafPrefix)
var offs = 0
VertexRef(
vType: Extension,
eVid: VertexID(?load64(record, offs, pLen)),
ePfx: pathSegment)
of 3: # `Leaf` vertex
let
sLen = record[^1].int and 0x3f # length of path segment
rLen = record.len - 1 # payload + path segment
pLen = rLen - sLen # payload length
if rLen < sLen or pLen < 1:
return err(DeblobLeafSizeGarbled)
let (isLeaf, pathSegment) =
NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1)
if not isLeaf:
return err(DeblobLeafGotExtPrefix)
let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(PayloadRef)
VertexRef(
vType: Leaf,
lPfx: pathSegment,
lData: pyl)
else:
return err(DeblobUnknown)
proc deblobify*(
data: openArray[byte];
T: type SavedState;
): Result[SavedState,AristoError] =
## De-serialise the last saved state data record previously encoded with
## `blobify()`.
if data.len != 41:
return err(DeblobWrongSize)
if data[^1] != 0x7f:
return err(DeblobWrongType)
ok(SavedState(
key: Hash256(data: array[32, byte].initCopyFrom(data.toOpenArray(0, 31))),
serial: uint64.fromBytesBE data.toOpenArray(32, 39)))
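# Round-trip sketch (assuming a populated `vtx: VertexRef`):
#   let rec = vtx.blobify().expect "serialisable vertex"
#   doAssert rec.deblobify(VertexRef).expect("decodable record").vType == vtx.vType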
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------