Remove SSZ Union usage in BlockHeaderWithProof type (#3019)

* Remove SSZ Union usage in BlockHeaderWithProof type

Remove the SSZ Union from the BlockHeaderWithProof type by making the proof
an SSZ-encoded ByteList. The correct proof type can then be selected at
decoding time by reading the timestamp from the header and picking the type
that matches the hard fork the block belongs to.
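
For illustration, a rough sketch of the new shape (names and constants as
introduced in the diffs below; header and accumulatorProof are assumed to be
in scope, pre-merge case only):

# Sketch only, not part of the commit: the proof field is now an opaque
# SSZ-encoded ByteList instead of an SSZ Union.
let headerWithProof = BlockHeaderWithProof(
  header: ByteList[MAX_HEADER_LENGTH].init(rlp.encode(header)),
  proof: ByteList[MAX_HEADER_PROOF_LENGTH].init(SSZ.encode(accumulatorProof)),
)
# At decoding time the header's fork decides the concrete proof type; a
# pre-merge header carries an SSZ-encoded HistoricalHashesAccumulatorProof.
let proofRes = decodeSsz(headerWithProof.proof.asSeq(), HistoricalHashesAccumulatorProof)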

* Add content db migration function to fcli_db tool

* Update test vector repo
kdeme 2025-02-24 18:09:05 +01:00 committed by GitHub
parent aa4770d337
commit 21be015031
9 changed files with 371 additions and 42 deletions


@@ -0,0 +1,269 @@
# Fluffy
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  chronicles,
  metrics,
  stint,
  results,
  stew/ptrops,
  sqlite3_abi,
  eth/db/kvstore,
  eth/db/kvstore_sqlite3,
  eth/common/[headers_rlp, blocks_rlp, receipts_rlp, transactions_rlp],
  ../network/history/history_content,
  ../network/history/history_type_conversions,
  ../network/state/state_utils,
  ../network/state/state_content,
  ../network/history/content/content_values_deprecated

export kvstore_sqlite3

type
  ContentPair = tuple[contentKey: array[32, byte], contentItem: seq[byte]]

  ContentDBDeprecated* = ref object
    backend: SqStoreRef
    kv: KvStoreRef
    selectAllStmt: SqliteStmt[NoParams, ContentPair]
    deleteBatchStmt: SqliteStmt[NoParams, void]
    updateBatchStmt: SqliteStmt[NoParams, void]

func isWithoutProofImpl(content: openArray[byte]): bool =
  let headerWithProof = decodeSsz(content, BlockHeaderWithProofDeprecated).valueOr:
    # Leave all other content as it is
    return false

  if headerWithProof.proof.proofType ==
      BlockHeaderProofType.historicalHashesAccumulatorProof:
    false
  elif headerWithProof.proof.proofType == BlockHeaderProofType.none:
    true
  else:
    false

func isWithoutProof*(
    ctx: SqliteContext, n: cint, v: SqliteValue
) {.cdecl, gcsafe, raises: [].} =
  doAssert(n == 1)

  let
    ptrs = makeUncheckedArray(v)
    blob1Len = sqlite3_value_bytes(ptrs[][0])

  if isWithoutProofImpl(makeOpenArray(sqlite3_value_blob(ptrs[][0]), byte, blob1Len)):
    ctx.sqlite3_result_int(cint 1)
  else:
    ctx.sqlite3_result_int(cint 0)

func isWithInvalidEncodingImpl(content: openArray[byte]): bool =
  let headerWithProof = decodeSsz(content, BlockHeaderWithProofDeprecated).valueOr:
    # Leave all other content as it is
    return false

  if headerWithProof.proof.proofType ==
      BlockHeaderProofType.historicalHashesAccumulatorProof:
    true
  else:
    false

func isWithInvalidEncoding*(
    ctx: SqliteContext, n: cint, v: SqliteValue
) {.cdecl, gcsafe, raises: [].} =
  doAssert(n == 1)

  let
    ptrs = makeUncheckedArray(v)
    blobLen = sqlite3_value_bytes(ptrs[][0])

  if isWithInvalidEncodingImpl(
    makeOpenArray(sqlite3_value_blob(ptrs[][0]), byte, blobLen)
  ):
    ctx.sqlite3_result_int(cint 1)
  else:
    ctx.sqlite3_result_int(cint 0)

func adjustContentImpl(a: openArray[byte]): seq[byte] =
  let headerWithProof = decodeSsz(a, BlockHeaderWithProofDeprecated).valueOr:
    raiseAssert("Should not occur as decoding check is already done")

  let accumulatorProof = headerWithProof.proof.historicalHashesAccumulatorProof
  let adjustedContent = BlockHeaderWithProof(
    header: headerWithProof.header,
    proof: ByteList[MAX_HEADER_PROOF_LENGTH].init(SSZ.encode(accumulatorProof)),
  )

  SSZ.encode(adjustedContent)

func adjustContent*(
    ctx: SqliteContext, n: cint, v: SqliteValue
) {.cdecl, gcsafe, raises: [].} =
  doAssert(n == 1)

  let
    ptrs = makeUncheckedArray(v)
    blobLen = sqlite3_value_bytes(ptrs[][0])
    bytes =
      adjustContentImpl(makeOpenArray(sqlite3_value_blob(ptrs[][0]), byte, blobLen))

  sqlite3_result_blob(ctx, baseAddr bytes, cint bytes.len, SQLITE_TRANSIENT)

template expectDb(x: auto): untyped =
  # There's no meaningful error handling implemented for a corrupt database or
  # full disk - this requires manual intervention, so we'll panic for now
  x.expect("working database (disk broken/full?)")

## Public calls to get database size, content size and similar.

proc new*(
    T: type ContentDBDeprecated,
    path: string,
    inMemory = false,
    manualCheckpoint = false,
): ContentDBDeprecated =
  let db =
    if inMemory:
      SqStoreRef.init("", "fluffy-test", inMemory = true).expect(
        "working database (out of memory?)"
      )
    else:
      SqStoreRef.init(path, "fluffy", manualCheckpoint = false).expectDb()

  db.createCustomFunction("isWithoutProof", 1, isWithoutProof).expect(
    "Custom function isWithoutProof creation OK"
  )
  db.createCustomFunction("isWithInvalidEncoding", 1, isWithInvalidEncoding).expect(
    "Custom function isWithInvalidEncoding creation OK"
  )
  db.createCustomFunction("adjustContent", 1, adjustContent).expect(
    "Custom function adjustContent creation OK"
  )

  let selectAllStmt =
    db.prepareStmt("SELECT key, value FROM kvstore", NoParams, ContentPair)[]

  let deleteBatchStmt = db.prepareStmt(
    "DELETE FROM kvstore WHERE key IN (SELECT key FROM kvstore WHERE isWithoutProof(value) == 1)",
    NoParams, void,
  )[]

  let updateBatchStmt = db.prepareStmt(
    "UPDATE kvstore SET value = adjustContent(value) WHERE key IN (SELECT key FROM kvstore WHERE isWithInvalidEncoding(value) == 1)",
    NoParams, void,
  )[]

  let kvStore = kvStore db.openKvStore().expectDb()

  let contentDb = ContentDBDeprecated(
    kv: kvStore,
    backend: db,
    selectAllStmt: selectAllStmt,
    deleteBatchStmt: deleteBatchStmt,
    updateBatchStmt: updateBatchStmt,
  )

  contentDb

template disposeSafe(s: untyped): untyped =
  if distinctBase(s) != nil:
    s.dispose()
    s = typeof(s)(nil)

proc close*(db: ContentDBDeprecated) =
  db.selectAllStmt.disposeSafe()
  db.deleteBatchStmt.disposeSafe()
  db.updateBatchStmt.disposeSafe()
  discard db.kv.close()

proc deleteAllHeadersWithoutProof*(db: ContentDBDeprecated) =
  notice "ContentDB migration: deleting all headers without proof"
  db.deleteBatchStmt.exec().expectDb()
  notice "ContentDB migration done"

proc updateAllHeadersWithInvalidEncoding*(db: ContentDBDeprecated) =
  notice "ContentDB migration: updating all headers with invalid encoding"
  db.updateBatchStmt.exec().expectDb()
  notice "ContentDB migration done"

proc iterateAllAndCountTypes*(db: ContentDBDeprecated) =
  ## Ugly debugging call to print out count of content types in case of issues.
  var
    contentPair: ContentPair
    contentTotal = 0
    contentOldHeaders = 0
    contentNewHeaders = 0
    contentBodies = 0
    contentReceipts = 0
    contentAccount = 0
    contentContract = 0
    contentCode = 0
    contentOther = 0

  notice "ContentDB type count: iterating over all content"
  for e in db.selectAllStmt.exec(contentPair):
    contentTotal.inc()

    block:
      let res = decodeSsz(contentPair.contentItem, BlockHeaderWithProofDeprecated)
      if res.isOk():
        if decodeRlp(res.value().header.asSeq(), Header).isOk():
          contentOldHeaders.inc()
          continue
    block:
      let res = decodeSsz(contentPair.contentItem, BlockHeaderWithProof)
      if res.isOk():
        if decodeRlp(res.value().header.asSeq(), Header).isOk():
          contentNewHeaders.inc()
          continue
    block:
      let res = decodeSsz(contentPair.contentItem, PortalReceipts)
      if res.isOk():
        if fromPortalReceipts(seq[Receipt], res.value()).isOk():
          contentReceipts.inc()
          continue
    block:
      let res = decodeSsz(contentPair.contentItem, PortalBlockBodyShanghai)
      if res.isOk():
        if fromPortalBlockBody(BlockBody, res.value()).isOk():
          contentBodies.inc()
          continue
    block:
      let res = decodeSsz(contentPair.contentItem, PortalBlockBodyLegacy)
      if res.isOk():
        if fromPortalBlockBody(BlockBody, res.value()).isOk():
          contentBodies.inc()
          continue
    block:
      let res = decodeSsz(contentPair.contentItem, AccountTrieNodeRetrieval)
      if res.isOk():
        if rlpDecodeAccountTrieNode(res.value().node).isOk():
          contentAccount.inc()
          continue
    block:
      let res = decodeSsz(contentPair.contentItem, ContractTrieNodeRetrieval)
      if res.isOk():
        if rlpDecodeContractTrieNode(res.value().node).isOk():
          contentContract.inc()
          continue
    block:
      let res = decodeSsz(contentPair.contentItem, ContractCodeRetrieval)
      if res.isOk():
        contentCode.inc()
        continue

    contentOther.inc()

  notice "ContentDB type count done: ",
    contentTotal, contentOldHeaders, contentNewHeaders, contentReceipts, contentBodies,
    contentAccount, contentContract, contentCode, contentOther
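
For reference, a rough sketch of how these migration calls are meant to be
driven (it mirrors the cmdMigrate command added to the fcli_db tool further
down; databaseDir is an assumed path string):

# Rough usage sketch; databaseDir is an assumed path string, not from the commit.
let db = ContentDBDeprecated.new(databaseDir)
db.deleteAllHeadersWithoutProof()        # drop headers stored with the Union "none" proof
db.updateAllHeadersWithInvalidEncoding() # re-encode accumulator proofs as ByteList
db.close()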


@@ -26,25 +26,15 @@ const
   MAX_WITHDRAWALS_COUNT = MAX_WITHDRAWALS_PER_PAYLOAD
   MAX_EPHEMERAL_HEADER_PAYLOAD = 256
+  MAX_HEADER_PROOF_LENGTH* = 1024
 
 type
   ## BlockHeader types
   HistoricalHashesAccumulatorProof* = array[15, Digest]
 
-  BlockHeaderProofType* = enum
-    none = 0x00 # An SSZ Union None
-    historicalHashesAccumulatorProof = 0x01
-
-  BlockHeaderProof* = object
-    case proofType*: BlockHeaderProofType
-    of none:
-      discard
-    of historicalHashesAccumulatorProof:
-      historicalHashesAccumulatorProof*: HistoricalHashesAccumulatorProof
-
   BlockHeaderWithProof* = object
     header*: ByteList[MAX_HEADER_LENGTH] # RLP data
-    proof*: BlockHeaderProof
+    proof*: ByteList[MAX_HEADER_PROOF_LENGTH]
 
   ## Ephemeral BlockHeader list
   EphemeralBlockHeaderList* =
@@ -73,11 +63,3 @@ type
   ## Receipts types
   ReceiptByteList* = ByteList[MAX_RECEIPT_LENGTH] # RLP data
   PortalReceipts* = List[ReceiptByteList, MAX_TRANSACTION_COUNT]
-
-func init*(T: type BlockHeaderProof, proof: HistoricalHashesAccumulatorProof): T =
-  BlockHeaderProof(
-    proofType: historicalHashesAccumulatorProof, historicalHashesAccumulatorProof: proof
-  )
-
-func init*(T: type BlockHeaderProof): T =
-  BlockHeaderProof(proofType: none)


@@ -0,0 +1,33 @@
# fluffy
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import std/math, nimcrypto/hash, ssz_serialization

export ssz_serialization, hash

const MAX_HEADER_LENGTH = 2 ^ 11 # = 2048

type
  ## BlockHeader types
  HistoricalHashesAccumulatorProof* = array[15, Digest]

  BlockHeaderProofType* = enum
    none = 0x00 # An SSZ Union None
    historicalHashesAccumulatorProof = 0x01

  BlockHeaderProof* = object
    case proofType*: BlockHeaderProofType
    of none:
      discard
    of historicalHashesAccumulatorProof:
      historicalHashesAccumulatorProof*: HistoricalHashesAccumulatorProof

  BlockHeaderWithProofDeprecated* = object
    header*: ByteList[MAX_HEADER_LENGTH] # RLP data
    proof*: BlockHeaderProof


@@ -50,20 +50,23 @@ func validateHeaderBytes*(
   ok(header)
 
 func verifyBlockHeaderProof*(
-    a: FinishedHistoricalHashesAccumulator, header: Header, proof: BlockHeaderProof
+    a: FinishedHistoricalHashesAccumulator,
+    header: Header,
+    proof: ByteList[MAX_HEADER_PROOF_LENGTH],
 ): Result[void, string] =
-  case proof.proofType
-  of BlockHeaderProofType.historicalHashesAccumulatorProof:
-    a.verifyAccumulatorProof(header, proof.historicalHashesAccumulatorProof)
-  of BlockHeaderProofType.none:
-    if header.isPreMerge():
-      err("Pre merge header requires HistoricalHashesAccumulatorProof")
-    else:
-      # TODO:
-      # Add verification post merge based on historical_roots & historical_summaries
-      # Lets for now no longer accept other headers without a proof and the most
-      # recent ones are now a different type.
-      err("Post merge header proofs not yet activated")
+  let timestamp = Moment.init(header.timestamp.int64, Second)
+
+  if isShanghai(chainConfig, timestamp):
+    # TODO: Add verification post merge based on historical_summaries
+    err("Shanghai block verification not implemented")
+  elif isPoSBlock(chainConfig, header.number):
+    # TODO: Add verification post merge based on historical_roots
+    err("PoS block verification not implemented")
+  else:
+    let accumulatorProof = decodeSsz(proof.asSeq(), HistoricalHashesAccumulatorProof).valueOr:
+      return err("Failed decoding accumulator proof: " & error)
+
+    a.verifyAccumulatorProof(header, accumulatorProof)
 
 func validateCanonicalHeaderBytes*(
     bytes: openArray[byte], id: uint64 | Hash32, a: FinishedHistoricalHashesAccumulator


@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2024 Status Research & Development GmbH
+# Copyright (c) 2022-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -198,7 +198,7 @@ func buildHeaderWithProof*(
   ok(
     BlockHeaderWithProof(
-      header: ByteList[2048].init(rlp.encode(header)),
-      proof: BlockHeaderProof.init(proof),
+      header: ByteList[MAX_HEADER_LENGTH].init(rlp.encode(header)),
+      proof: ByteList[MAX_HEADER_PROOF_LENGTH].init(SSZ.encode(proof)),
     )
   )


@@ -1,5 +1,5 @@
 # Nimbus - Portal Network
-# Copyright (c) 2021-2024 Status Research & Development GmbH
+# Copyright (c) 2021-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -63,7 +63,8 @@ proc store*(hn: HistoryNode, blockHash: Hash32, blockHeader: Header) =
   let
     headerRlp = rlp.encode(blockHeader)
     blockHeaderWithProof = BlockHeaderWithProof(
-      header: ByteList[2048].init(headerRlp), proof: BlockHeaderProof.init()
+      header: ByteList[MAX_HEADER_LENGTH].init(headerRlp),
+      proof: ByteList[MAX_HEADER_PROOF_LENGTH].init(@[]),
     )
     contentKeyBytes = blockHeaderContentKey(blockHash).encode()
     contentId = history_content.toContentId(contentKeyBytes)


@@ -154,7 +154,8 @@ proc mockStateRootLookup*(
     blockHeader = Header(stateRoot: stateRoot)
     headerRlp = rlp.encode(blockHeader)
     blockHeaderWithProof = BlockHeaderWithProof(
-      header: ByteList[2048].init(headerRlp), proof: BlockHeaderProof.init()
+      header: ByteList[MAX_HEADER_LENGTH].init(headerRlp),
+      proof: ByteList[MAX_HEADER_PROOF_LENGTH].init(@[]),
     )
     contentKeyBytes = blockHeaderContentKey(blockNumOrHash).encode()
     contentId = history_content.toContentId(contentKeyBytes)


@@ -1,12 +1,18 @@
 # Fluffy
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
 import
-  chronicles, confutils, stint, eth/common/keys, ../database/content_db, ./benchmark
+  chronicles,
+  confutils,
+  stint,
+  eth/common/keys,
+  ../database/content_db,
+  ../database/content_db_migrate_deprecated,
+  ./benchmark
 
 when defined(posix):
   import system/ansi_c
@@ -29,6 +35,7 @@ type
     generate = "Generate random content into the database, for testing purposes."
     prune = "Prune the ContentDb in case of resizing or selecting a different local id"
     validate = "Validate all the content in the ContentDb"
+    migrate = "Migrate the ContentDb for new HeaderWithProof format"
 
   DbConf = object
     databaseDir* {.
@@ -60,6 +67,22 @@ type
     .}: bool
     of DbCmd.validate:
       discard
+    of DbCmd.migrate:
+      deleteNoProof* {.
+        desc: "Delete old HeaderWithProof content without proof",
+        defaultValue: true,
+        name: "delete-no-proof"
+      .}: bool
+      updateWithProof* {.
+        desc: "Update old HeaderWithProof pre-merge content with proof",
+        defaultValue: true,
+        name: "update-with-proof"
+      .}: bool
+      checkTypes* {.
+        desc: "Iterate and count all content types",
+        defaultValue: false,
+        name: "debug-check-types"
+      .}: bool
 
 const maxDbSize = 4_000_000_000'u64
@@ -153,6 +176,21 @@ proc cmdPrune(conf: DbConf) =
   notice "Functionality not yet implemented"
   quit QuitSuccess
 
+proc cmdMigrate(conf: DbConf) =
+  let db = ContentDBDeprecated.new(conf.databaseDir.string)
+
+  if conf.checkTypes:
+    db.iterateAllAndCountTypes()
+
+  if conf.deleteNoProof:
+    db.deleteAllHeadersWithoutProof()
+
+  if conf.updateWithProof:
+    db.updateAllHeadersWithInvalidEncoding()
+
+  if conf.checkTypes and (conf.deleteNoProof or conf.updateWithProof):
+    db.iterateAllAndCountTypes()
+
 proc controlCHook() {.noconv.} =
   notice "Shutting down after having received SIGINT."
   quit QuitSuccess
@@ -177,3 +215,5 @@ when isMainModule:
     cmdPrune(conf)
   of DbCmd.validate:
     notice "Functionality not yet implemented"
+  of DbCmd.migrate:
+    cmdMigrate(conf)

@@ -1 +1 @@
-Subproject commit 9d48845c2f01ccd95e9d4d6ffe4eb2b231b7cc40
+Subproject commit 6f2a2a8a14225c2acabfd2d2707c5c75edbb6b46