mirror of https://github.com/status-im/nimbus-eth1.git
synced 2025-01-14 22:34:23 +00:00
dbe3393f5c
* Fix eth/common & web3 related deprecation warnings for fluffy

  This commit uses the new types in the new eth/common/ structure to remove
  deprecation warnings. It is more than a mass replace, however: all places
  where eth/common, eth/common/eth_types or eth/common/eth_types_rlp got
  imported have also been revised and adjusted to finer-grained per-submodule
  imports. There are still a bunch of toMDigest deprecation warnings, but that
  converter is no longer needed in fluffy code, so in theory it should not be
  used (bug?). It seems to still get pulled in via export leaks from imported
  nimbus code.

* Address review comments

* Remove two more unused eth/common imports
83 lines
2.8 KiB
Nim
# Nimbus - Portal Network
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  stew/[byteutils, io2],
  chronicles,
  results,
  eth/common/headers_rlp,
  ncli/e2store,
  ../network/history/[history_content, validation/historical_hashes_accumulator]

export results

# Reading SSZ data from files

proc readAccumulator*(
    file: string
): Result[FinishedHistoricalHashesAccumulator, string] =
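  ## Reads and SSZ-decodes a `FinishedHistoricalHashesAccumulator` from `file`.
  ## Returns an error message when the file cannot be read or decoded.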
  let encodedAccumulator = ?readAllFile(file).mapErr(toString)

  try:
    ok(SSZ.decode(encodedAccumulator, FinishedHistoricalHashesAccumulator))
  except SerializationError as e:
    err("Failed decoding accumulator: " & e.msg)

proc readEpochRecord*(file: string): Result[EpochRecord, string] =
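  ## Reads and SSZ-decodes an `EpochRecord` from `file`.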
  let encodedAccumulator = ?readAllFile(file).mapErr(toString)

  try:
    ok(SSZ.decode(encodedAccumulator, EpochRecord))
  except SerializationError as e:
    err("Failed decoding epoch record: " & e.msg)

proc readEpochRecordCached*(file: string): Result[EpochRecordCached, string] =
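  ## Reads and SSZ-decodes an `EpochRecordCached` from `file`.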
  let encodedAccumulator = ?readAllFile(file).mapErr(toString)

  try:
    ok(SSZ.decode(encodedAccumulator, EpochRecordCached))
  except SerializationError as e:
    err("Failed decoding cached epoch record: " & e.msg)

# Reading data in e2s format

const
  # Using the e2s format to store data, but without the specific structure
  # like in an era file, as we currently don't really need that.
  # See: https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
  # Added one type for now, with numbers not formally specified.
  # Note:
  # Snappy compression for `ExecutionBlockHeaderRecord` only helps for the
  # first ~1M (?) block headers, after that there is no gain so we don't do it.
  ExecutionBlockHeaderRecord* = [byte 0xFF, 0x00]

proc readBlockHeaders*(file: string): Result[seq[headers.Header], string] =
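  ## Reads all `ExecutionBlockHeaderRecord` entries from an e2s `file` and
  ## RLP-decodes them into block headers. Records of any other type are
  ## skipped with a warning.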
  let fh = ?openFile(file, {OpenFlags.Read}).mapErr(toString)
  defer:
    discard closeFile(fh)

  var data: seq[byte]

  var blockHeaders: seq[headers.Header]
  while true:
    let header = readRecord(fh, data).valueOr:
      break

    if header.typ == ExecutionBlockHeaderRecord:
      let blockHeader =
        try:
          rlp.decode(data, headers.Header)
        except RlpError as e:
          return err("Invalid block header in " & file & ": " & e.msg)

      blockHeaders.add(blockHeader)
    else:
      warn "Skipping record, not a block header", typ = toHex(header.typ)

  ok(blockHeaders)
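
# A minimal usage sketch (the file names below are hypothetical examples;
# `valueOr` and the `error` identifier it exposes come from the exported
# `results` module, as used above in `readBlockHeaders`):
#
#   let accumulator = readAccumulator("accumulator.ssz").valueOr:
#     quit("Cannot read accumulator: " & error)
#
#   let blockHeaders = readBlockHeaders("headers.e2s").valueOr:
#     quit("Cannot read block headers: " & error)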