Add Header Accumulator for the Header Gossip Network (#1094)

This commit is contained in:
Kim De Mey 2022-05-18 22:49:35 +02:00 committed by GitHub
parent 575c69e6ba
commit 6e05c7588e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 158 additions and 6 deletions

View File

@@ -9,14 +9,15 @@
import
ssz_serialization/types,
stew/byteutils
stew/byteutils, nimcrypto/hash
type
ByteList* = List[byte, 2048]
Bytes2* = array[2, byte]
Bytes32* = array[32, byte]
ContentId* = Uint256
ContentId* = UInt256
BlockHash* = MDigest[32 * 8] # Bytes32
func `$`*(x: ByteList): string =
  ## Hex-encoded representation of the raw bytes held by `x`.
  toHex(asSeq(x))

View File

@@ -0,0 +1,78 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# https://github.com/ethereum/portal-network-specs/blob/master/header-gossip-network.md
{.push raises: [Defect].}
import
nimcrypto/[sha2, hash],
ssz_serialization, ssz_serialization/merkleization,
eth/common/eth_types,
../../common/common_types
export merkleization
const
epochSize = 8192 # blocks
maxHistoricalEpochs = 100_000 # Doesn't really need a limit, does it?
type
# Header Gossip Content Keys
# https://github.com/ethereum/portal-network-specs/blob/master/header-gossip-network.md#content-keys
ContentType* = enum
accumulatorSnapshot = 0x00
newBlockHeader = 0x01
AccumulatorSnapshotKey* = object
accumulatorRootHash*: Bytes32
NewBlockHeaderKey* = object
blockHash*: BlockHash
blockNumber*: UInt256
ContentKey* = object
case contentType*: ContentType
of accumulatorSnapshot:
accumulatorSnapshotKey*: AccumulatorSnapshotKey
of newBlockHeader:
newBlockHeaderKey*: NewBlockHeaderKey
# Header Accumulator
# https://github.com/ethereum/portal-network-specs/blob/master/header-gossip-network.md#accumulator-snapshot
HeaderRecord = object
blockHash: BlockHash
totalDifficulty: UInt256
EpochAccumulator = List[HeaderRecord, epochSize]
Accumulator* = object
historicalEpochs*: List[Bytes32, maxHistoricalEpochs]
currentEpoch*: EpochAccumulator
func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
  ## Append `header` to the accumulator. When the current epoch is full, its
  ## SSZ root is first pushed onto `historicalEpochs` and a fresh epoch is
  ## started before the new record is added.
  # Total difficulty reached by the previously added header, or zero when
  # this is the very first header.
  let prevTotalDifficulty =
    if a.currentEpoch.len() > 0:
      a.currentEpoch[^1].totalDifficulty
    else:
      0.stuint(256)

  # Seal a full epoch: record its hash_tree_root and start a new epoch.
  if a.currentEpoch.len() == epochSize:
    let epochRoot = hash_tree_root(a.currentEpoch)
    doAssert(a.historicalEpochs.add(epochRoot.data))
    a.currentEpoch = EpochAccumulator.init(@[])

  let record = HeaderRecord(
    blockHash: header.blockHash(),
    totalDifficulty: prevTotalDifficulty + header.difficulty)

  # `add` cannot fail here: a full epoch was rotated out just above.
  doAssert(a.currentEpoch.add(record),
    "Can't fail because of currentEpoch length check")

View File

@@ -23,8 +23,6 @@ type
blockBody = 0x01
receipts = 0x02
BlockHash* = MDigest[32 * 8] # Bytes32
ContentKeyType* = object
chainId*: uint16
blockHash*: BlockHash

View File

@@ -15,6 +15,8 @@ import
./network/wire/portal_protocol,
./network/history/history_content
export results
# Helper calls to, offline, populate the database with the current existing json
# files with block data. Might move to some other storage format later on.
# Perhaps https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
@@ -137,6 +139,22 @@ iterator blocks*(
else:
error "Failed reading block from block data", error = res.error
proc readBlockHeader*(blockData: BlockData): Result[BlockHeader, string] =
  ## Decode the RLP-encoded block stored in `blockData.rlp` and return just
  ## its header. Errors on invalid hex, on an item that is not an rlp list,
  ## or on rlp that does not decode to a `BlockHeader`.
  var reader =
    try:
      rlpFromHex(blockData.rlp)
    except ValueError as e:
      return err("Invalid hex for rlp block data, number " &
        $blockData.number & ": " & e.msg)

  # A block is rlp([header, txs, uncles]); step into the outer list so the
  # first element read is the header.
  if not reader.enterList():
    return err("Item is not a valid rlp list, number " & $blockData.number)

  try:
    return ok(reader.read(BlockHeader))
  except RlpError as e:
    return err("Invalid header, number " & $blockData.number & ": " & e.msg)
# TODO pass nodeid as uint256 so it will be possible to use put method which
# preserves size
proc populateHistoryDb*(
@@ -146,7 +164,7 @@ proc populateHistoryDb*(
for b in blocks(blockData, verify):
for value in b:
# Note: This is the slowest part due to the hashing that takes place.
# TODO use put method which preserves size
# TODO use put method which preserves size
db.put(history_content.toContentId(value[0]), value[1])
ok()

View File

@@ -15,6 +15,7 @@ import
./test_state_network,
./test_history_content,
./test_history_validation,
./test_header_content,
./test_content_db,
./test_discovery_rpc,
./test_bridge_parser

View File

@@ -0,0 +1,10 @@
{
"0x88e96d4537bea4d9c05d12549907b32561d3bf31f45aae734cdc119f13406cb6": {
"rlp": "0xf90216f90211a0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479405a56e2d52c817161883f50c441c3228cfe54d9fa0d67e4d450343046425ae4271474353857ab860dbc0a1dde64b41b5cd3a532bf3a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008503ff80000001821388808455ba422499476574682f76312e302e302f6c696e75782f676f312e342e32a0969b900de27b6ac6a67742365dd65f55a0526c41fd18e1b16f1a1215c2e66f5988539bd4979fef1ec4c0c0",
"number": 1
},
"0xb495a1d7e6663152ae92708da4843337b958146015a2802f4193a410044698c9": {
"rlp": "0xf9021df90218a088e96d4537bea4d9c05d12549907b32561d3bf31f45aae734cdc119f13406cb6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794dd2f1e6e498202e86d8f5442af596580a4f03c2ca04943d941637411107494da9ec8bc04359d731bfd08b72b4d0edcbd4cd2ecb341a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008503ff00100002821388808455ba4241a0476574682f76312e302e302d30636463373634372f6c696e75782f676f312e34a02f0790c5aa31ab94195e1f6443d645af5b75c46c04fbf9911711198a0ce8fdda88b853fa261a86aa9ec0c0",
"number": 2
}
}

View File

@@ -0,0 +1,46 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
{.push raises: [Defect].}
import
std/tables,
unittest2, stew/byteutils,
eth/common/eth_types,
../network/header/header_content,
../populate_db
suite "Header Gossip Content":
  test "Header Accumulator Update":
    ## Feed the first two mainnet block headers into an empty accumulator
    ## and verify its hash_tree_root after each update against known values.
    const dataFile = "./fluffy/tests/blocks/mainnet_blocks_1-2.json"

    let blockDataRes = readBlockDataTable(dataFile)
    check blockDataRes.isOk()
    let blockDataTable = blockDataRes.get()

    # Place headers at index blockNumber - 1, as the json table iteration
    # order is not necessarily the block order.
    var headers = newSeq[BlockHeader](blockDataTable.len())
    for blockData in blockDataTable.values:
      let headerRes = blockData.readBlockHeader()
      check headerRes.isOk()
      let header = headerRes.get()
      headers[header.blockNumber.truncate(int) - 1] = header

    var accumulator: Accumulator

    updateAccumulator(accumulator, headers[0])
    check accumulator.hash_tree_root().data.toHex() ==
      "411548579b5f6c651e6e1e56c3dc3fae6f389c663c0c910e462a4b806831fef6"

    updateAccumulator(accumulator, headers[1])
    check accumulator.hash_tree_root().data.toHex() ==
      "e8dbd17538189d9a5b77001ff80c4ff6d841ceb0a3d374d17ddc4098550f5f93"

View File

@@ -80,4 +80,4 @@ task test_portal_testnet, "Build test_portal_testnet":
task testfluffy, "Run fluffy tests":
# Need the nimbus_db_backend in state network tests as we need a Hexary to
# start from, even though it only uses the MemoryDb.
test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite"
test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"