Mirror of https://github.com/status-im/nimbus-eth1.git
Add Accumulator build helper calls and refactors (#1127)
- Move the accumulator definitions to a history accumulator file
- Add accumulator build helper calls + temporary database
- Add a header gossip content key encoding test
- Refactor & some cleanup
Parent 69a1000d77, commit de2051dcf8
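The build helpers introduced in this commit make it possible to construct an accumulator offline from the JSON block data files that populate_db already reads, and to keep the result in a temporary SQLite-backed database. A minimal usage sketch of that flow (not part of the commit); it assumes the snippet lives in the fluffy directory, and the import and data file paths are illustrative only:

# Sketch only: build an accumulator offline and read back the latest one.
import
  stew/results,
  ./network/history/accumulator

# Temporary in-memory database (the path argument is unused when inMemory).
let db = AccumulatorDB.new("", inMemory = true)

# Build the accumulator, plus any completed epoch accumulators, from a JSON
# block data file and store everything in the database.
let res = db.buildAccumulator("./fluffy/tests/blocks/mainnet_blocks_1-2.json")
doAssert res.isOk()

# The latest accumulator can then be read back, e.g. to verify gossiped headers.
doAssert db.getAccumulator().isSome()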
@@ -11,6 +11,8 @@ import
   ssz_serialization/types,
   stew/byteutils, nimcrypto/hash

+export hash
+
 type
   ByteList* = List[byte, 2048]
   Bytes2* = array[2, byte]
@@ -10,27 +10,21 @@
 {.push raises: [Defect].}

 import
-  nimcrypto/[sha2, hash],
-  ssz_serialization, ssz_serialization/merkleization,
-  eth/common/eth_types,
+  std/options,
+  nimcrypto/[sha2, hash], stint,
+  ssz_serialization,
   ../../common/common_types

-export merkleization
-
-const
-  epochSize = 8192 # blocks
-  maxHistoricalEpochs = 131072 # 2^17
+export ssz_serialization, common_types, options, hash

 type
   # Header Gossip Content Keys
   # https://github.com/ethereum/portal-network-specs/blob/master/header-gossip-network.md#content-keys
+  # But with Accumulator removed as per
+  # https://github.com/ethereum/portal-network-specs/issues/153

   ContentType* = enum
-    accumulatorSnapshot = 0x00
-    newBlockHeader = 0x01
-
-  AccumulatorSnapshotKey* = object
-    accumulatorRootHash*: Bytes32
+    newBlockHeader = 0x00

   NewBlockHeaderKey* = object
     blockHash*: BlockHash
@@ -38,41 +32,22 @@ type

   ContentKey* = object
     case contentType*: ContentType
-    of accumulatorSnapshot:
-      accumulatorSnapshotKey*: AccumulatorSnapshotKey
     of newBlockHeader:
       newBlockHeaderKey*: NewBlockHeaderKey

-  # Header Accumulator
-  # https://github.com/ethereum/portal-network-specs/blob/master/header-gossip-network.md#accumulator-snapshot
-
-  HeaderRecord = object
-    blockHash: BlockHash
-    totalDifficulty: UInt256
-
-  EpochAccumulator = List[HeaderRecord, epochSize]
-
-  Accumulator* = object
-    historicalEpochs*: List[Bytes32, maxHistoricalEpochs]
-    currentEpoch*: EpochAccumulator
-
-func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
-  let lastTotalDifficulty =
-    if a.currentEpoch.len() == 0:
-      0.stuint(256)
-    else:
-      a.currentEpoch[^1].totalDifficulty
-
-  if a.currentEpoch.len() == epochSize:
-    let epochHash = hash_tree_root(a.currentEpoch)
-
-    doAssert(a.historicalEpochs.add(epochHash.data))
-    a.currentEpoch = EpochAccumulator.init(@[])
-
-  let headerRecord =
-    HeaderRecord(
-      blockHash: header.blockHash(),
-      totalDifficulty: lastTotalDifficulty + header.difficulty)
-
-  let res = a.currentEpoch.add(headerRecord)
-  doAssert(res, "Can't fail because of currentEpoch length check")
+func encode*(contentKey: ContentKey): ByteList =
+  ByteList.init(SSZ.encode(contentKey))
+
+func decode*(contentKey: ByteList): Option[ContentKey] =
+  try:
+    some(SSZ.decode(contentKey.asSeq(), ContentKey))
+  except SszError:
+    return none[ContentKey]()
+
+func toContentId*(contentKey: ByteList): ContentId =
+  # TODO: Should we try to parse the content key here for invalid ones?
+  let idHash = sha2.sha_256.digest(contentKey.asSeq())
+  readUintBE[256](idHash.data)
+
+func toContentId*(contentKey: ContentKey): ContentId =
+  toContentId(encode(contentKey))
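The encode/decode/toContentId helpers added above fully determine the content id of a header gossip content key: SSZ-encode the key, hash the bytes with sha256, and read the digest as a big-endian 256-bit unsigned integer. A small standalone sketch of that derivation (not part of the commit), reusing the SSZ-encoded content key and the expected id from the test vectors further down:

# Sketch: recompute a content id from an SSZ-encoded content key.
import nimcrypto/[sha2, hash], stint, stew/byteutils

let
  # SSZ encoding of the newBlockHeader content key used in the tests below.
  contentKeyBytes = hexToSeqByte(
    "00d1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d" &
    "0200000000000000000000000000000000000000000000000000000000000000")
  idHash = sha2.sha_256.digest(contentKeyBytes)
  contentId = readUintBE[256](idHash.data)

# Matches the contentIdHexBE test vector:
# cdba9789eec7a1994ec7c033c46c2c94242da2c016051bf09240fd9a81589894
echo contentId.toHex()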
fluffy/network/history/accumulator.nim (new file, 219 lines)

# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  eth/db/kvstore,
  eth/db/kvstore_sqlite3,
  eth/common/eth_types,
  ssz_serialization, ssz_serialization/[proofs, merkleization],
  ../../common/common_types,
  ../../populate_db,
  ./history_content

export kvstore_sqlite3, merkleization

# Header Accumulator
# Part from specification
# https://github.com/ethereum/portal-network-specs/blob/master/header-gossip-network.md#accumulator-snapshot
# However, applied for the history network instead of the header gossip network
# as per https://github.com/ethereum/portal-network-specs/issues/153

const
  epochSize* = 8192 # blocks
  maxHistoricalEpochs = 131072 # 2^17

type
  HeaderRecord* = object
    blockHash*: BlockHash
    totalDifficulty*: UInt256

  EpochAccumulator* = List[HeaderRecord, epochSize]

  Accumulator* = object
    historicalEpochs*: List[Bytes32, maxHistoricalEpochs]
    currentEpoch*: EpochAccumulator

func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
  let lastTotalDifficulty =
    if a.currentEpoch.len() == 0:
      0.stuint(256)
    else:
      a.currentEpoch[^1].totalDifficulty

  if a.currentEpoch.len() == epochSize:
    let epochHash = hash_tree_root(a.currentEpoch)

    doAssert(a.historicalEpochs.add(epochHash.data))
    a.currentEpoch = EpochAccumulator.init(@[])

  let headerRecord =
    HeaderRecord(
      blockHash: header.blockHash(),
      totalDifficulty: lastTotalDifficulty + header.difficulty)

  let res = a.currentEpoch.add(headerRecord)
  doAssert(res, "Can't fail because of currentEpoch length check")

type
  # Note:
  # This database should eventually just be a part of the ContentDB.
  # The reason it is currently separated is because it is experimental and
  # because accumulator data will in the first tests be used aside to verify
  # headers without actually transferring the data over the network. Hence,
  # all data needs to be available and no pruning should be done on this data.
  AccumulatorDB* = ref object
    kv: KvStoreRef

  # This is a bit of a hacky way to access the latest accumulator right now,
  # hacky in the sense that in theory some contentId could result in this key.
  # Could have a prefix for each key access, but that will not play along nicely
  # with calls that use distance function (pruning, range access).
  # Could drop it in a separate table/kvstore. And could have a mapping of
  # certain specific requests (e.g. latest) to their hash.
  DbKey = enum
    kLatestAccumulator

func subkey(kind: DbKey): array[1, byte] =
  [byte ord(kind)]

template expectDb(x: auto): untyped =
  # There's no meaningful error handling implemented for a corrupt database or
  # full disk - this requires manual intervention, so we'll panic for now
  x.expect("working database (disk broken/full?)")

proc new*(T: type AccumulatorDB, path: string, inMemory = false): AccumulatorDB =
  let db =
    if inMemory:
      SqStoreRef.init("", "fluffy-acc-db", inMemory = true).expect(
        "working database (out of memory?)")
    else:
      SqStoreRef.init(path, "fluffy-acc-db").expectDb()

  AccumulatorDB(kv: kvStore db.openKvStore().expectDb())

proc get(db: AccumulatorDB, key: openArray[byte]): Option[seq[byte]] =
  var res: Option[seq[byte]]
  proc onData(data: openArray[byte]) = res = some(@data)

  discard db.kv.get(key, onData).expectDb()

  return res

proc put(db: AccumulatorDB, key, value: openArray[byte]) =
  db.kv.put(key, value).expectDb()

proc contains(db: AccumulatorDB, key: openArray[byte]): bool =
  db.kv.contains(key).expectDb()

proc del(db: AccumulatorDB, key: openArray[byte]) =
  db.kv.del(key).expectDb()

proc get*(db: AccumulatorDB, key: ContentId): Option[seq[byte]] =
  db.get(key.toByteArrayBE())

proc put*(db: AccumulatorDB, key: ContentId, value: openArray[byte]) =
  db.put(key.toByteArrayBE(), value)

proc contains*(db: AccumulatorDB, key: ContentId): bool =
  db.contains(key.toByteArrayBE())

proc del*(db: AccumulatorDB, key: ContentId) =
  db.del(key.toByteArrayBE())

proc get(
    db: AccumulatorDB, key: openArray[byte],
    T: type auto): Option[T] =
  let res = db.get(key)
  if res.isSome():
    try:
      some(SSZ.decode(res.get(), T))
    except SszError:
      raiseAssert("Stored data should always be serialized correctly")
  else:
    none(T)

# TODO: Will it be required to store more than just the latest accumulator?
proc getAccumulator*(db: AccumulatorDB, key: ContentId): Option[Accumulator] =
  db.get(key.toByteArrayBE, Accumulator)

proc getAccumulator*(db: AccumulatorDB): Option[Accumulator] =
  db.get(subkey(kLatestAccumulator), Accumulator)

proc getAccumulatorSSZ*(db: AccumulatorDB): Option[seq[byte]] =
  db.get(subkey(kLatestAccumulator))

proc putAccumulator*(db: AccumulatorDB, value: openArray[byte]) =
  db.put(subkey(kLatestAccumulator), value)

proc getEpochAccumulator*(
    db: AccumulatorDB, key: ContentId): Option[EpochAccumulator] =
  db.get(key.toByteArrayBE(), EpochAccumulator)

# Following calls are there for building up the accumulator from a bit set of
# headers, which then can be used to inject into the network and to generate
# header proofs from.
# It will not be used in the more general usage of Fluffy
# Note: One could also make a Portal network and or json-rpc eth1 endpoint
# version of this.

proc buildAccumulator*(db: AccumulatorDB, headers: seq[BlockHeader]) =
  var accumulator: Accumulator
  for header in headers:
    updateAccumulator(accumulator, header)

    if accumulator.currentEpoch.len() == epochSize:
      let rootHash = accumulator.currentEpoch.hash_tree_root()
      let key = ContentKey(
        contentType: epochAccumulator,
        epochAccumulatorKey: EpochAccumulatorKey(
          epochHash: rootHash))

      db.put(key.toContentId(), SSZ.encode(accumulator.currentEpoch))

  db.putAccumulator(SSZ.encode(accumulator))

proc buildAccumulator*(
    db: AccumulatorDB, dataFile: string): Result[void, string] =
  let blockData = ? readBlockDataTable(dataFile)

  var headers: seq[BlockHeader]
  # Len of headers from blockdata + genesis header
  headers.setLen(blockData.len() + 1)

  headers[0] = getGenesisHeader()

  for k, v in blockData.pairs:
    let header = ? v.readBlockHeader()
    headers[header.blockNumber.truncate(int)] = header

  db.buildAccumulator(headers)

  ok()

func buildAccumulator(headers: seq[BlockHeader]): Accumulator =
  var accumulator: Accumulator
  for header in headers:
    updateAccumulator(accumulator, header)

  accumulator

proc buildAccumulator*(dataFile: string): Result[Accumulator, string] =
  let blockData = ? readBlockDataTable(dataFile)

  var headers: seq[BlockHeader]
  # Len of headers from blockdata + genesis header
  headers.setLen(blockData.len() + 1)

  headers[0] = getGenesisHeader()

  for k, v in blockData.pairs:
    let header = ? v.readBlockHeader()
    headers[header.blockNumber.truncate(int)] = header

  ok(buildAccumulator(headers))
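As a reading aid for updateAccumulator above: every header appends a HeaderRecord whose totalDifficulty is the running sum of header difficulties, and once currentEpoch holds epochSize (8192) records its hash_tree_root is pushed into historicalEpochs and a fresh epoch is started. A minimal sketch with synthetic headers (not part of the commit; the module path and header values are made up for illustration):

# Sketch: observe the running total difficulty kept by updateAccumulator.
import
  eth/common/eth_types, stint, ssz_serialization,
  ./network/history/accumulator

var acc: Accumulator
for i in 1 .. 3:
  # Synthetic headers; only difficulty matters for this illustration.
  updateAccumulator(acc, BlockHeader(difficulty: i.u256))

doAssert acc.currentEpoch.len() == 3
# Total difficulty accumulates: 1 + 2 + 3 = 6.
doAssert acc.currentEpoch[^1].totalDifficulty == 6.u256
# historicalEpochs only grows after epochSize (8192) headers.
doAssert acc.historicalEpochs.len() == 0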
@@ -15,7 +15,7 @@ import
   ssz_serialization,
   ../../common/common_types

-export ssz_serialization, common_types
+export ssz_serialization, common_types, hash

 type
   ContentType* = enum
@@ -16,7 +16,7 @@ import
   ssz_serialization,
   ../../common/common_types

-export ssz_serialization, common_types
+export ssz_serialization, common_types, hash

 type
   NodeHash* = MDigest[32 * 8] # keccak256
@@ -18,7 +18,7 @@ import
   ./network/wire/portal_protocol,
   ./network/history/history_content

-export results
+export results, tables

 # Helper calls to, offline, populate the database with the current existing json
 # files with block data. Might move to some other storage format later on.
@@ -16,6 +16,7 @@ import
   ./test_history_content,
   ./test_history_validation,
   ./test_header_content,
+  ./test_accumulator,
   ./test_content_db,
   ./test_discovery_rpc,
   ./test_bridge_parser
fluffy/tests/test_accumulator.nim (new file, 50 lines)

# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

{.push raises: [Defect].}

import
  unittest2, stint, stew/byteutils,
  eth/common/eth_types,
  ../populate_db,
  ../network/history/accumulator

suite "Header Accumulator":
  test "Header Accumulator Update":
    const
      hashTreeRoots = [
        "b629833240bb2f5eabfb5245be63d730ca4ed30d6a418340ca476e7c1f1d98c0",
        "00cbebed829e1babb93f2300bebe7905a98cb86993c7fc09bb5b04626fd91ae5",
        "88cce8439ebc0c1d007177ffb6831c15c07b4361984cc52235b6fd728434f0c7"]

      dataFile = "./fluffy/tests/blocks/mainnet_blocks_1-2.json"

    let blockDataRes = readBlockDataTable(dataFile)

    check blockDataRes.isOk()
    let blockData = blockDataRes.get()

    var headers: seq[BlockHeader]
    # Len of headers from blockdata + genesis header
    headers.setLen(blockData.len() + 1)

    headers[0] = getGenesisHeader()

    for k, v in blockData.pairs:
      let res = v.readBlockHeader()
      check res.isOk()
      let header = res.get()
      headers[header.blockNumber.truncate(int)] = header

    var accumulator: Accumulator

    for i, hash in hashTreeRoots:
      updateAccumulator(accumulator, headers[i])

      check accumulator.hash_tree_root().data.toHex() == hashTreeRoots[i]
@@ -10,42 +10,42 @@
 {.push raises: [Defect].}

 import
-  std/tables,
   unittest2, stew/byteutils,
-  eth/common/eth_types,
-  ../network/header/header_content,
-  ../populate_db
+  ../network/header/header_content

-suite "Header Gossip Content":
-  test "Header Accumulator Update":
-    const
-      hashTreeRoots = [
-        "b629833240bb2f5eabfb5245be63d730ca4ed30d6a418340ca476e7c1f1d98c0",
-        "00cbebed829e1babb93f2300bebe7905a98cb86993c7fc09bb5b04626fd91ae5",
-        "88cce8439ebc0c1d007177ffb6831c15c07b4361984cc52235b6fd728434f0c7"]
-
-      dataFile = "./fluffy/tests/blocks/mainnet_blocks_1-2.json"
-
-    let blockDataRes = readBlockDataTable(dataFile)
-
-    check blockDataRes.isOk()
-    let blockData = blockDataRes.get()
-
-    var headers: seq[BlockHeader]
-    # Len of headers from blockdata + genesis header
-    headers.setLen(blockData.len() + 1)
-
-    headers[0] = getGenesisHeader()
-
-    for k, v in blockData.pairs:
-      let res = v.readBlockHeader()
-      check res.isOk()
-      let header = res.get()
-      headers[header.blockNumber.truncate(int)] = header
-
-    var accumulator: Accumulator
-
-    for i, hash in hashTreeRoots:
-      updateAccumulator(accumulator, headers[i])
-
-      check accumulator.hash_tree_root().data.toHex() == hashTreeRoots[i]
+suite "Header Gossip ContentKey Encodings":
+  test "BlockHeader":
+    # Input
+    const
+      blockHash = BlockHash.fromHex(
+        "0xd1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d")
+      blockNumber = 2.stuint(256)
+
+    # Output
+    const
+      contentKeyHex =
+        "00d1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d0200000000000000000000000000000000000000000000000000000000000000"
+      contentId =
+        "93053813395975896824800219097617621670658136800980011170166846009189305194644"
+      # or
+      contentIdHexBE =
+        "cdba9789eec7a1994ec7c033c46c2c94242da2c016051bf09240fd9a81589894"
+
+    let contentKey = ContentKey(
+      contentType: newBlockHeader,
+      newBlockHeaderKey:
+        NewBlockHeaderKey(blockHash: blockHash, blockNumber: blockNumber))
+
+    let encoded = encode(contentKey)
+    check encoded.asSeq.toHex == contentKeyHex
+    let decoded = decode(encoded)
+    check decoded.isSome()
+
+    let contentKeyDecoded = decoded.get()
+    check:
+      contentKeyDecoded.contentType == contentKey.contentType
+      contentKeyDecoded.newBlockHeaderKey == contentKey.newBlockHeaderKey
+
+      toContentId(contentKey) == parse(contentId, Stuint[256], 10)
+      # In stint this does BE hex string
+      toContentId(contentKey).toHex() == contentIdHexBE
@@ -17,8 +17,7 @@ import
 suite "History ContentKey Encodings":
   test "BlockHeader":
     # Input
-    var blockHash: BlockHash
-    blockHash.data = hexToByteArray[sizeof(BlockHash)](
+    const blockHash = BlockHash.fromHex(
       "0xd1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d")

     # Output
||||||
@ -51,8 +50,7 @@ suite "History ContentKey Encodings":
|
|||||||
|
|
||||||
test "BlockBody":
|
test "BlockBody":
|
||||||
# Input
|
# Input
|
||||||
var blockHash: BlockHash
|
const blockHash = BlockHash.fromHex(
|
||||||
blockHash.data = hexToByteArray[sizeof(BlockHash)](
|
|
||||||
"0xd1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d")
|
"0xd1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d")
|
||||||
|
|
||||||
# Output
|
# Output
|
||||||
@@ -84,8 +82,8 @@ suite "History ContentKey Encodings":
       toContentId(contentKey).toHex() == contentIdHexBE

   test "Receipts":
-    var blockHash: BlockHash
-    blockHash.data = hexToByteArray[sizeof(BlockHash)](
+    # Input
+    const blockHash = BlockHash.fromHex(
       "0xd1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d")

     # Output
@@ -117,10 +115,11 @@ suite "History ContentKey Encodings":
       toContentId(contentKey).toHex() == contentIdHexBE

   test "Epoch Accumulator":
-    var epochHash: Digest
-    epochHash.data = hexToByteArray[sizeof(Digest)](
+    # Input
+    const epochHash = Digest.fromHex(
       "0xe242814b90ed3950e13aac7e56ce116540c71b41d1516605aada26c6c07cc491")

+    # Output
     const
       contentKeyHex =
         "03e242814b90ed3950e13aac7e56ce116540c71b41d1516605aada26c6c07cc491"
@@ -149,10 +148,7 @@ suite "History ContentKey Encodings":
       toContentId(contentKey).toHex() == contentIdHexBE

   test "Master Accumulator - Latest":
-    var accumulatorHash: Digest
-    accumulatorHash.data = hexToByteArray[sizeof(Digest)](
-      "0x88cce8439ebc0c1d007177ffb6831c15c07b4361984cc52235b6fd728434f0c7")
-
+    # Output
     const
       contentKeyHex =
         "0400"
@@ -182,10 +178,11 @@ suite "History ContentKey Encodings":
       toContentId(contentKey).toHex() == contentIdHexBE

   test "Master Accumulator - Hash":
-    var accumulatorHash: Digest
-    accumulatorHash.data = hexToByteArray[sizeof(Digest)](
+    # Input
+    const accumulatorHash = Digest.fromHex(
       "0x88cce8439ebc0c1d007177ffb6831c15c07b4361984cc52235b6fd728434f0c7")

+    # Output
     const
       contentKeyHex =
         "040188cce8439ebc0c1d007177ffb6831c15c07b4361984cc52235b6fd728434f0c7"
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2021 Status Research & Development GmbH
+# Copyright (c) 2021-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -12,7 +12,7 @@ import
   ../network/state/state_content

 # According to test vectors:
-# TODO: Add link once test vectors are merged
+# https://github.com/ethereum/portal-network-specs/blob/master/content-keys-test-vectors.md#state-network-keys

 suite "State ContentKey Encodings":
   # Common input
@@ -24,10 +24,9 @@ suite "State ContentKey Encodings":

   test "AccountTrieNode":
     # Input
-    var nodeHash: NodeHash
-    nodeHash.data = hexToByteArray[sizeof(NodeHash)](
-      "0xb8be7903aee73b8f6a59cd44a1f52c62148e1f376c0dfa1f5f773a98666efc2b")
     const
+      nodeHash = NodeHash.fromHex(
+        "0xb8be7903aee73b8f6a59cd44a1f52c62148e1f376c0dfa1f5f773a98666efc2b")
       path = ByteList.init(@[byte 1, 2, 0, 1])

     # Output
@@ -61,10 +60,9 @@ suite "State ContentKey Encodings":

   test "ContractStorageTrieNode":
     # Input
-    var nodeHash: NodeHash
-    nodeHash.data = hexToByteArray[sizeof(NodeHash)](
-      "0x3e190b68719aecbcb28ed2271014dd25f2aa633184988eb414189ce0899cade5")
     const
+      nodeHash = NodeHash.fromHex(
+        "0x3e190b68719aecbcb28ed2271014dd25f2aa633184988eb414189ce0899cade5")
       path = ByteList.init(@[byte 1, 0, 15, 14, 12, 0])

     # Output
@@ -168,8 +166,7 @@ suite "State ContentKey Encodings":

   test "ContractBytecode":
     # Input
-    var codeHash: CodeHash
-    codeHash.data = hexToByteArray[sizeof(CodeHash)](
+    const codeHash = CodeHash.fromHex(
       "0xd1c390624d3bd4e409a61a858e5dcc5517729a9170d014a6c96530d64dd8621d")

     # Output