Refactor history/beacon content types to separate keys and values (#2438)

Refactor content types to have a structure similar to that of the state
network.
And some clean-up of comments/TODOs/spec links.
Kim De Mey 2024-07-02 15:48:49 +02:00 committed by GitHub
parent d7b849db3d
commit 87d090afa6
6 changed files with 457 additions and 415 deletions
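
In short, each network's old content module becomes a thin facade over new content_keys and content_values submodules, so existing import sites keep compiling. A minimal sketch of the facade, taken from the shape of the first and last files below (paths are relative, as in the diff):

# The old module body after this commit; consumers importing it are
# unaffected because everything is re-exported:
import ./content/content_keys, ./content/content_values

export content_keys, content_values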

File 1 of 6 (beacon content module, reduced to re-exports):

@@ -1,4 +1,4 @@
-# Fluffy - Portal Network
+# fluffy
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -7,255 +7,6 @@
{.push raises: [].}
-import
+import ./content/content_keys, ./content/content_values
std/typetraits,
stew/arrayops,
results,
beacon_chain/spec/forks,
beacon_chain/spec/datatypes/altair,
nimcrypto/[sha2, hash],
ssz_serialization,
ssz_serialization/codec,
../../common/common_types
-export ssz_serialization, common_types, hash
+export content_keys, content_values
# https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/altair/light-client/p2p-interface.md#configuration
const
MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128
# Needed to properly encode List[List[byte, XXX], MAX_REQUEST_LIGHT_CLIENT_UPDATES].
# Based on the eth2 MAX_CHUNK_SIZE; a light client update should not be
# bigger than that.
MAX_LIGHT_CLIENT_UPDATE_SIZE* = 1 * 1024 * 1024
type
ContentType* = enum
# Note: See same note as for state_content.nim
unused = 0x00
lightClientBootstrap = 0x10
lightClientUpdate = 0x11
lightClientFinalityUpdate = 0x12
lightClientOptimisticUpdate = 0x13
historicalSummaries = 0x14
# TODO: Consider how we will gossip bootstraps.
# In consensus light client operation a node trusts only one bootstrap hash,
# therefore offers of other bootstraps would be rejected.
LightClientBootstrapKey* = object
blockHash*: Digest
LightClientUpdateKey* = object
startPeriod*: uint64
count*: uint64
# TODO:
# `optimisticSlot` and `finalizedSlot` are currently not in the spec. They are
# added so that offers can be accepted or rejected based on the slot values.
# However, this causes them to also be included in requests, which arguably
# makes less sense.
LightClientFinalityUpdateKey* = object
finalizedSlot*: uint64 ## slot of finalized header of the update
# TODO: Same remark as for `LightClientFinalityUpdateKey`
LightClientOptimisticUpdateKey* = object
optimisticSlot*: uint64 ## signature_slot of the update
HistoricalSummariesKey* = object
epoch*: uint64
ContentKey* = object
case contentType*: ContentType
of unused:
discard
of lightClientBootstrap:
lightClientBootstrapKey*: LightClientBootstrapKey
of lightClientUpdate:
lightClientUpdateKey*: LightClientUpdateKey
of lightClientFinalityUpdate:
lightClientFinalityUpdateKey*: LightClientFinalityUpdateKey
of lightClientOptimisticUpdate:
lightClientOptimisticUpdateKey*: LightClientOptimisticUpdateKey
of historicalSummaries:
historicalSummariesKey*: HistoricalSummariesKey
# TODO:
# ForkedLightClientUpdateBytesList can get pretty big and is sent in one go.
# We will need some chunking here but that is currently only possible in
# Portal wire protocol (and only for offer/accept).
ForkedLightClientUpdateBytes* = List[byte, MAX_LIGHT_CLIENT_UPDATE_SIZE]
ForkedLightClientUpdateBytesList* =
List[ForkedLightClientUpdateBytes, MAX_REQUEST_LIGHT_CLIENT_UPDATES]
# Note: Type not sent over the wire, just used internally.
ForkedLightClientUpdateList* =
List[ForkedLightClientUpdate, MAX_REQUEST_LIGHT_CLIENT_UPDATES]
func forkDigestAtEpoch*(
forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig
): ForkDigest =
forkDigests.atEpoch(epoch, cfg)
func encode*(contentKey: ContentKey): ByteList =
doAssert(contentKey.contentType != unused)
ByteList.init(SSZ.encode(contentKey))
proc readSszBytes*(data: openArray[byte], val: var ContentKey) {.raises: [SszError].} =
mixin readSszValue
if data.len() > 0 and data[0] == ord(unused):
raise newException(MalformedSszError, "SSZ selector unused value")
readSszValue(data, val)
func decode*(contentKey: ByteList): Opt[ContentKey] =
try:
Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SerializationError:
return Opt.none(ContentKey)
func toContentId*(contentKey: ByteList): ContentId =
# TODO: Should we try to parse the content key here for invalid ones?
let idHash = sha2.sha256.digest(contentKey.asSeq())
readUintBE[256](idHash.data)
func toContentId*(contentKey: ContentKey): ContentId =
toContentId(encode(contentKey))
# Yes, this API is odd as you pass a SomeForkedLightClientObject yet still have
# to also pass the ForkDigest. This is because we can't just select the right
# digest through the LightClientDataFork here as LightClientDataFork and
# ConsensusFork are not mapped 1-to-1. There is loss of fork data.
# This means we need to get the ConsensusFork directly, which is possible by
# passing the epoch (slot) from the object through `forkDigestAtEpoch`. This
# however requires the runtime config which is part of the `Eth2Node` object.
# Not something we would like to include as a parameter here, so we stick with
# just passing the forkDigest and doing the work outside of this encode call.
func encodeForkedLightClientObject*(
obj: SomeForkedLightClientObject, forkDigest: ForkDigest
): seq[byte] =
withForkyObject(obj):
when lcDataFork > LightClientDataFork.None:
var res: seq[byte]
res.add(distinctBase(forkDigest))
res.add(SSZ.encode(forkyObject))
return res
else:
raiseAssert("No light client objects before Altair")
func encodeBootstrapForked*(
forkDigest: ForkDigest, bootstrap: ForkedLightClientBootstrap
): seq[byte] =
encodeForkedLightClientObject(bootstrap, forkDigest)
func encodeFinalityUpdateForked*(
forkDigest: ForkDigest, finalityUpdate: ForkedLightClientFinalityUpdate
): seq[byte] =
encodeForkedLightClientObject(finalityUpdate, forkDigest)
func encodeOptimisticUpdateForked*(
forkDigest: ForkDigest, optimisticUpdate: ForkedLightClientOptimisticUpdate
): seq[byte] =
encodeForkedLightClientObject(optimisticUpdate, forkDigest)
func encodeLightClientUpdatesForked*(
forkDigest: ForkDigest, updates: openArray[ForkedLightClientUpdate]
): seq[byte] =
var list: ForkedLightClientUpdateBytesList
for update in updates:
discard list.add(
ForkedLightClientUpdateBytes(encodeForkedLightClientObject(update, forkDigest))
)
SSZ.encode(list)
func decodeForkedLightClientObject(
ObjType: type SomeForkedLightClientObject,
forkDigests: ForkDigests,
data: openArray[byte],
): Result[ObjType, string] =
if len(data) < 4:
return Result[ObjType, string].err("Not enough data for forkDigest")
let
forkDigest = ForkDigest(array[4, byte].initCopyFrom(data))
contextFork = forkDigests.consensusForkForDigest(forkDigest).valueOr:
return Result[ObjType, string].err("Unknown fork")
withLcDataFork(lcDataForkAtConsensusFork(contextFork)):
when lcDataFork > LightClientDataFork.None:
let res = decodeSsz(data.toOpenArray(4, len(data) - 1), ObjType.Forky(lcDataFork))
if res.isOk:
# TODO:
# How can we verify the Epoch vs fork, e.g. with `consensusForkAtEpoch`?
# And should we?
var obj = ok ObjType(kind: lcDataFork)
obj.get.forky(lcDataFork) = res.get
obj
else:
Result[ObjType, string].err(res.error)
else:
Result[ObjType, string].err("Invalid Fork")
func decodeLightClientBootstrapForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientBootstrap, string] =
decodeForkedLightClientObject(ForkedLightClientBootstrap, forkDigests, data)
func decodeLightClientUpdateForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientUpdate, string] =
decodeForkedLightClientObject(ForkedLightClientUpdate, forkDigests, data)
func decodeLightClientFinalityUpdateForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientFinalityUpdate, string] =
decodeForkedLightClientObject(ForkedLightClientFinalityUpdate, forkDigests, data)
func decodeLightClientOptimisticUpdateForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientOptimisticUpdate, string] =
decodeForkedLightClientObject(ForkedLightClientOptimisticUpdate, forkDigests, data)
func decodeLightClientUpdatesByRange*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientUpdateList, string] =
let list = ?decodeSsz(data, ForkedLightClientUpdateBytesList)
var res: ForkedLightClientUpdateList
for encodedUpdate in list:
let update = ?decodeLightClientUpdateForked(forkDigests, encodedUpdate.asSeq())
discard res.add(update)
ok(res)
func bootstrapContentKey*(blockHash: Digest): ContentKey =
ContentKey(
contentType: lightClientBootstrap,
lightClientBootstrapKey: LightClientBootstrapKey(blockHash: blockHash),
)
func updateContentKey*(startPeriod: uint64, count: uint64): ContentKey =
ContentKey(
contentType: lightClientUpdate,
lightClientUpdateKey: LightClientUpdateKey(startPeriod: startPeriod, count: count),
)
func finalityUpdateContentKey*(finalizedSlot: uint64): ContentKey =
ContentKey(
contentType: lightClientFinalityUpdate,
lightClientFinalityUpdateKey:
LightClientFinalityUpdateKey(finalizedSlot: finalizedSlot),
)
func optimisticUpdateContentKey*(optimisticSlot: uint64): ContentKey =
ContentKey(
contentType: lightClientOptimisticUpdate,
lightClientOptimisticUpdateKey:
LightClientOptimisticUpdateKey(optimisticSlot: optimisticSlot),
)
func historicalSummariesContentKey*(epoch: uint64): ContentKey =
ContentKey(
contentType: historicalSummaries,
historicalSummariesKey: HistoricalSummariesKey(epoch: epoch),
)

File 2 of 6 (new beacon content keys module):

@@ -0,0 +1,116 @@
# fluffy
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import results, nimcrypto/[sha2, hash], ssz_serialization, ../../../common/common_types
export ssz_serialization, common_types, hash
# As per spec:
# https://github.com/ethereum/portal-network-specs/blob/master/beacon-chain/beacon-network.md#data-types
type
ContentType* = enum
# Note: See same note as for state/content/content_keys.nim
unused = 0x00
lightClientBootstrap = 0x10
lightClientUpdate = 0x11
lightClientFinalityUpdate = 0x12
lightClientOptimisticUpdate = 0x13
historicalSummaries = 0x14
# TODO: Consider how we will gossip bootstraps.
# In consensus light client operation a node trusts only one bootstrap hash,
# therefore offers of other bootstraps would be rejected.
LightClientBootstrapKey* = object
blockHash*: Digest
LightClientUpdateKey* = object
startPeriod*: uint64
count*: uint64
LightClientFinalityUpdateKey* = object
finalizedSlot*: uint64 ## slot of finalized header of the update
LightClientOptimisticUpdateKey* = object
optimisticSlot*: uint64 ## signature_slot of the update
HistoricalSummariesKey* = object
epoch*: uint64
ContentKey* = object
case contentType*: ContentType
of unused:
discard
of lightClientBootstrap:
lightClientBootstrapKey*: LightClientBootstrapKey
of lightClientUpdate:
lightClientUpdateKey*: LightClientUpdateKey
of lightClientFinalityUpdate:
lightClientFinalityUpdateKey*: LightClientFinalityUpdateKey
of lightClientOptimisticUpdate:
lightClientOptimisticUpdateKey*: LightClientOptimisticUpdateKey
of historicalSummaries:
historicalSummariesKey*: HistoricalSummariesKey
func encode*(contentKey: ContentKey): ByteList =
doAssert(contentKey.contentType != unused)
ByteList.init(SSZ.encode(contentKey))
proc readSszBytes*(data: openArray[byte], val: var ContentKey) {.raises: [SszError].} =
mixin readSszValue
if data.len() > 0 and data[0] == ord(unused):
raise newException(MalformedSszError, "SSZ selector unused value")
readSszValue(data, val)
func decode*(contentKey: ByteList): Opt[ContentKey] =
try:
Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SerializationError:
return Opt.none(ContentKey)
func toContentId*(contentKey: ByteList): ContentId =
# TODO: Should we try to parse the content key here for invalid ones?
let idHash = sha2.sha256.digest(contentKey.asSeq())
readUintBE[256](idHash.data)
func toContentId*(contentKey: ContentKey): ContentId =
toContentId(encode(contentKey))
func bootstrapContentKey*(blockHash: Digest): ContentKey =
ContentKey(
contentType: lightClientBootstrap,
lightClientBootstrapKey: LightClientBootstrapKey(blockHash: blockHash),
)
func updateContentKey*(startPeriod: uint64, count: uint64): ContentKey =
ContentKey(
contentType: lightClientUpdate,
lightClientUpdateKey: LightClientUpdateKey(startPeriod: startPeriod, count: count),
)
func finalityUpdateContentKey*(finalizedSlot: uint64): ContentKey =
ContentKey(
contentType: lightClientFinalityUpdate,
lightClientFinalityUpdateKey:
LightClientFinalityUpdateKey(finalizedSlot: finalizedSlot),
)
func optimisticUpdateContentKey*(optimisticSlot: uint64): ContentKey =
ContentKey(
contentType: lightClientOptimisticUpdate,
lightClientOptimisticUpdateKey:
LightClientOptimisticUpdateKey(optimisticSlot: optimisticSlot),
)
func historicalSummariesContentKey*(epoch: uint64): ContentKey =
ContentKey(
contentType: historicalSummaries,
historicalSummariesKey: HistoricalSummariesKey(epoch: epoch),
)
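
A short usage sketch of the key helpers above (the zero Digest is an illustrative assumption and the relative module path is hypothetical): build a bootstrap content key, SSZ-encode it, and derive the content id.

import ./content_keys

let
  blockHash = Digest()                    # zero digest, for illustration only
  key = bootstrapContentKey(blockHash)    # ContentKey with selector 0x10
  encodedKey = encode(key)                # 1 selector byte + SSZ-encoded key body
  contentId = toContentId(encodedKey)     # sha256 over the encoded key bytes

# First byte of the encoding is the SSZ union selector.
doAssert encodedKey.asSeq()[0] == byte(ord(lightClientBootstrap))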

File 3 of 6 (new beacon content values module):

@@ -0,0 +1,154 @@
# Fluffy - Portal Network
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
stew/arrayops,
results,
beacon_chain/spec/forks,
ssz_serialization,
../../../common/common_types
export ssz_serialization, results
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/light-client/p2p-interface.md#configuration
MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128
# Needed to properly encode List[List[byte, XXX], MAX_REQUEST_LIGHT_CLIENT_UPDATES].
# Based on the eth2 MAX_CHUNK_SIZE; a light client update should not be
# bigger than that.
MAX_LIGHT_CLIENT_UPDATE_SIZE* = 1 * 1024 * 1024
# As per spec:
# https://github.com/ethereum/portal-network-specs/blob/master/beacon-chain/beacon-network.md#data-types
type
# TODO:
# ForkedLightClientUpdateBytesList can get pretty big and is sent in one go.
# We might need chunking here but that is currently only possible in
# Portal wire protocol (and only for offer/accept).
ForkedLightClientUpdateBytes* = List[byte, MAX_LIGHT_CLIENT_UPDATE_SIZE]
ForkedLightClientUpdateBytesList* =
List[ForkedLightClientUpdateBytes, MAX_REQUEST_LIGHT_CLIENT_UPDATES]
# Note: Type not sent over the wire, just used internally.
ForkedLightClientUpdateList* =
List[ForkedLightClientUpdate, MAX_REQUEST_LIGHT_CLIENT_UPDATES]
func forkDigestAtEpoch*(
forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig
): ForkDigest =
forkDigests.atEpoch(epoch, cfg)
# Yes, this API is odd as you pass a SomeForkedLightClientObject yet still have
# to also pass the ForkDigest. This is because we can't just select the right
# digest through the LightClientDataFork here as LightClientDataFork and
# ConsensusFork are not mapped 1-to-1. There is loss of fork data.
# This means we need to get the ConsensusFork directly, which is possible by
# passing the epoch (slot) from the object through `forkDigestAtEpoch`. This
# however requires the runtime config which is part of the `Eth2Node` object.
# Not something we would like to include as a parameter here, so we stick with
# just passing the forkDigest and doing the work outside of this encode call.
func encodeForkedLightClientObject*(
obj: SomeForkedLightClientObject, forkDigest: ForkDigest
): seq[byte] =
withForkyObject(obj):
when lcDataFork > LightClientDataFork.None:
var res: seq[byte]
res.add(distinctBase(forkDigest))
res.add(SSZ.encode(forkyObject))
return res
else:
raiseAssert("No light client objects before Altair")
func encodeBootstrapForked*(
forkDigest: ForkDigest, bootstrap: ForkedLightClientBootstrap
): seq[byte] =
encodeForkedLightClientObject(bootstrap, forkDigest)
func encodeFinalityUpdateForked*(
forkDigest: ForkDigest, finalityUpdate: ForkedLightClientFinalityUpdate
): seq[byte] =
encodeForkedLightClientObject(finalityUpdate, forkDigest)
func encodeOptimisticUpdateForked*(
forkDigest: ForkDigest, optimisticUpdate: ForkedLightClientOptimisticUpdate
): seq[byte] =
encodeForkedLightClientObject(optimisticUpdate, forkDigest)
func encodeLightClientUpdatesForked*(
forkDigest: ForkDigest, updates: openArray[ForkedLightClientUpdate]
): seq[byte] =
var list: ForkedLightClientUpdateBytesList
for update in updates:
discard list.add(
ForkedLightClientUpdateBytes(encodeForkedLightClientObject(update, forkDigest))
)
SSZ.encode(list)
func decodeForkedLightClientObject(
ObjType: type SomeForkedLightClientObject,
forkDigests: ForkDigests,
data: openArray[byte],
): Result[ObjType, string] =
if len(data) < 4:
return Result[ObjType, string].err("Not enough data for forkDigest")
let
forkDigest = ForkDigest(array[4, byte].initCopyFrom(data))
contextFork = forkDigests.consensusForkForDigest(forkDigest).valueOr:
return Result[ObjType, string].err("Unknown fork")
withLcDataFork(lcDataForkAtConsensusFork(contextFork)):
when lcDataFork > LightClientDataFork.None:
let res = decodeSsz(data.toOpenArray(4, len(data) - 1), ObjType.Forky(lcDataFork))
if res.isOk:
# TODO:
# How can we verify the Epoch vs fork, e.g. with `consensusForkAtEpoch`?
# And should we?
var obj = ok ObjType(kind: lcDataFork)
obj.get.forky(lcDataFork) = res.get
obj
else:
Result[ObjType, string].err(res.error)
else:
Result[ObjType, string].err("Invalid Fork")
func decodeLightClientBootstrapForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientBootstrap, string] =
decodeForkedLightClientObject(ForkedLightClientBootstrap, forkDigests, data)
func decodeLightClientUpdateForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientUpdate, string] =
decodeForkedLightClientObject(ForkedLightClientUpdate, forkDigests, data)
func decodeLightClientFinalityUpdateForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientFinalityUpdate, string] =
decodeForkedLightClientObject(ForkedLightClientFinalityUpdate, forkDigests, data)
func decodeLightClientOptimisticUpdateForked*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientOptimisticUpdate, string] =
decodeForkedLightClientObject(ForkedLightClientOptimisticUpdate, forkDigests, data)
func decodeLightClientUpdatesByRange*(
forkDigests: ForkDigests, data: openArray[byte]
): Result[ForkedLightClientUpdateList, string] =
let list = ?decodeSsz(data, ForkedLightClientUpdateBytesList)
var res: ForkedLightClientUpdateList
for encodedUpdate in list:
let update = ?decodeLightClientUpdateForked(forkDigests, encodedUpdate.asSeq())
discard res.add(update)
ok(res)
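
The framing used by encodeForkedLightClientObject and decodeForkedLightClientObject above is simply a 4-byte fork digest prefix followed by the fork-specific SSZ bytes. A self-contained sketch of that framing, using plain bytes rather than the real forked types (the digest value is an arbitrary example):

proc frame(forkDigest: array[4, byte], sszBody: seq[byte]): seq[byte] =
  # forkDigest selects the fork; the body is the forky object's SSZ encoding.
  result.add(forkDigest)
  result.add(sszBody)

proc unframe(data: openArray[byte]): tuple[digest: array[4, byte], body: seq[byte]] =
  # Mirrors the length check in decodeForkedLightClientObject.
  doAssert data.len >= 4, "Not enough data for forkDigest"
  var digest: array[4, byte]
  for i in 0 ..< 4:
    digest[i] = data[i]
  (digest, data[4 .. ^1])

let framed = frame([byte 0xbb, 0xa4, 0xda, 0x96], @[byte 0x01, 0x02, 0x03])
doAssert unframe(framed).digest == [byte 0xbb, 0xa4, 0xda, 0x96]
doAssert unframe(framed).body == @[byte 0x01, 0x02, 0x03]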

File 4 of 6 (new history content keys module):

@@ -0,0 +1,106 @@
# fluffy
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
nimcrypto/[sha2, hash],
stew/byteutils,
results,
stint,
ssz_serialization,
../../../common/common_types
export ssz_serialization, common_types, hash, results
## History network content keys:
## https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#content-keys-and-values
const
# Maximum content key size:
# - 32 bytes for SSZ serialized `BlockKey`
# - 1 byte for `ContentType`
maxContentKeySize* = 33
type
ContentType* = enum
blockHeader = 0x00
blockBody = 0x01
receipts = 0x02
epochAccumulator = 0x03
BlockKey* = object
blockHash*: BlockHash
EpochAccumulatorKey* = object
epochHash*: Digest
ContentKey* = object
case contentType*: ContentType
of blockHeader:
blockHeaderKey*: BlockKey
of blockBody:
blockBodyKey*: BlockKey
of receipts:
receiptsKey*: BlockKey
of epochAccumulator:
epochAccumulatorKey*: EpochAccumulatorKey
func init*(T: type ContentKey, contentType: ContentType, hash: BlockHash | Digest): T =
case contentType
of blockHeader:
ContentKey(contentType: contentType, blockHeaderKey: BlockKey(blockHash: hash))
of blockBody:
ContentKey(contentType: contentType, blockBodyKey: BlockKey(blockHash: hash))
of receipts:
ContentKey(contentType: contentType, receiptsKey: BlockKey(blockHash: hash))
of epochAccumulator:
ContentKey(
contentType: contentType,
epochAccumulatorKey: EpochAccumulatorKey(epochHash: hash),
)
func encode*(contentKey: ContentKey): ByteList =
ByteList.init(SSZ.encode(contentKey))
func decode*(contentKey: ByteList): Opt[ContentKey] =
try:
Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SerializationError:
return Opt.none(ContentKey)
func toContentId*(contentKey: ByteList): ContentId =
# TODO: Should we try to parse the content key here for invalid ones?
let idHash = sha2.sha256.digest(contentKey.asSeq())
readUintBE[256](idHash.data)
func toContentId*(contentKey: ContentKey): ContentId =
toContentId(encode(contentKey))
func `$`*(x: BlockHash): string =
"0x" & x.data.toHex()
func `$`*(x: BlockKey): string =
"blockHash: " & $x.blockHash
func `$`*(x: ContentKey): string =
var res = "(type: " & $x.contentType & ", "
case x.contentType
of blockHeader:
res.add($x.blockHeaderKey)
of blockBody:
res.add($x.blockBodyKey)
of receipts:
res.add($x.receiptsKey)
of epochAccumulator:
let key = x.epochAccumulatorKey
res.add("epochHash: " & $key.epochHash)
res.add(")")
res
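
A usage sketch for the history keys (hypothetical module path; assuming BlockHash default-constructs to a zero hash): an encoded block header key is exactly 1 selector byte plus the 32-byte hash, i.e. maxContentKeySize.

import ./content_keys

let
  blockHash = BlockHash()                        # zero hash, illustration only
  key = ContentKey.init(blockHeader, blockHash)
  encodedKey = encode(key)

doAssert encodedKey.len() == maxContentKeySize   # 33 bytes
echo $key  # e.g. (type: blockHeader, blockHash: 0x0000...)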

File 5 of 6 (new history content values module):

@@ -0,0 +1,76 @@
# fluffy
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import std/math, nimcrypto/hash, ssz_serialization
from beacon_chain/spec/datatypes/capella import Withdrawal
from beacon_chain/spec/presets/mainnet import MAX_WITHDRAWALS_PER_PAYLOAD
export ssz_serialization, hash
## History network content values:
## https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#content-keys-and-values
const
MAX_TRANSACTION_LENGTH = 2 ^ 24 # ~= 16 million
MAX_TRANSACTION_COUNT = 2 ^ 14 # ~= 16k
MAX_RECEIPT_LENGTH = 2 ^ 27 # ~= 134 million
MAX_HEADER_LENGTH = 2 ^ 13 # = 8192
MAX_ENCODED_UNCLES_LENGTH = MAX_HEADER_LENGTH * 2 ^ 4 # = 2 ^ 17 ~= 131k
MAX_WITHDRAWAL_LENGTH = 64
MAX_WITHDRAWALS_COUNT = MAX_WITHDRAWALS_PER_PAYLOAD
type
## BlockHeader types
AccumulatorProof* = array[15, Digest]
BlockHeaderProofType* = enum
none = 0x00 # An SSZ Union None
accumulatorProof = 0x01
BlockHeaderProof* = object
case proofType*: BlockHeaderProofType
of none:
discard
of accumulatorProof:
accumulatorProof*: AccumulatorProof
BlockHeaderWithProof* = object
header*: List[byte, 2048] # RLP data
proof*: BlockHeaderProof
## BlockBody types
TransactionByteList* = List[byte, MAX_TRANSACTION_LENGTH] # RLP data
Transactions* = List[TransactionByteList, MAX_TRANSACTION_COUNT]
Uncles* = List[byte, MAX_ENCODED_UNCLES_LENGTH] # RLP data
WithdrawalByteList* = List[byte, MAX_WITHDRAWAL_LENGTH] # RLP data
Withdrawals* = List[WithdrawalByteList, MAX_WITHDRAWALS_COUNT]
# Pre-shanghai block body
PortalBlockBodyLegacy* = object
transactions*: Transactions
uncles*: Uncles # Post Paris/TheMerge, this list is required to be empty
# Post-shanghai block body
PortalBlockBodyShanghai* = object
transactions*: Transactions
uncles*: Uncles # Must be empty list
withdrawals*: Withdrawals # new field
## Receipts types
ReceiptByteList* = List[byte, MAX_RECEIPT_LENGTH] # RLP data
PortalReceipts* = List[ReceiptByteList, MAX_TRANSACTION_COUNT]
func init*(T: type BlockHeaderProof, proof: AccumulatorProof): T =
BlockHeaderProof(proofType: accumulatorProof, accumulatorProof: proof)
func init*(T: type BlockHeaderProof): T =
BlockHeaderProof(proofType: none)
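
A construction sketch for the header value type above (the RLP byte and the module path are placeholder assumptions): wrap RLP-encoded header bytes together with a proof, using the `none` union variant when no accumulator proof is attached.

import ./content_values  # re-exports ssz_serialization

let
  rlpHeader = @[byte 0xc0]  # placeholder for real RLP-encoded header bytes
  headerWithProof = BlockHeaderWithProof(
    header: List[byte, 2048].init(rlpHeader),
    proof: BlockHeaderProof.init()  # proofType: none
  )
  encoded = SSZ.encode(headerWithProof)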

File 6 of 6 (history content module, reduced to re-exports):

@@ -5,169 +5,8 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#content-keys-and-values
{.push raises: [].}
-import
+import ./content/content_keys, ./content/content_values
std/math,
nimcrypto/[sha2, hash],
stew/byteutils,
results,
stint,
ssz_serialization,
../../common/common_types
-from beacon_chain/spec/datatypes/capella import Withdrawal
+export content_keys, content_values
from beacon_chain/spec/presets/mainnet import MAX_WITHDRAWALS_PER_PAYLOAD
export ssz_serialization, common_types, hash, results
## Types and calls for history network content keys
const
# Maximum content key size:
# - 32 bytes for SSZ serialized `BlockKey`
# - 1 byte for `ContentType`
# TODO: calculate it somehow from the object definition (macro?)
maxContentKeySize* = 33
type
ContentType* = enum
blockHeader = 0x00
blockBody = 0x01
receipts = 0x02
epochAccumulator = 0x03
BlockKey* = object
blockHash*: BlockHash
EpochAccumulatorKey* = object
epochHash*: Digest
# TODO: Perhaps this should be called epochRoot in the spec instead
ContentKey* = object
case contentType*: ContentType
of blockHeader:
blockHeaderKey*: BlockKey
of blockBody:
blockBodyKey*: BlockKey
of receipts:
receiptsKey*: BlockKey
of epochAccumulator:
epochAccumulatorKey*: EpochAccumulatorKey
func init*(T: type ContentKey, contentType: ContentType, hash: BlockHash | Digest): T =
case contentType
of blockHeader:
ContentKey(contentType: contentType, blockHeaderKey: BlockKey(blockHash: hash))
of blockBody:
ContentKey(contentType: contentType, blockBodyKey: BlockKey(blockHash: hash))
of receipts:
ContentKey(contentType: contentType, receiptsKey: BlockKey(blockHash: hash))
of epochAccumulator:
ContentKey(
contentType: contentType,
epochAccumulatorKey: EpochAccumulatorKey(epochHash: hash),
)
func encode*(contentKey: ContentKey): ByteList =
ByteList.init(SSZ.encode(contentKey))
func decode*(contentKey: ByteList): Opt[ContentKey] =
try:
Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
except SerializationError:
return Opt.none(ContentKey)
func toContentId*(contentKey: ByteList): ContentId =
# TODO: Should we try to parse the content key here for invalid ones?
let idHash = sha2.sha256.digest(contentKey.asSeq())
readUintBE[256](idHash.data)
func toContentId*(contentKey: ContentKey): ContentId =
toContentId(encode(contentKey))
func `$`*(x: BlockHash): string =
"0x" & x.data.toHex()
func `$`*(x: BlockKey): string =
"blockHash: " & $x.blockHash
func `$`*(x: ContentKey): string =
var res = "(type: " & $x.contentType & ", "
case x.contentType
of blockHeader:
res.add($x.blockHeaderKey)
of blockBody:
res.add($x.blockBodyKey)
of receipts:
res.add($x.receiptsKey)
of epochAccumulator:
let key = x.epochAccumulatorKey
res.add("epochHash: " & $key.epochHash)
res.add(")")
res
## Types for history network content
const
MAX_TRANSACTION_LENGTH* = 2 ^ 24 # ~= 16 million
MAX_TRANSACTION_COUNT* = 2 ^ 14 # ~= 16k
MAX_RECEIPT_LENGTH* = 2 ^ 27 # ~= 134 million
MAX_HEADER_LENGTH = 2 ^ 13 # = 8192
MAX_ENCODED_UNCLES_LENGTH* = MAX_HEADER_LENGTH * 2 ^ 4 # = 2**17 ~= 131k
MAX_WITHDRAWAL_LENGTH = 64
MAX_WITHDRAWALS_COUNT = MAX_WITHDRAWALS_PER_PAYLOAD
type
## Types for content
# TODO: Using `init` on these lists appears to fail because of the constants
# that are used? Strange.
TransactionByteList* = List[byte, MAX_TRANSACTION_LENGTH] # RLP data
Transactions* = List[TransactionByteList, MAX_TRANSACTION_COUNT]
Uncles* = List[byte, MAX_ENCODED_UNCLES_LENGTH] # RLP data
WithdrawalByteList* = List[byte, MAX_WITHDRAWAL_LENGTH] # RLP data
Withdrawals* = List[WithdrawalByteList, MAX_WITHDRAWALS_COUNT]
# Pre-shanghai block body
# Post-merge this block body is required to have an empty list for uncles
PortalBlockBodyLegacy* = object
transactions*: Transactions
uncles*: Uncles
# Post-shanghai block body, added withdrawals
PortalBlockBodyShanghai* = object
transactions*: Transactions
uncles*: Uncles
withdrawals*: Withdrawals
ReceiptByteList* = List[byte, MAX_RECEIPT_LENGTH] # RLP data
PortalReceipts* = List[ReceiptByteList, MAX_TRANSACTION_COUNT]
AccumulatorProof* = array[15, Digest]
BlockHeaderProofType* = enum
none = 0x00 # An SSZ Union None
accumulatorProof = 0x01
BlockHeaderProof* = object
case proofType*: BlockHeaderProofType
of none:
discard
of accumulatorProof:
accumulatorProof*: AccumulatorProof
BlockHeaderWithProof* = object
header*: ByteList # RLP data
proof*: BlockHeaderProof
func init*(T: type BlockHeaderProof, proof: AccumulatorProof): T =
BlockHeaderProof(proofType: accumulatorProof, accumulatorProof: proof)
func init*(T: type BlockHeaderProof): T =
BlockHeaderProof(proofType: none)