# Nimbus - Portal Network
#
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  results,
  chronos,
  chronicles,
  ../network/wire/portal_protocol,
  ../network/history/
    [history_content, history_network, validation/historical_hashes_accumulator],
  "."/[era1, history_data_ssz_e2s]

from eth/common/eth_types_rlp import rlpHash

export results

##
## Era1 based iterators that encode to Portal content
##

# Note: these iterators + the era1 iterators will assert on error. These asserts
# would indicate corrupt/invalid era1 files. We might want to instead break,
# raise an exception or return a Result type instead, but the latter does not
# have great support for usage in iterators.

iterator headersWithProof*(
    f: Era1File, epochRecord: EpochRecordCached
): (ContentKeyByteList, seq[byte]) =
  ## Yield a (content key, SSZ encoded header-with-proof) pair for every
  ## block header in the era1 file. Asserts when a header is not pre-merge
  ## or when no proof can be built, both of which indicate a corrupt or
  ## invalid era1 file.
  for header in f.era1BlockHeaders:
    doAssert header.isPreMerge()

    let key = ContentKey(
      contentType: ContentType.blockHeader,
      blockHeaderKey: BlockKey(blockHash: header.rlpHash()),
    ).encode()

    let proofRes = buildHeaderWithProof(header, epochRecord)
    if proofRes.isErr():
      raiseAssert "Failed to build header with proof: " & $header.number

    yield (key, SSZ.encode(proofRes.get()))
iterator blockContent*(f: Era1File): (ContentKeyByteList, seq[byte]) =
  ## For every block in the era1 file, yield first the encoded block body
  ## and then the encoded receipts, each paired with its Portal content key.
  for (header, body, receipts, _) in f.era1BlockTuples:
    let hash = header.rlpHash()

    # Block body content.
    let bodyKey = ContentKey(
      contentType: blockBody, blockBodyKey: BlockKey(blockHash: hash)
    ).encode()
    yield (bodyKey, encode(body))

    # Receipts content.
    let rcptKey = ContentKey(
      contentType: ContentType.receipts, receiptsKey: BlockKey(blockHash: hash)
    ).encode()
    yield (rcptKey, encode(receipts))

##
## Era1 based Gossip calls
##

proc historyGossipHeadersWithProof*(
    p: PortalProtocol, era1File: string, epochRecordFile: Opt[string], verifyEra = false
): Future[Result[void, string]] {.async.} =
  ## Open `era1File`, wrap every block header in an accumulator proof and
  ## gossip it into the Portal history network via neighborhood gossip.
  ## When `epochRecordFile` is provided the epoch accumulator is read from
  ## that ssz file, otherwise it is rebuilt from the era1 data. Pass
  ## `verifyEra = true` to fully verify the era1 file first.
  let eraFile = ?Era1File.open(era1File)

  if verifyEra:
    discard ?eraFile.verify()

  # Building the accumulator takes about 150ms vs 10ms for reading it, so
  # the read version is probably not worth the UX hassle of having to
  # provide the accumulator ssz files.
  let epochRecord =
    if epochRecordFile.isSome():
      ?readEpochRecordCached(epochRecordFile.get())
    else:
      ?eraFile.buildAccumulator()

  for (key, value) in eraFile.headersWithProof(epochRecord):
    let peers = await p.neighborhoodGossip(
      Opt.none(NodeId), ContentKeysList(@[key]), @[value]
    )
    info "Gossiped block header", contentKey = key, peers

  ok()
proc historyGossipBlockContent*(
    p: PortalProtocol, era1File: string, verifyEra = false
): Future[Result[void, string]] {.async.} =
  ## Open `era1File` and gossip every block body and receipts into the
  ## Portal history network via neighborhood gossip. Pass
  ## `verifyEra = true` to fully verify the era1 file first.
  let eraFile = ?Era1File.open(era1File)

  if verifyEra:
    discard ?eraFile.verify()

  for (key, value) in eraFile.blockContent():
    let peers = await p.neighborhoodGossip(
      Opt.none(NodeId), ContentKeysList(@[key]), @[value]
    )
    info "Gossiped block content", contentKey = key, peers

  ok()