# Nimbus - Portal Network
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/[strformat, os],
  results,
  chronos,
  chronicles,
  eth/common/eth_types,
  eth/rlp,
  ../network/wire/portal_protocol,
  ../network/history/[history_content, history_network, accumulator],
  "."/[era1, history_data_json_store, history_data_ssz_e2s]

export results

### Helper calls to seed the local database and/or the network

proc historyStore*(
    p: PortalProtocol, dataFile: string, verify = false
): Result[void, string] =
  let blockData = ?readJsonType(dataFile, BlockDataTable)

  for b in blocks(blockData, verify):
    for value in b:
      let encKey = history_content.encode(value[0])
      # Note: This is the slowest part due to the hashing that takes place.
      p.storeContent(encKey, history_content.toContentId(encKey), value[1])

  ok()
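
# Example usage of `historyStore` (a hypothetical sketch; assumes an
# initialized history network `PortalProtocol` instance `p` and a JSON block
# data file as produced by the export tooling):
#
#   let res = p.historyStore("./data/blocks.json", verify = true)
#   if res.isErr():
#     echo "historyStore failed: ", res.error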

proc propagateEpochRecord*(
    p: PortalProtocol, file: string
): Future[Result[void, string]] {.async.} =
  ## Propagate a specific epoch record (epoch accumulator) into the network.
  ## `file` holds the SSZ serialized epoch record.
  let epochRecordRes = readEpochRecord(file)
  if epochRecordRes.isErr():
    return err(epochRecordRes.error)
  else:
    let
      epochRecord = epochRecordRes.get()
      rootHash = epochRecord.hash_tree_root()
      key = ContentKey(
        contentType: epochRecord, epochRecordKey: EpochRecordKey(epochHash: rootHash)
      )
      encKey = history_content.encode(key)
      # Note: The file already holds the SSZ encoded record, but we need to
      # decode it as we need the root for the content key.
      encodedEpochRecord = SSZ.encode(epochRecord)
    info "Gossiping epoch record", rootHash, contentKey = encKey

    p.storeContent(encKey, history_content.toContentId(encKey), encodedEpochRecord)
    discard await p.neighborhoodGossip(
      Opt.none(NodeId), ContentKeysList(@[encKey]), @[encodedEpochRecord]
    )

    return ok()

proc propagateEpochRecords*(
    p: PortalProtocol, path: string
): Future[Result[void, string]] {.async.} =
  ## Propagate all epoch records created when building the accumulator
  ## from the block headers.
  ## `path` is a directory that holds all SSZ encoded epoch record files.
  for i in 0 ..< preMergeEpochs:
    let file =
      try:
        path / &"mainnet-epoch-record-{i.uint64:05}.ssz"
      except ValueError as e:
        raiseAssert e.msg

    let res = await p.propagateEpochRecord(file)
    if res.isErr():
      return err(res.error)

  return ok()
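
# Example usage (hypothetical; assumes `p` is a history network
# `PortalProtocol` and `./data` holds one `mainnet-epoch-record-NNNNN.ssz`
# file per pre-merge epoch, named as formatted above):
#
#   let res = await p.propagateEpochRecords("./data")
#   if res.isErr():
#     echo "propagateEpochRecords failed: ", res.error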

proc historyPropagate*(
    p: PortalProtocol, dataFile: string, verify = false
): Future[Result[void, string]] {.async.} =
  const concurrentGossips = 20
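
  # A fixed pool of `concurrentGossips` workers drains the gossip queue
  # concurrently; the block data iteration below only enqueues work items.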
  var gossipQueue =
    newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[byte])](concurrentGossips)
  var gossipWorkers: seq[Future[void]]

  proc gossipWorker(p: PortalProtocol) {.async.} =
    while true:
      let (srcNodeId, keys, content) = await gossipQueue.popFirst()

      discard await p.neighborhoodGossip(srcNodeId, keys, @[content])

  for i in 0 ..< concurrentGossips:
    gossipWorkers.add(gossipWorker(p))

  let blockData = readJsonType(dataFile, BlockDataTable)
  if blockData.isOk():
    for b in blocks(blockData.get(), verify):
      for i, value in b:
        if i == 0:
          # Note: Skipping propagation of headers here as they should be offered
          # separately to be certain that bodies and receipts can be verified.
          # TODO: Rename this chain of calls to be more clear about this and
          # adjust the iterator call.
          continue

        # Only send non-empty data, e.g. empty receipts are not sent.
        # TODO: Could do a similar thing for a combination of empty
        # txs and empty uncles, as then the serialization is always the same.
        if value[1].len() > 0:
          info "Seeding block content into the network", contentKey = value[0]
          # Note: This is the slowest part due to the hashing that takes place.
          let
            encKey = history_content.encode(value[0])
            contentId = history_content.toContentId(encKey)
          p.storeContent(encKey, contentId, value[1])

          await gossipQueue.addLast(
            (Opt.none(NodeId), ContentKeysList(@[encode(value[0])]), value[1])
          )

    return ok()
  else:
    return err(blockData.error)

proc historyPropagateBlock*(
    p: PortalProtocol, dataFile: string, blockHash: string, verify = false
): Future[Result[void, string]] {.async.} =
  let blockDataTable = readJsonType(dataFile, BlockDataTable)

  if blockDataTable.isOk():
    let b =
      try:
        blockDataTable.get()[blockHash]
      except KeyError:
        return err("Block hash not found in block data file")

    let blockDataRes = readBlockData(blockHash, b)
    if blockDataRes.isErr:
      return err(blockDataRes.error)

    let blockData = blockDataRes.get()

    for value in blockData:
      info "Seeding block content into the network", contentKey = value[0]
      let
        encKey = history_content.encode(value[0])
        contentId = history_content.toContentId(encKey)
      p.storeContent(encKey, contentId, value[1])

      discard await p.neighborhoodGossip(
        Opt.none(NodeId), ContentKeysList(@[encode(value[0])]), @[value[1]]
      )

    return ok()
  else:
    return err(blockDataTable.error)
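
# Example usage (hypothetical; assumes the same JSON block data file and a
# block hash, as 0x-prefixed hex string, that is present in it):
#
#   let res = await p.historyPropagateBlock("./data/blocks.json", blockHash)
#   if res.isErr():
#     echo "historyPropagateBlock failed: ", res.error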

proc historyPropagateHeadersWithProof*(
    p: PortalProtocol, epochHeadersFile: string, epochRecordFile: string
): Future[Result[void, string]] {.async.} =
  let res = readBlockHeaders(epochHeadersFile)
  if res.isErr():
    return err(res.error)

  let blockHeaders = res.get()

  let epochRecordRes = readEpochRecordCached(epochRecordFile)
  if epochRecordRes.isErr():
    return err(epochRecordRes.error)

  let epochRecord = epochRecordRes.get()
  for header in blockHeaders:
    if header.isPreMerge():
      let headerWithProof = buildHeaderWithProof(header, epochRecord)
      if headerWithProof.isErr:
        return err(headerWithProof.error)

      let
        content = headerWithProof.get()
        contentKey = ContentKey(
          contentType: blockHeader,
          blockHeaderKey: BlockKey(blockHash: header.blockHash()),
        )
        encKey = history_content.encode(contentKey)
        contentId = history_content.toContentId(encKey)
        encodedContent = SSZ.encode(content)

      p.storeContent(encKey, contentId, encodedContent)

      let keys = ContentKeysList(@[encode(contentKey)])
      discard await p.neighborhoodGossip(Opt.none(NodeId), keys, @[encodedContent])

  return ok()

proc historyPropagateHeadersWithProof*(
    p: PortalProtocol, dataDir: string
): Future[Result[void, string]] {.async.} =
  for i in 0 ..< preMergeEpochs:
    let
      epochHeadersFile =
        try:
          dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
        except ValueError as e:
          raiseAssert e.msg
      epochRecordFile =
        try:
          dataDir / &"mainnet-epoch-record-{i.uint64:05}.ssz"
        except ValueError as e:
          raiseAssert e.msg

    let res =
      await p.historyPropagateHeadersWithProof(epochHeadersFile, epochRecordFile)
    if res.isOk():
      info "Finished gossiping 1 epoch of headers with proof", i
    else:
      return err(res.error)

  return ok()
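
# Example usage (hypothetical; assumes `dataDir` holds the per-epoch
# `mainnet-headers-epoch-NNNNN.e2s` and `mainnet-epoch-record-NNNNN.ssz`
# files named as formatted above):
#
#   let res = await p.historyPropagateHeadersWithProof("./data")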

proc historyPropagateHeaders*(
    p: PortalProtocol, dataFile: string, verify = false
): Future[Result[void, string]] {.async.} =
  # TODO: Should perhaps be integrated with `historyPropagate` call.
  const concurrentGossips = 20

  var gossipQueue = newAsyncQueue[(ContentKeysList, seq[byte])](concurrentGossips)
  var gossipWorkers: seq[Future[void]]

  proc gossipWorker(p: PortalProtocol) {.async.} =
    while true:
      let (keys, content) = await gossipQueue.popFirst()

      discard await p.neighborhoodGossip(Opt.none(NodeId), keys, @[content])

  for i in 0 ..< concurrentGossips:
    gossipWorkers.add(gossipWorker(p))

  let blockData = readJsonType(dataFile, BlockDataTable)
  if blockData.isOk():
    for header in headers(blockData.get(), verify):
      info "Seeding header content into the network", contentKey = header[0]
      let
        encKey = history_content.encode(header[0])
        contentId = history_content.toContentId(encKey)
      p.storeContent(encKey, contentId, header[1])

      await gossipQueue.addLast((ContentKeysList(@[encode(header[0])]), header[1]))

    return ok()
  else:
    return err(blockData.error)

##
## Era1 based iterators that encode to Portal content
##

# Note: these iterators + the era1 iterators will assert on error. These asserts
# would indicate corrupt/invalid era1 files. We might want to break, raise an
# exception, or return a Result type instead, but the latter does not have great
# support for usage in iterators.

iterator headersWithProof*(
    f: Era1File, epochRecord: EpochRecordCached
): (ContentKeyByteList, seq[byte]) =
  for blockHeader in f.era1BlockHeaders:
    doAssert blockHeader.isPreMerge()

    let
      contentKey = ContentKey(
        contentType: blockHeader,
        blockHeaderKey: BlockKey(blockHash: blockHeader.blockHash()),
      ).encode()

      headerWithProof = buildHeaderWithProof(blockHeader, epochRecord).valueOr:
        raiseAssert "Failed to build header with proof: " & $blockHeader.number

      contentValue = SSZ.encode(headerWithProof)

    yield (contentKey, contentValue)

iterator blockContent*(f: Era1File): (ContentKeyByteList, seq[byte]) =
  for (header, body, receipts, _) in f.era1BlockTuples:
    let blockHash = header.blockHash()

    block: # block body
      let
        contentKey = ContentKey(
          contentType: blockBody, blockBodyKey: BlockKey(blockHash: blockHash)
        ).encode()

        contentValue = encode(body)

      yield (contentKey, contentValue)

    block: # receipts
      let
        contentKey = ContentKey(
          contentType: receipts, receiptsKey: BlockKey(blockHash: blockHash)
        ).encode()

        contentValue = encode(receipts)

      yield (contentKey, contentValue)

##
## Era1 based Gossip calls
##

proc historyGossipHeadersWithProof*(
    p: PortalProtocol, era1File: string, epochRecordFile: Opt[string], verifyEra = false
): Future[Result[void, string]] {.async.} =
  let f = ?Era1File.open(era1File)

  if verifyEra:
    let _ = ?f.verify()

  # Note: building the accumulator takes about 150ms vs 10ms for reading it,
  # so it is probably not really worth using the read version considering the
  # UX hassle it adds to provide the accumulator ssz files.
  let epochRecord =
    if epochRecordFile.isNone:
      ?f.buildAccumulator()
    else:
      ?readEpochRecordCached(epochRecordFile.get())

  for (contentKey, contentValue) in f.headersWithProof(epochRecord):
    let peers = await p.neighborhoodGossip(
      Opt.none(NodeId), ContentKeysList(@[contentKey]), @[contentValue]
    )
    info "Gossiped block header", contentKey, peers

  ok()

proc historyGossipBlockContent*(
    p: PortalProtocol, era1File: string, verifyEra = false
): Future[Result[void, string]] {.async.} =
  let f = ?Era1File.open(era1File)

  if verifyEra:
    let _ = ?f.verify()

  for (contentKey, contentValue) in f.blockContent():
    let peers = await p.neighborhoodGossip(
      Opt.none(NodeId), ContentKeysList(@[contentKey]), @[contentValue]
    )
    info "Gossiped block content", contentKey, peers

  ok()
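
# Example usage (hypothetical; assumes `p` is a history network
# `PortalProtocol` and an era1 file such as `mainnet-00000-5ec1ffb8.era1` is
# available; the file name is for illustration only):
#
#   let headersRes = await p.historyGossipHeadersWithProof(
#     "mainnet-00000-5ec1ffb8.era1", Opt.none(string), verifyEra = true
#   )
#   if headersRes.isOk():
#     discard await p.historyGossipBlockContent("mainnet-00000-5ec1ffb8.era1")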