# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# Tool to download chain history data from a local node, and save it to a json
# file or sqlite database.
#
# In case of json:
# Block data is stored as it gets transmitted over the wire and as defined here:
# https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#content-keys-and-values
#
# The json file has the following format:
# {
#   "hexEncodedBlockHash": {
#     "header": "the rlp encoded block header as a hex string"
#     "body": "the SSZ encoded container of transactions and uncles as a hex string"
#     "receipts": "the SSZ encoded list of the receipts as a hex string"
#     "number": "block number"
#   },
#   ...,
#   ...,
# }
#
# In case of sqlite:
# Data is saved in a format friendly to the history network, i.e. one table
# with 3 columns: contentid, contentkey, content.
# Such a format enables queries to quickly find content in the range of some
# node, which makes it possible to offer content to nodes in bulk.
#
# When using geth as the client to download receipts from, be aware that you
# will have to set the number of blocks to maintain the transaction index for
# to unlimited if you want access to all transactions/receipts.
# e.g.: `./build/bin/geth --ws --txlookuplimit=0`
#
{.push raises: [].}
|
2022-06-08 13:14:01 +00:00
|
|
|
|
|
|
|
import
|
2023-04-11 12:06:45 +00:00
|
|
|
std/[json, typetraits, strutils, strformat, os, uri],
|
2022-06-08 13:14:01 +00:00
|
|
|
confutils,
|
|
|
|
stew/[byteutils, io2],
|
|
|
|
json_serialization,
|
|
|
|
faststreams, chronicles,
|
|
|
|
eth/[common, rlp], chronos,
|
|
|
|
eth/common/eth_types_json_serialization,
|
2022-06-22 06:50:58 +00:00
|
|
|
json_rpc/rpcclient,
|
2022-10-14 06:57:17 +00:00
|
|
|
ncli/e2store,
|
2022-06-20 14:52:48 +00:00
|
|
|
../seed_db,
|
2022-08-01 19:00:21 +00:00
|
|
|
../../premix/[downloader, parser],
|
2022-10-17 18:38:51 +00:00
|
|
|
../network/history/[history_content, accumulator],
|
2023-07-05 18:17:03 +00:00
|
|
|
../eth_data/[history_data_json_store, history_data_ssz_e2s],
|
|
|
|
eth_data_exporter/[exporter_conf, exporter_common, cl_data_exporter]
|
2022-06-08 13:14:01 +00:00
|
|
|
|
2022-07-01 19:51:51 +00:00
|
|
|
# Need to be selective due to the `Block` type conflict from downloader
|
|
|
|
from ../network/history/history_network import encode
|
|
|
|
|
2023-04-19 15:01:01 +00:00
|
|
|
chronicles.formatIt(IoErrorCode): $it
|
|
|
|
|
2022-08-01 19:00:21 +00:00
|
|
|
proc downloadHeader(client: RpcClient, i: uint64): BlockHeader =
|
|
|
|
let blockNumber = u256(i)
|
|
|
|
try:
|
|
|
|
let jsonHeader = requestHeader(blockNumber, some(client))
|
|
|
|
parseBlockHeader(jsonHeader)
|
|
|
|
except CatchableError as e:
|
|
|
|
fatal "Error while requesting BlockHeader", error = e.msg, number = i
|
|
|
|
quit 1
|
|
|
|
|
2022-06-22 06:50:58 +00:00
|
|
|
proc downloadBlock(i: uint64, client: RpcClient): Block =
|
2022-06-08 13:14:01 +00:00
|
|
|
let num = u256(i)
|
|
|
|
try:
|
2022-06-22 06:50:58 +00:00
|
|
|
return requestBlock(num, flags = {DownloadReceipts}, client = some(client))
|
2022-06-08 13:14:01 +00:00
|
|
|
except CatchableError as e:
|
2022-06-20 14:52:48 +00:00
|
|
|
fatal "Error while requesting Block", error = e.msg, number = i
|
2022-06-08 13:14:01 +00:00
|
|
|
quit 1
|
|
|
|
|
2022-08-01 19:00:21 +00:00
|
|
|
proc writeHeadersToJson(config: ExporterConf, client: RpcClient) =
|
|
|
|
let fh = createAndOpenFile(string config.dataDir, string config.fileName)
|
|
|
|
|
|
|
|
try:
|
|
|
|
var writer = JsonWriter[DefaultFlavor].init(fh.s, pretty = true)
|
|
|
|
writer.beginRecord()
|
2022-11-04 08:27:01 +00:00
|
|
|
for i in config.startBlock..config.endBlock:
|
2022-08-01 19:00:21 +00:00
|
|
|
let blck = client.downloadHeader(i)
|
|
|
|
writer.writeHeaderRecord(blck)
|
2022-11-04 08:27:01 +00:00
|
|
|
if ((i - config.startBlock) mod 8192) == 0 and i != config.startBlock:
|
2022-08-01 19:00:21 +00:00
|
|
|
info "Downloaded 8192 new block headers", currentHeader = i
|
|
|
|
writer.endRecord()
|
|
|
|
info "File successfully written", path = config.dataDir / config.fileName
|
|
|
|
except IOError as e:
|
|
|
|
fatal "Error occured while writing to file", error = e.msg
|
|
|
|
quit 1
|
|
|
|
finally:
|
|
|
|
try:
|
|
|
|
fh.close()
|
|
|
|
except IOError as e:
|
|
|
|
fatal "Error occured while closing file", error = e.msg
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
proc writeBlocksToJson(config: ExporterConf, client: RpcClient) =
|
|
|
|
let fh = createAndOpenFile(string config.dataDir, string config.fileName)
|
2022-06-08 13:14:01 +00:00
|
|
|
|
|
|
|
try:
|
2022-07-01 19:51:51 +00:00
|
|
|
var writer = JsonWriter[DefaultFlavor].init(fh.s, pretty = true)
|
2022-06-08 13:14:01 +00:00
|
|
|
writer.beginRecord()
|
2022-11-04 08:27:01 +00:00
|
|
|
for i in config.startBlock..config.endBlock:
|
2022-06-22 06:50:58 +00:00
|
|
|
let blck = downloadBlock(i, client)
|
2023-01-05 14:26:58 +00:00
|
|
|
writer.writeBlockRecord(blck.header, blck.body, blck.receipts)
|
2022-11-04 08:27:01 +00:00
|
|
|
if ((i - config.startBlock) mod 8192) == 0 and i != config.startBlock:
|
2022-08-01 19:00:21 +00:00
|
|
|
info "Downloaded 8192 new blocks", currentBlock = i
|
2022-06-08 13:14:01 +00:00
|
|
|
writer.endRecord()
|
2022-08-01 19:00:21 +00:00
|
|
|
info "File successfully written", path = config.dataDir / config.fileName
|
2022-06-08 13:14:01 +00:00
|
|
|
except IOError as e:
|
2022-08-01 19:00:21 +00:00
|
|
|
fatal "Error occured while writing to file", error = e.msg
|
2022-06-08 13:14:01 +00:00
|
|
|
quit 1
|
|
|
|
finally:
|
|
|
|
try:
|
|
|
|
fh.close()
|
|
|
|
except IOError as e:
|
2022-08-01 19:00:21 +00:00
|
|
|
fatal "Error occured while closing file", error = e.msg
|
2022-06-08 13:14:01 +00:00
|
|
|
quit 1
|
|
|
|
|
2022-08-01 19:00:21 +00:00
|
|
|
proc writeBlocksToDb(config: ExporterConf, client: RpcClient) =
|
2022-09-10 19:00:27 +00:00
|
|
|
let db = SeedDb.new(distinctBase(config.dataDir), config.fileName)
|
2022-06-22 06:50:58 +00:00
|
|
|
|
2022-06-20 14:52:48 +00:00
|
|
|
defer:
|
|
|
|
db.close()
|
|
|
|
|
2022-11-04 08:27:01 +00:00
|
|
|
for i in config.startBlock..config.endBlock:
|
2022-06-20 14:52:48 +00:00
|
|
|
let
|
2022-06-22 06:50:58 +00:00
|
|
|
blck = downloadBlock(i, client)
|
2022-06-20 14:52:48 +00:00
|
|
|
blockHash = blck.header.blockHash()
|
2022-09-28 07:09:38 +00:00
|
|
|
contentKeyType = BlockKey(blockHash: blockHash)
|
2022-07-01 19:51:51 +00:00
|
|
|
headerKey = encode(ContentKey(
|
|
|
|
contentType: blockHeader, blockHeaderKey: contentKeyType))
|
|
|
|
bodyKey = encode(ContentKey(
|
|
|
|
contentType: blockBody, blockBodyKey: contentKeyType))
|
|
|
|
receiptsKey = encode(
|
|
|
|
ContentKey(contentType: receipts, receiptsKey: contentKeyType))
|
2022-06-20 14:52:48 +00:00
|
|
|
|
2022-07-01 19:51:51 +00:00
|
|
|
db.put(headerKey.toContentId(), headerKey.asSeq(), rlp.encode(blck.header))
|
2022-06-20 14:52:48 +00:00
|
|
|
|
2022-07-01 19:51:51 +00:00
|
|
|
# No need to seed empty lists into database
|
2022-06-20 14:52:48 +00:00
|
|
|
if len(blck.body.transactions) > 0 or len(blck.body.uncles) > 0:
|
2022-07-01 19:51:51 +00:00
|
|
|
let body = encode(blck.body)
|
|
|
|
db.put(bodyKey.toContentId(), bodyKey.asSeq(), body)
|
2022-06-20 14:52:48 +00:00
|
|
|
|
|
|
|
if len(blck.receipts) > 0:
|
2022-07-01 19:51:51 +00:00
|
|
|
let receipts = encode(blck.receipts)
|
|
|
|
db.put(receiptsKey.toContentId(), receiptsKey.asSeq(), receipts)
|
2022-06-20 14:52:48 +00:00
|
|
|
|
|
|
|
info "Data successfuly written to db"
|
|
|
|
|
2022-08-01 19:00:21 +00:00
|
|
|
proc exportBlocks(config: ExporterConf, client: RpcClient) =
|
2022-06-20 14:52:48 +00:00
|
|
|
case config.storageMode
|
2023-07-05 18:17:03 +00:00
|
|
|
of JsonStorage:
|
2022-08-01 19:00:21 +00:00
|
|
|
if config.headersOnly:
|
|
|
|
writeHeadersToJson(config, client)
|
|
|
|
else:
|
|
|
|
writeBlocksToJson(config, client)
|
2023-07-05 18:17:03 +00:00
|
|
|
of DbStorage:
|
2022-08-01 19:00:21 +00:00
|
|
|
if config.headersOnly:
|
|
|
|
fatal "Db mode not available for headers only"
|
|
|
|
quit 1
|
|
|
|
else:
|
|
|
|
writeBlocksToDb(config, client)
|
2022-06-20 14:52:48 +00:00
|
|
|
|
2023-04-11 12:06:45 +00:00
|
|
|
proc newRpcClient(web3Url: Web3Url): RpcClient =
|
|
|
|
# TODO: I don't like this API. I think the creation of the RPC clients should
|
|
|
|
# already include the URL. And then an optional connect may be necessary
|
|
|
|
# depending on the protocol.
|
|
|
|
let client: RpcClient =
|
|
|
|
case web3Url.kind
|
|
|
|
of HttpUrl:
|
|
|
|
newRpcHttpClient()
|
|
|
|
of WsUrl:
|
|
|
|
newRpcWebSocketClient()
|
|
|
|
|
|
|
|
client
|
|
|
|
|
|
|
|
proc connectRpcClient(
|
|
|
|
client: RpcClient, web3Url: Web3Url):
|
|
|
|
Future[Result[void, string]] {.async.} =
|
|
|
|
case web3Url.kind
|
|
|
|
of HttpUrl:
|
|
|
|
try:
|
|
|
|
await RpcHttpClient(client).connect(web3Url.url)
|
|
|
|
except CatchableError as e:
|
|
|
|
return err(e.msg)
|
|
|
|
of WsUrl:
|
|
|
|
try:
|
|
|
|
await RpcWebSocketClient(client).connect(web3Url.url)
|
|
|
|
except CatchableError as e:
|
|
|
|
return err(e.msg)
|
|
|
|
|
2022-06-08 13:14:01 +00:00
|
|
|
when isMainModule:
|
|
|
|
{.pop.}
|
|
|
|
let config = ExporterConf.load()
|
2023-01-31 12:38:08 +00:00
|
|
|
{.push raises: [].}
|
2022-06-08 13:14:01 +00:00
|
|
|
|
2022-08-01 19:00:21 +00:00
|
|
|
setLogLevel(config.logLevel)
|
|
|
|
|
2022-10-14 06:57:17 +00:00
|
|
|
let dataDir = config.dataDir.string
|
|
|
|
if not isDir(dataDir):
|
|
|
|
let res = createPath(dataDir)
|
|
|
|
if res.isErr():
|
|
|
|
fatal "Error occurred while creating data directory",
|
|
|
|
dir = dataDir, error = ioErrorMsg(res.error)
|
|
|
|
quit 1
|
2022-06-22 06:50:58 +00:00
|
|
|
|
2022-08-01 19:00:21 +00:00
|
|
|
case config.cmd
|
2023-07-05 18:17:03 +00:00
|
|
|
of ExporterCmd.history:
|
|
|
|
case config.historyCmd
|
|
|
|
of HistoryCmd.exportBlockData:
|
|
|
|
let client = newRpcClient(config.web3Url)
|
|
|
|
let connectRes = waitFor client.connectRpcClient(config.web3Url)
|
|
|
|
if connectRes.isErr():
|
|
|
|
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
|
|
|
|
quit 1
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
if (config.endBlock < config.startBlock):
|
|
|
|
fatal "Initial block number should be smaller than end block number",
|
|
|
|
startBlock = config.startBlock,
|
|
|
|
endBlock = config.endBlock
|
|
|
|
quit 1
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
try:
|
|
|
|
exportBlocks(config, client)
|
|
|
|
finally:
|
|
|
|
waitFor client.close()
|
|
|
|
|
|
|
|
of HistoryCmd.exportEpochHeaders:
|
|
|
|
let client = newRpcClient(config.web3Url)
|
|
|
|
let connectRes = waitFor client.connectRpcClient(config.web3Url)
|
|
|
|
if connectRes.isErr():
|
|
|
|
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
|
|
|
|
quit 1
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
proc exportEpochHeaders(file: string, epoch: uint64): Result[void, string] =
|
|
|
|
# Downloading headers from JSON RPC endpoint
|
|
|
|
info "Requesting epoch headers", epoch
|
|
|
|
var headers: seq[BlockHeader]
|
|
|
|
for j in 0..<epochSize.uint64:
|
|
|
|
debug "Requesting block", number = j
|
|
|
|
let header = client.downloadHeader(epoch*epochSize + j)
|
|
|
|
headers.add(header)
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let fh = ? openFile(file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
|
|
|
|
defer: discard closeFile(fh)
|
2022-08-01 19:00:21 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
info "Writing headers to file", file
|
|
|
|
for header in headers:
|
|
|
|
discard ? fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))
|
2022-08-01 19:00:21 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
ok()
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
# TODO: Could make the JSON-RPC requests concurrent per epoch.
|
|
|
|
# Batching would also be nice but our json-rpc does not support that:
|
|
|
|
# https://geth.ethereum.org/docs/rpc/batch
|
|
|
|
for i in config.startEpoch..config.endEpoch:
|
|
|
|
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
if isFile(file):
|
|
|
|
notice "Skipping epoch headers, file already exists", file
|
2022-10-14 06:57:17 +00:00
|
|
|
else:
|
2023-07-05 18:17:03 +00:00
|
|
|
let res = exportEpochHeaders(file, i)
|
|
|
|
if res.isErr():
|
|
|
|
error "Failed exporting epoch headers", file, error = res.error
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
waitFor client.close()
|
2022-10-17 18:38:51 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
of HistoryCmd.verifyEpochHeaders:
|
|
|
|
proc verifyEpochHeaders(file: string, epoch: uint64): Result[void, string] =
|
2022-10-17 18:38:51 +00:00
|
|
|
let fh = ? openFile(file, {OpenFlags.Read}).mapErr(toString)
|
|
|
|
defer: discard closeFile(fh)
|
|
|
|
|
|
|
|
var data: seq[byte]
|
|
|
|
while true:
|
|
|
|
let header = readRecord(fh, data).valueOr:
|
|
|
|
break
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2022-10-17 18:38:51 +00:00
|
|
|
if header.typ == ExecutionBlockHeaderRecord:
|
2023-07-05 18:17:03 +00:00
|
|
|
let
|
|
|
|
blockHeader =
|
|
|
|
try:
|
|
|
|
rlp.decode(data, BlockHeader)
|
|
|
|
except RlpError as e:
|
|
|
|
return err("Invalid block header: " & e.msg)
|
|
|
|
|
|
|
|
headerHash = to0xHex(rlpHash(blockHeader).data)
|
|
|
|
debug "Header decoded successfully",
|
|
|
|
hash = headerHash, blockNumber = blockHeader.blockNumber
|
2022-10-17 18:38:51 +00:00
|
|
|
else:
|
|
|
|
warn "Skipping record, not a block header", typ = toHex(header.typ)
|
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
ok()
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
for i in config.startEpochVerify..config.endEpochVerify:
|
|
|
|
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
|
|
|
|
let res = verifyEpochHeaders(file, i)
|
|
|
|
if res.isErr():
|
|
|
|
error "Failed verifying epoch headers", file, error = res.error
|
|
|
|
else:
|
|
|
|
info "Successfully decoded epoch headers", file
|
|
|
|
|
|
|
|
of HistoryCmd.exportAccumulatorData:
|
|
|
|
# Lets first check if the accumulator file already exists before starting
|
|
|
|
# to build it.
|
|
|
|
let accumulatorFile = dataDir / config.accumulatorFileName
|
|
|
|
if isFile(accumulatorFile):
|
|
|
|
notice "Not building accumulator, file already exists",
|
|
|
|
file = accumulatorFile
|
|
|
|
quit 1
|
2022-08-01 19:00:21 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
# Lets verify if the necessary files exists before starting to build the
|
|
|
|
# accumulator.
|
|
|
|
for i in 0..<preMergeEpochs:
|
|
|
|
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
|
|
|
|
if not isFile(file):
|
|
|
|
fatal "Required epoch headers file does not exist", file
|
|
|
|
quit 1
|
2022-10-14 06:57:17 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
proc buildAccumulator(dataDir: string, writeEpochAccumulators = false):
|
|
|
|
Result[FinishedAccumulator, string] =
|
|
|
|
var accumulator: Accumulator
|
|
|
|
for i in 0..<preMergeEpochs:
|
|
|
|
let file =
|
|
|
|
try: dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
|
|
|
|
except ValueError as e: raiseAssert e.msg
|
|
|
|
|
|
|
|
let fh = ? openFile(file, {OpenFlags.Read}).mapErr(toString)
|
|
|
|
defer: discard closeFile(fh)
|
|
|
|
|
|
|
|
var data: seq[byte]
|
|
|
|
var count = 0'u64
|
|
|
|
while true:
|
|
|
|
let header = readRecord(fh, data).valueOr:
|
|
|
|
break
|
|
|
|
|
|
|
|
if header.typ == ExecutionBlockHeaderRecord:
|
|
|
|
let blockHeader =
|
|
|
|
try:
|
|
|
|
rlp.decode(data, BlockHeader)
|
|
|
|
except RlpError as e:
|
|
|
|
return err("Invalid block header in " & file & ": " & e.msg)
|
|
|
|
|
|
|
|
# Quick sanity check
|
|
|
|
if blockHeader.blockNumber.truncate(uint64) != i*epochSize + count:
|
|
|
|
fatal "Incorrect block headers in file", file = file,
|
|
|
|
blockNumber = blockHeader.blockNumber,
|
|
|
|
expectedBlockNumber = i*epochSize + count
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
updateAccumulator(accumulator, blockHeader)
|
|
|
|
|
|
|
|
# Note: writing away of epoch accumulators occurs 1 iteration before
|
|
|
|
# updating the epoch accumulator, as the latter happens when passed
|
|
|
|
# a header for the next epoch (or on finishing the epoch).
|
|
|
|
if writeEpochAccumulators:
|
|
|
|
if accumulator.currentEpoch.len() == epochSize or
|
|
|
|
blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
|
|
|
|
let file =
|
|
|
|
try: dataDir / &"mainnet-epoch-accumulator-{i.uint64:05}.ssz"
|
|
|
|
except ValueError as e: raiseAssert e.msg
|
|
|
|
let res = io2.writeFile(file, SSZ.encode(accumulator.currentEpoch))
|
|
|
|
if res.isErr():
|
|
|
|
error "Failed writing epoch accumulator to file",
|
|
|
|
file, error = res.error
|
|
|
|
else:
|
|
|
|
notice "Succesfully wrote epoch accumulator to file", file
|
|
|
|
|
|
|
|
if count == epochSize - 1:
|
|
|
|
info "Updated an epoch", epoch = i
|
|
|
|
count.inc()
|
|
|
|
|
|
|
|
if blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
|
|
|
|
let finishedAccumulator = finishAccumulator(accumulator)
|
|
|
|
info "Updated last epoch, finished building master accumulator",
|
|
|
|
epoch = i
|
|
|
|
return ok(finishedAccumulator)
|
|
|
|
else:
|
|
|
|
warn "Skipping record, not a block header", typ = toHex(header.typ)
|
|
|
|
|
|
|
|
err("Not enough headers provided to finish the accumulator")
|
|
|
|
|
|
|
|
let accumulatorRes = buildAccumulator(dataDir, config.writeEpochAccumulators)
|
|
|
|
if accumulatorRes.isErr():
|
|
|
|
fatal "Could not build accumulator", error = accumulatorRes.error
|
|
|
|
quit 1
|
|
|
|
let accumulator = accumulatorRes.get()
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let res = io2.writeFile(accumulatorFile, SSZ.encode(accumulator))
|
|
|
|
if res.isErr():
|
|
|
|
error "Failed writing accumulator to file",
|
|
|
|
file = accumulatorFile, error = res.error
|
|
|
|
quit 1
|
|
|
|
else:
|
|
|
|
notice "Succesfully wrote master accumulator to file",
|
|
|
|
file = accumulatorFile
|
2022-11-04 08:27:01 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
of HistoryCmd.printAccumulatorData:
|
|
|
|
let file = dataDir / config.accumulatorFileNamePrint
|
2022-11-04 08:27:01 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let res = readAccumulator(file)
|
|
|
|
if res.isErr():
|
|
|
|
fatal "Failed reading accumulator from file", error = res.error, file
|
|
|
|
quit 1
|
2022-11-04 08:27:01 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let
|
|
|
|
accumulator = res.get()
|
|
|
|
accumulatorRoot = hash_tree_root(accumulator)
|
|
|
|
|
|
|
|
info "Accumulator decoded successfully",
|
|
|
|
root = accumulatorRoot
|
|
|
|
|
|
|
|
echo "Master Accumulator:"
|
|
|
|
echo "-------------------"
|
|
|
|
echo &"Root: {accumulatorRoot}"
|
|
|
|
echo ""
|
|
|
|
echo "Historical Epochs:"
|
|
|
|
echo "------------------"
|
|
|
|
echo "Epoch Root"
|
|
|
|
for i, root in accumulator.historicalEpochs:
|
|
|
|
echo &"{i.uint64:05} 0x{root.toHex()}"
|
|
|
|
|
|
|
|
of HistoryCmd.exportHeaderRange:
|
|
|
|
let client = newRpcClient(config.web3Url)
|
|
|
|
let connectRes = waitFor client.connectRpcClient(config.web3Url)
|
|
|
|
if connectRes.isErr():
|
|
|
|
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
|
|
|
|
quit 1
|
2022-11-04 08:27:01 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let
|
|
|
|
startBlockNumber = config.startBlockNumber
|
|
|
|
endBlockNumber = config.endBlockNumber
|
2022-11-04 08:27:01 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
if (endBlockNumber < startBlockNumber):
|
|
|
|
fatal "Start block number should be smaller than end block number",
|
|
|
|
startBlockNumber, endBlockNumber
|
|
|
|
quit 1
|
2022-11-04 08:27:01 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
proc exportHeaders(
|
|
|
|
file: string, startBlockNumber, endBlockNumber: uint64):
|
|
|
|
Result[void, string] =
|
|
|
|
# Downloading headers from JSON RPC endpoint
|
|
|
|
info "Requesting headers", startBlockNumber, endBlockNumber
|
|
|
|
var headers: seq[BlockHeader]
|
|
|
|
for j in startBlockNumber..endBlockNumber:
|
|
|
|
debug "Requesting block", number = j
|
|
|
|
let header = client.downloadHeader(j)
|
|
|
|
headers.add(header)
|
|
|
|
|
|
|
|
let fh = ? openFile(
|
|
|
|
file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
|
|
|
|
defer: discard closeFile(fh)
|
2022-11-04 08:27:01 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
info "Writing headers to file", file
|
|
|
|
for header in headers:
|
|
|
|
discard ? fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
ok()
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let file =
|
|
|
|
try: dataDir / &"mainnet-headers-{startBlockNumber:05}-{endBlockNumber:05}.e2s"
|
|
|
|
except ValueError as e: raiseAssert e.msg
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let res = exportHeaders(file, startBlockNumber, endBlockNumber)
|
|
|
|
if res.isErr():
|
|
|
|
fatal "Failed exporting headers", error = res.error
|
|
|
|
quit 1
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
of HistoryCmd.exportHeadersWithProof:
|
|
|
|
let
|
|
|
|
startBlockNumber = config.startBlockNumber2
|
|
|
|
endBlockNumber = config.endBlockNumber2
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
if (endBlockNumber < startBlockNumber):
|
|
|
|
fatal "Start block number should be smaller than end block number",
|
|
|
|
startBlockNumber, endBlockNumber
|
2023-04-11 12:06:45 +00:00
|
|
|
quit 1
|
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let file = &"mainnet-headersWithProof-{startBlockNumber:05}-{endBlockNumber:05}.json"
|
|
|
|
let fh = createAndOpenFile(string config.dataDir, file)
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
var contentTable: JsonPortalContentTable
|
|
|
|
for blockNumber in startBlockNumber..endBlockNumber:
|
|
|
|
let
|
|
|
|
epochIndex = getEpochIndex(blockNumber)
|
|
|
|
epochHeadersFile =
|
|
|
|
dataDir / &"mainnet-headers-epoch-{epochIndex:05}.e2s"
|
|
|
|
epochAccumulatorFile =
|
|
|
|
dataDir / &"mainnet-epoch-accumulator-{epochIndex:05}.ssz"
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let res = readBlockHeaders(epochHeadersFile)
|
|
|
|
if res.isErr():
|
|
|
|
error "Could not read headers epoch file", error = res.error
|
|
|
|
quit 1
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let blockHeaders = res.get()
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let epochAccumulatorRes = readEpochAccumulatorCached(epochAccumulatorFile)
|
|
|
|
if epochAccumulatorRes.isErr():
|
|
|
|
error "Could not read epoch accumulator file", error = res.error
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
let epochAccumulator = epochAccumulatorRes.get()
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let headerIndex = getHeaderRecordIndex(blockNumber, epochIndex)
|
|
|
|
let header = blockHeaders[headerIndex]
|
|
|
|
if header.isPreMerge():
|
|
|
|
let headerWithProof = buildHeaderWithProof(header, epochAccumulator)
|
|
|
|
if headerWithProof.isErr:
|
|
|
|
error "Error building proof", error = headerWithProof.error
|
|
|
|
quit 1
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
let
|
|
|
|
content = headerWithProof.get()
|
|
|
|
contentKey = ContentKey(
|
|
|
|
contentType: blockHeader,
|
|
|
|
blockHeaderKey: BlockKey(blockHash: header.blockHash()))
|
|
|
|
encodedContentKey = history_content.encode(contentKey)
|
|
|
|
encodedContent = SSZ.encode(content)
|
|
|
|
|
|
|
|
let portalContent = JsonPortalContent(
|
|
|
|
content_key: encodedContentKey.asSeq().to0xHex(),
|
|
|
|
content_value: encodedContent.to0xHex())
|
|
|
|
|
2023-07-08 15:01:33 +00:00
|
|
|
contentTable[$blockNumber] = portalContent
|
2023-07-05 18:17:03 +00:00
|
|
|
else:
|
|
|
|
# TODO: Deal with writing post merge headers
|
|
|
|
error "Not a pre merge header"
|
2023-04-11 12:06:45 +00:00
|
|
|
quit 1
|
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
writePortalContentToJson(fh, contentTable)
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
try:
|
|
|
|
fh.close()
|
|
|
|
except IOError as e:
|
|
|
|
fatal "Error occured while closing file", error = e.msg
|
|
|
|
quit 1
|
2023-04-11 12:06:45 +00:00
|
|
|
|
2023-07-05 18:17:03 +00:00
|
|
|
of ExporterCmd.beacon:
|
2023-09-15 14:21:00 +00:00
|
|
|
let (cfg, forkDigests, _) = getBeaconData()
|
2023-07-05 18:17:03 +00:00
|
|
|
|
|
|
|
case config.beaconCmd
|
|
|
|
of BeaconCmd.exportLCBootstrap:
|
|
|
|
waitFor exportLCBootstrapUpdate(
|
|
|
|
config.restUrl, string config.dataDir,
|
|
|
|
config.trustedBlockRoot,
|
|
|
|
cfg, forkDigests)
|
|
|
|
of BeaconCmd.exportLCUpdates:
|
|
|
|
waitFor exportLCUpdates(
|
|
|
|
config.restUrl, string config.dataDir,
|
|
|
|
config.startPeriod, config.count,
|
|
|
|
cfg, forkDigests)
|
|
|
|
of BeaconCmd.exportLCFinalityUpdate:
|
|
|
|
waitFor exportLCFinalityUpdate(
|
|
|
|
config.restUrl, string config.dataDir, cfg, forkDigests)
|
|
|
|
of BeaconCmd.exportLCOptimisticUpdate:
|
|
|
|
waitFor exportLCOptimisticUpdate(
|
|
|
|
config.restUrl, string config.dataDir, cfg, forkDigests)
|