# Fluffy
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# Tool to download chain history data from a local node and save it to a json
# file or a sqlite database.
#
# In case of json:
# Block data is stored as it gets transmitted over the wire and as defined here:
# https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#content-keys-and-values
#
# The json file has the following format:
# {
#   "hexEncodedBlockHash": {
#     "header": "the rlp encoded block header as a hex string",
#     "body": "the SSZ encoded container of transactions and uncles as a hex string",
#     "receipts": "the SSZ encoded list of the receipts as a hex string",
#     "number": "block number"
#   },
#   ...,
#   ...,
# }
#
# In case of sqlite:
# Data is saved in a format friendly to the history network, i.e. one table
# with 3 columns: contentid, contentkey, content.
# Such a format enables queries that quickly find all content in the range of
# some node, which makes it possible to offer content to nodes in bulk.
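#
# As an illustrative sketch only (the actual statement lives in seed_db and
# the table name used here is hypothetical), offering such a range then
# boils down to a single query:
#   SELECT contentkey, content FROM seed_data
#   WHERE contentid >= :rangeStart AND contentid <= :rangeEnd;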
#
# When using geth as the client to download receipts from, be aware that you
# will have to set the number of blocks to maintain the transaction index for
# to unlimited if you want access to all transactions/receipts,
# e.g.: `./build/bin/geth --ws --txlookuplimit=0`
#

{.push raises: [].}

import
  std/[json, typetraits, strutils, strformat, os, uri],
  confutils,
  stew/[byteutils, io2],
  json_serialization,
  faststreams,
  chronicles,
  eth/[common, rlp],
  chronos,
  eth/common/eth_types_json_serialization,
  json_rpc/rpcclient,
  snappy,
  ncli/e2store,
  ../database/seed_db,
  ../../premix/[downloader, parser],
  ../network/history/[history_content, accumulator],
  ../eth_data/[history_data_json_store, history_data_ssz_e2s, era1],
  eth_data_exporter/[exporter_conf, exporter_common, cl_data_exporter]

# Need to be selective due to the `Block` type conflict from downloader
from ../network/history/history_network import encode
from ../../nimbus/utils/utils import calcTxRoot, calcReceiptsRoot

chronicles.formatIt(IoErrorCode):
  $it

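# Helpers for fetching data from the execution client over JSON-RPC. Any
# request failure is fatal, as the export cannot proceed with gaps.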
proc downloadHeader(client: RpcClient, i: uint64): BlockHeader =
  try:
    let jsonHeader = requestHeader(i, some(client))
    parseBlockHeader(jsonHeader)
  except CatchableError as e:
    fatal "Error while requesting BlockHeader", error = e.msg, number = i
    quit 1

proc downloadBlock(i: uint64, client: RpcClient): Block =
  try:
    return requestBlock(i, flags = {DownloadReceipts}, client = some(client))
  except CatchableError as e:
    fatal "Error while requesting Block", error = e.msg, number = i
    quit 1

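# The JSON writers below stream one record per header/block and log progress
# every 8192 items, i.e. once per epoch worth of blocks.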
proc writeHeadersToJson(config: ExporterConf, client: RpcClient) =
  let fh = createAndOpenFile(string config.dataDir, string config.fileName)

  try:
    var writer = JsonWriter[DefaultFlavor].init(fh.s, pretty = true)
    writer.beginRecord()
    for i in config.startBlock .. config.endBlock:
      let blck = client.downloadHeader(i)
      writer.writeHeaderRecord(blck)
      if ((i - config.startBlock) mod 8192) == 0 and i != config.startBlock:
        info "Downloaded 8192 new block headers", currentHeader = i
    writer.endRecord()
    info "File successfully written", path = config.dataDir / config.fileName
  except IOError as e:
    fatal "Error occurred while writing to file", error = e.msg
    quit 1
  finally:
    try:
      fh.close()
    except IOError as e:
      fatal "Error occurred while closing file", error = e.msg
      quit 1

proc writeBlocksToJson(config: ExporterConf, client: RpcClient) =
  let fh = createAndOpenFile(string config.dataDir, string config.fileName)

  try:
    var writer = JsonWriter[DefaultFlavor].init(fh.s, pretty = true)
    writer.beginRecord()
    for i in config.startBlock .. config.endBlock:
      let blck = downloadBlock(i, client)
      writer.writeBlockRecord(blck.header, blck.body, blck.receipts)
      if ((i - config.startBlock) mod 8192) == 0 and i != config.startBlock:
        info "Downloaded 8192 new blocks", currentBlock = i
    writer.endRecord()
    info "File successfully written", path = config.dataDir / config.fileName
  except IOError as e:
    fatal "Error occurred while writing to file", error = e.msg
    quit 1
  finally:
    try:
      fh.close()
    except IOError as e:
      fatal "Error occurred while closing file", error = e.msg
      quit 1

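# Stores each block under its three Portal content keys (header, body and
# receipts), all derived from the block hash. The content id computed from
# each key is what enables the range queries described at the top of this
# file.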
proc writeBlocksToDb(config: ExporterConf, client: RpcClient) =
  let db = SeedDb.new(distinctBase(config.dataDir), config.fileName)
  defer:
    db.close()

  for i in config.startBlock .. config.endBlock:
    let
      blck = downloadBlock(i, client)
      blockHash = blck.header.blockHash()
      contentKeyType = BlockKey(blockHash: blockHash)
      headerKey =
        encode(ContentKey(contentType: blockHeader, blockHeaderKey: contentKeyType))
      bodyKey = encode(ContentKey(contentType: blockBody, blockBodyKey: contentKeyType))
      receiptsKey =
        encode(ContentKey(contentType: receipts, receiptsKey: contentKeyType))

    db.put(headerKey.toContentId(), headerKey.asSeq(), rlp.encode(blck.header))

    # No need to seed empty lists into database
    if len(blck.body.transactions) > 0 or len(blck.body.uncles) > 0:
      let body = encode(blck.body)
      db.put(bodyKey.toContentId(), bodyKey.asSeq(), body)

    if len(blck.receipts) > 0:
      let receipts = encode(blck.receipts)
      db.put(receiptsKey.toContentId(), receiptsKey.asSeq(), receipts)

  info "Data successfully written to db"

proc exportBlocks(config: ExporterConf, client: RpcClient) =
  case config.storageMode
  of JsonStorage:
    if config.headersOnly:
      writeHeadersToJson(config, client)
    else:
      writeBlocksToJson(config, client)
  of DbStorage:
    if config.headersOnly:
      fatal "Db mode not available for headers only"
      quit 1
    else:
      writeBlocksToDb(config, client)

proc newRpcClient(web3Url: Web3Url): RpcClient =
  # TODO: I don't like this API. I think the creation of the RPC clients should
  # already include the URL. And then an optional connect may be necessary
  # depending on the protocol.
  let client: RpcClient =
    case web3Url.kind
    of HttpUrl:
      newRpcHttpClient()
    of WsUrl:
      newRpcWebSocketClient()

  client

proc connectRpcClient(
    client: RpcClient, web3Url: Web3Url
): Future[Result[void, string]] {.async.} =
  case web3Url.kind
  of HttpUrl:
    try:
      await RpcHttpClient(client).connect(web3Url.url)
      ok()
    except CatchableError as e:
      return err(e.msg)
  of WsUrl:
    try:
      await RpcWebSocketClient(client).connect(web3Url.url)
      ok()
    except CatchableError as e:
      return err(e.msg)

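# Exports pre-merge block data into era1 files, one file per era of 8192
# blocks. Roughly, per the era1 format, each file is laid out as
#   Version | block-tuple* | other-entries | Accumulator | BlockIndex
# with the epoch accumulator root ending up in the file name.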
proc cmdExportEra1(config: ExporterConf) =
  let client = newRpcClient(config.web3Url)
  try:
    let connectRes = waitFor client.connectRpcClient(config.web3Url)
    if connectRes.isErr():
      fatal "Failed connecting to JSON-RPC client", error = connectRes.error
      quit 1
  except CatchableError as e:
    # TODO: Add async raises to get rid of this.
    fatal "Failed connecting to JSON-RPC client", error = e.msg
    quit 1

  var era = Era1(config.era)
  while config.eraCount == 0 or era < Era1(config.era) + config.eraCount:
    defer:
      era += 1

    let
      startNumber = era.startNumber()
      endNumber = era.endNumber()

    if startNumber >= mergeBlockNumber:
      info "Stopping era as it is after the merge"
      break

    var accumulatorRoot = default(Digest)
    let tmpName = era1FileName("mainnet", era, default(Digest)) & ".tmp"

    info "Writing era1", tmpName

    var completed = false
    block writeFileBlock:
      let e2 =
        openFile(tmpName, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}).get()
      defer:
        discard closeFile(e2)

      # TODO: Not checking the result of init, update or finish here, as all
      # error cases are fatal. But maybe we could throw proper errors still.
      var group = Era1Group.init(e2, startNumber).get()

      # Header records to build the accumulator root
      var headerRecords: seq[accumulator.HeaderRecord]
      for blockNumber in startNumber .. endNumber:
        let blck =
          try:
            # TODO: Not sure about the errors that can occur here. But the whole
            # block requests over json-rpc should be reworked here (and can be
            # used in the bridge also then)
            requestBlock(blockNumber, flags = {DownloadReceipts}, client = some(client))
          except CatchableError as e:
            error "Failed retrieving block, skip creation of era1 file",
              blockNumber, era, error = e.msg
            break writeFileBlock

        var ttd: UInt256
        try:
          blck.jsonData.fromJson "totalDifficulty", ttd
        except ValueError:
          break writeFileBlock

        headerRecords.add(
          accumulator.HeaderRecord(
            blockHash: blck.header.blockHash(), totalDifficulty: ttd
          )
        )

        group.update(e2, blockNumber, blck.header, blck.body, blck.receipts, ttd).get()

      accumulatorRoot = getEpochAccumulatorRoot(headerRecords)

      group.finish(e2, accumulatorRoot, endNumber).get()
      completed = true
    if completed:
      let name = era1FileName("mainnet", era, accumulatorRoot)
      # We cannot check for the exact file any earlier as we need to know the
      # accumulator root.
      # TODO: Could scan for file with era number in it.
      if isFile(name):
        info "Era1 file already exists", era, name
        if (let e = io2.removeFile(tmpName); e.isErr):
          warn "Failed to clean up tmp era1 file", tmpName, error = e.error
        continue

      try:
        moveFile(tmpName, name)
      except Exception as e: # TODO
        warn "Failed to rename era1 file to its final name",
          name, tmpName, error = e.msg

      info "Writing era1 completed", name
    else:
      error "Failed creating the era1 file", era
      if (let e = io2.removeFile(tmpName); e.isErr):
        warn "Failed to clean up incomplete era1 file", tmpName, error = e.error

proc cmdVerifyEra1(config: ExporterConf) =
  let f = Era1File.open(config.era1FileName).valueOr:
    warn "Failed to open era file", error = error
    quit 1
  defer:
    close(f)

  let root = f.verify.valueOr:
    warn "Verification of era file failed", error = error
    quit 1

  notice "Era1 file successfully verified",
    accumulatorRoot = root.data.to0xHex(), file = config.era1FileName

when isMainModule:
  {.pop.}
  let config = ExporterConf.load()
  {.push raises: [].}

  setLogLevel(config.logLevel)

  let dataDir = config.dataDir.string
  if not isDir(dataDir):
    let res = createPath(dataDir)
    if res.isErr():
      fatal "Error occurred while creating data directory",
        dir = dataDir, error = ioErrorMsg(res.error)
      quit 1

  case config.cmd
  of ExporterCmd.history:
    case config.historyCmd
    of HistoryCmd.exportBlockData:
      let client = newRpcClient(config.web3Url)
      let connectRes = waitFor client.connectRpcClient(config.web3Url)
      if connectRes.isErr():
        fatal "Failed connecting to JSON-RPC client", error = connectRes.error
        quit 1

      if (config.endBlock < config.startBlock):
        fatal "Start block number should not be larger than end block number",
          startBlock = config.startBlock, endBlock = config.endBlock
        quit 1

      try:
        exportBlocks(config, client)
      finally:
        waitFor client.close()
    of HistoryCmd.exportEpochHeaders:
      let client = newRpcClient(config.web3Url)
      let connectRes = waitFor client.connectRpcClient(config.web3Url)
      if connectRes.isErr():
        fatal "Failed connecting to JSON-RPC client", error = connectRes.error
        quit 1

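      # Downloads all headers of one epoch (8192 blocks) and appends them as
      # e2s records to the given file.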
      proc exportEpochHeaders(file: string, epoch: uint64): Result[void, string] =
        # Downloading headers from JSON RPC endpoint
        info "Requesting epoch headers", epoch
        var headers: seq[BlockHeader]
        for j in 0 ..< epochSize.uint64:
          debug "Requesting block", number = j
          let header = client.downloadHeader(epoch * epochSize + j)
          headers.add(header)

        let fh = ?openFile(file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
        defer:
          discard closeFile(fh)

        info "Writing headers to file", file
        for header in headers:
          discard ?fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))

        ok()

      # TODO: Could make the JSON-RPC requests concurrent per epoch.
      # Batching would also be nice but our json-rpc does not support that:
      # https://geth.ethereum.org/docs/rpc/batch
      for i in config.startEpoch .. config.endEpoch:
        let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"

        if isFile(file):
          notice "Skipping epoch headers, file already exists", file
        else:
          let res = exportEpochHeaders(file, i)
          if res.isErr():
            error "Failed exporting epoch headers", file, error = res.error

      waitFor client.close()
    of HistoryCmd.verifyEpochHeaders:
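      # Walks all records in the e2s file and checks that every block header
      # record RLP-decodes; other record types are only warned about.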
      proc verifyEpochHeaders(file: string, epoch: uint64): Result[void, string] =
        let fh = ?openFile(file, {OpenFlags.Read}).mapErr(toString)
        defer:
          discard closeFile(fh)

        var data: seq[byte]
        while true:
          let header = readRecord(fh, data).valueOr:
            break

          if header.typ == ExecutionBlockHeaderRecord:
            let
              blockHeader =
                try:
                  rlp.decode(data, BlockHeader)
                except RlpError as e:
                  return err("Invalid block header: " & e.msg)

              headerHash = to0xHex(rlpHash(blockHeader).data)
            debug "Header decoded successfully",
              hash = headerHash, blockNumber = blockHeader.number
          else:
            warn "Skipping record, not a block header", typ = toHex(header.typ)

        ok()

      for i in config.startEpochVerify .. config.endEpochVerify:
        let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
        let res = verifyEpochHeaders(file, i)
        if res.isErr():
          error "Failed verifying epoch headers", file, error = res.error
        else:
          info "Successfully decoded epoch headers", file
    of HistoryCmd.exportAccumulatorData:
      # Let's first check if the accumulator file already exists before
      # starting to build it.
      let accumulatorFile = dataDir / config.accumulatorFileName
      if isFile(accumulatorFile):
        notice "Not building accumulator, file already exists", file = accumulatorFile
        quit 1

      # Let's verify that the necessary files exist before starting to build
      # the accumulator.
      for i in 0 ..< preMergeEpochs:
        let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
        if not isFile(file):
          fatal "Required epoch headers file does not exist", file
          quit 1

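      # Replays every pre-merge epoch headers file in order, feeding each
      # header into the accumulator; optionally the intermediate epoch
      # accumulators are written out along the way.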
      proc buildAccumulator(
          dataDir: string, writeEpochAccumulators = false
      ): Result[FinishedAccumulator, string] =
        var accumulator: Accumulator
        for i in 0 ..< preMergeEpochs:
          let file =
            try:
              dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
            except ValueError as e:
              raiseAssert e.msg

          let fh = ?openFile(file, {OpenFlags.Read}).mapErr(toString)
          defer:
            discard closeFile(fh)

          var data: seq[byte]
          var count = 0'u64
          while true:
            let header = readRecord(fh, data).valueOr:
              break

            if header.typ == ExecutionBlockHeaderRecord:
              let blockHeader =
                try:
                  rlp.decode(data, BlockHeader)
                except RlpError as e:
                  return err("Invalid block header in " & file & ": " & e.msg)

              # Quick sanity check
              if blockHeader.number != i * epochSize + count:
                fatal "Incorrect block headers in file",
                  file = file,
                  blockNumber = blockHeader.number,
                  expectedBlockNumber = i * epochSize + count
                quit 1

              updateAccumulator(accumulator, blockHeader)

              # Note: writing out an epoch accumulator occurs one iteration
              # before the epoch accumulator itself is updated, as the latter
              # happens when a header of the next epoch is passed (or on
              # finishing the epoch).
              if writeEpochAccumulators:
                if accumulator.currentEpoch.len() == epochSize or
                    blockHeader.number == mergeBlockNumber - 1:
                  let file =
                    try:
                      dataDir / &"mainnet-epoch-accumulator-{i.uint64:05}.ssz"
                    except ValueError as e:
                      raiseAssert e.msg
                  let res = io2.writeFile(file, SSZ.encode(accumulator.currentEpoch))
                  if res.isErr():
                    error "Failed writing epoch accumulator to file",
                      file, error = res.error
                  else:
                    notice "Successfully wrote epoch accumulator to file", file

              if count == epochSize - 1:
                info "Updated an epoch", epoch = i
              count.inc()

              if blockHeader.number == mergeBlockNumber - 1:
                let finishedAccumulator = finishAccumulator(accumulator)
                info "Updated last epoch, finished building master accumulator",
                  epoch = i
                return ok(finishedAccumulator)
            else:
              warn "Skipping record, not a block header", typ = toHex(header.typ)

        err("Not enough headers provided to finish the accumulator")

      let accumulatorRes = buildAccumulator(dataDir, config.writeEpochAccumulators)
      if accumulatorRes.isErr():
        fatal "Could not build accumulator", error = accumulatorRes.error
        quit 1
      let accumulator = accumulatorRes.get()

      let res = io2.writeFile(accumulatorFile, SSZ.encode(accumulator))
      if res.isErr():
        error "Failed writing accumulator to file",
          file = accumulatorFile, error = res.error
        quit 1
      else:
        notice "Successfully wrote master accumulator to file", file = accumulatorFile
    of HistoryCmd.printAccumulatorData:
      let file = dataDir / config.accumulatorFileNamePrint

      let res = readAccumulator(file)
      if res.isErr():
        fatal "Failed reading accumulator from file", error = res.error, file
        quit 1

      let
        accumulator = res.get()
        accumulatorRoot = hash_tree_root(accumulator)

      info "Accumulator decoded successfully", root = accumulatorRoot

      echo "Master Accumulator:"
      echo "-------------------"
      echo &"Root: {accumulatorRoot}"
      echo ""
      echo "Historical Epochs:"
      echo "------------------"
      echo "Epoch Root"
      for i, root in accumulator.historicalEpochs:
        echo &"{i.uint64:05} 0x{root.toHex()}"
    of HistoryCmd.exportHeaderRange:
      let client = newRpcClient(config.web3Url)
      let connectRes = waitFor client.connectRpcClient(config.web3Url)
      if connectRes.isErr():
        fatal "Failed connecting to JSON-RPC client", error = connectRes.error
        quit 1

      let
        startBlockNumber = config.startBlockNumber
        endBlockNumber = config.endBlockNumber

      if (endBlockNumber < startBlockNumber):
        fatal "Start block number should not be larger than end block number",
          startBlockNumber, endBlockNumber
        quit 1

      proc exportHeaders(
          file: string, startBlockNumber, endBlockNumber: uint64
      ): Result[void, string] =
        # Downloading headers from JSON RPC endpoint
        info "Requesting headers", startBlockNumber, endBlockNumber
        var headers: seq[BlockHeader]
        for j in startBlockNumber .. endBlockNumber:
          debug "Requesting block", number = j
          let header = client.downloadHeader(j)
          headers.add(header)

        let fh = ?openFile(file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
        defer:
          discard closeFile(fh)

        info "Writing headers to file", file
        for header in headers:
          discard ?fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))

        ok()

      let file =
        try:
          dataDir / &"mainnet-headers-{startBlockNumber:05}-{endBlockNumber:05}.e2s"
        except ValueError as e:
          raiseAssert e.msg

      let res = exportHeaders(file, startBlockNumber, endBlockNumber)
      if res.isErr():
        fatal "Failed exporting headers", error = res.error
        quit 1
    of HistoryCmd.exportHeadersWithProof:
      let
        startBlockNumber = config.startBlockNumber2
        endBlockNumber = config.endBlockNumber2

      if (endBlockNumber < startBlockNumber):
        fatal "Start block number should not be larger than end block number",
          startBlockNumber, endBlockNumber
        quit 1

      let file =
        &"mainnet-headersWithProof-{startBlockNumber:05}-{endBlockNumber:05}.json"
      let fh = createAndOpenFile(string config.dataDir, file)

      var contentTable: JsonPortalContentTable
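      # For each block in the range: locate its epoch headers and epoch
      # accumulator files, build the header inclusion proof and store the
      # result as a Portal content key/value pair.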
      for blockNumber in startBlockNumber .. endBlockNumber:
        let
          epochIndex = getEpochIndex(blockNumber)
          epochHeadersFile = dataDir / &"mainnet-headers-epoch-{epochIndex:05}.e2s"
          epochAccumulatorFile =
            dataDir / &"mainnet-epoch-accumulator-{epochIndex:05}.ssz"

        let res = readBlockHeaders(epochHeadersFile)
        if res.isErr():
          error "Could not read headers epoch file", error = res.error
          quit 1

        let blockHeaders = res.get()

        let epochAccumulatorRes = readEpochAccumulatorCached(epochAccumulatorFile)
        if epochAccumulatorRes.isErr():
          error "Could not read epoch accumulator file",
            error = epochAccumulatorRes.error
          quit 1

        let epochAccumulator = epochAccumulatorRes.get()

        let headerIndex = getHeaderRecordIndex(blockNumber, epochIndex)
        let header = blockHeaders[headerIndex]
        if header.isPreMerge():
          let headerWithProof = buildHeaderWithProof(header, epochAccumulator)
          if headerWithProof.isErr:
            error "Error building proof", error = headerWithProof.error
            quit 1

          let
            content = headerWithProof.get()
            contentKey = ContentKey(
              contentType: blockHeader,
              blockHeaderKey: BlockKey(blockHash: header.blockHash()),
            )
            encodedContentKey = history_content.encode(contentKey)
            encodedContent = SSZ.encode(content)

          let portalContent = JsonPortalContent(
            content_key: encodedContentKey.asSeq().to0xHex(),
            content_value: encodedContent.to0xHex(),
          )

          contentTable[$blockNumber] = portalContent
        else:
          # TODO: Deal with writing post merge headers
          error "Not a pre merge header"
          quit 1

      writePortalContentToJson(fh, contentTable)

      try:
        fh.close()
      except IOError as e:
        fatal "Error occurred while closing file", error = e.msg
        quit 1
    of HistoryCmd.exportEra1:
      cmdExportEra1(config)
    of HistoryCmd.verifyEra1:
      cmdVerifyEra1(config)
  of ExporterCmd.beacon:
    let (cfg, forkDigests, _) = getBeaconData()

    case config.beaconCmd
    of BeaconCmd.exportLCBootstrap:
      waitFor exportLCBootstrapUpdate(
        config.restUrl, string config.dataDir, config.trustedBlockRoot, cfg, forkDigests
      )
    of BeaconCmd.exportLCUpdates:
      waitFor exportLCUpdates(
        config.restUrl,
        string config.dataDir,
        config.startPeriod,
        config.count,
        cfg,
        forkDigests,
      )
    of BeaconCmd.exportLCFinalityUpdate:
      waitFor exportLCFinalityUpdate(
        config.restUrl, string config.dataDir, cfg, forkDigests
      )
    of BeaconCmd.exportLCOptimisticUpdate:
      waitFor exportLCOptimisticUpdate(
        config.restUrl, string config.dataDir, cfg, forkDigests
      )
    of BeaconCmd.exportHistoricalRoots:
      waitFor exportHistoricalRoots(
        config.restUrl, string config.dataDir, cfg, forkDigests
      )
    of BeaconCmd.exportBeaconBlockProof:
      exportBeaconBlockProof(
        string config.dataDir, string config.eraDir, config.slotNumber
      )