Add export of beacon LC data to eth_data_exporter (#1629)

* Add export of beacon LC data to eth_data_exporter

* Update content seeding docs with new eth_data_exporter cli
Kim De Mey 2023-07-05 20:17:03 +02:00 committed by GitHub
parent b3d06c14b6
commit ab13e43db8
5 changed files with 893 additions and 527 deletions


@@ -8,25 +8,22 @@
1. Set up access to an Ethereum JSON-RPC endpoint (e.g. a local geth instance)
that can serve the data.
2. Use the `eth-data-exporter` tool to download and store all block headers into
2. Use the `eth_data_exporter` tool to download and store all block headers into
*.e2s files arranged per epoch (8192 blocks):
```bash
make fluffy-tools
./build/eth_data_exporter exportEpochHeaders --data-dir:"./user_data_dir/"
./build/eth_data_exporter history exportEpochHeaders --data-dir:"./user_data_dir/"
```
This will store all block headers up to the merge block into *.e2s files in
the assigned `--data-dir`.
> Note: Currently only hardcoded address `ws://127.0.0.1:8546` works for the
Ethereum JSON-RPC endpoint.
3. Build the master accumulator and the epoch accumulators:
```bash
./build/eth_data_exporter exportAccumulatorData --writeEpochAccumulators --data-dir:"./user_data_dir/"
./build/eth_data_exporter history exportAccumulatorData --writeEpochAccumulators --data-dir:"./user_data_dir/"
```
#### Step 2: Seed the epoch accumulators into the Portal network
@@ -59,22 +56,19 @@ Run the `content_verifier` tool and see if all epoch accumulators are found:
1. Set up access to an Ethereum JSON-RPC endpoint (e.g. a local geth instance)
that can serve the data.
2. Use the `eth-data-exporter` tool to download history data through the
2. Use the `eth_data_exporter` tool to download history data through the
JSON-RPC endpoint into a format suitable for reading the data into the
Fluffy client and propagating it into the network:
```bash
make fluffy-tools
./build/eth_data_exporter --initial-block:1 --end-block:10 --data-dir:"/user_data_dir/"
./build/eth_data_exporter history exportBlockData --initial-block:1 --end-block:10 --data-dir:"/user_data_dir/"
```
This will store blocks 1 to 10 into a json file located at
`./user_data_dir/eth-history-data.json`.
> Note: Currently only hardcoded address `ws://127.0.0.1:8546` works for the
Ethereum JSON-RPC endpoint.
3. Run Fluffy and trigger the propagation of data with the
`portal_history_propagate` JSON-RPC API call:
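A minimal sketch of such a call, assuming Fluffy's JSON-RPC server listens on
`http://127.0.0.1:8545` and that `portal_history_propagate` takes the path of
the exported JSON file as its only parameter (both are assumptions):
```bash
# Hypothetical invocation: the endpoint address and the single file-path
# parameter are assumptions.
curl -s -X POST http://127.0.0.1:8545 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"portal_history_propagate","params":["./user_data_dir/eth-history-data.json"]}'
```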


@@ -49,183 +49,14 @@ import
../seed_db,
../../premix/[downloader, parser],
../network/history/[history_content, accumulator],
../eth_data/[history_data_json_store, history_data_ssz_e2s]
../eth_data/[history_data_json_store, history_data_ssz_e2s],
eth_data_exporter/[exporter_conf, exporter_common, cl_data_exporter]
# Need to be selective due to the `Block` type conflict from downloader
from ../network/history/history_network import encode
chronicles.formatIt(IoErrorCode): $it
proc defaultDataDir*(): string =
let dataDir = when defined(windows):
"AppData" / "Roaming" / "EthData"
elif defined(macosx):
"Library" / "Application Support" / "EthData"
else:
".cache" / "eth-data"
getHomeDir() / dataDir
type
Web3UrlKind* = enum
HttpUrl, WsUrl
Web3Url* = object
kind*: Web3UrlKind
url*: string
const
defaultDataDirDesc = defaultDataDir()
defaultBlockFileName = "eth-block-data"
defaultAccumulatorFileName = "mainnet-master-accumulator.ssz"
defaultWeb3Url = Web3Url(kind: HttpUrl, url: "http://127.0.0.1:8545")
type
ExporterCmd* = enum
# TODO: Multiline strings don't work here anymore with 1.6, and concat of
# several lines gives the error: Error: Invalid node kind nnkInfix for macros.`$`
exportBlockData =
"Export block data (headers, bodies and receipts) to a json format or a database. Some of this functionality is likely to get deprecated"
exportEpochHeaders =
"Export block headers from an Ethereum JSON RPC Execution endpoint to *.e2s files arranged per epoch (8192 blocks)"
verifyEpochHeaders =
"Verify *.e2s files containing block headers. Verify currently only means being able to RLP decode the block headers"
exportAccumulatorData =
"Build and export the master accumulator and historical epoch accumulators. Requires *.e2s block header files generated with the exportHeaders command up until the merge block"
printAccumulatorData =
"Print the root hash of the master accumulator and of all historical epoch accumulators. Requires data generated by exportAccumulatorData command"
exportHeaderRange =
"Export block headers from an Ethereum JSON RPC Execution endpoint to *.e2s files (unlimited amount)"
exportHeadersWithProof =
"Export block headers with proof from *.e2s headers file and epochAccumulator files"
StorageMode* = enum
Json, Db
ExporterConf* = object
logLevel* {.
defaultValue: LogLevel.INFO
defaultValueDesc: $LogLevel.INFO
desc: "Sets the log level"
name: "log-level" .}: LogLevel
dataDir* {.
desc: "The directory where generated data files will be exported to"
defaultValue: defaultDataDir()
defaultValueDesc: $defaultDataDirDesc
name: "data-dir" .}: OutDir
web3Url* {.
desc: "Execution layer JSON-RPC API URL"
defaultValue: defaultWeb3Url
name: "web3-url" .}: Web3Url
case cmd* {.
command
defaultValue: exportBlockData .}: ExporterCmd
of exportBlockData:
startBlock* {.
desc: "Number of the first block to be exported"
defaultValue: 0
name: "start-block" .}: uint64
endBlock* {.
desc: "Number of the last block to be exported"
defaultValue: 0
name: "end-block" .}: uint64
fileName* {.
desc: "File name (minus extension) where block data will be exported to"
defaultValue: defaultBlockFileName
defaultValueDesc: $defaultBlockFileName
name: "file-name" .}: string
storageMode* {.
desc: "Storage mode of block data export"
defaultValue: Json
name: "storage-mode" .}: StorageMode
headersOnly* {.
desc: "Only export the headers instead of full blocks and receipts"
defaultValue: false
name: "headers-only" .}: bool
of exportEpochHeaders:
startEpoch* {.
desc: "Number of the first epoch which should be downloaded"
defaultValue: 0
name: "start-epoch" .}: uint64
endEpoch* {.
desc: "Number of the last epoch which should be downloaded"
defaultValue: 1896
name: "end-epoch" .}: uint64
# TODO:
# Although options are the same as for exportHeaders, we can't drop them
# under the same case of as confutils does not agree with that.
of verifyEpochHeaders:
startEpochVerify* {.
desc: "Number of the first epoch which should be downloaded"
defaultValue: 0
name: "start-epoch" .}: uint64
endEpochVerify* {.
desc: "Number of the last epoch which should be downloaded"
defaultValue: 1896
name: "end-epoch" .}: uint64
of exportAccumulatorData:
accumulatorFileName* {.
desc: "File to which the serialized accumulator is written"
defaultValue: defaultAccumulatorFileName
defaultValueDesc: $defaultAccumulatorFileName
name: "accumulator-file-name" .}: string
writeEpochAccumulators* {.
desc: "Write also the SSZ encoded epoch accumulators to specific files"
defaultValue: false
name: "write-epoch-accumulators" .}: bool
of printAccumulatorData:
accumulatorFileNamePrint* {.
desc: "File from which the serialized accumulator is read"
defaultValue: defaultAccumulatorFileName
defaultValueDesc: $defaultAccumulatorFileName
name: "accumulator-file-name" .}: string
of exportHeaderRange:
startBlockNumber* {.
desc: "Number of the first block header to be exported"
name: "start-block" .}: uint64
endBlockNumber* {.
desc: "Number of the last block header to be exported"
name: "end-block" .}: uint64
of exportHeadersWithProof:
startBlockNumber2* {.
desc: "Number of the first block header to be exported"
name: "start-block" .}: uint64
endBlockNumber2* {.
desc: "Number of the last block header to be exported"
name: "end-block" .}: uint64
proc parseCmdArg*(
T: type Web3Url, p: string): T {.raises: [ConfigurationError].} =
let
url = parseUri(p)
normalizedScheme = url.scheme.toLowerAscii()
if (normalizedScheme == "http" or normalizedScheme == "https"):
Web3Url(kind: HttpUrl, url: p)
elif (normalizedScheme == "ws" or normalizedScheme == "wss"):
Web3Url(kind: WsUrl, url: p)
else:
raise newException(
ConfigurationError,
"The Web3 URL must specify one of following protocols: http/https/ws/wss"
)
proc completeCmdArg*(T: type Web3Url, val: string): seq[string] =
return @[]
proc parseCmdArg*(T: type StorageMode, p: string): T
{.raises: [ConfigurationError].} =
if p == "db":
return Db
elif p == "json":
return Json
else:
let msg = "Provided mode: " & p & " is not a valid. Should be `json` or `db`"
raise newException(ConfigurationError, msg)
proc completeCmdArg*(T: type StorageMode, val: string): seq[string] =
return @[]
proc downloadHeader(client: RpcClient, i: uint64): BlockHeader =
let blockNumber = u256(i)
try:
@@ -243,34 +74,6 @@ proc downloadBlock(i: uint64, client: RpcClient): Block =
fatal "Error while requesting Block", error = e.msg, number = i
quit 1
proc createAndOpenFile(dataDir: string, fileName: string): OutputStreamHandle =
# Creates the directory and file; if the file already exists the
# program is aborted with info for the user, to avoid losing data
let fileName: string =
if not fileName.endsWith(".json"):
fileName & ".json"
else:
fileName
let filePath = dataDir / fileName
if isFile(filePath):
fatal "File under provided path already exists and would be overwritten",
path = filePath
quit 1
let res = createPath(dataDir)
if res.isErr():
fatal "Error occurred while creating directory",
error = ioErrorMsg(res.error)
quit 1
try:
return fileOutput(filePath)
except IOError as e:
fatal "Error occurred while opening the file", error = e.msg
quit 1
proc writeHeadersToJson(config: ExporterConf, client: RpcClient) =
let fh = createAndOpenFile(string config.dataDir, string config.fileName)
@@ -350,12 +153,12 @@ proc writeBlocksToDb(config: ExporterConf, client: RpcClient) =
proc exportBlocks(config: ExporterConf, client: RpcClient) =
case config.storageMode
of Json:
of JsonStorage:
if config.headersOnly:
writeHeadersToJson(config, client)
else:
writeBlocksToJson(config, client)
of Db:
of DbStorage:
if config.headersOnly:
fatal "Db mode not available for headers only"
quit 1
@@ -406,346 +209,353 @@ when isMainModule:
quit 1
case config.cmd
of ExporterCmd.exportBlockData:
let client = newRpcClient(config.web3Url)
let connectRes = waitFor client.connectRpcClient(config.web3Url)
if connectRes.isErr():
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
quit 1
if (config.endBlock < config.startBlock):
fatal "Initial block number should be smaller than end block number",
startBlock = config.startBlock,
endBlock = config.endBlock
quit 1
try:
exportBlocks(config, client)
finally:
waitFor client.close()
of ExporterCmd.exportEpochHeaders:
let client = newRpcClient(config.web3Url)
let connectRes = waitFor client.connectRpcClient(config.web3Url)
if connectRes.isErr():
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
quit 1
proc exportEpochHeaders(file: string, epoch: uint64): Result[void, string] =
# Downloading headers from JSON RPC endpoint
info "Requesting epoch headers", epoch
var headers: seq[BlockHeader]
for j in 0..<epochSize.uint64:
debug "Requesting block", number = j
let header = client.downloadHeader(epoch*epochSize + j)
headers.add(header)
let fh = ? openFile(file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
defer: discard closeFile(fh)
info "Writing headers to file", file
for header in headers:
discard ? fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))
ok()
# TODO: Could make the JSON-RPC requests concurrent per epoch.
# Batching would also be nice but our json-rpc does not support that:
# https://geth.ethereum.org/docs/rpc/batch
for i in config.startEpoch..config.endEpoch:
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
if isFile(file):
notice "Skipping epoch headers, file already exists", file
else:
let res = exportEpochHeaders(file, i)
if res.isErr():
error "Failed exporting epoch headers", file, error = res.error
waitFor client.close()
of ExporterCmd.verifyEpochHeaders:
proc verifyEpochHeaders(file: string, epoch: uint64): Result[void, string] =
let fh = ? openFile(file, {OpenFlags.Read}).mapErr(toString)
defer: discard closeFile(fh)
var data: seq[byte]
while true:
let header = readRecord(fh, data).valueOr:
break
if header.typ == ExecutionBlockHeaderRecord:
let
blockHeader =
try:
rlp.decode(data, BlockHeader)
except RlpError as e:
return err("Invalid block header: " & e.msg)
headerHash = to0xHex(rlpHash(blockHeader).data)
debug "Header decoded successfully",
hash = headerHash, blockNumber = blockHeader.blockNumber
else:
warn "Skipping record, not a block header", typ = toHex(header.typ)
ok()
for i in config.startEpochVerify..config.endEpochVerify:
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
let res = verifyEpochHeaders(file, i)
if res.isErr():
error "Failed verifying epoch headers", file, error = res.error
else:
info "Successfully decoded epoch headers", file
of ExporterCmd.exportAccumulatorData:
# Let's first check if the accumulator file already exists before starting
# to build it.
let accumulatorFile = dataDir / config.accumulatorFileName
if isFile(accumulatorFile):
notice "Not building accumulator, file already exists",
file = accumulatorFile
quit 1
# Let's verify that the necessary files exist before starting to build the
# accumulator.
for i in 0..<preMergeEpochs:
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
if not isFile(file):
fatal "Required epoch headers file does not exist", file
of ExporterCmd.history:
case config.historyCmd
of HistoryCmd.exportBlockData:
let client = newRpcClient(config.web3Url)
let connectRes = waitFor client.connectRpcClient(config.web3Url)
if connectRes.isErr():
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
quit 1
proc buildAccumulator(dataDir: string, writeEpochAccumulators = false):
Result[FinishedAccumulator, string] =
var accumulator: Accumulator
for i in 0..<preMergeEpochs:
let file =
try: dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
except ValueError as e: raiseAssert e.msg
if (config.endBlock < config.startBlock):
fatal "Initial block number should be smaller than end block number",
startBlock = config.startBlock,
endBlock = config.endBlock
quit 1
try:
exportBlocks(config, client)
finally:
waitFor client.close()
of HistoryCmd.exportEpochHeaders:
let client = newRpcClient(config.web3Url)
let connectRes = waitFor client.connectRpcClient(config.web3Url)
if connectRes.isErr():
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
quit 1
proc exportEpochHeaders(file: string, epoch: uint64): Result[void, string] =
# Downloading headers from JSON RPC endpoint
info "Requesting epoch headers", epoch
var headers: seq[BlockHeader]
for j in 0..<epochSize.uint64:
debug "Requesting block", number = j
let header = client.downloadHeader(epoch*epochSize + j)
headers.add(header)
let fh = ? openFile(file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
defer: discard closeFile(fh)
info "Writing headers to file", file
for header in headers:
discard ? fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))
ok()
# TODO: Could make the JSON-RPC requests concurrent per epoch.
# Batching would also be nice but our json-rpc does not support that:
# https://geth.ethereum.org/docs/rpc/batch
for i in config.startEpoch..config.endEpoch:
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
if isFile(file):
notice "Skipping epoch headers, file already exists", file
else:
let res = exportEpochHeaders(file, i)
if res.isErr():
error "Failed exporting epoch headers", file, error = res.error
waitFor client.close()
of HistoryCmd.verifyEpochHeaders:
proc verifyEpochHeaders(file: string, epoch: uint64): Result[void, string] =
let fh = ? openFile(file, {OpenFlags.Read}).mapErr(toString)
defer: discard closeFile(fh)
var data: seq[byte]
var count = 0'u64
while true:
let header = readRecord(fh, data).valueOr:
break
if header.typ == ExecutionBlockHeaderRecord:
let blockHeader =
try:
rlp.decode(data, BlockHeader)
except RlpError as e:
return err("Invalid block header in " & file & ": " & e.msg)
let
blockHeader =
try:
rlp.decode(data, BlockHeader)
except RlpError as e:
return err("Invalid block header: " & e.msg)
# Quick sanity check
if blockHeader.blockNumber.truncate(uint64) != i*epochSize + count:
fatal "Incorrect block headers in file", file = file,
blockNumber = blockHeader.blockNumber,
expectedBlockNumber = i*epochSize + count
quit 1
updateAccumulator(accumulator, blockHeader)
# Note: writing away of epoch accumulators occurs 1 iteration before
# updating the epoch accumulator, as the latter happens when passed
# a header for the next epoch (or on finishing the epoch).
if writeEpochAccumulators:
if accumulator.currentEpoch.len() == epochSize or
blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
let file =
try: dataDir / &"mainnet-epoch-accumulator-{i.uint64:05}.ssz"
except ValueError as e: raiseAssert e.msg
let res = io2.writeFile(file, SSZ.encode(accumulator.currentEpoch))
if res.isErr():
error "Failed writing epoch accumulator to file",
file, error = res.error
else:
notice "Succesfully wrote epoch accumulator to file", file
if count == epochSize - 1:
info "Updated an epoch", epoch = i
count.inc()
if blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
let finishedAccumulator = finishAccumulator(accumulator)
info "Updated last epoch, finished building master accumulator",
epoch = i
return ok(finishedAccumulator)
headerHash = to0xHex(rlpHash(blockHeader).data)
debug "Header decoded successfully",
hash = headerHash, blockNumber = blockHeader.blockNumber
else:
warn "Skipping record, not a block header", typ = toHex(header.typ)
err("Not enough headers provided to finish the accumulator")
ok()
let accumulatorRes = buildAccumulator(dataDir, config.writeEpochAccumulators)
if accumulatorRes.isErr():
fatal "Could not build accumulator", error = accumulatorRes.error
quit 1
let accumulator = accumulatorRes.get()
for i in config.startEpochVerify..config.endEpochVerify:
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
let res = verifyEpochHeaders(file, i)
if res.isErr():
error "Failed verifying epoch headers", file, error = res.error
else:
info "Successfully decoded epoch headers", file
let res = io2.writeFile(accumulatorFile, SSZ.encode(accumulator))
if res.isErr():
error "Failed writing accumulator to file",
file = accumulatorFile, error = res.error
quit 1
else:
notice "Succesfully wrote master accumulator to file",
file = accumulatorFile
of ExporterCmd.printAccumulatorData:
let file = dataDir / config.accumulatorFileNamePrint
let res = readAccumulator(file)
if res.isErr():
fatal "Failed reading accumulator from file", error = res.error, file
quit 1
let
accumulator = res.get()
accumulatorRoot = hash_tree_root(accumulator)
info "Accumulator decoded successfully",
root = accumulatorRoot
echo "Master Accumulator:"
echo "-------------------"
echo &"Root: {accumulatorRoot}"
echo ""
echo "Historical Epochs:"
echo "------------------"
echo "Epoch Root"
for i, root in accumulator.historicalEpochs:
echo &"{i.uint64:05} 0x{root.toHex()}"
of ExporterCmd.exportHeaderRange:
let client = newRpcClient(config.web3Url)
let connectRes = waitFor client.connectRpcClient(config.web3Url)
if connectRes.isErr():
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
quit 1
let
startBlockNumber = config.startBlockNumber
endBlockNumber = config.endBlockNumber
if (endBlockNumber < startBlockNumber):
fatal "Start block number should be smaller than end block number",
startBlockNumber, endBlockNumber
quit 1
proc exportHeaders(
file: string, startBlockNumber, endBlockNumber: uint64):
Result[void, string] =
# Downloading headers from JSON RPC endpoint
info "Requesting headers", startBlockNumber, endBlockNumber
var headers: seq[BlockHeader]
for j in startBlockNumber..endBlockNumber:
debug "Requesting block", number = j
let header = client.downloadHeader(j)
headers.add(header)
let fh = ? openFile(
file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
defer: discard closeFile(fh)
info "Writing headers to file", file
for header in headers:
discard ? fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))
ok()
let file =
try: dataDir / &"mainnet-headers-{startBlockNumber:05}-{endBlockNumber:05}.e2s"
except ValueError as e: raiseAssert e.msg
let res = exportHeaders(file, startBlockNumber, endBlockNumber)
if res.isErr():
fatal "Failed exporting headers", error = res.error
quit 1
of ExporterCmd.exportHeadersWithProof:
let
startBlockNumber = config.startBlockNumber2
endBlockNumber = config.endBlockNumber2
if (endBlockNumber < startBlockNumber):
fatal "Start block number should be smaller than end block number",
startBlockNumber, endBlockNumber
quit 1
type
JsonPortalContent = object
content_key*: string
content_value*: string
JsonPortalContentTable = OrderedTable[uint64, JsonPortalContent]
proc writePortalContentToJson(
fh: OutputStreamHandle, content: JsonPortalContentTable) =
try:
var writer = JsonWriter[DefaultFlavor].init(fh.s, pretty = true)
writer.writeValue(content)
except IOError as e:
fatal "Error occured while writing to file", error = e.msg
of HistoryCmd.exportAccumulatorData:
# Let's first check if the accumulator file already exists before starting
# to build it.
let accumulatorFile = dataDir / config.accumulatorFileName
if isFile(accumulatorFile):
notice "Not building accumulator, file already exists",
file = accumulatorFile
quit 1
let file = &"mainnet-headersWithProof-{startBlockNumber:05}-{endBlockNumber:05}.json"
let fh = createAndOpenFile(string config.dataDir, file)
var contentTable: JsonPortalContentTable
for blockNumber in startBlockNumber..endBlockNumber:
let
epochIndex = getEpochIndex(blockNumber)
epochHeadersFile =
dataDir / &"mainnet-headers-epoch-{epochIndex:05}.e2s"
epochAccumulatorFile =
dataDir / &"mainnet-epoch-accumulator-{epochIndex:05}.ssz"
let res = readBlockHeaders(epochHeadersFile)
if res.isErr():
error "Could not read headers epoch file", error = res.error
quit 1
let blockHeaders = res.get()
let epochAccumulatorRes = readEpochAccumulatorCached(epochAccumulatorFile)
if epochAccumulatorRes.isErr():
error "Could not read epoch accumulator file", error = res.error
quit 1
let epochAccumulator = epochAccumulatorRes.get()
let headerIndex = getHeaderRecordIndex(blockNumber, epochIndex)
let header = blockHeaders[headerIndex]
if header.isPreMerge():
let headerWithProof = buildHeaderWithProof(header, epochAccumulator)
if headerWithProof.isErr:
error "Error building proof", error = headerWithProof.error
# Let's verify that the necessary files exist before starting to build the
# accumulator.
for i in 0..<preMergeEpochs:
let file = dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
if not isFile(file):
fatal "Required epoch headers file does not exist", file
quit 1
let
content = headerWithProof.get()
contentKey = ContentKey(
contentType: blockHeader,
blockHeaderKey: BlockKey(blockHash: header.blockHash()))
encodedContentKey = history_content.encode(contentKey)
encodedContent = SSZ.encode(content)
proc buildAccumulator(dataDir: string, writeEpochAccumulators = false):
Result[FinishedAccumulator, string] =
var accumulator: Accumulator
for i in 0..<preMergeEpochs:
let file =
try: dataDir / &"mainnet-headers-epoch-{i.uint64:05}.e2s"
except ValueError as e: raiseAssert e.msg
let portalContent = JsonPortalContent(
content_key: encodedContentKey.asSeq().to0xHex(),
content_value: encodedContent.to0xHex())
let fh = ? openFile(file, {OpenFlags.Read}).mapErr(toString)
defer: discard closeFile(fh)
contentTable[blockNumber] = portalContent
var data: seq[byte]
var count = 0'u64
while true:
let header = readRecord(fh, data).valueOr:
break
if header.typ == ExecutionBlockHeaderRecord:
let blockHeader =
try:
rlp.decode(data, BlockHeader)
except RlpError as e:
return err("Invalid block header in " & file & ": " & e.msg)
# Quick sanity check
if blockHeader.blockNumber.truncate(uint64) != i*epochSize + count:
fatal "Incorrect block headers in file", file = file,
blockNumber = blockHeader.blockNumber,
expectedBlockNumber = i*epochSize + count
quit 1
updateAccumulator(accumulator, blockHeader)
# Note: writing away of epoch accumulators occurs 1 iteration before
# updating the epoch accumulator, as the latter happens when passed
# a header for the next epoch (or on finishing the epoch).
if writeEpochAccumulators:
if accumulator.currentEpoch.len() == epochSize or
blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
let file =
try: dataDir / &"mainnet-epoch-accumulator-{i.uint64:05}.ssz"
except ValueError as e: raiseAssert e.msg
let res = io2.writeFile(file, SSZ.encode(accumulator.currentEpoch))
if res.isErr():
error "Failed writing epoch accumulator to file",
file, error = res.error
else:
notice "Succesfully wrote epoch accumulator to file", file
if count == epochSize - 1:
info "Updated an epoch", epoch = i
count.inc()
if blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
let finishedAccumulator = finishAccumulator(accumulator)
info "Updated last epoch, finished building master accumulator",
epoch = i
return ok(finishedAccumulator)
else:
warn "Skipping record, not a block header", typ = toHex(header.typ)
err("Not enough headers provided to finish the accumulator")
let accumulatorRes = buildAccumulator(dataDir, config.writeEpochAccumulators)
if accumulatorRes.isErr():
fatal "Could not build accumulator", error = accumulatorRes.error
quit 1
let accumulator = accumulatorRes.get()
let res = io2.writeFile(accumulatorFile, SSZ.encode(accumulator))
if res.isErr():
error "Failed writing accumulator to file",
file = accumulatorFile, error = res.error
quit 1
else:
# TODO: Deal with writing post merge headers
error "Not a pre merge header"
notice "Succesfully wrote master accumulator to file",
file = accumulatorFile
of HistoryCmd.printAccumulatorData:
let file = dataDir / config.accumulatorFileNamePrint
let res = readAccumulator(file)
if res.isErr():
fatal "Failed reading accumulator from file", error = res.error, file
quit 1
writePortalContentToJson(fh, contentTable)
let
accumulator = res.get()
accumulatorRoot = hash_tree_root(accumulator)
try:
fh.close()
except IOError as e:
fatal "Error occured while closing file", error = e.msg
quit 1
info "Accumulator decoded successfully",
root = accumulatorRoot
echo "Master Accumulator:"
echo "-------------------"
echo &"Root: {accumulatorRoot}"
echo ""
echo "Historical Epochs:"
echo "------------------"
echo "Epoch Root"
for i, root in accumulator.historicalEpochs:
echo &"{i.uint64:05} 0x{root.toHex()}"
of HistoryCmd.exportHeaderRange:
let client = newRpcClient(config.web3Url)
let connectRes = waitFor client.connectRpcClient(config.web3Url)
if connectRes.isErr():
fatal "Failed connecting to JSON-RPC client", error = connectRes.error
quit 1
let
startBlockNumber = config.startBlockNumber
endBlockNumber = config.endBlockNumber
if (endBlockNumber < startBlockNumber):
fatal "Start block number should be smaller than end block number",
startBlockNumber, endBlockNumber
quit 1
proc exportHeaders(
file: string, startBlockNumber, endBlockNumber: uint64):
Result[void, string] =
# Downloading headers from JSON RPC endpoint
info "Requesting headers", startBlockNumber, endBlockNumber
var headers: seq[BlockHeader]
for j in startBlockNumber..endBlockNumber:
debug "Requesting block", number = j
let header = client.downloadHeader(j)
headers.add(header)
let fh = ? openFile(
file, {OpenFlags.Write, OpenFlags.Create}).mapErr(toString)
defer: discard closeFile(fh)
info "Writing headers to file", file
for header in headers:
discard ? fh.appendRecord(ExecutionBlockHeaderRecord, rlp.encode(header))
ok()
let file =
try: dataDir / &"mainnet-headers-{startBlockNumber:05}-{endBlockNumber:05}.e2s"
except ValueError as e: raiseAssert e.msg
let res = exportHeaders(file, startBlockNumber, endBlockNumber)
if res.isErr():
fatal "Failed exporting headers", error = res.error
quit 1
of HistoryCmd.exportHeadersWithProof:
let
startBlockNumber = config.startBlockNumber2
endBlockNumber = config.endBlockNumber2
if (endBlockNumber < startBlockNumber):
fatal "Start block number should be smaller than end block number",
startBlockNumber, endBlockNumber
quit 1
let file = &"mainnet-headersWithProof-{startBlockNumber:05}-{endBlockNumber:05}.json"
let fh = createAndOpenFile(string config.dataDir, file)
var contentTable: JsonPortalContentTable
for blockNumber in startBlockNumber..endBlockNumber:
let
epochIndex = getEpochIndex(blockNumber)
epochHeadersFile =
dataDir / &"mainnet-headers-epoch-{epochIndex:05}.e2s"
epochAccumulatorFile =
dataDir / &"mainnet-epoch-accumulator-{epochIndex:05}.ssz"
let res = readBlockHeaders(epochHeadersFile)
if res.isErr():
error "Could not read headers epoch file", error = res.error
quit 1
let blockHeaders = res.get()
let epochAccumulatorRes = readEpochAccumulatorCached(epochAccumulatorFile)
if epochAccumulatorRes.isErr():
error "Could not read epoch accumulator file", error = res.error
quit 1
let epochAccumulator = epochAccumulatorRes.get()
let headerIndex = getHeaderRecordIndex(blockNumber, epochIndex)
let header = blockHeaders[headerIndex]
if header.isPreMerge():
let headerWithProof = buildHeaderWithProof(header, epochAccumulator)
if headerWithProof.isErr:
error "Error building proof", error = headerWithProof.error
quit 1
let
content = headerWithProof.get()
contentKey = ContentKey(
contentType: blockHeader,
blockHeaderKey: BlockKey(blockHash: header.blockHash()))
encodedContentKey = history_content.encode(contentKey)
encodedContent = SSZ.encode(content)
let portalContent = JsonPortalContent(
content_key: encodedContentKey.asSeq().to0xHex(),
content_value: encodedContent.to0xHex())
contentTable[blockNumber] = portalContent
else:
# TODO: Deal with writing post merge headers
error "Not a pre merge header"
quit 1
writePortalContentToJson(fh, contentTable)
try:
fh.close()
except IOError as e:
fatal "Error occured while closing file", error = e.msg
quit 1
of ExporterCmd.beacon:
let (cfg, forkDigests) = getBeaconData()
case config.beaconCmd
of BeaconCmd.exportLCBootstrap:
waitFor exportLCBootstrapUpdate(
config.restUrl, string config.dataDir,
config.trustedBlockRoot,
cfg, forkDigests)
of BeaconCmd.exportLCUpdates:
waitFor exportLCUpdates(
config.restUrl, string config.dataDir,
config.startPeriod, config.count,
cfg, forkDigests)
of BeaconCmd.exportLCFinalityUpdate:
waitFor exportLCFinalityUpdate(
config.restUrl, string config.dataDir, cfg, forkDigests)
of BeaconCmd.exportLCOptimisticUpdate:
waitFor exportLCOptimisticUpdate(
config.restUrl, string config.dataDir, cfg, forkDigests)
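
By analogy with the `history` subcommands in the seeding docs above, a hedged
sketch of invoking the new `beacon` export; the flag names come from the new
exporter configuration (`--rest-url`, `--data-dir`, `--trusted-block-root`) and
the block root value is a placeholder:
```bash
# Sketch only: the trusted block root is a placeholder, the REST URL is the
# configured default beacon node endpoint.
./build/eth_data_exporter beacon exportLCBootstrap \
  --rest-url:"http://127.0.0.1:5052" \
  --data-dir:"./user_data_dir/" \
  --trusted-block-root:<trusted-finalized-block-root>
```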


@@ -0,0 +1,280 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
chronicles, chronos,
stew/byteutils,
eth/async_utils,
beacon_chain/networking/network_metadata,
beacon_chain/spec/eth2_apis/rest_beacon_client,
../../network/beacon_light_client/beacon_light_client_content,
./exporter_common
const
restRequestsTimeout = 30.seconds
proc getBeaconData*(): (RuntimeConfig, ref ForkDigests) {.raises: [IOError].} =
let
metadata = getMetadataForNetwork("mainnet")
genesisState =
try:
template genesisData(): auto = metadata.genesisData
newClone(readSszForkedHashedBeaconState(
metadata.cfg,
genesisData.toOpenArray(genesisData.low, genesisData.high)))
except CatchableError as err:
raiseAssert "Invalid baked-in state: " & err.msg
genesis_validators_root =
getStateField(genesisState[], genesis_validators_root)
forkDigests = newClone ForkDigests.init(
metadata.cfg, genesis_validators_root)
return (metadata.cfg, forkDigests)
func forkDigestAtEpoch(
forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig): ForkDigest =
forkDigests.atEpoch(epoch, cfg)
proc exportLCBootstrapUpdate*(
restUrl: string, dataDir: string,
trustedBlockRoot: Eth2Digest,
cfg: RuntimeConfig, forkDigests: ref ForkDigests) {.async.} =
let file = "light-client-bootstrap.json"
let fh = createAndOpenFile(dataDir, file)
defer:
try:
fh.close()
except IOError as e:
fatal "Error occured while closing file", error = e.msg
quit 1
var
client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", error = error
quit 1
var contentTable: JsonPortalContentTable
var update =
try:
notice "Downloading LC bootstrap"
awaitWithTimeout(
client.getLightClientBootstrap(
trustedBlockRoot,
cfg, forkDigests),
restRequestsTimeout
):
error "Attempt to download LC bootstrap timed out"
quit 1
except CatchableError as exc:
error "Unable to download LC bootstrap", error = exc.msg
quit 1
withForkyObject(update):
when lcDataFork > LightClientDataFork.None:
let
slot = forkyObject.header.beacon.slot
contentKey = encode(bootstrapContentKey(trustedBlockRoot))
forkDigest = forkDigestAtEpoch(
forkDigests[], epoch(slot), cfg)
content = encodeBootstrapForked(
forkDigest,
update
)
let portalContent = JsonPortalContent(
content_key: contentKey.asSeq().to0xHex(),
content_value: content.to0xHex()
)
var contentTable: JsonPortalContentTable
contentTable[slot.uint64] = portalContent
writePortalContentToJson(fh, contentTable)
proc exportLCUpdates*(
restUrl: string, dataDir: string,
startPeriod: uint64, count: uint64,
cfg: RuntimeConfig, forkDigests: ref ForkDigests) {.async.} =
let file = "light-client-updates.json"
let fh = createAndOpenFile(dataDir, file)
defer:
try:
fh.close()
except IOError as e:
fatal "Error occured while closing file", error = e.msg
quit 1
var
client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", error = error
quit 1
var contentTable: JsonPortalContentTable
var updates =
try:
notice "Downloading LC updates"
awaitWithTimeout(
client.getLightClientUpdatesByRange(
SyncCommitteePeriod(startPeriod), count, cfg, forkDigests),
restRequestsTimeout
):
error "Attempt to download LC updates timed out"
quit 1
except CatchableError as exc:
error "Unable to download LC updates", error = exc.msg
quit 1
if updates.len() > 0:
withForkyObject(updates[0]):
when lcDataFork > LightClientDataFork.None:
let
slot = forkyObject.attested_header.beacon.slot
period = forkyObject.attested_header.beacon.slot.sync_committee_period
contentKey = encode(updateContentKey(period.uint64, uint64(1)))
forkDigest = forkDigestAtEpoch(
forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
content = encodeLightClientUpdatesForked(
forkDigest,
updates
)
let portalContent = JsonPortalContent(
content_key: contentKey.asSeq().to0xHex(),
content_value: content.to0xHex()
)
var contentTable: JsonPortalContentTable
contentTable[slot.uint64] = portalContent
writePortalContentToJson(fh, contentTable)
else:
error "No updates downloaded"
quit 1
proc exportLCFinalityUpdate*(
restUrl: string, dataDir: string,
cfg: RuntimeConfig, forkDigests: ref ForkDigests) {.async.} =
let file = "light-client-finality-update.json"
let fh = createAndOpenFile(dataDir, file)
defer:
try:
fh.close()
except IOError as e:
fatal "Error occured while closing file", error = e.msg
quit 1
var
client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", error = error
quit 1
var contentTable: JsonPortalContentTable
var update =
try:
notice "Downloading LC finality update"
awaitWithTimeout(
client.getLightClientFinalityUpdate(
cfg, forkDigests),
restRequestsTimeout
):
error "Attempt to download LC finality update timed out"
quit 1
except CatchableError as exc:
error "Unable to download LC finality update", error = exc.msg
quit 1
withForkyObject(update):
when lcDataFork > LightClientDataFork.None:
let
finalizedSlot = forkyObject.finalized_header.beacon.slot
optimisticSlot = forkyObject.attested_header.beacon.slot
contentKey = encode(finalityUpdateContentKey(
finalizedSlot.uint64, optimisticSlot.uint64))
contentId = beacon_light_client_content.toContentId(contentKey)
forkDigest = forkDigestAtEpoch(
forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
content = encodeFinalityUpdateForked(
forkDigest,
update
)
let portalContent = JsonPortalContent(
content_key: contentKey.asSeq().to0xHex(),
content_value: content.to0xHex()
)
var contentTable: JsonPortalContentTable
contentTable[optimisticSlot.uint64] = portalContent
writePortalContentToJson(fh, contentTable)
proc exportLCOptimisticUpdate*(
restUrl: string, dataDir: string,
cfg: RuntimeConfig, forkDigests: ref ForkDigests) {.async.} =
let file = "light-client-optimistic-update.json"
let fh = createAndOpenFile(dataDir, file)
defer:
try:
fh.close()
except IOError as e:
fatal "Error occured while closing file", error = e.msg
quit 1
var
client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", error = error
quit 1
var contentTable: JsonPortalContentTable
var update =
try:
notice "Downloading LC optimistic update"
awaitWithTimeout(
client.getLightClientOptimisticUpdate(
cfg, forkDigests),
restRequestsTimeout
):
error "Attempt to download LC optimistic update timed out"
quit 1
except CatchableError as exc:
error "Unable to download LC optimistic update", error = exc.msg
quit 1
withForkyObject(update):
when lcDataFork > LightClientDataFork.None:
let
slot = forkyObject.attested_header.beacon.slot
contentKey = encode(optimisticUpdateContentKey(slot.uint64))
contentId = beacon_light_client_content.toContentId(contentKey)
forkDigest = forkDigestAtEpoch(
forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
content = encodeOptimisticUpdateForked(
forkDigest,
update
)
let portalContent = JsonPortalContent(
content_key: contentKey.asSeq().to0xHex(),
content_value: content.to0xHex()
)
var contentTable: JsonPortalContentTable
contentTable[slot.uint64] = portalContent
writePortalContentToJson(fh, contentTable)
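
A similar hedged sketch for the light client update export, using the
`--start-period` and `--count` options defined in the new exporter
configuration (the period value below is an arbitrary example):
```bash
# Sketch only: exports `count` LC updates starting at the given sync committee
# period and writes them to light-client-updates.json in the data dir.
./build/eth_data_exporter beacon exportLCUpdates \
  --rest-url:"http://127.0.0.1:5052" \
  --data-dir:"./user_data_dir/" \
  --start-period:700 \
  --count:1
```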


@@ -0,0 +1,59 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
std/[strutils, os],
chronicles,
stew/io2,
json_serialization, json_serialization/std/tables,
faststreams
type
JsonPortalContent* = object
content_key*: string
content_value*: string
JsonPortalContentTable* = OrderedTable[uint64, JsonPortalContent]
proc writePortalContentToJson*(
fh: OutputStreamHandle, content: JsonPortalContentTable) =
try:
var writer = JsonWriter[DefaultFlavor].init(fh.s, pretty = true)
writer.writeValue(content)
except IOError as e:
fatal "Error occured while writing to file", error = e.msg
quit 1
proc createAndOpenFile*(dataDir: string, fileName: string): OutputStreamHandle =
# Creates the directory and file; if the file already exists the
# program is aborted with info for the user, to avoid losing data
let fileName: string =
if not fileName.endsWith(".json"):
fileName & ".json"
else:
fileName
let filePath = dataDir / fileName
if isFile(filePath):
fatal "File under provided path already exists and would be overwritten",
path = filePath
quit 1
let res = createPath(dataDir)
if res.isErr():
fatal "Error occurred while creating directory",
error = ioErrorMsg(res.error)
quit 1
try:
return fileOutput(filePath)
except IOError as e:
fatal "Error occurred while opening the file", error = e.msg
quit 1
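
For a quick check of what these helpers produce, a hedged sketch of inspecting
an exported file; the table maps a numeric key (block number or slot) to
hex-encoded `content_key`/`content_value` pairs, and the file name follows the
bootstrap export above:
```bash
# Sketch only: requires jq; prints the first key/value entry of the exported table.
jq 'to_entries | first' "./user_data_dir/light-client-bootstrap.json"
```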


@@ -0,0 +1,223 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
std/[os, uri],
confutils, chronicles,
beacon_chain/spec/digest
proc defaultDataDir*(): string =
let dataDir = when defined(windows):
"AppData" / "Roaming" / "EthData"
elif defined(macosx):
"Library" / "Application Support" / "EthData"
else:
".cache" / "eth-data"
getHomeDir() / dataDir
type
Web3UrlKind* = enum
HttpUrl, WsUrl
Web3Url* = object
kind*: Web3UrlKind
url*: string
StorageMode* = enum
JsonStorage, DbStorage
const
defaultDataDirDesc* = defaultDataDir()
defaultBlockFileName* = "eth-block-data"
defaultAccumulatorFileName* = "mainnet-master-accumulator.ssz"
defaultWeb3Url* = Web3Url(kind: HttpUrl, url: "http://127.0.0.1:8545")
type
ExporterCmd* = enum
history,
beacon
HistoryCmd* = enum
# TODO: Multiline strings don't work here anymore with 1.6, and concat of
# several lines gives the error: Error: Invalid node kind nnkInfix for macros.`$`
exportBlockData =
"Export block data (headers, bodies and receipts) to a json format or a database. Some of this functionality is likely to get deprecated"
exportEpochHeaders =
"Export block headers from an Ethereum JSON RPC Execution endpoint to *.e2s files arranged per epoch (8192 blocks)"
verifyEpochHeaders =
"Verify *.e2s files containing block headers. Verify currently only means being able to RLP decode the block headers"
exportAccumulatorData =
"Build and export the master accumulator and historical epoch accumulators. Requires *.e2s block header files generated with the exportHeaders command up until the merge block"
printAccumulatorData =
"Print the root hash of the master accumulator and of all historical epoch accumulators. Requires data generated by exportAccumulatorData command"
exportHeaderRange =
"Export block headers from an Ethereum JSON RPC Execution endpoint to *.e2s files (unlimited amount)"
exportHeadersWithProof =
"Export block headers with proof from *.e2s headers file and epochAccumulator files"
BeaconCmd* = enum
exportLCBootstrap = "Export Light Client Bootstrap"
exportLCUpdates = "Export Light Client Updates"
exportLCFinalityUpdate = "Export Light Client Finality Update"
exportLCOptimisticUpdate = "Export Light Client Optimistic Update"
ExporterConf* = object
logLevel* {.
defaultValue: LogLevel.INFO
defaultValueDesc: $LogLevel.INFO
desc: "Sets the log level"
name: "log-level" .}: LogLevel
dataDir* {.
desc: "The directory where generated data files will be exported to"
defaultValue: defaultDataDir()
defaultValueDesc: $defaultDataDirDesc
name: "data-dir" .}: OutDir
case cmd* {.command.}: ExporterCmd
of ExporterCmd.history:
web3Url* {.
desc: "Execution layer JSON-RPC API URL"
defaultValue: defaultWeb3Url
name: "web3-url" .}: Web3Url
case historyCmd* {.command.}: HistoryCmd
of exportBlockData:
startBlock* {.
desc: "Number of the first block to be exported"
defaultValue: 0
name: "start-block" .}: uint64
endBlock* {.
desc: "Number of the last block to be exported"
defaultValue: 0
name: "end-block" .}: uint64
fileName* {.
desc: "File name (minus extension) where block data will be exported to"
defaultValue: defaultBlockFileName
defaultValueDesc: $defaultBlockFileName
name: "file-name" .}: string
storageMode* {.
desc: "Storage mode of block data export"
defaultValue: JsonStorage
name: "storage-mode" .}: StorageMode
headersOnly* {.
desc: "Only export the headers instead of full blocks and receipts"
defaultValue: false
name: "headers-only" .}: bool
of exportEpochHeaders:
startEpoch* {.
desc: "Number of the first epoch which should be downloaded"
defaultValue: 0
name: "start-epoch" .}: uint64
endEpoch* {.
desc: "Number of the last epoch which should be downloaded"
defaultValue: 1896
name: "end-epoch" .}: uint64
# TODO:
# Although options are the same as for exportHeaders, we can't drop them
# under the same case of as confutils does not agree with that.
of verifyEpochHeaders:
startEpochVerify* {.
desc: "Number of the first epoch which should be downloaded"
defaultValue: 0
name: "start-epoch" .}: uint64
endEpochVerify* {.
desc: "Number of the last epoch which should be downloaded"
defaultValue: 1896
name: "end-epoch" .}: uint64
of exportAccumulatorData:
accumulatorFileName* {.
desc: "File to which the serialized accumulator is written"
defaultValue: defaultAccumulatorFileName
defaultValueDesc: $defaultAccumulatorFileName
name: "accumulator-file-name" .}: string
writeEpochAccumulators* {.
desc: "Write also the SSZ encoded epoch accumulators to specific files"
defaultValue: false
name: "write-epoch-accumulators" .}: bool
of printAccumulatorData:
accumulatorFileNamePrint* {.
desc: "File from which the serialized accumulator is read"
defaultValue: defaultAccumulatorFileName
defaultValueDesc: $defaultAccumulatorFileName
name: "accumulator-file-name" .}: string
of exportHeaderRange:
startBlockNumber* {.
desc: "Number of the first block header to be exported"
name: "start-block" .}: uint64
endBlockNumber* {.
desc: "Number of the last block header to be exported"
name: "end-block" .}: uint64
of exportHeadersWithProof:
startBlockNumber2* {.
desc: "Number of the first block header to be exported"
name: "start-block" .}: uint64
endBlockNumber2* {.
desc: "Number of the last block header to be exported"
name: "end-block" .}: uint64
of ExporterCmd.beacon:
restUrl* {.
desc: "URL of the beacon node REST service"
defaultValue: "http://127.0.0.1:5052"
name: "rest-url" .}: string
case beaconCmd* {.command.}: BeaconCmd
of exportLCBootstrap:
trustedBlockRoot* {.
desc: "Trusted finalized block root of the requested bootstrap"
name: "trusted-block-root" .}: Eth2Digest
of exportLCUpdates:
startPeriod* {.
desc: "Period of the first LC update"
defaultValue: 0
name: "start-period" .}: uint64
count* {.
desc: "Amount of LC updates to request"
defaultValue: 1
name: "count" .}: uint64
of exportLCFinalityUpdate:
discard
of exportLCOptimisticUpdate:
discard
proc parseCmdArg*(
T: type Web3Url, p: string): T {.raises: [ConfigurationError].} =
let
url = parseUri(p)
normalizedScheme = url.scheme.toLowerAscii()
if (normalizedScheme == "http" or normalizedScheme == "https"):
Web3Url(kind: HttpUrl, url: p)
elif (normalizedScheme == "ws" or normalizedScheme == "wss"):
Web3Url(kind: WsUrl, url: p)
else:
raise newException(
ConfigurationError,
"The Web3 URL must specify one of following protocols: http/https/ws/wss"
)
proc completeCmdArg*(T: type Web3Url, val: string): seq[string] =
return @[]
proc parseCmdArg*(T: type StorageMode, p: string): T
{.raises: [ConfigurationError].} =
if p == "db":
return DbStorage
elif p == "json":
return JsonStorage
else:
let msg = "Provided mode: " & p & " is not a valid. Should be `json` or `db`"
raise newException(ConfigurationError, msg)
proc completeCmdArg*(T: type StorageMode, val: string): seq[string] =
return @[]
func parseCmdArg*(T: type Eth2Digest, input: string): T
{.raises: [ValueError, Defect].} =
Eth2Digest.fromHex(input)
func completeCmdArg*(T: type Eth2Digest, input: string): seq[string] =
return @[]
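
Finally, the finality and optimistic update exports take no options beyond the
shared `beacon` ones, so a hedged sketch of those invocations is simply:
```bash
# Sketch only: both subcommands rely solely on the shared --rest-url/--data-dir options.
./build/eth_data_exporter beacon exportLCFinalityUpdate --data-dir:"./user_data_dir/"
./build/eth_data_exporter beacon exportLCOptimisticUpdate --data-dir:"./user_data_dir/"
```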