From ab13e43db87f5ff5fe74d71d6b0c48f01ecff2fc Mon Sep 17 00:00:00 2001
From: Kim De Mey
Date: Wed, 5 Jul 2023 20:17:03 +0200
Subject: [PATCH] Add export of beacon LC data to eth_data_exporter (#1629)

* Add export of beacon LC data to eth_data_exporter

* Update content seeding docs with new eth_data_exporter cli
---
 fluffy/docs/content_seeding.md                |  16 +-
 fluffy/tools/eth_data_exporter.nim            | 842 +++++++-----------
 .../eth_data_exporter/cl_data_exporter.nim    | 280 ++++++
 .../eth_data_exporter/exporter_common.nim     |  59 ++
 .../tools/eth_data_exporter/exporter_conf.nim | 223 +++++
 5 files changed, 893 insertions(+), 527 deletions(-)
 create mode 100644 fluffy/tools/eth_data_exporter/cl_data_exporter.nim
 create mode 100644 fluffy/tools/eth_data_exporter/exporter_common.nim
 create mode 100644 fluffy/tools/eth_data_exporter/exporter_conf.nim

diff --git a/fluffy/docs/content_seeding.md b/fluffy/docs/content_seeding.md
index 61f55570b..2e3813699 100644
--- a/fluffy/docs/content_seeding.md
+++ b/fluffy/docs/content_seeding.md
@@ -8,25 +8,22 @@
 1. Set-up access to an Ethereum JSON-RPC endpoint (e.g. local geth instance)
 that can serve the data.

-2. Use the `eth-data-exporter` tool to download and store all block headers into
+2. Use the `eth_data_exporter` tool to download and store all block headers into
 *.e2s files arranged per epoch (8192 blocks):

 ```bash
 make fluffy-tools

-./build/eth_data_exporter exportEpochHeaders --data-dir:"./user_data_dir/"
+./build/eth_data_exporter history exportEpochHeaders --data-dir:"./user_data_dir/"
 ```

 This will store all block headers up till the merge block into *.e2s files in
 the assigned `--data-dir`.

-> Note: Currently only hardcoded address `ws://127.0.0.1:8546` works for the
-Ethereum JSON-RPC endpoint.
-
 3. Build the master accumulator and the epoch accumulators:

 ```bash
-./build/eth_data_exporter exportAccumulatorData --writeEpochAccumulators --data-dir:"./user_data_dir/"
+./build/eth_data_exporter history exportAccumulatorData --writeEpochAccumulators --data-dir:"./user_data_dir/"
 ```

 #### Step 2: Seed the epoch accumulators into the Portal network
@@ -59,22 +56,19 @@ Run the `content_verifier` tool and see if all epoch accumulators are found:
 1. Set-up access to an Ethereum JSON-RPC endpoint (e.g. local geth instance)
 that can serve the data.

-2. Use the `eth-data-exporter` tool to download history data through the
+2. Use the `eth_data_exporter` tool to download history data through the
 JSON-RPC endpoint into a format suitable for reading into the Fluffy
 client and propagating into the network:

 ```bash
 make fluffy-tools

-./build/eth_data_exporter --initial-block:1 --end-block:10 --data-dir:"/user_data_dir/"
+./build/eth_data_exporter history exportBlockData --initial-block:1 --end-block:10 --data-dir:"/user_data_dir/"
 ```

 This will store blocks 1 to 10 into a json file located at
 `./user_data_dir/eth-history-data.json`.

-> Note: Currently only hardcoded address `ws://127.0.0.1:8546` works for the
-Ethereum JSON-RPC endpoint.
-
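With the note above removed, the endpoint is configurable. A hedged sketch of pointing the exporter at a specific endpoint via the existing `--web3-url` option (the URL value is just an example; per the option's parser, http, https, ws and wss URLs are accepted):

```bash
./build/eth_data_exporter history exportBlockData --web3-url:"ws://127.0.0.1:8546" --initial-block:1 --end-block:10 --data-dir:"/user_data_dir/"
```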
 3. Run Fluffy and trigger the propagation of data with the
 `portal_history_propagate` JSON-RPC API call:
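As an illustrative sketch of that call (the method name comes from the docs above; the parameter shape, the file path, and the local JSON-RPC port are assumptions about Fluffy's default setup):

```bash
curl -s -X POST -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":"1","method":"portal_history_propagate","params":["./user_data_dir/eth-history-data.json"]}' \
  http://localhost:8545
```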
- desc: "File name (minus extension) where block data will be exported to" - defaultValue: defaultBlockFileName - defaultValueDesc: $defaultBlockFileName - name: "file-name" .}: string - storageMode* {. - desc: "Storage mode of block data export" - defaultValue: Json - name: "storage-mode" .}: StorageMode - headersOnly* {. - desc: "Only export the headers instead of full blocks and receipts" - defaultValue: false - name: "headers-only" .}: bool - of exportEpochHeaders: - startEpoch* {. - desc: "Number of the first epoch which should be downloaded" - defaultValue: 0 - name: "start-epoch" .}: uint64 - endEpoch* {. - desc: "Number of the last epoch which should be downloaded" - defaultValue: 1896 - name: "end-epoch" .}: uint64 - # TODO: - # Although options are the same as for exportHeaders, we can't drop them - # under the same case of as confutils does not agree with that. - of verifyEpochHeaders: - startEpochVerify* {. - desc: "Number of the first epoch which should be downloaded" - defaultValue: 0 - name: "start-epoch" .}: uint64 - endEpochVerify* {. - desc: "Number of the last epoch which should be downloaded" - defaultValue: 1896 - name: "end-epoch" .}: uint64 - of exportAccumulatorData: - accumulatorFileName* {. - desc: "File to which the serialized accumulator is written" - defaultValue: defaultAccumulatorFileName - defaultValueDesc: $defaultAccumulatorFileName - name: "accumulator-file-name" .}: string - writeEpochAccumulators* {. - desc: "Write also the SSZ encoded epoch accumulators to specific files" - defaultValue: false - name: "write-epoch-accumulators" .}: bool - of printAccumulatorData: - accumulatorFileNamePrint* {. - desc: "File from which the serialized accumulator is read" - defaultValue: defaultAccumulatorFileName - defaultValueDesc: $defaultAccumulatorFileName - name: "accumulator-file-name" .}: string - of exportHeaderRange: - startBlockNumber* {. - desc: "Number of the first block header to be exported" - name: "start-block" .}: uint64 - endBlockNumber* {. - desc: "Number of the last block header to be exported" - name: "end-block" .}: uint64 - of exportHeadersWithProof: - startBlockNumber2* {. - desc: "Number of the first block header to be exported" - name: "start-block" .}: uint64 - endBlockNumber2* {. - desc: "Number of the last block header to be exported" - name: "end-block" .}: uint64 - -proc parseCmdArg*( - T: type Web3Url, p: string): T {.raises: [ConfigurationError].} = - let - url = parseUri(p) - normalizedScheme = url.scheme.toLowerAscii() - - if (normalizedScheme == "http" or normalizedScheme == "https"): - Web3Url(kind: HttpUrl, url: p) - elif (normalizedScheme == "ws" or normalizedScheme == "wss"): - Web3Url(kind: WsUrl, url: p) - else: - raise newException( - ConfigurationError, - "The Web3 URL must specify one of following protocols: http/https/ws/wss" - ) - -proc completeCmdArg*(T: type Web3Url, val: string): seq[string] = - return @[] - -proc parseCmdArg*(T: type StorageMode, p: string): T - {.raises: [ConfigurationError].} = - if p == "db": - return Db - elif p == "json": - return Json - else: - let msg = "Provided mode: " & p & " is not a valid. 
-    raise newException(ConfigurationError, msg)
-
-proc completeCmdArg*(T: type StorageMode, val: string): seq[string] =
-  return @[]
-
 proc downloadHeader(client: RpcClient, i: uint64): BlockHeader =
   let blockNumber = u256(i)
   try:
@@ -243,34 +74,6 @@ proc downloadBlock(i: uint64, client: RpcClient): Block =
     fatal "Error while requesting Block", error = e.msg, number = i
     quit 1

-proc createAndOpenFile(dataDir: string, fileName: string): OutputStreamHandle =
-  # Creates directory and file, if file already exists
-  # program is aborted with info to user, to avoid losing data
-  let fileName: string =
-    if not fileName.endsWith(".json"):
-      fileName & ".json"
-    else:
-      fileName
-
-  let filePath = dataDir / fileName
-
-  if isFile(filePath):
-    fatal "File under provided path already exists and would be overwritten",
-      path = filePath
-    quit 1
-
-  let res = createPath(dataDir)
-  if res.isErr():
-    fatal "Error occurred while creating directory",
-      error = ioErrorMsg(res.error)
-    quit 1
-
-  try:
-    return fileOutput(filePath)
-  except IOError as e:
-    fatal "Error occurred while opening the file", error = e.msg
-    quit 1
-
 proc writeHeadersToJson(config: ExporterConf, client: RpcClient) =
   let fh = createAndOpenFile(string config.dataDir, string config.fileName)

@@ -350,12 +153,12 @@ proc writeBlocksToDb(config: ExporterConf, client: RpcClient) =

 proc exportBlocks(config: ExporterConf, client: RpcClient) =
   case config.storageMode
-  of Json:
+  of JsonStorage:
     if config.headersOnly:
       writeHeadersToJson(config, client)
     else:
       writeBlocksToJson(config, client)
-  of Db:
+  of DbStorage:
     if config.headersOnly:
       fatal "Db mode not available for headers only"
       quit 1
@@ -406,346 +209,353 @@ when isMainModule:
       quit 1

   case config.cmd
-  of ExporterCmd.exportBlockData:
-    let client = newRpcClient(config.web3Url)
-    let connectRes = waitFor client.connectRpcClient(config.web3Url)
-    if connectRes.isErr():
-      fatal "Failed connecting to JSON-RPC client", error = connectRes.error
-      quit 1
-
-    if (config.endBlock < config.startBlock):
-      fatal "Initial block number should be smaller than end block number",
-        startBlock = config.startBlock,
-        endBlock = config.endBlock
-      quit 1
-
-    try:
-      exportBlocks(config, client)
-    finally:
-      waitFor client.close()
-
-  of ExporterCmd.exportEpochHeaders:
-    let client = newRpcClient(config.web3Url)
-    let connectRes = waitFor client.connectRpcClient(config.web3Url)
-    if connectRes.isErr():
-      fatal "Failed connecting to JSON-RPC client", error = connectRes.error
-      quit 1
-
-    proc exportEpochHeaders(file: string, epoch: uint64): Result[void, string] =
-      # Downloading headers from JSON RPC endpoint
-      info "Requesting epoch headers", epoch
-      var headers: seq[BlockHeader]
-      for j in 0..
LightClientDataFork.None:
+      let
+        slot = forkyObject.header.beacon.slot
+        contentKey = encode(bootstrapContentKey(trustedBlockRoot))
+        forkDigest = forkDigestAtEpoch(
+          forkDigests[], epoch(slot), cfg)
+        content = encodeBootstrapForked(
+          forkDigest,
+          update
+        )
+
+      let portalContent = JsonPortalContent(
+        content_key: contentKey.asSeq().to0xHex(),
+        content_value: content.to0xHex()
+      )
+
+      var contentTable: JsonPortalContentTable
+      contentTable[slot.uint64] = portalContent
+
+      writePortalContentToJson(fh, contentTable)
+
+proc exportLCUpdates*(
+    restUrl: string, dataDir: string,
+    startPeriod: uint64, count: uint64,
+    cfg: RuntimeConfig, forkDigests: ref ForkDigests) {.async.} =
+  let file = "light-client-updates.json"
+  let fh = createAndOpenFile(dataDir, file)
+
+  defer:
+    try:
+      fh.close()
+    except IOError as e:
+      fatal "Error occurred while closing file", error = e.msg
+      quit 1
+
+  var
+    client = RestClientRef.new(restUrl).valueOr:
+      error "Cannot connect to server", error = error
+      quit 1
+
+  var contentTable: JsonPortalContentTable
+
+  var updates =
+    try:
+      notice "Downloading LC updates"
+      awaitWithTimeout(
+        client.getLightClientUpdatesByRange(
+          SyncCommitteePeriod(startPeriod), count, cfg, forkDigests),
+        restRequestsTimeout
+      ):
+        error "Attempt to download LC updates timed out"
+        quit 1
+    except CatchableError as exc:
+      error "Unable to download LC updates", error = exc.msg
+      quit 1
+
+  if updates.len() > 0:
+    withForkyObject(updates[0]):
+      when lcDataFork > LightClientDataFork.None:
+        let
+          slot = forkyObject.attested_header.beacon.slot
+          period = forkyObject.attested_header.beacon.slot.sync_committee_period
+          contentKey = encode(updateContentKey(period.uint64, uint64(1)))
+          forkDigest = forkDigestAtEpoch(
+            forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
+
+          content = encodeLightClientUpdatesForked(
+            forkDigest,
+            updates
+          )
+
+        let portalContent = JsonPortalContent(
+          content_key: contentKey.asSeq().to0xHex(),
+          content_value: content.to0xHex()
+        )
+
+        var contentTable: JsonPortalContentTable
+        contentTable[slot.uint64] = portalContent
+
+        writePortalContentToJson(fh, contentTable)
+  else:
+    error "No updates downloaded"
+    quit 1
+
+proc exportLCFinalityUpdate*(
+    restUrl: string, dataDir: string,
+    cfg: RuntimeConfig, forkDigests: ref ForkDigests) {.async.} =
+  let file = "light-client-finality-update.json"
+  let fh = createAndOpenFile(dataDir, file)
+
+  defer:
+    try:
+      fh.close()
+    except IOError as e:
+      fatal "Error occurred while closing file", error = e.msg
+      quit 1
+
+  var
+    client = RestClientRef.new(restUrl).valueOr:
+      error "Cannot connect to server", error = error
+      quit 1
+
+  var contentTable: JsonPortalContentTable
+
+  var update =
+    try:
+      notice "Downloading LC finality update"
+      awaitWithTimeout(
+        client.getLightClientFinalityUpdate(
+          cfg, forkDigests),
+        restRequestsTimeout
+      ):
+        error "Attempt to download LC finality update timed out"
+        quit 1
+    except CatchableError as exc:
+      error "Unable to download LC finality update", error = exc.msg
+      quit 1
+
+  withForkyObject(update):
+    when lcDataFork > LightClientDataFork.None:
+      let
+        finalizedSlot = forkyObject.finalized_header.beacon.slot
+        optimisticSlot = forkyObject.attested_header.beacon.slot
+        contentKey = encode(finalityUpdateContentKey(
+          finalizedSlot.uint64, optimisticSlot.uint64))
+        contentId = beacon_light_client_content.toContentId(contentKey)
+        forkDigest = forkDigestAtEpoch(
+          forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
+        content = encodeFinalityUpdateForked(
+          forkDigest,
+          update
+        )
+
+      let portalContent = JsonPortalContent(
+        content_key: contentKey.asSeq().to0xHex(),
+        content_value: content.to0xHex()
+      )
+
+      var contentTable: JsonPortalContentTable
+      contentTable[optimisticSlot.uint64] = portalContent
+
+      writePortalContentToJson(fh, contentTable)
+
+proc exportLCOptimisticUpdate*(
+    restUrl: string, dataDir: string,
+    cfg: RuntimeConfig, forkDigests: ref ForkDigests) {.async.} =
+  let file = "light-client-optimistic-update.json"
+  let fh = createAndOpenFile(dataDir, file)
+
+  defer:
+    try:
+      fh.close()
+    except IOError as e:
+      fatal "Error occurred while closing file", error = e.msg
+      quit 1
+
+  var
+    client = RestClientRef.new(restUrl).valueOr:
+      error "Cannot connect to server", error = error
+      quit 1
+
+  var contentTable: JsonPortalContentTable
+
+  var update =
+    try:
+      notice "Downloading LC optimistic update"
+      awaitWithTimeout(
+        client.getLightClientOptimisticUpdate(
+          cfg, forkDigests),
+        restRequestsTimeout
+      ):
+        error "Attempt to download LC optimistic update timed out"
+        quit 1
+    except CatchableError as exc:
+      error "Unable to download LC optimistic update", error = exc.msg
+      quit 1
+
+  withForkyObject(update):
+    when lcDataFork > LightClientDataFork.None:
+      let
+        slot = forkyObject.attested_header.beacon.slot
+        contentKey = encode(optimisticUpdateContentKey(slot.uint64))
+        contentId = beacon_light_client_content.toContentId(contentKey)
+        forkDigest = forkDigestAtEpoch(
+          forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
+        content = encodeOptimisticUpdateForked(
+          forkDigest,
+          update
+        )
+
+      let portalContent = JsonPortalContent(
+        content_key: contentKey.asSeq().to0xHex(),
+        content_value: content.to0xHex()
+      )
+
+      var contentTable: JsonPortalContentTable
+      contentTable[slot.uint64] = portalContent
+
+      writePortalContentToJson(fh, contentTable)
diff --git a/fluffy/tools/eth_data_exporter/exporter_common.nim b/fluffy/tools/eth_data_exporter/exporter_common.nim
new file mode 100644
index 000000000..9b667bfb8
--- /dev/null
+++ b/fluffy/tools/eth_data_exporter/exporter_common.nim
@@ -0,0 +1,59 @@
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [].}
+
+import
+  std/[strutils, os],
+  chronicles,
+  stew/io2,
+  json_serialization, json_serialization/std/tables,
+  faststreams
+
+type
+  JsonPortalContent* = object
+    content_key*: string
+    content_value*: string
+
+  JsonPortalContentTable* = OrderedTable[uint64, JsonPortalContent]
+
+proc writePortalContentToJson*(
+    fh: OutputStreamHandle, content: JsonPortalContentTable) =
+  try:
+    var writer = JsonWriter[DefaultFlavor].init(fh.s, pretty = true)
+    writer.writeValue(content)
+  except IOError as e:
+    fatal "Error occurred while writing to file", error = e.msg
+    quit 1
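To make the output format concrete, a hedged sketch of what one of the exported files would contain (the slot key and hex values are placeholders, and it assumes the `uint64` table keys serialize as JSON object keys):

```bash
cat ./user_data_dir/light-client-updates.json
# Hypothetical output: one object keyed by slot, holding hex-encoded key/value
# {
#   "6254976": {
#     "content_key": "0x...",
#     "content_value": "0x..."
#   }
# }
```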
+
+proc createAndOpenFile*(dataDir: string, fileName: string): OutputStreamHandle =
+  # Creates the directory and file. If the file already exists, the program is
+  # aborted with info to the user, to avoid losing data.
+  let fileName: string =
+    if not fileName.endsWith(".json"):
+      fileName & ".json"
+    else:
+      fileName
+
+  let filePath = dataDir / fileName
+
+  if isFile(filePath):
+    fatal "File under provided path already exists and would be overwritten",
+      path = filePath
+    quit 1
+
+  let res = createPath(dataDir)
+  if res.isErr():
+    fatal "Error occurred while creating directory",
+      error = ioErrorMsg(res.error)
+    quit 1
+
+  try:
+    return fileOutput(filePath)
+  except IOError as e:
+    fatal "Error occurred while opening the file", error = e.msg
+    quit 1
diff --git a/fluffy/tools/eth_data_exporter/exporter_conf.nim b/fluffy/tools/eth_data_exporter/exporter_conf.nim
new file mode 100644
index 000000000..879d50911
--- /dev/null
+++ b/fluffy/tools/eth_data_exporter/exporter_conf.nim
@@ -0,0 +1,223 @@
+# Nimbus
+# Copyright (c) 2023 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [].}
+
+import
+  std/[os, uri],
+  confutils, chronicles,
+  beacon_chain/spec/digest
+
+proc defaultDataDir*(): string =
+  let dataDir = when defined(windows):
+    "AppData" / "Roaming" / "EthData"
+  elif defined(macosx):
+    "Library" / "Application Support" / "EthData"
+  else:
+    ".cache" / "eth-data"
+
+  getHomeDir() / dataDir
+
+type
+  Web3UrlKind* = enum
+    HttpUrl, WsUrl
+
+  Web3Url* = object
+    kind*: Web3UrlKind
+    url*: string
+
+  StorageMode* = enum
+    JsonStorage, DbStorage
+
+const
+  defaultDataDirDesc* = defaultDataDir()
+  defaultBlockFileName* = "eth-block-data"
+  defaultAccumulatorFileName* = "mainnet-master-accumulator.ssz"
+  defaultWeb3Url* = Web3Url(kind: HttpUrl, url: "http://127.0.0.1:8545")
+
+type
+  ExporterCmd* = enum
+    history,
+    beacon
+
+  HistoryCmd* = enum
+    # TODO: Multiline strings don't work here anymore with 1.6, and concat of
+    # several lines gives the error: Error: Invalid node kind nnkInfix for macros.`$`
+    exportBlockData =
+      "Export block data (headers, bodies and receipts) to a json format or a database. Some of this functionality is likely to get deprecated"
+    exportEpochHeaders =
+      "Export block headers from an Ethereum JSON RPC Execution endpoint to *.e2s files arranged per epoch (8192 blocks)"
+    verifyEpochHeaders =
+      "Verify *.e2s files containing block headers. Verify currently only means being able to RLP decode the block headers"
+    exportAccumulatorData =
+      "Build and export the master accumulator and historical epoch accumulators. Requires *.e2s block header files generated with the exportEpochHeaders command up until the merge block"
+    printAccumulatorData =
+      "Print the root hash of the master accumulator and of all historical epoch accumulators. Requires data generated by the exportAccumulatorData command"
+    exportHeaderRange =
+      "Export block headers from an Ethereum JSON RPC Execution endpoint to *.e2s files (unlimited amount)"
+    exportHeadersWithProof =
+      "Export block headers with proof from *.e2s headers file and epochAccumulator files"
+
+  BeaconCmd* = enum
+    exportLCBootstrap = "Export Light Client Bootstrap"
+    exportLCUpdates = "Export Light Client Updates"
+    exportLCFinalityUpdate = "Export Light Client Finality Update"
+    exportLCOptimisticUpdate = "Export Light Client Optimistic Update"
+
+  ExporterConf* = object
+    logLevel* {.
+      defaultValue: LogLevel.INFO
+      defaultValueDesc: $LogLevel.INFO
+      desc: "Sets the log level"
+      name: "log-level" .}: LogLevel
+    dataDir* {.
+      desc: "The directory where generated data files will be exported to"
+      defaultValue: defaultDataDir()
+      defaultValueDesc: $defaultDataDirDesc
+      name: "data-dir" .}: OutDir
+    case cmd* {.command.}: ExporterCmd
+    of ExporterCmd.history:
+      web3Url* {.
+        desc: "Execution layer JSON-RPC API URL"
+        defaultValue: defaultWeb3Url
+        name: "web3-url" .}: Web3Url
+      case historyCmd* {.command.}: HistoryCmd
+      of exportBlockData:
+        startBlock* {.
+          desc: "Number of the first block to be exported"
+          defaultValue: 0
+          name: "start-block" .}: uint64
+        endBlock* {.
+          desc: "Number of the last block to be exported"
+          defaultValue: 0
+          name: "end-block" .}: uint64
+        fileName* {.
+          desc: "File name (minus extension) where block data will be exported to"
+          defaultValue: defaultBlockFileName
+          defaultValueDesc: $defaultBlockFileName
+          name: "file-name" .}: string
+        storageMode* {.
+          desc: "Storage mode of block data export"
+          defaultValue: JsonStorage
+          name: "storage-mode" .}: StorageMode
+        headersOnly* {.
+          desc: "Only export the headers instead of full blocks and receipts"
+          defaultValue: false
+          name: "headers-only" .}: bool
+      of exportEpochHeaders:
+        startEpoch* {.
+          desc: "Number of the first epoch which should be downloaded"
+          defaultValue: 0
+          name: "start-epoch" .}: uint64
+        endEpoch* {.
+          desc: "Number of the last epoch which should be downloaded"
+          defaultValue: 1896
+          name: "end-epoch" .}: uint64
+      # TODO:
+      # Although the options are the same as for exportEpochHeaders, we can't
+      # group them under the same `of` branch, as confutils does not agree
+      # with that.
+      of verifyEpochHeaders:
+        startEpochVerify* {.
+          desc: "Number of the first epoch which should be verified"
+          defaultValue: 0
+          name: "start-epoch" .}: uint64
+        endEpochVerify* {.
+          desc: "Number of the last epoch which should be verified"
+          defaultValue: 1896
+          name: "end-epoch" .}: uint64
+      of exportAccumulatorData:
+        accumulatorFileName* {.
+          desc: "File to which the serialized accumulator is written"
+          defaultValue: defaultAccumulatorFileName
+          defaultValueDesc: $defaultAccumulatorFileName
+          name: "accumulator-file-name" .}: string
+        writeEpochAccumulators* {.
+          desc: "Also write the SSZ encoded epoch accumulators to specific files"
+          defaultValue: false
+          name: "write-epoch-accumulators" .}: bool
+      of printAccumulatorData:
+        accumulatorFileNamePrint* {.
+          desc: "File from which the serialized accumulator is read"
+          defaultValue: defaultAccumulatorFileName
+          defaultValueDesc: $defaultAccumulatorFileName
+          name: "accumulator-file-name" .}: string
+ desc: "Number of the first block header to be exported" + name: "start-block" .}: uint64 + endBlockNumber* {. + desc: "Number of the last block header to be exported" + name: "end-block" .}: uint64 + of exportHeadersWithProof: + startBlockNumber2* {. + desc: "Number of the first block header to be exported" + name: "start-block" .}: uint64 + endBlockNumber2* {. + desc: "Number of the last block header to be exported" + name: "end-block" .}: uint64 + of ExporterCmd.beacon: + restUrl* {. + desc: "URL of the beacon node REST service" + defaultValue: "http://127.0.0.1:5052" + name: "rest-url" .}: string + case beaconCmd* {.command.}: BeaconCmd + of exportLCBootstrap: + trustedBlockRoot* {. + desc: "Trusted finalized block root of the requested bootstrap" + name: "trusted-block-root" .}: Eth2Digest + of exportLCUpdates: + startPeriod* {. + desc: "Period of the first LC update" + defaultValue: 0 + name: "start-period" .}: uint64 + count* {. + desc: "Amount of LC updates to request" + defaultValue: 1 + name: "count" .}: uint64 + of exportLCFinalityUpdate: + discard + of exportLCOptimisticUpdate: + discard + +proc parseCmdArg*( + T: type Web3Url, p: string): T {.raises: [ConfigurationError].} = + let + url = parseUri(p) + normalizedScheme = url.scheme.toLowerAscii() + + if (normalizedScheme == "http" or normalizedScheme == "https"): + Web3Url(kind: HttpUrl, url: p) + elif (normalizedScheme == "ws" or normalizedScheme == "wss"): + Web3Url(kind: WsUrl, url: p) + else: + raise newException( + ConfigurationError, + "The Web3 URL must specify one of following protocols: http/https/ws/wss" + ) + +proc completeCmdArg*(T: type Web3Url, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type StorageMode, p: string): T + {.raises: [ConfigurationError].} = + if p == "db": + return DbStorage + elif p == "json": + return JsonStorage + else: + let msg = "Provided mode: " & p & " is not a valid. Should be `json` or `db`" + raise newException(ConfigurationError, msg) + +proc completeCmdArg*(T: type StorageMode, val: string): seq[string] = + return @[] + +func parseCmdArg*(T: type Eth2Digest, input: string): T + {.raises: [ValueError, Defect].} = + Eth2Digest.fromHex(input) + +func completeCmdArg*(T: type Eth2Digest, input: string): seq[string] = + return @[]