Rename beacon bridge to portal_bridge and prep for 1 bridge binary (#1936)

Renaming the beacon_chain_bridge to portal_bridge and preparing
for stuffing all bridge functionality for all networks under this binary.
This commit is contained in:
Kim De Mey 2023-12-12 21:08:58 +01:00 committed by GitHub
parent b623909c44
commit c99c0687f8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 303 additions and 256 deletions

View File

@ -66,7 +66,7 @@ TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS))
# Fluffy debugging tools + testing tools
FLUFFY_TOOLS := \
beacon_chain_bridge \
portal_bridge \
beacon_lc_bridge \
eth_data_exporter \
content_verifier \
@ -75,7 +75,7 @@ FLUFFY_TOOLS := \
fcli_db
FLUFFY_TOOLS_DIRS := \
fluffy/tools/beacon_lc_bridge \
fluffy/tools/beacon_chain_bridge \
fluffy/tools/portal_bridge \
fluffy/tools
# comma-separated values for the "clean" target
FLUFFY_TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(FLUFFY_TOOLS))

View File

@ -8,16 +8,16 @@ Run a Fluffy node with the JSON-RPC API enabled.
./build/fluffy --rpc
```
Build & run the `beacon_chain_bridge`:
Build & run the `portal_bridge` for the beacon network:
```bash
make beacon_chain_bridge
make portal_bridge
TRUSTED_BLOCK_ROOT=0x1234567890123456789012345678901234567890123456789012345678901234 # Replace with trusted block root.
# --rest-url = access to beacon node API, default http://127.0.0.1:5052
./build/beacon_chain_bridge --trusted-block-root:${TRUSTED_BLOCK_ROOT} --rest-url:http://127.0.0.1:5052
./build/portal_bridge beacon --trusted-block-root:${TRUSTED_BLOCK_ROOT} --rest-url:http://127.0.0.1:5052
```
The `beacon_chain_bridge` will connect to Fluffy node over the JSON-RPC
The `portal_bridge` will connect to Fluffy node over the JSON-RPC
interface and start gossiping a `LightClientBootstrap` for the
given trusted block root and gossip backfill `LightClientUpdate`s.

View File

@ -5,7 +5,7 @@ network bridge in order to test if all nodes can do the beacon light client sync
and stay up to date with the latest head of the chain.
To accommodate this, the `launch_local_testnet.sh` script has the option to
launch the Fluffy `beacon_chain_bridge` automatically and connect it to `node0`
launch the Fluffy `portal_bridge` automatically and connect it to `node0`
of the local testnet.
## Run the local testnet script with bridge
@ -15,19 +15,19 @@ The `launch_local_testnet.sh` script must be launched with the
The individual nodes will be started with this `trusted-block-root` and each
node will try to start sync from this block root.
Run the following command to launch the network with the `beacon_chain_bridge`
activated.
Run the following command to launch the network with the `portal_bridge`
activated for the beacon network.
```bash
TRUSTED_BLOCK_ROOT=0x1234567890123456789012345678901234567890123456789012345678901234 # Replace with trusted block root.
# Run the script, start 8 nodes + beacon_chain_bridge
./fluffy/scripts/launch_local_testnet.sh -n8 --trusted-block-root ${TRUSTED_BLOCK_ROOT} --beacon-chain-bridge
# Run the script, start 8 nodes + portal_bridge
./fluffy/scripts/launch_local_testnet.sh -n8 --trusted-block-root ${TRUSTED_BLOCK_ROOT} --portal-bridge
```
## Run the local testnet script and launch the bridge manually
To have control over when to start or restart the `beacon_chain_bridge` on can
To have control over when to start or restart the `portal_bridge` one can
also control the bridge manually, e.g. start the testnet:
```bash
@ -37,12 +37,12 @@ TRUSTED_BLOCK_ROOT=0x12345678901234567890123456789012345678901234567890123456789
./fluffy/scripts/launch_local_testnet.sh -n8 --trusted-block-root ${TRUSTED_BLOCK_ROOT}
```
Next, build and run the `beacon_chain_bridge`
Next, build and run the `portal_bridge` for the beacon network:
```bash
make beacon_chain_bridge
make portal_bridge
# --rpc-port 10000 = default node0
# --rest-url = access to beacon node API, default http://127.0.0.1:5052
./build/beacon_chain_bridge --trusted-block-root:${TRUSTED_BLOCK_ROOT} --rest-url:http://127.0.0.1:5052 --backfill-amount:128 --rpc-port:10000
./build/portal_bridge beacon --trusted-block-root:${TRUSTED_BLOCK_ROOT} --rest-url:http://127.0.0.1:5052 --backfill-amount:128 --rpc-port:10000
```

View File

@ -34,7 +34,7 @@ if [ ${PIPESTATUS[0]} != 4 ]; then
fi
OPTS="h:n:d"
LONGOPTS="help,nodes:,data-dir:,run-tests,log-level:,base-port:,base-rpc-port:,trusted-block-root:,beacon-chain-bridge,base-metrics-port:,reuse-existing-data-dir,timeout:,kill-old-processes"
LONGOPTS="help,nodes:,data-dir:,run-tests,log-level:,base-port:,base-rpc-port:,trusted-block-root:,portal-bridge,base-metrics-port:,reuse-existing-data-dir,timeout:,kill-old-processes"
# default values
NUM_NODES="64"
@ -48,9 +48,10 @@ REUSE_EXISTING_DATA_DIR="0"
TIMEOUT_DURATION="0"
KILL_OLD_PROCESSES="0"
SCRIPTS_DIR="fluffy/scripts/"
BEACON_CHAIN_BRIDGE="0"
PORTAL_BRIDGE="0"
TRUSTED_BLOCK_ROOT=""
REST_URL="http://127.0.0.1:5052"
# REST_URL="http://127.0.0.1:5052"
REST_URL="http://testing.mainnet.beacon-api.nimbus.team"
print_help() {
cat <<EOF
@ -64,7 +65,7 @@ E.g.: $(basename "$0") --nodes ${NUM_NODES} --data-dir "${DATA_DIR}" # defaults
--base-port bootstrap node's discv5 port (default: ${BASE_PORT})
--base-rpc-port bootstrap node's RPC port (default: ${BASE_RPC_PORT})
--base-metrics-port bootstrap node's metrics server port (default: ${BASE_METRICS_PORT})
--beacon-chain-bridge run a beacon chain bridge attached to the bootstrap node
--portal-bridge run a portal bridge attached to the bootstrap node
--trusted-block-root recent trusted finalized block root to initialize the consensus light client from
--run-tests when enabled run tests else use "htop" to see the fluffy processes without doing any tests
--log-level set the log level (default: ${LOG_LEVEL})
@ -116,8 +117,8 @@ while true; do
TRUSTED_BLOCK_ROOT="$2"
shift 2
;;
--beacon-chain-bridge)
BEACON_CHAIN_BRIDGE="1"
--portal-bridge)
PORTAL_BRIDGE="1"
shift
;;
--base-metrics-port)
@ -196,8 +197,8 @@ fi
# Build the binaries
BINARIES="fluffy"
if [[ "${BEACON_CHAIN_BRIDGE}" == "1" ]]; then
BINARIES="${BINARIES} beacon_chain_bridge"
if [[ "${PORTAL_BRIDGE}" == "1" ]]; then
BINARIES="${BINARIES} portal_bridge"
fi
$MAKE -j ${NPROC} LOG_LEVEL=TRACE ${BINARIES}
@ -242,7 +243,7 @@ if [[ "${TIMEOUT_DURATION}" != "0" ]]; then
fi
PIDS=""
NUM_JOBS=$(( NUM_NODES + BEACON_CHAIN_BRIDGE ))
NUM_JOBS=$(( NUM_NODES + PORTAL_BRIDGE ))
dump_logs() {
LOG_LINES=20
@ -326,17 +327,17 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
fi
done
if [[ "$BEACON_CHAIN_BRIDGE" == "1" ]]; then
if [[ "$PORTAL_BRIDGE" == "1" ]]; then
# Give the nodes time to connect before the bridge (node 0) starts gossip
sleep 5
echo "Starting beacon chain bridge."
./build/beacon_chain_bridge \
sleep 10
echo "Starting portal bridge for beacon network."
./build/portal_bridge beacon \
--rest-url="${REST_URL}" \
--rpc-address="127.0.0.1" \
--rpc-port="${BASE_RPC_PORT}" \
--backfill-amount=128 \
${TRUSTED_BLOCK_ROOT_ARG} \
> "${DATA_DIR}/log_beacon_chain_bridge.txt" 2>&1 &
> "${DATA_DIR}/log_portal_bridge.txt" 2>&1 &
PIDS="${PIDS},$!"
fi

View File

@ -0,0 +1,237 @@
# Fluffy
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
#
# The Portal bridge's task is to inject content into the different Portal networks.
# The bridge acts as a middle man between a content provider (i.e. full node)
# through its exposed API (REST, JSON-RPC, ...), and a Portal node, through the
# Portal JSON-RPC API.
#
# Beacon Network:
#
# For the beacon network a consensus full node is required on one side,
# making use of the Beacon Node REST-API, and a Portal node on the other side,
# making use of the Portal JSON-RPC API.
#
# Portal Network <-> Portal Client (e.g. fluffy) <--JSON-RPC--> bridge <--REST--> consensus client (e.g. Nimbus-eth2)
#
# The Consensus client must support serving the Beacon LC data.
#
# Bootstraps and updates can be backfilled, however how to do this for multiple
# bootstraps is still unsolved.
#
# Updates, optimistic updates and finality updates are injected as they become
# available.
#
# History network:
#
# To be implemented
#
# State network:
#
# To be implemented
#
{.push raises: [].}
import
std/os,
chronos,
confutils, confutils/std/net,
chronicles, chronicles/topics_registry,
json_rpc/clients/httpclient,
beacon_chain/spec/eth2_apis/rest_beacon_client,
../../network/beacon/beacon_content,
../../rpc/portal_rpc_client,
../../logging,
../eth_data_exporter/cl_data_exporter,
./portal_bridge_conf,
./portal_bridge_beacon
proc runBeacon(config: PortalBridgeConf) {.raises: [CatchableError].} =
  ## Run the beacon-network side of the Portal bridge: backfill a LC bootstrap
  ## and a configurable amount of LC updates, then gossip optimistic/finality
  ## updates into the Portal network on every slot.
  notice "Launching Fluffy beacon chain bridge",
    cmdParams = commandLineParams()

  let
    (cfg, forkDigests, beaconClock) = getBeaconData()
    getBeaconTime = beaconClock.getBeaconTimeFn()
    portalRpcClient = newRpcHttpClient()
    restClient = RestClientRef.new(config.restUrl).valueOr:
      fatal "Cannot connect to server", error = $error
      quit 1

  proc backfill(
      beaconRestClient: RestClientRef, rpcAddress: string, rpcPort: Port,
      backfillAmount: uint64, trustedBlockRoot: Option[TrustedDigest])
      {.async.} =
    # Bootstrap backfill, currently just one bootstrap selected by
    # trusted-block-root, could become a selected list, or some other way.
    if trustedBlockRoot.isSome():
      await portalRpcClient.connect(rpcAddress, rpcPort, false)

      let res = await gossipLCBootstrapUpdate(
        beaconRestClient, portalRpcClient,
        trustedBlockRoot.get(),
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC bootstrap", error = res.error

      await portalRpcClient.close()

    # Updates backfill, selected by backfillAmount
    # Might want to alter this to default backfill to the
    # `MIN_EPOCHS_FOR_BLOCK_REQUESTS`.
    # TODO: This can be up to 128, but our JSON-RPC requests fail with a value
    # higher than 16. TBI
    const updatesPerRequest = 16

    let
      wallSlot = getBeaconTime().slotOrZero()
      currentPeriod =
        wallSlot div (SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
      requestAmount = backfillAmount div updatesPerRequest
      leftOver = backFillAmount mod updatesPerRequest

    # Request updates in batches of `updatesPerRequest`, walking backwards
    # from the current sync committee period.
    for i in 0..<requestAmount:
      await portalRpcClient.connect(rpcAddress, rpcPort, false)

      let res = await gossipLCUpdates(
        beaconRestClient, portalRpcClient,
        currentPeriod - updatesPerRequest * (i + 1) + 1, updatesPerRequest,
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC updates", error = res.error

      await portalRpcClient.close()

    # Remainder batch when backfillAmount is not a multiple of the batch size.
    if leftOver > 0:
      await portalRpcClient.connect(rpcAddress, rpcPort, false)

      let res = await gossipLCUpdates(
        beaconRestClient, portalRpcClient,
        currentPeriod - updatesPerRequest * requestAmount - leftOver + 1, leftOver,
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC updates", error = res.error

      await portalRpcClient.close()

  # Progress trackers so each piece of LC data is only gossiped once.
  var
    lastOptimisticUpdateSlot = Slot(0)
    lastFinalityUpdateEpoch = epoch(lastOptimisticUpdateSlot)
    lastUpdatePeriod = sync_committee_period(lastOptimisticUpdateSlot)

  proc onSlotGossip(wallTime: BeaconTime, lastSlot: Slot) {.async.} =
    let
      wallSlot = wallTime.slotOrZero()
      wallEpoch = epoch(wallSlot)
      wallPeriod = sync_committee_period(wallSlot)

    notice "Slot start info",
      slot = wallSlot,
      epoch = wallEpoch,
      period = wallPeriod,
      lastOptimisticUpdateSlot,
      lastFinalityUpdateEpoch,
      lastUpdatePeriod,
      slotsTillNextEpoch =
        SLOTS_PER_EPOCH - (wallSlot mod SLOTS_PER_EPOCH),
      slotsTillNextPeriod =
        SLOTS_PER_SYNC_COMMITTEE_PERIOD -
        (wallSlot mod SLOTS_PER_SYNC_COMMITTEE_PERIOD)

    if wallSlot > lastOptimisticUpdateSlot + 1:
      # TODO: If this turns out to be too tricky to not gossip old updates,
      # then an alternative could be to verify in the gossip calls if the actual
      # slot number received is the correct one, before gossiping into Portal.
      # And/or look into possibly using eth/v1/events for
      # light_client_finality_update and light_client_optimistic_update if that
      # is something that works.

      # Or basically `lightClientOptimisticUpdateSlotOffset`
      await sleepAsync((SECONDS_PER_SLOT div INTERVALS_PER_SLOT).int.seconds)

      await portalRpcClient.connect(
        config.rpcAddress, Port(config.rpcPort), false)

      let res = await gossipLCOptimisticUpdate(
        restClient, portalRpcClient,
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC optimistic update", error = res.error
      else:
        # A finality update is only gossiped once sufficiently into the epoch,
        # and only when enough epochs have passed since the last one.
        if wallEpoch > lastFinalityUpdateEpoch + 2 and
            wallSlot > start_slot(wallEpoch):
          let res = await gossipLCFinalityUpdate(
            restClient, portalRpcClient,
            cfg, forkDigests)

          if res.isErr():
            warn "Error gossiping LC finality update", error = res.error
          else:
            lastFinalityUpdateEpoch = epoch(res.get())

        # Gossip a new LC update when a new sync committee period started.
        if wallPeriod > lastUpdatePeriod and
            wallSlot > start_slot(wallEpoch):
          # TODO: Need to delay timing here also with one slot?
          let res = await gossipLCUpdates(
            restClient, portalRpcClient,
            sync_committee_period(wallSlot).uint64, 1,
            cfg, forkDigests)

          if res.isErr():
            warn "Error gossiping LC update", error = res.error
          else:
            lastUpdatePeriod = wallPeriod

        # `res` here is the optimistic update result from above.
        lastOptimisticUpdateSlot = res.get()

  proc runOnSlotLoop() {.async.} =
    ## Fire `onSlotGossip` once per beacon chain slot.
    var
      curSlot = getBeaconTime().slotOrZero()
      nextSlot = curSlot + 1
      timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime()
    while true:
      await sleepAsync(timeToNextSlot)

      let
        wallTime = getBeaconTime()
        wallSlot = wallTime.slotOrZero()

      await onSlotGossip(wallTime, curSlot)

      curSlot = wallSlot
      nextSlot = wallSlot + 1
      timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime()

  # First do the one-shot backfill, then keep gossiping per slot forever.
  waitFor backfill(
    restClient, config.rpcAddress, config.rpcPort,
    config.backfillAmount, config.trustedBlockRoot)

  asyncSpawn runOnSlotLoop()

  while true:
    poll()
when isMainModule:
  # Temporarily drop the module-wide `raises: []` pragma: `load()` may raise
  # on invalid command-line arguments.
  {.pop.}
  let config = PortalBridgeConf.load()
  {.push raises: [].}

  setupLogging(config.logLevel, config.logStdout)

  # Dispatch on the selected Portal network; only the beacon bridge is
  # implemented so far (see module header).
  case config.cmd
  of PortalBridgeCmd.beacon:
    runBeacon(config)
  of PortalBridgeCmd.history:
    notice "Functionality not yet implemented"
  of PortalBridgeCmd.state:
    notice "Functionality not yet implemented"

View File

@ -1,47 +1,28 @@
# Nimbus
# Fluffy
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Portal bridge to inject beacon chain content into the network
# The bridge acts as a middle man between a consensus full node, through the
# Eth Beacon Node API (REST-API), and a Portal node, through the Portal
# JSON-RPC API.
#
# Portal Network <-> Portal Client (e.g. fluffy) <--JSON-RPC--> bridge <--REST--> consensus client (e.g. Nimbus-eth2)
#
# The Consensus client must support serving the Beacon LC data.
#
# Bootstraps and updates can be backfilled, however how to do this for multiple
# bootstraps is still unsolved.
#
# Updates, optimistic updates and finality updates are injected as they become
# available.
#
{.push raises: [].}
import
std/os,
confutils, confutils/std/net, chronicles, chronicles/topics_registry,
json_rpc/clients/httpclient,
chronos,
chronicles, chronicles/topics_registry,
stew/byteutils,
eth/async_utils,
json_rpc/clients/httpclient,
beacon_chain/spec/eth2_apis/rest_beacon_client,
../../network/beacon/beacon_content,
../../rpc/portal_rpc_client,
../../logging,
../eth_data_exporter/cl_data_exporter,
./beacon_chain_bridge_conf
../eth_data_exporter/cl_data_exporter
const
restRequestsTimeout = 30.seconds
# TODO: From nimbus_binary_common, but we don't want to import that.
proc sleepAsync(t: TimeDiff): Future[void] =
proc sleepAsync*(t: TimeDiff): Future[void] =
sleepAsync(nanoseconds(
if t.nanoseconds < 0: 0'i64 else: t.nanoseconds))
@ -257,183 +238,3 @@ proc gossipLCOptimisticUpdate*(
else:
return err("No LC updates pre Altair")
proc run(config: BeaconBridgeConf) {.raises: [CatchableError].} =
setupLogging(config.logLevel, config.logStdout)
notice "Launching Fluffy beacon chain bridge",
cmdParams = commandLineParams()
let
(cfg, forkDigests, beaconClock) = getBeaconData()
getBeaconTime = beaconClock.getBeaconTimeFn()
portalRpcClient = newRpcHttpClient()
restClient = RestClientRef.new(config.restUrl).valueOr:
fatal "Cannot connect to server", error = $error
quit 1
proc backfill(
beaconRestClient: RestClientRef, rpcAddress: string, rpcPort: Port,
backfillAmount: uint64, trustedBlockRoot: Option[TrustedDigest])
{.async.} =
# Bootstrap backfill, currently just one bootstrap selected by
# trusted-block-root, could become a selected list, or some other way.
if trustedBlockRoot.isSome():
await portalRpcClient.connect(rpcAddress, rpcPort, false)
let res = await gossipLCBootstrapUpdate(
beaconRestClient, portalRpcClient,
trustedBlockRoot.get(),
cfg, forkDigests)
if res.isErr():
warn "Error gossiping LC bootstrap", error = res.error
await portalRpcClient.close()
# Updates backfill, selected by backfillAmount
# Might want to alter this to default backfill to the
# `MIN_EPOCHS_FOR_BLOCK_REQUESTS`.
# TODO: This can be up to 128, but our JSON-RPC requests fail with a value
# higher than 16. TBI
const updatesPerRequest = 16
let
wallSlot = getBeaconTime().slotOrZero()
currentPeriod =
wallSlot div (SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
requestAmount = backfillAmount div updatesPerRequest
leftOver = backFillAmount mod updatesPerRequest
for i in 0..<requestAmount:
await portalRpcClient.connect(rpcAddress, rpcPort, false)
let res = await gossipLCUpdates(
beaconRestClient, portalRpcClient,
currentPeriod - updatesPerRequest * (i + 1) + 1, updatesPerRequest,
cfg, forkDigests)
if res.isErr():
warn "Error gossiping LC updates", error = res.error
await portalRpcClient.close()
if leftOver > 0:
await portalRpcClient.connect(rpcAddress, rpcPort, false)
let res = await gossipLCUpdates(
beaconRestClient, portalRpcClient,
currentPeriod - updatesPerRequest * requestAmount - leftOver + 1, leftOver,
cfg, forkDigests)
if res.isErr():
warn "Error gossiping LC updates", error = res.error
await portalRpcClient.close()
var
lastOptimisticUpdateSlot = Slot(0)
lastFinalityUpdateEpoch = epoch(lastOptimisticUpdateSlot)
lastUpdatePeriod = sync_committee_period(lastOptimisticUpdateSlot)
proc onSlotGossip(wallTime: BeaconTime, lastSlot: Slot) {.async.} =
let
wallSlot = wallTime.slotOrZero()
wallEpoch = epoch(wallSlot)
wallPeriod = sync_committee_period(wallSlot)
notice "Slot start info",
slot = wallSlot,
epoch = wallEpoch,
period = wallPeriod,
lastOptimisticUpdateSlot,
lastFinalityUpdateEpoch,
lastUpdatePeriod,
slotsTillNextEpoch =
SLOTS_PER_EPOCH - (wallSlot mod SLOTS_PER_EPOCH),
slotsTillNextPeriod =
SLOTS_PER_SYNC_COMMITTEE_PERIOD -
(wallSlot mod SLOTS_PER_SYNC_COMMITTEE_PERIOD)
if wallSlot > lastOptimisticUpdateSlot + 1:
# TODO: If this turns out to be too tricky to not gossip old updates,
# then an alternative could be to verify in the gossip calls if the actual
# slot number received is the correct one, before gossiping into Portal.
# And/or look into possibly using eth/v1/events for
# light_client_finality_update and light_client_optimistic_update if that
# is something that works.
# Or basically `lightClientOptimisticUpdateSlotOffset`
await sleepAsync((SECONDS_PER_SLOT div INTERVALS_PER_SLOT).int.seconds)
await portalRpcClient.connect(
config.rpcAddress, Port(config.rpcPort), false)
let res = await gossipLCOptimisticUpdate(
restClient, portalRpcClient,
cfg, forkDigests)
if res.isErr():
warn "Error gossiping LC optimistic update", error = res.error
else:
if wallEpoch > lastFinalityUpdateEpoch + 2 and
wallSlot > start_slot(wallEpoch):
let res = await gossipLCFinalityUpdate(
restClient, portalRpcClient,
cfg, forkDigests)
if res.isErr():
warn "Error gossiping LC finality update", error = res.error
else:
lastFinalityUpdateEpoch = epoch(res.get())
if wallPeriod > lastUpdatePeriod and
wallSlot > start_slot(wallEpoch):
# TODO: Need to delay timing here also with one slot?
let res = await gossipLCUpdates(
restClient, portalRpcClient,
sync_committee_period(wallSlot).uint64, 1,
cfg, forkDigests)
if res.isErr():
warn "Error gossiping LC update", error = res.error
else:
lastUpdatePeriod = wallPeriod
lastOptimisticUpdateSlot = res.get()
proc runOnSlotLoop() {.async.} =
var
curSlot = getBeaconTime().slotOrZero()
nextSlot = curSlot + 1
timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime()
while true:
await sleepAsync(timeToNextSlot)
let
wallTime = getBeaconTime()
wallSlot = wallTime.slotOrZero()
await onSlotGossip(wallTime, curSlot)
curSlot = wallSlot
nextSlot = wallSlot + 1
timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime()
waitFor backfill(
restClient, config.rpcAddress, config.rpcPort,
config.backfillAmount, config.trustedBlockRoot)
asyncSpawn runOnSlotLoop()
while true:
poll()
when isMainModule:
{.pop.}
let config = BeaconBridgeConf.load()
{.push raises: [].}
case config.cmd
of BeaconBridgeCmd.noCommand:
run(config)

View File

@ -1,4 +1,4 @@
# Nimbus
# Fluffy
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -17,10 +17,12 @@ export net
type
TrustedDigest* = MDigest[32 * 8]
BeaconBridgeCmd* = enum
noCommand
PortalBridgeCmd* = enum
beacon = "Run a Portal bridge for the beacon network"
history = "Run a Portal bridge for the history network"
state = "Run a Portal bridge for the state network"
BeaconBridgeConf* = object
PortalBridgeConf* = object
# Logging
logLevel* {.
desc: "Sets the log level"
@ -45,27 +47,33 @@ type
defaultValue: 8545
name: "rpc-port" .}: Port
# Beacon node REST API URL
restUrl* {.
desc: "URL of the beacon node REST service"
defaultValue: "http://127.0.0.1:5052"
name: "rest-url" .}: string
# Backfill options
backFillAmount* {.
desc: "Amount of beacon LC updates to backfill gossip into the network"
defaultValue: 64
name: "backfill-amount" .}: uint64
trustedBlockRoot* {.
desc: "Trusted finalized block root for which to gossip a LC bootstrap into the network"
defaultValue: none(TrustedDigest)
name: "trusted-block-root" .}: Option[TrustedDigest]
case cmd* {.
command
defaultValue: noCommand .}: BeaconBridgeCmd
of noCommand:
desc: ""
.}: PortalBridgeCmd
of PortalBridgeCmd.beacon:
# Beacon node REST API URL
restUrl* {.
desc: "URL of the beacon node REST service"
defaultValue: "http://127.0.0.1:5052"
name: "rest-url" .}: string
# Backfill options
backFillAmount* {.
desc: "Amount of beacon LC updates to backfill gossip into the network"
defaultValue: 64
name: "backfill-amount" .}: uint64
trustedBlockRoot* {.
desc: "Trusted finalized block root for which to gossip a LC bootstrap into the network"
defaultValue: none(TrustedDigest)
name: "trusted-block-root" .}: Option[TrustedDigest]
of PortalBridgeCmd.history:
discard
of PortalBridgeCmd.state:
discard
func parseCmdArg*(T: type TrustedDigest, input: string): T