Initial implementation of Portal fluffy beacon chain bridge (#1751)
* Initial implementation of Portal fluffy beacon chain bridge
* Run windows ci for fluffy with -j1 to avoid OOM
parent f4cf952a4c
commit cb4626b488
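The new `beacon_chain_bridge` tool added below sits between a consensus client's Beacon REST API and a fluffy node's Portal JSON-RPC endpoint: it downloads light client bootstraps and updates over REST and gossips them into the Portal network. A hypothetical invocation, using only options defined in the configuration file added in this commit (the binary path, the flag combination and the root value are assumptions for illustration, not taken from the diff):

    build/beacon_chain_bridge \
      --rest-url=http://127.0.0.1:5052 \
      --rpc-address=127.0.0.1 --rpc-port=8545 \
      --backfill-amount=64 \
      --trusted-block-root=0x<finalized block root>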
@@ -259,7 +259,7 @@ jobs:
         if: runner.os == 'Windows'
         run: |
           gcc --version
-          DEFAULT_MAKE_FLAGS="-j${ncpu}"
+          DEFAULT_MAKE_FLAGS="-j1"
           mingw32-make ${DEFAULT_MAKE_FLAGS} fluffy
           build/fluffy.exe --help
           find . -type d -name ".git" -exec rm -rf {} +
Makefile (2 changed lines)
@@ -66,6 +66,7 @@ TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS))
 
 # Fluffy debugging tools + testing tools
 FLUFFY_TOOLS := \
+	beacon_chain_bridge \
 	beacon_lc_bridge \
 	eth_data_exporter \
 	content_verifier \
@@ -73,6 +74,7 @@ FLUFFY_TOOLS := \
 	portalcli
 FLUFFY_TOOLS_DIRS := \
 	fluffy/tools/beacon_lc_bridge \
+	fluffy/tools/beacon_chain_bridge \
 	fluffy/tools
 # comma-separated values for the "clean" target
 FLUFFY_TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(FLUFFY_TOOLS))
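With this Makefile change the new tool is built like the other fluffy tools; assuming the repository's existing per-tool make rules, something like `make beacon_chain_bridge` (or the target that builds all FLUFFY_TOOLS) should produce the binary under `build/`. The exact target names are an assumption based on the variables above, not part of this diff.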
@@ -0,0 +1,445 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# Portal bridge to inject beacon chain content into the network
# The bridge acts as a middle man between a consensus full node (through the
# Eth Beacon Node REST API) and a Portal node (through the Portal
# JSON-RPC API).
#
# Portal Network <-> Portal Client (e.g. fluffy) <--JSON-RPC--> bridge <--REST--> consensus client (e.g. Nimbus-eth2)
#
# The consensus client must support serving the Beacon LC data.
#
# Bootstraps and updates can be backfilled, however how to do this for multiple
# bootstraps is still unsolved.
#
# Updates, optimistic updates and finality updates are injected as they become
# available.
#

{.push raises: [].}

import
  std/os,
  confutils, confutils/std/net, chronicles, chronicles/topics_registry,
  json_rpc/clients/httpclient,
  chronos,
  stew/byteutils,
  eth/async_utils,
  beacon_chain/spec/eth2_apis/rest_beacon_client,
  ../../network/beacon_light_client/beacon_light_client_content,
  ../../rpc/portal_rpc_client,
  ../../logging,
  ../eth_data_exporter/cl_data_exporter,
  ./beacon_chain_bridge_conf

const
  restRequestsTimeout = 30.seconds

# TODO: Move somewhere common
func forkDigestAtEpoch(
    forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig): ForkDigest =
  forkDigests.atEpoch(epoch, cfg)

# TODO: From nimbus_binary_common, but we don't want to import that.
proc sleepAsync(t: TimeDiff): Future[void] =
  sleepAsync(nanoseconds(
    if t.nanoseconds < 0: 0'i64 else: t.nanoseconds))
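
# The gossip procs below all follow the same pattern: download the light client
# object from the consensus node over REST, SSZ-encode it together with the
# fork digest of its epoch, and push it into the Portal network through the
# portal_beaconLightClientGossip JSON-RPC call.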
proc gossipLCBootstrapUpdate*(
    restClient: RestClientRef, portalRpcClient: RpcHttpClient,
    trustedBlockRoot: Eth2Digest,
    cfg: RuntimeConfig, forkDigests: ref ForkDigests):
    Future[Result[void, string]] {.async.} =
  var bootstrap =
    try:
      info "Downloading LC bootstrap"
      awaitWithTimeout(
        restClient.getLightClientBootstrap(
          trustedBlockRoot,
          cfg, forkDigests),
        restRequestsTimeout
      ):
        return err("Attempt to download LC bootstrap timed out")
    except CatchableError as exc:
      return err("Unable to download LC bootstrap: " & exc.msg)

  withForkyObject(bootstrap):
    when lcDataFork > LightClientDataFork.None:
      let
        slot = forkyObject.header.beacon.slot
        contentKey = encode(bootstrapContentKey(trustedBlockRoot))
        forkDigest = forkDigestAtEpoch(
          forkDigests[], epoch(slot), cfg)
        content = encodeBootstrapForked(
          forkDigest,
          bootstrap
        )

      proc GossipRpcAndClose(): Future[Result[void, string]] {.async.} =
        try:
          let
            contentKeyHex = contentKey.asSeq().toHex()
            peers = await portalRpcClient.portal_beaconLightClientGossip(
              contentKeyHex,
              content.toHex())
          info "Beacon LC bootstrap gossiped", peers,
            contentKey = contentKeyHex
          return ok()
        except CatchableError as e:
          return err("JSON-RPC error: " & $e.msg)

      let res = await GossipRpcAndClose()
      if res.isOk():
        return ok()
      else:
        return err(res.error)

    else:
      return err("No LC bootstraps pre Altair")

proc gossipLCUpdates*(
    restClient: RestClientRef, portalRpcClient: RpcHttpClient,
    startPeriod: uint64, count: uint64,
    cfg: RuntimeConfig, forkDigests: ref ForkDigests):
    Future[Result[void, string]] {.async.} =
  var updates =
    try:
      info "Downloading LC updates", count
      awaitWithTimeout(
        restClient.getLightClientUpdatesByRange(
          SyncCommitteePeriod(startPeriod), count, cfg, forkDigests),
        restRequestsTimeout
      ):
        return err("Attempt to download LC updates timed out")
    except CatchableError as exc:
      return err("Unable to download LC updates: " & exc.msg)

  if updates.len() > 0:
    withForkyObject(updates[0]):
      when lcDataFork > LightClientDataFork.None:
        let
          slot = forkyObject.attested_header.beacon.slot
          period = forkyObject.attested_header.beacon.slot.sync_committee_period
          contentKey = encode(updateContentKey(period.uint64, count))
          forkDigest = forkDigestAtEpoch(
            forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)

          content = encodeLightClientUpdatesForked(
            forkDigest,
            updates
          )

        proc GossipRpcAndClose(): Future[Result[void, string]] {.async.} =
          try:
            let
              contentKeyHex = contentKey.asSeq().toHex()
              peers = await portalRpcClient.portal_beaconLightClientGossip(
                contentKeyHex,
                content.toHex())
            info "Beacon LC update gossiped", peers,
              contentKey = contentKeyHex, period, count
            return ok()
          except CatchableError as e:
            return err("JSON-RPC error: " & $e.msg)

        let res = await GossipRpcAndClose()
        if res.isOk():
          return ok()
        else:
          return err(res.error)
      else:
        return err("No LC updates pre Altair")
  else:
    # TODO:
    # currently only error if no updates at all found. This might be due
    # to selecting future period or too old period.
    # Might want to error here in case count != updates.len or might not want to
    # error at all and perhaps return the updates.len.
    return err("No updates downloaded")

proc gossipLCFinalityUpdate*(
    restClient: RestClientRef, portalRpcClient: RpcHttpClient,
    cfg: RuntimeConfig, forkDigests: ref ForkDigests):
    Future[Result[Slot, string]] {.async.} =
  var update =
    try:
      info "Downloading LC finality update"
      awaitWithTimeout(
        restClient.getLightClientFinalityUpdate(
          cfg, forkDigests),
        restRequestsTimeout
      ):
        return err("Attempt to download LC finality update timed out")
    except CatchableError as exc:
      return err("Unable to download LC finality update: " & exc.msg)

  withForkyObject(update):
    when lcDataFork > LightClientDataFork.None:
      let
        finalizedSlot = forkyObject.finalized_header.beacon.slot
        optimisticSlot = forkyObject.attested_header.beacon.slot
        contentKey = encode(finalityUpdateContentKey(
          finalizedSlot.uint64, optimisticSlot.uint64))
        forkDigest = forkDigestAtEpoch(
          forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
        content = encodeFinalityUpdateForked(
          forkDigest,
          update
        )

      proc GossipRpcAndClose(): Future[Result[void, string]] {.async.} =
        try:
          let
            contentKeyHex = contentKey.asSeq().toHex()
            peers = await portalRpcClient.portal_beaconLightClientGossip(
              contentKeyHex,
              content.toHex())
          info "Beacon LC finality update gossiped", peers,
            contentKey = contentKeyHex, finalizedSlot, optimisticSlot
          return ok()
        except CatchableError as e:
          return err("JSON-RPC error: " & $e.msg)

      let res = await GossipRpcAndClose()
      if res.isOk():
        return ok(finalizedSlot)
      else:
        return err(res.error)

    else:
      return err("No LC updates pre Altair")

proc gossipLCOptimisticUpdate*(
    restClient: RestClientRef, portalRpcClient: RpcHttpClient,
    cfg: RuntimeConfig, forkDigests: ref ForkDigests):
    Future[Result[Slot, string]] {.async.} =
  var update =
    try:
      info "Downloading LC optimistic update"
      awaitWithTimeout(
        restClient.getLightClientOptimisticUpdate(
          cfg, forkDigests),
        restRequestsTimeout
      ):
        return err("Attempt to download LC optimistic update timed out")
    except CatchableError as exc:
      return err("Unable to download LC optimistic update: " & exc.msg)

  withForkyObject(update):
    when lcDataFork > LightClientDataFork.None:
      let
        slot = forkyObject.attested_header.beacon.slot
        contentKey = encode(optimisticUpdateContentKey(slot.uint64))
        forkDigest = forkDigestAtEpoch(
          forkDigests[], epoch(forkyObject.attested_header.beacon.slot), cfg)
        content = encodeOptimisticUpdateForked(
          forkDigest,
          update
        )

      proc GossipRpcAndClose(): Future[Result[void, string]] {.async.} =
        try:
          let
            contentKeyHex = contentKey.asSeq().toHex()
            peers = await portalRpcClient.portal_beaconLightClientGossip(
              contentKeyHex,
              content.toHex())
          info "Beacon LC optimistic update gossiped", peers,
            contentKey = contentKeyHex, slot

          return ok()
        except CatchableError as e:
          return err("JSON-RPC error: " & $e.msg)

      let res = await GossipRpcAndClose()
      if res.isOk():
        return ok(slot)
      else:
        return err(res.error)

    else:
      return err("No LC updates pre Altair")

proc run(config: BeaconBridgeConf) {.raises: [CatchableError].} =
  setupLogging(config.logLevel, config.logStdout)

  notice "Launching Fluffy beacon chain bridge",
    cmdParams = commandLineParams()

  let
    (cfg, forkDigests, beaconClock) = getBeaconData()
    getBeaconTime = beaconClock.getBeaconTimeFn()
    portalRpcClient = newRpcHttpClient()
    restClient = RestClientRef.new(config.restUrl).valueOr:
      fatal "Cannot connect to server", error = $error
      quit 1

  proc backfill(
      beaconRestClient: RestClientRef, rpcAddress: string, rpcPort: Port,
      backfillAmount: uint64, trustedBlockRoot: Option[TrustedDigest])
      {.async.} =
    # Bootstrap backfill, currently just one bootstrap selected by
    # trusted-block-root, could become a selected list, or some other way.
    if trustedBlockRoot.isSome():
      await portalRpcClient.connect(rpcAddress, rpcPort, false)

      let res = await gossipLCBootstrapUpdate(
        beaconRestClient, portalRpcClient,
        trustedBlockRoot.get(),
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC bootstrap", error = res.error

      await portalRpcClient.close()

    # Updates backfill, selected by backfillAmount
    # Might want to alter this to default backfill to the
    # `MIN_EPOCHS_FOR_BLOCK_REQUESTS`.
    # TODO: This can be up to 128, but our JSON-RPC requests fail with a value
    # higher than 16. TBI
    const updatesPerRequest = 16

    let
      wallSlot = getBeaconTime().slotOrZero()
      currentPeriod =
        wallSlot div (SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
      requestAmount = backfillAmount div updatesPerRequest
      leftOver = backFillAmount mod updatesPerRequest
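
    # Illustration with the defaults (not part of the diff): backfillAmount = 64
    # and updatesPerRequest = 16 give requestAmount = 4 and leftOver = 0, so the
    # loop below issues four requests covering periods
    # [currentPeriod - 15 .. currentPeriod], [currentPeriod - 31 .. currentPeriod - 16],
    # and so on, walking backwards in batches of 16 updates.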
    for i in 0..<requestAmount:
      await portalRpcClient.connect(rpcAddress, rpcPort, false)

      let res = await gossipLCUpdates(
        beaconRestClient, portalRpcClient,
        currentPeriod - updatesPerRequest * (i + 1) + 1, updatesPerRequest,
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC updates", error = res.error

      await portalRpcClient.close()

    if leftOver > 0:
      await portalRpcClient.connect(rpcAddress, rpcPort, false)

      let res = await gossipLCUpdates(
        beaconRestClient, portalRpcClient,
        currentPeriod - updatesPerRequest * requestAmount - leftOver + 1, leftOver,
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC updates", error = res.error

      await portalRpcClient.close()

  var
    lastOptimisticUpdateSlot = Slot(0)
    lastFinalityUpdateEpoch = epoch(lastOptimisticUpdateSlot)
    lastUpdatePeriod = sync_committee_period(lastOptimisticUpdateSlot)

  proc onSlotGossip(wallTime: BeaconTime, lastSlot: Slot) {.async.} =
    let
      wallSlot = wallTime.slotOrZero()
      wallEpoch = epoch(wallSlot)
      wallPeriod = sync_committee_period(wallSlot)

    notice "Slot start info",
      slot = wallSlot,
      epoch = wallEpoch,
      period = wallPeriod,
      lastOptimisticUpdateSlot,
      lastFinalityUpdateEpoch,
      lastUpdatePeriod,
      slotsTillNextEpoch =
        SLOTS_PER_EPOCH - (wallSlot mod SLOTS_PER_EPOCH),
      slotsTillNextPeriod =
        SLOTS_PER_SYNC_COMMITTEE_PERIOD -
        (wallSlot mod SLOTS_PER_SYNC_COMMITTEE_PERIOD)

    if wallSlot > lastOptimisticUpdateSlot + 1:
      # TODO: If this turns out to be too tricky to not gossip old updates,
      # then an alternative could be to verify in the gossip calls if the actual
      # slot number received is the correct one, before gossiping into Portal.
      # And/or look into possibly using eth/v1/events for
      # light_client_finality_update and light_client_optimistic_update if that
      # is something that works.

      # Or basically `lightClientOptimisticUpdateSlotOffset`
      await sleepAsync((SECONDS_PER_SLOT div INTERVALS_PER_SLOT).int.seconds)

      await portalRpcClient.connect(
        config.rpcAddress, Port(config.rpcPort), false)

      let res = await gossipLCOptimisticUpdate(
        restClient, portalRpcClient,
        cfg, forkDigests)

      if res.isErr():
        warn "Error gossiping LC optimistic update", error = res.error
      else:
        if wallEpoch > lastFinalityUpdateEpoch + 2 and
            wallSlot > start_slot(wallEpoch):
          let res = await gossipLCFinalityUpdate(
            restClient, portalRpcClient,
            cfg, forkDigests)

          if res.isErr():
            warn "Error gossiping LC finality update", error = res.error
          else:
            lastFinalityUpdateEpoch = epoch(res.get())

        if wallPeriod > lastUpdatePeriod:
          # TODO: Need to delay timing here also with one slot?
          let res = await gossipLCUpdates(
            restClient, portalRpcClient,
            sync_committee_period(wallSlot).uint64, 1,
            cfg, forkDigests)

          if res.isErr():
            warn "Error gossiping LC update", error = res.error
          else:
            lastUpdatePeriod = wallPeriod

        lastOptimisticUpdateSlot = res.get()

  proc runOnSlotLoop() {.async.} =
    var
      curSlot = getBeaconTime().slotOrZero()
      nextSlot = curSlot + 1
      timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime()
    while true:
      await sleepAsync(timeToNextSlot)

      let
        wallTime = getBeaconTime()
        wallSlot = wallTime.slotOrZero()

      await onSlotGossip(wallTime, curSlot)

      curSlot = wallSlot
      nextSlot = wallSlot + 1
      timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime()
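
  # Run order: a one-shot backfill of the bootstrap and historical updates,
  # then the per-slot gossip loop for as long as the process runs.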
  waitFor backfill(
    restClient, config.rpcAddress, config.rpcPort,
    config.backfillAmount, config.trustedBlockRoot)

  asyncSpawn runOnSlotLoop()

  while true:
    poll()

when isMainModule:
  {.pop.}
  let config = BeaconBridgeConf.load()
  {.push raises: [].}

  case config.cmd
  of BeaconBridgeCmd.noCommand:
    run(config)

@@ -0,0 +1,76 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  confutils, confutils/std/net,
  nimcrypto/hash,
  ../../logging

export net

type
  TrustedDigest* = MDigest[32 * 8]

  BeaconBridgeCmd* = enum
    noCommand

  BeaconBridgeConf* = object
    # Logging
    logLevel* {.
      desc: "Sets the log level"
      defaultValue: "INFO"
      name: "log-level" .}: string

    logStdout* {.
      hidden
      desc: "Specifies what kind of logs should be written to stdout (auto, colors, nocolors, json)"
      defaultValueDesc: "auto"
      defaultValue: StdoutLogKind.Auto
      name: "log-format" .}: StdoutLogKind

    # Portal JSON-RPC API server to connect to
    rpcAddress* {.
      desc: "Listening address of the Portal JSON-RPC server"
      defaultValue: "127.0.0.1"
      name: "rpc-address" .}: string

    rpcPort* {.
      desc: "Listening port of the Portal JSON-RPC server"
      defaultValue: 8545
      name: "rpc-port" .}: Port

    # Beacon node REST API URL
    restUrl* {.
      desc: "URL of the beacon node REST service"
      defaultValue: "http://127.0.0.1:5052"
      name: "rest-url" .}: string

    # Backfill options
    backFillAmount* {.
      desc: "Amount of beacon LC updates to backfill gossip into the network"
      defaultValue: 64
      name: "backfill-amount" .}: uint64

    trustedBlockRoot* {.
      desc: "Trusted finalized block root for which to gossip a LC bootstrap into the network"
      defaultValue: none(TrustedDigest)
      name: "trusted-block-root" .}: Option[TrustedDigest]

    case cmd* {.
      command
      defaultValue: noCommand .}: BeaconBridgeCmd
    of noCommand:
      discard

func parseCmdArg*(T: type TrustedDigest, input: string): T
    {.raises: [ValueError].} =
  TrustedDigest.fromHex(input)

func completeCmdArg*(T: type TrustedDigest, input: string): seq[string] =
  return @[]
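
Each `name:` pragma above becomes a command-line option via confutils, and the `parseCmdArg` overload is what lets `--trusted-block-root` be passed directly as a hex digest; this is standard confutils behaviour rather than something introduced by this diff.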

@@ -0,0 +1 @@
-d:"chronicles_sinks=textlines[dynamic],json[dynamic]"
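
This nim.cfg define compiles in both a textlines and a json chronicles sink with dynamic selection, which is presumably what lets the `--log-format` option above pick the output style at runtime.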

@@ -389,7 +389,7 @@ proc run(config: BeaconBridgeConf) {.raises: [CatchableError].} =
   setupLogging(config.logLevel, config.logStdout, none(OutFile))
 
-  notice "Launching Nimbus beacon chain bridge",
+  notice "Launching fluffy beacon chain light bridge",
     cmdParams = commandLineParams(), config
 
   let metadata = loadEth2Network(config.eth2Network)

@@ -15,18 +15,18 @@ import
 
 export net, conf
 
-proc defaultVerifiedProxyDataDir*(): string =
+proc defaultDataDir*(): string =
   let dataDir = when defined(windows):
-    "AppData" / "Roaming" / "FluffyBeaconChainBridge"
+    "AppData" / "Roaming" / "FluffyBeaconLCBridge"
   elif defined(macosx):
-    "Library" / "Application Support" / "FluffyBeaconChainBridge"
+    "Library" / "Application Support" / "FluffyBeaconLCBridge"
   else:
-    ".cache" / "fluffy-beacon-chain-bridge"
+    ".cache" / "fluffy-beacon-lc-bridge"
 
   getHomeDir() / dataDir
 
 const
-  defaultDataVerifiedProxyDirDesc* = defaultVerifiedProxyDataDir()
+  defaultDataDirDesc* = defaultDataDir()
 
 type
   Web3UrlKind* = enum

@@ -58,8 +58,8 @@ type BeaconBridgeConf* = object
   # Storage
   dataDir* {.
     desc: "The directory where beacon_lc_bridge will store all data"
-    defaultValue: defaultVerifiedProxyDataDir()
-    defaultValueDesc: $defaultDataVerifiedProxyDirDesc
+    defaultValue: defaultDataDir()
+    defaultValueDesc: $defaultDataDirDesc
     abbr: "d"
     name: "data-dir" .}: OutDir

@@ -540,7 +540,7 @@ when isMainModule:
     quit 1
 
   of ExporterCmd.beacon:
-    let (cfg, forkDigests) = getBeaconData()
+    let (cfg, forkDigests, _) = getBeaconData()
 
     case config.beaconCmd
     of BeaconCmd.exportLCBootstrap:

@@ -13,13 +13,17 @@ import
   eth/async_utils,
   beacon_chain/networking/network_metadata,
   beacon_chain/spec//eth2_apis/rest_beacon_client,
+  beacon_chain/beacon_clock,
   ../../network/beacon_light_client/beacon_light_client_content,
   ./exporter_common
 
+export beacon_clock
+
 const
   restRequestsTimeout = 30.seconds
 
-proc getBeaconData*(): (RuntimeConfig, ref ForkDigests) {.raises: [IOError].} =
+proc getBeaconData*(): (
+    RuntimeConfig, ref ForkDigests, BeaconClock) {.raises: [IOError].} =
   let
     metadata = getMetadataForNetwork("mainnet")
     genesisState =
@@ -35,7 +39,10 @@ proc getBeaconData*(): (RuntimeConfig, ref ForkDigests) {.raises: [IOError].} =
     forkDigests = newClone ForkDigests.init(
       metadata.cfg, genesis_validators_root)
 
-  return (metadata.cfg, forkDigests)
+    beaconClock = BeaconClock.init(getStateField(genesisState[], genesis_time))
+
+  return (metadata.cfg, forkDigests, beaconClock)
 
 func forkDigestAtEpoch(
     forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig): ForkDigest =
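
The extra BeaconClock element returned by getBeaconData() is what gives the new bridge a wall-clock slot (via getBeaconTimeFn and slotOrZero); existing callers such as eth_data_exporter simply discard it with `_`.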