Prepare Fluffy for beacon light client bridge (#1506)
parent ec2bd4a9c5
commit 11fc2de060
@@ -11,7 +11,7 @@ import
   std/os,
   confutils, confutils/std/net, chronicles, chronicles/topics_registry,
   chronos, metrics, metrics/chronos_httpserver, json_rpc/clients/httpclient,
-  json_rpc/rpcproxy, stew/[byteutils, io2],
+  json_rpc/rpcproxy, stew/[byteutils, io2, results],
   eth/keys, eth/net/nat,
   eth/p2p/discoveryv5/protocol as discv5_protocol,
   beacon_chain/beacon_clock,
@@ -46,6 +46,70 @@ proc initializeBridgeClient(maybeUri: Option[string]): Option[BridgeClient] =
     notice "Failed to initialize bridge client", error = err.msg
     return none(BridgeClient)
 
+proc initBeaconLightClient(
+    network: LightClientNetwork, networkData: NetworkInitData,
+    trustedBlockRoot: Option[Eth2Digest]): LightClient =
+  let
+    getBeaconTime = networkData.clock.getBeaconTimeFn()
+
+    refDigests = newClone networkData.forks
+
+    lc = LightClient.new(
+      network,
+      network.portalProtocol.baseProtocol.rng,
+      networkData.metadata.cfg,
+      refDigests,
+      getBeaconTime,
+      networkData.genesis_validators_root,
+      LightClientFinalizationMode.Optimistic
+    )
+
+  # TODO: For now just log new headers. Ultimately we should also use callbacks
+  # for each lc object to save them to db and offer them to the network.
+  # TODO-2: The above statement sounds like this work should really be done at
+  # a lower layer, and these callbacks are rather for use by the "application".
+  proc onFinalizedHeader(
+      lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
+    withForkyHeader(finalizedHeader):
+      when lcDataFork > LightClientDataFork.None:
+        info "New LC finalized header",
+          finalized_header = shortLog(forkyHeader)
+
+  proc onOptimisticHeader(
+      lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
+    withForkyHeader(optimisticHeader):
+      when lcDataFork > LightClientDataFork.None:
+        info "New LC optimistic header",
+          optimistic_header = shortLog(forkyHeader)
+
+  lc.onFinalizedHeader = onFinalizedHeader
+  lc.onOptimisticHeader = onOptimisticHeader
+  lc.trustedBlockRoot = trustedBlockRoot
+
+  # proc onSecond(time: Moment) =
+  #   let wallSlot = getBeaconTime().slotOrZero()
+  #   # TODO this is a place to enable/disable gossip based on the current status
+  #   # of light client
+  #   # lc.updateGossipStatus(wallSlot + 1)
+
+  # proc runOnSecondLoop() {.async.} =
+  #   let sleepTime = chronos.seconds(1)
+  #   while true:
+  #     let start = chronos.now(chronos.Moment)
+  #     await chronos.sleepAsync(sleepTime)
+  #     let afterSleep = chronos.now(chronos.Moment)
+  #     let sleepTime = afterSleep - start
+  #     onSecond(start)
+  #     let finished = chronos.now(chronos.Moment)
+  #     let processingTime = finished - afterSleep
+  #     trace "onSecond task completed", sleepTime, processingTime
+
+  # onSecond(Moment.now())
+
+  # asyncSpawn runOnSecondLoop()
+
+  lc
+
 proc run(config: PortalConf) {.raises: [CatchableError].} =
   # Make sure dataDir exists
   let pathExists = createPath(config.dataDir.string)
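The callbacks above only log incoming headers; the TODO anticipates also persisting them and offering them to the portal network. A rough sketch of that direction, assuming a `db: LightClientDb` and the `network` parameter are captured from the surrounding scope, and using hypothetical `putHeader`/`offerHeader` helpers that are not part of this commit:

# Sketch only: putHeader and offerHeader are hypothetical helpers, shown to
# illustrate the TODO; the callback in this commit merely logs the header.
proc onFinalizedHeaderWithStorage(
    lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
  withForkyHeader(finalizedHeader):
    when lcDataFork > LightClientDataFork.None:
      info "New LC finalized header",
        finalized_header = shortLog(forkyHeader)
      db.putHeader(forkyHeader)        # hypothetical: persist to LightClientDb
      network.offerHeader(forkyHeader) # hypothetical: offer on the sub-network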
@@ -113,8 +177,10 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
     )
     streamManager = StreamManager.new(d)
 
-    stateNetwork = StateNetwork.new(d, db, streamManager,
-      bootstrapRecords = bootstrapRecords, portalConfig = portalConfig)
+    stateNetwork = Opt.some(StateNetwork.new(
+      d, db, streamManager,
+      bootstrapRecords = bootstrapRecords,
+      portalConfig = portalConfig))
 
     accumulator =
       # Building an accumulator from header epoch files takes > 2m30s and is
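The `Opt` type used here comes from `stew/results`, newly added to the import list above. Wrapping each sub-network in `Opt` lets `run` enable or skip every network independently. A minimal, self-contained illustration of the calls the diff relies on:

import stew/results

# Opt[T] is stew/results' lightweight option type; the diff uses the same
# some/none/isSome/get calls for the optional sub-networks.
let enabled = Opt.some(42)
let disabled = Opt.none(int)

if enabled.isSome():
  echo enabled.get()  # prints 42
doAssert disabled.isNone()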
@@ -132,8 +198,31 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
       except SszError as err:
         raiseAssert "Invalid baked-in accumulator: " & err.msg
 
-    historyNetwork = HistoryNetwork.new(d, db, streamManager, accumulator,
-      bootstrapRecords = bootstrapRecords, portalConfig = portalConfig)
+    historyNetwork = Opt.some(HistoryNetwork.new(
+      d, db, streamManager, accumulator,
+      bootstrapRecords = bootstrapRecords,
+      portalConfig = portalConfig))
+
+    beaconLightClient =
+      # TODO: Currently disabled by default as it is not sufficiently polished.
+      # Eventually this should be always-on functionality.
+      if config.trustedBlockRoot.isSome():
+        let
+          # Fluffy works only over mainnet data currently
+          networkData = loadNetworkData("mainnet")
+          beaconLightClientDb = LightClientDb.new(
+            config.dataDir / "lightClientDb")
+          lightClientNetwork = LightClientNetwork.new(
+            d,
+            beaconLightClientDb,
+            streamManager,
+            networkData.forks,
+            bootstrapRecords = bootstrapRecords)
+
+        Opt.some(initBeaconLightClient(
+          lightClientNetwork, networkData, config.trustedBlockRoot))
+      else:
+        Opt.none(LightClient)
 
   # TODO: If no new network key is generated then we should first check if an
   # enr file exists, and in the case it does read out the seqNum from it and
@@ -143,6 +232,8 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
     fatal "Failed to write the enr file", file = enrFile
     quit 1
 
+
+  ## Start metrics HTTP server
   if config.metricsEnabled:
     let
       address = config.metricsAddress
@@ -155,101 +246,39 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
       # TODO: Ideally we don't have the Exception here
       except Exception as exc: raiseAssert exc.msg
 
+  ## Starting the different networks.
+  d.start()
+  if stateNetwork.isSome():
+    stateNetwork.get().start()
+  if historyNetwork.isSome():
+    historyNetwork.get().start()
+  if beaconLightClient.isSome():
+    let lc = beaconLightClient.get()
+    lc.network.start()
+    lc.start()
+
+  ## Starting the JSON-RPC APIs
   if config.rpcEnabled:
     let ta = initTAddress(config.rpcAddress, config.rpcPort)
     var rpcHttpServerWithProxy = RpcProxy.new([ta], config.proxyUri)
-    rpcHttpServerWithProxy.installEthApiHandlers(historyNetwork)
     rpcHttpServerWithProxy.installDiscoveryApiHandlers(d)
-    rpcHttpServerWithProxy.installPortalApiHandlers(stateNetwork.portalProtocol, "state")
-    rpcHttpServerWithProxy.installPortalApiHandlers(historyNetwork.portalProtocol, "history")
-    rpcHttpServerWithProxy.installPortalDebugApiHandlers(stateNetwork.portalProtocol, "state")
-    rpcHttpServerWithProxy.installPortalDebugApiHandlers(historyNetwork.portalProtocol, "history")
-    # TODO for now we can only proxy to local node (or remote one without ssl) to make it possible
-    # to call infura https://github.com/status-im/nim-json-rpc/pull/101 needs to get merged for http client to support https/
+    if stateNetwork.isSome():
+      rpcHttpServerWithProxy.installPortalApiHandlers(
+        stateNetwork.get().portalProtocol, "state")
+    if historyNetwork.isSome():
+      rpcHttpServerWithProxy.installEthApiHandlers(historyNetwork.get())
+      rpcHttpServerWithProxy.installPortalApiHandlers(
+        historyNetwork.get().portalProtocol, "history")
+      rpcHttpServerWithProxy.installPortalDebugApiHandlers(
+        historyNetwork.get().portalProtocol, "history")
+    if beaconLightClient.isSome():
+      rpcHttpServerWithProxy.installPortalApiHandlers(
+        beaconLightClient.get().network.portalProtocol, "beaconLightClient")
+    # TODO: Test proxy with remote node over HTTPS
     waitFor rpcHttpServerWithProxy.start()
 
   let bridgeClient = initializeBridgeClient(config.bridgeUri)
 
-  d.start()
-
-  # TODO: Currently disabled by default as it is not stable/polished enough,
-  # ultimatetely this should probably be always on.
-  if config.trustedBlockRoot.isSome():
-    # fluffy light client works only over mainnet data
-    let
-      networkData = loadNetworkData("mainnet")
-
-      db = LightClientDb.new(config.dataDir / "lightClientDb")
-
-      lightClientNetwork = LightClientNetwork.new(
-        d,
-        db,
-        streamManager,
-        networkData.forks,
-        bootstrapRecords = bootstrapRecords)
-
-      getBeaconTime = networkData.clock.getBeaconTimeFn()
-
-      refDigests = newClone networkData.forks
-
-      lc = LightClient.new(
-        lightClientNetwork,
-        rng,
-        networkData.metadata.cfg,
-        refDigests,
-        getBeaconTime,
-        networkData.genesis_validators_root,
-        LightClientFinalizationMode.Optimistic
-      )
-
-    # TODO: For now just log headers. Ultimately we should also use callbacks for each
-    # lc object to save them to db and offer them to the network.
-    proc onFinalizedHeader(
-        lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
-      withForkyHeader(finalizedHeader):
-        when lcDataFork > LightClientDataFork.None:
-          info "New LC finalized header",
-            finalized_header = shortLog(forkyHeader)
-
-    proc onOptimisticHeader(
-        lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
-      withForkyHeader(optimisticHeader):
-        when lcDataFork > LightClientDataFork.None:
-          info "New LC optimistic header",
-            optimistic_header = shortLog(forkyHeader)
-
-    lc.onFinalizedHeader = onFinalizedHeader
-    lc.onOptimisticHeader = onOptimisticHeader
-    lc.trustedBlockRoot = config.trustedBlockRoot
-
-    proc onSecond(time: Moment) =
-      let wallSlot = getBeaconTime().slotOrZero()
-      # TODO this is a place to enable/disable gossip based on the current status
-      # of light client
-      # lc.updateGossipStatus(wallSlot + 1)
-
-    proc runOnSecondLoop() {.async.} =
-      let sleepTime = chronos.seconds(1)
-      while true:
-        let start = chronos.now(chronos.Moment)
-        await chronos.sleepAsync(sleepTime)
-        let afterSleep = chronos.now(chronos.Moment)
-        let sleepTime = afterSleep - start
-        onSecond(start)
-        let finished = chronos.now(chronos.Moment)
-        let processingTime = finished - afterSleep
-        trace "onSecond task completed", sleepTime, processingTime
-
-    onSecond(Moment.now())
-
-    lightClientNetwork.start()
-    lc.start()
-
-    asyncSpawn runOnSecondLoop()
-
-  historyNetwork.start()
-  stateNetwork.start()
-
   runForever()
 
 when isMainModule:
@@ -27,7 +27,7 @@ type
       gcsafe, raises: [].}
 
   LightClient* = ref object
-    network: LightClientNetwork
+    network*: LightClientNetwork
     cfg: RuntimeConfig
     forkDigests: ref ForkDigests
     getBeaconTime: GetBeaconTimeFn
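The one-character change above is Nim's export marker: `network*` makes the field visible to importing modules, which `run` now needs in order to call `lc.network.start()` and to install RPC handlers on `beaconLightClient.get().network.portalProtocol`. A minimal illustration of the visibility rule:

# In a hypothetical module a.nim:
type
  Example* = ref object
    visible*: int  # exported: readable from modules that import a
    hidden: int    # not exported: private to a.nim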
@@ -103,10 +103,15 @@ func decodeSsz*(input: openArray[byte], T: type): Result[T, string] =
   except SszError as e:
     err(e.msg)
 
-# TODO: Not sure at this point how this API should look best, but the current
-# version is a bit weird as it provides both a Forked object and a forkDigest
-# Lets see when we get to used it in the bridge, might require something
-# like `forkDigestAtEpoch` instead.
+# Yes, this API is odd as you pass a SomeForkedLightClientObject yet still have
+# to also pass the ForkDigest. This is because we can't just select the right
+# digest through the LightClientDataFork here as LightClientDataFork and
+# ConsensusFork are not mapped 1-to-1. There is loss of fork data.
+# This means we need to get the ConsensusFork directly, which is possible by
+# passing the epoch (slot) from the object through `forkDigestAtEpoch`. This
+# however requires the runtime config which is part of the `Eth2Node` object.
+# Not something we would like to include as a parameter here, so we stick with
+# just passing the forkDigest and doing the work outside of this encode call.
 func encodeForkedLightClientObject*(
     obj: SomeForkedLightClientObject,
     forkDigest: ForkDigest): seq[byte] =
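The rewritten comment explains why callers must supply the `ForkDigest` themselves: it has to be derived from the object's own epoch via `forkDigestAtEpoch`, which needs the runtime config. A sketch of that caller-side pattern, assuming `forkDigests: ForkDigests` and `cfg: RuntimeConfig` are available (for example from the loaded network data) and that the relevant beacon_chain fork modules are imported:

# Sketch of the caller-side pattern the comment describes: pick the digest
# from the update's own slot, outside of the encode call.
proc encodeUpdate(
    update: ForkedLightClientUpdate,
    forkDigests: ForkDigests, cfg: RuntimeConfig): seq[byte] =
  withForkyUpdate(update):
    when lcDataFork > LightClientDataFork.None:
      let
        epoch = forkyUpdate.attested_header.beacon.slot.epoch
        digest = forkDigests.forkDigestAtEpoch(epoch, cfg)
      encodeForkedLightClientObject(update, digest)
    else:
      newSeq[byte]()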
@@ -49,3 +49,29 @@ proc portal_historyRecursiveFindContent(contentKey: string): string
 proc portal_historyStore(contentKey: string, contentValue: string): bool
 proc portal_historyLocalContent(contentKey: string): string
 proc portal_historyGossip(contentKey: string, contentValue: string): int
+
+## Portal Beacon Light Client Network json-rpc calls
+proc portal_beaconLightClientNodeInfo(): NodeInfo
+proc portal_beaconLightClientRoutingTableInfo(): RoutingTableInfo
+proc portal_beaconLightClientAddEnr(enr: Record): bool
+proc portal_beaconLightClientAddEnrs(enrs: seq[Record]): bool
+proc portal_beaconLightClientGetEnr(nodeId: NodeId): Record
+proc portal_beaconLightClientDeleteEnr(nodeId: NodeId): bool
+proc portal_beaconLightClientLookupEnr(nodeId: NodeId): Record
+proc portal_beaconLightClientPing(enr: Record): tuple[
+  enrSeq: uint64, customPayload: string]
+proc portal_beaconLightClientFindNodes(enr: Record): seq[Record]
+proc portal_beaconLightClientFindContent(enr: Record, contentKey: string): tuple[
+  connectionId: Option[string],
+  content: Option[string],
+  enrs: Option[seq[Record]]]
+proc portal_beaconLightClientFindContentFull(enr: Record, contentKey: string): tuple[
+  content: Option[string],
+  enrs: Option[seq[Record]]]
+proc portal_beaconLightClientOffer(
+  enr: Record, contentKey: string, contentValue: string): string
+proc portal_beaconLightClientRecursiveFindNodes(nodeId: NodeId): seq[Record]
+proc portal_beaconLightClientRecursiveFindContent(contentKey: string): string
+proc portal_beaconLightClientStore(contentKey: string, contentValue: string): bool
+proc portal_beaconLightClientLocalContent(contentKey: string): string
+proc portal_beaconLightClientGossip(contentKey: string, contentValue: string): int
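Like the portal_history* calls above, these map onto the portal wire protocol, but target the beacon light client sub-network. As a usage sketch, an endpoint could be queried with nim-json-rpc's HTTP client, which fluffy already imports; the address, port, and exact client call signatures below are assumptions for illustration, not confirmed by this commit:

import std/json, chronos, json_rpc/clients/httpclient

proc queryRoutingTable() {.async.} =
  # Assumes a local fluffy node started with the JSON-RPC server enabled
  # and listening on this address/port.
  let client = newRpcHttpClient()
  await client.connect("http://127.0.0.1:8545")
  # Query the routing table of the new beacon light client sub-network.
  let res = await client.call("portal_beaconLightClientRoutingTableInfo", %[])
  echo res

waitFor queryRoutingTable()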