Mirror of https://github.com/status-im/nimbus-eth1.git
Add initial implementation of portal powered light client (#1280)
* Add initial implementation of portal powered light client
parent a8df4c1165, commit 1c1c6f56cc
fluffy/network/beacon_light_client/beacon_light_client.nim (new file, 160 lines)
@@ -0,0 +1,160 @@
# beacon chain light client
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  chronicles,
  eth/p2p/discoveryv5/random2,
  beacon_chain/gossip_processing/light_client_processor,
  beacon_chain/spec/datatypes/altair,
  beacon_chain/beacon_clock,
  "."/[light_client_network, beacon_light_client_manager]

export LightClientFinalizationMode

logScope: topics = "lightcl"

type
  LightClientHeaderCallback* =
    proc(lightClient: LightClient, header: BeaconBlockHeader) {.
      gcsafe, raises: [Defect].}

  LightClient* = ref object
    network: LightClientNetwork
    cfg: RuntimeConfig
    forkDigests: ref ForkDigests
    getBeaconTime: GetBeaconTimeFn
    store: ref Option[LightClientStore]
    processor: ref LightClientProcessor
    manager: LightClientManager
    onFinalizedHeader*, onOptimisticHeader*: LightClientHeaderCallback
    trustedBlockRoot*: Option[Eth2Digest]

func finalizedHeader*(lightClient: LightClient): Opt[BeaconBlockHeader] =
  if lightClient.store[].isSome:
    ok lightClient.store[].get.finalized_header
  else:
    err()

func optimisticHeader*(lightClient: LightClient): Opt[BeaconBlockHeader] =
  if lightClient.store[].isSome:
    ok lightClient.store[].get.optimistic_header
  else:
    err()

proc new*(
    T: type LightClient,
    network: LightClientNetwork,
    rng: ref HmacDrbgContext,
    dumpEnabled: bool,
    dumpDirInvalid, dumpDirIncoming: string,
    cfg: RuntimeConfig,
    forkDigests: ref ForkDigests,
    getBeaconTime: GetBeaconTimeFn,
    genesis_validators_root: Eth2Digest,
    finalizationMode: LightClientFinalizationMode): T =
  let lightClient = LightClient(
    network: network,
    cfg: cfg,
    forkDigests: forkDigests,
    getBeaconTime: getBeaconTime,
    store: (ref Option[LightClientStore])())

  func getTrustedBlockRoot(): Option[Eth2Digest] =
    lightClient.trustedBlockRoot

  proc onStoreInitialized() =
    discard

  proc onFinalizedHeader() =
    if lightClient.onFinalizedHeader != nil:
      lightClient.onFinalizedHeader(
        lightClient, lightClient.finalizedHeader.get)

  proc onOptimisticHeader() =
    if lightClient.onOptimisticHeader != nil:
      lightClient.onOptimisticHeader(
        lightClient, lightClient.optimisticHeader.get)

  lightClient.processor = LightClientProcessor.new(
    dumpEnabled, dumpDirInvalid, dumpDirIncoming,
    cfg, genesis_validators_root, finalizationMode,
    lightClient.store, getBeaconTime, getTrustedBlockRoot,
    onStoreInitialized, onFinalizedHeader, onOptimisticHeader)

  proc lightClientVerifier(obj: SomeLightClientObject):
      Future[Result[void, BlockError]] =
    let resfut = newFuture[Result[void, BlockError]]("lightClientVerifier")
    lightClient.processor[].addObject(MsgSource.gossip, obj, resfut)
    resfut

  proc bootstrapVerifier(obj: altair.LightClientBootstrap): auto =
    lightClientVerifier(obj)
  proc updateVerifier(obj: altair.LightClientUpdate): auto =
    lightClientVerifier(obj)
  proc finalityVerifier(obj: altair.LightClientFinalityUpdate): auto =
    lightClientVerifier(obj)
  proc optimisticVerifier(obj: altair.LightClientOptimisticUpdate): auto =
    lightClientVerifier(obj)

  func isLightClientStoreInitialized(): bool =
    lightClient.store[].isSome

  func isNextSyncCommitteeKnown(): bool =
    if lightClient.store[].isSome:
      lightClient.store[].get.is_next_sync_committee_known
    else:
      false

  func getFinalizedPeriod(): SyncCommitteePeriod =
    if lightClient.store[].isSome:
      lightClient.store[].get.finalized_header.slot.sync_committee_period
    else:
      GENESIS_SLOT.sync_committee_period

  func getOptimisticPeriod(): SyncCommitteePeriod =
    if lightClient.store[].isSome:
      lightClient.store[].get.optimistic_header.slot.sync_committee_period
    else:
      GENESIS_SLOT.sync_committee_period

  lightClient.manager = LightClientManager.init(
    lightClient.network, rng, getTrustedBlockRoot,
    bootstrapVerifier, updateVerifier, finalityVerifier, optimisticVerifier,
    isLightClientStoreInitialized, isNextSyncCommitteeKnown,
    getFinalizedPeriod, getOptimisticPeriod, getBeaconTime)

  lightClient

proc new*(
    T: type LightClient,
    network: LightClientNetwork,
    rng: ref HmacDrbgContext,
    cfg: RuntimeConfig,
    forkDigests: ref ForkDigests,
    getBeaconTime: GetBeaconTimeFn,
    genesis_validators_root: Eth2Digest,
    finalizationMode: LightClientFinalizationMode): T =
  LightClient.new(
    network, rng,
    dumpEnabled = false, dumpDirInvalid = ".", dumpDirIncoming = ".",
    cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode
  )

proc start*(lightClient: LightClient) =
  notice "Starting light client",
    trusted_block_root = lightClient.trustedBlockRoot
  lightClient.manager.start()

proc resetToFinalizedHeader*(
    lightClient: LightClient,
    header: BeaconBlockHeader,
    current_sync_committee: SyncCommittee) =
  lightClient.processor[].resetToFinalizedHeader(header, current_sync_committee)
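The test added at the end of this diff drives this API end to end. As a minimal usage sketch (an editorial illustration, not part of the commit; `portalNetwork`, `rng`, `cfg`, `forkDigests`, `getBeaconTime`, `genesisValidatorsRoot` and `trustedRoot` are assumed to already be in scope, with chronicles and std/options imported):

let lc = LightClient.new(
  portalNetwork, rng, cfg, forkDigests, getBeaconTime,
  genesisValidatorsRoot, LightClientFinalizationMode.Optimistic)

lc.onFinalizedHeader =
  proc(lightClient: LightClient, header: BeaconBlockHeader) {.gcsafe, raises: [Defect].} =
    notice "New finalized header", slot = header.slot

lc.trustedBlockRoot = some trustedRoot  # root of the bootstrap to fetch over the portal network
lc.start()                              # the manager loop now requests bootstrap and updates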
@@ -0,0 +1,340 @@
# beacon chain light client
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  std/typetraits,
  chronos, chronicles, stew/[base10, results],
  eth/p2p/discoveryv5/random2,
  beacon_chain/spec/datatypes/altair,
  beacon_chain/beacon_clock,
  ./light_client_network

from beacon_chain/consensus_object_pools/block_pools_types import BlockError

logScope:
  topics = "lcman"

# https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/altair/light-client/p2p-interface.md#configuration
const MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128

type
  Nothing = object
  NetRes*[T] = Result[T, void]
  Endpoint[K, V] =
    (K, V) # https://github.com/nim-lang/Nim/issues/19531
  Bootstrap =
    Endpoint[Eth2Digest, altair.LightClientBootstrap]
  UpdatesByRange =
    Endpoint[Slice[SyncCommitteePeriod], altair.LightClientUpdate]
  FinalityUpdate =
    Endpoint[Nothing, altair.LightClientFinalityUpdate]
  OptimisticUpdate =
    Endpoint[Nothing, altair.LightClientOptimisticUpdate]

  ValueVerifier[V] =
    proc(v: V): Future[Result[void, BlockError]] {.gcsafe, raises: [Defect].}
  BootstrapVerifier* =
    ValueVerifier[altair.LightClientBootstrap]
  UpdateVerifier* =
    ValueVerifier[altair.LightClientUpdate]
  FinalityUpdateVerifier* =
    ValueVerifier[altair.LightClientFinalityUpdate]
  OptimisticUpdateVerifier* =
    ValueVerifier[altair.LightClientOptimisticUpdate]

  GetTrustedBlockRootCallback* =
    proc(): Option[Eth2Digest] {.gcsafe, raises: [Defect].}
  GetBoolCallback* =
    proc(): bool {.gcsafe, raises: [Defect].}
  GetSyncCommitteePeriodCallback* =
    proc(): SyncCommitteePeriod {.gcsafe, raises: [Defect].}

  LightClientManager* = object
    network: LightClientNetwork
    rng: ref HmacDrbgContext
    getTrustedBlockRoot: GetTrustedBlockRootCallback
    bootstrapVerifier: BootstrapVerifier
    updateVerifier: UpdateVerifier
    finalityUpdateVerifier: FinalityUpdateVerifier
    optimisticUpdateVerifier: OptimisticUpdateVerifier
    isLightClientStoreInitialized: GetBoolCallback
    isNextSyncCommitteeKnown: GetBoolCallback
    getFinalizedPeriod: GetSyncCommitteePeriodCallback
    getOptimisticPeriod: GetSyncCommitteePeriodCallback
    getBeaconTime: GetBeaconTimeFn
    loopFuture: Future[void]

func init*(
    T: type LightClientManager,
    network: LightClientNetwork,
    rng: ref HmacDrbgContext,
    getTrustedBlockRoot: GetTrustedBlockRootCallback,
    bootstrapVerifier: BootstrapVerifier,
    updateVerifier: UpdateVerifier,
    finalityUpdateVerifier: FinalityUpdateVerifier,
    optimisticUpdateVerifier: OptimisticUpdateVerifier,
    isLightClientStoreInitialized: GetBoolCallback,
    isNextSyncCommitteeKnown: GetBoolCallback,
    getFinalizedPeriod: GetSyncCommitteePeriodCallback,
    getOptimisticPeriod: GetSyncCommitteePeriodCallback,
    getBeaconTime: GetBeaconTimeFn
): LightClientManager =
  ## Initialize light client manager.
  LightClientManager(
    network: network,
    rng: rng,
    getTrustedBlockRoot: getTrustedBlockRoot,
    bootstrapVerifier: bootstrapVerifier,
    updateVerifier: updateVerifier,
    finalityUpdateVerifier: finalityUpdateVerifier,
    optimisticUpdateVerifier: optimisticUpdateVerifier,
    isLightClientStoreInitialized: isLightClientStoreInitialized,
    isNextSyncCommitteeKnown: isNextSyncCommitteeKnown,
    getFinalizedPeriod: getFinalizedPeriod,
    getOptimisticPeriod: getOptimisticPeriod,
    getBeaconTime: getBeaconTime
  )

proc isGossipSupported*(
    self: LightClientManager,
    period: SyncCommitteePeriod
): bool =
  ## Indicate whether the light client is sufficiently synced to accept gossip.
  if not self.isLightClientStoreInitialized():
    return false

  let
    finalizedPeriod = self.getFinalizedPeriod()
    isNextSyncCommitteeKnown = self.isNextSyncCommitteeKnown()
  if isNextSyncCommitteeKnown:
    period <= finalizedPeriod + 1
  else:
    period <= finalizedPeriod

# https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
proc doRequest(
    e: typedesc[Bootstrap],
    n: LightClientNetwork,
    blockRoot: Eth2Digest
): Future[NetRes[altair.LightClientBootstrap]] =
  n.getLightClientBootstrap(blockRoot)

# https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
type LightClientUpdatesByRangeResponse = NetRes[seq[altair.LightClientUpdate]]
proc doRequest(
    e: typedesc[UpdatesByRange],
    n: LightClientNetwork,
    periods: Slice[SyncCommitteePeriod]
): Future[LightClientUpdatesByRangeResponse] =
  let
    startPeriod = periods.a
    lastPeriod = periods.b
    reqCount = min(periods.len, MAX_REQUEST_LIGHT_CLIENT_UPDATES).uint64
  n.getLightClientUpdatesByRange(
    distinctBase(startPeriod),
    reqCount
  )

# https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
proc doRequest(
    e: typedesc[FinalityUpdate],
    n: LightClientNetwork
): Future[NetRes[altair.LightClientFinalityUpdate]] =
  n.getLightClientFinalityUpdate()

# https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
proc doRequest(
    e: typedesc[OptimisticUpdate],
    n: LightClientNetwork
): Future[NetRes[altair.LightClientOptimisticUpdate]] =
  n.getLightClientOptimisticUpdate()

template valueVerifier[E](
    self: LightClientManager,
    e: typedesc[E]
): ValueVerifier[E.V] =
  when E.V is altair.LightClientBootstrap:
    self.bootstrapVerifier
  elif E.V is altair.LightClientUpdate:
    self.updateVerifier
  elif E.V is altair.LightClientFinalityUpdate:
    self.finalityUpdateVerifier
  elif E.V is altair.LightClientOptimisticUpdate:
    self.optimisticUpdateVerifier
  else: static: doAssert false

iterator values(v: auto): auto =
  ## Local helper for `workerTask` to share the same implementation for both
  ## scalar and aggregate values, by treating scalars as 1-length aggregates.
  when v is seq:
    for i in v:
      yield i
  else:
    yield v

proc workerTask[E](
    self: LightClientManager,
    e: typedesc[E],
    key: E.K
): Future[bool] {.async.} =
  var
    didProgress = false
  try:
    let value =
      when E.K is Nothing:
        await E.doRequest(self.network)
      else:
        await E.doRequest(self.network, key)
    if value.isOk:
      for val in value.get.values:
        let res = await self.valueVerifier(E)(val)
        if res.isErr:
          case res.error
          of BlockError.MissingParent:
            # Stop, requires different request to progress
            return didProgress
          of BlockError.Duplicate:
            # Ignore, a concurrent request may have already fulfilled this
            when E.V is altair.LightClientBootstrap:
              didProgress = true
            else:
              discard
          of BlockError.UnviableFork:
            notice "Received value from an unviable fork", value = val.shortLog
            return didProgress
          of BlockError.Invalid:
            warn "Received invalid value", value = val.shortLog
            return didProgress
        else:
          didProgress = true
    else:
      debug "Failed to receive value on request", value
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    debug "Unexpected exception while receiving value", exc = exc.msg
    raise exc

  return didProgress

proc query[E](
    self: LightClientManager,
    e: typedesc[E],
    key: E.K
): Future[bool] =
  # TODO Consider making few requests concurrently
  return self.workerTask(e, key)

template query(
    self: LightClientManager,
    e: typedesc[UpdatesByRange],
    key: SyncCommitteePeriod
): Future[bool] =
  self.query(e, key .. key)

template query[E](
    self: LightClientManager,
    e: typedesc[E]
): Future[bool] =
  self.query(e, Nothing())

type SchedulingMode = enum
  Soon,
  CurrentPeriod,
  NextPeriod

func fetchTime(
    self: LightClientManager,
    wallTime: BeaconTime,
    schedulingMode: SchedulingMode
): BeaconTime =
  let
    remainingTime =
      case schedulingMode:
      of Soon:
        chronos.seconds(0)
      of CurrentPeriod:
        let
          wallPeriod = wallTime.slotOrZero().sync_committee_period
          deadlineSlot = (wallPeriod + 1).start_slot - 1
          deadline = deadlineSlot.start_beacon_time()
        chronos.nanoseconds((deadline - wallTime).nanoseconds)
      of NextPeriod:
        chronos.seconds(
          (SLOTS_PER_SYNC_COMMITTEE_PERIOD * SECONDS_PER_SLOT).int64)
    minDelay = max(remainingTime div 8, chronos.seconds(10))
    jitterSeconds = (minDelay * 2).seconds
    jitterDelay = chronos.seconds(self.rng[].rand(jitterSeconds).int64)
  return wallTime + minDelay + jitterDelay
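To illustrate the scheduling above (an editorial sketch, not part of the committed file): in Soon mode `remainingTime` is zero, so the computed delay degenerates to a fixed minimum plus jitter.

import chronos

# Worked example of fetchTime for SchedulingMode `Soon` (sketch only):
let
  remainingTime = chronos.seconds(0)                        # Soon: no period deadline left
  minDelay = max(remainingTime div 8, chronos.seconds(10))  # clamps to 10 seconds
  jitterSeconds = (minDelay * 2).seconds                    # 20
# jitterDelay is drawn uniformly from 0..jitterSeconds, so the next fetch is
# scheduled 10..30 seconds after wallTime.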
# https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/altair/light-client/light-client.md#light-client-sync-process
proc loop(self: LightClientManager) {.async.} =
  var nextFetchTime = self.getBeaconTime()
  while true:
    # Periodically wake and check for changes
    let wallTime = self.getBeaconTime()
    if wallTime < nextFetchTime:
      await sleepAsync(chronos.seconds(2))
      continue

    # Obtain bootstrap data once a trusted block root is supplied
    if not self.isLightClientStoreInitialized():
      let trustedBlockRoot = self.getTrustedBlockRoot()
      if trustedBlockRoot.isNone:
        await sleepAsync(chronos.seconds(2))
        continue

      let didProgress = await self.query(Bootstrap, trustedBlockRoot.get)
      if not didProgress:
        nextFetchTime = self.fetchTime(wallTime, Soon)
      continue

    # Fetch updates
    var allowWaitNextPeriod = false
    let
      finalized = self.getFinalizedPeriod()
      optimistic = self.getOptimisticPeriod()
      current = wallTime.slotOrZero().sync_committee_period
      isNextSyncCommitteeKnown = self.isNextSyncCommitteeKnown()

      didProgress =
        if finalized == optimistic and not isNextSyncCommitteeKnown:
          if finalized >= current:
            await self.query(UpdatesByRange, finalized)
          else:
            await self.query(UpdatesByRange, finalized ..< current)
        elif finalized + 1 < current:
          await self.query(UpdatesByRange, finalized + 1 ..< current)
        elif finalized != optimistic:
          await self.query(FinalityUpdate)
        else:
          allowWaitNextPeriod = true
          await self.query(OptimisticUpdate)

      schedulingMode =
        if not didProgress or not self.isGossipSupported(current):
          Soon
        elif not allowWaitNextPeriod:
          CurrentPeriod
        else:
          NextPeriod

    nextFetchTime = self.fetchTime(wallTime, schedulingMode)

proc start*(self: var LightClientManager) =
  ## Start light client manager's loop.
  doAssert self.loopFuture == nil
  self.loopFuture = self.loop()

proc stop*(self: var LightClientManager) {.async.} =
  ## Stop light client manager's loop.
  if self.loopFuture != nil:
    await self.loopFuture.cancelAndWait()
    self.loopFuture = nil
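For orientation (a sketch, not code from this commit; it assumes a `network`, an `rng` and the callback set that `LightClient.new` wires up above), the manager's lifecycle is init, start and, on shutdown, stop:

var manager = LightClientManager.init(
  network, rng, getTrustedBlockRoot,
  bootstrapVerifier, updateVerifier, finalityVerifier, optimisticVerifier,
  isLightClientStoreInitialized, isNextSyncCommitteeKnown,
  getFinalizedPeriod, getOptimisticPeriod, getBeaconTime)

manager.start()          # spawns the `loop` future that fetches bootstrap and updates
# ... later, on shutdown ...
waitFor manager.stop()   # cancels and awaits the loop future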
@@ -9,7 +9,7 @@

import
  std/[options, tables],
-  stew/[results, arrayops], chronos, chronicles,
+  stew/results, chronos, chronicles,
  eth/p2p/discoveryv5/[protocol, enr],
  beacon_chain/spec/forks,
  beacon_chain/spec/datatypes/[phase0, altair, bellatrix],
@@ -45,7 +45,7 @@ proc dbGetHandler(db: ContentDB, contentId: ContentId):

proc getLightClientBootstrap*(
    l: LightClientNetwork,
-    trustedRoot: Digest): Future[results.Opt[altair.LightClientBootstrap]] {.async.}=
+    trustedRoot: Digest): Future[results.Opt[altair.LightClientBootstrap]] {.async.} =
  let
    bk = LightClientBootstrapKey(blockHash: trustedRoot)
    ck = ContentKey(
@@ -73,6 +73,25 @@ proc getLightClientBootstrap*(
  # above
  return Opt.some(decodingResult.get())

proc getLightClientUpdatesByRange*(
    l: LightClientNetwork,
    startPeriod: uint64,
    count: uint64): Future[results.Opt[seq[altair.LightClientUpdate]]] {.async.} =
  # TODO: Not implemented!
  return Opt.none(seq[altair.LightClientUpdate])

proc getLightClientFinalityUpdate*(
    l: LightClientNetwork
  ): Future[results.Opt[altair.LightClientFinalityUpdate]] {.async.} =
  # TODO: Not implemented!
  return Opt.none(altair.LightClientFinalityUpdate)

proc getLightClientOptimisticUpdate*(
    l: LightClientNetwork
  ): Future[results.Opt[altair.LightClientOptimisticUpdate]] {.async.} =
  # TODO: Not implemented!
  return Opt.none(altair.LightClientOptimisticUpdate)

proc new*(
    T: type LightClientNetwork,
    baseProtocol: protocol.Protocol,
@@ -16,6 +16,4 @@ import
  ./test_history_network,
  ./test_content_db,
  ./test_discovery_rpc,
-  ./test_bridge_parser,
-  ./test_light_client_content,
-  ./test_light_client_network
+  ./test_bridge_parser
@@ -0,0 +1,13 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{. warning[UnusedImport]:off .}

import
  ./test_light_client_content,
  ./test_light_client_network,
  ./test_beacon_light_client
@@ -0,0 +1,59 @@
# Nimbus - Portal Network
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/os,
  chronos,
  eth/p2p/discoveryv5/protocol as discv5_protocol, eth/p2p/discoveryv5/routing_table,
  eth/common/eth_types_rlp,
  beacon_chain/spec/forks,
  beacon_chain/spec/datatypes/altair,
  ../../network/wire/[portal_protocol, portal_stream, portal_protocol_config],
  ../../network/beacon_light_client/[light_client_network, light_client_content],
  ../../content_db,
  ../test_helpers

type LightClientNode* = ref object
  discoveryProtocol*: discv5_protocol.Protocol
  lightClientNetwork*: LightClientNetwork

proc getTestForkDigests*(): ForkDigests =
  return ForkDigests(
    phase0: ForkDigest([0'u8, 0, 0, 1]),
    altair: ForkDigest([0'u8, 0, 0, 2]),
    bellatrix: ForkDigest([0'u8, 0, 0, 3]),
    capella: ForkDigest([0'u8, 0, 0, 4]),
    sharding: ForkDigest([0'u8, 0, 0, 5])
  )

proc newLCNode*(
    rng: ref HmacDrbgContext,
    port: int,
    forks: ForkDigests = getTestForkDigests()): LightClientNode =
  let
    node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
    db = ContentDB.new("", uint32.high, inMemory = true)
    streamManager = StreamManager.new(node)
    hn = LightClientNetwork.new(node, db, streamManager, forks)

  return LightClientNode(discoveryProtocol: node, lightClientNetwork: hn)

proc portalProtocol*(hn: LightClientNode): PortalProtocol =
  hn.lightClientNetwork.portalProtocol

proc localNode*(hn: LightClientNode): Node =
  hn.discoveryProtocol.localNode

proc start*(hn: LightClientNode) =
  hn.lightClientNetwork.start()

proc stop*(hn: LightClientNode) {.async.} =
  hn.lightClientNetwork.stop()
  await hn.discoveryProtocol.closeWait()

proc containsId*(hn: LightClientNode, contentId: ContentId): bool =
  return hn.lightClientNetwork.contentDB.get(contentId).isSome()
@@ -0,0 +1,123 @@
# Nimbus - Portal Network
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  std/options,
  testutils/unittests, chronos,
  eth/p2p/discoveryv5/protocol as discv5_protocol, eth/p2p/discoveryv5/routing_table,
  eth/common/eth_types_rlp,
  beacon_chain/spec/forks,
  beacon_chain/spec/datatypes/altair,
  beacon_chain/beacon_clock,
  beacon_chain/conf,
  ../../network/wire/[portal_protocol, portal_stream],
  ../../network/beacon_light_client/[
    light_client_content,
    beacon_light_client
  ],
  "."/[light_client_test_data, light_client_test_helpers]

procSuite "Portal Light client":
  let rng = newRng()

  proc headerCallback(q: AsyncQueue[BeaconBlockHeader]): LightClientHeaderCallback =
    return (
      proc (lightClient: LightClient, finalizedHeader: BeaconBlockHeader) {.gcsafe, raises: [Defect].} =
        try:
          q.putNoWait(finalizedHeader)
        except AsyncQueueFullError as exc:
          raiseAssert(exc.msg)
    )

  proc loadMainnetData(): Eth2NetworkMetadata =
    try:
      return loadEth2Network(some("mainnet"))
    except CatchableError as exc:
      raiseAssert(exc.msg)

  asyncTest "Start and retrieve bootstrap":
    let
      finalHeaders = newAsyncQueue[BeaconBlockHeader]()
      optimisticHeaders = newAsyncQueue[BeaconBlockHeader]()
      # Test data is retrieved from mainnet
      metadata = loadMainnetData()
      genesisState =
        try:
          template genesisData(): auto = metadata.genesisData
          newClone(readSszForkedHashedBeaconState(
            metadata.cfg, genesisData.toOpenArrayByte(genesisData.low, genesisData.high)))
        except CatchableError as err:
          raiseAssert "Invalid baked-in state: " & err.msg

      beaconClock = BeaconClock.init(getStateField(genesisState[], genesis_time))

      # TODO: Should probably mock somehow passing time.
      getBeaconTime = beaconClock.getBeaconTimeFn()

      genesis_validators_root =
        getStateField(genesisState[], genesis_validators_root)

      forkDigests = newClone ForkDigests.init(metadata.cfg, genesis_validators_root)

      lcNode1 = newLCNode(rng, 20302, forkDigests[])
      lcNode2 = newLCNode(rng, 20303, forkDigests[])
      bootstrap = SSZ.decode(bootstrapBytes, altair.LightClientBootstrap)
      bootstrapHeaderHash = hash_tree_root(bootstrap.header)

    check:
      lcNode1.portalProtocol().addNode(lcNode2.localNode()) == Added
      lcNode2.portalProtocol().addNode(lcNode1.localNode()) == Added

      (await lcNode1.portalProtocol().ping(lcNode2.localNode())).isOk()
      (await lcNode2.portalProtocol().ping(lcNode1.localNode())).isOk()

    let
      bootstrapKey = LightClientBootstrapKey(
        blockHash: bootstrapHeaderHash
      )
      bootstrapContentKey = ContentKey(
        contentType: lightClientBootstrap,
        lightClientBootstrapKey: bootstrapKey
      )

      bootstrapContentKeyEncoded = encode(bootstrapContentKey)
      bootstrapContentId = toContentId(bootstrapContentKeyEncoded)

    lcNode2.portalProtocol().storeContent(
      bootstrapContentId, encodeBootstrapForked(forkDigests.altair, bootstrap)
    )

    let lc = LightClient.new(
      lcNode1.lightClientNetwork,
      rng,
      metadata.cfg,
      forkDigests,
      getBeaconTime,
      genesis_validators_root,
      LightClientFinalizationMode.Optimistic
    )

    lc.onFinalizedHeader = headerCallback(finalHeaders)
    lc.onOptimisticHeader = headerCallback(optimisticHeaders)
    lc.trustedBlockRoot = some bootstrapHeaderHash

    # After start, the light client will try to retrieve the bootstrap for the
    # given trustedBlockRoot
    lc.start()

    # Wait till the light client retrieves the bootstrap. Upon receiving it,
    # both the onFinalizedHeader and onOptimisticHeader callbacks should fire.
    let
      receivedFinalHeader = await finalHeaders.get()
      receivedOptimisticHeader = await optimisticHeaders.get()

    check:
      hash_tree_root(receivedFinalHeader) == bootstrapHeaderHash
      hash_tree_root(receivedOptimisticHeader) == bootstrapHeaderHash
@@ -9,12 +9,11 @@

import
  unittest2,
  stew/byteutils,
  stint,
  beacon_chain/spec/forks,
  beacon_chain/spec/datatypes/altair,
-  ../network/beacon_light_client/light_client_content,
-  ./light_client_data/light_client_test_data
+  ../../network/beacon_light_client/light_client_content,
+  ./light_client_test_data

suite "Test light client contentEncodings":
  var forks: ForkDigests
@@ -13,50 +13,11 @@ import
  eth/rlp,
  beacon_chain/spec/forks,
  beacon_chain/spec/datatypes/altair,
-  ../network/wire/[portal_protocol, portal_stream, portal_protocol_config],
-  ../network/beacon_light_client/[light_client_network, light_client_content],
-  ../../nimbus/constants,
-  ../content_db,
-  ./test_helpers,
-  ./light_client_data/light_client_test_data
-
-type LightClientNode = ref object
-  discoveryProtocol*: discv5_protocol.Protocol
-  lightClientNetwork*: LightClientNetwork
-
-proc getTestForkDigests(): ForkDigests =
-  return ForkDigests(
-    phase0: ForkDigest([0'u8, 0, 0, 1]),
-    altair: ForkDigest([0'u8, 0, 0, 2]),
-    bellatrix: ForkDigest([0'u8, 0, 0, 3]),
-    capella: ForkDigest([0'u8, 0, 0, 4]),
-    sharding: ForkDigest([0'u8, 0, 0, 5])
-  )
-
-proc newLCNode(rng: ref HmacDrbgContext, port: int): LightClientNode =
-  let
-    node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
-    db = ContentDB.new("", uint32.high, inMemory = true)
-    streamManager = StreamManager.new(node)
-    hn = LightClientNetwork.new(node, db, streamManager, getTestForkDigests())
-
-  return LightClientNode(discoveryProtocol: node, lightClientNetwork: hn)
-
-proc portalProtocol(hn: LightClientNode): PortalProtocol =
-  hn.lightClientNetwork.portalProtocol
-
-proc localNode(hn: LightClientNode): Node =
-  hn.discoveryProtocol.localNode
-
-proc start(hn: LightClientNode) =
-  hn.lightClientNetwork.start()
-
-proc stop(hn: LightClientNode) {.async.} =
-  hn.lightClientNetwork.stop()
-  await hn.discoveryProtocol.closeWait()
-
-proc containsId(hn: LightClientNode, contentId: ContentId): bool =
-  return hn.lightClientNetwork.contentDB.get(contentId).isSome()
+  ../../network/wire/[portal_protocol, portal_stream],
+  ../../network/beacon_light_client/[light_client_network, light_client_content],
+  ../../../nimbus/constants,
+  ../../content_db,
+  "."/[light_client_test_data, light_client_test_helpers]

procSuite "Light client Content Network":
  let rng = newRng()
@@ -78,6 +78,7 @@ task fluffy_test, "Run fluffy tests":
  # Running tests with a low `mergeBlockNumber` to make the tests faster.
  # Using the real mainnet merge block number is not realistic for these tests.
  test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:canonicalVerify=true -d:mergeBlockNumber:38130"
  test "fluffy/tests/beacon_light_client_tests", "all_beacon_light_client_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:canonicalVerify=true"

task fluffy_tools, "Build fluffy tools":
  buildBinary "portalcli", "fluffy/tools/", "-d:chronicles_log_level=TRACE -d:chronosStrictException -d:PREFER_BLST_SHA256=false"