Support for obtaining deposit snapshots during trustedNodeSync (#4303)
Other changes:

* More optimal search for TTD block.
* Add timeouts to all REST requests during trusted node sync. Fixes #4037
* Removed support for storing a deposit snapshot in the network metadata.
parent 7cf432b155
commit d30cb8baf1
@@ -3,6 +3,8 @@ root = true
[*.nim]
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
insert_final_newline = false

[*.sh]
indent_style = space

@@ -11,4 +13,3 @@ indent_size = 2
[Makefile]
ident_size = 2
ident_style = tab
@@ -120,6 +120,14 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ Missing Authorization header [Beacon Node] [Preset: mainnet] OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
## DepositTreeSnapshot
```diff
+ Migration OK
+ SSZ OK
+ depositCount OK
+ isValid OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## Discovery fork ID
```diff
+ Expected fork IDs OK

@@ -615,4 +623,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9

---TOTAL---
OK: 340/345 Fail: 0/345 Skip: 5/345
OK: 344/349 Fail: 0/349 Skip: 5/349
@@ -16,7 +16,11 @@ import
  serialization, chronicles, snappy,
  eth/db/[kvstore, kvstore_sqlite3],
  ./networking/network_metadata, ./beacon_chain_db_immutable,
  ./spec/[eth2_ssz_serialization, eth2_merkleization, forks, state_transition],
  ./spec/[deposit_snapshots,
          eth2_ssz_serialization,
          eth2_merkleization,
          forks,
          state_transition],
  ./spec/datatypes/[phase0, altair, bellatrix],
  "."/[beacon_chain_db_light_client, filepath]

@@ -46,10 +50,6 @@ type
  DepositsSeq = DbSeq[DepositData]

  DepositContractSnapshot* = object
    eth1Block*: Eth2Digest
    depositContractState*: DepositContractState

  BeaconChainDBV0* = ref object
    ## BeaconChainDBV0 based on old kvstore table that sets the WITHOUT ROWID
    ## option which becomes unbearably slow with large blobs. It is used as a

@@ -154,17 +154,20 @@ type
    lcData: LightClientDataDB
      ## Persistent light client data to avoid expensive recomputations

  DbKeyKind = enum
  DbKeyKind* = enum
    # BEWARE. You should never remove entries from this enum.
    # Only new items should be added to its end.
    kHashToState
    kHashToBlock
    kHeadBlock
      ## Pointer to the most recent block selected by the fork choice
    kTailBlock
      ## Pointer to the earliest finalized block - this is the genesis block when
      ## the chain starts, but might advance as the database gets pruned
      ## TODO: determine how aggressively the database should be pruned. For a
      ##       healthy network sync, we probably need to store blocks at least
      ##       past the weak subjectivity period.
      ## Pointer to the earliest finalized block - this is the genesis
      ## block when the chain starts, but might advance as the database
      ## gets pruned
      ## TODO: determine how aggressively the database should be pruned.
      ##       For a healthy network sync, we probably need to store blocks
      ##       at least past the weak subjectivity period.
    kBlockSlotStateRoot
      ## BlockSlot -> state_root mapping
    kGenesisBlock

@@ -172,19 +175,25 @@ type
      ## (needed for satisfying requests to the beacon node API).
    kEth1PersistedTo # Obsolete
    kDepositsFinalizedByEth1 # Obsolete
    kDepositsFinalizedByEth2
      ## A merkleizer checkpoint used for computing merkle proofs of
      ## deposits added to Eth2 blocks (it may lag behind the finalized
      ## eth1 deposits checkpoint).
    kOldDepositContractSnapshot
      ## Deprecated:
      ## This was the merkleizer checkpoint produced by processing the
      ## finalized deposits (similar to kDepositTreeSnapshot, but before
      ## the EIP-4881 support was introduced). Currently, we read from
      ## it during upgrades and we keep writing data to it as a measure
      ## allowing the users to downgrade to a previous version of Nimbus.
    kHashToBlockSummary # Block summaries for fast startup
    kSpeculativeDeposits
      ## A merkelizer checkpoint created on the basis of deposit events
      ## that we were not able to verify against a `deposit_root` served
      ## by the web3 provider. This may happen on Geth nodes that serve
      ## only recent contract state data (i.e. only recent `deposit_roots`).
      ## Obsolete:
      ## This was a merkelizer checkpoint created on the basis of deposit
      ## events that we were not able to verify against a `deposit_root`
      ## served by the web3 provider. This was happening on Geth nodes
      ## that serve only recent contract state data (i.e. only recent
      ## `deposit_roots`).
    kHashToStateDiff # Obsolete
    kHashToStateOnlyMutableValidators
    kBackfillBlock # Obsolete, was in `unstable` for a while, but never released
    kDepositTreeSnapshot # EIP-4881-compatible deposit contract state snapshot

  BeaconBlockSummary* = object
    ## Cache of beacon block summaries - during startup when we construct the
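The append-only warning on `DbKeyKind` above matters because the on-disk key is derived from each entry's ordinal. A tiny standalone sketch (toy enums, not the real `DbKeyKind`) of what goes wrong if an entry is ever removed:

```nim
# Toy illustration - removing an enum entry shifts the ordinals of every
# entry after it, so bytes already written to disk change meaning.
type
  KeyV1 = enum kA, kB, kC
  KeyV2 = enum kA2, kC2             # "kB" removed - the forbidden edit

let storedKey = byte(ord(KeyV1.kC)) # old binary wrote 2'u8 to disk
# A new binary reading the same byte finds it out of range entirely:
doAssert int(storedKey) > ord(high(KeyV2))
```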
@@ -411,12 +420,15 @@ proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorDataDb2]): seq[Immuta
    withdrawal_credentials: tmp.withdrawal_credentials)

template withManyWrites*(dbParam: BeaconChainDB, body: untyped) =
  let db = dbParam
  # Make sure we're not nesting transactions.
  if isInsideTransaction(db.db):
    raiseAssert "Sqlite does not support nested transactions"
  # We don't enforce strong ordering or atomicity requirements in the beacon
  # chain db in general, relying instead on readers to be able to deal with
  # minor inconsistencies - however, putting writes in a transaction is orders
  # of magnitude faster when doing many small writes, so we use this as an
  # optimization technique and the template is named accordingly.
  let db = dbParam
  expectDb db.db.exec("BEGIN TRANSACTION;")
  var commit = false
  try:
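The speed-up that `withManyWrites` relies on is easy to reproduce outside the codebase. A minimal standalone sketch (assuming Nim's bundled SQLite bindings, `std/db_sqlite` in Nim 1.x; the module moved to `db_connector/db_sqlite` in Nim 2.x):

```nim
import std/[db_sqlite, monotimes, times]

# Time many small inserts with and without an enclosing transaction.
# On-disk database on purpose: the win comes from paying the journal
# sync once per transaction instead of once per statement.
proc manyWrites(db: DbConn, batched: bool) =
  if batched: db.exec(sql"BEGIN TRANSACTION;")
  for i in 0 ..< 5_000:
    db.exec(sql"INSERT INTO kv (k, v) VALUES (?, ?)", i, i * 2)
  if batched: db.exec(sql"COMMIT;")

let db = open("batch_demo.db", "", "", "")
db.exec(sql"CREATE TABLE IF NOT EXISTS kv (k INTEGER, v INTEGER)")
for batched in [false, true]:
  let start = getMonoTime()
  db.manyWrites(batched)
  echo "batched=", batched, ": ", getMonoTime() - start
db.close()
```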
@@ -572,7 +584,7 @@ proc new*(T: type BeaconChainDB,
template getLightClientDataDB*(db: BeaconChainDB): LightClientDataDB =
  db.lcData

proc decodeSSZ[T](data: openArray[byte], output: var T): bool =
proc decodeSSZ*[T](data: openArray[byte], output: var T): bool =
  try:
    readSszBytes(data, output, updateRoot = false)
    true

@@ -607,7 +619,7 @@ proc decodeSZSSZ[T](data: openArray[byte], output: var T): bool =
      err = e.msg, typ = name(T), dataLen = data.len
    false

func encodeSSZ(v: auto): seq[byte] =
func encodeSSZ*(v: auto): seq[byte] =
  try:
    SSZ.encode(v)
  except IOError as err:

@@ -832,6 +844,13 @@ proc delState*(db: BeaconChainDB, key: Eth2Digest) =
  for kv in db.statesNoVal:
    kv.del(key.data).expectDb()

proc delKeyValue*(db: BeaconChainDB, key: array[1, byte]) =
  db.keyValues.del(key).expectDb()
  db.v0.backend.del(key).expectDb()

proc delKeyValue*(db: BeaconChainDB, key: DbKeyKind) =
  db.delKeyValue(subkey(key))

proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
  db.stateRoots.del(stateRootKey(root, slot)).expectDb()

@@ -847,9 +866,36 @@ proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
proc putGenesisBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kGenesisBlock), key)

proc putEth2FinalizedTo*(db: BeaconChainDB,
                         eth1Checkpoint: DepositContractSnapshot) =
  db.keyValues.putSnappySSZ(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
proc putDepositTreeSnapshot*(db: BeaconChainDB,
                             snapshot: DepositTreeSnapshot) =
  db.withManyWrites:
    db.keyValues.putSnappySSZ(subkey(kDepositTreeSnapshot),
                              snapshot)
    # TODO: We currently store this redundant old snapshot in order
    #       to allow the users to rollback to a previous version
    #       of Nimbus without problems. It would be reasonable
    #       to remove this in Nimbus 23.2
    db.keyValues.putSnappySSZ(subkey(kOldDepositContractSnapshot),
                              snapshot.toOldDepositContractSnapshot)

proc hasDepositTreeSnapshot*(db: BeaconChainDB): bool =
  expectDb(subkey(kDepositTreeSnapshot) in db.keyValues)

proc getDepositTreeSnapshot*(db: BeaconChainDB): Opt[DepositTreeSnapshot] =
  result.ok(default DepositTreeSnapshot)
  let r = db.keyValues.getSnappySSZ(subkey(kDepositTreeSnapshot), result.get)
  if r != GetResult.found: result.err()

proc getUpgradableDepositSnapshot*(db: BeaconChainDB): Option[OldDepositContractSnapshot] =
  var dcs: OldDepositContractSnapshot
  let oldKey = subkey(kOldDepositContractSnapshot)
  if db.keyValues.getSnappySSZ(oldKey, dcs) != GetResult.found:
    # Old record is not present in the current database.
    # We need to take a look in the v0 database as well.
    if db.v0.backend.getSnappySSZ(oldKey, dcs) != GetResult.found:
      return

  return some dcs

proc getPhase0Block(
    db: BeaconChainDBV0, key: Eth2Digest): Opt[phase0.TrustedSignedBeaconBlock] =

@@ -1221,16 +1267,6 @@ proc getGenesisBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kGenesisBlock), Eth2Digest) or
    db.v0.getGenesisBlock()

proc getEth2FinalizedTo(db: BeaconChainDBV0): Opt[DepositContractSnapshot] =
  result.ok(DepositContractSnapshot())
  let r = db.backend.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
  if r != found: result.err()

proc getEth2FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
  result.ok(DepositContractSnapshot())
  let r = db.keyValues.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
  if r != found: return db.v0.getEth2FinalizedTo()

proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool =
  db.backend.contains(subkey(phase0.SignedBeaconBlock, key)).expectDb()
@@ -784,6 +784,11 @@ type
      desc: "Recreate historical state index at end of backfill, allowing full history access (requires full backfill)"
      defaultValue: false .}: bool

    downloadDepositSnapshot* {.
      desc: "Also try to download a snapshot of the deposit contract state"
      defaultValue: false
      name: "with-deposit-snapshot" .}: bool

  ValidatorClientConf* = object
    configFile* {.
      desc: "Loads the configuration from a TOML file"
@@ -73,7 +73,7 @@ type

  ChainDAGRef* = ref object
    ## ChainDAG validates, stores and serves chain history of valid blocks
    ## according to the beacon chain state transtion. From genesis to the
    ## according to the beacon chain state transition. From genesis to the
    ## finalization point, block history is linear - from there, it branches out
    ## into a dag with several heads, one of which is considered canonical.
    ##

@@ -112,7 +112,7 @@ type
    ## | | |
    ## db.finalizedBlocks dag.forkBlocks
    ##
    ## The archive is the the part of finalized history for which we no longer
    ## The archive is the part of finalized history for which we no longer
    ## recreate states quickly because we don't have a reasonable state to
    ## start replay from - when starting from a checkpoint, this is the typical
    ## case - recreating history requires either replaying from genesis or
@@ -17,9 +17,9 @@ import
  chronos, metrics, chronicles/timings, stint/endians2,
  web3, web3/ethtypes as web3Types, web3/ethhexstrings, web3/engine_api,
  eth/common/eth_types,
  eth/async_utils, stew/[byteutils, objects, shims/hashes],
  eth/async_utils, stew/[byteutils, objects, results, shims/hashes],
  # Local modules:
  ../spec/[eth2_merkleization, forks, helpers],
  ../spec/[deposit_snapshots, eth2_merkleization, forks, helpers],
  ../spec/datatypes/[base, phase0, bellatrix],
  ../networking/network_metadata,
  ../consensus_object_pools/block_pools_types,

@@ -30,8 +30,7 @@ from std/times import getTime, inSeconds, initTime, `-`
from ../spec/engine_authentication import getSignedIatToken

export
  web3Types, deques, base,
  beacon_chain_db.DepositContractSnapshot
  web3Types, deques, base, DepositTreeSnapshot

logScope:
  topics = "eth1"

@@ -124,6 +123,7 @@ type
    web3Urls: seq[string]
    eth1Network: Option[Eth1Network]
    depositContractAddress*: Eth1Address
    depositContractDeployedAt: BlockHashOrNumber
    forcePolling: bool
    jwtSecret: Option[seq[byte]]
    blocksPerLogsRequest: uint64
@@ -809,6 +809,7 @@ proc onBlockHeaders(p: Web3DataProviderRef,
    p.web3.subscribeForBlockHeaders(blockHeaderHandler, errorHandler))

proc pruneOldBlocks(chain: var Eth1Chain, depositIndex: uint64) =
  ## Called on block finalization to delete old and now redundant data.
  let initialChunks = chain.finalizedDepositsMerkleizer.getChunkCount
  var lastBlock: Eth1Block

@@ -824,9 +825,11 @@ proc pruneOldBlocks(chain: var Eth1Chain, depositIndex: uint64) =

  if chain.finalizedDepositsMerkleizer.getChunkCount > initialChunks:
    chain.finalizedBlockHash = lastBlock.hash
    chain.db.putEth2FinalizedTo DepositContractSnapshot(
    chain.db.putDepositTreeSnapshot DepositTreeSnapshot(
      eth1Block: lastBlock.hash,
      depositContractState: chain.finalizedDepositsMerkleizer.toDepositContractState)
      depositContractState: chain.finalizedDepositsMerkleizer.toDepositContractState,
      blockHeight: lastBlock.number,
    )

    eth1_finalized_head.set lastBlock.number.toGaugeValue
    eth1_finalized_deposits.set lastBlock.depositCount.toGaugeValue

@@ -1057,12 +1060,6 @@ proc new*(T: type Web3DataProvider,

  return ok Web3DataProviderRef(url: web3Url, web3: web3, ns: ns)

proc putInitialDepositContractSnapshot*(db: BeaconChainDB,
                                        s: DepositContractSnapshot) =
  let existingStart = db.getEth2FinalizedTo()
  if not existingStart.isOk:
    db.putEth2FinalizedTo(s)

template getOrDefault[T, E](r: Result[T, E]): T =
  type TT = T
  get(r, default(TT))

@@ -1071,9 +1068,9 @@ proc init*(T: type Eth1Chain, cfg: RuntimeConfig, db: BeaconChainDB): T =
  let
    finalizedDeposits =
      if db != nil:
        db.getEth2FinalizedTo().getOrDefault()
        db.getDepositTreeSnapshot().getOrDefault()
      else:
        default(DepositContractSnapshot)
        default(DepositTreeSnapshot)
    m = DepositsMerkleizer.init(finalizedDeposits.depositContractState)

  T(db: db,

@@ -1082,31 +1079,13 @@ proc init*(T: type Eth1Chain, cfg: RuntimeConfig, db: BeaconChainDB): T =
    finalizedDepositsMerkleizer: m,
    headMerkleizer: copy m)

proc createInitialDepositSnapshot*(
    depositContractAddress: Eth1Address,
    depositContractDeployedAt: BlockHashOrNumber,
    web3Url: string,
    jwtSecret: Option[seq[byte]]): Future[Result[DepositContractSnapshot, string]]
    {.async.} =

  let dataProviderRes =
    await Web3DataProvider.new(depositContractAddress, web3Url, jwtSecret)
  if dataProviderRes.isErr:
    return err(dataProviderRes.error)
  var dataProvider = dataProviderRes.get

  let knownStartBlockHash =
    if depositContractDeployedAt.isHash:
      depositContractDeployedAt.hash
    else:
      try:
        var blk = awaitWithRetries(
          dataProvider.getBlockByNumber(depositContractDeployedAt.number))
        blk.hash.asEth2Digest
      except CatchableError as err:
        return err(err.msg)

  return ok DepositContractSnapshot(eth1Block: knownStartBlockHash)
proc getBlock(provider: Web3DataProviderRef, id: BlockHashOrNumber):
    Future[BlockObject] =
  if id.isHash:
    let hash = id.hash.asBlockHash()
    return provider.getBlockByHash(hash)
  else:
    return provider.getBlockByNumber(id.number)

proc currentEpoch(m: Eth1Monitor): Epoch =
  if m.getBeaconTime != nil:

@@ -1116,10 +1095,10 @@ proc currentEpoch(m: Eth1Monitor): Epoch =

proc init*(T: type Eth1Monitor,
           cfg: RuntimeConfig,
           depositContractDeployedAt: BlockHashOrNumber,
           db: BeaconChainDB,
           getBeaconTime: GetBeaconTimeFn,
           web3Urls: seq[string],
           depositContractSnapshot: Option[DepositContractSnapshot],
           eth1Network: Option[Eth1Network],
           forcePolling: bool,
           jwtSecret: Option[seq[byte]],

@@ -1129,12 +1108,10 @@ proc init*(T: type Eth1Monitor,
  for url in mitems(web3Urls):
    fixupWeb3Urls url

  if depositContractSnapshot.isSome:
    putInitialDepositContractSnapshot(db, depositContractSnapshot.get)

  T(state: Initialized,
    depositsChain: Eth1Chain.init(cfg, db),
    depositContractAddress: cfg.DEPOSIT_CONTRACT_ADDRESS,
    depositContractDeployedAt: depositContractDeployedAt,
    getBeaconTime: getBeaconTime,
    web3Urls: web3Urls,
    eth1Network: eth1Network,

@@ -1144,6 +1121,37 @@ proc init*(T: type Eth1Monitor,
    blocksPerLogsRequest: targetBlocksPerLogsRequest,
    ttdReachedField: ttdReached)

proc runDbMigrations*(m: Eth1Monitor) {.async.} =
  template db: auto = m.depositsChain.db

  if db.hasDepositTreeSnapshot():
    return

  # There might be an old deposit snapshot in the database that needs upgrade.
  let oldSnapshot = db.getUpgradableDepositSnapshot()
  if oldSnapshot.isSome:
    let
      hash = oldSnapshot.get.eth1Block.asBlockHash()
      blk = awaitWithRetries m.dataProvider.getBlockByHash(hash)
      blockNumber = uint64(blk.number)

    db.putDepositTreeSnapshot oldSnapshot.get.toDepositTreeSnapshot(blockNumber)
  elif not m.depositContractAddress.isZeroMemory:
    # If there is no DCS record at all, create one pointing to the deployment block
    # of the deposit contract and insert it as a starting point.
    let blk = try:
      awaitWithRetries m.dataProvider.getBlock(m.depositContractDeployedAt)
    except CatchableError as e:
      fatal "Failed to fetch deployment block",
            depositContract = m.depositContractAddress,
            deploymentBlock = $m.depositContractDeployedAt,
            err = e.msg
      quit 1
    doAssert blk != nil, "getBlock should not return nil"
    db.putDepositTreeSnapshot DepositTreeSnapshot(
      eth1Block: blk.hash.asEth2Digest,
      blockHeight: uint64 blk.number)

proc safeCancel(fut: var Future[void]) =
  if not fut.isNil and not fut.finished:
    fut.cancel()

@@ -1341,11 +1349,14 @@ proc syncBlockRange(m: Eth1Monitor,
        m.processGenesisDeposit(deposit)
      blk.activeValidatorsCount = m.genesisValidators.lenu64

    let depositContractState = DepositContractSnapshot(
      eth1Block: blocksWithDeposits[^1].hash,
      depositContractState: m.headMerkleizer.toDepositContractState)
    let
      lastBlock = blocksWithDeposits[^1]
      depositTreeSnapshot = DepositTreeSnapshot(
        eth1Block: lastBlock.hash,
        depositContractState: m.headMerkleizer.toDepositContractState,
        blockNumber: lastBlock.number)

    m.depositsChain.db.putEth2FinalizedTo depositContractState
    m.depositsChain.db.putDepositTreeSnapshot depositTreeSnapshot

  if m.genesisStateFut != nil and m.chainHasEnoughValidators:
    let lastIdx = m.depositsChain.blocks.len - 1

@@ -1405,6 +1416,48 @@ func init(T: type FullBlockId, blk: Eth1BlockHeader|BlockObject): T =
func isNewLastBlock(m: Eth1Monitor, blk: Eth1BlockHeader|BlockObject): bool =
  m.latestEth1Block.isNone or blk.number.uint64 > m.latestEth1BlockNumber

proc findTerminalBlock(provider: Web3DataProviderRef,
                       ttd: Uint256): Future[BlockObject] {.async.} =
  ## Find the first execution block with a difficulty higher than the
  ## specified `ttd`.
  var
    cache = initTable[uint64, BlockObject]()
    step = -0x4000'i64

  proc next(x: BlockObject): Future[BlockObject] {.async.} =
    ## Returns the next block that's `step` steps away.
    let key = uint64(max(int64(x.number) + step, 1))
    # Check if present in cache.
    if key in cache:
      return cache[key]
    # Not cached, fetch.
    let value = awaitWithRetries provider.getBlockByNumber(key)
    cache[key] = value
    return value

  # Block A follows, B leads.
  var
    a = awaitWithRetries(
      provider.web3.provider.eth_getBlockByNumber("latest", false))
    b = await next(a)

  while true:
    let one = a.totalDifficulty > ttd
    let two = b.totalDifficulty > ttd
    if one != two:
      step = step div -2i64
      if step == 0:
        # Since we can't know in advance from which side the block is
        # approached, one last check is needed to determine the proper
        # terminal block.
        if one: return a
        else  : return b
    a = b
    b = await next(b)

  # This is unreachable.
  doAssert(false)

proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
  if m.state == Started:
    return
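The "more optimal search" from the commit message is the gallop-and-halve loop above: starting from the latest block it jumps backwards in steps of 0x4000, and every time the two probes straddle the TTD it halves the step and reverses direction until the step reaches zero. A standalone sketch of the same loop over a synthetic, monotonically increasing difficulty sequence (array indexing stands in for the web3 fetches, and the clamp floor is index 0 instead of block number 1):

```nim
# Toy model of the step-halving TTD search: returns the index of the
# first entry whose total difficulty exceeds `ttd`. Assumes the merge
# has happened, i.e. at least the last entry is above `ttd`.
proc findFirstAboveTtd(td: seq[int], ttd: int): int =
  var step = -16                     # the real code starts at -0x4000
  proc next(i: int): int = max(i + step, 0)
  var
    a = td.high                      # "latest" block
    b = next(a)
  while true:
    let one = td[a] > ttd
    let two = td[b] > ttd
    if one != two:                   # probes straddle the boundary
      step = step div -2             # halve the step, reverse direction
      if step == 0:
        return (if one: a else: b)
    a = b
    b = next(b)

when isMainModule:
  let tds = @[1, 3, 6, 10, 15, 21, 28, 36, 45, 55]  # cumulative difficulty
  doAssert findFirstAboveTtd(tds, 20) == 5  # td[5] = 21 is the first above 20
```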
@@ -1430,6 +1483,8 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
  await m.ensureDataProvider()
  doAssert m.dataProvider != nil, "close not called concurrently"

  await m.runDbMigrations()

  # We might need to reset the chain if the new provider disagrees
  # with the previous one regarding the history of the chain or if
  # we have detected a consensus violation - our view disagreeing with

@@ -1510,7 +1565,10 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
    await m.dataProvider.onBlockHeaders(newBlockHeadersHandler,
                                        subscriptionErrorHandler)

  let shouldProcessDeposits = not m.depositContractAddress.isZeroMemory
  let shouldProcessDeposits = not (
    m.depositContractAddress.isZeroMemory or
    m.depositsChain.finalizedBlockHash.data.isZeroMemory)

  var eth1SyncedTo: Eth1BlockNumber
  if shouldProcessDeposits:
    if m.depositsChain.blocks.len == 0:

@@ -1581,40 +1639,22 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
        doAssert m.latestEth1Block.isSome
        awaitWithRetries m.dataProvider.getBlockByHash(m.latestEth1Block.get.hash)

    # TODO when a terminal block has is configured in cfg.TERMINAL_BLOCK_HASH,
    # TODO when a terminal block hash is configured in cfg.TERMINAL_BLOCK_HASH,
    #      we should try to fetch that block from the EL - this facility is not
    #      in use on any current network, but should be implemented for full
    #      compliance
    if m.terminalBlockHash.isNone and shouldCheckForMergeTransition:
      var terminalBlockCandidate = nextBlock
      let terminalBlock = await findTerminalBlock(m.dataProvider, m.cfg.TERMINAL_TOTAL_DIFFICULTY)
      m.terminalBlockHash = some(terminalBlock.hash)
      m.ttdReachedField = true

      debug "startEth1Syncing: checking for merge terminal block",
      debug "startEth1Syncing: found merge terminal block",
        currentEpoch = m.currentEpoch,
        BELLATRIX_FORK_EPOCH = m.cfg.BELLATRIX_FORK_EPOCH,
        totalDifficulty = $nextBlock.totalDifficulty,
        ttd = $m.cfg.TERMINAL_TOTAL_DIFFICULTY,
        terminalBlockHash = m.terminalBlockHash,
        candidateBlockHash = terminalBlockCandidate.hash,
        candidateBlockNumber = distinctBase(terminalBlockCandidate.number)

      if terminalBlockCandidate.totalDifficulty >= m.cfg.TERMINAL_TOTAL_DIFFICULTY:
        while not terminalBlockCandidate.parentHash.isZeroMemory:
          var parentBlock = awaitWithRetries(
            m.dataProvider.getBlockByHash(terminalBlockCandidate.parentHash))
          if parentBlock.totalDifficulty < m.cfg.TERMINAL_TOTAL_DIFFICULTY:
            break
          terminalBlockCandidate = parentBlock
        m.terminalBlockHash = some terminalBlockCandidate.hash
        m.ttdReachedField = true

      debug "startEth1Syncing: found merge terminal block",
        currentEpoch = m.currentEpoch,
        BELLATRIX_FORK_EPOCH = m.cfg.BELLATRIX_FORK_EPOCH,
        totalDifficulty = $nextBlock.totalDifficulty,
        ttd = $m.cfg.TERMINAL_TOTAL_DIFFICULTY,
        terminalBlockHash = m.terminalBlockHash,
        candidateBlockHash = terminalBlockCandidate.hash,
        candidateBlockNumber = distinctBase(terminalBlockCandidate.number)
        candidateBlockNumber = distinctBase(terminalBlock.number)

  if shouldProcessDeposits:
    if m.latestEth1BlockNumber <= m.cfg.ETH1_FOLLOW_DISTANCE:
@@ -21,7 +21,7 @@ import
  ./networking/topic_params,
  ./rpc/[rest_api, state_ttl_cache],
  ./spec/datatypes/[altair, bellatrix, phase0],
  ./spec/[engine_authentication, weak_subjectivity],
  ./spec/[deposit_snapshots, engine_authentication, weak_subjectivity],
  ./validators/[keystore_management, validator_duties],
  "."/[
    beacon_node, beacon_node_light_client, deposits, interop,

@@ -382,25 +382,14 @@ const SlashingDbName = "slashing_protection"
  # changing this requires physical file rename as well or history is lost.

proc init*(T: type BeaconNode,
           cfg: RuntimeConfig,
           rng: ref HmacDrbgContext,
           config: BeaconNodeConf,
           depositContractDeployedAt: BlockHashOrNumber,
           eth1Network: Option[Eth1Network],
           genesisStateContents: string,
           depositContractSnapshotContents: string): BeaconNode {.
    raises: [Defect, CatchableError].} =

           metadata: Eth2NetworkMetadata): BeaconNode
          {.raises: [Defect, CatchableError].} =
  var taskpool: TaskPoolPtr

  let depositContractSnapshot = if depositContractSnapshotContents.len > 0:
    try:
      some SSZ.decode(depositContractSnapshotContents, DepositContractSnapshot)
    except CatchableError as err:
      fatal "Invalid deposit contract snapshot", err = err.msg
      quit 1
  else:
    none DepositContractSnapshot
  template cfg: auto = metadata.cfg
  template eth1Network: auto = metadata.eth1Network

  try:
    if config.numThreads < 0:

@@ -455,26 +444,6 @@ proc init*(T: type BeaconNode,

    let optJwtSecret = rng[].loadJwtSecret(config, allowCreate = false)

    template getDepositContractSnapshot: auto =
      if depositContractSnapshot.isSome:
        depositContractSnapshot
      elif not cfg.DEPOSIT_CONTRACT_ADDRESS.isZeroMemory:
        let snapshotRes = waitFor createInitialDepositSnapshot(
          cfg.DEPOSIT_CONTRACT_ADDRESS,
          depositContractDeployedAt,
          config.web3Urls[0],
          optJwtSecret)
        if snapshotRes.isErr:
          fatal "Failed to locate the deposit contract deployment block",
                depositContract = cfg.DEPOSIT_CONTRACT_ADDRESS,
                deploymentBlock = $depositContractDeployedAt,
                err = snapshotRes.error
          quit 1
        else:
          some snapshotRes.get
      else:
        none(DepositContractSnapshot)

    if config.web3Urls.len() == 0:
      if cfg.BELLATRIX_FORK_EPOCH == FAR_FUTURE_EPOCH:
        notice "Running without execution client - validator features partially disabled (see https://nimbus.guide/eth1.html)"

@@ -484,11 +453,11 @@ proc init*(T: type BeaconNode,
    var eth1Monitor: Eth1Monitor

    let genesisState =
      if genesisStateContents.len > 0:
      if metadata.genesisData.len > 0:
        try:
          newClone(readSszForkedHashedBeaconState(
          newClone readSszForkedHashedBeaconState(
            cfg,
            genesisStateContents.toOpenArrayByte(0, genesisStateContents.high())))
            metadata.genesisData.toOpenArrayByte(0, metadata.genesisData.high))
        except CatchableError as err:
          raiseAssert "Invalid baked-in state: " & err.msg
      else:

@@ -497,10 +466,6 @@ proc init*(T: type BeaconNode,
    if not ChainDAGRef.isInitialized(db).isOk():
      if genesisState == nil and checkpointState == nil:
        when hasGenesisDetection:
          if depositContractSnapshotContents.len > 0:
            fatal "A deposits snapshot cannot be provided without also providing a matching beacon state snapshot"
            quit 1

          # This is a fresh start without a known genesis state
          # (most likely, it hasn't arrived yet). We'll try to
          # obtain a genesis through the Eth1 deposits monitor:

@@ -512,10 +477,10 @@ proc init*(T: type BeaconNode,
          # that would do only this - see Paul's proposal for this.
          let eth1Monitor = Eth1Monitor.init(
            cfg,
            metadata.depositContractDeployedAt,
            db,
            nil,
            config.web3Urls,
            getDepositContractSnapshot(),
            eth1Network,
            config.web3ForcePolling,
            optJwtSecret,

@@ -608,10 +573,10 @@ proc init*(T: type BeaconNode,
    if eth1Monitor.isNil and config.web3Urls.len > 0:
      eth1Monitor = Eth1Monitor.init(
        cfg,
        metadata.depositContractDeployedAt,
        db,
        getBeaconTime,
        config.web3Urls,
        getDepositContractSnapshot(),
        eth1Network,
        config.web3ForcePolling,
        optJwtSecret,

@@ -1824,14 +1789,7 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai
  for node in metadata.bootstrapNodes:
    config.bootstrapNodes.add node

  let node = BeaconNode.init(
    metadata.cfg,
    rng,
    config,
    metadata.depositContractDeployedAt,
    metadata.eth1Network,
    metadata.genesisData,
    metadata.genesisDepositsSnapshot)
  let node = BeaconNode.init(rng, config, metadata)

  if bnStatus == BeaconNodeStatus.Stopping:
    return

@@ -2036,6 +1994,7 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableEr
      config.stateId,
      config.backfillBlocks,
      config.reindex,
      config.downloadDepositSnapshot,
      genesis)

{.pop.} # TODO moduletests exceptions
@@ -90,8 +90,12 @@ programMain:
    eth1Monitor =
      if config.web3Urls.len > 0:
        let res = Eth1Monitor.init(
          cfg, db = nil, getBeaconTime, config.web3Urls,
          none(DepositContractSnapshot), metadata.eth1Network,
          cfg,
          metadata.depositContractDeployedAt,
          db = nil,
          getBeaconTime,
          config.web3Urls,
          metadata.eth1Network,
          forcePolling = false,
          rng[].loadJwtSecret(config, allowCreate = false),
          # TTD is not relevant for the light client, so it's safe
@@ -10,12 +10,12 @@ import
  stew/[results, base10],
  chronicles,
  ./rest_utils,
  ../beacon_node, ../networking/eth2_network,
  ./state_ttl_cache,
  ../beacon_node,
  ../consensus_object_pools/[blockchain_dag, exit_pool, spec_cache],
  ../spec/[eth2_merkleization, forks, network, validator],
  ../spec/[deposit_snapshots, eth2_merkleization, forks, network, validator],
  ../spec/datatypes/[phase0, altair],
  ../validators/message_router_mev,
  ./state_ttl_cache
  ../validators/message_router_mev

export rest_utils

@@ -124,6 +124,24 @@ proc toString*(kind: ValidatorFilterKind): string =
    "withdrawal_done"

proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
  # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4881.md
  router.api(MethodGet, "/eth/v1/beacon/deposit_snapshot") do () -> RestApiResponse:
    let snapshotOpt = node.db.getDepositTreeSnapshot()
    if snapshotOpt.isSome():
      let snapshot = snapshotOpt.get()
      return RestApiResponse.jsonResponse(
        RestDepositSnapshot(
          finalized: snapshot.depositContractState.branch,
          deposit_root: snapshot.getDepositRoot(),
          deposit_count: snapshot.getDepositCountU64(),
          execution_block_hash: snapshot.eth1Block,
          execution_block_height: snapshot.blockHeight))
    else:
      # This can happen in a very short window after the client is started,
      # but the snapshot record hasn't yet been upgraded in the database.
      # Returning 404 should be easy to handle for the clients - they just
      # need to retry.
      return RestApiResponse.jsonError(Http404, NoFinalizedSnapshotAvailableError)

  # https://ethereum.github.io/beacon-APIs/#/Beacon/getGenesis
  router.api(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse:
    return RestApiResponse.jsonResponse(
@@ -178,6 +178,8 @@ const
    "Unable to produce contribution using the passed parameters"
  InternalServerError* =
    "Internal server error"
  NoFinalizedSnapshotAvailableError* =
    "No Finalized Snapshot Available"
  NoImplementationError* =
    "Not implemented yet"
  KeystoreAdditionFailure =
@@ -0,0 +1,54 @@
from std/sequtils import all
from stew/objects import isZeroMemory

import ./eth2_merkleization
from ./datatypes/base import Eth1Data, DepositContractState
from ./digest import Eth2Digest

export
  depositCountBytes, depositCountU64

type
  OldDepositContractSnapshot* = object
    eth1Block*: Eth2Digest
    depositContractState*: DepositContractState

  DepositTreeSnapshot* = object
    ## https://eips.ethereum.org/EIPS/eip-4881
    eth1Block*: Eth2Digest
    depositContractState*: DepositContractState
    blockHeight*: uint64

func toDepositTreeSnapshot*(d: OldDepositContractSnapshot,
                            blockHeight: uint64): DepositTreeSnapshot =
  DepositTreeSnapshot(
    eth1Block: d.eth1Block,
    depositContractState: d.depositContractState,
    blockHeight: blockHeight)

func toOldDepositContractSnapshot*(d: DepositTreeSnapshot): OldDepositContractSnapshot =
  OldDepositContractSnapshot(eth1Block: d.eth1Block,
                             depositContractState: d.depositContractState)

template getDepositCountU64*(d: OldDepositContractSnapshot |
                                DepositTreeSnapshot): uint64 =
  depositCountU64(d.depositContractState.deposit_count)

func getDepositRoot*(d: OldDepositContractSnapshot |
                        DepositTreeSnapshot): Eth2Digest =
  let merk = DepositsMerkleizer.init(d.depositContractState)
  let hash = merk.getFinalHash()
  # TODO: mixInLength should accept unsigned int instead of int as
  #       this right now cuts in half the theoretical number of deposits.
  return mixInLength(hash, int(merk.totalChunks))

func isValid*(d: DepositTreeSnapshot, wantedDepositRoot: Eth2Digest): bool =
  ## `isValid` requires the snapshot to be self-consistent and
  ## to point to a specific Ethereum block
  return not (d.eth1Block.isZeroMemory or
              d.blockHeight == 0 or
              d.getDepositRoot() != wantedDepositRoot)

func matches*(snapshot: DepositTreeSnapshot, eth1_data: Eth1Data): bool =
  snapshot.getDepositCountU64() == eth1_data.deposit_count and
  snapshot.getDepositRoot() == eth1_data.deposit_root
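To make the validity rule above concrete: a snapshot is rejected unless it names a real execution block (non-zero hash and non-zero height) and its merkleized branch reproduces the expected deposit root. A purely illustrative, self-contained restatement with a toy root standing in for `getDepositRoot()`:

```nim
type ToySnapshot = object
  eth1Block: array[32, byte]   # execution block the snapshot points at
  blockHeight: uint64
  root: array[32, byte]        # stands in for getDepositRoot()

func isZero(a: array[32, byte]): bool =
  for b in a:
    if b != 0: return false
  true

func isValid(s: ToySnapshot, wantedRoot: array[32, byte]): bool =
  # Same three-way rule as DepositTreeSnapshot.isValid above.
  not (s.eth1Block.isZero or s.blockHeight == 0 or s.root != wantedRoot)

when isMainModule:
  var s = ToySnapshot(blockHeight: 11052984)
  s.eth1Block[0] = 0xee'u8
  s.root[0] = 0x1a'u8
  var wanted: array[32, byte]
  wanted[0] = 0x1a'u8
  doAssert s.isValid(wanted)
  doAssert not ToySnapshot().isValid(wanted)  # zero hash/height rejected
```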
@@ -273,3 +273,8 @@ proc submitPoolVoluntaryExit*(body: SignedVoluntaryExit): RestPlainResponse {.
     rest, endpoint: "/eth/v1/beacon/pool/voluntary_exits",
     meth: MethodPost.}
  ## https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolVoluntaryExit

proc getDepositSnapshot*(): RestResponse[GetDepositSnapshotResponse] {.
     rest, endpoint: "/eth/v1/beacon/deposit_snapshot",
     meth: MethodGet.}
  ## https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4881.md
@@ -439,6 +439,13 @@ type
    chain_id*: string
    address*: string

  RestDepositSnapshot* = object
    finalized*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
    deposit_root*: Eth2Digest
    deposit_count*: uint64
    execution_block_hash*: Eth2Digest
    execution_block_height*: uint64

  RestBlockInfo* = object
    slot*: Slot
    blck* {.serializedFieldName: "block".}: Eth2Digest

@@ -581,6 +588,7 @@ type
  GetBlockRootResponse* = DataEnclosedObject[RestRoot]
  GetDebugChainHeadsResponse* = DataEnclosedObject[seq[RestChainHead]]
  GetDepositContractResponse* = DataEnclosedObject[RestDepositContract]
  GetDepositSnapshotResponse* = DataEnclosedObject[RestDepositSnapshot]
  GetEpochCommitteesResponse* = DataEnclosedObject[seq[RestBeaconStatesCommittees]]
  GetForkScheduleResponse* = DataEnclosedObject[seq[Fork]]
  GetGenesisResponse* = DataEnclosedObject[RestGenesis]
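Serialized through the usual `DataEnclosedObject` wrapper, a `GetDepositSnapshotResponse` comes out roughly as sketched below. The field names come from `RestDepositSnapshot` above and the values from the mainnet test fixture later in this commit; whether integers appear as JSON strings follows the beacon-API uint64-as-string convention, which is an assumption here:

```nim
import std/json

# Approximate wire shape of GET /eth/v1/beacon/deposit_snapshot.
let response = %*{
  "data": {
    "finalized": [
      # only the first branch entry shown; the real array has
      # DEPOSIT_CONTRACT_TREE_DEPTH entries
      "0xca3bfce2c304c4f52e0c83f96daf8c98a05f80281b62cf08f6be9c1bc10c0adb"
    ],
    "deposit_root": "0x1a4c3cce02935defd159e4e207890ae26a325bf03e205c9ee94ca040ecce008a",
    "deposit_count": "21073",
    "execution_block_hash": "0xeeea1373d4aa9e099d7c9deddb694db9aeb4577755ef83f9b6345ce4357d9abf",
    "execution_block_height": "11052984"
  }
}
echo response.pretty
```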
@@ -38,14 +38,23 @@ func hash_tree_root*(
    bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock) {.
  error: "SignedBeaconBlock should not be hashed".}

func depositCountU64(s: DepositContractState): uint64 =
  for i in 0 .. 23:
    doAssert s.deposit_count[i] == 0

  uint64.fromBytesBE s.deposit_count.toOpenArray(24, 31)
func depositCountBytes*(x: uint64): array[32, byte] =
  doAssert(x <= 4294967295'u64)
  var z = x
  for i in 0..3:
    result[31-i] = byte(int64(z) %% 256'i64)
    z = z div 256

func depositCountU64*(xs: openArray[byte]): uint64 =
  ## depositCountU64 considers just the first 4 bytes as
  ## MAX_DEPOSIT_COUNT is defined as 2^32 - 1.
  for i in 0 .. 27:
    doAssert xs[i] == 0
  return uint64.fromBytesBE(xs[24..31])

func init*(T: type DepositsMerkleizer, s: DepositContractState): DepositsMerkleizer =
  DepositsMerkleizer.init(s.branch, s.depositCountU64)
  let count = depositCountU64(s.deposit_count)
  DepositsMerkleizer.init(s.branch, count)

func toDepositContractState*(merkleizer: DepositsMerkleizer): DepositContractState =
  # TODO There is an off by one discrepancy in the size of the arrays here that
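The two helpers above encode the contract's deposit count as the big-endian tail of a 32-byte field; only the last few bytes are ever non-zero since `MAX_DEPOSIT_COUNT` is 2^32 - 1. A dependency-free sketch of the same round trip:

```nim
# Standalone re-implementation of the encoding used above: the count
# lives in bytes 24..31 (big-endian); the leading bytes must stay zero.
func countToBytes(x: uint64): array[32, byte] =
  doAssert x <= 4294967295'u64     # MAX_DEPOSIT_COUNT = 2^32 - 1
  var z = x
  for i in 0 .. 7:
    result[31 - i] = byte(z and 0xff)
    z = z shr 8

func bytesToCount(xs: array[32, byte]): uint64 =
  for i in 0 .. 23:
    doAssert xs[i] == 0            # reject anything outside the tail
  for i in 24 .. 31:
    result = (result shl 8) or uint64(xs[i])

when isMainModule:
  for count in [0'u64, 1, 21073, 4294967295'u64]:
    doAssert bytesToCount(countToBytes(count)) == count
```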
@@ -10,17 +10,51 @@ else:
  {.push raises: [].}

import
  stew/base10,
  chronicles, chronos,
  stew/[base10, results],
  chronicles, chronos, eth/async_utils,
  ./sync/sync_manager,
  ./consensus_object_pools/[block_clearance, blockchain_dag],
  ./spec/eth2_apis/rest_beacon_client,
  ./spec/[beaconstate, eth2_merkleization, forks, presets, state_transition],
  ./spec/[beaconstate, eth2_merkleization, forks, presets,
          state_transition, deposit_snapshots],
  "."/[beacon_clock, beacon_chain_db, era_db]

from presto import RestDecodingError

const
  largeRequestsTimeout = 60.seconds # Downloading large items such as states.
  smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots.

proc fetchDepositSnapshot(client: RestClientRef):
    Future[Result[DepositTreeSnapshot, string]] {.async.} =
  let resp = try:
    awaitWithTimeout(client.getDepositSnapshot(), smallRequestsTimeout):
      return err "Fetching /eth/v1/beacon/deposit_snapshot timed out"
  except CatchableError as e:
    return err("The trusted node likely does not support the /eth/v1/beacon/deposit_snapshot end-point: " & e.msg)

  let data = resp.data.data
  let snapshot = DepositTreeSnapshot(
    eth1Block: data.execution_block_hash,
    depositContractState: DepositContractState(
      branch: data.finalized,
      deposit_count: depositCountBytes(data.deposit_count)),
    blockHeight: data.execution_block_height)

  if not snapshot.isValid(data.deposit_root):
    return err "The obtained deposit snapshot contains self-contradictory data"

  return ok snapshot

proc doTrustedNodeSync*(
    cfg: RuntimeConfig, databaseDir, eraDir, restUrl, stateId: string,
    backfill: bool, reindex: bool,
    cfg: RuntimeConfig,
    databaseDir: string,
    eraDir: string,
    restUrl: string,
    stateId: string,
    backfill: bool,
    reindex: bool,
    downloadDepositSnapshot: bool,
    genesisState: ref ForkedHashedBeaconState = nil) {.async.} =
  logScope:
    restUrl
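The timeout handling added here (fixing #4037) hangs on `awaitWithTimeout` from `eth/async_utils`: a template that races a future against a deadline and runs its trailing block if the deadline wins. A minimal standalone sketch of the same pattern (assuming the `chronos` and `eth` packages are available):

```nim
import chronos
from eth/async_utils import awaitWithTimeout

proc slowRequest(): Future[int] {.async.} =
  # Stands in for a REST call to the trusted node.
  await sleepAsync(2.seconds)
  return 42

proc demo() {.async.} =
  let answer = awaitWithTimeout(slowRequest(), 100.milliseconds):
    # Runs only if the deadline expires first; must produce the same
    # type as the future (or exit, as the error paths above do).
    echo "request timed out"
    -1
  echo "got: ", answer   # prints -1 after ~100 ms

waitFor demo()
```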
@@ -71,8 +105,11 @@ proc doTrustedNodeSync*(
      else:
        notice "Downloading genesis state", restUrl
        try:
          await client.getStateV2(
            StateIdent.init(StateIdentType.Genesis), cfg)
          awaitWithTimeout(
              client.getStateV2(StateIdent.init(StateIdentType.Genesis), cfg),
              largeRequestsTimeout):
            info "Attempt to download genesis state timed out"
            nil
        except CatchableError as exc:
          info "Unable to download genesis state",
               error = exc.msg, restUrl

@@ -111,7 +148,9 @@ proc doTrustedNodeSync*(
            StateIdent.init(tmp.slot.epoch().start_slot)
          else:
            tmp
        await client.getStateV2(id, cfg)
        awaitWithTimeout(client.getStateV2(id, cfg), largeRequestsTimeout):
          error "Attempt to download checkpoint state timed out"
          quit 1
      except CatchableError as exc:
        error "Unable to download checkpoint state",
              error = exc.msg

@@ -143,6 +182,21 @@ proc doTrustedNodeSync*(
      ChainDAGRef.preInit(db, state[])
    else:
      ChainDAGRef.preInit(db, state[])

    if downloadDepositSnapshot:
      # Fetch deposit snapshot. This API endpoint is still optional.
      let depositSnapshot = await fetchDepositSnapshot(client)
      if depositSnapshot.isOk:
        if depositSnapshot.get.matches(getStateField(state[], eth1_data)):
          info "Writing deposit contracts snapshot",
               depositRoot = depositSnapshot.get.getDepositRoot(),
               depositCount = depositSnapshot.get.getDepositCountU64
          db.putDepositTreeSnapshot(depositSnapshot.get)
        else:
          warn "The downloaded deposit snapshot does not agree with the downloaded state"
      else:
        warn "Deposit tree snapshot was not imported", reason = depositSnapshot.error

  else:
    notice "Skipping checkpoint download, database already exists (remove db directory to get a fresh snapshot)",
      databaseDir, head = shortLog(head.get())

@@ -178,7 +232,9 @@ proc doTrustedNodeSync*(
    var lastError: ref CatchableError
    for i in 0..<3:
      try:
        return await client.getBlockV2(BlockIdent.init(slot), cfg)
        return awaitWithTimeout(client.getBlockV2(BlockIdent.init(slot), cfg),
                                smallRequestsTimeout):
          raise newException(CatchableError, "Request timed out")
      except RestResponseError as exc:
        lastError = exc
        notice "Server does not support block downloads / backfilling - blocks will be downloaded later",

@@ -280,4 +336,4 @@ when isMainModule:

  waitFor doTrustedNodeSync(
    getRuntimeConfig(some os.paramStr(1)), os.paramStr(2), os.paramStr(3),
    os.paramStr(4), os.paramStr(5), backfill, false)
    os.paramStr(4), os.paramStr(5), backfill, false, true)
@@ -254,7 +254,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
       blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
       replay = true):
  let
    (genesisState, depositContractSnapshot) = loadGenesis(validators, false)
    (genesisState, depositTreeSnapshot) = loadGenesis(validators, false)
    genesisTime = float getStateField(genesisState[], genesis_time)

  var

@@ -270,13 +270,13 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  defer: db.close()

  ChainDAGRef.preInit(db, genesisState[])
  putInitialDepositContractSnapshot(db, depositContractSnapshot)
  db.putDepositTreeSnapshot(depositTreeSnapshot)

  var
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
    eth1Chain = Eth1Chain.init(cfg, db)
    merkleizer = DepositsMerkleizer.init(depositContractSnapshot.depositContractState)
    merkleizer = DepositsMerkleizer.init(depositTreeSnapshot.depositContractState)
    taskpool = Taskpool.new()
    verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
    quarantine = newClone(Quarantine.init())
@@ -11,7 +11,7 @@ import
  ../tests/testblockutil,
  ../beacon_chain/beacon_chain_db,
  ../beacon_chain/spec/datatypes/[phase0, altair],
  ../beacon_chain/spec/[beaconstate, forks, helpers],
  ../beacon_chain/spec/[beaconstate, deposit_snapshots, forks, helpers],
  ../beacon_chain/consensus_object_pools/[blockchain_dag, block_pools_types]

template withTimer*(stats: var RunningStat, body: untyped) =

@@ -68,7 +68,7 @@ func verifyConsensus*(state: ForkedHashedBeaconState, attesterRatio: auto) =
    state, finalized_checkpoint).epoch + 2 >= current_epoch

proc loadGenesis*(validators: Natural, validate: bool):
    (ref ForkedHashedBeaconState, DepositContractSnapshot) =
    (ref ForkedHashedBeaconState, DepositTreeSnapshot) =
  let
    genesisFn =
      &"genesis_{const_preset}_{validators}_{SPEC_VERSION}.ssz"

@@ -93,7 +93,7 @@ proc loadGenesis*(validators: Natural, validate: bool):
    # TODO check that the private keys are interop keys

    let contractSnapshot = SSZ.loadFile(contractSnapshotFn,
                                        DepositContractSnapshot)
                                        DepositTreeSnapshot)
    (res, contractSnapshot)
  else:
    echo "Genesis file not found, making one up (use nimbus_beacon_node createTestnet to make one)"

@@ -107,7 +107,7 @@ proc loadGenesis*(validators: Natural, validate: bool):
    var merkleizer = init DepositsMerkleizer
    for d in deposits:
      merkleizer.addChunk hash_tree_root(d).data
    let contractSnapshot = DepositContractSnapshot(
    let contractSnapshot = DepositTreeSnapshot(
      depositContractState: merkleizer.toDepositContractState)

    let res = (ref ForkedHashedBeaconState)(
@@ -96,7 +96,7 @@ RUN_GETH="0"
DL_GETH="0"
DL_ETH2="0"
BEACON_NODE_COMMAND="./build/nimbus_beacon_node"

WEB3_ARG=()
CLEANUP_DIRS=()

#NIMBUS EL VARS

@@ -737,7 +737,7 @@ else
  ganache-cli --blockTime 17 --gasLimit 100000000 -e 100000 --verbose > "${DATA_DIR}/log_ganache.txt" 2>&1 &
  PIDS="${PIDS},$!"

  WEB3_ARG="--web3-url=ws://localhost:8545"
  WEB3_ARG=("--web3-url=ws://localhost:8545")

  echo "Deploying deposit contract"
  DEPLOY_CMD_OUTPUT=$(./build/deposit_contract deploy $WEB3_ARG)

@@ -756,7 +756,7 @@ else
  ./build/deposit_contract sendDeposits \
    --deposits-file="${DEPOSITS_FILE}" \
    --min-delay=$MIN_DELAY --max-delay=$MAX_DELAY \
    $WEB3_ARG \
    "${WEB3_ARG[@]}" \
    --deposit-contract=${DEPOSIT_CONTRACT_ADDRESS} > "${DATA_DIR}/log_deposit_maker.txt" 2>&1 &

  PIDS="${PIDS},$!"
@@ -58,7 +58,7 @@ proc run() {.async.} =
  let
    eth1Monitor = Eth1Monitor.init(
      defaultRuntimeConfig, db = nil, nil, @[paramStr(1)],
      none(DepositContractSnapshot), none(Eth1Network), false,
      none(DepositTreeSnapshot), none(Eth1Network), false,
      some readJwtSecret(paramStr(2)).get)

  await eth1Monitor.ensureDataProvider()
@@ -60,7 +60,7 @@ proc run() {.async.} =
    jwtSecret = some readJwtSecret("jwt.hex").get
    eth1Monitor = Eth1Monitor.init(
      defaultRuntimeConfig, db = nil, nil, @[web3Url],
      none(DepositContractSnapshot), none(Eth1Network),
      none(DepositTreeSnapshot), none(Eth1Network),
      false, jwtSecret)
    web3Provider = (await Web3DataProvider.new(
      default(Eth1Address), web3Url, jwtSecret)).get
@@ -43,6 +43,7 @@ import # Unit test
  ./test_key_splitting,
  ./test_remote_keystore,
  ./test_serialization,
  ./test_deposit_snapshots,
  ./fork_choice/tests_fork_choice,
  ./consensus_spec/all_tests as consensus_all_tests,
  ./slashing_protection/test_fixtures,
@@ -0,0 +1,186 @@
{.used.}

import
  std/[os, random, strutils, times],
  chronos, stew/results, unittest2, chronicles,
  ../../beacon_chain/beacon_chain_db,
  ../../beacon_chain/spec/deposit_snapshots

from eth/db/kvstore import kvStore
from nimcrypto import toDigest
from snappy import encode
from stew/byteutils import hexToSeqByte

const ROOT = "342cecb5a18945fbbda7c62ede3016f3"

template databaseRoot: string = getTempDir().joinPath(ROOT)
template key1: array[1, byte] = [byte(kOldDepositContractSnapshot)]
template key2: array[1, byte] = [byte(kDepositTreeSnapshot)]

type
  DepositSnapshotUpgradeProc = proc(old: OldDepositContractSnapshot): DepositTreeSnapshot
    {.gcsafe, raises: [Defect].}

proc ifNecessaryMigrateDCS(db: BeaconChainDB,
                           upgradeProc: DepositSnapshotUpgradeProc) =
  if not db.hasDepositTreeSnapshot():
    let oldSnapshot = db.getUpgradableDepositSnapshot()
    if oldSnapshot.isSome:
      db.putDepositTreeSnapshot upgradeProc(oldSnapshot.get)

# Hexlified copy of
# eth2-networks/shared/mainnet/genesis_deposit_contract_snapshot.ssz
let ds1: seq[byte] = hexToSeqByte(
  """
  eeea1373d4aa9e099d7c9deddb694db9aeb4577755ef83f9b6345ce4357d9abfca3bfce2c
  304c4f52e0c83f96daf8c98a05f80281b62cf08f6be9c1bc10c0adbabcf2f74605a9eb36c
  f243bb5009259a3717d44df3caf02acc53ab49cfd2eeb6d4079d31e57638b3a6928ff3940
  d0d06545ae164278597bb8d46053084c335eaf9585ef52fc5eaf1f11718df7988d3f414d8
  b0be2e56e15d7ade9f5ee4cc7ee4a4c96f16c3a300034788ba8bf79c3125a697488006a4a
  4288c38fdc4e9891891cae036d14b83ff1523749d4fabf5c91e8d455dce2f14eae3408dce
  22f901efc7858ccad1a32af9e9796d3026ba18925103cad44cba4bdc1f3d3c23be125bba1
  811f1e08405d5d180444147397ea0d4aebf12edff5cebc52cb05983c8d4bd2d4a93d66676
  459ab2c5ca9d553a5c5599cc6992ed90edc939c51cc99d1820b5691914bfcab6eb8016c51
  77e9e8f006e7893ea46b232b91b1f923b05273a927cd6d0aa14720bc149ce68f20809d6fe
  55816acf09e72c14b54637dea24eb961558a7ac726d03ced287a817fa8fea71c90bd89955
  b093d7c5908305177efa8289457190435298b2d5b2b67543e4dceaf2c8b7fdbdac12836a7
  0ed910c34abcd10b3ddf53f640c85e35fef7e7ba4ab8c561fe9f1d763a32c65a1fbad5756
  6bda135236257aa502116cb72c9347d10dca1b64a342b41a829cc7ba95e71499f57be2be3
  cd00000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  0000000000000000000000000000000000000000000000000000000000000000000000000
  00000000000000000000000000000000000000000000000000000005251
  """.replace(" ", "").replace("\n", "")
)

const
  ds1Root = toDigest("1a4c3cce02935defd159e4e207890ae26a325bf03e205c9ee94ca040ecce008a")

proc fixture1() =
  ## Inserts a OldDepositContractSnapshot fixture.
  let
    compressed = snappy.encode(ds1)
    db = SqStoreRef.init(databaseRoot, "nbc").expect("")
    kv = kvStore(db.openKvStore("key_values", true).expect(""))
  kv.put(key1, compressed).expect("")
  db.close()

proc inspectDCS(snapshot: OldDepositContractSnapshot | DepositTreeSnapshot) =
  ## Inspects a DCS and checks if all of its data corresponds to
  ## what's encoded in ds1.
  const zero = toDigest("0000000000000000000000000000000000000000000000000000000000000000")
  const root = toDigest("1a4c3cce02935defd159e4e207890ae26a325bf03e205c9ee94ca040ecce008a")
  const want = [
    "ca3bfce2c304c4f52e0c83f96daf8c98a05f80281b62cf08f6be9c1bc10c0adb",
    "abcf2f74605a9eb36cf243bb5009259a3717d44df3caf02acc53ab49cfd2eeb6",
    "d4079d31e57638b3a6928ff3940d0d06545ae164278597bb8d46053084c335ea",
    "f9585ef52fc5eaf1f11718df7988d3f414d8b0be2e56e15d7ade9f5ee4cc7ee4",
    "a4c96f16c3a300034788ba8bf79c3125a697488006a4a4288c38fdc4e9891891",
    "cae036d14b83ff1523749d4fabf5c91e8d455dce2f14eae3408dce22f901efc7",
    "858ccad1a32af9e9796d3026ba18925103cad44cba4bdc1f3d3c23be125bba18",
    "11f1e08405d5d180444147397ea0d4aebf12edff5cebc52cb05983c8d4bd2d4a",
    "93d66676459ab2c5ca9d553a5c5599cc6992ed90edc939c51cc99d1820b56919",
    "14bfcab6eb8016c5177e9e8f006e7893ea46b232b91b1f923b05273a927cd6d0",
    "aa14720bc149ce68f20809d6fe55816acf09e72c14b54637dea24eb961558a7a",
    "c726d03ced287a817fa8fea71c90bd89955b093d7c5908305177efa828945719",
    "0435298b2d5b2b67543e4dceaf2c8b7fdbdac12836a70ed910c34abcd10b3ddf",
    "53f640c85e35fef7e7ba4ab8c561fe9f1d763a32c65a1fbad57566bda1352362",
    "57aa502116cb72c9347d10dca1b64a342b41a829cc7ba95e71499f57be2be3cd",
  ]
  # Check eth1Block.
  check($snapshot.eth1Block == "eeea1373d4aa9e099d7c9deddb694db9aeb4577755ef83f9b6345ce4357d9abf")
  # Check branch.
  for i in 0..want.high():
    check($snapshot.depositContractState.branch[i] == want[i])
  for i in (want.high() + 1)..31:
    check(snapshot.depositContractState.branch[i] == zero)
  # Check deposit_count.
  check(snapshot.getDepositCountU64() == 21073)
  # Check deposit root.
  check(snapshot.getDepositRoot == root)

proc inspectDCS(snapshot: DepositTreeSnapshot, wantedBlockHeight: uint64) =
  inspectDCS(snapshot)
  check(snapshot.blockHeight == wantedBlockHeight)

suite "DepositTreeSnapshot":
  setup:
    randomize()

  teardown:
    # removeDir(databaseRoot)
    discard

  test "SSZ":
    var snapshot = OldDepositContractSnapshot()
    check(decodeSSZ(ds1, snapshot))
    inspectDCS(snapshot)

  test "Migration":
    # Start with a fresh database.
    removeDir(databaseRoot)
    createDir(databaseRoot)
    # Make sure there's no DepositTreeSnapshot yet.
    let db = BeaconChainDB.new(databaseRoot, inMemory=false)
    check(db.getDepositTreeSnapshot().isErr())
    # Setup fixture.
    fixture1()
    # Make sure there's still no DepositTreeSnapshot as
    # BeaconChainDB::getDepositTreeSnapshot() checks only for DCSv2.
    check(db.getDepositTreeSnapshot().isErr())
    # Migrate DB.
    db.ifNecessaryMigrateDCS do (d: OldDepositContractSnapshot) -> DepositTreeSnapshot:
      d.toDepositTreeSnapshot(11052984)
    # Make sure now there actually is a snapshot.
    check(db.getDepositTreeSnapshot().isOk())
    # Inspect content.
    let snapshot = db.getDepositTreeSnapshot().expect("")
    inspectDCS(snapshot, 11052984)

  test "depositCount":
    let now = getTime()
    var rand = initRand(12345678)
    for i in 1..1000:
      let n = rand.next()
      let m = n mod 4294967296'u64
      check(depositCountU64(depositCountBytes(m)) == m)

  test "isValid":
    const ZERO = toDigest("0000000000000000000000000000000000000000000000000000000000000000")
    # Use our hard-coded ds1 as a model.
    var model: OldDepositContractSnapshot
    check(decodeSSZ(ds1, model))
    # Check blockHeight.
    var dcs = model.toDepositTreeSnapshot(0)
    check(not dcs.isValid(ds1Root))
    dcs.blockHeight = 11052984
    check(dcs.isValid(ds1Root))
    # Check eth1Block.
    dcs.eth1Block = ZERO
    check(not dcs.isValid(ds1Root))
    dcs.eth1Block = model.eth1Block
    check(dcs.isValid(ds1Root))
    # Check branch.
    for i in 0..len(dcs.depositContractState.branch)-1:
      dcs.depositContractState.branch[i] = ZERO
    check(not dcs.isValid(ds1Root))
    dcs.depositContractState.branch = model.depositContractState.branch
    check(dcs.isValid(ds1Root))
    # Check deposit count.
    for i in 0..len(dcs.depositContractState.deposit_count)-1:
      dcs.depositContractState.deposit_count[i] = 0
    check(not dcs.isValid(ds1Root))
    dcs.depositContractState.deposit_count = model.depositContractState.deposit_count
    check(dcs.isValid(ds1Root))
@@ -283,17 +283,9 @@ proc startBeaconNode(basePort: int) {.raises: [Defect, CatchableError].} =
  except Exception as exc: # TODO fix confutils exceptions
    raiseAssert exc.msg

  let metadata = loadEth2NetworkMetadata(dataDir)

  let node = BeaconNode.init(
    metadata.cfg,
    rng,
    runNodeConf,
    metadata.depositContractDeployedAt,
    metadata.eth1Network,
    metadata.genesisData,
    metadata.genesisDepositsSnapshot
  )
  let
    metadata = loadEth2NetworkMetadata(dataDir)
    node = BeaconNode.init(rng, runNodeConf, metadata)

  node.start() # This will run until the node is terminated by
               # setting its `bnStatus` to `Stopping`.