Local sim improvements (#4551)

* Local sim improvements

* Added support for running Capella and EIP-4844 simulations
  by downloading the correct version of Geth.

* Added support for using Nimbus remote signer and Web3Signer.
  Use 2 out of 3 threshold signing configuration in the mainnet
  configuration and regular remote signing in the minimal one.

* The local testnet simulation can now use a payload builder.
  This is currently not activated in CI due to lack of automated
  procedures for installing third-party relays or builders.

  You are advised to use mergemock for now, but for the most realistic
  results, we can create a simple builder based on the nimbus-eth1
  codebase that will be able to propose transactions from the regular
  network mempool.

* Start the simulation from a merged state. This would allow us
  to start removing pre-merge functionality such as the gossip
  subscription logic. The commit also removes the merge-forcing
  hack installed after the TTD removal.

* Consolidate all the tools used in the local simulation into a
  single `ncli_testnet` binary.
This commit is contained in:
zah 2023-02-23 04:10:07 +02:00 committed by GitHub
parent 08b6bb7a6b
commit 6036f2e7d7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
42 changed files with 2053 additions and 1243 deletions

View File

@ -67,7 +67,8 @@ TOOLS_CORE := \
nimbus_light_client \
nimbus_validator_client \
nimbus_signing_node \
validator_db_aggregator
validator_db_aggregator \
ncli_testnet
# This TOOLS/TOOLS_CORE decomposition is a workaround so nimbus_beacon_node can
# build on its own, and if/when that becomes a non-issue, it can be recombined
@ -184,11 +185,12 @@ libbacktrace:
# - --base-metrics-port + [0, --nodes)
# - --base-vc-keymanager-port + [0, --nodes)
# - --base-vc-metrics-port + [0, --nodes]
# - --base-remote-signer-port + [0, --remote-signers)
# - --base-remote-signer-port + [0, --nimbus-signer-nodes | --web3signer-nodes)
# - --base-remote-signer-metrics-port + [0, --nimbus-signer-nodes | --web3signer-nodes)
#
# Local testnets with --run-geth or --run-nimbus (only these ports):
# - --base-el-net-port + --el-port-offset * [0, --nodes + --light-clients)
# - --base-el-http-port + --el-port-offset * [0, --nodes + --light-clients)
# - --base-el-rpc-port + --el-port-offset * [0, --nodes + --light-clients)
# - --base-el-ws-port + --el-port-offset * [0, --nodes + --light-clients)
# - --base-el-auth-rpc-port + --el-port-offset * [0, --nodes + --light-clients)
UNIT_TEST_BASE_PORT := 9950
@ -202,19 +204,19 @@ restapi-test:
--resttest-delay 30 \
--kill-old-processes
ifneq ($(shell uname -p), arm)
TESTNET_EXTRA_FLAGS := --run-geth --dl-geth
else
TESTNET_EXTRA_FLAGS :=
endif
local-testnet-minimal:
./scripts/launch_local_testnet.sh \
--data-dir $@ \
--preset minimal \
--nodes 4 \
--nodes 2 \
--capella-fork-epoch 3 \
--deneb-fork-epoch 20 \
--stop-at-epoch 6 \
--disable-htop \
--remote-validators-count 512 \
--enable-payload-builder \
--nimbus-signer-nodes 1 \
--threshold 1 \
--enable-logtrace \
--base-port $$(( 6001 + EXECUTOR_NUMBER * 500 )) \
--base-rest-port $$(( 6031 + EXECUTOR_NUMBER * 500 )) \
@ -222,14 +224,15 @@ local-testnet-minimal:
--base-vc-keymanager-port $$(( 6131 + EXECUTOR_NUMBER * 500 )) \
--base-vc-metrics-port $$(( 6161 + EXECUTOR_NUMBER * 500 )) \
--base-remote-signer-port $$(( 6201 + EXECUTOR_NUMBER * 500 )) \
--base-remote-signer-metrics-port $$(( 6251 + EXECUTOR_NUMBER * 500 )) \
--base-el-net-port $$(( 6301 + EXECUTOR_NUMBER * 500 )) \
--base-el-http-port $$(( 6302 + EXECUTOR_NUMBER * 500 )) \
--base-el-rpc-port $$(( 6302 + EXECUTOR_NUMBER * 500 )) \
--base-el-ws-port $$(( 6303 + EXECUTOR_NUMBER * 500 )) \
--base-el-auth-rpc-port $$(( 6304 + EXECUTOR_NUMBER * 500 )) \
--el-port-offset 5 \
--timeout 648 \
--kill-old-processes \
$(TESTNET_EXTRA_FLAGS) \
--run-geth --dl-geth \
-- \
--verify-finalization \
--discv5:no
@ -237,24 +240,28 @@ local-testnet-minimal:
local-testnet-mainnet:
./scripts/launch_local_testnet.sh \
--data-dir $@ \
--nodes 4 \
--nodes 2 \
--stop-at-epoch 6 \
--disable-htop \
--enable-logtrace \
--nimbus-signer-nodes 3 \
--threshold 2 \
--remote-validators-count 512 \
--base-port $$(( 7001 + EXECUTOR_NUMBER * 500 )) \
--base-rest-port $$(( 7031 + EXECUTOR_NUMBER * 500 )) \
--base-metrics-port $$(( 7061 + EXECUTOR_NUMBER * 500 )) \
--base-vc-keymanager-port $$(( 7131 + EXECUTOR_NUMBER * 500 )) \
--base-vc-metrics-port $$(( 7161 + EXECUTOR_NUMBER * 500 )) \
--base-remote-signer-port $$(( 7201 + EXECUTOR_NUMBER * 500 )) \
--base-remote-signer-metrics-port $$(( 7251 + EXECUTOR_NUMBER * 500 )) \
--base-el-net-port $$(( 7301 + EXECUTOR_NUMBER * 500 )) \
--base-el-http-port $$(( 7302 + EXECUTOR_NUMBER * 500 )) \
--base-el-rpc-port $$(( 7302 + EXECUTOR_NUMBER * 500 )) \
--base-el-ws-port $$(( 7303 + EXECUTOR_NUMBER * 500 )) \
--base-el-auth-rpc-port $$(( 7304 + EXECUTOR_NUMBER * 500 )) \
--el-port-offset 5 \
--timeout 2784 \
--kill-old-processes \
$(TESTNET_EXTRA_FLAGS) \
--run-geth --dl-geth \
-- \
--verify-finalization \
--discv5:no

View File

@ -95,7 +95,6 @@ type
dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore
externalBuilderRegistrations*:
Table[ValidatorPubKey, SignedValidatorRegistrationV1]
mergeAtEpoch*: Epoch
dutyValidatorCount*: int
## Number of validators that we've checked for activation

View File

@ -64,7 +64,6 @@ else:
type
BNStartUpCmd* {.pure.} = enum
noCommand
createTestnet
deposits
wallets
record
@ -227,12 +226,6 @@ type
desc: "The slashing DB flavour to use"
name: "slashing-db-kind" .}: SlashingDbKind
mergeAtEpoch* {.
hidden
desc: "Debugging argument not for external use; may be removed at any time"
defaultValue: FAR_FUTURE_EPOCH
name: "merge-at-epoch-debug-internal" .}: uint64
numThreads* {.
defaultValue: 0,
desc: "Number of worker threads (\"0\" = use as many threads as there are CPU cores available)"
@ -324,6 +317,10 @@ type
desc: "SSZ file specifying a recent finalized state"
name: "finalized-checkpoint-state" .}: Option[InputFile]
finalizedDepositTreeSnapshot* {.
desc: "SSZ file specifying a recent finalized EIP-4881 deposit tree snapshot"
name: "finalized-deposit-tree-snapshot" .}: Option[InputFile]
finalizedCheckpointBlock* {.
hidden
desc: "SSZ file specifying a recent finalized block"
@ -607,40 +604,6 @@ type
defaultValue: HistoryMode.Archive
name: "history".}: HistoryMode
of BNStartUpCmd.createTestnet:
testnetDepositsFile* {.
desc: "A LaunchPad deposits file for the genesis state validators"
name: "deposits-file" .}: InputFile
totalValidators* {.
desc: "The number of validator deposits in the newly created chain"
name: "total-validators" .}: uint64
bootstrapAddress* {.
desc: "The public IP address that will be advertised as a bootstrap node for the testnet"
defaultValue: init(ValidIpAddress, defaultAdminListenAddress)
defaultValueDesc: $defaultAdminListenAddressDesc
name: "bootstrap-address" .}: ValidIpAddress
bootstrapPort* {.
desc: "The TCP/UDP port that will be used by the bootstrap node"
defaultValue: defaultEth2TcpPort
defaultValueDesc: $defaultEth2TcpPortDesc
name: "bootstrap-port" .}: Port
genesisOffset* {.
desc: "Seconds from now to add to genesis time"
defaultValue: 5
name: "genesis-offset" .}: int
outputGenesis* {.
desc: "Output file where to write the initial state snapshot"
name: "output-genesis" .}: OutFile
outputBootstrapFile* {.
desc: "Output file with list of bootstrap nodes for the network"
name: "output-bootstrap-file" .}: OutFile
of BNStartUpCmd.wallets:
case walletsCmd* {.command.}: WalletsCmd
of WalletsCmd.create:
@ -1136,6 +1099,13 @@ func parseCmdArg*(T: type Checkpoint, input: string): T
func completeCmdArg*(T: type Checkpoint, input: string): seq[string] =
return @[]
func parseCmdArg*(T: type Epoch, input: string): T
{.raises: [ValueError, Defect].} =
Epoch parseBiggestUInt(input)
func completeCmdArg*(T: type Epoch, input: string): seq[string] =
return @[]
func isPrintable(rune: Rune): bool =
# This can be eventually replaced by the `unicodeplus` package, but a single
# proc does not justify the extra dependencies at the moment:

View File

@ -1073,7 +1073,13 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
of ConsensusFork.EIP4844: denebFork(cfg)
stateFork = getStateField(dag.headState, fork)
if stateFork != configFork:
# Here, we check only the `current_version` field because the spec
# mandates that testnets starting directly from a particular fork
# should have `previous_version` set to `current_version` while
# this doesn't happen to be the case in networks that go through
# regular hard-fork upgrades. See for example:
# https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#testing
if stateFork.current_version != configFork.current_version:
error "State from database does not match network, check --network parameter",
tail = dag.tail, headRef, stateFork, configFork
quit 1

View File

@ -269,9 +269,7 @@ func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV1):
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data:
List[byte, MAX_EXTRA_DATA_BYTES].init(
rpcExecutionPayload.extraData.distinctBase),
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
@ -294,9 +292,7 @@ func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV2):
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data:
List[byte, MAX_EXTRA_DATA_BYTES].init(
rpcExecutionPayload.extraData.distinctBase),
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
@ -321,9 +317,7 @@ func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV3):
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data:
List[byte, MAX_EXTRA_DATA_BYTES].init(
rpcExecutionPayload.extraData.distinctBase),
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
excess_data_gas: rpcExecutionPayload.excessDataGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
@ -349,8 +343,7 @@ func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData:
DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction))
@ -372,8 +365,7 @@ func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload):
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData:
DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
@ -396,8 +388,7 @@ func asEngineExecutionPayload*(executionPayload: eip4844.ExecutionPayload):
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData:
DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
excessDataGas: executionPayload.excess_data_gas,
blockHash: executionPayload.block_hash.asBlockHash,
@ -591,7 +582,7 @@ proc forkchoiceUpdated*(
Future[engine_api.ForkchoiceUpdatedResponse] =
# Eth1 monitor can recycle connections without (external) warning; at least,
# don't crash.
if p.isNil or p.dataProvider.isNil:
if p.isNil or p.dataProvider.isNil or headBlock.isZeroMemory:
let fcuR =
newFuture[engine_api.ForkchoiceUpdatedResponse]("forkchoiceUpdated")
fcuR.complete(engine_api.ForkchoiceUpdatedResponse(
@ -613,7 +604,7 @@ proc forkchoiceUpdated*(
Future[engine_api.ForkchoiceUpdatedResponse] =
# Eth1 monitor can recycle connections without (external) warning; at least,
# don't crash.
if p.isNil or p.dataProvider.isNil:
if p.isNil or p.dataProvider.isNil or headBlock.isZeroMemory:
let fcuR =
newFuture[engine_api.ForkchoiceUpdatedResponse]("forkchoiceUpdated")
fcuR.complete(engine_api.ForkchoiceUpdatedResponse(
@ -849,8 +840,7 @@ proc pruneOldBlocks(chain: var Eth1Chain, depositIndex: uint64) =
chain.db.putDepositTreeSnapshot DepositTreeSnapshot(
eth1Block: lastBlock.hash,
depositContractState: chain.finalizedDepositsMerkleizer.toDepositContractState,
blockHeight: lastBlock.number,
)
blockHeight: lastBlock.number)
eth1_finalized_head.set lastBlock.number.toGaugeValue
eth1_finalized_deposits.set lastBlock.depositCount.toGaugeValue
@ -1144,6 +1134,10 @@ proc init*(T: type Eth1Monitor,
for url in mitems(web3Urls):
fixupWeb3Urls url
debug "Initializing Eth1Monitor",
depositContractBlockNumber,
depositContractBlockHash
let eth1Chain = Eth1Chain.init(
cfg, db, depositContractBlockNumber, depositContractBlockHash)

View File

@ -2151,7 +2151,7 @@ proc getRandomNetKeys*(rng: var HmacDrbgContext): NetKeyPair =
quit QuitFailure
initNetKeys(privKey)
proc getPersistentNetKeys(
proc getPersistentNetKeys*(
rng: var HmacDrbgContext,
dataDir, netKeyFile: string,
netKeyInsecurePassword: bool,
@ -2214,15 +2214,6 @@ proc getPersistentNetKeys*(
rng.getPersistentNetKeys(
string(config.dataDir), config.netKeyFile, config.netKeyInsecurePassword,
allowLoadExisting = true)
of BNStartUpCmd.createTestnet:
if config.netKeyFile == "random":
fatal "Could not create testnet using `random` network key"
quit QuitFailure
rng.getPersistentNetKeys(
string(config.dataDir), config.netKeyFile, config.netKeyInsecurePassword,
allowLoadExisting = false)
else:
rng.getRandomNetKeys()

View File

@ -472,7 +472,7 @@ proc init*(T: type BeaconNode,
newClone(readSszForkedHashedBeaconState(
cfg, readAllBytes(checkpointStatePath).tryGet()))
except SszError as err:
fatal "Checkpoint state deserialization failed",
fatal "Checkpoint state loading failed",
err = formatMsg(err, checkpointStatePath)
quit 1
except CatchableError as err:
@ -487,6 +487,20 @@ proc init*(T: type BeaconNode,
else:
nil
if config.finalizedDepositTreeSnapshot.isSome:
let
depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string
depositTreeSnapshot = try:
SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot)
except SszError as err:
fatal "Deposit tree snapshot loading failed",
err = formatMsg(err, depositTreeSnapshotPath)
quit 1
except CatchableError as err:
fatal "Failed to read deposit tree snapshot file", err = err.msg
quit 1
db.putDepositTreeSnapshot(depositTreeSnapshot)
let optJwtSecret = rng[].loadJwtSecret(config, allowCreate = false)
if config.web3Urls.len() == 0:
@ -689,8 +703,7 @@ proc init*(T: type BeaconNode,
# Delay first call by that time to allow for EL syncing to begin; it can
# otherwise generate an EL warning by claiming a zero merge block.
Moment.now + chronos.seconds(60),
dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()),
mergeAtEpoch: config.mergeAtEpoch.Epoch)
dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()))
node.initLightClient(
rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root)
@ -1896,69 +1909,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai
else:
node.start()
proc doCreateTestnet*(config: BeaconNodeConf, rng: var HmacDrbgContext) {.raises: [Defect, CatchableError].} =
let launchPadDeposits = try:
Json.loadFile(config.testnetDepositsFile.string, seq[LaunchPadDeposit])
except SerializationError as err:
error "Invalid LaunchPad deposits file",
err = formatMsg(err, config.testnetDepositsFile.string)
quit 1
var deposits: seq[DepositData]
for i in 0 ..< launchPadDeposits.len:
deposits.add(launchPadDeposits[i] as DepositData)
let
startTime = uint64(times.toUnix(times.getTime()) + config.genesisOffset)
outGenesis = config.outputGenesis.string
eth1Hash = if config.web3Urls.len == 0: eth1BlockHash
else: (waitFor getEth1BlockHash(
config.web3Urls[0], blockId("latest"),
rng.loadJwtSecret(config, allowCreate = true))).asEth2Digest
cfg = getRuntimeConfig(config.eth2Network)
var
initialState = newClone(initialize_beacon_state_from_eth1(
cfg, eth1Hash, startTime, deposits, {skipBlsValidation}))
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
initialState.genesis_time = startTime
doAssert initialState.validators.len > 0
let outGenesisExt = splitFile(outGenesis).ext
if cmpIgnoreCase(outGenesisExt, ".json") == 0:
Json.saveFile(outGenesis, initialState, pretty = true)
echo "Wrote ", outGenesis
let outSszGenesis = outGenesis.changeFileExt "ssz"
SSZ.saveFile(outSszGenesis, initialState[])
echo "Wrote ", outSszGenesis
let bootstrapFile = config.outputBootstrapFile.string
if bootstrapFile.len > 0:
type MetaData = altair.MetaData
let
networkKeys = getPersistentNetKeys(rng, config)
netMetadata = MetaData()
forkId = getENRForkID(
cfg,
initialState[].slot.epoch,
initialState[].genesis_validators_root)
bootstrapEnr = enr.Record.init(
1, # sequence number
networkKeys.seckey.asEthKey,
some(config.bootstrapAddress),
some(config.bootstrapPort),
some(config.bootstrapPort),
[
toFieldPair(enrForkIdField, SSZ.encode(forkId)),
toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets))
])
writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
echo "Wrote ", bootstrapFile
proc doRecord(config: BeaconNodeConf, rng: var HmacDrbgContext) {.
raises: [Defect, CatchableError].} =
case config.recordCmd:
@ -2056,7 +2006,6 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableEr
let rng = keys.newRng()
case config.cmd
of BNStartUpCmd.createTestnet: doCreateTestnet(config, rng[])
of BNStartUpCmd.noCommand: doRunBeaconNode(config, rng)
of BNStartUpCmd.deposits: doDeposits(config, rng[])
of BNStartUpCmd.wallets: doWallets(config, rng[])

View File

@ -16,12 +16,11 @@ import
"."/[eth2_merkleization, forks, signatures, validator]
from std/algorithm import fill
from std/math import `^`
from std/sequtils import anyIt, mapIt
from ./datatypes/capella import BeaconState, ExecutionPayloadHeader, Withdrawal
export extras, forks, validator
export extras, forks, validator, chronicles
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/phase0/beacon-chain.md#increase_balance
func increase_balance*(balance: var Gwei, delta: Gwei) =
@ -223,108 +222,6 @@ proc slash_validator*(
func genesis_time_from_eth1_timestamp*(cfg: RuntimeConfig, eth1_timestamp: uint64): uint64 =
eth1_timestamp + cfg.GENESIS_DELAY
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/phase0/beacon-chain.md#genesis
proc initialize_beacon_state_from_eth1*(
cfg: RuntimeConfig,
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
flags: UpdateFlags = {}): phase0.BeaconState =
## Get the genesis ``BeaconState``.
##
## Before the beacon chain starts, validators will register in the Eth1 chain
## and deposit ETH. When enough validators have registered, a
## `ChainStart` log will be emitted and the beacon chain can start beaconing.
##
## Because the state root hash is part of the genesis block, the beacon state
## must be calculated before creating the genesis block.
# Induct validators
# Not in spec: the system doesn't work unless there are at least SLOTS_PER_EPOCH
# validators - there needs to be at least one member in each committee -
# good to know for testing, though arguably the system is not that useful
# at that point :)
doAssert deposits.lenu64 >= SLOTS_PER_EPOCH
# TODO https://github.com/nim-lang/Nim/issues/19094
template state(): untyped = result
state = phase0.BeaconState(
fork: genesisFork(cfg),
genesis_time: genesis_time_from_eth1_timestamp(cfg, eth1_timestamp),
eth1_data:
Eth1Data(block_hash: eth1_block_hash, deposit_count: uint64(len(deposits))),
latest_block_header:
BeaconBlockHeader(
body_root: hash_tree_root(default(phase0.BeaconBlockBody))))
# Seed RANDAO with Eth1 entropy
state.randao_mixes.fill(eth1_block_hash)
var merkleizer = createMerkleizer(2'i64^DEPOSIT_CONTRACT_TREE_DEPTH)
for i, deposit in deposits:
let htr = hash_tree_root(deposit)
merkleizer.addChunk(htr.data)
# This is already known in the Eth1 monitor, but it would be too
# much work to refactor all the existing call sites in the test suite
state.eth1_data.deposit_root = mixInLength(merkleizer.getFinalHash(),
deposits.len)
state.eth1_deposit_index = deposits.lenu64
var pubkeyToIndex = initTable[ValidatorPubKey, ValidatorIndex]()
for idx, deposit in deposits:
let
pubkey = deposit.pubkey
amount = deposit.amount
pubkeyToIndex.withValue(pubkey, foundIdx) do:
# Increase balance by deposit amount
increase_balance(state, foundIdx[], amount)
do:
if skipBlsValidation in flags or
verify_deposit_signature(cfg, deposit):
pubkeyToIndex[pubkey] = ValidatorIndex(state.validators.len)
if not state.validators.add(get_validator_from_deposit(deposit)):
raiseAssert "too many validators"
if not state.balances.add(amount):
raiseAssert "same as validators"
else:
# Invalid deposits are perfectly possible
trace "Skipping deposit with invalid signature",
deposit = shortLog(deposit)
# Process activations
for vidx in state.validators.vindices:
let
balance = state.balances.item(vidx)
validator = addr state.validators.mitem(vidx)
validator.effective_balance = min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
validator.activation_eligibility_epoch = GENESIS_EPOCH
validator.activation_epoch = GENESIS_EPOCH
# Set genesis validators root for domain separation and chain versioning
state.genesis_validators_root = hash_tree_root(state.validators)
# TODO https://github.com/nim-lang/Nim/issues/19094
# state
proc initialize_hashed_beacon_state_from_eth1*(
cfg: RuntimeConfig,
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
flags: UpdateFlags = {}): phase0.HashedBeaconState =
# TODO https://github.com/nim-lang/Nim/issues/19094
result = phase0.HashedBeaconState(
data: initialize_beacon_state_from_eth1(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/phase0/beacon-chain.md#genesis-block
func get_initial_beacon_block*(state: phase0.HashedBeaconState):
phase0.TrustedSignedBeaconBlock =
@ -920,7 +817,248 @@ func get_next_sync_committee*(
res.aggregate_pubkey = finish(attestersAgg).toPubKey()
res
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/altair/fork.md#upgrading-the-state
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/phase0/beacon-chain.md#genesis
proc initialize_beacon_state_from_eth1*(
cfg: RuntimeConfig,
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
flags: UpdateFlags = {}): phase0.BeaconState =
## Get the genesis ``BeaconState``.
##
## Before the beacon chain starts, validators will register in the Eth1 chain
## and deposit ETH. When enough validators have registered, a
## `ChainStart` log will be emitted and the beacon chain can start beaconing.
##
## Because the state root hash is part of the genesis block, the beacon state
## must be calculated before creating the genesis block.
# Induct validators
# Not in spec: the system doesn't work unless there are at least SLOTS_PER_EPOCH
# validators - there needs to be at least one member in each committee -
# good to know for testing, though arguably the system is not that useful
# at that point :)
doAssert deposits.lenu64 >= SLOTS_PER_EPOCH
# TODO https://github.com/nim-lang/Nim/issues/19094
template state(): untyped = result
state = phase0.BeaconState(
fork: genesisFork(cfg),
genesis_time: genesis_time_from_eth1_timestamp(cfg, eth1_timestamp),
eth1_data:
Eth1Data(block_hash: eth1_block_hash, deposit_count: uint64(len(deposits))),
latest_block_header:
BeaconBlockHeader(
body_root: hash_tree_root(default(phase0.BeaconBlockBody))))
# Seed RANDAO with Eth1 entropy
state.randao_mixes.fill(eth1_block_hash)
var merkleizer = createMerkleizer(DEPOSIT_CONTRACT_LIMIT)
for i, deposit in deposits:
let htr = hash_tree_root(deposit)
merkleizer.addChunk(htr.data)
# This is already known in the Eth1 monitor, but it would be too
# much work to refactor all the existing call sites in the test suite
state.eth1_data.deposit_root = mixInLength(merkleizer.getFinalHash(),
deposits.len)
state.eth1_deposit_index = deposits.lenu64
var pubkeyToIndex = initTable[ValidatorPubKey, ValidatorIndex]()
for idx, deposit in deposits:
let
pubkey = deposit.pubkey
amount = deposit.amount
pubkeyToIndex.withValue(pubkey, foundIdx) do:
# Increase balance by deposit amount
increase_balance(state, foundIdx[], amount)
do:
if skipBlsValidation in flags or
verify_deposit_signature(cfg, deposit):
pubkeyToIndex[pubkey] = ValidatorIndex(state.validators.len)
if not state.validators.add(get_validator_from_deposit(deposit)):
raiseAssert "too many validators"
if not state.balances.add(amount):
raiseAssert "same as validators"
else:
# Invalid deposits are perfectly possible
trace "Skipping deposit with invalid signature",
deposit = shortLog(deposit)
# Process activations
for vidx in state.validators.vindices:
let
balance = state.balances.item(vidx)
validator = addr state.validators.mitem(vidx)
validator.effective_balance = min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
validator.activation_eligibility_epoch = GENESIS_EPOCH
validator.activation_epoch = GENESIS_EPOCH
# Set genesis validators root for domain separation and chain versioning
state.genesis_validators_root = hash_tree_root(state.validators)
# TODO https://github.com/nim-lang/Nim/issues/19094
# state
proc initialize_hashed_beacon_state_from_eth1*(
cfg: RuntimeConfig,
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
flags: UpdateFlags = {}): phase0.HashedBeaconState =
# TODO https://github.com/nim-lang/Nim/issues/19094
result = phase0.HashedBeaconState(
data: initialize_beacon_state_from_eth1(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/capella/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/eip4844/beacon-chain.md#testing
proc initialize_beacon_state_from_eth1*(
cfg: RuntimeConfig,
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
execution_payload_header: ForkyExecutionPayloadHeader,
flags: UpdateFlags = {}): auto =
## Get the genesis ``BeaconState``.
##
## Before the beacon chain starts, validators will register in the Eth1 chain
## and deposit ETH. When enough validators have registered, a
## `ChainStart` log will be emitted and the beacon chain can start beaconing.
##
## Because the state root hash is part of the genesis block, the beacon state
## must be calculated before creating the genesis block.
# Induct validators
# Not in spec: the system doesn't work unless there are at least SLOTS_PER_EPOCH
# validators - there needs to be at least one member in each committee -
# good to know for testing, though arguably the system is not that useful
# at that point :)
doAssert deposits.lenu64 >= SLOTS_PER_EPOCH
const consensusFork = typeof(execution_payload_header).toFork
let
forkVersion = cfg.forkVersion(consensusFork)
fork = Fork(
previous_version: forkVersion,
current_version: forkVersion,
epoch: GENESIS_EPOCH)
type BeaconState = BeaconStateType(consensusFork)
# TODO https://github.com/nim-lang/Nim/issues/19094
template state(): untyped = result
result = BeaconState(
fork: fork,
genesis_time: genesis_time_from_eth1_timestamp(cfg, eth1_timestamp),
eth1_data:
Eth1Data(block_hash: eth1_block_hash, deposit_count: uint64(len(deposits))),
latest_block_header:
BeaconBlockHeader(
body_root: hash_tree_root(default BeaconBlockBodyType(consensusFork))))
# Seed RANDAO with Eth1 entropy
state.randao_mixes.data.fill(eth1_block_hash)
var merkleizer = createMerkleizer(DEPOSIT_CONTRACT_LIMIT)
for i, deposit in deposits:
let htr = hash_tree_root(deposit)
merkleizer.addChunk(htr.data)
# This is already known in the Eth1 monitor, but it would be too
# much work to refactor all the existing call sites in the test suite
state.eth1_data.deposit_root = mixInLength(merkleizer.getFinalHash(),
deposits.len)
state.eth1_deposit_index = deposits.lenu64
var pubkeyToIndex = initTable[ValidatorPubKey, ValidatorIndex]()
for idx, deposit in deposits:
let
pubkey = deposit.pubkey
amount = deposit.amount
pubkeyToIndex.withValue(pubkey, foundIdx) do:
# Increase balance by deposit amount
increase_balance(state, foundIdx[], amount)
do:
if skipBlsValidation in flags or
verify_deposit_signature(cfg, deposit):
pubkeyToIndex[pubkey] = ValidatorIndex(state.validators.len)
if not state.validators.add(get_validator_from_deposit(deposit)):
raiseAssert "too many validators"
if not state.balances.add(amount):
raiseAssert "same as validators"
else:
# Invalid deposits are perfectly possible
trace "Skipping deposit with invalid signature",
deposit = shortLog(deposit)
# Initialize epoch participations - TODO (This must be added to the spec)
var
empty_participation: EpochParticipationFlags
inactivity_scores = HashList[uint64, Limit VALIDATOR_REGISTRY_LIMIT]()
doAssert empty_participation.asList.setLen(state.validators.len)
doAssert inactivity_scores.data.setLen(state.validators.len)
inactivity_scores.resetCache()
state.previous_epoch_participation = empty_participation
state.current_epoch_participation = empty_participation
state.inactivity_scores = inactivity_scores
# Process activations
for vidx in state.validators.vindices:
let
balance = state.balances.item(vidx)
validator = addr state.validators.mitem(vidx)
validator.effective_balance = min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
validator.activation_eligibility_epoch = GENESIS_EPOCH
validator.activation_epoch = GENESIS_EPOCH
# Set genesis validators root for domain separation and chain versioning
state.genesis_validators_root = hash_tree_root(state.validators)
# Fill in sync committees
# Note: A duplicate committee is assigned for the current and next committee at genesis
state.current_sync_committee = get_next_sync_committee(state)
state.next_sync_committee = get_next_sync_committee(state)
# [New in Bellatrix] Initialize the execution payload header
# If empty, will initialize a chain that has not yet gone through the Merge transition
state.latest_execution_payload_header = execution_payload_header
# TODO https://github.com/nim-lang/Nim/issues/19094
# state
proc initialize_hashed_beacon_state_from_eth1*(
cfg: RuntimeConfig,
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
execution_payload_header: ForkyExecutionPayloadHeader,
flags: UpdateFlags = {}): auto =
# TODO https://github.com/nim-lang/Nim/issues/19094
result = initHashedBeaconState(
initialize_beacon_state_from_eth1(
cfg, eth1_block_hash, eth1_timestamp, deposits,
execution_payload_header, flags))
result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.1/specs/altair/fork.md#upgrading-the-state
func translate_participation(
state: var altair.BeaconState,
pending_attestations: openArray[phase0.PendingAttestation]) =

View File

@ -249,6 +249,8 @@ type
## safety threshold)
current_max_active_participants*: uint64
InactivityScores* = HashList[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/altair/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
@ -298,7 +300,7 @@ type
finalized_checkpoint*: Checkpoint
# Inactivity
inactivity_scores*: HashList[uint64, Limit VALIDATOR_REGISTRY_LIMIT] # [New in Altair]
inactivity_scores*: InactivityScores # [New in Altair]
# Light client sync committees
current_sync_committee*: SyncCommittee # [New in Altair]

View File

@ -1015,3 +1015,9 @@ const eip4844ImplementationMissing* = false
#template debugRaiseAssert*(x: string) = raiseAssert x
template debugRaiseAssert*(x: string) = discard
func ofLen*[T, N](ListType: type List[T, N], n: int): ListType =
  ## Create a `List` pre-sized to `n` zero-initialized elements.
  ## Raises `SszSizeMismatchError` when `n` does not fit the list's limit.
  if n < N:
    distinctBase(result).setLen(n)
  else:
    # BUG fix: `newException` requires a message argument; the original
    # `newException(SszSizeMismatchError)` call is missing it.
    # NOTE(review): `n == N` is rejected here although a List with limit N
    # can hold exactly N elements — confirm whether `<=` was intended.
    raise newException(SszSizeMismatchError,
      "Requested length exceeds the list limit")

View File

@ -137,7 +137,7 @@ type
finalized_checkpoint*: Checkpoint
# Inactivity
inactivity_scores*: HashList[uint64, Limit VALIDATOR_REGISTRY_LIMIT] # [New in Altair]
inactivity_scores*: InactivityScores # [New in Altair]
# Light client sync committees
current_sync_committee*: SyncCommittee # [New in Altair]
@ -152,6 +152,7 @@ type
BeaconStateRef* = ref BeaconState not nil
NilableBeaconStateRef* = ref BeaconState
# TODO: There should be only a single generic HashedBeaconState definition
HashedBeaconState* = object
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
@ -347,6 +348,10 @@ type
parentHash*: string
timestamp*: string
# TODO: There should be only a single generic HashedBeaconState definition
func initHashedBeaconState*(s: BeaconState): HashedBeaconState =
  ## Wrap `s` in a `HashedBeaconState`. Only `data` is populated here;
  ## the cached `root` field is left zeroed for the caller to fill in.
  HashedBeaconState(data: s)
func fromHex*(T: typedesc[BloomLogs], s: string): T {.
    raises: [Defect, ValueError].} =
  ## Parse a hex string into a `BloomLogs` value.
  ## Raises `ValueError` when `s` is not valid hex of the expected length.
  hexToByteArray(s, result.data)

View File

@ -255,7 +255,7 @@ type
finalized_checkpoint*: Checkpoint
# Inactivity
inactivity_scores*: HashList[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
inactivity_scores*: InactivityScores
# Light client sync committees
current_sync_committee*: SyncCommittee
@ -279,6 +279,7 @@ type
BeaconStateRef* = ref BeaconState not nil
NilableBeaconStateRef* = ref BeaconState
# TODO: There should be only a single generic HashedBeaconState definition
HashedBeaconState* = object
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
@ -488,6 +489,10 @@ type
bls_to_execution_changes*:
List[SignedBLSToExecutionChange, Limit MAX_BLS_TO_EXECUTION_CHANGES]
# TODO: There should be only a single generic HashedBeaconState definition
func initHashedBeaconState*(s: BeaconState): HashedBeaconState =
  ## Wrap `s` in a `HashedBeaconState`. Only `data` is populated here;
  ## the cached `root` field is left zeroed for the caller to fill in.
  HashedBeaconState(data: s)
func shortLog*(v: SomeBeaconBlock): auto =
(
slot: shortLog(v.slot),

View File

@ -280,7 +280,7 @@ type
finalized_checkpoint*: Checkpoint
# Inactivity
inactivity_scores*: HashList[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
inactivity_scores*: InactivityScores
# Light client sync committees
current_sync_committee*: SyncCommittee
@ -303,6 +303,7 @@ type
BeaconStateRef* = ref BeaconState not nil
NilableBeaconStateRef* = ref BeaconState
# TODO: There should be only a single generic HashedBeaconState definition
HashedBeaconState* = object
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
@ -501,6 +502,10 @@ type
parentHash*: string
timestamp*: string
# TODO: There should be only a single generic HashedBeaconState definition
func initHashedBeaconState*(s: BeaconState): HashedBeaconState =
  ## Wrap `s` in a `HashedBeaconState`. Only `data` is populated here;
  ## the cached `root` field is left zeroed for the caller to fill in.
  HashedBeaconState(data: s)
func shortLog*(v: SomeBeaconBlock): auto =
(
slot: shortLog(v.slot),

View File

@ -441,7 +441,6 @@ template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: capella.MsgTrust
template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: deneb.MsgTrustedSignedBeaconBlock): T =
  ## Wrap a Deneb msg-trusted signed block into the forked union
  ## (stored under the EIP4844 discriminator).
  T(kind: ConsensusFork.EIP4844, eip4844Data: blck)

template init*(T: type ForkedTrustedSignedBeaconBlock, blck: phase0.TrustedSignedBeaconBlock): T =
  ## Wrap a phase0 trusted signed block into the forked union.
  T(kind: ConsensusFork.Phase0, phase0Data: blck)
template init*(T: type ForkedTrustedSignedBeaconBlock, blck: altair.TrustedSignedBeaconBlock): T =

View File

@ -1192,14 +1192,15 @@ proc process_epoch*(
process_justification_and_finalization(state, info.balances, flags)
# state.slot hasn't been incremented yet.
if strictVerification in flags and currentEpoch >= 2:
doAssert state.current_justified_checkpoint.epoch + 2 >= currentEpoch
if strictVerification in flags and currentEpoch >= 3:
if strictVerification in flags:
# Rule 2/3/4 finalization results in the most pessimal case. The other
# three finalization rules finalize more quickly as long as the any of
# the finalization rules triggered.
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
if (currentEpoch >= 2 and state.current_justified_checkpoint.epoch + 2 < currentEpoch) or
(currentEpoch >= 3 and state.finalized_checkpoint.epoch + 3 < currentEpoch):
fatal "The network did not finalize",
currentEpoch, finalizedEpoch = state.finalized_checkpoint.epoch
quit 1
process_inactivity_updates(cfg, state, info)

View File

@ -387,10 +387,9 @@ proc getExecutionPayload[T](
let
beaconHead = node.attestationPool[].getBeaconHead(node.dag.head)
executionBlockRoot = node.dag.loadExecutionBlockRoot(beaconHead.blck)
latestHead =
if not executionBlockRoot.isZero:
executionBlockRoot
executionHead = withState(proposalState[]):
when stateFork >= ConsensusFork.Bellatrix:
forkyState.data.latest_execution_payload_header.block_hash
else:
(static(default(Eth2Digest)))
latestSafe = beaconHead.safeExecutionPayloadHash
@ -405,7 +404,7 @@ proc getExecutionPayload[T](
Opt.none(seq[Withdrawal])
payload_id =
if lastFcU.isSome and
lastFcU.get.headBlockRoot == latestHead and
lastFcU.get.headBlockRoot == executionHead and
lastFcU.get.safeBlockRoot == latestSafe and
lastFcU.get.finalizedBlockRoot == latestFinalized and
lastFcU.get.timestamp == timestamp and
@ -414,7 +413,7 @@ proc getExecutionPayload[T](
some bellatrix.PayloadID(lastFcU.get.payloadId)
else:
debug "getExecutionPayload: didn't find payloadId, re-querying",
latestHead, latestSafe, latestFinalized,
executionHead, latestSafe, latestFinalized,
timestamp,
feeRecipient,
cachedForkchoiceUpdateInformation = lastFcU
@ -422,7 +421,7 @@ proc getExecutionPayload[T](
let random = withState(proposalState[]): get_randao_mix(
forkyState.data, get_current_epoch(forkyState.data))
let fcu_payload_id = (await forkchoice_updated(
latestHead, latestSafe, latestFinalized, timestamp, random,
executionHead, latestSafe, latestFinalized, timestamp, random,
feeRecipient, withdrawals, node.consensusManager.eth1Monitor))
await sleepAsync(500.milliseconds)
@ -534,9 +533,8 @@ proc makeBeaconBlockForHeadAndSlot*[EP](
let fut = newFuture[Opt[EP]]("given-payload")
fut.complete(modified_execution_payload)
fut
elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or not (
state[].is_merge_transition_complete or
slot.epoch >= node.mergeAtEpoch):
elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or
not state[].is_merge_transition_complete:
let fut = newFuture[Opt[EP]]("empty-payload")
fut.complete(Opt.some(default(EP)))
fut

View File

@ -4,9 +4,11 @@
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import confutils, json, times, streams, os, strutils, options, chronicles,
tables, sequtils
import json_serialization
import
std/[tables, sequtils, json, times, streams, os, strutils, options, typetraits],
confutils, chronicles, json_serialization
from stew/io2 import IoErrorCode
const
LogTraceName = "Beacon-Chain LogTrace Tool"
@ -22,55 +24,55 @@ const
LogTraceCopyright & "\r\n"
type
StartUpCommand {.pure.} = enum
StartUpCommand* {.pure.} = enum
pubsub, asl, asr, aggasr, scmsr, csr, lat, traceAll, localSimChecks
LogTraceConf = object
logFiles {.
LogTraceConf* = object
logFiles* {.
desc: "Specifies one or more log files",
abbr: "f",
name: "log-file" .}: seq[string]
simDir {.
simDir* {.
desc: "Specifies path to eth2_network_simulation directory",
defaultValue: "",
name: "sim-dir" .}: string
netDir {.
netDir* {.
desc: "Specifies path to network build directory",
defaultValue: "",
name: "net-dir" .}: string
logDir {.
logDir* {.
desc: "Specifies path with bunch of logs",
defaultValue: "",
name: "log-dir" .}: string
ignoreSerializationErrors {.
ignoreSerializationErrors* {.
desc: "Ignore serialization errors while parsing log files",
defaultValue: true,
name: "ignore-errors" .}: bool
dumpSerializationErrors {.
dumpSerializationErrors* {.
desc: "Dump full serialization errors while parsing log files",
defaultValue: false ,
name: "dump-errors" .}: bool
nodes {.
nodes* {.
desc: "Specifies node names which logs will be used",
name: "nodes" .}: seq[string]
allowedLag {.
allowedLag* {.
desc: "Allowed latency lag multiplier",
defaultValue: 2.0,
name: "lag" .}: float
constPreset {.
constPreset* {.
desc: "The const preset being used"
defaultValue: "mainnet"
name: "const-preset" .}: string
case cmd {.command.}: StartUpCommand
case cmd* {.command.}: StartUpCommand
of pubsub:
discard
of asl:
@ -316,13 +318,24 @@ template warning(issuesGroup: IssuesGroup, msg: string) =
proc new(T: type IssuesGroup, name: string): T =
  ## Create an `IssuesGroup` with the given display name.
  T(name: name)
proc readValue(reader: var JsonReader, value: var DateTime) =
# TODO These definition can be moved to a more widely accessible module.
# It's needed when we compile logtrace itself with JSON logging.
proc writeValue*(writer: var JsonWriter, value: DateTime) =
  ## Serialize a `DateTime` as its `$` string form.
  writer.writeValue($value)
proc readValue*(reader: var JsonReader, value: var DateTime) =
  ## Deserialize a `DateTime` from the log timestamp format
  ## "YYYY-MM-dd HH:mm:ss'.'fffzzz", interpreted as UTC.
  let s = reader.readValue(string)
  try:
    value = parse(s, "YYYY-MM-dd HH:mm:ss'.'fffzzz", utc())
  except CatchableError:
    raiseUnexpectedValue(reader, "Invalid date time")
proc writeValue*(writer: var JsonWriter, value: IoErrorCode) =
  ## Serialize an `IoErrorCode` as its underlying numeric value.
  writer.writeValue(distinctBase value)
proc readValue*(reader: var JsonReader, value: var IoErrorCode) =
  ## Deserialize an `IoErrorCode` from its underlying numeric value.
  # BUG fix: the decoded value was discarded instead of being assigned to
  # the `var` out-parameter, so `value` was never set.
  value = IoErrorCode reader.readValue(distinctBase IoErrorCode)
proc init(t: typedesc[GossipMessage], kind: GossipDirection, id,
datestr: string): GossipMessage =
GossipMessage(
@ -1099,7 +1112,7 @@ proc runLatencyCheck(logConf: LogTraceConf, logFiles: seq[string],
info "Latency statistics", min_time = minTime, max_time = maxTime,
avg_time = avgTime, seconds_count = len(msgs)
proc run(conf: LogTraceConf) =
proc run*(conf: LogTraceConf) =
var logFiles: seq[string]
var logNodes: seq[NodeDirectory]

649
ncli/ncli_testnet.nim Normal file
View File

@ -0,0 +1,649 @@
# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
std/[os, sequtils, strutils, options, json, terminal, times],
chronos, bearssl/rand, chronicles, confutils, stint, json_serialization,
web3, web3/confutils_defs, eth/keys, eth/p2p/discoveryv5/random2,
stew/[io2, byteutils], json_rpc/jsonmarshal,
../beacon_chain/[conf, filepath],
../beacon_chain/eth1/eth1_monitor,
../beacon_chain/networking/eth2_network,
../beacon_chain/spec/[beaconstate, eth2_merkleization],
../beacon_chain/spec/datatypes/base,
../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
../beacon_chain/validators/keystore_management,
./logtrace
# Compiled version of /scripts/depositContract.v.py in this repo
# The contract was compiled in Remix (https://remix.ethereum.org/) with vyper (remote) compiler.
const depositContractCode = staticRead "../beacon_chain/eth1/deposit_contract_code.txt"
type
  Eth1Address = web3.Address

  # Sub-commands accepted by the ncli_testnet tool.
  StartUpCommand {.pure.} = enum
    generateDeposits
    createTestnet
    run
    sendDeposits
    analyzeLogs
    deployDepositContract
    sendEth

  # Command-line configuration, parsed by confutils; the `desc`/`name`
  # pragmas double as the CLI help text and option names.
  CliConfig* = object
    web3Url* {.
      defaultValue: "",
      desc: "URL of the Web3 server to observe Eth1"
      name: "web3-url" }: string

    privateKey* {.
      defaultValue: ""
      desc: "Private key of the controlling account"
      name: "private-key" }: string

    askForKey* {.
      defaultValue: false
      desc: "Ask for an Eth1 private key interactively"
      name: "ask-for-key" }: bool

    eth2Network* {.
      desc: "The Eth2 network preset to use"
      name: "network" }: Option[string]

    case cmd* {.command.}: StartUpCommand
    of StartUpCommand.deployDepositContract:
      discard

    of StartUpCommand.sendEth:
      toAddress* {.name: "to".}: Eth1Address
      valueEth* {.name: "eth".}: string

    of StartUpCommand.generateDeposits:
      simulationDepositsCount* {.
        desc: "The number of validator keystores to generate"
        name: "count" }: Natural

      outValidatorsDir* {.
        desc: "A directory to store the generated validator keystores"
        name: "out-validators-dir" }: OutDir

      outSecretsDir* {.
        desc: "A directory to store the generated keystore password files"
        name: "out-secrets-dir" }: OutDir

      outDepositsFile* {.
        desc: "A LaunchPad deposits file to write"
        name: "out-deposits-file" }: OutFile

      threshold* {.
        defaultValue: 1
        desc: "Used to generate distributed keys"
        name: "threshold" }: uint32

      remoteValidatorsCount* {.
        defaultValue: 0
        desc: "The number of distributed validators validator"
        name: "remote-validators-count" }: uint32

      remoteSignersUrls* {.
        desc: "URLs of the remote signers"
        name: "remote-signer" }: seq[string]

    of StartUpCommand.createTestnet:
      testnetDepositsFile* {.
        desc: "A LaunchPad deposits file for the genesis state validators"
        name: "deposits-file" .}: InputFile

      totalValidators* {.
        desc: "The number of validator deposits in the newly created chain"
        name: "total-validators" .}: uint64

      bootstrapAddress* {.
        desc: "The public IP address that will be advertised as a bootstrap node for the testnet"
        defaultValue: init(ValidIpAddress, defaultAdminListenAddress)
        defaultValueDesc: $defaultAdminListenAddressDesc
        name: "bootstrap-address" .}: ValidIpAddress

      bootstrapPort* {.
        desc: "The TCP/UDP port that will be used by the bootstrap node"
        defaultValue: defaultEth2TcpPort
        defaultValueDesc: $defaultEth2TcpPortDesc
        name: "bootstrap-port" .}: Port

      dataDir* {.
        desc: "Nimbus data directory where the keys of the bootstrap node will be placed"
        name: "data-dir" .}: OutDir

      netKeyFile* {.
        desc: "Source of network (secp256k1) private key file"
        name: "netkey-file" .}: OutFile

      netKeyInsecurePassword* {.
        desc: "Use pre-generated INSECURE password for network private key file"
        defaultValue: false,
        name: "insecure-netkey-password" .}: bool

      genesisTime* {.
        desc: "Unix epoch time of the network genesis"
        name: "genesis-time" .}: Option[uint64]

      genesisOffset* {.
        desc: "Seconds from now to add to genesis time"
        name: "genesis-offset" .}: Option[int]

      executionGenesisBlock* {.
        desc: "The execution genesis block in a merged testnet"
        name: "execution-genesis-block" .}: Option[InputFile]

      capellaForkEpoch* {.
        defaultValue: FAR_FUTURE_EPOCH
        desc: "The epoch of the Capella hard-fork"
        name: "capella-fork-epoch" .}: Epoch

      denebForkEpoch* {.
        defaultValue: FAR_FUTURE_EPOCH
        desc: "The epoch of the Deneb hard-fork"
        name: "deneb-fork-epoch" .}: Epoch

      outputGenesis* {.
        desc: "Output file where to write the initial state snapshot"
        name: "output-genesis" .}: OutFile

      outputDepositTreeSnapshot* {.
        desc: "Output file where to write the initial deposit tree snapshot"
        name: "output-deposit-tree-snapshot" .}: OutFile

      outputBootstrapFile* {.
        desc: "Output file with list of bootstrap nodes for the network"
        name: "output-bootstrap-file" .}: OutFile

    of StartUpCommand.sendDeposits:
      depositsFile* {.
        desc: "A LaunchPad deposits file"
        name: "deposits-file" }: InputFile

      depositContractAddress* {.
        desc: "Address of the deposit contract"
        name: "deposit-contract" }: Eth1Address

      minDelay* {.
        defaultValue: 0.0
        desc: "Minimum possible delay between making two deposits (in seconds)"
        name: "min-delay" }: float

      maxDelay* {.
        defaultValue: 0.0
        desc: "Maximum possible delay between making two deposits (in seconds)"
        name: "max-delay" }: float

    of StartUpCommand.run:
      discard

    of StartUpCommand.analyzeLogs:
      # These options are forwarded verbatim to `LogTraceConf` by the
      # `analyzeLogs` branch of `main`.
      logFiles* {.
        desc: "Specifies one or more log files",
        abbr: "f",
        name: "log-file" .}: seq[string]

      simDir* {.
        desc: "Specifies path to eth2_network_simulation directory",
        defaultValue: "",
        name: "sim-dir" .}: string

      netDir* {.
        desc: "Specifies path to network build directory",
        defaultValue: "",
        name: "net-dir" .}: string

      logDir* {.
        desc: "Specifies path with bunch of logs",
        defaultValue: "",
        name: "log-dir" .}: string

      ignoreSerializationErrors* {.
        desc: "Ignore serialization errors while parsing log files",
        defaultValue: true,
        name: "ignore-errors" .}: bool

      dumpSerializationErrors* {.
        desc: "Dump full serialization errors while parsing log files",
        defaultValue: false ,
        name: "dump-errors" .}: bool

      nodes* {.
        desc: "Specifies node names which logs will be used",
        name: "nodes" .}: seq[string]

      allowedLag* {.
        desc: "Allowed latency lag multiplier",
        defaultValue: 2.0,
        name: "lag" .}: float

      constPreset* {.
        desc: "The const preset being used"
        defaultValue: "mainnet"
        name: "const-preset" .}: string
type
  # Fixed-width byte arguments of the deposit contract's `deposit` call.
  PubKeyBytes = DynamicBytes[48, 48]
  WithdrawalCredentialsBytes = DynamicBytes[32, 32]
  SignatureBytes = DynamicBytes[96, 96]
# Web3 interface of the deposit contract's `deposit` function
# (the contract bytecode itself is in `depositContractCode` above).
contract(DepositContract):
  proc deposit(pubkey: PubKeyBytes,
               withdrawalCredentials: WithdrawalCredentialsBytes,
               signature: SignatureBytes,
               deposit_data_root: FixedBytes[32])
template `as`(address: ethtypes.Address, T: type bellatrix.ExecutionAddress): T =
  ## Reinterpret a web3 execution-layer address as a consensus-layer one.
  T(data: distinctBase(address))

template `as`(address: BlockHash, T: type Eth2Digest): T =
  ## Reinterpret a web3 block hash as an `Eth2Digest`.
  asEth2Digest(address)
func getOrDefault[T](x: Option[T]): T =
  ## Unwrap `x`, falling back to the zero value of `T` when it is empty.
  x.get(default(T))
func `as`(blk: BlockObject, T: type bellatrix.ExecutionPayloadHeader): T =
  ## Map a web3 `BlockObject` onto a Bellatrix execution payload header.
  ## The pre-merge `difficulty` field is reused as `prev_randao`.
  T(parent_hash: blk.parentHash as Eth2Digest,
    fee_recipient: blk.miner as ExecutionAddress,
    state_root: blk.stateRoot as Eth2Digest,
    receipts_root: blk.receiptsRoot as Eth2Digest,
    logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE), # Is BE correct here?
    block_number: uint64 blk.number,
    gas_limit: uint64 blk.gasLimit,
    gas_used: uint64 blk.gasUsed,
    timestamp: uint64 blk.timestamp,
    extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.bytes),
    base_fee_per_gas: blk.baseFeePerGas.getOrDefault(),
    block_hash: blk.hash as Eth2Digest,
    transactions_root: blk.transactionsRoot as Eth2Digest)
func `as`(blk: BlockObject, T: type capella.ExecutionPayloadHeader): T =
  ## Map a web3 `BlockObject` onto a Capella execution payload header.
  ## Like the Bellatrix variant, plus `withdrawals_root` (zeroed when the
  ## block carries no withdrawals field).
  T(parent_hash: blk.parentHash as Eth2Digest,
    fee_recipient: blk.miner as ExecutionAddress,
    state_root: blk.stateRoot as Eth2Digest,
    receipts_root: blk.receiptsRoot as Eth2Digest,
    logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
    block_number: uint64 blk.number,
    gas_limit: uint64 blk.gasLimit,
    gas_used: uint64 blk.gasUsed,
    timestamp: uint64 blk.timestamp,
    extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.bytes),
    base_fee_per_gas: blk.baseFeePerGas.getOrDefault(),
    block_hash: blk.hash as Eth2Digest,
    transactions_root: blk.transactionsRoot as Eth2Digest,
    withdrawals_root: blk.withdrawalsRoot.getOrDefault() as Eth2Digest)
func `as`(blk: BlockObject, T: type deneb.ExecutionPayloadHeader): T =
  ## Map a web3 `BlockObject` onto a Deneb execution payload header.
  ## Like the Capella variant, plus `excess_data_gas` (zeroed when absent).
  T(parent_hash: blk.parentHash as Eth2Digest,
    fee_recipient: blk.miner as ExecutionAddress,
    state_root: blk.stateRoot as Eth2Digest,
    receipts_root: blk.receiptsRoot as Eth2Digest,
    logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
    block_number: uint64 blk.number,
    gas_limit: uint64 blk.gasLimit,
    gas_used: uint64 blk.gasUsed,
    timestamp: uint64 blk.timestamp,
    extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(blk.extraData.bytes),
    base_fee_per_gas: blk.baseFeePerGas.getOrDefault(),
    excess_data_gas: blk.excessDataGas.getOrDefault(),
    block_hash: blk.hash as Eth2Digest,
    transactions_root: blk.transactionsRoot as Eth2Digest,
    withdrawals_root: blk.withdrawalsRoot.getOrDefault() as Eth2Digest)
proc createDepositTreeSnapshot(deposits: seq[DepositData],
                               blockHash: Eth2Digest,
                               blockHeight: uint64): DepositTreeSnapshot =
  ## Build a `DepositTreeSnapshot` anchored at the given eth1 block by
  ## merkleizing the hash tree roots of all genesis `deposits` in order.
  var merkleizer = DepositsMerkleizer.init()
  # Idiom fix: the original loop bound an index variable it never used.
  for deposit in deposits:
    merkleizer.addChunk(hash_tree_root(deposit).data)
  DepositTreeSnapshot(
    eth1Block: blockHash,
    depositContractState: merkleizer.toDepositContractState,
    blockHeight: blockHeight)
proc doCreateTestnet*(config: CliConfig,
                      rng: var HmacDrbgContext)
                     {.raises: [Defect, CatchableError].} =
  ## Create all genesis artefacts for a local testnet: the SSZ genesis
  ## state, the deposit tree snapshot and (optionally) a bootstrap-node
  ## ENR file. The fork of the genesis state is selected by which of the
  ## `--deneb-fork-epoch` / `--capella-fork-epoch` options equals 0.
  ## Exits the process with code 1 on any input error.
  let launchPadDeposits = try:
    Json.loadFile(config.testnetDepositsFile.string, seq[LaunchPadDeposit])
  except SerializationError as err:
    error "Invalid LaunchPad deposits file",
          err = formatMsg(err, config.testnetDepositsFile.string)
    quit 1

  var deposits: seq[DepositData]
  for i in 0 ..< launchPadDeposits.len:
    deposits.add(launchPadDeposits[i] as DepositData)

  let
    # Explicit --genesis-time wins; otherwise "now + --genesis-offset".
    startTime = if config.genesisTime.isSome:
      config.genesisTime.get
    else:
      uint64(times.toUnix(times.getTime()) + config.genesisOffset.get(0))
    outGenesis = config.outputGenesis.string
    eth1Hash = eth1BlockHash # TODO: Can we set a more appropriate value?
    cfg = getRuntimeConfig(config.eth2Network)

  # This is intentionally left default initialized, when the user doesn't
  # provide an execution genesis block. The generated genesis state will
  # then be considered non-finalized merged state according to the spec.
  var genesisBlock = BlockObject()

  if config.executionGenesisBlock.isSome:
    logScope:
      path = config.executionGenesisBlock.get.string

    if not fileExists(config.executionGenesisBlock.get.string):
      error "The specified execution genesis block file doesn't exist"
      quit 1

    let genesisBlockContents = readAllChars(config.executionGenesisBlock.get.string)
    if genesisBlockContents.isErr:
      error "Failed to read the specified execution genesis block file",
            err = genesisBlockContents.error
      quit 1

    try:
      let blockAsJson = try:
        parseJson genesisBlockContents.get
      except CatchableError as err:
        error "Failed to parse the genesis block json", err = err.msg
        quit 1
      except:
        # TODO The Nim json library should not raise bare exceptions
        raiseAssert "The Nim json library raise a bare exception"
      fromJson(blockAsJson, "", genesisBlock)
    except CatchableError as err:
      error "Failed to load the genesis block from json",
            err = err.msg
      quit 1

  # Instantiated once per candidate fork below; writes the genesis state
  # and deposit tree snapshot and yields the genesis validators root.
  template createAndSaveState(genesisExecutionPayloadHeader: auto): Eth2Digest =
    var initialState = newClone(initialize_beacon_state_from_eth1(
      cfg, eth1Hash, startTime, deposits, genesisExecutionPayloadHeader,
      {skipBlsValidation}))

    # https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
    initialState.genesis_time = startTime

    doAssert initialState.validators.len > 0

    # let outGenesisExt = splitFile(outGenesis).ext
    #if cmpIgnoreCase(outGenesisExt, ".json") == 0:
    #  let outGenesisJson = outGenesis & ".json"
    #  RestJson.saveFile(outGenesisJson, initialState, pretty = true)
    #  info "JSON genesis file written", path = outGenesisJson

    let outSszGenesis = outGenesis.changeFileExt "ssz"
    SSZ.saveFile(outSszGenesis, initialState[])
    info "SSZ genesis file written",
         path = outSszGenesis, fork = toFork(typeof initialState[])

    SSZ.saveFile(
      config.outputDepositTreeSnapshot.string,
      createDepositTreeSnapshot(
        deposits,
        genesisExecutionPayloadHeader.block_hash,
        genesisExecutionPayloadHeader.block_number))

    initialState[].genesis_validators_root

  let genesisValidatorsRoot =
    if config.denebForkEpoch == 0:
      createAndSaveState(genesisBlock as deneb.ExecutionPayloadHeader)
    elif config.capellaForkEpoch == 0:
      createAndSaveState(genesisBlock as capella.ExecutionPayloadHeader)
    else:
      createAndSaveState(genesisBlock as bellatrix.ExecutionPayloadHeader)

  let bootstrapFile = string config.outputBootstrapFile
  if bootstrapFile.len > 0:
    type MetaData = altair.MetaData
    let
      networkKeys = rng.getPersistentNetKeys(
        string config.dataDir, string config.netKeyFile,
        config.netKeyInsecurePassword, allowLoadExisting = false)
      netMetadata = MetaData()
      forkId = getENRForkID(
        cfg,
        Epoch(0),
        genesisValidatorsRoot)
      bootstrapEnr = enr.Record.init(
        1, # sequence number
        networkKeys.seckey.asEthKey,
        some(config.bootstrapAddress),
        some(config.bootstrapPort),
        some(config.bootstrapPort),
        [
          toFieldPair(enrForkIdField, SSZ.encode(forkId)),
          toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets))
        ])

    writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
    echo "Wrote ", bootstrapFile
proc deployContract*(web3: Web3, code: string): Future[ReceiptObject] {.async.} =
  ## Deploy contract bytecode (hex string, with or without a leading "0x")
  ## from the default account and wait for the deployment transaction to
  ## be mined, returning its receipt.
  var code = code
  # Robustness fix: the original indexed `code[1]` unconditionally, which
  # raises an IndexDefect for inputs shorter than two characters.
  if code.len < 2 or code[1] notin {'x', 'X'}:
    code = "0x" & code
  let tr = EthSend(
    source: web3.defaultAccount,
    data: code,
    gas: Quantity(3000000).some,
    gasPrice: 1.some)

  let r = await web3.send(tr)
  result = await web3.getMinedTransactionReceipt(r)
proc sendEth(web3: Web3, to: Eth1Address, valueEth: int): Future[TxHash] =
  ## Transfer `valueEth` whole ETH from the default account to `to`,
  ## returning a future for the transaction hash.
  let
    amountWei = valueEth.u256 * 1000000000000000000.u256
    tx = EthSend(
      source: web3.defaultAccount,
      to: some(to),
      value: some(amountWei),
      gas: some(Quantity(3000000)),
      gasPrice: some(1))
  web3.send(tx)
type
  # Produces the pause between two consecutive deposits; may be nil,
  # which `sendDeposits` treats as "no delay".
  DelayGenerator* = proc(): chronos.Duration {.gcsafe, raises: [Defect].}
proc ethToWei(eth: UInt256): UInt256 =
  ## Convert an ETH-denominated amount to wei (1 ETH = 10^18 wei).
  let weiPerEth = u256(1_000_000_000_000_000_000)
  eth * weiPerEth
proc initWeb3(web3Url, privateKey: string): Future[Web3] {.async.} =
  ## Connect to the Web3 endpoint at `web3Url`. When `privateKey` is
  ## non-empty it is installed for local signing; otherwise the first
  ## account reported by the remote node becomes the default account
  ## (the node must expose at least one account).
  result = await newWeb3(web3Url)
  if privateKey.len != 0:
    result.privateKey = some(keys.PrivateKey.fromHex(privateKey)[])
  else:
    let accounts = await result.provider.eth_accounts()
    doAssert(accounts.len > 0)
    result.defaultAccount = accounts[0]
# TODO: async functions should note take `seq` inputs because
# this leads to full copies.
proc sendDeposits*(deposits: seq[LaunchPadDeposit],
                   web3Url, privateKey: string,
                   depositContractAddress: Eth1Address,
                   delayGenerator: DelayGenerator = nil) {.async.} =
  ## Submit all `deposits` (32 ETH each) to the deposit contract at
  ## `depositContractAddress`, optionally sleeping between deposits for
  ## a duration produced by `delayGenerator`. On error the Web3 session
  ## is re-established after 60 seconds and the same deposit is retried.
  notice "Sending deposits",
    web3 = web3Url,
    depositContract = depositContractAddress

  var web3 = await initWeb3(web3Url, privateKey)
  let gasPrice = int(await web3.provider.eth_gasPrice()) * 2
  let depositContract = web3.contractSender(DepositContract,
                                            Eth1Address depositContractAddress)

  # BUG fix: the loop previously started at index 4200 — apparently a
  # leftover from resuming a partial run — silently skipping the first
  # 4200 deposits.
  for i in 0 ..< deposits.len:
    let dp = deposits[i] as DepositData

    while true:
      try:
        let tx = depositContract.deposit(
          PubKeyBytes(@(dp.pubkey.toRaw())),
          WithdrawalCredentialsBytes(@(dp.withdrawal_credentials.data)),
          SignatureBytes(@(dp.signature.toRaw())),
          FixedBytes[32](hash_tree_root(dp).data))

        let status = await tx.send(value = 32.u256.ethToWei, gasPrice = gasPrice)
        info "Deposit sent", tx = $status

        if delayGenerator != nil:
          await sleepAsync(delayGenerator())

        break
      except CatchableError:
        await sleepAsync(chronos.seconds 60)
        web3 = await initWeb3(web3Url, privateKey)
{.pop.} # TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
proc main() {.async.} =
  ## Entry point: parse the CLI configuration and dispatch to the
  ## selected sub-command.
  var conf = try: CliConfig.load()
  except CatchableError as exc:
    raise exc
  except Exception as exc: # TODO fix confutils
    raiseAssert exc.msg

  let rng = keys.newRng()

  # generateDeposits is handled eagerly, before the interactive key
  # prompt below; it terminates the process when done.
  if conf.cmd == StartUpCommand.generateDeposits:
    let
      mnemonic = generateMnemonic(rng[])
      seed = getSeed(mnemonic, KeystorePass.init "")
      cfg = getRuntimeConfig(conf.eth2Network)

    # Folder-creation failures are only warnings; generateDeposits below
    # surfaces the fatal error if the directories are truly unusable.
    if (let res = secureCreatePath(string conf.outValidatorsDir); res.isErr):
      warn "Could not create validators folder",
           path = string conf.outValidatorsDir, err = ioErrorMsg(res.error)

    if (let res = secureCreatePath(string conf.outSecretsDir); res.isErr):
      warn "Could not create secrets folder",
           path = string conf.outSecretsDir, err = ioErrorMsg(res.error)

    let deposits = generateDeposits(
      cfg,
      rng[],
      seed,
      0, conf.simulationDepositsCount,
      string conf.outValidatorsDir,
      string conf.outSecretsDir,
      conf.remoteSignersUrls,
      conf.threshold,
      conf.remoteValidatorsCount,
      KeystoreMode.Fast)

    if deposits.isErr:
      fatal "Failed to generate deposits", err = deposits.error
      quit 1

    let launchPadDeposits =
      mapIt(deposits.value, LaunchPadDeposit.init(cfg, it))

    Json.saveFile(string conf.outDepositsFile, launchPadDeposits)
    notice "Deposit data written", filename = conf.outDepositsFile
    quit 0

  var deposits: seq[LaunchPadDeposit]
  if conf.cmd == StartUpCommand.sendDeposits:
    deposits = Json.loadFile(string conf.depositsFile, seq[LaunchPadDeposit])

  # Optional interactive prompt for the Eth1 private key.
  if conf.askForKey:
    var
      privateKey: string # TODO consider using a SecretString type
      reasonForKey = ""

    if conf.cmd == StartUpCommand.sendDeposits:
      let
        depositsWord = if deposits.len > 1: "deposits" else: "deposit"
        totalEthNeeded = 32 * deposits.len
      reasonForKey = " in order to make your $1 (you'll need access to $2 ETH)" %
                     [depositsWord, $totalEthNeeded]

    echo "Please enter your Goerli Eth1 private key in hex form (e.g. 0x1a2...f3c)" &
          reasonForKey

    if not readPasswordFromStdin("> ", privateKey):
      error "Failed to read an Eth1 private key from standard input"

    if privateKey.len > 0:
      conf.privateKey = privateKey.string

  case conf.cmd
  of StartUpCommand.createTestnet:
    let rng = keys.newRng()
    doCreateTestnet(conf, rng[])

  of StartUpCommand.deployDepositContract:
    let web3 = await initWeb3(conf.web3Url, conf.privateKey)
    let receipt = await web3.deployContract(depositContractCode)
    echo receipt.contractAddress.get, ";", receipt.blockHash

  of StartUpCommand.sendEth:
    let web3 = await initWeb3(conf.web3Url, conf.privateKey)
    echo await sendEth(web3, conf.toAddress, conf.valueEth.parseInt)

  of StartUpCommand.sendDeposits:
    var delayGenerator: DelayGenerator
    # Normalize the delay range: maxDelay defaults to minDelay.
    if not (conf.maxDelay > 0.0):
      conf.maxDelay = conf.minDelay
    elif conf.minDelay > conf.maxDelay:
      echo "The minimum delay should not be larger than the maximum delay"
      quit 1

    if conf.maxDelay > 0.0:
      delayGenerator = proc (): chronos.Duration =
        let
          minDelay = (conf.minDelay*1000).int64
          maxDelay = (conf.maxDelay*1000).int64
        chronos.milliseconds (rng[].rand(maxDelay - minDelay) + minDelay)

    await sendDeposits(deposits, conf.web3Url, conf.privateKey,
                       conf.depositContractAddress, delayGenerator)

  of StartUpCommand.run:
    discard

  of StartUpCommand.analyzeLogs:
    # Forwards the collected options to the logtrace tool's
    # local-simulation checks.
    try:
      logtrace.run(LogTraceConf(
        cmd: logtrace.StartUpCommand.localSimChecks,
        logFiles: conf.logFiles,
        simDir: conf.simDir,
        netDir: conf.netDir,
        logDir: conf.logDir,
        ignoreSerializationErrors: conf.ignoreSerializationErrors,
        dumpSerializationErrors: conf.dumpSerializationErrors,
        nodes: conf.nodes,
        allowedLag: conf.allowedLag,
        constPreset: conf.constPreset
      ))
    except CatchableError as err:
      fatal "Unexpected error in logtrace", err = err.msg
    except Exception as exc:
      # TODO: Investigate where is this coming from?
      fatal "Unexpected exception in logtrace", err = exc.msg

  of StartUpCommand.generateDeposits:
    # This is handled above before the case statement
    discard
when isMainModule:
  # Drive the async entry point to completion.
  waitFor main()

View File

@ -2346,7 +2346,7 @@
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": [],"value": [[]]}]
"body": [{"operator": "jstructcmps", "start": [],"value": [{"version":"","data":{"attested_header":{"beacon":{"slot":"6","proposer_index":"28","parent_root":"0xc3640959c639e6fd3e3404842f09e211c0b74e71bb44fc32939a013bf385fe77","state_root":"0x3c61bd0b9bdb706d42b4b2a093fc3bfc50444b60d67e66f0f90ac5a1bdf0fdd2","body_root":"0x41d46fd59c2b75b47f1b2a9904f1af14a0b21b2d00108e9c2c7db63355b4c14d"}},"next_sync_committee":{"pubkeys":["0x9799063b332cfbdd6c5913fdcf47d83b6277be3f174d2acd8059dcadb84dab131a9c8ec4ddb4e1e42c0acd543deebfa9"],"aggregate_pubkey":"0x8675cdecf7cb5aa6c14de5ad24f638d6c384b65737d9f60618e0a9c1411bc11659a080617d1e07d46789739ceb929d02"},"next_sync_committee_branch":["0x40cfc59c70b042b86576ff570f8c4f84ca397e436c9a13a16fc713723a53f983"],"finalized_header":{"beacon":{"slot":"0","proposer_index":"0","parent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","state_root":"0x0000000000000000000000000000000000000000000000000000000000000000","body_root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"finality_branch":["0x0000000000000000000000000000000000000000000000000000000000000000"],"sync_aggregate":{"sync_committee_bits":"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","sync_committee_signature":"0xa9e05329da094eebe7874f1664c2dfccd0430420c720e8243c25e4031a678f8d99a69270f7b72ba09d6672a7e61e155612f1e0ccf260ede2344322ea53a27acfefe6cd09bd3a543dab1f9678e474f3ac01da94af28373031c2f09e3aeeba2de8"},"signature_slot":"7"}}]}]
}
},
{
@ -2387,7 +2387,11 @@
"url": "/eth/v1/beacon/light_client/finality_update",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "404"}}
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"],"value": {"attested_header":{"beacon":{"slot":"2","proposer_index":"18","parent_root":"0x87cc18e17479f1af290f078dee05f78fb61a493866874d4c7319737f8470d8c3","state_root":"0x9b33428ca370d41a89e94239d7bb4f117bd6f3d828f84307523660481613f036","body_root":"0x2546d749931de6be38ea6aa16f0633d46aef84543f3b25804f0e14a41774ca0f"}},"finalized_header":{"beacon":{"slot":"0","proposer_index":"0","parent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","state_root":"0x0000000000000000000000000000000000000000000000000000000000000000","body_root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"finality_branch":["0x0000000000000000000000000000000000000000000000000000000000000000"],"sync_aggregate":{"sync_committee_bits":"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","sync_committee_signature":"0xb73ccf4a5ed492a75e6d569de7caf3595203b5b0687c6ea02531a9ae2fbbc7f34b9fe86e8b8f0e0ae976f199641a296b0e29a757114dc5c3cb2516d6affead47df71078f2216ec0196bfd11d6ff1881722e95317c7e6f3446ff5aacbdc94f300"},"signature_slot":"3"}}]
}
},
{
"topics": ["beacon", "beacon_light_client_optimistic_update"],
@ -2395,7 +2399,11 @@
"url": "/eth/v1/beacon/light_client/optimistic_update",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "404"}}
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"],"value": {"attested_header":{"beacon":{"slot":"2","proposer_index":"18","parent_root":"0x87cc18e17479f1af290f078dee05f78fb61a493866874d4c7319737f8470d8c3","state_root":"0x9b33428ca370d41a89e94239d7bb4f117bd6f3d828f84307523660481613f036","body_root":"0x2546d749931de6be38ea6aa16f0633d46aef84543f3b25804f0e14a41774ca0f"}},"sync_aggregate":{"sync_committee_bits":"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","sync_committee_signature":"0xb73ccf4a5ed492a75e6d569de7caf3595203b5b0687c6ea02531a9ae2fbbc7f34b9fe86e8b8f0e0ae976f199641a296b0e29a757114dc5c3cb2516d6affead47df71078f2216ec0196bfd11d6ff1881722e95317c7e6f3446ff5aacbdc94f300"},"signature_slot":"3"}}]
}
},
{
"topics": ["beacon", "pool_attestations"],
@ -2883,7 +2891,7 @@
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"], "value": {"head_slot": "", "sync_distance": "", "is_syncing": false}}]
"body": [{"operator": "jstructcmps", "start": ["data"], "value": {"head_slot": "", "sync_distance": "", "is_syncing": false, "is_optimistic": false}}]
}
},
{
@ -2903,7 +2911,7 @@
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "value": {"dependent_root": "", "data": [{"pubkey": "", "validator_index": "", "slot": ""}]}}]
"body": [{"operator": "jstructcmps", "value": {"dependent_root": "", "execution_optimistic": false, "data": [{"pubkey": "", "validator_index": "", "slot": ""}]}}]
}
},
{
@ -3146,7 +3154,7 @@
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "value": {"dependent_root": "", "data":[{"pubkey": "", "validator_index": "", "committee_index": "", "committee_length": "", "committees_at_slot": "", "validator_committee_index": "", "slot": ""}]}}]
"body": [{"operator": "jstructcmps", "value": {"dependent_root": "", "execution_optimistic": false, "data":[{"pubkey": "", "validator_index": "", "committee_index": "", "committee_length": "", "committees_at_slot": "", "validator_committee_index": "", "slot": ""}]}}]
}
},
{
@ -3252,7 +3260,7 @@
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "value": {"dependent_root": "", "data":[{"pubkey": "", "validator_index": "", "committee_index": "", "committee_length": "", "committees_at_slot": "", "validator_committee_index": "", "slot": ""}]}}]
"body": [{"operator": "jstructcmps", "value": {"dependent_root": "", "execution_optimistic": false, "data":[{"pubkey": "", "validator_index": "", "committee_index": "", "committee_length": "", "committees_at_slot": "", "validator_committee_index": "", "slot": ""}]}}]
}
},
{

21
research/timing.nim Normal file
View File

@ -0,0 +1,21 @@
import
std/[times, stats]
template withTimer*(stats: var RunningStat, body: untyped) =
  ## Runs `body` once and pushes the elapsed CPU time (in seconds,
  ## as reported by `cpuTime`) into `stats`.
  # TODO unify timing somehow
  let t0 = cpuTime()
  block:
    body
  let elapsed = cpuTime() - t0
  stats.push elapsed
template withTimerRet*(stats: var RunningStat, body: untyped): untyped =
  ## Runs `body` once, pushes the elapsed CPU time (in seconds,
  ## as reported by `cpuTime`) into `stats`, and evaluates to the
  ## value produced by `body`.
  let t0 = cpuTime()
  let res = block:
    body
  stats.push cpuTime() - t0
  res

File diff suppressed because one or more lines are too long

View File

@ -14,7 +14,7 @@ source "${SCRIPTS_DIR}/bash_utils.sh"
download_geth_stable() {
if [[ ! -e "${STABLE_GETH_BINARY}" ]]; then
GETH_VERSION="1.10.26-e5eb32ac"
GETH_VERSION="1.10.26-e5eb32ac"
GETH_URL="https://gethstore.blob.core.windows.net/builds/"
case "${OS}-${ARCH}" in

View File

@ -5,13 +5,28 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
GETH_BINARY="${GETH_BINARY:-"${HOME}/go-ethereum/build/bin/geth"}"
if [ -z "${GETH_VARS_SOURCED:-}" ]; then
GETH_VARS_SOURCED=1
GETH_NUM_NODES="${GETH_NUM_NODES:-4}"
GETH_BINARY="${GETH_BINARY:-${HOME}/go-ethereum/build/bin/geth}"
GETH_BASE_NET_PORT="${BASE_EL_NET_PORT:-30303}"
GETH_BASE_HTTP_PORT="${BASE_EL_HTTP_PORT:-8545}"
GETH_BASE_RPC_PORT="${BASE_EL_RPC_PORT:-8545}"
GETH_BASE_WS_PORT="${BASE_EL_WS_PORT:-8546}"
GETH_BASE_AUTH_RPC_PORT="${BASE_EL_AUTH_RPC_PORT:-8551}"
GETH_PORT_OFFSET="${EL_PORT_OFFSET:-10}"
GENESISJSON="${GENESISJSON:-${BASEDIR}/geth_genesis.json}"
GETH_PORT_OFFSET="${EL_PORT_OFFSET:-20}"
DISCOVER="--nodiscover"
GETH_NET_PORTS=()
GETH_AUTH_RPC_PORTS=()
GETH_DATA_DIRS=()
GETH_LAST_NODE_IDX=$((GETH_NUM_NODES - 1))
for GETH_NODE_IDX in $(seq 0 $GETH_LAST_NODE_IDX); do
GETH_NET_PORTS+=($(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_NET_PORT )))
GETH_RPC_PORTS+=($(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_RPC_PORT )))
GETH_AUTH_RPC_PORTS+=($(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_AUTH_RPC_PORT )))
GETH_DATA_DIRS+=("${DATA_DIR}/geth-${GETH_NODE_IDX}")
done
fi

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,103 @@
# This file should contain the original run-time config for the mainnet
# network [1] without all the properties overridden in the local network
# simulation. We use it to generate a full run-time config, as required
# by third-party binaries, such as Lighthouse and Web3Signer.
#
# [1]: https://raw.githubusercontent.com/ethereum/consensus-specs/dev/configs/mainnet.yaml
# Mainnet config
# Extends the mainnet preset
# (overriden in launch_local_testnet.sh) PRESET_BASE: 'mainnet'
# Free-form short name of the network that this configuration applies to - known
# canonical network names include:
# * 'mainnet' - there can be only one
# * 'prater' - testnet
# Must match the regex: [a-z0-9\-]
CONFIG_NAME: 'mainnet'
# Transition
# ---------------------------------------------------------------
# Estimated on Sept 15, 2022
# (overriden in launch_local_testnet.sh) TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000
# By default, don't use these params
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Genesis
# ---------------------------------------------------------------
# `2**14` (= 16,384)
# (overriden in launch_local_testnet.sh) MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Dec 1, 2020, 12pm UTC
# (overriden in launch_local_testnet.sh) MIN_GENESIS_TIME: 1606824000
# Mainnet initial fork version, recommend altering for testnets
GENESIS_FORK_VERSION: 0x00000000
# 604800 seconds (7 days)
# (overriden in launch_local_testnet.sh) GENESIS_DELAY: 604800
# Forking
# ---------------------------------------------------------------
# Some forks are disabled for now:
# - These may be re-assigned to another fork-version later
# - Temporarily set to max uint64 value: 2**64 - 1
# Altair
ALTAIR_FORK_VERSION: 0x01000000
# (overriden in launch_local_testnet.sh) ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC
# Bellatrix
BELLATRIX_FORK_VERSION: 0x02000000
# (overriden in launch_local_testnet.sh) BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
# Capella
CAPELLA_FORK_VERSION: 0x03000000
# (overriden in launch_local_testnet.sh) CAPELLA_FORK_EPOCH: 18446744073709551615
# EIP4844
EIP4844_FORK_VERSION: 0x04000000
# (overriden in launch_local_testnet.sh) EIP4844_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
# 12 seconds
SECONDS_PER_SLOT: 12
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 14
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**11 (= 2,048) Eth1 blocks ~8 hours
# (overriden in launch_local_testnet.sh) ETH1_FOLLOW_DISTANCE: 2048
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Fork choice
# ---------------------------------------------------------------
# 40%
PROPOSER_SCORE_BOOST: 40
# Deposit contract
# ---------------------------------------------------------------
# Ethereum PoW Mainnet
DEPOSIT_CHAIN_ID: 1
DEPOSIT_NETWORK_ID: 1
# (overriden in launch_local_testnet.sh) DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa

View File

@ -90,7 +90,7 @@ scrape_configs:
- job_name: "nimbus"
static_configs:
EOF
for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do
for NUM_NODE in $(seq 1 $NUM_NODES); do
cat >> "${CONFIG_FILE}" <<EOF
- targets: ['127.0.0.1:$(( BASE_METRICS_PORT + NUM_NODE ))']
EOF

View File

@ -0,0 +1,102 @@
# This file should contain the original run-time config for the minimal
# network [1] without all the properties overridden in the local network
# simulation. We use it to generate a full run-time config, as required
# by third-party binaries, such as Lighthouse and Web3Signer.
#
# [1]: https://raw.githubusercontent.com/ethereum/consensus-specs/dev/configs/minimal.yaml
# Minimal config
# Extends the minimal preset
# (overriden in launch_local_testnet.sh) PRESET_BASE: 'minimal'
# Free-form short name of the network that this configuration applies to - known
# canonical network names include:
# * 'mainnet' - there can be only one
# * 'prater' - testnet
# Must match the regex: [a-z0-9\-]
CONFIG_NAME: 'minimal'
# Transition
# ---------------------------------------------------------------
# 2**256-2**10 for testing minimal network
# (overriden in launch_local_testnet.sh) TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912
# By default, don't use these params
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Genesis
# ---------------------------------------------------------------
# [customized]
# (overriden in launch_local_testnet.sh) MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 64
# Jan 3, 2020
# (overriden in launch_local_testnet.sh) MIN_GENESIS_TIME: 1578009600
# Highest byte set to 0x01 to avoid collisions with mainnet versioning
GENESIS_FORK_VERSION: 0x00000001
# [customized] Faster to spin up testnets, but does not give validator reasonable warning time for genesis
# (overriden in launch_local_testnet.sh) GENESIS_DELAY: 300
# Forking
# ---------------------------------------------------------------
# Values provided for illustrative purposes.
# Individual tests/testnets may set different values.
# Altair
ALTAIR_FORK_VERSION: 0x01000001
# (overriden in launch_local_testnet.sh) ALTAIR_FORK_EPOCH: 18446744073709551615
# Bellatrix
BELLATRIX_FORK_VERSION: 0x02000001
# (overriden in launch_local_testnet.sh) BELLATRIX_FORK_EPOCH: 18446744073709551615
# Capella
CAPELLA_FORK_VERSION: 0x03000001
# (overriden in launch_local_testnet.sh) CAPELLA_FORK_EPOCH: 18446744073709551615
# EIP4844
EIP4844_FORK_VERSION: 0x04000001
# (overriden in launch_local_testnet.sh) EIP4844_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
# [customized] Faster for testing purposes
SECONDS_PER_SLOT: 6
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 14
# 2**8 (= 256) epochs
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
SHARD_COMMITTEE_PERIOD: 64
# [customized] process deposits more quickly, but insecure
# (overriden in launch_local_testnet.sh) ETH1_FOLLOW_DISTANCE: 16
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# [customized] scale queue churn at much lower validator counts for testing
CHURN_LIMIT_QUOTIENT: 32
# Fork choice
# ---------------------------------------------------------------
# 40%
PROPOSER_SCORE_BOOST: 40
# Deposit contract
# ---------------------------------------------------------------
# Ethereum Goerli testnet
DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
# Configured on a per testnet basis
# (overriden in launch_local_testnet.sh) DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890

View File

@ -5,16 +5,29 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
NIMBUSEL_DISCOVERY="--discovery=None"
if [ -z "${NIMBUS_ETH1_VARS_SOURCED:-}" ]; then
NIMBUS_ETH1_VARS_SOURCED=1
NIMBUS_ETH1_NUM_NODES="${NIMBUS_ETH1_NUM_NODES:-4}"
NIMBUS_ETH1_BASE_NET_PORT="${BASE_EL_NET_PORT:-40404}"
NIMBUS_ETH1_BASE_RPC_PORT="${BASE_EL_RPC_PORT:-9545}"
NIMBUS_ETH1_BASE_WS_PORT="${BASE_EL_WS_PORT:-9546}"
NIMBUS_ETH1_BASE_AUTH_RPC_PORT="${BASE_EL_AUTH_RPC_PORT:-9551}"
NIMBUS_ETH1_PORT_OFFSET="${EL_PORT_OFFSET:-10}"
NIMBUSEL_BINARY="${NIMBUSEL_BINARY:-"${HOME}/work/nimbus-eth1/build/nimbus"}"
NIMBUSEL_GENESIS="${NIMBUSEL_GENESIS:-"${HOME}/work/nimbus-eth2/scripts/nimbusel_genesis.json"}"
NIMBUSEL_NUM_NODES="${NIMBUSEL_NUM_NODES:-4}"
NIMBUSEL_BINARY="${NIMBUSEL_BINARY:-${HOME}/go-ethereum/build/bin/geth}"
NIMBUSEL_BASE_NET_PORT="${BASE_EL_NET_PORT:-30303}"
NIMBUSEL_BASE_HTTP_PORT="${BASE_EL_HTTP_PORT:-8545}"
NIMBUSEL_BASE_WS_PORT="${BASE_EL_WS_PORT:-8546}"
NIMBUSEL_BASE_AUTH_RPC_PORT="${BASE_EL_AUTH_RPC_PORT:-8551}"
NIMBUSEL_PORT_OFFSET="${EL_PORT_OFFSET:-10}"
CURL_BINARY=${CURL_BINARY:-curl}
JQ_BINARY=${JQ_BINARY:-jq}
NIMBUS_ETH1_NET_PORTS=()
NIMBUS_ETH1_RPC_PORTS=()
NIMBUS_ETH1_AUTH_RPC_PORTS=()
NIMBUS_ETH1_LAST_NODE_IDX=$((NIMBUS_ETH1_NUM_NODES - 1))
for NIMBUS_ETH1_NODE_IDX in $(seq 0 $NIMBUS_ETH1_LAST_NODE_IDX); do
NIMBUS_ETH1_NET_PORTS+=($(( NIMBUS_ETH1_NODE_IDX * NIMBUS_ETH1_PORT_OFFSET + 1 + NIMBUS_ETH1_BASE_NET_PORT )))
NIMBUS_ETH1_RPC_PORTS+=($(( NIMBUS_ETH1_NODE_IDX * NIMBUS_ETH1_PORT_OFFSET + 1 + NIMBUS_ETH1_BASE_RPC_PORT )))
NIMBUS_ETH1_AUTH_RPC_PORTS+=($(( NIMBUS_ETH1_NODE_IDX * NIMBUS_ETH1_PORT_OFFSET + 1 + NIMBUS_ETH1_BASE_AUTH_RPC_PORT )))
done
fi

15
scripts/run-nimbus.sh Executable file
View File

@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Developer convenience script for running a mainnet Nimbus beacon node.
# Step 1: perform a trusted-node sync using the settings in config.toml.
# (The commented-out flags below show the equivalent command-line form.)
build/nimbus_beacon_node trustedNodeSync --config-file=config.toml
#\
# --network:mainnet \
# --data-dir=build/mainnet/nimbus \
# --backfill=false \
# --trusted-node-url=http://testing.mainnet.beacon-api.nimbus.team/
# Step 2: make sure a JWT secret exists for the engine-API connection
# to the execution client (generated once, then reused).
if [ ! -f build/mainnet/jwtsecret ]; then
  openssl rand -hex 32 | tr -d "\n" > build/mainnet/jwtsecret
fi
# Step 3 (disabled): launch the beacon node against a local EL at :9551.
# build/nimbus_beacon_node --non-interactive --udp-port=9123 --tcp-port=9123 --network=mainnet --log-level=DEBUG --data-dir=build/mainnet/nimbus --web3-url=http://localhost:9551/ --rest:on --metrics:on --doppelganger-detection=no --jwt-secret=build/mainnet/jwtsecret

View File

@ -2,58 +2,63 @@
set -euo pipefail
BASEDIR="$(dirname "${BASH_SOURCE[0]}")"
SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")"
. "${BASEDIR}/geth_vars.sh"
source "${SCRIPTS_DIR}/geth_binaries.sh"
source "${SCRIPTS_DIR}/geth_vars.sh"
#These are used in the caller script
GETH_ENODES=()
GETH_HTTP_PORTS=()
GETH_NET_PORTS=()
GETH_WS_PORTS=()
GETH_RPC_PORTS=()
GETH_DATA_DIRS=()
log "Using ${GETH_BINARY}"
for GETH_NUM_NODE in $(seq 0 $(( GETH_NUM_NODES - 1 ))); do
GETH_NET_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_NET_PORT ))
GETH_HTTP_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_HTTP_PORT ))
GETH_WS_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_WS_PORT ))
GETH_AUTH_RPC_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_AUTH_RPC_PORT ))
log "Starting geth node ${GETH_NUM_NODE} on net port ${GETH_NET_PORT} HTTP port ${GETH_HTTP_PORT} WS port ${GETH_WS_PORT}"
GETHDATADIR=$(mktemp -d "${DATA_DIR}"/geth-data-XXX)
GETH_DATA_DIRS+=(${GETHDATADIR})
openssl rand -hex 32 | tr -d "\n" > "${GETHDATADIR}/jwtsecret"
${GETH_BINARY} --http --ws -http.api "engine" --datadir "${GETHDATADIR}" init "${GENESISJSON}"
${GETH_BINARY} --syncmode full --http --ws --http.corsdomain '*' --http.api "eth,net,engine" -ws.api "eth,net,engine" --datadir "${GETHDATADIR}" ${DISCOVER} --port ${GETH_NET_PORT} --http.port ${GETH_HTTP_PORT} --ws.port ${GETH_WS_PORT} --authrpc.port ${GETH_AUTH_RPC_PORT} --authrpc.jwtsecret "${GETHDATADIR}/jwtsecret" &> "${DATA_DIR}/geth-log${GETH_NUM_NODE}.txt" &
GETH_RETRY=0
while :; do
if [[ -S "${GETHDATADIR}/geth.ipc" ]]; then
echo "Geth ${GETH_NUM_NODE} started in $(( GETH_RETRY * 100 ))ms"
break
fi
if (( ++GETH_RETRY >= 300 )); then
echo "Geth ${GETH_NUM_NODE} failed to start"
exit 1
fi
sleep 0.1
done
NODE_ID=$(${GETH_BINARY} attach --datadir "${GETHDATADIR}" --exec admin.nodeInfo.enode)
GETH_ENODES+=("${NODE_ID}")
GETH_HTTP_PORTS+=("${GETH_HTTP_PORT}")
GETH_NET_PORTS+=("${GETH_NET_PORT}")
GETH_WS_PORTS+=("${GETH_WS_PORT}")
GETH_RPC_PORTS+=("${GETH_AUTH_RPC_PORT}")
start_geth_node() {
GETH_NODE_IDX=$1
mkdir -p "${GETH_DATA_DIRS[GETH_NODE_IDX]}"
set -x
${GETH_BINARY} version
${GETH_BINARY} --datadir "${GETH_DATA_DIRS[GETH_NODE_IDX]}" init "${EXECUTION_GENESIS_JSON}"
${GETH_BINARY} \
--syncmode full \
--datadir "${GETH_DATA_DIRS[GETH_NODE_IDX]}" \
${DISCOVER} \
--http \
--http.port ${GETH_RPC_PORTS[GETH_NODE_IDX]} \
--port ${GETH_NET_PORTS[GETH_NODE_IDX]} \
--authrpc.port ${GETH_AUTH_RPC_PORTS[GETH_NODE_IDX]} \
--authrpc.jwtsecret "${JWT_FILE}"
}
for GETH_NODE_IDX in $(seq 0 $GETH_LAST_NODE_IDX); do
start_geth_node $GETH_NODE_IDX \
&> "${DATA_DIR}/geth-log${GETH_NODE_IDX}.txt" &
done
for GETH_NODE_IDX in $(seq 0 $GETH_LAST_NODE_IDX); do
GETH_RETRY=0
while :; do
if [[ -S "${GETH_DATA_DIRS[GETH_NODE_IDX]}/geth.ipc" ]]; then
echo "Geth ${GETH_NODE_IDX} started in $(( GETH_RETRY * 100 ))ms"
break
fi
if (( ++GETH_RETRY >= 300 )); then
echo "Geth ${GETH_NODE_IDX} failed to start"
exit 1
fi
sleep 0.1
done
NODE_ID=$(${GETH_BINARY} attach --datadir "${GETH_DATA_DIRS[GETH_NODE_IDX]}" --exec admin.nodeInfo.enode)
GETH_ENODES+=("${NODE_ID}")
done
#Add all nodes as peers
for dir in "${GETH_DATA_DIRS[@]}"
do
for enode in "${GETH_ENODES[@]}"
do
${GETH_BINARY} attach --datadir "${dir}" --exec "admin.addPeer(${enode})"
done
for enode in "${GETH_ENODES[@]}"
do
${GETH_BINARY} attach --datadir "${dir}" --exec "admin.addPeer(${enode})" &
done
done
log "GETH HTTP Ports: ${GETH_HTTP_PORTS[*]}"
log "GETH RPC Ports: ${GETH_AUTH_RPC_PORTS[*]}"

View File

@ -2,66 +2,74 @@
set -euo pipefail
BASEDIR="$(dirname "${BASH_SOURCE[0]}")"
SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")"
. "${BASEDIR}/nimbus_el_vars.sh"
. "${SCRIPTS_DIR}/nimbus_el_vars.sh"
#These are used in the caller script
NIMBUSEL_ENODES=()
NIMBUSEL_HTTP_PORTS=()
NIMBUSEL_NET_PORTS=()
NIMBUSEL_WS_PORTS=()
NIMBUSEL_RPC_PORTS=()
NIMBUSEL_DATA_DIRS=()
NIMBUS_ETH1_ENODES=()
NIMBUS_ETH1_DATA_DIRS=()
wait_for_port() {
for EXPONENTIAL_BACKOFF in {1..10}; do
nc -w 1 -z $1 $2 && break;
nc -w 1 -z $1 $2 > /dev/null && break;
DELAY=$((2**$EXPONENTIAL_BACKOFF))
echo "Port ${2} not yet available. Waiting ${DELAY} seconds"
sleep $DELAY
done
}
log "Using ${NIMBUSEL_BINARY}"
if [ -d /opt/homebrew/lib ]; then
# BEWARE
# The recent versions of homebrew/macOS can't add the libraries
# installed by Homebrew in the system's library search path, so
# Nimbus will fail to load RocksDB on start-up. The new rules in
# macOS make it very difficult for the user to solve the problem
# in their profile, so we add an override here as the lesser evil:
export DYLD_LIBRARY_PATH="${DYLD_LIBRARY_PATH:-}:/opt/homebrew/lib"
# See https://github.com/Homebrew/brew/issues/13481 for more details
fi
for NUM_NODE in $(seq 0 $(( NIMBUSEL_NUM_NODES - 1 ))); do
NIMBUSEL_NET_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_NET_PORT ))
NIMBUSEL_HTTP_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_HTTP_PORT ))
NIMBUSEL_WS_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_WS_PORT ))
NIMBUSEL_AUTH_RPC_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_AUTH_RPC_PORT ))
log "Starting nimbus EL node ${NUM_NODE} on net port ${NIMBUSEL_NET_PORT} HTTP port ${NIMBUSEL_HTTP_PORT} WS port ${NIMBUSEL_WS_PORT}"
NIMBUSEL_DATADIR=$(mktemp -d nimbusel-data-XXX)
NIMBUSEL_DATA_DIRS+=("${NIMBUSEL_DATADIR}")
openssl rand -hex 32 | tr -d "\n" > "${NIMBUSEL_DATADIR}/jwtsecret"
${NIMBUSEL_BINARY} --data-dir="${NIMBUSEL_DATADIR}" --custom-network="${NIMBUSEL_GENESIS}" "${NIMBUSEL_DISCOVERY}" \
--tcp-port="${NIMBUSEL_NET_PORT}" --engine-api --engine-api-port="${NIMBUSEL_AUTH_RPC_PORT}" \
--rpc --rpc-port="${NIMBUSEL_HTTP_PORT}" &> "${DATA_DIR}/nimbusel_log${NUM_NODE}.txt" &
PROCS_TO_KILL+="(${NIMBUS_ETH1_BINARY})"
wait_for_port localhost "${NIMBUSEL_HTTP_PORT}"
for NIMBUS_ETH1_NODE_IDX in $(seq 0 $NIMBUS_ETH1_LAST_NODE_IDX); do
NIMBUS_ETH1_DATA_DIR=$(mktemp -d "${DATA_DIR}/nimbus-eth1-data-XXXXXX")
NIMBUS_ETH1_DATA_DIRS+=("${NIMBUS_ETH1_DATA_DIR}")
NODE_ID=$(
"${CURL_BINARY}" -sS -X POST \
-H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","id":"id","method":"net_nodeInfo"}' \
"http://localhost:${NIMBUSEL_HTTP_PORT}" | "${JQ_BINARY}" .result.enode)
log "EL Node ID" "${NODE_ID}"
NIMBUSEL_ENODES+=("${NODE_ID}")
NIMBUSEL_HTTP_PORTS+=("${NIMBUSEL_HTTP_PORT}")
NIMBUSEL_NET_PORTS+=("${NIMBUSEL_NET_PORT}")
NIMBUSEL_WS_PORTS+=("${NIMBUSEL_WS_PORT}")
NIMBUSEL_RPC_PORTS+=("${NIMBUSEL_AUTH_RPC_PORT}")
${NIMBUS_ETH1_BINARY} \
--data-dir="${NIMBUS_ETH1_DATA_DIR}" \
--custom-network="${EXECUTION_GENESIS_JSON}" \
--discovery=None \
--tcp-port="${NIMBUS_ETH1_NET_PORTS[NIMBUS_ETH1_NODE_IDX]}" \
--jwt-secret="${JWT_FILE}" \
--engine-api --engine-api-port="${NIMBUS_ETH1_AUTH_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}" \
--rpc --rpc-port="${NIMBUS_ETH1_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}" \
&> "${DATA_DIR}/nimbus_eth1_log${NIMBUS_ETH1_NODE_IDX}.txt" &
done
for enode in "${NIMBUSEL_ENODES[@]}"
echo "Waiting for the Nimbus ETH1 nodes to come online..."
for NIMBUS_ETH1_NODE_IDX in $(seq 0 $NIMBUS_ETH1_LAST_NODE_IDX); do
wait_for_port localhost "${NIMBUS_ETH1_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}"
NODE_ID=$(
"${CURL_BINARY}" -sS -X POST \
-H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","id":"id","method":"net_nodeInfo"}' \
"http://localhost:${NIMBUS_ETH1_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}" | "${JQ_BINARY}" .result.enode)
log "EL Node ID" "${NODE_ID}"
NIMBUS_ETH1_ENODES+=("${NODE_ID}")
done
# TODO Here we should connect to the Geth nodes as well
echo "Connect all nodes though the nimbus_addPeer RPC call..."
for enode in "${NIMBUS_ETH1_ENODES[@]}"
do
for port in "${NIMBUSEL_HTTP_PORTS[@]}"
for port in "${NIMBUS_ETH1_RPC_PORTS[@]}"
do
"${CURL_BINARY}" -sS -X POST \
-H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","id":"1","method":"nimbus_addPeer","params": ['"${enode}"']}' \
"http://localhost:${port}"
done
"http://localhost:${port}" &
done
done
echo "NimbusEL HTTP Ports: ${NIMBUSEL_HTTP_PORTS[*]}"
echo "Nimbus ETH1 HTTP Ports: ${NIMBUS_ETH1_RPC_PORTS[*]}"

View File

@ -53,14 +53,13 @@ proc run() {.async.} =
echo "args are: web3url jwtsecretfilename"
let
eth1Monitor = Eth1Monitor.init(
elManager = newClone ELManager.init(
defaultRuntimeConfig, db = nil, nil, @[paramStr(1)],
none(DepositTreeSnapshot), none(Eth1Network), false,
some readJwtSecret(paramStr(2)).get)
await eth1Monitor.ensureDataProvider()
try:
await eth1Monitor.exchangeTransitionConfiguration()
await elManager.exchangeTransitionConfiguration()
except ValueError as exc:
# Expected, since nothing here sets up the Nimbus TTD correctly
echo "exchangeTransitionConfiguration ValueError: " & exc.msg

View File

@ -1,6 +1,5 @@
# Use only `secp256k1` public key cryptography as an identity in LibP2P.
-d:"libp2p_pki_schemes=secp256k1"
-d:chronosStrictException
--styleCheck:usages
--styleCheck:hint

View File

@ -101,11 +101,12 @@ LOG_TEST_FILE="${TEST_DIR}/client_log.txt"
VALIDATORS_DIR="${TEST_DIR}/validators"
SECRETS_DIR="${TEST_DIR}/secrets"
SNAPSHOT_FILE="${TEST_DIR}/genesis.ssz"
DEPOSIT_TREE_SNAPSHOT_FILE="${TEST_DIR}/deposit_tree_snapshot.ssz"
NETWORK_BOOTSTRAP_FILE="${TEST_DIR}/bootstrap_nodes.txt"
RESTTEST_RULES="${GIT_ROOT}/ncli/resttest-rules.json"
DEPOSIT_CONTRACT_BIN="${GIT_ROOT}/build/deposit_contract"
RESTTEST_BIN="${GIT_ROOT}/build/resttest"
NIMBUS_BEACON_NODE_BIN="${GIT_ROOT}/build/nimbus_beacon_node"
LOCAL_TESTNET_SIMULATION_BIN="${GIT_ROOT}/build/ncli_testnet"
BOOTSTRAP_ENR_FILE="${TEST_DIR}/beacon_node.enr"
RUNTIME_CONFIG_FILE="${TEST_DIR}/config.yaml"
DEPOSITS_FILE="${TEST_DIR}/deposits.json"
@ -163,10 +164,13 @@ if [[ -f "${DEPOSITS_FILE}" ]]; then
EXISTING_VALIDATORS=$(grep -o -i deposit_data_root "${DEPOSITS_FILE}" | wc -l)
fi
build_if_missing nimbus_beacon_node
build_if_missing ncli_testnet
build_if_missing resttest
if [[ ${EXISTING_VALIDATORS} -ne ${NUM_VALIDATORS} ]]; then
build_if_missing deposit_contract
rm -rf "${VALIDATORS_DIR}" "${SECRETS_DIR}"
${DEPOSIT_CONTRACT_BIN} generateSimulationDeposits \
${LOCAL_TESTNET_SIMULATION_BIN} generateDeposits \
--count="${NUM_VALIDATORS}" \
--out-validators-dir="${VALIDATORS_DIR}" \
--out-secrets-dir="${SECRETS_DIR}" \
@ -174,9 +178,6 @@ if [[ ${EXISTING_VALIDATORS} -ne ${NUM_VALIDATORS} ]]; then
echo "All deposits prepared"
fi
build_if_missing nimbus_beacon_node
build_if_missing resttest
# Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell
# instance as the parent and the target process name as a pattern to the
# "pkill" command.
@ -189,22 +190,6 @@ cleanup() {
}
trap 'cleanup' SIGINT SIGTERM EXIT
echo "Creating testnet genesis..."
${NIMBUS_BEACON_NODE_BIN} \
--data-dir="${TEST_DIR}" \
createTestnet \
--deposits-file="${DEPOSITS_FILE}" \
--total-validators="${NUM_VALIDATORS}" \
--output-genesis="${SNAPSHOT_FILE}" \
--output-bootstrap-file="${NETWORK_BOOTSTRAP_FILE}" \
--netkey-file=network_key.json \
--insecure-netkey-password=true \
--genesis-offset=-12 # Chain that has already started allows testing empty slots
# Make sure we use the newly generated genesis
echo "Removing existing database..."
rm -rf "${TEST_DIR}/db"
DEPOSIT_CONTRACT_ADDRESS="0x0000000000000000000000000000000000000000"
DEPOSIT_CONTRACT_BLOCK="0x0000000000000000000000000000000000000000000000000000000000000000"
@ -218,8 +203,26 @@ GENESIS_DELAY: 10
GENESIS_FORK_VERSION: 0x00000000
DEPOSIT_CONTRACT_ADDRESS: ${DEPOSIT_CONTRACT_ADDRESS}
ETH1_FOLLOW_DISTANCE: 1
ALTAIR_FORK_EPOCH: 0
BELLATRIX_FORK_EPOCH: 0
EOF
echo "Creating testnet genesis..."
${LOCAL_TESTNET_SIMULATION_BIN} \
createTestnet \
--data-dir="${TEST_DIR}" \
--deposits-file="${DEPOSITS_FILE}" \
--total-validators="${NUM_VALIDATORS}" \
--output-genesis="${SNAPSHOT_FILE}" \
--output-deposit-tree-snapshot="${DEPOSIT_TREE_SNAPSHOT_FILE}" \
--output-bootstrap-file="${NETWORK_BOOTSTRAP_FILE}" \
--netkey-file=network_key.json \
--insecure-netkey-password=true \
--genesis-offset=-60 # Chain that has already started allows testing empty slots
# Make sure we use the newly generated genesis
echo "Removing existing database..."
rm -rf "${TEST_DIR}/db" "${TEST_DIR}/validators/slashing_protection.sqlite3"
${NIMBUS_BEACON_NODE_BIN} \
--tcp-port=${BASE_PORT} \
--udp-port=${BASE_PORT} \

View File

@ -1,104 +0,0 @@
#!/usr/bin/env bash
set -e
NODE_ID=${1}
shift
# Read in variables
# shellcheck source=/dev/null
source "$(dirname "$0")/vars.sh"
if [[ -n "$1" ]]; then
ADDITIONAL_BEACON_NODE_ARGS=$1
shift
else
ADDITIONAL_BEACON_NODE_ARGS=""
fi
BOOTSTRAP_ARG=""
if [[ -n "$1" ]]; then
BOOTSTRAP_NODE_ID=$1
shift
else
BOOTSTRAP_NODE_ID=$BOOTSTRAP_NODE
fi
BOOTSTRAP_ADDRESS_FILE="${SIMULATION_DIR}/node-${BOOTSTRAP_NODE_ID}/beacon_node.enr"
if [[ "$NODE_ID" != "$BOOTSTRAP_NODE" ]]; then
BOOTSTRAP_ARG="--bootstrap-file=$BOOTSTRAP_ADDRESS_FILE"
else
BOOTSTRAP_ARG="--netkey-file=network_key.json --insecure-netkey-password"
fi
# set up the environment
# shellcheck source=/dev/null
source "${SIM_ROOT}/../../env.sh"
cd "$GIT_ROOT"
NODE_DATA_DIR="${SIMULATION_DIR}/node-$NODE_ID"
NODE_VALIDATORS_DIR=$NODE_DATA_DIR/validators/
NODE_SECRETS_DIR=$NODE_DATA_DIR/secrets/
MAKEDIR=$GIT_ROOT/scripts/makedir.sh
COPYFILE=$GIT_ROOT/scripts/copyfile.sh
PORT=$(( BASE_P2P_PORT + NODE_ID ))
NAT_ARG="--nat:extip:127.0.0.1"
if [ "${NAT:-}" == "1" ]; then
NAT_ARG="--nat:any"
fi
"$MAKEDIR" "$NODE_DATA_DIR"
rm -rf "$NODE_VALIDATORS_DIR"
"$MAKEDIR" "$NODE_VALIDATORS_DIR"
rm -rf "$NODE_SECRETS_DIR"
"$MAKEDIR" "$NODE_SECRETS_DIR"
VALIDATORS_PER_NODE=$(( NUM_VALIDATORS / (TOTAL_NODES - 1) ))
if [ "${USE_BN_VC_VALIDATOR_SPLIT:-}" == "yes" ]; then
# if using validator client binaries in addition to beacon nodes we will
# split the keys for this instance in half between the BN and the VC
# and the validators for the BNs will be from the first half of all validators
VALIDATORS_PER_NODE=$((VALIDATORS_PER_NODE / 2 ))
fi
if [[ $NODE_ID -lt $BOOTSTRAP_NODE ]]; then
pushd "$VALIDATORS_DIR" >/dev/null
for VALIDATOR in $(ls | tail -n +$(( ($VALIDATORS_PER_NODE * $NODE_ID) + 1 )) | head -n $VALIDATORS_PER_NODE); do
"$COPYFILE" "$VALIDATOR" "$NODE_VALIDATORS_DIR"
"$COPYFILE" "$SECRETS_DIR/$VALIDATOR" "$NODE_SECRETS_DIR"
done
popd >/dev/null
fi
rm -rf "$NODE_DATA_DIR/dump"
"$MAKEDIR" "$NODE_DATA_DIR/dump"
cd "$NODE_DATA_DIR"
$BEACON_NODE_BIN \
--log-level=${LOG_LEVEL:-DEBUG} \
$BOOTSTRAP_ARG \
--network="$SIMULATION_DIR" \
--data-dir=$NODE_DATA_DIR \
--secrets-dir=$NODE_SECRETS_DIR \
--node-name=$NODE_ID \
--tcp-port=$PORT \
--udp-port=$PORT \
$NAT_ARG \
$WEB3_ARG \
--rest \
--rest-address="127.0.0.1" \
--rest-port="$(( $BASE_REST_PORT + $NODE_ID ))" \
--metrics \
--metrics-address="127.0.0.1" \
--metrics-port="$(( $BASE_METRICS_PORT + $NODE_ID ))" \
--doppelganger-detection=off \
${ADDITIONAL_BEACON_NODE_ARGS} \
"$@"

View File

@ -1,51 +0,0 @@
#!/usr/bin/env bash

# run_validator.sh — launches a single validator client for the local
# simulation.
#
# Usage: run_validator.sh <node-id>
#
# The client receives the second half of the validator keys belonging to
# node <node-id> (the beacon node itself loads the first half — see
# run_node.sh) and connects to that node's REST API on localhost.
#
# Fix: quote all variable expansions (shellcheck SC2086) so the script works
# when the checkout path contains spaces; behavior is otherwise unchanged.

set -e

NODE_ID=${1}
shift

# Read in variables
# shellcheck source=/dev/null
source "$(dirname "$0")/vars.sh"

# set up the environment
# shellcheck source=/dev/null
source "${SIM_ROOT}/../../env.sh"

cd "$GIT_ROOT"

NODE_DATA_DIR="${SIMULATION_DIR}/validator-$NODE_ID"
NODE_VALIDATORS_DIR="$NODE_DATA_DIR/validators/"
NODE_SECRETS_DIR="$NODE_DATA_DIR/secrets/"
MAKEDIR="$GIT_ROOT/scripts/makedir.sh"
COPYFILE="$GIT_ROOT/scripts/copyfile.sh"

# Start from clean key directories so stale keys from a previous run cannot
# be picked up by the client.
rm -rf "$NODE_VALIDATORS_DIR"
"$MAKEDIR" "$NODE_VALIDATORS_DIR"
rm -rf "$NODE_SECRETS_DIR"
"$MAKEDIR" "$NODE_SECRETS_DIR"

# we will split the keys for this instance in half between the BN and the VC
# and the validators for the VCs will be from the second half of all validators
VALIDATORS_PER_NODE=$(( (NUM_VALIDATORS / TOTAL_NODES) / 2 ))
VALIDATOR_OFFSET=$(( NUM_VALIDATORS / 2 ))

if [[ $NODE_ID -lt $TOTAL_NODES ]]; then
  # Copy this client's slice of keystores plus the matching secrets.
  pushd "$VALIDATORS_DIR" >/dev/null
  for VALIDATOR in $(ls | tail -n +$(( VALIDATOR_OFFSET + (VALIDATORS_PER_NODE * NODE_ID) + 1 )) | head -n "$VALIDATORS_PER_NODE"); do
    "$COPYFILE" "$VALIDATOR" "$NODE_VALIDATORS_DIR"
    "$COPYFILE" "$SECRETS_DIR/$VALIDATOR" "$NODE_SECRETS_DIR"
  done
  popd >/dev/null
fi

cd "$NODE_DATA_DIR"

# The REST port of the paired beacon node is derived from the node ID.
"$VALIDATOR_CLIENT_BIN" \
  --log-level="${LOG_LEVEL:-DEBUG}" \
  --data-dir="$NODE_DATA_DIR" \
  --secrets-dir="$NODE_SECRETS_DIR" \
  --beacon-node="http://127.0.0.1:$(( BASE_REST_PORT + NODE_ID ))"

View File

@ -1,45 +0,0 @@
#!/usr/bin/env bash

# Entry point for the local simulation: runs ./start.sh inside a dedicated
# tmux session (press `q` to tear everything down), or falls back to a plain
# foreground run when tmux mode is disabled or tmux is unavailable.
#
# Fix: the embedded shutdown command previously hard-coded `tmux
# kill-session`, ignoring the TMUX_CMD override declared below; it now uses
# $TMUX_CMD (expanded by this shell when the window is created).

set -eo pipefail

cd "$(dirname "$0")"

# To allow overriding the program name
TMUX_CMD="${TMUX_CMD:-tmux}"

USE_TMUX="${USE_TMUX:-yes}"

# multitail mode and tmux mode are mutually exclusive
if [[ "$USE_MULTITAIL" == "yes" ]]; then
  USE_TMUX="no"
fi

if [[ "$USE_TMUX" != "no" ]]; then
  type "$TMUX_CMD" &>/dev/null || { echo "${TMUX_CMD}" is missing; USE_TMUX="no"; }
fi

# start.sh reads these to decide how to spawn the node processes
export TMUX_CMD USE_TMUX

if [[ "$USE_TMUX" != "no" ]]; then
  TMUX_SESSION_NAME="${TMUX_SESSION_NAME:-nbc-sim}"

  # Replace any leftover session from a previous run
  $TMUX_CMD kill-session -t "${TMUX_SESSION_NAME}" &>/dev/null || true
  $TMUX_CMD new-session -s "${TMUX_SESSION_NAME}" -d
  $TMUX_CMD setenv -t "${TMUX_SESSION_NAME}" USE_TMUX yes
  $TMUX_CMD bind-key -n q kill-session

  # maybe these should be moved to a user config file
  $TMUX_CMD set-option -t "${TMUX_SESSION_NAME}" history-limit 999999
  $TMUX_CMD set-option -t "${TMUX_SESSION_NAME}" remain-on-exit on
  $TMUX_CMD set -t "${TMUX_SESSION_NAME}" mouse on

  # We create a new window, so the above settings can take place
  $TMUX_CMD new-window -d -t "${TMUX_SESSION_NAME}" -n "sim"
  $TMUX_CMD kill-pane -t "${TMUX_SESSION_NAME}:0"

  # On failure, keep the window open until a key is pressed, then tear down
  # the whole session.
  $TMUX_CMD new-window -t "${TMUX_SESSION_NAME}" -n "start-script" "if ! $PWD/start.sh; then echo -en '\nPress any key to exit... '; read; $TMUX_CMD kill-session; fi"
  $TMUX_CMD select-window -t "${TMUX_SESSION_NAME}:start-script"
  $TMUX_CMD attach-session -t "${TMUX_SESSION_NAME}"
else
  ./start.sh
fi

View File

@ -1,267 +0,0 @@
#!/usr/bin/env bash

# start.sh — builds the binaries, prepares deposits/genesis and launches all
# nodes of the local beacon chain simulation. Normally invoked through the
# tmux wrapper script, but can also be run stand-alone.

set -eo pipefail

# To allow overriding the program names
TMUX_CMD="${TMUX_CMD:-tmux}"
MULTITAIL_CMD="${MULTITAIL_CMD:-multitail}"
GANACHE_CMD="${GANACHE_CMD:-ganache-cli}"
PROMETHEUS_CMD="${PROMETHEUS_CMD:-prometheus}"
CTAIL_CMD="${CTAIL_CMD:-ctail}"

TMUX_SESSION_NAME="${TMUX_SESSION_NAME:-nbc-sim}"

WAIT_GENESIS="${WAIT_GENESIS:-no}"

# For each optional helper tool: if the feature is requested but the binary
# is not on PATH, print a notice (where useful) and disable the feature.
USE_MULTITAIL="${USE_MULTITAIL:-no}"
if [[ "$USE_MULTITAIL" != "no" ]]; then
  type "$MULTITAIL_CMD" &>/dev/null || { echo "${MULTITAIL_CMD}" is missing; USE_MULTITAIL="no"; }
fi

USE_TMUX="${USE_TMUX:-no}"
if [[ "$USE_TMUX" == "yes" ]]; then
  type "$TMUX_CMD" &>/dev/null || { echo "${TMUX_CMD}" is missing; USE_TMUX="no"; }
fi

USE_GANACHE="${USE_GANACHE:-yes}"
if [[ "$USE_GANACHE" == "yes" ]]; then
  type "$GANACHE_CMD" &>/dev/null || { echo $GANACHE_CMD is missing; USE_GANACHE="no"; }
fi

USE_PROMETHEUS="${USE_PROMETHEUS:-yes}"
if [[ "$USE_PROMETHEUS" == "yes" ]]; then
  type "$PROMETHEUS_CMD" &>/dev/null || { echo $PROMETHEUS_CMD is missing; USE_PROMETHEUS="no"; }
fi

USE_CTAIL="${USE_CTAIL:-yes}"
if [[ "$USE_CTAIL" == "yes" ]]; then
  type "$CTAIL_CMD" &>/dev/null || { USE_CTAIL="no"; }
fi

# Read in variables
# shellcheck source=/dev/null
source "$(dirname "$0")/vars.sh"

# Create the simulation working directories up front.
cd "$SIM_ROOT"
mkdir -p "$SIMULATION_DIR"
mkdir -p "$VALIDATORS_DIR"
mkdir -p "$SECRETS_DIR"

cd "$GIT_ROOT"

# Compile-time flags for the simulation build of the node binaries.
CUSTOM_NIMFLAGS="${NIMFLAGS} -d:useSysAsserts -d:const_preset=mainnet -d:local_testnet"
GANACHE_BLOCK_TIME=5

# Run with "SLOTS_PER_EPOCH=8 ./start.sh" to change these
DEFS=""
DEFS+="-d:SECONDS_PER_ETH1_BLOCK=$GANACHE_BLOCK_TIME "
DEFS+="-d:MAX_COMMITTEES_PER_SLOT=${MAX_COMMITTEES_PER_SLOT:-1} " # Spec default: 64
DEFS+="-d:SLOTS_PER_EPOCH=${SLOTS_PER_EPOCH:-6} " # Spec default: 32
DEFS+="-d:SECONDS_PER_SLOT=${SECONDS_PER_SLOT:-6} " # Spec default: 12

# Windows detection
if uname | grep -qiE "mingw|msys"; then
  MAKE="mingw32-make"
else
  MAKE="make"
fi
# Builds the given Makefile target at most once per simulation run.
# A sentinel variable named "<target>_name" records that the target was
# already built; it is exported so child shells inherit the flag.
#   $1 - the Makefile target to build
make_once () {
  # local keeps the helper from leaking variables into the caller's scope;
  # quoting handles target names robustly (shellcheck SC2086).
  local target_flag_var="$1_name"
  if [[ -z "${!target_flag_var}" ]]; then
    export "$target_flag_var"=1
    $MAKE "$1"
  fi
}
# Generate the Prometheus scrape config covering every node's metrics port.
mkdir -p "${METRICS_DIR}"
./scripts/make_prometheus_config.sh \
    --nodes ${TOTAL_NODES} \
    --base-metrics-port ${BASE_METRICS_PORT} \
    --config-file "${METRICS_DIR}/prometheus.yml" || true # TODO: this currently fails on macOS,
                                                          # but it can be considered non-critical

# Accumulates the multitail panel specs built up by run_cmd (multitail mode only).
COMMANDS=()

# Ganache (the simulated eth1 chain) can only be auto-started inside tmux.
if [[ "$USE_GANACHE" == "yes" ]]; then
  if [[ "$USE_TMUX" == "yes" ]]; then
    $TMUX_CMD new-window -d -t $TMUX_SESSION_NAME -n "$GANACHE_CMD" "$GANACHE_CMD --blockTime $GANACHE_BLOCK_TIME --gasLimit 100000000 -e 100000 --verbose"
  else
    echo NOTICE: $GANACHE_CMD will be started automatically only with USE_TMUX=yes
    USE_GANACHE="no"
  fi
fi

# Prometheus likewise runs in its own tmux window, with a fresh data dir.
if [[ "$USE_PROMETHEUS" == "yes" ]]; then
  if [[ "$USE_TMUX" == "yes" ]]; then
    rm -rf "${METRICS_DIR}/data"
    mkdir -p "${METRICS_DIR}/data"
    # TODO: Prometheus is not shut down properly on tmux kill-session
    killall prometheus &>/dev/null || true
    PROMETHEUS_FLAGS="--config.file=./prometheus.yml --storage.tsdb.path=./prometheus"
    $TMUX_CMD new-window -d -t $TMUX_SESSION_NAME -n "$PROMETHEUS_CMD" "cd '$METRICS_DIR' && $PROMETHEUS_CMD $PROMETHEUS_FLAGS"
  else
    echo NOTICE: $PROMETHEUS_CMD will be started automatically only with USE_TMUX=yes
    USE_PROMETHEUS="no"
  fi
fi

# Build the node binaries with the simulation-specific compile-time flags.
$MAKE -j2 --no-print-directory NIMFLAGS="$CUSTOM_NIMFLAGS $DEFS" LOG_LEVEL="${LOG_LEVEL:-DEBUG}" nimbus_beacon_node nimbus_validator_client

EXISTING_VALIDATORS=0
if [[ -f "$DEPOSITS_FILE" ]]; then
  # We count the number of deposits by counting the number of
  # occurrences of the 'deposit_data_root' field:
  EXISTING_VALIDATORS=$(grep -o -i deposit_data_root "$DEPOSITS_FILE" | wc -l)
fi

# Regenerate all keystores/secrets/deposits when the deposit count changed
# (existing deposits are reused across runs otherwise).
if [[ $EXISTING_VALIDATORS -ne $NUM_VALIDATORS ]]; then
  make_once deposit_contract
  rm -rf "$VALIDATORS_DIR"
  rm -rf "$SECRETS_DIR"
  build/deposit_contract generateSimulationDeposits \
    --count="${NUM_VALIDATORS}" \
    --out-validators-dir="$VALIDATORS_DIR" \
    --out-secrets-dir="$SECRETS_DIR" \
    --out-deposits-file="$DEPOSITS_FILE"
  echo "All deposits prepared"
fi

# Unless waiting for on-chain deposits (WAIT_GENESIS=yes), pre-compute the
# genesis state from the deposits file.
if [ ! -f "${SNAPSHOT_FILE}" ]; then
  if [[ "${WAIT_GENESIS}" != "yes" ]]; then
    echo Creating testnet genesis...
    $BEACON_NODE_BIN \
      --data-dir="${SIMULATION_DIR}/node-$BOOTSTRAP_NODE" \
      createTestnet \
      $WEB3_ARG \
      --deposits-file="${DEPOSITS_FILE}" \
      --total-validators="${NUM_VALIDATORS}" \
      --output-genesis="${SNAPSHOT_FILE}" \
      --output-bootstrap-file="${NETWORK_BOOTSTRAP_FILE}" \
      --bootstrap-address=127.0.0.1 \
      --bootstrap-port=$(( BASE_P2P_PORT + BOOTSTRAP_NODE )) \
      --netkey-file=network_key.json \
      --insecure-netkey-password=true \
      --genesis-offset=30 # Delay in seconds
  fi
fi
# Runs a node command in the configured UI: a new tmux pane, a queued
# multitail panel (collected in the global COMMANDS array and launched
# later), or a plain background job.
#   $1 - node index (used for labels and bootstrap-node detection)
#   $2 - the shell command to execute
#   $3 - short binary name shown in the multitail panel title
function run_cmd {
  # Fix: declare the parameters local so the helper no longer clobbers the
  # caller's variables (notably the node-start loop's counter `i`).
  local i=$1
  local CMD=$2
  local bin_name=$3
  if [[ "$USE_TMUX" == "yes" ]]; then
    echo "Starting node $i..."
    # Keep the pane open on failure so the error output can be inspected
    $TMUX_CMD select-window -t "${TMUX_SESSION_NAME}:sim"
    $TMUX_CMD split-window -t "${TMUX_SESSION_NAME}" "if ! $CMD; then read; fi"
    $TMUX_CMD select-layout -t "${TMUX_SESSION_NAME}:sim" tiled
  elif [[ "$USE_MULTITAIL" != "no" ]]; then
    local SLEEP
    # Give the bootstrap node a head start before the others connect
    if [[ "$i" == "$BOOTSTRAP_NODE" ]]; then
      SLEEP="0"
    else
      SLEEP="3"
    fi
    # "multitail" closes the corresponding panel when a command exits, so let's make sure it doesn't exit
    COMMANDS+=( " -cT ansi -t '$bin_name #$i' -l 'sleep $SLEEP; $CMD; echo [node execution completed]; while true; do sleep 100; done'" )
  else
    eval "${CMD}" &
  fi
}
# Placeholder values used when no eth1 simulator is available.
DEPOSIT_CONTRACT_ADDRESS="0x0000000000000000000000000000000000000000"
DEPOSIT_CONTRACT_BLOCK="0x0000000000000000000000000000000000000000000000000000000000000000"

# With ganache running, deploy a real deposit contract and (optionally, in
# WAIT_GENESIS mode) start sending the deposits to it in the background.
if [ "$USE_GANACHE" != "no" ]; then
  make_once deposit_contract
  echo Deploying the validator deposit contract...
  # The deploy command prints "<address>;<block-hash>".
  DEPLOY_CMD_OUTPUT=$($DEPOSIT_CONTRACT_BIN deploy $WEB3_ARG)
  # https://stackoverflow.com/questions/918886/how-do-i-split-a-string-on-a-delimiter-in-bash
  OUTPUT_PIECES=(${DEPLOY_CMD_OUTPUT//;/ })
  DEPOSIT_CONTRACT_ADDRESS=${OUTPUT_PIECES[0]}
  DEPOSIT_CONTRACT_BLOCK=${OUTPUT_PIECES[1]}
  echo Contract deployed at $DEPOSIT_CONTRACT_ADDRESS:$DEPOSIT_CONTRACT_BLOCK

  if [[ "$WAIT_GENESIS" == "yes" ]]; then
    run_cmd "(deposit maker)" "$DEPOSIT_CONTRACT_BIN sendDeposits \
      --deposits-file='$DEPOSITS_FILE' \
      --min-delay=0 --max-delay=1 \
      $WEB3_ARG \
      --deposit-contract=${DEPOSIT_CONTRACT_ADDRESS}"
  fi
fi

# Write the run-time chain configuration consumed by the nodes.
echo Wrote $RUNTIME_CONFIG_FILE:
tee "$RUNTIME_CONFIG_FILE" <<EOF
PRESET_BASE: mainnet
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: ${NUM_VALIDATORS}
MIN_GENESIS_TIME: 0
GENESIS_DELAY: 10
GENESIS_FORK_VERSION: 0x00000000
DEPOSIT_CONTRACT_ADDRESS: ${DEPOSIT_CONTRACT_ADDRESS}
ETH1_FOLLOW_DISTANCE: 1
ALTAIR_FORK_EPOCH: 2
EOF

if [[ "$USE_TMUX" == "yes" ]]; then
  $TMUX_CMD select-window -t "${TMUX_SESSION_NAME}:sim"
fi

# Delete any leftover address files from a previous session
if [ -f "${BOOTSTRAP_ENR_FILE}" ]; then
  rm "${BOOTSTRAP_ENR_FILE}"
fi

# Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell
# instance as the parent and the target process name as a pattern to the
# "pkill" command.
if [[ "$USE_MULTITAIL" == "no" && "$USE_TMUX" != "yes" ]]; then
  trap 'pkill -P $$ nimbus_beacon_node' SIGINT EXIT
fi
# Tracks the last node a "waiting" notice was printed for, so the message is
# shown once per node instead of once per 100ms poll iteration.
LAST_WAITING_NODE=0

# Start the nodes from the bootstrap node downwards; indices below
# TOTAL_USER_NODES are left for the user to start manually.
for i in $(seq $BOOTSTRAP_NODE -1 $TOTAL_USER_NODES); do
  if [[ "$i" != "$BOOTSTRAP_NODE" && "$USE_MULTITAIL" == "no" ]]; then
    # Wait for the master node to write out its address file
    while [ ! -f "${BOOTSTRAP_ENR_FILE}" ]; do
      if (( LAST_WAITING_NODE != i )); then
        echo Waiting for $BOOTSTRAP_ENR_FILE to appear...
        # Fix: assign the node number, not the literal string "i", so the
        # notice really fires once for every waiting node (the old code only
        # appeared to work through bash arithmetic name indirection).
        LAST_WAITING_NODE=$i
      fi
      sleep 0.1
    done
  fi
  run_cmd $i "${SIM_ROOT}/run_node.sh ${i} --verify-finalization" "node"
  if [ "${USE_BN_VC_VALIDATOR_SPLIT:-}" == "yes" ]; then
    # start the VC with a few seconds of delay so that there are less RPC connection retries
    run_cmd $i "sleep 3 && ${SIM_ROOT}/run_validator.sh ${i}" "validator"
  fi
done
# Optionally aggregate all node logs in a single ctail window (tmux only).
if [[ "$USE_CTAIL" != "no" ]]; then
  if [[ "$USE_TMUX" == "yes" ]]; then
    $TMUX_CMD new-window -d -t $TMUX_SESSION_NAME -n "$CTAIL_CMD" "$CTAIL_CMD tail -q -n +1 -f ${SIMULATION_DIR}/node-*/beacon_node.log"
  else
    # Fix: the checks above (and the other notices in this script) use
    # USE_TMUX=yes; the old message wrongly advertised USE_TMUX=1, which
    # would not enable tmux mode.
    echo NOTICE: $CTAIL_CMD will be started automatically only with USE_TMUX=yes
    USE_CTAIL="no"
  fi
fi

if [[ "$USE_TMUX" == "yes" ]]; then
  # kill the console window in the pane where the simulation is running
  $TMUX_CMD kill-pane -t $TMUX_SESSION_NAME:sim.0
  $TMUX_CMD select-window -t "${TMUX_SESSION_NAME}:sim"
  $TMUX_CMD select-layout tiled
elif [[ "$USE_MULTITAIL" != "no" ]]; then
  # Launch all the panel specs queued by run_cmd in one multitail invocation
  eval $MULTITAIL_CMD -s 3 -M 0 -x \"Nimbus beacon chain\" "${COMMANDS[@]}"
else
  wait # Stop when all nodes have gone down
fi

View File

@ -1,50 +0,0 @@
#!/usr/bin/env bash

# vars.sh — shared configuration sourced by start.sh, run_node.sh and
# run_validator.sh. Defines variables only; performs no actions beyond
# resolving a couple of paths.

# https://github.com/koalaman/shellcheck/wiki/SC2034
# shellcheck disable=2034
true

PWD_CMD="pwd"
# get native Windows paths on Mingw
uname | grep -qi mingw && PWD_CMD="pwd -W"

# Directory containing this script (the simulation root).
cd "$(dirname "$0")"

SIM_ROOT="$($PWD_CMD)"

# Set a default value for the env vars usually supplied by a Makefile
cd "$(git rev-parse --show-toplevel)"
: ${GIT_ROOT:="$($PWD_CMD)"}
cd - &>/dev/null

# When changing these, also update the readme section on running simulation
# so that the run_node example is correct!
NUM_VALIDATORS=${VALIDATORS:-128}
TOTAL_NODES=${NODES:-4}
TOTAL_USER_NODES=${USER_NODES:-0}
TOTAL_SYSTEM_NODES=$(( TOTAL_NODES - TOTAL_USER_NODES ))
# The highest-numbered node acts as the bootstrap node.
BOOTSTRAP_NODE=$(( TOTAL_NODES - 1 ))
USE_BN_VC_VALIDATOR_SPLIT=${BN_VC_VALIDATOR_SPLIT:-yes}

# Simulation directory layout and binary locations.
SIMULATION_DIR="${SIM_ROOT}/data"
METRICS_DIR="${SIM_ROOT}/prometheus"
VALIDATORS_DIR="${SIM_ROOT}/validators"
SECRETS_DIR="${SIM_ROOT}/secrets"
SNAPSHOT_FILE="${SIMULATION_DIR}/genesis.ssz"
NETWORK_BOOTSTRAP_FILE="${SIMULATION_DIR}/bootstrap_nodes.txt"
BEACON_NODE_BIN="${GIT_ROOT}/build/nimbus_beacon_node"
VALIDATOR_CLIENT_BIN="${GIT_ROOT}/build/nimbus_validator_client"
DEPOSIT_CONTRACT_BIN="${GIT_ROOT}/build/deposit_contract"
BOOTSTRAP_ENR_FILE="${SIMULATION_DIR}/node-${BOOTSTRAP_NODE}/beacon_node.enr"
RUNTIME_CONFIG_FILE="${SIMULATION_DIR}/config.yaml"
DEPOSITS_FILE="${SIMULATION_DIR}/deposits.json"

# NOTE(review): USE_GANACHE is expected to be set by the sourcing script
# (start.sh sets its default before sourcing this file); when it is unset —
# e.g. when run_node.sh is launched in a fresh tmux pane — WEB3_ARG stays
# empty. Confirm this is the intended behavior.
if [[ "$USE_GANACHE" == "yes" ]]; then
  WEB3_ARG="--web3-url=ws://localhost:8545"
else
  WEB3_ARG=""
fi

# Base port numbers; each node offsets these by its numeric ID.
BASE_P2P_PORT=30000
BASE_REST_PORT=5052
BASE_METRICS_PORT=8008

View File

@ -23,7 +23,7 @@ import
nimbus_beacon_node, beacon_node_status,
nimbus_validator_client],
../beacon_chain/validator_client/common,
../ncli/ncli_testnet,
./testutil
type
@ -46,7 +46,9 @@ const
validatorsDir = dataDir / "validators"
secretsDir = dataDir / "secrets"
depositsFile = dataDir / "deposits.json"
runtimeConfigFile = dataDir / "config.yaml"
genesisFile = dataDir / "genesis.ssz"
depositTreeSnapshotFile = dataDir / "deposit_tree_snapshot.ssz"
bootstrapEnrFile = dataDir / "bootstrap_node.enr"
tokenFilePath = dataDir / "keymanager-token.txt"
defaultBasePort = 49000
@ -156,12 +158,24 @@ proc prepareNetwork =
Json.saveFile(depositsFile, launchPadDeposits)
notice "Deposit data written", filename = depositsFile
let createTestnetConf = try: BeaconNodeConf.load(cmdLine = mapIt([
"--data-dir=" & dataDir,
let runtimeConfigWritten = secureWriteFile(runtimeConfigFile, """
ALTAIR_FORK_EPOCH: 0
BELLATRIX_FORK_EPOCH: 0
""")
if runtimeConfigWritten.isOk:
notice "Run-time config written", filename = runtimeConfigFile
else:
fatal "Failed to write run-time config", filename = runtimeConfigFile
quit 1
let createTestnetConf = try: ncli_testnet.CliConfig.load(cmdLine = mapIt([
"createTestnet",
"--data-dir=" & dataDir,
"--total-validators=" & $simulationDepositsCount,
"--deposits-file=" & depositsFile,
"--output-genesis=" & genesisFile,
"--output-deposit-tree-snapshot=" & depositTreeSnapshotFile,
"--output-bootstrap-file=" & bootstrapEnrFile,
"--netkey-file=network_key.json",
"--insecure-netkey-password=true",

2
vendor/nim-eth vendored

@ -1 +1 @@
Subproject commit 68f4c53828cec749e398864bcc6b0097c8d66c31
Subproject commit 5b189ce5e22a785d1a3a3ea6a4d34387378df2b4

2
vendor/nim-web3 vendored

@ -1 +1 @@
Subproject commit 98fba0fb0471abffdbe69fb8e66bb59152a7075c
Subproject commit 4726fdc223d7cc8c3fe490e9ab58a7b43eae742a