Merge pull request #1818 from status-im/devel

Master <- Devel
This commit is contained in:
Mamy Ratsimbazafy 2020-10-07 18:46:02 +02:00 committed by GitHub
commit 180bad3a33
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
67 changed files with 1632 additions and 652 deletions

View File

@ -77,7 +77,14 @@ jobs:
steps:
- name: Get branch name
shell: bash
run: echo "##[set-output name=branch_name;]$(echo ${GITHUB_REF#refs/heads/})"
run: |
if [[ '${{ github.event_name }}' == 'pull_request' ]]; then
echo "##[set-output name=branch_name;]$(echo ${GITHUB_HEAD_REF})"
echo "Branch found (PR): ${GITHUB_HEAD_REF}"
else
echo "##[set-output name=branch_name;]$(echo ${GITHUB_REF#refs/heads/})"
echo "Branch found (not PR): ${GITHUB_REF#refs/heads/}"
fi
id: get_branch
- name: Cancel Previous Runs (except master/devel)

5
.gitmodules vendored
View File

@ -188,3 +188,8 @@
url = https://github.com/status-im/nimbus-security-resources.git
ignore = dirty
branch = master
[submodule "vendor/nim-normalize"]
path = vendor/nim-normalize
url = https://github.com/nitely/nim-normalize.git
ignore = dirty
branch = master

5
Jenkinsfile vendored
View File

@ -38,9 +38,9 @@ def runStages() {
sh """#!/bin/bash
set -e
make -j${env.NPROC} V=1
make -j${env.NPROC} V=1 LOG_LEVEL=TRACE NIMFLAGS='-d:UseSlashingProtection=true -d:testnet_servers_image' beacon_node
make -j${env.NPROC} V=1 LOG_LEVEL=TRACE NIMFLAGS='-d:testnet_servers_image' beacon_node
# Miracl fallback
# make -j${env.NPROC} V=1 LOG_LEVEL=TRACE NIMFLAGS='-d:BLS_FORCE_BACKEND=miracl -d:UseSlashingProtection=true -d:testnet_servers_image' beacon_node
# make -j${env.NPROC} V=1 LOG_LEVEL=TRACE NIMFLAGS='-d:BLS_FORCE_BACKEND=miracl -d:testnet_servers_image' beacon_node
"""
}
},
@ -52,7 +52,6 @@ def runStages() {
// EXECUTOR_NUMBER will be 0 or 1, since we have 2 executors per Jenkins node
sh """#!/bin/bash
set -e
export NIMFLAGS='-d:UseSlashingProtection=true'
./scripts/launch_local_testnet.sh --testnet 0 --nodes 4 --stop-at-epoch 5 --log-level DEBUG --disable-htop --enable-logtrace --data-dir local_testnet0_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-rpc-port \$(( 7000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --discv5:no
./scripts/launch_local_testnet.sh --testnet 1 --nodes 4 --stop-at-epoch 5 --log-level DEBUG --disable-htop --enable-logtrace --data-dir local_testnet1_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-rpc-port \$(( 7000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --discv5:no
"""

View File

@ -107,6 +107,9 @@ ifeq ($(OS), Windows_NT)
# 32-bit Windows is not supported by libbacktrace/libunwind
USE_LIBBACKTRACE := 0
endif
MKDIR_COMMAND := mkdir -p
else
MKDIR_COMMAND := mkdir -m 0750 -p
endif
DEPOSITS_DELAY := 0
@ -124,10 +127,6 @@ ifneq ($(USE_LIBBACKTRACE), 0)
deps: | libbacktrace
endif
clean-cross:
+ [[ -e vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc ]] && "$(MAKE)" -C vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc clean $(HANDLE_OUTPUT) || true
+ [[ -e vendor/nim-nat-traversal/vendor/libnatpmp ]] && "$(MAKE)" -C vendor/nim-nat-traversal/vendor/libnatpmp clean $(HANDLE_OUTPUT) || true
#- deletes and recreates "beacon_chain.nims" which on Windows is a copy instead of a proper symlink
update: | update-common
rm -f beacon_chain.nims && \
@ -187,25 +186,28 @@ testnet0 testnet1: | beacon_node signing_process
--data-dir=build/data/$@_$(NODE_ID) \
$(GOERLI_TESTNETS_PARAMS) $(NODE_PARAMS)
# https://www.gnu.org/software/make/manual/html_node/Multi_002dLine.html
#- https://www.gnu.org/software/make/manual/html_node/Multi_002dLine.html
#- macOS doesn't support "=" at the end of "define FOO": https://stackoverflow.com/questions/13260396/gnu-make-3-81-eval-function-not-working
define CONNECT_TO_NETWORK
mkdir -p build/data/shared_$(1)_$(NODE_ID)
$(MKDIR_COMMAND) build/data/shared_$(1)_$(NODE_ID)
scripts/make_prometheus_config.sh \
--nodes 1 \
--base-metrics-port $$(($(BASE_METRICS_PORT) + $(NODE_ID))) \
--config-file "build/data/shared_$(1)_$(NODE_ID)/prometheus.yml"
[ "$(2)" == "FastSync" ] && { export CHECKPOINT_PARAMS="--finalized-checkpoint-state=vendor/eth2-testnets/shared/$(1)/recent-finalized-state.ssz \
--finalized-checkpoint-block=vendor/eth2-testnets/shared/$(1)/recent-finalized-block.ssz" ; }; \
$(CPU_LIMIT_CMD) build/beacon_node \
--network=$(1) \
--log-level="$(LOG_LEVEL)" \
--log-file=build/data/shared_$(1)_$(NODE_ID)/nbc_bn_$$(date +"%Y%m%d%H%M%S").log \
--data-dir=build/data/shared_$(1)_$(NODE_ID) \
$(GOERLI_TESTNETS_PARAMS) $(NODE_PARAMS)
$$CHECKPOINT_PARAMS $(GOERLI_TESTNETS_PARAMS) $(NODE_PARAMS)
endef
define CONNECT_TO_NETWORK_IN_DEV_MODE
mkdir -p build/data/shared_$(1)_$(NODE_ID)
$(MKDIR_COMMAND) build/data/shared_$(1)_$(NODE_ID)
scripts/make_prometheus_config.sh \
--nodes 1 \
@ -221,7 +223,7 @@ endef
define CONNECT_TO_NETWORK_WITH_VALIDATOR_CLIENT
# if launching a VC as well - send the BN looking nowhere for validators/secrets
mkdir -p build/data/shared_$(1)_$(NODE_ID)/empty_dummy_folder
$(MKDIR_COMMAND) build/data/shared_$(1)_$(NODE_ID)/empty_dummy_folder
scripts/make_prometheus_config.sh \
--nodes 1 \
@ -289,6 +291,9 @@ medalla: | beacon_node signing_process
medalla-vc: | beacon_node signing_process validator_client
$(call CONNECT_TO_NETWORK_WITH_VALIDATOR_CLIENT,medalla)
medalla-fast-sync: | beacon_node signing_process
$(call CONNECT_TO_NETWORK,medalla,FastSync)
ifneq ($(LOG_LEVEL), TRACE)
medalla-dev:
+ "$(MAKE)" LOG_LEVEL=TRACE $@
@ -307,27 +312,34 @@ clean-medalla:
$(call CLEAN_NETWORK,medalla)
###
### spadina
### zinken
###
spadina: | beacon_node signing_process
$(call CONNECT_TO_NETWORK,spadina)
zinken: | beacon_node signing_process
$(call CONNECT_TO_NETWORK,zinken)
spadina-vc: | beacon_node signing_process validator_client
$(call CONNECT_TO_NETWORK_WITH_VALIDATOR_CLIENT,spadina)
zinken-vc: | beacon_node signing_process validator_client
$(call CONNECT_TO_NETWORK_WITH_VALIDATOR_CLIENT,zinken)
ifneq ($(LOG_LEVEL), TRACE)
spadina-dev:
zinken-dev:
+ "$(MAKE)" LOG_LEVEL=TRACE $@
else
spadina-dev: | beacon_node signing_process
$(call CONNECT_TO_NETWORK_IN_DEV_MODE,spadina)
zinken-dev: | beacon_node signing_process
$(call CONNECT_TO_NETWORK_IN_DEV_MODE,zinken)
endif
spadina-deposit-data: | beacon_node signing_process deposit_contract
$(call MAKE_DEPOSIT_DATA,spadina)
zinken-deposit-data: | beacon_node signing_process deposit_contract
$(call MAKE_DEPOSIT_DATA,zinken)
spadina-deposit: | beacon_node signing_process deposit_contract
$(call MAKE_DEPOSIT,spadina)
zinken-deposit: | beacon_node signing_process deposit_contract
$(call MAKE_DEPOSIT,zinken)
clean-zinken:
$(call CLEAN_NETWORK,zinken)
###
### spadina
###
clean-spadina:
$(call CLEAN_NETWORK,spadina)

View File

@ -29,10 +29,12 @@ requires "nim >= 0.19.0",
"libp2p",
"metrics",
"nimcrypto",
"normalize",
"serialization",
"stew",
"testutils",
"prompt",
"unicodedb",
"web3",
"yaml"
@ -73,7 +75,7 @@ task test, "Run all tests":
# Mainnet config
buildAndRunBinary "proto_array", "beacon_chain/fork_choice/", """-d:const_preset=mainnet -d:chronicles_sinks="json[file]""""
buildAndRunBinary "fork_choice", "beacon_chain/fork_choice/", """-d:const_preset=mainnet -d:chronicles_sinks="json[file]""""
buildAndRunBinary "all_tests", "tests/", """-d:UseSlashingProtection=true -d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:chronicles_sinks="json[file]""""
buildAndRunBinary "all_tests", "tests/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:chronicles_sinks="json[file]""""
# Check Miracl/Milagro fallback on select tests
buildAndRunBinary "test_interop", "tests/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:BLS_FORCE_BACKEND=miracl -d:chronicles_sinks="json[file]""""

View File

@ -26,14 +26,19 @@ type
DbKeyKind = enum
kHashToState
kHashToBlock
kHeadBlock # Pointer to the most recent block selected by the fork choice
kTailBlock ##\
## Pointer to the earliest finalized block - this is the genesis block when
## the chain starts, but might advance as the database gets pruned
## TODO: determine how aggressively the database should be pruned. For a
## healthy network sync, we probably need to store blocks at least
## past the weak subjectivity period.
kBlockSlotStateRoot ## BlockSlot -> state_root mapping
kHeadBlock
## Pointer to the most recent block selected by the fork choice
kTailBlock
## Pointer to the earliest finalized block - this is the genesis block when
## the chain starts, but might advance as the database gets pruned
## TODO: determine how aggressively the database should be pruned. For a
## healthy network sync, we probably need to store blocks at least
## past the weak subjectivity period.
kBlockSlotStateRoot
## BlockSlot -> state_root mapping
kGenesisBlockRoot
## Immutable reference to the network genesis state
## (needed for satisfying requests to the beacon node API).
const
maxDecompressedDbRecordSize = 16*1024*1024
@ -165,6 +170,9 @@ proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.put(subkey(kTailBlock), key)
proc putGenesisBlockRoot*(db: BeaconChainDB, key: Eth2Digest) =
db.put(subkey(kGenesisBlockRoot), key)
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock())
@ -207,6 +215,9 @@ proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.get(subkey(kTailBlock), Eth2Digest)
proc getGenesisBlockRoot*(db: BeaconChainDB): Eth2Digest =
db.get(subkey(kGenesisBlockRoot), Eth2Digest).expect("The database must be seeded with the genesis state")
proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
db.backend.contains(subkey(SignedBeaconBlock, key)).expect("working database")

View File

@ -11,7 +11,7 @@ import
std/[osproc, random],
# Nimble packages
stew/[objects, byteutils, endians2], stew/shims/macros,
stew/[objects, byteutils, endians2, io2], stew/shims/macros,
chronos, confutils, metrics, json_rpc/[rpcserver, jsonmarshal],
chronicles, bearssl, blscurve,
json_serialization/std/[options, sets, net], serialization/errors,
@ -23,7 +23,7 @@ import
# Local modules
spec/[datatypes, digest, crypto, beaconstate, helpers, network, presets],
spec/state_transition,
spec/[state_transition, weak_subjectivity],
conf, time, beacon_chain_db, validator_pool, extras,
attestation_pool, exit_pool, eth2_network, eth2_discovery,
beacon_node_common, beacon_node_types, beacon_node_status,
@ -36,7 +36,6 @@ import
./eth2_processor
const
genesisFile* = "genesis.ssz"
hasPrompt = not defined(withoutPrompt)
type
@ -60,62 +59,6 @@ declareGauge ticks_delay,
logScope: topics = "beacnde"
proc getStateFromSnapshot(conf: BeaconNodeConf, stateSnapshotContents: ref string): NilableBeaconStateRef =
var
genesisPath = conf.dataDir/genesisFile
snapshotContents: TaintedString
writeGenesisFile = false
if conf.stateSnapshot.isSome:
let
snapshotPath = conf.stateSnapshot.get.string
snapshotExt = splitFile(snapshotPath).ext
if cmpIgnoreCase(snapshotExt, ".ssz") != 0:
error "The supplied state snapshot must be a SSZ file",
suppliedPath = snapshotPath
quit 1
snapshotContents = readFile(snapshotPath)
if fileExists(genesisPath):
let genesisContents = readFile(genesisPath)
if snapshotContents != genesisContents:
error "Data directory not empty. Existing genesis state differs from supplied snapshot",
dataDir = conf.dataDir.string, snapshot = snapshotPath
quit 1
else:
debug "No previous genesis state. Importing snapshot",
genesisPath, dataDir = conf.dataDir.string
writeGenesisFile = true
genesisPath = snapshotPath
elif fileExists(genesisPath):
try: snapshotContents = readFile(genesisPath)
except CatchableError as err:
error "Failed to read genesis file", err = err.msg
quit 1
elif stateSnapshotContents != nil:
swap(snapshotContents, TaintedString stateSnapshotContents[])
else:
# No snapshot was provided. We should wait for genesis.
return nil
result = try:
newClone(SSZ.decode(snapshotContents, BeaconState))
except SerializationError:
error "Failed to import genesis file", path = genesisPath
quit 1
info "Loaded genesis state", path = genesisPath
if writeGenesisFile:
try:
notice "Writing genesis to data directory", path = conf.dataDir/genesisFile
writeFile(conf.dataDir/genesisFile, snapshotContents.string)
except CatchableError as err:
error "Failed to persist genesis file to data dir",
err = err.msg, genesisFile = conf.dataDir/genesisFile
quit 1
func enrForkIdFromState(state: BeaconState): ENRForkID =
let
forkVer = state.fork.current_version
@ -129,21 +72,59 @@ func enrForkIdFromState(state: BeaconState): ENRForkID =
proc init*(T: type BeaconNode,
rng: ref BrHmacDrbgContext,
conf: BeaconNodeConf,
stateSnapshotContents: ref string): Future[BeaconNode] {.async.} =
genesisStateContents: ref string): Future[BeaconNode] {.async.} =
let
netKeys = getPersistentNetKeys(rng[], conf)
nickname = if conf.nodeName == "auto": shortForm(netKeys)
else: conf.nodeName
db = BeaconChainDB.init(kvStore SqStoreRef.init(conf.databaseDir, "nbc").tryGet())
var mainchainMonitor: MainchainMonitor
var
mainchainMonitor: MainchainMonitor
genesisState, checkpointState: ref BeaconState
checkpointBlock: SignedBeaconBlock
if conf.finalizedCheckpointState.isSome:
let checkpointStatePath = conf.finalizedCheckpointState.get.string
checkpointState = try:
newClone(SSZ.loadFile(checkpointStatePath, BeaconState))
except SerializationError as err:
fatal "Checkpoint state deserialization failed",
err = formatMsg(err, checkpointStatePath)
quit 1
except CatchableError as err:
fatal "Failed to read checkpoint state file", err = err.msg
quit 1
if conf.finalizedCheckpointBlock.isNone:
if checkpointState.slot > 0:
fatal "Specifying a non-genesis --finalized-checkpoint-state requires specifying --finalized-checkpoint-block as well"
quit 1
else:
let checkpointBlockPath = conf.finalizedCheckpointBlock.get.string
try:
checkpointBlock = SSZ.loadFile(checkpointBlockPath, SignedBeaconBlock)
except SerializationError as err:
fatal "Invalid checkpoint block", err = err.formatMsg(checkpointBlockPath)
quit 1
except IOError as err:
fatal "Failed to load the checkpoint block", err = err.msg
quit 1
elif conf.finalizedCheckpointBlock.isSome:
# TODO We can download the state from somewhere in the future relying
# on the trusted `state_root` appearing in the checkpoint block.
fatal "--finalized-checkpoint-block cannot be specified without --finalized-checkpoint-state"
quit 1
if not ChainDAGRef.isInitialized(db):
# Fresh start - need to load a genesis state from somewhere
var genesisState = conf.getStateFromSnapshot(stateSnapshotContents)
var
tailState: ref BeaconState
tailBlock: SignedBeaconBlock
# Try file from command line first
if genesisState.isNil:
if genesisStateContents == nil and checkpointState == nil:
# This is a fresh start without a known genesis state
# (most likely, it hasn't arrived yet). We'll try to
# obtain a genesis through the Eth1 deposits monitor:
if conf.web3Url.len == 0:
fatal "Web3 URL not specified"
quit 1
@ -186,42 +167,68 @@ proc init*(T: type BeaconNode,
if bnStatus == BeaconNodeStatus.Stopping:
return nil
tailState = genesisState
tailBlock = get_initial_beacon_block(genesisState[])
notice "Eth2 genesis state detected",
genesisTime = genesisState.genesisTime,
eth1Block = genesisState.eth1_data.block_hash,
totalDeposits = genesisState.eth1_data.deposit_count
# This is needed to prove the not nil property from here on
if genesisState == nil:
doAssert false
elif genesisStateContents == nil:
if checkpointState.slot == GENESIS_SLOT:
genesisState = checkpointState
tailState = checkpointState
tailBlock = get_initial_beacon_block(genesisState[])
else:
fatal "State checkpoints cannot be provided for a network without a known genesis state"
quit 1
else:
if genesisState.slot != GENESIS_SLOT:
# TODO how to get a block from a non-genesis state?
error "Starting from non-genesis state not supported",
stateSlot = genesisState.slot,
stateRoot = hash_tree_root(genesisState[])
quit 1
let tailBlock = get_initial_beacon_block(genesisState[])
try:
ChainDAGRef.preInit(db, genesisState[], tailBlock)
doAssert ChainDAGRef.isInitialized(db), "preInit should have initialized db"
except CatchableError as e:
error "Failed to initialize database", err = e.msg
quit 1
genesisState = newClone(SSZ.decode(genesisStateContents[], BeaconState))
except CatchableError as err:
raiseAssert "The baked-in state must be valid"
if stateSnapshotContents != nil:
# The memory for the initial snapshot won't be needed anymore
stateSnapshotContents[] = ""
if checkpointState != nil:
tailState = checkpointState
tailBlock = checkpointBlock
else:
tailState = genesisState
tailBlock = get_initial_beacon_block(genesisState[])
try:
ChainDAGRef.preInit(db, genesisState[], tailState[], tailBlock)
doAssert ChainDAGRef.isInitialized(db), "preInit should have initialized db"
except CatchableError as e:
error "Failed to initialize database", err = e.msg
quit 1
# TODO check that genesis given on command line (if any) matches database
let
chainDagFlags = if conf.verifyFinalization: {verifyFinalization}
else: {}
chainDag = init(ChainDAGRef, conf.runtimePreset, db, chainDagFlags)
beaconClock = BeaconClock.init(chainDag.headState.data.data)
quarantine = QuarantineRef()
if conf.weakSubjectivityCheckpoint.isSome:
let
currentSlot = beaconClock.now.slotOrZero
isCheckpointStale = not is_within_weak_subjectivity_period(
currentSlot,
chainDag.headState.data.data,
conf.weakSubjectivityCheckpoint.get)
if isCheckpointStale:
error "Weak subjectivity checkpoint is stale",
currentSlot,
checkpoint = conf.weakSubjectivityCheckpoint.get,
headStateSlot = chainDag.headState.data.data.slot
quit 1
if checkpointState != nil:
chainDag.setTailState(checkpointState[], checkpointBlock)
if mainchainMonitor.isNil and
conf.web3Url.len > 0 and
conf.depositContractAddress.isSome:
@ -243,7 +250,7 @@ proc init*(T: type BeaconNode,
enrForkId = enrForkIdFromState(chainDag.headState.data.data)
topicBeaconBlocks = getBeaconBlocksTopic(enrForkId.forkDigest)
topicAggregateAndProofs = getAggregateAndProofsTopic(enrForkId.forkDigest)
network = createEth2Node(rng, conf, enrForkId)
network = createEth2Node(rng, conf, netKeys, enrForkId)
attestationPool = newClone(AttestationPool.init(chainDag, quarantine))
exitPool = newClone(ExitPool.init(chainDag, quarantine))
var res = BeaconNode(
@ -259,7 +266,7 @@ proc init*(T: type BeaconNode,
attestationPool: attestationPool,
exitPool: exitPool,
mainchainMonitor: mainchainMonitor,
beaconClock: BeaconClock.init(chainDag.headState.data.data),
beaconClock: beaconClock,
rpcServer: rpcServer,
forkDigest: enrForkId.forkDigest,
topicBeaconBlocks: topicBeaconBlocks,
@ -269,10 +276,7 @@ proc init*(T: type BeaconNode,
res.attachedValidators = ValidatorPool.init(
SlashingProtectionDB.init(
chainDag.headState.data.data.genesis_validators_root,
when UseSlashingProtection:
kvStore SqStoreRef.init(conf.validatorsDir(), "slashing_protection").tryGet()
else:
KvStoreRef()
kvStore SqStoreRef.init(conf.validatorsDir(), "slashing_protection").tryGet()
)
)
@ -287,9 +291,10 @@ proc init*(T: type BeaconNode,
if res.config.inProcessValidators:
res.addLocalValidators()
else:
res.vcProcess = startProcess(getAppDir() & "/signing_process".addFileExt(ExeExt),
getCurrentDir(), [$res.config.validatorsDir,
$res.config.secretsDir])
let cmd = getAppDir() / "signing_process".addFileExt(ExeExt)
let args = [$res.config.validatorsDir, $res.config.secretsDir]
let workdir = io2.getCurrentDir().tryGet()
res.vcProcess = startProcess(cmd, workdir, args)
res.addRemoteValidators()
# This merely configures the BeaconSync
@ -651,8 +656,7 @@ proc startSyncManager(node: BeaconNode) =
epoch.compute_start_slot_at_epoch()
func getFirstSlotAtFinalizedEpoch(): Slot =
let fepoch = node.chainDag.headState.data.data.finalized_checkpoint.epoch
compute_start_slot_at_epoch(fepoch)
node.chainDag.finalizedHead.slot
proc scoreCheck(peer: Peer): bool =
if peer.score < PeerScoreLowLimit:
@ -894,10 +898,9 @@ proc run*(node: BeaconNode) =
var gPidFile: string
proc createPidFile(filename: string) =
createDir splitFile(filename).dir
writeFile filename, $os.getCurrentProcessId()
gPidFile = filename
addQuitProc proc {.noconv.} = removeFile gPidFile
addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile)
proc initializeNetworking(node: BeaconNode) {.async.} =
await node.network.startListening()
@ -1094,7 +1097,14 @@ programMain:
var
config = makeBannerAndConfig(clientId, BeaconNodeConf)
# This is ref so we can mutate it (to erase it) after the initial loading.
stateSnapshotContents: ref string
genesisStateContents: ref string
setupStdoutLogging(config.logLevel)
if not(checkAndCreateDataDir(string(config.dataDir))):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure
setupLogging(config.logLevel, config.logFile)
@ -1106,8 +1116,8 @@ programMain:
for node in metadata.bootstrapNodes:
config.bootstrapNodes.add node
if config.stateSnapshot.isNone and metadata.genesisData.len > 0:
stateSnapshotContents = newClone metadata.genesisData
if metadata.genesisData.len > 0:
genesisStateContents = newClone metadata.genesisData
template checkForIncompatibleOption(flagName, fieldName) =
# TODO: This will have to be reworked slightly when we introduce config files.
@ -1131,6 +1141,13 @@ programMain:
# and avoid using system resources (such as urandom) after that
let rng = keys.newRng()
template findWalletWithoutErrors(name: WalletName): auto =
let res = keystore_management.findWallet(config, name)
if res.isErr:
fatal "Failed to locate wallet", error = res.error
quit 1
res.get
case config.cmd
of createTestnet:
let launchPadDeposits = try:
@ -1213,9 +1230,11 @@ programMain:
address = metricsAddress, port = config.metricsPort
metrics.startHttpServer($metricsAddress, config.metricsPort)
var node = waitFor BeaconNode.init(rng, config, stateSnapshotContents)
var node = waitFor BeaconNode.init(rng, config, genesisStateContents)
if bnStatus == BeaconNodeStatus.Stopping:
return
# The memory for the initial snapshot won't be needed anymore
if genesisStateContents != nil: genesisStateContents[] = ""
when hasPrompt:
initPrompt(node)
@ -1233,14 +1252,16 @@ programMain:
var walletPath: WalletPathPair
if config.existingWalletId.isSome:
let id = config.existingWalletId.get
let found = keystore_management.findWallet(config, id)
if found.isOk:
let
id = config.existingWalletId.get
found = findWalletWithoutErrors(id)
if found.isSome:
walletPath = found.get
else:
fatal "Unable to find wallet with the specified name/uuid",
id, err = found.error
fatal "Unable to find wallet with the specified name/uuid", id
quit 1
var unlocked = unlockWalletInteractively(walletPath.wallet)
if unlocked.isOk:
swap(mnemonic, unlocked.get)
@ -1256,8 +1277,15 @@ programMain:
swap(mnemonic, walletRes.get.mnemonic)
walletPath = walletRes.get.walletPath
createDir(config.outValidatorsDir)
createDir(config.outSecretsDir)
let vres = createPath(config.outValidatorsDir, 0o750)
if vres.isErr():
fatal "Could not create directory", path = config.outValidatorsDir
quit QuitFailure
let sres = createPath(config.outSecretsDir, 0o750)
if sres.isErr():
fatal "Could not create directory", path = config.outSecretsDir
quit QuitFailure
let deposits = generateDeposits(
config.runtimePreset,
@ -1282,17 +1310,17 @@ programMain:
mapIt(deposits.value, LaunchPadDeposit.init(config.runtimePreset, it))
Json.saveFile(depositDataPath, launchPadDeposits)
notice "Deposit data written", filename = depositDataPath
echo "Deposit data written to \"", depositDataPath, "\""
walletPath.wallet.nextAccount += deposits.value.len
let status = saveWallet(walletPath)
if status.isErr:
error "Failed to update wallet file after generating deposits",
fatal "Failed to update wallet file after generating deposits",
wallet = walletPath.path,
error = status.error
quit 1
except CatchableError as err:
error "Failed to create launchpad deposit data file", err = err.msg
fatal "Failed to create launchpad deposit data file", err = err.msg
quit 1
of DepositsCmd.`import`:
@ -1308,6 +1336,14 @@ programMain:
of wallets:
case config.walletsCmd:
of WalletsCmd.create:
if config.createdWalletNameFlag.isSome:
let
name = config.createdWalletNameFlag.get
existingWallet = findWalletWithoutErrors(name)
if existingWallet.isSome:
echo "The Wallet '" & name.string & "' already exists."
quit 1
var walletRes = createWalletInteractively(rng[], config)
if walletRes.isErr:
fatal "Unable to create wallet", err = walletRes.error
@ -1317,12 +1353,16 @@ programMain:
of WalletsCmd.list:
for kind, walletFile in walkDir(config.walletsDir):
if kind != pcFile: continue
let walletRes = loadWallet(walletFile)
if walletRes.isOk:
echo walletRes.get.longName
if checkSensitiveFilePermissions(walletFile):
let walletRes = loadWallet(walletFile)
if walletRes.isOk:
echo walletRes.get.longName
else:
warn "Found corrupt wallet file",
wallet = walletFile, error = walletRes.error
else:
warn "Found corrupt wallet file",
wallet = walletFile, error = walletRes.error
warn "Found wallet file with insecure permissions",
wallet = walletFile
of WalletsCmd.restore:
restoreWalletInteractively(rng[], config)

View File

@ -98,6 +98,9 @@ type
## Directed acyclic graph of blocks pointing back to a finalized block on the chain we're
## interested in - we call that block the tail
genesis*: BlockRef ##\
## The genesis block of the network
tail*: BlockRef ##\
## The earliest finalized block we know about
@ -207,6 +210,12 @@ func shortLog*(v: BlockRef): string =
else:
&"{v.root.data.toOpenArray(0, 3).toHex()}:{v.slot}"
func shortLog*(v: EpochRef): string =
if v == nil:
"EpochRef(nil)"
else:
&"(epoch ref: {v.epoch})"
chronicles.formatIt BlockSlot: shortLog(it)
chronicles.formatIt BlockRef: shortLog(it)

View File

@ -300,10 +300,22 @@ proc init*(T: type ChainDAGRef,
tailRef = BlockRef.init(tailRoot, tailBlock.message)
headRoot = headBlockRoot.get()
let genesisRef = if tailBlock.message.slot == GENESIS_SLOT:
tailRef
else:
let
genesisBlockRoot = db.getGenesisBlockRoot()
genesisBlock = db.getBlock(genesisBlockRoot).expect(
"preInit should have initialized the database with a genesis block")
BlockRef.init(genesisBlockRoot, genesisBlock.message)
var
blocks = {tailRef.root: tailRef}.toTable()
headRef: BlockRef
if genesisRef != tailRef:
blocks[genesisRef.root] = genesisRef
if headRoot != tailRoot:
var curRef: BlockRef
@ -363,6 +375,7 @@ proc init*(T: type ChainDAGRef,
blocks: blocks,
tail: tailRef,
head: headRef,
genesis: genesisRef,
db: db,
heads: @[headRef],
headState: tmpState[],
@ -383,8 +396,12 @@ proc init*(T: type ChainDAGRef,
# state we loaded might be older than head block - nonetheless, it will be
# from the same epoch as the head, thus the finalized and justified slots are
# the same - these only change on epoch boundaries.
res.finalizedHead = headRef.atEpochStart(
res.headState.data.data.finalized_checkpoint.epoch)
# When we start from a snapshot state, the `finalized_checkpoint` in the
# snapshot will point to an even older state, but we trust the tail state
# (the snapshot) to be finalized, hence the `max` expression below.
let finalizedEpoch = max(res.headState.data.data.finalized_checkpoint.epoch,
tailRef.slot.epoch)
res.finalizedHead = headRef.atEpochStart(finalizedEpoch)
res.clearanceState = res.headState
@ -398,6 +415,7 @@ proc init*(T: type ChainDAGRef,
proc findEpochRef*(blck: BlockRef, epoch: Epoch): EpochRef = # may return nil!
let ancestor = blck.epochAncestor(epoch)
doAssert ancestor.blck != nil
for epochRef in ancestor.blck.epochRefs:
if epochRef.epoch == epoch:
return epochRef
@ -405,8 +423,8 @@ proc findEpochRef*(blck: BlockRef, epoch: Epoch): EpochRef = # may return nil!
proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
let epochRef = blck.findEpochRef(epoch)
if epochRef != nil:
beacon_state_data_cache_hits.inc
return epochRef
beacon_state_data_cache_hits.inc
return epochRef
beacon_state_data_cache_misses.inc
@ -415,7 +433,8 @@ proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
dag.withState(dag.tmpState, ancestor):
let
prevEpochRef = blck.findEpochRef(epoch - 1)
prevEpochRef = if dag.tail.slot.epoch >= epoch: nil
else: blck.findEpochRef(epoch - 1)
newEpochRef = EpochRef.init(state, cache, prevEpochRef)
# TODO consider constraining the number of epochrefs per state
@ -512,14 +531,18 @@ func getBlockRange*(
## at this index.
##
## If there were no blocks in the range, `output.len` will be returned.
let requestedCount = output.lenu64
let
requestedCount = output.lenu64
headSlot = dag.head.slot
trace "getBlockRange entered",
head = shortLog(dag.head.root), requestedCount, startSlot, skipStep
head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot
if startSlot < dag.tail.slot or headSlot <= startSlot:
return output.len # Identical to returning an empty set of block as indicated above
let
headSlot = dag.head.slot
runway = if headSlot > startSlot: uint64(headSlot - startSlot)
else: return output.len # Identical to returning an empty set of block as indicated above
runway = uint64(headSlot - startSlot)
skipStep = max(skipStep, 1) # Treat 0 step as 1
count = min(1'u64 + (runway div skipStep), requestedCount)
endSlot = startSlot + count * skipStep
@ -702,7 +725,7 @@ proc updateHead*(
## blocks that were once considered potential candidates for a tree will
## now fall from grace, or no longer be considered resolved.
doAssert not newHead.isNil()
doAssert not newHead.parent.isNil() or newHead.slot == 0
doAssert not newHead.parent.isNil() or newHead.slot <= dag.tail.slot
logScope:
newHead = shortLog(newHead)
@ -843,25 +866,50 @@ proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
true
proc preInit*(
T: type ChainDAGRef, db: BeaconChainDB, state: BeaconState,
signedBlock: SignedBeaconBlock) =
T: type ChainDAGRef, db: BeaconChainDB,
genesisState, tailState: BeaconState, tailBlock: SignedBeaconBlock) =
# write a genesis state, the way the ChainDAGRef expects it to be stored in
# database
# TODO probably should just init a block pool with the freshly written
# state - but there's more refactoring needed to make it nice - doing
# a minimal patch for now..
doAssert signedBlock.message.state_root == hash_tree_root(state)
doAssert tailBlock.message.state_root == hash_tree_root(tailState)
notice "New database from snapshot",
blockRoot = shortLog(signedBlock.root),
stateRoot = shortLog(signedBlock.message.state_root),
fork = state.fork,
validators = state.validators.len()
blockRoot = shortLog(tailBlock.root),
stateRoot = shortLog(tailBlock.message.state_root),
fork = tailState.fork,
validators = tailState.validators.len()
db.putState(state)
db.putBlock(signedBlock)
db.putTailBlock(signedBlock.root)
db.putHeadBlock(signedBlock.root)
db.putStateRoot(signedBlock.root, state.slot, signedBlock.message.state_root)
db.putState(tailState)
db.putBlock(tailBlock)
db.putTailBlock(tailBlock.root)
db.putHeadBlock(tailBlock.root)
db.putStateRoot(tailBlock.root, tailState.slot, tailBlock.message.state_root)
if tailState.slot == GENESIS_SLOT:
db.putGenesisBlockRoot(tailBlock.root)
else:
doAssert genesisState.slot == GENESIS_SLOT
db.putState(genesisState)
let genesisBlock = get_initial_beacon_block(genesisState)
db.putBlock(genesisBlock)
db.putStateRoot(genesisBlock.root, GENESIS_SLOT, genesisBlock.message.state_root)
db.putGenesisBlockRoot(genesisBlock.root)
proc setTailState*(dag: ChainDAGRef,
checkpointState: BeaconState,
checkpointBlock: SignedBeaconBlock) =
  # TODO
  # Delete all records up to the tail node. If the tail node is not
  # in the database, init the database in a way similar to `preInit`.
  discard
proc getGenesisBlockData*(dag: ChainDAGRef): BlockData =
dag.get(dag.genesis)
proc getGenesisBlockSlot*(dag: ChainDAGRef): BlockSlot =
let blockData = dag.getGenesisBlockData()
BlockSlot(blck: blockData.refs, slot: GENESIS_SLOT)
proc getProposer*(
dag: ChainDAGRef, head: BlockRef, slot: Slot):

View File

@ -1,15 +1,17 @@
{.push raises: [Defect].}
import
os, options,
strutils, os, options, unicode,
chronicles, chronicles/options as chroniclesOptions,
confutils, confutils/defs, confutils/std/net,
confutils, confutils/defs, confutils/std/net, stew/shims/net as stewNet,
stew/io2, unicodedb/properties, normalize,
json_serialization, web3/[ethtypes, confutils_defs],
network_metadata, spec/[crypto, keystore, digest, datatypes]
spec/[crypto, keystore, digest, datatypes, network],
network_metadata
export
defs, enabledLogLevel, parseCmdArg, completeCmdArg,
network_metadata
defaultEth2TcpPort, enabledLogLevel, ValidIpAddress,
defs, parseCmdArg, completeCmdArg, network_metadata
type
ValidatorKeyPath* = TypedInputFile[ValidatorPrivKey, Txt, "privkey"]
@ -82,6 +84,18 @@ type
desc: "Do not display interative prompts. Quit on missing configuration"
name: "non-interactive" }: bool
netKeyFile* {.
defaultValue: "random",
desc: "Source of network (secp256k1) private key file " &
"(random|<path>) (default: random)"
name: "netkey-file" }: string
netKeyInsecurePassword* {.
defaultValue: false,
desc: "Use pre-generated INSECURE password for network private key " &
"file (default: false)"
name: "insecure-netkey-password" }: bool
case cmd* {.
command
defaultValue: noCommand }: BNStartUpCmd
@ -128,13 +142,17 @@ type
abbr: "v"
name: "validator" }: seq[ValidatorKeyPath]
stateSnapshot* {.
desc: "SSZ file specifying a recent state snapshot"
abbr: "s"
name: "state-snapshot" }: Option[InputFile]
weakSubjectivityCheckpoint* {.
desc: "Weak subjectivity checkpoint in the format block_root:epoch_number"
name: "weak-subjectivity-checkpoint" }: Option[Checkpoint]
stateSnapshotContents* {.hidden.}: ref string
# This is ref so we can mutate it (to erase it) after the initial loading.
finalizedCheckpointState* {.
desc: "SSZ file specifying a recent finalized state"
name: "finalized-checkpoint-state" }: Option[InputFile]
finalizedCheckpointBlock* {.
desc: "SSZ file specifying a recent finalized block"
name: "finalized-checkpoint-block" }: Option[InputFile]
runtimePreset* {.hidden.}: RuntimePreset
@ -238,7 +256,7 @@ type
name: "last-user-validator" }: uint64
bootstrapAddress* {.
defaultValue: ValidIpAddress.init("127.0.0.1")
defaultValue: init(ValidIpAddress, "127.0.0.1")
desc: "The public IP address that will be advertised as a bootstrap node for the testnet"
name: "bootstrap-address" }: ValidIpAddress
@ -428,13 +446,15 @@ func dumpDirOutgoing*(conf: BeaconNodeConf|ValidatorClientConf): string =
proc createDumpDirs*(conf: BeaconNodeConf) =
if conf.dumpEnabled:
try:
createDir(conf.dumpDirInvalid)
createDir(conf.dumpDirIncoming)
createDir(conf.dumpDirOutgoing)
except CatchableError as err:
# Dumping is mainly a debugging feature, so ignore these..
warn "Cannot create dump directories", msg = err.msg
let resInv = createPath(conf.dumpDirInvalid, 0o750)
if resInv.isErr():
warn "Could not create dump directory", path = conf.dumpDirInvalid
let resInc = createPath(conf.dumpDirIncoming, 0o750)
if resInc.isErr():
warn "Could not create dump directory", path = conf.dumpDirIncoming
let resOut = createPath(conf.dumpDirOutgoing, 0o750)
if resOut.isErr():
warn "Could not create dump directory", path = conf.dumpDirOutgoing
func parseCmdArg*(T: type GraffitiBytes, input: TaintedString): T
{.raises: [ValueError, Defect].} =
@ -443,13 +463,39 @@ func parseCmdArg*(T: type GraffitiBytes, input: TaintedString): T
func completeCmdArg*(T: type GraffitiBytes, input: TaintedString): seq[string] =
return @[]
func parseCmdArg*(T: type Checkpoint, input: TaintedString): T
{.raises: [ValueError, Defect].} =
let sepIdx = find(input.string, ':')
if sepIdx == -1:
raise newException(ValueError,
"The weak subjectivity checkpoint must be provided in the `block_root:epoch_number` format")
T(root: Eth2Digest.fromHex(input[0 ..< sepIdx]),
epoch: parseBiggestUInt(input[sepIdx .. ^1]).Epoch)
func completeCmdArg*(T: type Checkpoint, input: TaintedString): seq[string] =
return @[]
proc isPrintable(rune: Rune): bool =
# This can be eventually replaced by the `unicodeplus` package, but a single
# proc does not justify the extra dependencies at the moment:
# https://github.com/nitely/nim-unicodeplus
# https://github.com/nitely/nim-segmentation
rune == Rune(0x20) or unicodeCategory(rune) notin ctgC+ctgZ
func parseCmdArg*(T: type WalletName, input: TaintedString): T
{.raises: [ValueError, Defect].} =
if input.len == 0:
raise newException(ValueError, "The wallet name should not be empty")
if input[0] == '_':
raise newException(ValueError, "The wallet name should not start with an underscore")
return T(input)
for rune in runes(input.string):
if not rune.isPrintable:
raise newException(ValueError, "The wallet name should consist only of printable characters")
# From the Unicode Normalization FAQ (https://unicode.org/faq/normalization.html):
# NFKC is the preferred form for identifiers, especially where there are security concerns
# (see UTR #36 http://www.unicode.org/reports/tr36/)
return T(toNFKC(input))
func completeCmdArg*(T: type WalletName, input: TaintedString): seq[string] =
return @[]

View File

@ -2,7 +2,7 @@ import
os, sequtils, strutils, options, json, terminal, random,
chronos, chronicles, confutils, stint, json_serialization,
../beacon_chain/network_metadata,
web3, web3/confutils_defs, eth/keys,
web3, web3/confutils_defs, eth/keys, stew/io2,
spec/[datatypes, crypto, presets], ssz/merkleization, keystore_management
# Compiled version of /scripts/depositContract.v.py in this repo
@ -178,8 +178,15 @@ proc main() {.async.} =
mnemonic = generateMnemonic(rng[])
runtimePreset = getRuntimePresetForNetwork(cfg.eth2Network)
createDir(string cfg.outValidatorsDir)
createDir(string cfg.outSecretsDir)
let vres = createPath(string cfg.outValidatorsDir, 0o750)
if vres.isErr():
warn "Could not create validators folder",
path = string cfg.outValidatorsDir, err = ioErrorMsg(vres.error)
let sres = createPath(string cfg.outSecretsDir, 0o750)
if sres.isErr():
warn "Could not create secrets folder",
path = string cfg.outSecretsDir, err = ioErrorMsg(sres.error)
let deposits = generateDeposits(
runtimePreset,

View File

@ -40,10 +40,16 @@ proc fromJson*(n: JsonNode, argName: string, result: var Version) =
proc `%`*(value: Version): JsonNode =
result = newJString($value)
template genFromJsonForIntType(t: untyped) =
proc fromJson*(n: JsonNode, argName: string, result: var t) =
template genFromJsonForIntType(T: untyped) =
proc fromJson*(n: JsonNode, argName: string, result: var T) =
n.kind.expect(JInt, argName)
result = n.getInt().t
let asInt = n.getInt()
# signed -> unsigned conversions are unchecked
# https://github.com/nim-lang/RFCs/issues/175
if asInt < 0:
raise newException(
ValueError, "JSON-RPC input is an unexpected negative value")
result = T(asInt)
genFromJsonForIntType(Epoch)
genFromJsonForIntType(Slot)

View File

@ -4,7 +4,7 @@ import
std/options as stdOptions,
# Status libs
stew/[varints, base58, endians2, results, byteutils], bearssl,
stew/[varints, base58, base64, endians2, results, byteutils, io2], bearssl,
stew/shims/net as stewNet,
stew/shims/[macros, tables],
faststreams/[inputs, outputs, buffers], snappy, snappy/framing,
@ -23,7 +23,8 @@ import
# Beacon node modules
version, conf, eth2_discovery, libp2p_json_serialization, conf,
ssz/ssz_serialization,
peer_pool, spec/[datatypes, network], ./time
peer_pool, spec/[datatypes, network], ./time,
keystore_management
when defined(nbc_gossipsub_11):
import libp2p/protocols/pubsub/gossipsub
@ -204,7 +205,6 @@ type
const
clientId* = "Nimbus beacon node v" & fullVersionStr
networkKeyFilename = "privkey.protobuf"
nodeMetadataFilename = "node-metadata.json"
TCP = net.Protocol.IPPROTO_TCP
@ -272,10 +272,14 @@ const
when libp2p_pki_schemes != "secp256k1":
{.fatal: "Incorrect building process, please use -d:\"libp2p_pki_schemes=secp256k1\"".}
const
NetworkInsecureKeyPassword = "INSECUREPASSWORD"
template libp2pProtocol*(name: string, version: int) {.pragma.}
func shortLog*(peer: Peer): string = shortLog(peer.info.peerId)
chronicles.formatIt(Peer): shortLog(it)
chronicles.formatIt(PublicKey): byteutils.toHex(it.getBytes().tryGet())
template remote*(peer: Peer): untyped =
peer.info.peerId
@ -832,7 +836,7 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
new_peers = newPeers
if newPeers == 0:
if node.peerPool.lenSpace() <= node.wantedPeers shr 2:
if node.peerPool.lenCurrent() <= node.wantedPeers shr 2:
warn "Less than 25% wanted peers and could not discover new nodes",
discovered = len(discoveredNodes), new_peers = newPeers,
wanted_peers = node.wantedPeers
@ -1201,46 +1205,141 @@ proc initAddress*(T: type MultiAddress, str: string): T =
template tcpEndPoint(address, port): auto =
MultiAddress.init(address, tcpProtocol, port)
proc getPersistentNetKeys*(
rng: var BrHmacDrbgContext, conf: BeaconNodeConf): KeyPair =
let
privKeyPath = conf.dataDir / networkKeyFilename
privKey =
if not fileExists(privKeyPath):
createDir conf.dataDir.string
let key = PrivateKey.random(Secp256k1, rng).tryGet()
writeFile(privKeyPath, key.getBytes().tryGet())
key
else:
let keyBytes = readFile(privKeyPath)
PrivateKey.init(keyBytes.toOpenArrayByte(0, keyBytes.high)).tryGet()
proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
conf: BeaconNodeConf): KeyPair =
case conf.cmd
of noCommand:
if conf.netKeyFile == "random":
let res = PrivateKey.random(Secp256k1, rng)
if res.isErr():
fatal "Could not generate random network key file"
quit QuitFailure
let privKey = res.get()
let pubKey = privKey.getKey().tryGet()
info "Using random network key", network_public_key = pubKey
return KeyPair(seckey: privKey, pubkey: privKey.getKey().tryGet())
else:
let keyPath =
if isAbsolute(conf.netKeyFile):
conf.netKeyFile
else:
conf.dataDir / conf.netKeyFile
KeyPair(seckey: privKey, pubkey: privKey.getKey().tryGet())
if fileAccessible(keyPath, {AccessFlags.Find}):
info "Network key storage is present, unlocking", key_path = keyPath
# Insecure password used only for automated testing.
let insecurePassword =
if conf.netKeyInsecurePassword:
some(NetworkInsecureKeyPassword)
else:
none[string]()
let res = loadNetKeystore(keyPath, insecurePassword)
if res.isNone():
fatal "Could not load network key file"
quit QuitFailure
let privKey = res.get()
let pubKey = privKey.getKey().tryGet()
info "Network key storage was successfully unlocked",
key_path = keyPath, network_public_key = pubKey
return KeyPair(seckey: privKey, pubkey: pubKey)
else:
info "Network key storage is missing, creating a new one",
key_path = keyPath
let rres = PrivateKey.random(Secp256k1, rng)
if rres.isErr():
fatal "Could not generate random network key file"
quit QuitFailure
let privKey = rres.get()
let pubKey = privKey.getKey().tryGet()
# Insecure password used only for automated testing.
let insecurePassword =
if conf.netKeyInsecurePassword:
some(NetworkInsecureKeyPassword)
else:
none[string]()
let sres = saveNetKeystore(rng, keyPath, privKey, insecurePassword)
if sres.isErr():
fatal "Could not create network key file", key_path = keyPath
quit QuitFailure
info "New network key storage was created", key_path = keyPath,
network_public_key = pubKey
return KeyPair(seckey: privKey, pubkey: pubKey)
of createTestnet:
if conf.netKeyFile == "random":
fatal "Could not create testnet using `random` network key"
quit QuitFailure
let keyPath =
if isAbsolute(conf.netKeyFile):
conf.netKeyFile
else:
conf.dataDir / conf.netKeyFile
let rres = PrivateKey.random(Secp256k1, rng)
if rres.isErr():
fatal "Could not generate random network key file"
quit QuitFailure
let privKey = rres.get()
let pubKey = privKey.getKey().tryGet()
# Insecure password used only for automated testing.
let insecurePassword =
if conf.netKeyInsecurePassword:
some(NetworkInsecureKeyPassword)
else:
none[string]()
let sres = saveNetKeystore(rng, keyPath, privKey, insecurePassword)
if sres.isErr():
fatal "Could not create network key file", key_path = keyPath
quit QuitFailure
info "New network key storage was created", key_path = keyPath,
network_public_key = pubKey
return KeyPair(seckey: privKey, pubkey: privkey.getKey().tryGet())
else:
let res = PrivateKey.random(Secp256k1, rng)
if res.isErr():
fatal "Could not generate random network key file"
quit QuitFailure
let privKey = res.get()
return KeyPair(seckey: privKey, pubkey: privkey.getKey().tryGet())
func gossipId(data: openArray[byte]): string =
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/specs/phase0/p2p-interface.md#topics-and-messages
string.fromBytes(sha256.digest(data).data.toOpenArray(0, 7))
base64.encode(Base64Url, sha256.digest(data).data)
func msgIdProvider(m: messages.Message): string =
gossipId(m.data)
proc createEth2Node*(
rng: ref BrHmacDrbgContext, conf: BeaconNodeConf,
enrForkId: ENRForkID): Eth2Node =
proc createEth2Node*(rng: ref BrHmacDrbgContext,
conf: BeaconNodeConf,
netKeys: KeyPair,
enrForkId: ENRForkID): Eth2Node =
var
(extIp, extTcpPort, extUdpPort) = setupNat(conf)
hostAddress = tcpEndPoint(conf.listenAddress, conf.tcpPort)
announcedAddresses = if extIp.isNone(): @[]
else: @[tcpEndPoint(extIp.get(), extTcpPort)]
notice "Initializing networking", hostAddress,
info "Initializing networking", hostAddress,
network_public_key = netKeys.pubkey,
announcedAddresses
let keys = getPersistentNetKeys(rng[], conf)
# TODO nim-libp2p still doesn't have support for announcing addresses
# that are different from the host address (this is relevant when we
# are running behind a NAT).
var switch = newStandardSwitch(some keys.seckey, hostAddress,
var switch = newStandardSwitch(some netKeys.seckey, hostAddress,
transportFlags = {ServerFlags.ReuseAddr},
secureManagers = [
SecureProtocol.Noise, # Only noise in ETH2!
@ -1257,16 +1356,10 @@ proc createEth2Node*(
result = Eth2Node.init(conf, enrForkId, switch, pubsub,
extIp, extTcpPort, extUdpPort,
keys.seckey.asEthKey, discovery = conf.discv5Enabled,
netKeys.seckey.asEthKey,
discovery = conf.discv5Enabled,
rng = rng)
proc getPersistenBootstrapAddr*(rng: var BrHmacDrbgContext, conf: BeaconNodeConf,
ip: ValidIpAddress, port: Port): EnrResult[enr.Record] =
let pair = getPersistentNetKeys(rng, conf)
return enr.Record.init(1'u64, # sequence number
pair.seckey.asEthKey,
some(ip), port, port, @[])
proc announcedENR*(node: Eth2Node): enr.Record =
doAssert node.discovery != nil, "The Eth2Node must be initialized"
node.discovery.localNode.record

View File

@ -117,10 +117,11 @@ proc on_tick(self: var Checkpoints, dag: ChainDAGRef, time: Slot): FcResult[void
proc process_attestation_queue(self: var ForkChoice) {.gcsafe.}
proc update_time(self: var ForkChoice, dag: ChainDAGRef, time: Slot): FcResult[void] =
while time > self.checkpoints.time:
? on_tick(self.checkpoints, dag, self.checkpoints.time + 1)
if time > self.checkpoints.time:
while time > self.checkpoints.time:
? on_tick(self.checkpoints, dag, self.checkpoints.time + 1)
self.process_attestation_queue()
self.process_attestation_queue() # Only run if time changed!
ok()
@ -150,18 +151,14 @@ func process_attestation*(
new_vote = shortLog(vote)
proc process_attestation_queue(self: var ForkChoice) =
var
keep: seq[QueuedAttestation]
for attestation in self.queuedAttestations:
if attestation.slot < self.checkpoints.time:
for validator_index in attestation.attesting_indices:
self.queuedAttestations.keepItIf:
if it.slot < self.checkpoints.time:
for validator_index in it.attesting_indices:
self.backend.process_attestation(
validator_index, attestation.block_root,
attestation.slot.epoch())
validator_index, it.block_root, it.slot.epoch())
false
else:
keep.add attestation
self.queuedAttestations = keep
true
func contains*(self: ForkChoiceBackend, block_root: Eth2Digest): bool =
## Returns `true` if a block is known to the fork choice

View File

@ -1,17 +1,21 @@
import
std/[os, strutils, terminal, wordwrap],
stew/byteutils, chronicles, chronos, web3, stint, json_serialization,
std/[os, strutils, terminal, wordwrap, unicode],
chronicles, chronos, web3, stint, json_serialization,
serialization, blscurve, eth/common/eth_types, eth/keys, confutils, bearssl,
spec/[datatypes, digest, crypto, keystore],
stew/[byteutils, io2], libp2p/crypto/crypto as lcrypto,
nimcrypto/utils as ncrutils,
conf, ssz/merkleization, network_metadata
export
keystore
{.push raises: [Defect].}
{.localPassC: "-fno-lto".} # no LTO for crypto
const
keystoreFileName* = "keystore.json"
netKeystoreFileName* = "network_keystore.json"
type
WalletPathPair* = object
@ -22,6 +26,155 @@ type
walletPath*: WalletPathPair
mnemonic*: Mnemonic
const
minPasswordLen = 12
mostCommonPasswords = wordListArray(
currentSourcePath.parentDir /
"../vendor/nimbus-security-resources/passwords/10-million-password-list-top-100000.txt",
minWordLen = minPasswordLen)
template echo80(msg: string) =
echo wrapWords(msg, 80)
proc checkAndCreateDataDir*(dataDir: string): bool =
## Checks `conf.dataDir`.
## If folder exists, procedure will check it for access and
## permissions `0750 (rwxr-x---)`, if folder do not exists it will be created
## with permissions `0750 (rwxr-x---)`.
let amask = {AccessFlags.Read, AccessFlags.Write, AccessFlags.Execute}
when defined(posix):
if fileAccessible(dataDir, amask):
let gmask = {UserRead, UserWrite, UserExec, GroupRead, GroupExec}
let pmask = {OtherRead, OtherWrite, OtherExec, GroupWrite}
let pres = getPermissionsSet(dataDir)
if pres.isErr():
fatal "Could not check data folder permissions",
data_dir = dataDir, errorCode = $pres.error,
errorMsg = ioErrorMsg(pres.error)
false
else:
let insecurePermissions = pres.get() * pmask
if insecurePermissions != {}:
fatal "Data folder has insecure permissions",
data_dir = dataDir,
insecure_permissions = $insecurePermissions,
current_permissions = pres.get().toString(),
required_permissions = gmask.toString()
false
else:
true
else:
let res = createPath(dataDir, 0o750)
if res.isErr():
fatal "Could not create data folder", data_dir = dataDir,
errorMsg = ioErrorMsg(res.error), errorCode = $res.error
false
else:
true
elif defined(windows):
if fileAccessible(dataDir, amask):
let res = createPath(dataDir, 0o750)
if res.isErr():
fatal "Could not create data folder", data_dir = dataDir,
errorMsg = ioErrorMsg(res.error), errorCode = $res.error
false
else:
true
else:
true
else:
fatal "Unsupported operation system"
return false
proc checkSensitiveFilePermissions*(filePath: string): bool =
## Check if ``filePath`` has only "(600) rw-------" permissions.
## Procedure returns ``false`` if permissions are different
when defined(windows):
# Windows do not support per-user/group/other permissions,
# skiping verification part.
true
else:
let allowedMask = {UserRead, UserWrite}
let mask = {UserExec,
GroupRead, GroupWrite, GroupExec,
OtherRead, OtherWrite, OtherExec}
let pres = getPermissionsSet(filePath)
if pres.isErr():
error "Could not check file permissions",
key_path = filePath, errorCode = $pres.error,
errorMsg = ioErrorMsg(pres.error)
false
else:
let insecurePermissions = pres.get() * mask
if insecurePermissions != {}:
error "File has insecure permissions",
key_path = filePath,
insecure_permissions = $insecurePermissions,
current_permissions = pres.get().toString(),
required_permissions = allowedMask.toString()
false
else:
true
proc keyboardCreatePassword(prompt: string, confirm: string): KsResult[string] =
while true:
let password =
try:
readPasswordFromStdin(prompt)
except IOError:
error "Could not read password from stdin"
return err("Could not read password from stdin")
# We treat `password` as UTF-8 encoded string.
if validateUtf8(password) == -1:
if runeLen(password) < minPasswordLen:
echo80 "The entered password should be at least " & $minPasswordLen &
" characters."
continue
elif password in mostCommonPasswords:
echo80 "The entered password is too commonly used and it would be " &
"easy to brute-force with automated tools."
continue
else:
echo80 "Entered password is not valid UTF-8 string"
continue
let confirmedPassword =
try:
readPasswordFromStdin(confirm)
except IOError:
error "Could not read password from stdin"
return err("Could not read password from stdin")
if password != confirmedPassword:
echo "Passwords don't match, please try again"
continue
return ok(password)
proc keyboardGetPassword[T](prompt: string, attempts: int,
pred: proc(p: string): KsResult[T] {.closure.}): KsResult[T] =
var
remainingAttempts = attempts
counter = 1
while remainingAttempts > 0:
let passphrase =
try:
readPasswordFromStdin(prompt)
except IOError as exc:
error "Could not read password from stdin"
return
os.sleep(1000 * counter)
let res = pred(passphrase)
if res.isOk():
return res
else:
inc(counter)
dec(remainingAttempts)
err("Failed to decrypt keystore")
proc loadKeystore(validatorsDir, secretsDir, keyName: string,
nonInteractive: bool): Option[ValidatorPrivKey] =
let
@ -37,12 +190,17 @@ proc loadKeystore(validatorsDir, secretsDir, keyName: string,
let passphrasePath = secretsDir / keyName
if fileExists(passphrasePath):
let
passphrase = KeystorePass:
try: readFile(passphrasePath)
except IOError as err:
error "Failed to read passphrase file", err = err.msg, path = passphrasePath
return
if not(checkSensitiveFilePermissions(passphrasePath)):
error "Password file has insecure permissions", key_path = keyStorePath
return
let passphrase = KeystorePass.init:
try:
readFile(passphrasePath)
except IOError as err:
error "Failed to read passphrase file", err = err.msg,
path = passphrasePath
return
let res = decryptKeystore(keystore, passphrase)
if res.isOk:
@ -56,21 +214,20 @@ proc loadKeystore(validatorsDir, secretsDir, keyName: string,
keyName, validatorsDir, secretsDir = secretsDir
return
var remainingAttempts = 3
var prompt = "Please enter passphrase for key \"" & validatorsDir/keyName & "\"\n"
while remainingAttempts > 0:
let passphrase = KeystorePass:
try: readPasswordFromStdin(prompt)
except IOError:
error "STDIN not readable. Cannot obtain Keystore password"
return
let prompt = "Please enter passphrase for key \"" &
(validatorsDir / keyName) & "\": "
let res = keyboardGetPassword[ValidatorPrivKey](prompt, 3,
proc (password: string): KsResult[ValidatorPrivKey] =
let decrypted = decryptKeystore(keystore, KeystorePass.init password)
if decrypted.isErr():
error "Keystore decryption failed. Please try again", keystorePath
decrypted
)
let decrypted = decryptKeystore(keystore, passphrase)
if decrypted.isOk:
return decrypted.get.some
else:
prompt = "Keystore decryption failed. Please try again"
dec remainingAttempts
if res.isOk():
some(res.get())
else:
return
iterator validatorKeysFromDirs*(validatorsDir, secretsDir: string): ValidatorPrivKey =
try:
@ -117,6 +274,79 @@ type
FailedToCreateSecretFile
FailedToCreateKeystoreFile
proc loadNetKeystore*(keyStorePath: string,
insecurePwd: Option[string]): Option[lcrypto.PrivateKey] =
if not(checkSensitiveFilePermissions(keystorePath)):
error "Network keystorage file has insecure permissions",
key_path = keyStorePath
return
let keyStore =
try:
Json.loadFile(keystorePath, NetKeystore)
except IOError as err:
error "Failed to read network keystore", err = err.msg,
path = keystorePath
return
except SerializationError as err:
error "Invalid network keystore", err = err.formatMsg(keystorePath)
return
if insecurePwd.isSome():
warn "Using insecure password to unlock networking key"
let decrypted = decryptNetKeystore(keystore, KeystorePass.init insecurePwd.get)
if decrypted.isOk:
return some(decrypted.get())
else:
error "Network keystore decryption failed", key_store = keyStorePath
return
else:
let prompt = "Please enter passphrase to unlock networking key: "
let res = keyboardGetPassword[lcrypto.PrivateKey](prompt, 3,
proc (password: string): KsResult[lcrypto.PrivateKey] =
let decrypted = decryptNetKeystore(keystore, KeystorePass.init password)
if decrypted.isErr():
error "Keystore decryption failed. Please try again", keystorePath
decrypted
)
if res.isOk():
some(res.get())
else:
return
proc saveNetKeystore*(rng: var BrHmacDrbgContext, keyStorePath: string,
netKey: lcrypto.PrivateKey, insecurePwd: Option[string]
): Result[void, KeystoreGenerationError] =
let password =
if insecurePwd.isSome():
warn "Using insecure password to lock networking key",
key_path = keyStorePath
insecurePwd.get()
else:
let prompt = "Please enter NEW password to lock network key storage: "
let confirm = "Please confirm, network key storage password: "
let res = keyboardCreatePassword(prompt, confirm)
if res.isErr():
return err(FailedToCreateKeystoreFile)
res.get()
let keyStore = createNetKeystore(kdfScrypt, rng, netKey,
KeystorePass.init password)
var encodedStorage: string
try:
encodedStorage = Json.encode(keyStore)
except SerializationError:
error "Could not serialize network key storage", key_path = keyStorePath
return err(FailedToCreateKeystoreFile)
let res = writeFile(keyStorePath, encodedStorage, 0o600)
if res.isOk():
ok()
else:
error "Could not write to network key storage file", key_path = keyStorePath
err(FailedToCreateKeystoreFile)
proc saveKeystore(rng: var BrHmacDrbgContext,
validatorsDir, secretsDir: string,
signingKey: ValidatorPrivKey, signingPubKey: ValidatorPubKey,
@ -126,7 +356,7 @@ proc saveKeystore(rng: var BrHmacDrbgContext,
validatorDir = validatorsDir / keyName
if not existsDir(validatorDir):
var password = KeystorePass getRandomBytes(rng, 32).toHex
var password = KeystorePass.init ncrutils.toHex(getRandomBytes(rng, 32))
defer: burnMem(password)
let
@ -134,18 +364,28 @@ proc saveKeystore(rng: var BrHmacDrbgContext,
password, signingKeyPath)
keystoreFile = validatorDir / keystoreFileName
try: createDir validatorDir
except OSError, IOError: return err FailedToCreateValidatorDir
var encodedStorage: string
try:
encodedStorage = Json.encode(keyStore)
except SerializationError:
error "Could not serialize keystorage", key_path = keystoreFile
return err(FailedToCreateKeystoreFile)
try: createDir secretsDir
except OSError, IOError: return err FailedToCreateSecretsDir
let vres = createPath(validatorDir, 0o750)
if vres.isErr():
return err(FailedToCreateValidatorDir)
try: writeFile(secretsDir / keyName, password.string)
except IOError: return err FailedToCreateSecretFile
let sres = createPath(secretsDir, 0o750)
if sres.isErr():
return err(FailedToCreateSecretsDir)
try: Json.saveFile(keystoreFile, keyStore)
except IOError, SerializationError:
return err FailedToCreateKeystoreFile
let swres = writeFile(secretsDir / keyName, password.str, 0o600)
if swres.isErr():
return err(FailedToCreateSecretFile)
let kwres = writeFile(keystoreFile, encodedStorage, 0o600)
if kwres.isErr():
return err(FailedToCreateKeystoreFile)
ok()
@ -161,7 +401,7 @@ proc generateDeposits*(preset: RuntimePreset,
let withdrawalKeyPath = makeKeyPath(0, withdrawalKeyKind)
# TODO: Explain why we are using an empty password
var withdrawalKey = keyFromPath(mnemonic, KeystorePass"", withdrawalKeyPath)
var withdrawalKey = keyFromPath(mnemonic, KeystorePass.init "", withdrawalKeyPath)
defer: burnMem(withdrawalKey)
let withdrawalPubKey = withdrawalKey.toPubKey
@ -179,28 +419,19 @@ proc generateDeposits*(preset: RuntimePreset,
ok deposits
const
minPasswordLen = 10
mostCommonPasswords = wordListArray(
currentSourcePath.parentDir /
"../vendor/nimbus-security-resources/passwords/10-million-password-list-top-100000.txt",
minWordLen = minPasswordLen)
proc saveWallet*(wallet: Wallet, outWalletPath: string): Result[void, string] =
try: createDir splitFile(outWalletPath).dir
except OSError, IOError:
let e = getCurrentException()
return err("failure to create wallet directory: " & e.msg)
try: Json.saveFile(outWalletPath, wallet, pretty = true)
except IOError as e:
return err("failure to write file: " & e.msg)
except SerializationError as e:
# TODO: Saving a wallet should not produce SerializationErrors.
# Investigate the source of this exception.
return err("failure to serialize wallet: " & e.formatMsg("wallet"))
let walletDir = splitFile(outWalletPath).dir
var encodedWallet: string
try:
encodedWallet = Json.encode(wallet, pretty = true)
except SerializationError:
return err("Could not serialize wallet")
let pres = createPath(walletDir, 0o750)
if pres.isErr():
return err("Could not create wallet directory [" & walletDir & "]")
let wres = writeFile(outWalletPath, encodedWallet, 0o600)
if wres.isErr():
return err("Could not write wallet to file [" & outWalletPath & "]")
ok()
proc saveWallet*(wallet: WalletPathPair): Result[void, string] =
@ -253,19 +484,20 @@ proc importKeystoresFromDir*(rng: var BrHmacDrbgContext,
if toLowerAscii(ext) != ".json":
continue
let keystore = try:
Json.loadFile(file, Keystore)
except SerializationError as e:
warn "Invalid keystore", err = e.formatMsg(file)
continue
except IOError as e:
warn "Failed to read keystore file", file, err = e.msg
continue
let keystore =
try:
Json.loadFile(file, Keystore)
except SerializationError as e:
warn "Invalid keystore", err = e.formatMsg(file)
continue
except IOError as e:
warn "Failed to read keystore file", file, err = e.msg
continue
var firstDecryptionAttempt = true
while true:
var secret = decryptCryptoField(keystore.crypto, KeystorePass password)
var secret = decryptCryptoField(keystore.crypto, KeystorePass.init password)
if secret.len == 0:
if firstDecryptionAttempt:
@ -302,9 +534,6 @@ proc importKeystoresFromDir*(rng: var BrHmacDrbgContext,
fatal "Failed to access the imported deposits directory"
quit 1
template echo80(msg: string) =
echo wrapWords(msg, 80)
template ask(prompt: string): string =
try:
stdout.write prompt, ": "
@ -322,87 +551,61 @@ proc pickPasswordAndSaveWallet(rng: var BrHmacDrbgContext,
"installation and can be changed at any time."
echo ""
while true:
var password, confirmedPassword: TaintedString
try:
var firstTry = true
var password =
block:
let prompt = "Please enter a password: "
let confirm = "Please repeat the password: "
let res = keyboardCreatePassword(prompt, confirm)
if res.isErr():
return err($res.error)
res.get()
defer: burnMem(password)
template prompt: string =
if firstTry:
"Please enter a password: "
else:
"Please enter a new password: "
var name: WalletName
let outWalletName = config.outWalletName
if outWalletName.isSome:
name = outWalletName.get
else:
echo ""
echo80 "For your convenience, the wallet can be identified with a name " &
"of your choice. Please enter a wallet name below or press ENTER " &
"to continue with a machine-generated name."
while true:
if not readPasswordInput(prompt, password):
return err "failure to read a password from stdin"
if password.len < minPasswordLen:
while true:
var enteredName = ask "Wallet name"
if enteredName.len > 0:
name =
try:
echo "The entered password should be at least $1 characters." %
[$minPasswordLen]
except ValueError:
raiseAssert "The format string above is correct"
elif password in mostCommonPasswords:
echo80 "The entered password is too commonly used and it would be easy " &
"to brute-force with automated tools."
else:
break
WalletName.parseCmdArg(enteredName)
except CatchableError as err:
echo err.msg & ". Please try again."
continue
break
firstTry = false
if not readPasswordInput("Please repeat the password:", confirmedPassword):
return err "failure to read a password from stdin"
if password != confirmedPassword:
echo "Passwords don't match, please try again"
continue
var name: WalletName
let outWalletName = config.outWalletName
if outWalletName.isSome:
name = outWalletName.get
else:
echo ""
echo80 "For your convenience, the wallet can be identified with a name " &
"of your choice. Please enter a wallet name below or press ENTER " &
"to continue with a machine-generated name."
while true:
var enteredName = ask "Wallet name"
if enteredName.len > 0:
name = try: WalletName.parseCmdArg(enteredName)
except CatchableError as err:
echo err.msg & ". Please try again."
continue
break
let nextAccount = if config.cmd == wallets and
config.walletsCmd == WalletsCmd.restore:
let nextAccount =
if config.cmd == wallets and config.walletsCmd == WalletsCmd.restore:
config.restoredDepositsCount
else:
none Natural
let wallet = createWallet(kdfPbkdf2, rng, mnemonic,
name = name,
nextAccount = nextAccount,
password = KeystorePass password)
let wallet = createWallet(kdfPbkdf2, rng, mnemonic,
name = name,
nextAccount = nextAccount,
password = KeystorePass.init password)
let outWalletFileFlag = config.outWalletFile
let outWalletFile = if outWalletFileFlag.isSome:
let outWalletFileFlag = config.outWalletFile
let outWalletFile =
if outWalletFileFlag.isSome:
string outWalletFileFlag.get
else:
config.walletsDir / addFileExt(string wallet.uuid, "json")
config.walletsDir / addFileExt(string wallet.name, "json")
let status = saveWallet(wallet, outWalletFile)
if status.isErr:
return err("failure to create wallet file due to " & status.error)
let status = saveWallet(wallet, outWalletFile)
if status.isErr:
return err("failure to create wallet file due to " & status.error)
notice "Wallet file written", path = outWalletFile
return ok WalletPathPair(wallet: wallet, path: outWalletFile)
finally:
burnMem(password)
burnMem(confirmedPassword)
echo "\nWallet file successfully written to \"", outWalletFile, "\""
return ok WalletPathPair(wallet: wallet, path: outWalletFile)
proc createWalletInteractively*(
rng: var BrHmacDrbgContext,
@ -461,7 +664,7 @@ proc restoreWalletInteractively*(rng: var BrHmacDrbgContext,
echo "To restore your wallet, please enter your backed-up seed phrase."
while true:
if not readPasswordInput("Seedphrase:", enteredMnemonic):
if not readPasswordInput("Seedphrase: ", enteredMnemonic):
fatal "failure to read password from stdin"
quit 1
@ -472,52 +675,58 @@ proc restoreWalletInteractively*(rng: var BrHmacDrbgContext,
discard pickPasswordAndSaveWallet(rng, config, validatedMnemonic)
proc unlockWalletInteractively*(wallet: Wallet): Result[Mnemonic, string] =
let prompt = "Please enter the password for unlocking the wallet: "
echo "Please enter the password for unlocking the wallet"
let res = keyboardGetPassword[Mnemonic](prompt, 3,
proc (password: string): KsResult[Mnemonic] =
var secret = decryptCryptoField(wallet.crypto, KeystorePass.init password)
if len(secret) > 0:
let mnemonic = Mnemonic(string.fromBytes(secret))
burnMem(secret)
ok(mnemonic)
else:
let failed = "Unlocking of the wallet failed. Please try again"
echo failed
err(failed)
)
if res.isOk():
ok(res.get())
else:
err "Unlocking of the wallet failed."
proc loadWallet*(fileName: string): Result[Wallet, string] =
try:
ok Json.loadFile(fileName, Wallet)
except CatchableError as e:
err e.msg
except SerializationError as err:
err "Invalid wallet syntax: " & err.formatMsg(fileName)
except IOError as err:
err "Error accessing wallet file \"" & fileName & "\": " & err.msg
proc unlockWalletInteractively*(wallet: Wallet): Result[Mnemonic, string] =
echo "Please enter the password for unlocking the wallet"
for i in 1..3:
var password: TaintedString
try:
if not readPasswordInput("Password: ", password):
return err "failure to read password from stdin"
var secret = decryptCryptoField(wallet.crypto, KeystorePass password)
if secret.len > 0:
defer: burnMem(secret)
return ok Mnemonic(string.fromBytes(secret))
else:
echo "Unlocking of the wallet failed. Please try again."
finally:
burnMem(password)
return err "failure to unlock wallet"
proc findWallet*(config: BeaconNodeConf, name: WalletName): Result[WalletPathPair, string] =
proc findWallet*(config: BeaconNodeConf,
name: WalletName): Result[Option[WalletPathPair], string] =
var walletFiles = newSeq[string]()
try:
for kind, walletFile in walkDir(config.walletsDir):
if kind != pcFile: continue
let walletId = splitFile(walletFile).name
if cmpIgnoreCase(walletId, name.string) == 0:
let wallet = ? loadWallet(walletFile)
return ok WalletPathPair(wallet: wallet, path: walletFile)
return ok some WalletPathPair(wallet: wallet, path: walletFile)
walletFiles.add walletFile
except OSError:
return err "failure to list wallet directory"
except OSError as err:
return err("Error accessing the wallets directory \"" &
config.walletsDir & "\": " & err.msg)
for walletFile in walletFiles:
let wallet = ? loadWallet(walletFile)
if cmpIgnoreCase(wallet.name.string, name.string) == 0:
return ok WalletPathPair(wallet: wallet, path: walletFile)
if cmpIgnoreCase(wallet.name.string, name.string) == 0 or
cmpIgnoreCase(wallet.uuid.string, name.string) == 0:
return ok some WalletPathPair(wallet: wallet, path: walletFile)
return err "failure to locate wallet file"
return ok none(WalletPathPair)
type
# This is not particularly well-standardized yet.

View File

@ -162,7 +162,7 @@ template eth2testnet(path: string): Eth2NetworkMetadata =
const
medallaMetadata* = eth2testnet "shared/medalla"
spadinaMetadata* = eth2testnet "shared/spadina"
zinkenMetadata* = eth2testnet "shared/zinken"
testnet0Metadata* = eth2testnet "nimbus/testnet0"
testnet1Metadata* = eth2testnet "nimbus/testnet1"
attacknetMc0Metadata* = eth2testnet "shared/attacknet-beta1-mc-0"
@ -176,8 +176,8 @@ proc getMetadataForNetwork*(networkName: string): Eth2NetworkMetadata =
mainnetMetadata
of "medalla":
medallaMetadata
of "spadina":
spadinaMetadata
of "zinken":
zinkenMetadata
of "attacknet-beta1-mc-0":
attacknetMc0Metadata
of "testnet0":

View File

@ -14,11 +14,12 @@ import
# Nimble packages
chronos, confutils/defs,
chronicles, chronicles/helpers as chroniclesHelpers,
stew/io2,
# Local modules
spec/[datatypes, crypto, helpers], eth2_network, time
proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
proc setupStdoutLogging*(logLevel: string) =
when compiles(defaultChroniclesStream.output.writer):
defaultChroniclesStream.outputs[0].writer =
proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe, raises: [Defect].} =
@ -27,6 +28,7 @@ proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
except IOError as err:
logLoggingFailure(cstring(msg), err)
proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
randomize()
if logFile.isSome:
@ -35,10 +37,10 @@ proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
let
logFile = logFile.get.string
logFileDir = splitFile(logFile).dir
try:
createDir logFileDir
except CatchableError as err:
error "Failed to create directory for log file", path = logFileDir, err = err.msg
let lres = createPath(logFileDir, 0o750)
if lres.isErr():
error "Failed to create directory for log file",
path = logFileDir, err = ioErrorMsg(lres.error)
break openLogFile
if not defaultChroniclesStream.outputs[1].open(logFile):

View File

@ -89,7 +89,9 @@ proc fetchAncestorBlocksFromNetwork(rman: RequestManager,
res = Result[void, BlockError].ok()
if res.isOk():
peer.updateScore(PeerScoreGoodBlocks)
if len(ublocks) > 0:
# We reward peer only if it returns something.
peer.updateScore(PeerScoreGoodBlocks)
else:
# We are not penalizing other errors because of the reasons described
# above.

View File

@ -304,17 +304,19 @@ func is_valid_genesis_state*(preset: RuntimePreset,
return false
true
func emptyBeaconBlockBody*(): BeaconBlockBody =
# TODO: This shouldn't be necessary if OpaqueBlob is the default
BeaconBlockBody(randao_reveal: ValidatorSig(kind: OpaqueBlob))
# TODO this is now a non-spec helper function, and it's not really accurate
# so only usable/used in research/ and tests/
func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
let message = BeaconBlock(
slot: GENESIS_SLOT,
state_root: hash_tree_root(state),
body: BeaconBlockBody(
# TODO: This shouldn't be necessary if OpaqueBlob is the default
randao_reveal: ValidatorSig(kind: OpaqueBlob)))
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
slot: state.slot,
state_root: hash_tree_root(state),
body: emptyBeaconBlockBody())
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
SignedBeaconBlock(message: message, root: hash_tree_root(message))
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/specs/phase0/beacon-chain.md#get_block_root_at_slot

View File

@ -25,7 +25,7 @@
import
# Standard library
options, tables,
std/[options, tables],
# Internal
./digest,
# Status
@ -33,6 +33,7 @@ import
blscurve,
chronicles,
json_serialization,
nimcrypto/utils as ncrutils,
# Standard library
hashes
@ -349,16 +350,16 @@ func shortLog*(x: BlsValue): string =
# The prefix must be short
# due to the mechanics of the `shortLog` function.
if x.kind == Real:
x.blsValue.exportRaw().toOpenArray(0, 3).toHex()
byteutils.toHex(x.blsValue.exportRaw().toOpenArray(0, 3))
else:
"r:" & x.blob.toOpenArray(0, 3).toHex()
"r:" & byteutils.toHex(x.blob.toOpenArray(0, 3))
func shortLog*(x: ValidatorPrivKey): string =
## Logging for raw unwrapped BLS types
"<private key>"
func shortLog*(x: TrustedSig): string =
x.data.toOpenArray(0, 3).toHex()
byteutils.toHex(x.data.toOpenArray(0, 3))
# Initialization
# ----------------------------------------------------------------------
@ -387,4 +388,4 @@ func init*(T: typedesc[ValidatorSig], data: array[RawSigSize, byte]): T {.noInit
v[]
proc burnMem*(key: var ValidatorPrivKey) =
key = default(ValidatorPrivKey)
ncrutils.burnMem(addr key, sizeof(ValidatorPrivKey))

View File

@ -64,6 +64,11 @@ func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
if is_active_validator(val, epoch):
result.add idx.ValidatorIndex
func get_active_validator_indices_len*(state: BeaconState, epoch: Epoch): uint64 =
for idx, val in state.validators:
if is_active_validator(val, epoch):
inc result
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(state: BeaconState): Epoch =
## Return the current epoch.

View File

@ -7,18 +7,25 @@
import
# Standard library
std/[math, strutils, parseutils, strformat, typetraits, algorithm],
std/[algorithm, math, parseutils, strformat, strutils, typetraits, unicode],
# Third-party libraries
normalize,
# Status libraries
stew/[results, byteutils, bitseqs, bitops2], stew/shims/macros,
bearssl, eth/keyfile/uuid, blscurve, faststreams/textio, json_serialization,
nimcrypto/[sha2, rijndael, pbkdf2, bcmode, hash, utils, scrypt],
# Internal
stew/[results, bitseqs, bitops2], stew/shims/macros,
bearssl, eth/keyfile/uuid, blscurve, json_serialization,
nimcrypto/[sha2, rijndael, pbkdf2, bcmode, hash, scrypt],
# Local modules
libp2p/crypto/crypto as lcrypto,
./datatypes, ./crypto, ./digest, ./signatures
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
export
results, burnMem, writeValue, readValue
{.push raises: [Defect].}
{.localPassC: "-fno-lto".} # no LTO for crypto
type
ChecksumFunctionKind* = enum
@ -105,6 +112,13 @@ type
uuid*: string
version*: int
NetKeystore* = object
crypto*: Crypto
description*: ref string
pubkey*: lcrypto.PublicKey
uuid*: string
version*: int
KsResult*[T] = Result[T, string]
Eth2KeyKind* = enum
@ -115,8 +129,9 @@ type
WalletName* = distinct string
Mnemonic* = distinct string
KeyPath* = distinct string
KeystorePass* = distinct string
KeySeed* = distinct seq[byte]
KeystorePass* = object
str*: string
Credentials* = object
mnemonic*: Mnemonic
@ -124,7 +139,7 @@ type
signingKey*: ValidatorPrivKey
withdrawalKey*: ValidatorPrivKey
SensitiveData = Mnemonic|KeystorePass|KeySeed
SensitiveStrings = Mnemonic|KeySeed
SimpleHexEncodedTypes = ScryptSalt|ChecksumBytes|CipherBytes
const
@ -169,10 +184,15 @@ template `==`*(lhs, rhs: WalletName): bool =
template `$`*(x: WalletName): string =
string(x)
template burnMem*(m: var (SensitiveData|TaintedString)) =
template burnMem*(m: var (SensitiveStrings|TaintedString)) =
# TODO: `burnMem` in nimcrypto could use distinctBase
# to make its usage less error-prone.
utils.burnMem(string m)
ncrutils.burnMem(string m)
template burnMem*(m: var KeystorePass) =
# TODO: `burnMem` in nimcrypto could use distinctBase
# to make its usage less error-prone.
ncrutils.burnMem(m.str)
func longName*(wallet: Wallet): string =
if wallet.name.string == wallet.uuid.string:
@ -190,7 +210,7 @@ macro wordListArray*(filename: static string,
minWordLen: static int = 0,
maxWordLen: static int = high(int)): untyped =
result = newTree(nnkBracket)
var words = slurp(filename).split()
var words = slurp(filename).splitLines()
for word in words:
if word.len >= minWordLen and word.len <= maxWordLen:
result.add newCall("cstring", newLit(word))
@ -201,9 +221,21 @@ const
englishWords = wordListArray("english_word_list.txt",
maxWords = wordListLen,
maxWordLen = maxWordLen)
englishWordsDigest =
"AD90BF3BEB7B0EB7E5ACD74727DC0DA96E0A280A258354E7293FB7E211AC03DB".toDigest
proc checkEnglishWords(): bool =
if len(englishWords) != wordListLen:
false
else:
var ctx: sha256
ctx.init()
for item in englishWords:
ctx.update($item)
ctx.finish() == englishWordsDigest
static:
doAssert englishWords.len == wordListLen
doAssert(checkEnglishWords(), "English words array is corrupted!")
func append*(path: KeyPath, pathNode: Natural): KeyPath =
KeyPath(path.string & "/" & $pathNode)
@ -213,7 +245,8 @@ func validateKeyPath*(path: TaintedString): Result[KeyPath, cstring] =
var number: BiggestUint
try:
for elem in path.string.split("/"):
# TODO: doesn't "m" have to be the first character and is it the only place where it is valid?
# TODO: doesn't "m" have to be the first character and is it the only
# place where it is valid?
if elem == "m":
continue
# parseBiggestUInt can raise if overflow
@ -248,9 +281,18 @@ func makeKeyPath*(validatorIdx: Natural,
except ValueError:
raiseAssert "All values above can be converted successfully to strings"
func isControlRune(r: Rune): bool =
let r = int r
(r >= 0 and r < 0x20) or (r >= 0x7F and r < 0xA0)
proc init*(T: type KeystorePass, input: string): T =
for rune in toNFKD(input):
if not isControlRune(rune):
result.str.add rune
func getSeed*(mnemonic: Mnemonic, password: KeystorePass): KeySeed =
# https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#from-mnemonic-to-seed
let salt = "mnemonic-" & password.string
let salt = toNFKD("mnemonic" & password.str)
KeySeed sha512.pbkdf2(mnemonic.string, salt, 2048, 64)
template add(m: var Mnemonic, s: cstring) =
@ -262,8 +304,6 @@ proc generateMnemonic*(
entropyParam: openarray[byte] = @[]): Mnemonic =
## Generates a valid BIP-0039 mnenomic:
## https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#generating-the-mnemonic
doAssert words.len == wordListLen
var entropy: seq[byte]
if entropyParam.len == 0:
setLen(entropy, 32)
@ -309,7 +349,7 @@ proc validateMnemonic*(inputWords: TaintedString,
## with sensitive data even in case of validator failure.
## Make sure to burn the received data after usage.
let words = inputWords.string.strip.split(Whitespace)
let words = strutils.strip(inputWords.string.toNFKD).split(Whitespace)
if words.len < 12 or words.len > 24 or words.len mod 3 != 0:
return false
@ -375,7 +415,7 @@ proc shaChecksum(key, cipher: openarray[byte]): Sha256Digest =
proc writeJsonHexString(s: OutputStream, data: openarray[byte])
{.raises: [IOError, Defect].} =
s.write '"'
s.writeHex data
s.write ncrutils.toHex(data, {HexFlags.LowerCase})
s.write '"'
proc readValue*(r: var JsonReader, value: var Pbkdf2Salt)
@ -384,11 +424,11 @@ proc readValue*(r: var JsonReader, value: var Pbkdf2Salt)
if s.len == 0 or s.len mod 16 != 0:
r.raiseUnexpectedValue(
"The Pbkdf2Salt salf must have a non-zero length divisible by 16")
"The Pbkdf2Salt salt must have a non-zero length divisible by 16")
try:
value = Pbkdf2Salt hexToSeqByte(s)
except ValueError:
value = Pbkdf2Salt ncrutils.fromHex(s)
let length = len(seq[byte](value))
if length == 0 or (length mod 8) != 0:
r.raiseUnexpectedValue(
"The Pbkdf2Salt must be a valid hex string")
@ -400,17 +440,15 @@ proc readValue*(r: var JsonReader, value: var Aes128CtrIv)
r.raiseUnexpectedValue(
"The aes-128-ctr IV must be a string of length 32")
try:
value = Aes128CtrIv hexToSeqByte(s)
except ValueError:
value = Aes128CtrIv ncrutils.fromHex(s)
if len(seq[byte](value)) != 16:
r.raiseUnexpectedValue(
"The aes-128-ctr IV must be a valid hex string")
proc readValue*[T: SimpleHexEncodedTypes](r: var JsonReader, value: var T)
{.raises: [SerializationError, IOError, Defect].} =
try:
value = T hexToSeqByte(r.readValue(string))
except ValueError:
value = T ncrutils.fromHex(r.readValue(string))
if len(seq[byte](value)) == 0:
r.raiseUnexpectedValue("Valid hex string expected")
proc readValue*(r: var JsonReader, value: var Kdf)
@ -467,7 +505,7 @@ proc decryptCryptoField*(crypto: Crypto, password: KeystorePass): seq[byte] =
let decKey = case crypto.kdf.function
of kdfPbkdf2:
template params: auto = crypto.kdf.pbkdf2Params
sha256.pbkdf2(password.string, params.salt.bytes, params.c, params.dklen)
sha256.pbkdf2(password.str, params.salt.bytes, params.c, params.dklen)
of kdfScrypt:
template params: auto = crypto.kdf.scryptParams
if params.dklen != scryptParams.dklen or
@ -476,7 +514,7 @@ proc decryptCryptoField*(crypto: Crypto, password: KeystorePass): seq[byte] =
params.p != scryptParams.p:
# TODO This should be reported in a better way
return
@(scrypt(password.string,
@(scrypt(password.str,
params.salt.bytes,
scryptParams.n,
scryptParams.r,
@ -513,10 +551,44 @@ proc decryptKeystore*(keystore: JsonString,
return err e.formatMsg("<keystore>")
decryptKeystore(keystore, password)
proc writeValue*(writer: var JsonWriter, value: lcrypto.PublicKey) {.
inline, raises: [IOError, Defect].} =
writer.writeValue(ncrutils.toHex(value.getBytes().get(),
{HexFlags.LowerCase}))
proc readValue*(reader: var JsonReader, value: var lcrypto.PublicKey) {.
raises: [SerializationError, IOError, Defect].} =
let res = init(lcrypto.PublicKey, reader.readValue(string))
if res.isOk():
value = res.get()
else:
# TODO: Can we provide better diagnostic?
raiseUnexpectedValue(reader, "Valid hex-encoded public key expected")
proc decryptNetKeystore*(nkeystore: NetKeystore,
password: KeystorePass): KsResult[lcrypto.PrivateKey] =
let decryptedBytes = decryptCryptoField(nkeystore.crypto, password)
if len(decryptedBytes) > 0:
let res = init(lcrypto.PrivateKey, decryptedBytes)
if res.isOk():
ok(res.get())
else:
err("Incorrect network private key")
else:
err("Empty network private key")
proc decryptNetKeystore*(nkeystore: JsonString,
password: KeystorePass): KsResult[lcrypto.PrivateKey] =
try:
let keystore = Json.decode(string(nkeystore), NetKeystore)
return decryptNetKeystore(keystore, password)
except SerializationError as exc:
return err(exc.formatMsg("<keystore>"))
proc createCryptoField(kdfKind: KdfKind,
rng: var BrHmacDrbgContext,
secret: openarray[byte],
password = KeystorePass "",
password = KeystorePass.init "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[]): Crypto =
type AES = aes128
@ -537,7 +609,7 @@ proc createCryptoField(kdfKind: KdfKind,
var decKey: seq[byte]
let kdf = case kdfKind
of kdfPbkdf2:
decKey = sha256.pbkdf2(password.string,
decKey = sha256.pbkdf2(password.str,
kdfSalt,
pbkdf2Params.c,
pbkdf2Params.dklen)
@ -545,7 +617,7 @@ proc createCryptoField(kdfKind: KdfKind,
params.salt = Pbkdf2Salt kdfSalt
Kdf(function: kdfPbkdf2, pbkdf2Params: params, message: "")
of kdfScrypt:
decKey = @(scrypt(password.string, kdfSalt,
decKey = @(scrypt(password.str, kdfSalt,
scryptParams.n, scryptParams.r, scryptParams.p, keyLen))
var params = scryptParams
params.salt = ScryptSalt kdfSalt
@ -571,10 +643,31 @@ proc createCryptoField(kdfKind: KdfKind,
params: Aes128CtrParams(iv: Aes128CtrIv aesIv),
message: CipherBytes cipherMsg))
proc createNetKeystore*(kdfKind: KdfKind,
rng: var BrHmacDrbgContext,
privKey: lcrypto.PrivateKey,
password = KeystorePass.init "",
description = "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[]): NetKeystore =
let
secret = privKey.getBytes().get()
cryptoField = createCryptoField(kdfKind, rng, secret, password, salt, iv)
pubKey = privKey.getKey().get()
uuid = uuidGenerate().expect("Random bytes should be available")
NetKeystore(
crypto: cryptoField,
pubkey: pubKey,
description: newClone(description),
uuid: $uuid,
version: 1
)
proc createKeystore*(kdfKind: KdfKind,
rng: var BrHmacDrbgContext,
privKey: ValidatorPrivkey,
password = KeystorePass "",
password = KeystorePass.init "",
path = KeyPath "",
description = "",
salt: openarray[byte] = @[],
@ -599,7 +692,7 @@ proc createWallet*(kdfKind: KdfKind,
name = WalletName "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[],
password = KeystorePass "",
password = KeystorePass.init "",
nextAccount = none(Natural),
pretty = true): Wallet =
let
@ -607,7 +700,7 @@ proc createWallet*(kdfKind: KdfKind,
# Please note that we are passing an empty password here because
# we want the wallet restoration procedure to depend only on the
# mnemonic (the user is asked to treat the mnemonic as a password).
seed = getSeed(mnemonic, KeystorePass"")
seed = getSeed(mnemonic, KeystorePass.init "")
crypto = createCryptoField(kdfKind, rng, distinctBase seed,
password, salt, iv)
Wallet(

View File

@ -0,0 +1,33 @@
import
datatypes, digest, helpers
{.push raises: [Defect].}
const
SAFETY_DECAY* = 10'u64
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/specs/phase0/weak-subjectivity.md#calculating-the-weak-subjectivity-period
func compute_weak_subjectivity_period*(state: BeaconState): uint64 =
var weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
let validator_count = get_active_validator_indices_len(state, get_current_epoch(state))
if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT div (2 * 100)
else:
weak_subjectivity_period += SAFETY_DECAY * validator_count div (2 * 100 * MIN_PER_EPOCH_CHURN_LIMIT)
return weak_subjectivity_period
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/specs/phase0/weak-subjectivity.md#checking-for-stale-weak-subjectivity-checkpoint
func is_within_weak_subjectivity_period*(current_slot: Slot,
ws_state: BeaconState,
ws_checkpoint: Checkpoint): bool =
# Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
doAssert ws_state.latest_block_header.state_root == ws_checkpoint.root
doAssert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch
let
ws_period = compute_weak_subjectivity_period(ws_state)
ws_state_epoch = compute_epoch_at_slot(ws_state.slot)
current_epoch = compute_epoch_at_slot(current_slot)
current_epoch <= ws_state_epoch + ws_period

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -8,7 +8,7 @@
{.push raises: [Defect].}
import
stew/[bitops2, ptrops]
stew/[bitops2, endians2, ptrops]
type
Bytes = seq[byte]
@ -52,6 +52,17 @@ func add*(s: var BitSeq, value: bool) =
s.Bytes[lastBytePos].changeBit 7, value
s.Bytes.add byte(1)
func toBytesLE(x: uint): array[sizeof(x), byte] =
# stew/endians2 supports explicitly sized uints only
when sizeof(uint) == 4:
static: doAssert sizeof(uint) == sizeof(uint32)
toBytesLE(x.uint32)
elif sizeof(uint) == 8:
static: doAssert sizeof(uint) == sizeof(uint64)
toBytesLE(x.uint64)
else:
static: doAssert false, "requires a 32-bit or 64-bit platform"
func loadLEBytes(WordType: type, bytes: openarray[byte]): WordType =
# TODO: this is a temporary proc until the endians API is improved
var shift = 0
@ -61,13 +72,8 @@ func loadLEBytes(WordType: type, bytes: openarray[byte]): WordType =
func storeLEBytes(value: SomeUnsignedInt, dst: var openarray[byte]) =
doAssert dst.len <= sizeof(value)
when system.cpuEndian == bigEndian:
var shift = 0
for i in 0 ..< dst.len:
result[i] = byte((v shr shift) and 0xff)
shift += 8
else:
copyMem(addr dst[0], unsafeAddr value, dst.len)
let bytesLE = toBytesLE(value)
copyMem(addr dst[0], unsafeAddr bytesLE[0], dst.len)
template loopOverWords(lhs, rhs: BitSeq,
lhsIsVar, rhsIsVar: static bool,

View File

@ -27,7 +27,7 @@ type
logScope: topics = "valapi"
proc toBlockSlot(blckRef: BlockRef): BlockSlot =
proc toBlockSlot(blckRef: BlockRef): BlockSlot =
blckRef.atSlot(blckRef.slot)
proc parseRoot(str: string): Eth2Digest =
@ -39,6 +39,12 @@ proc parsePubkey(str: string): ValidatorPubKey =
raise newException(CatchableError, "Not a valid public key")
return pubkeyRes[]
func checkEpochToSlotOverflow(epoch: Epoch) =
const maxEpoch = compute_epoch_at_slot(not 0'u64)
if epoch >= maxEpoch:
raise newException(
ValueError, "Requesting epoch for which slot would overflow")
proc doChecksAndGetCurrentHead(node: BeaconNode, slot: Slot): BlockRef =
result = node.chainDag.head
if not node.isSynced(result):
@ -48,6 +54,7 @@ proc doChecksAndGetCurrentHead(node: BeaconNode, slot: Slot): BlockRef =
raise newException(CatchableError, "Requesting way ahead of the current head")
proc doChecksAndGetCurrentHead(node: BeaconNode, epoch: Epoch): BlockRef =
checkEpochToSlotOverflow(epoch)
node.doChecksAndGetCurrentHead(epoch.compute_start_slot_at_epoch)
# TODO currently this function throws if the validator isn't found - is this OK?
@ -135,7 +142,7 @@ proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData =
of "head":
node.chainDag.get(node.chainDag.head)
of "genesis":
node.chainDag.get(node.chainDag.tail)
node.chainDag.getGenesisBlockData()
of "finalized":
node.chainDag.get(node.chainDag.finalizedHead.blck)
else:
@ -156,7 +163,7 @@ proc stateIdToBlockSlot(node: BeaconNode, stateId: string): BlockSlot =
of "head":
node.chainDag.head.toBlockSlot()
of "genesis":
node.chainDag.tail.toBlockSlot()
node.chainDag.getGenesisBlockSlot()
of "finalized":
node.chainDag.finalizedHead
of "justified":
@ -181,7 +188,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
template withStateForStateId(stateId: string, body: untyped): untyped =
# TODO this can be optimized for the "head" case since that should be most common
node.chainDag.withState(node.chainDag.tmpState,
node.stateIdToBlockSlot(stateId)):
node.stateIdToBlockSlot(stateId)):
body
rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
@ -229,6 +236,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
rpcServer.rpc("get_v1_beacon_states_stateId_committees_epoch") do (
stateId: string, epoch: uint64, index: uint64, slot: uint64) ->
seq[BeaconStatesCommitteesTuple]:
checkEpochToSlotOverflow(epoch.Epoch)
withStateForStateId(stateId):
proc getCommittee(slot: Slot, index: CommitteeIndex): BeaconStatesCommitteesTuple =
let vals = get_beacon_committee(state, slot, index, cache).mapIt(it.uint64)

View File

@ -299,12 +299,11 @@ programMain:
vc.beaconGenesis = waitFor vc.client.get_v1_beacon_genesis()
vc.beaconClock = BeaconClock.init(vc.beaconGenesis.genesis_time)
when UseSlashingProtection:
vc.attachedValidators.slashingProtection =
SlashingProtectionDB.init(
vc.beaconGenesis.genesis_validators_root,
kvStore SqStoreRef.init(config.validatorsDir(), "slashing_protection").tryGet()
)
vc.attachedValidators.slashingProtection =
SlashingProtectionDB.init(
vc.beaconGenesis.genesis_validators_root,
kvStore SqStoreRef.init(config.validatorsDir(), "slashing_protection").tryGet()
)
let
curSlot = vc.beaconClock.now().slotOrZero()

View File

@ -35,6 +35,8 @@ declareCounter beacon_blocks_proposed,
logScope: topics = "beacval"
# TODO: This procedure follows insecure scheme of creating directory without
# any permissions and writing file without any permissions.
proc saveValidatorKey*(keyName, key: string, conf: BeaconNodeConf) =
let validatorsDir = conf.validatorsDir
let outputFile = validatorsDir / keyName
@ -397,6 +399,18 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
a.data.target.epoch)
if notSlashable.isOk():
# TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
let signing_root = compute_attestation_root(
fork, genesis_validators_root, a.data)
node.attachedValidators
.slashingProtection
.registerAttestation(
a.validator.pubkey,
a.data.source.epoch,
a.data.target.epoch,
signing_root
)
traceAsyncErrors createAndSendAttestation(
node, fork, genesis_validators_root, a.validator, a.data,
a.committeeLen, a.indexInCommittee, num_active_validators)

View File

@ -216,13 +216,6 @@ type
logScope:
topics = "antislash"
const UseSlashingProtection* {.booldefine.} = true
when UseSlashingProtection:
static: echo " Built with slashing protection"
else:
static: echo " Built without slashing protection"
func subkey(
kind: static SlashingKeyKind,
validator: ValID,
@ -346,18 +339,16 @@ proc init*(
T: type SlashingProtectionDB,
genesis_validator_root: Eth2Digest,
backend: KVStoreRef): SlashingProtectionDB =
when UseSlashingProtection:
result = T(backend: backend)
result.setGenesis(genesis_validator_root)
result = T(backend: backend)
result.setGenesis(genesis_validator_root)
proc close*(db: SlashingProtectionDB) =
when UseSlashingProtection:
discard db.backend.close()
discard db.backend.close()
# DB Queries
# --------------------------------------------
proc checkSlashableBlockProposalImpl(
proc checkSlashableBlockProposal*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
slot: Slot
@ -378,26 +369,7 @@ proc checkSlashableBlockProposalImpl(
return ok()
return err(foundBlock.unsafeGet().block_root)
proc checkSlashableBlockProposal*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
slot: Slot
): Result[void, Eth2Digest] =
## Returns an error if the specified validator
## already proposed a block for the specified slot.
## This would lead to slashing.
## The error contains the blockroot that was already proposed
##
## Returns success otherwise
# TODO distinct type for the result block root
when UseSlashingProtection:
checkSlashableBlockProposalImpl(
db, validator, slot
)
else:
ok()
proc checkSlashableAttestationImpl(
proc checkSlashableAttestation*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
source: Epoch,
@ -532,26 +504,6 @@ proc checkSlashableAttestationImpl(
doAssert false, "Unreachable"
proc checkSlashableAttestation*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
source: Epoch,
target: Epoch
): Result[void, BadVote] =
## Returns an error if the specified validator
## already proposed a block for the specified slot.
## This would lead to slashing.
## The error contains the blockroot that was already proposed
##
## Returns success otherwise
# TODO distinct type for the result attestation root
when UseSlashingProtection:
checkSlashableAttestationImpl(
db, validator, source, target
)
else:
ok()
# DB update
# --------------------------------------------
@ -571,7 +523,7 @@ proc registerValidator(db: SlashingProtectionDB, validator: ValidatorPubKey) =
db.put(subkey(kValidator, valIndex), validator)
proc registerBlockImpl(
proc registerBlock*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
slot: Slot, block_root: Eth2Digest) =
@ -707,21 +659,7 @@ proc registerBlockImpl(
# ).expect("Consistent linked-list in DB")
).unsafeGet()
proc registerBlock*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
slot: Slot, block_root: Eth2Digest) =
## Add a block to the slashing protection DB
## `checkSlashableBlockProposal` MUST be run
## before to ensure no overwrite.
when UseSlashingProtection:
registerBlockImpl(
db, validator, slot, block_root
)
else:
discard
proc registerAttestationImpl(
proc registerAttestation*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
source, target: Epoch,
@ -870,21 +808,6 @@ proc registerAttestationImpl(
# ).expect("Consistent linked-list in DB")
).unsafeGet()
proc registerAttestation*(
db: SlashingProtectionDB,
validator: ValidatorPubKey,
source, target: Epoch,
attestation_root: Eth2Digest) =
## Add an attestation to the slashing protection DB
## `checkSlashableAttestation` MUST be run
## before to ensure no overwrite.
when UseSlashingProtection:
registerAttestationImpl(
db, validator, source, target, attestation_root
)
else:
discard
# Debug tools
# --------------------------------------------

View File

@ -101,3 +101,40 @@ switch("warning", "LockLevel:off")
# Useful for Chronos metrics.
#--define:chronosFutureTracking
# ############################################################
#
# No LTO for crypto
#
# ############################################################

# This applies per-file compiler flags to C files
# which do not support {.localPassC: "-fno-lto".}
# Unfortunately this is filename based instead of path-based
# (so any C file with a matching basename is affected).
# Assumes GCC
# NOTE(review): presumably LTO miscompiles or interferes with these
# vendored crypto sources — confirm against upstream issue reports.

# BLST
put("server.always", "-fno-lto")
put("assembly.always", "-fno-lto")

# Secp256k1
put("secp256k1.always", "-fno-lto")

# BearSSL - only RNGs
put("aesctr_drbg.always", "-fno-lto")
put("hmac_drbg.always", "-fno-lto")
put("sysrng.always", "-fno-lto")

# Miracl - only ECP to derive public key from private key
put("ecp_BLS12381.always", "-fno-lto")

# ############################################################
#
# Spurious warnings
#
# ############################################################

# sqlite3.c: In function sqlite3SelectNew:
# vendor/nim-sqlite3-abi/sqlite3.c:124500: warning: function may return address of local variable [-Wreturn-local-addr]
put("sqlite3.always", "-fno-lto") # -Wno-return-local-addr

View File

@ -2,7 +2,9 @@
## Conversions
Casting to or from a signed integer will lead to a range check
Casting to a signed integer will lead to a range check.
Conversion to an unsigned integer even from a negative signed integer will NOT lead to a range check (https://github.com/nim-lang/RFCs/issues/175)
https://nim-lang.org/docs/manual.html#statements-and-expressions-type-conversions
## Casting integers

View File

@ -1,5 +1,5 @@
[book]
authors = ["Lee Ting Ting", "Jacek Sieka"]
authors = ["Lee Ting Ting", "Jacek Sieka", "Sacha Saint-Leger"]
language = "en"
multilingual = false
src = "src"

View File

@ -1,7 +1,7 @@
# Summary
- [Introduction](./intro.md)
- [Installation](./install.md)
- [Become a Spadina validator](./spadina.md)
- [Become a Zinken validator](./zinken.md)
- [Become a Medalla validator](./medalla.md)
- [Troubleshooting Medalla](./medalla-troubleshooting.md)
- [Running the beacon node](./beacon_node.md)
@ -12,6 +12,7 @@
- [Setting up a systemd service](./beacon_node_systemd.md)
- [Generating your keys with NBC](./create_wallet_and_deposit.md)
# Misc
- [Infura guide](infura-guide.md)
- [Windows users]()
- [FAQ](./faq.md)
- [Contribute](./contribute.md)

View File

@ -0,0 +1,138 @@
# Supplying your own Infura endpoint
In a nutshell, Infura is a hosted ethereum node cluster that lets you make requests to the eth1 blockchain without requiring you to set up your own eth1 node.
While we do support Infura to process incoming validator deposits, we recommend running your own eth1 node to avoid relying on a third-party-service.
> **Note:** Nimbus currently supports remote Infura nodes and [local Geth archive nodes](https://gist.github.com/onqtam/aaf883d46f4dab1311ca9c160df12fe4). However we are working on relaxing that assumption (an archive node certainly won't be required for mainnet). In the future, we plan on having our own eth1 client -- [Nimbus 1](https://github.com/status-im/nimbus) -- be the recommended default.
## How it works
When you join an eth2 testnet by running `make zinken` or `make medalla`, the beacon node actually launches with an Infura endpoint supplied by us.
This endpoint is passed through the `web3-url` option (which takes as input the url of the web3 server from which you'd like to observe the eth1 chain).
If you look at the initial logs you should see something similar to the following:
```
DBG 2020-09-29 12:15:41.969+02:00 Launching beacon node
topics="beacnde" tid=8941404 file=beacon_node.nim:1190 version="0.5.0 (78ceeed8)" bls_backend=BLST
cmdParams="@[
\"--network=zinken\",
\"--log-level=DEBUG\",
\"--log-file=build/data/shared_zinken_0/nbc_bn_20200929121541.log\",
\"--data-dir=build/data/shared_zinken_0\",
\"--web3-url=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a\",
\"--tcp-port=9000\",
\"--udp-port=9000\",
\"--metrics\",
\"--metrics-port=8008\",
\"--rpc\",
\"--rpc-port=9190\"
]"
...
```
This allows us to deduce that the default endpoint is given by:
```
--web3-url=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a\"
```
## Potential problems
Because Infura caps the requests per endpoint per day to 100k, and all Nimbus nodes use the same Infura endpoint by default, it can happen that our Infura endpoint is overloaded (i.e the requests on a given day reach the 100k limit). If this happens, all requests to Infura using the default endpoint will fail, which means your node will stop processing new deposits.
To know if our endpoint has reached its limit for the day, keep your eye out for error messages that look like the following:
```
ERR 2020-09-29 14:04:33.313+02:00 Mainchain monitor failure, restarting tid=8941404
file=mainchain_monitor.nim:812 err="{\"code\":-32005,
\"data\":{\"rate\":{\"allowed_rps\":1,
\"backoff_seconds\":24,
\"current_rps\":22.5},
\"see\":\"https://infura.io/dashboard\"},
\"message\":\"daily request count exceeded, request rate limited\"}"
```
To get around this problem, we recommend launching the beacon node with your own endpoint.
## Supplying your own endpoint
> **Note:** In a previous version of the software it wasn't possible to manually override the web3 endpoint when running `make zinken` or `make medalla`. For the instructions below to work, make sure you've updated to the latest version of the software (run `git pull && make update` from the `master` branch of the `nim-beacon-chain` repository).
### 1. Visit Infura.io
Go to:
[https://infura.io/](https://infura.io/)
and click on `Get Started For Free`
![](https://i.imgur.com/BtStgup.png)
### 2. Sign up
Enter your email address and create a password
![](https://i.imgur.com/al1OsdR.png)
### 3. Verify email address
You should have received an email from Infura in your inbox. Open it up and click on `Confirm Email Address`
![](https://i.imgur.com/EAD8ZhV.png)
### 4. Go to dashboard
This will take you to your Infura dashboard (https://infura.io/dashboard/)
![](https://i.imgur.com/LuNcoYr.png)
### 5. Create your first project
Click on the first option (`create your first project`) under `Let's Get Started`
![](https://i.imgur.com/wBAGhcs.png)
Choose a name for your project
![](https://i.imgur.com/yr5vnSo.png)
You'll be directed to the settings page of your newly created project
![](https://i.imgur.com/kx3R8XS.png)
### 6. View Görli endpoints
In the `KEYS` section, click on the dropdown menu to the right of `ENDPOINTS`, and select `GÖRLI`
![](https://i.imgur.com/D9186kv.png)
### 7. Copy the websocket endpoint
Copy the address that starts with `wss://`
![](https://i.imgur.com/fZ6Bcjy.png)
> ⚠️ **Warning:** make sure you've copied the endpoint that starts with `wss` (websocket), and not the `https` endpoint.
### 8. Run the beacon node
Run the beacon node on your favourite testnet, pasting in your websocket endpoint as the input to the `web3-url` option.
```
make NODE_PARAMS="--web3-url=wss://goerli.infura.io/ws/v3/83b9d67f81ca401b8f9651441b43f29e"
<TESTNET_NAME>
```
> Remember to replace `<TESTNET_NAME>` with either `medalla` or `zinken`.
### 9. Check stats
Visit your project's stats page to see a summary of your eth1 related activity and method calls
![](https://i.imgur.com/MZVTHHV.png)
That's all there is to it :)

View File

@ -22,7 +22,7 @@ In this book, we will cover:
1. An [introduction](./intro.md#introduction) to the beacon chain, eth2, and Nimbus to equip you with some basic knowledge
2. [Installation steps](./install.md) outlining the prerequisites to get started
3. How to [become a Spadina validator](./spadina.md)
3. How to [become a Zinken validator](./zinken.md)
4. How to [become a Medalla validator](./medalla.md)
5. [Troubleshooting Medalla](./medalla-troubleshooting.md)
6. How to [run the beacon node](./beacon_node.md) software to sync the beacon chain
@ -31,9 +31,10 @@ In this book, we will cover:
9. [Advanced usage](./advanced.md) for developers
10. How to [setup up a systemd service](./beacon_node_systemd.md)
11. How to [use Nimbus to generate your validator keys](./create_wallet_and_deposit.md)
12. Tips and tricks for windows users (WIP)
13. Common [questions and answers](./faq.md) to satisfy your curiosity
14. How to [contribute](./contribute.md) to this book
12. How to [supply your own Infura endpoint](./infura-guide)
13. Tips and tricks for windows users (WIP)
14. Common [questions and answers](./faq.md) to satisfy your curiosity
15. How to [contribute](./contribute.md) to this book
## Introduction

View File

@ -17,6 +17,7 @@ If you find that `make update` causes the console to hang for too long, try runn
>**Note:** rest assured that when you restart the beacon node, the software will resume from where it left off, using the validator keys you have already imported.
### Starting over
The directory that stores the blockchain data of the testnet is `build/data/shared_medalla_0` (if you're connecting to another testnet, replace `medalla` with that testnet's name). Delete this folder to start over (for example, if you started building medalla with the wrong private keys).
@ -78,3 +79,39 @@ make BASE_PORT=9100 medalla
(You can replace `9100` with a port of your choosing)
### Mainchain monitor failure
If you're seeing one or more error messages that look like the following:
```
ERR 2020-09-29 14:04:33.313+02:00 Mainchain monitor failure, restarting tid=8941404
file=mainchain_monitor.nim:812 err="{\"code\":-32005,
\"data\":{\"rate\":{\"allowed_rps\":1,
\"backoff_seconds\":24,
\"current_rps\":22.5},
\"see\":\"https://infura.io/dashboard\"},
\"message\":\"daily request count exceeded, request rate limited\"}"
```
This means that our Infura endpoint is overloaded (in other words, the requests on a given day have reached the 100k free tier limit).
You can fix this by passing in your own Infura endpoint.
To do so, run:
```
make NODE_PARAMS="--web3-url=<YOUR_WEBSOCKET_ENDPOINT>" medalla
```
Importantly, make sure you pass in a websocket (`wss`) endpoint, not `https`. If you're not familiar with Infura, we recommend reading through our [Infura guide](./infura-guide) first.
### Running multiple nodes on the same computer
If you're running different testnets on the same computer, you'll need to specify a different `NODE_ID` to avoid port conflicts (the default is `NODE_ID=0`).
For example, to run `medalla` and `zinken` at the same time:
```
make medalla NODE_ID=0 # the default
make zinken NODE_ID=1
```

View File

@ -8,7 +8,6 @@ If you generated your signing key using the [eth2 launchpad](https://medalla.lau
> If you're an advanced user running Ubuntu, we recommend you check out this [excellent and complementary guide](https://medium.com/@SomerEsat/guide-to-staking-on-ethereum-2-0-ubuntu-medalla-nimbus-5f4b2b0f2d7c).
## Prerequisites
> ⚠️ If this is your first time playing with Nimbus, please make sure you [install our external dependencies](./install.md) first.
@ -63,9 +62,20 @@ INF 2020-08-03 16:24:17.951+02:00 Local validators attached top
INF 2020-08-03 16:24:17.958+02:00 Starting beacon node topics="beacnde" tid=11677993 file=beacon_node.nim:875 version="0.5.0 (31b33907)" nim="Nim Compiler Version 1.2.6 [MacOSX: amd64] (bf320ed1)" timeSinceFinalization=81350 head=ebe49843:0 finalizedHead=ebe49843:0 SLOTS_PER_EPOCH=32 SECONDS_PER_SLOT=12 SPEC_VERSION=0.12.2 dataDir=build/data/shared_medalla_0 pcs=start_beacon_node
```
> **Note:** when you run `make medalla`, the beacon node launches with an Infura endpoint supplied by us. This endpoint is passed through the `web3-url` option (which takes as input the url of the web3 server from which you'd like to observe the eth1 chain).
>
> Because Infura caps the requests per endpoint per day to 100k, and all Nimbus nodes use the same Infura endpoint by default, it can happen that our Infura endpoint is overloaded (i.e the requests on a given day reach the 100k limit). If this happens, all requests to Infura using the default endpoint will fail, which means your node will stop processing new deposits.
>
> To pass in your own Infura endpoint, you'll need to run:
>```
> make NODE_PARAMS="--web3-url=<YOUR_WEBSOCKET_ENDPOINT>" medalla
>```
> Importantly, the endpoint must be a websocket (`wss`) endpoint, not `https`. If you're not familiar with Infura, we recommend reading through our [Infura guide](./infura-guide), first.
>
> P.S. We are well aware that Infura is less than ideal from a decentralisation perspective. As such we are in the process of changing our default to [Geth](https://geth.ethereum.org/docs/install-and-build/installing-geth) (with Infura as a fallback). For some rough notes on how to use Geth with Nimbus, see [here](https://gist.github.com/onqtam/aaf883d46f4dab1311ca9c160df12fe4) (we will be adding more complete instructions very soon).
> Tip: to 🎨 on the [graffitwall](https://medalla.beaconcha.in/graffitiwall), pass the graffiti parameter like this:
> **Tip:** to 🎨 on the [graffitwall](https://medalla.beaconcha.in/graffitiwall), pass the graffiti parameter like this:
>```
>make NODE_PARAMS="--graffiti='<YOUR_GRAFFITI>'" medalla

View File

@ -1,9 +1,9 @@
# Become a Spadina validator
# Become a Zinken validator
This page will take you through how to import your key(s) and get your validator(s) ready for [Spadina genesis](https://blog.ethereum.org/2020/09/22/eth2-quick-update-no-17/).
This page will take you through how to import your key(s) and get your validator(s) ready for [Zinken genesis](https://blog.ethereum.org/2020/09/22/eth2-quick-update-no-17/).
For those of you who are unfamiliar, [Spadina](https://github.com/goerli/medalla/blob/master/spadina/README.md) is a short-lived eth2 testnet that will begin on tuesday and last for three days or so. Its main objective is to allow us to test the deposit/[genesis](https://hackmd.io/@benjaminion/genesis) flow one more time before mainnet launch.
For those of you who are unfamiliar, [Zinken](https://github.com/goerli/medalla/blob/master/zinken/README.md) is a short-lived eth2 testnet that will begin on tuesday and last for three days or so. Its main objective is to allow us to test the deposit/[genesis](https://hackmd.io/@benjaminion/genesis) flow one more time before mainnet launch.
Although it will mainly be client teams taking part, it's also a chance for you to practice sending a deposit and launching a node under mainnet launch conditions (in order to avoid clogging up the validator queue, we recommend practicing with one, or at most a handful of validators).
@ -17,7 +17,7 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
The easiest way to get your deposit in is to follow the Launchpad instructions here:
[https://spadina.launchpad.ethereum.org/](https://spadina.launchpad.ethereum.org/)
[https://zinken.launchpad.ethereum.org/](https://zinken.launchpad.ethereum.org/)
You should notice that there have been considerable improvements to the launchpad process since Medalla.
@ -37,7 +37,7 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
## 2. Import your key(s)
To import your `spadina` key(s) into Nimbus:
To import your `zinken` key(s) into Nimbus:
> **Note:** You can skip steps 1 and 2 if you've already cloned `nim-beacon-chain` and built the beacon node for `medalla`: just make sure you run `git pull && make update` from the `master` branch before continuing with step 3.
@ -59,10 +59,10 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
#### 3. Import keystore(s)
```
build/beacon_node deposits import --data-dir=build/data/shared_spadina_0 <YOUR VALIDATOR KEYS DIRECTORY>
build/beacon_node deposits import --data-dir=build/data/shared_zinken_0 <YOUR VALIDATOR KEYS DIRECTORY>
```
Replacing `<YOUR VALIDATOR KEYS DIRECTORY>` with the full pathname of the `validator_keys` directory that was created when you generated your keys using the [Spadina Launchpad](https://spadina.launchpad.ethereum.org/) [command line app](https://github.com/ethereum/eth2.0-deposit-cli/releases/).
Replacing `<YOUR VALIDATOR KEYS DIRECTORY>` with the full pathname of the `validator_keys` directory that was created when you generated your keys using the [Zinken Launchpad](https://zinken.launchpad.ethereum.org/) [command line app](https://github.com/ethereum/eth2.0-deposit-cli/releases/).
> **Tip:** run `pwd` in your `validator_keys` directory to print the full pathname to the console.
@ -70,53 +70,64 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
Don't worry, this is entirely normal. Your validator client needs both your signing keystore(s) and the password encrypting it to import your [key](https://blog.ethereum.org/2020/05/21/keys/) (since it needs to decrypt the keystore in order to be able to use it to sign on your behalf).
## 3. Connect to Spadina
## 3. Connect to Zinken
To build Nimbus and its dependencies, and connect to Spadina, run:
To build Nimbus and its dependencies, and connect to Zinken, run:
```
make spadina
make zinken
```
You should see that your beacon node has launched, and that you are processing eth1 blocks (using [infura](https://infura.io/)) and obtaining information about other depositors (`deposit log events`) in the run-up to genesis:
```
DBG 2020-09-27 17:33:28.500+02:00 Launching beacon node topics="beacnde" tid=8490483 file=beacon_node.nim:1190 version="0.5.0 (6cf7e837)" bls_backend=BLST cmdParams="@[\"--network=spadina\", \"--log-level=DEBUG\", \"--log-file=build/data/shared_spadina_0/nbc_bn_20200927173328.log\", \"--data-dir=build/data/shared_spadina_0\", \"--web3-url=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a\", \"--tcp-port=9000\", \"--udp-port=9000\", \"--metrics\", \"--metrics-port=8008\", \"--rpc\", \"--rpc-port=9190\"]" config="(logLevel: \"DEBUG\", logFile: Some(build/data/shared_spadina_0/nbc_bn_20200927173328.log), eth2Network: Some(\"spadina\"), dataDir: build/data/shared_spadina_0, validatorsDirFlag: None[InputDir], secretsDirFlag: None[InputDir], walletsDirFlag: None[InputDir], web3Url: \"wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a\", depositContractAddress: Some(0x48b597f4b53c21b48ad95c7256b49d1779bd5890), depositContractDeployedAt: Some(\"3384340\"), nonInteractive: false, cmd: noCommand, bootstrapNodes: @[\"# teku (@rolfyone)\", \"enr:-KG4QA-EcFfXQsL2dcneG8vp8HTWLrpwHQ5HhfyIytfpeKOISzROy2kYSsf_v-BZKnIx5XHDjqJ-ttz0hoz6qJA7tasEhGV0aDKQxKgkDQAAAAL__________4JpZIJ2NIJpcIQDFt-UiXNlY3AyNTZrMaECkR4C5DVO_9rB48eHTY4kdyOHsguTEDlvb7Ce0_mvghSDdGNwgiMog3VkcIIjKA\", \"\"], bootstrapNodesFile: , libp2pAddress: 0.0.0.0, tcpPort: 9000, udpPort: 9000, maxPeers: 79, nat: \"any\", validators: ..., stateSnapshot: None[InputFile], stateSnapshotContents: ..., runtimePreset: (GENESIS_FORK_VERSION: 00000002, GENESIS_DELAY: 172800, MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 1024, MIN_GENESIS_TIME: 1601380800, ETH1_FOLLOW_DISTANCE: 1024), nodeName: \"\", graffiti: None[GraffitiBytes], verifyFinalization: false, stopAtEpoch: 0, metricsEnabled: true, metricsAddress: 127.0.0.1, metricsPort: 8008, statusBarEnabled: true, statusBarContents: \"peers: $connected_peers;finalized: $finalized_root:$finalized_epoch;head: $head_root:$head_epoch:$head_epoch_slot;time: $epoch:$epoch_slot 
($slot);sync: $sync_status|\", rpcEnabled: true, rpcPort: 9190, rpcAddress: 127.0.0.1, inProcessValidators: true, discv5Enabled: true, dumpEnabled: false)"
DBG 2020-09-27 17:33:28.500+02:00 Launching beacon node topics="beacnde" tid=8490483 file=beacon_node.nim:1190 version="0.5.0 (6cf7e837)" bls_backend=BLST cmdParams="@[\"--network=zinken\", \"--log-level=DEBUG\", \"--log-file=build/data/shared_zinken_0/nbc_bn_20200927173328.log\", \"--data-dir=build/data/shared_zinken_0\", \"--web3-url=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a\", \"--tcp-port=9000\", \"--udp-port=9000\", \"--metrics\", \"--metrics-port=8008\", \"--rpc\", \"--rpc-port=9190\"]" config="(logLevel: \"DEBUG\", logFile: Some(build/data/shared_zinken_0/nbc_bn_20200927173328.log), eth2Network: Some(\"zinken\"), dataDir: build/data/shared_zinken_0, validatorsDirFlag: None[InputDir], secretsDirFlag: None[InputDir], walletsDirFlag: None[InputDir], web3Url: \"wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a\", depositContractAddress: Some(0x48b597f4b53c21b48ad95c7256b49d1779bd5890), depositContractDeployedAt: Some(\"3384340\"), nonInteractive: false, cmd: noCommand, bootstrapNodes: @[\"# teku (@rolfyone)\", \"enr:-KG4QA-EcFfXQsL2dcneG8vp8HTWLrpwHQ5HhfyIytfpeKOISzROy2kYSsf_v-BZKnIx5XHDjqJ-ttz0hoz6qJA7tasEhGV0aDKQxKgkDQAAAAL__________4JpZIJ2NIJpcIQDFt-UiXNlY3AyNTZrMaECkR4C5DVO_9rB48eHTY4kdyOHsguTEDlvb7Ce0_mvghSDdGNwgiMog3VkcIIjKA\", \"\"], bootstrapNodesFile: , libp2pAddress: 0.0.0.0, tcpPort: 9000, udpPort: 9000, maxPeers: 79, nat: \"any\", validators: ..., stateSnapshot: None[InputFile], stateSnapshotContents: ..., runtimePreset: (GENESIS_FORK_VERSION: 00000002, GENESIS_DELAY: 172800, MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 1024, MIN_GENESIS_TIME: 1601380800, ETH1_FOLLOW_DISTANCE: 1024), nodeName: \"\", graffiti: None[GraffitiBytes], verifyFinalization: false, stopAtEpoch: 0, metricsEnabled: true, metricsAddress: 127.0.0.1, metricsPort: 8008, statusBarEnabled: true, statusBarContents: \"peers: $connected_peers;finalized: $finalized_root:$finalized_epoch;head: $head_root:$head_epoch:$head_epoch_slot;time: $epoch:$epoch_slot 
($slot);sync: $sync_status|\", rpcEnabled: true, rpcPort: 9190, rpcAddress: 127.0.0.1, inProcessValidators: true, discv5Enabled: true, dumpEnabled: false)"
INF 2020-09-27 17:33:31.018+02:00 Starting Eth1 deposit contract monitoring tid=8490483 file=mainchain_monitor.nim:758 contract=0x48b597f4b53c21b48ad95c7256b49d1779bd5890 url=web3(wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a)
INF 2020-09-27 17:33:31.018+02:00 Waiting for new Eth1 block headers tid=8490483 file=mainchain_monitor.nim:415
INF 2020-09-27 17:33:46.213+02:00 Obtaining deposit log events tid=8490483 file=mainchain_monitor.nim:376 fromBlock=3384341 toBlock=3476604
INF 2020-09-27 17:33:56.912+02:00 Eth1 block processed tid=8490483 file=mainchain_monitor.nim:717 block=3423176:0ac7969b totalDeposits=1
```
> **Note:** as it stands, Nimbus defaults to using Infura to keep track of eth1 deposits. However we are well aware that Infura is less than ideal from a decentralisation perspective. As such we are in the process of changing the default to [Geth](https://geth.ethereum.org/docs/install-and-build/installing-geth) (with Infura as a fallback). For some rough notes on how to use Geth with Nimbus, see [here](https://gist.github.com/onqtam/aaf883d46f4dab1311ca9c160df12fe4) (we will be adding more complete instructions to this book very soon).
> **Note:** when you run `make zinken`, the beacon node launches with an Infura endpoint supplied by us. This endpoint is passed through the `web3-url` option (which takes as input the url of the web3 server from which you'd like to observe the eth1 chain).
>
> Because Infura caps the requests per endpoint per day to 100k, and all Nimbus nodes use the same Infura endpoint by default, it can happen that our Infura endpoint is overloaded (i.e the requests on a given day reach the 100k limit). If this happens, all requests to Infura using the default endpoint will fail, which means your node will stop processing new deposits.
>
> To pass in your own Infura endpoint, you'll need to run:
>```
> make NODE_PARAMS="--web3-url=<YOUR_WEBSOCKET_ENDPOINT>" medalla
>```
> Importantly, the endpoint must be a websocket (`wss`) endpoint, not `https`. If you're not familiar with Infura, we recommend reading through our [Infura guide](./infura-guide), first.
>
> P.S. We are well aware that Infura is less than ideal from a decentralisation perspective. As such we are in the process of changing our default to [Geth](https://geth.ethereum.org/docs/install-and-build/installing-geth) (with Infura as a fallback). For some rough notes on how to use Geth with Nimbus, see [here](https://gist.github.com/onqtam/aaf883d46f4dab1311ca9c160df12fe4) (we will be adding more complete instructions very soon).
## 4. Keep an eye on your validator
If you deposited after the [genesis](https://hackmd.io/@benjaminion/genesis) state was decided (September 27th 1400 UTC), your validators will have been put in a queue based on deposit time, and will slowly be inducted into the validator set after genesis. Getting through the queue may take a few hours or a day or so.
The best way to keep track of your validator's status is [spadina.beaconcha.in](https://medalla.beaconcha.in) (click on the orange magnifying glass at the very top and paste in your validator's public key).
The best way to keep track of your validator's status is [zinken.beaconcha.in](https://zinken.beaconcha.in) (click on the orange magnifying glass at the very top and paste in your validator's public key).
You can even [create an account](https://spadina.beaconcha.in/register) to add alerts and keep track of your validator's [performance](https://spadina.beaconcha.in/dashboard).
You can even [create an account](https://zinken.beaconcha.in/register) to add alerts and keep track of your validator's [performance](https://zinken.beaconcha.in/dashboard).
Finally, make sure you stay on the lookout for any critical updates to Nimbus. The best way to do so is through the **announcements** channel on our [discord](https://discord.com/invite/XRxWahP).
To update to the latest version, disconnect from `spadina` and run:
To update to the latest version, disconnect from `zinken` and run:
```
git pull && make update
```
Once the update is complete, run `make spadina` again to reconnect to the network.
Once the update is complete, run `make zinken` again to reconnect to the network.
Looking forward to seeing you on Spadina! 💛
Looking forward to seeing you on Zinken! 💛
----------
## Key management
Keys are stored in the `build/data/shared_spadina_0/` folder, under `secrets` and `validators` - make sure you keep these folders backed up.
Keys are stored in the `build/data/shared_zinken_0/` folder, under `secrets` and `validators` - make sure you keep these folders backed up.
The `secrets` folder contains the common secret that gives you access to all your validator keys.
@ -129,7 +140,7 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
Metrics are not included in the binary by default - to enable them, use the following options when starting the client:
```
make NIMFLAGS="-d:insecure" spadina
make NIMFLAGS="-d:insecure" zinken
```
You can then browse the metrics by connecting to:
@ -138,11 +149,11 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
Make sure this port is protected as the http server used is not considered secure (it should not be used by untrusted peers).
For instructions on how to spin up a beautiful and useful monitoring dashboard for your validator and beacon node, see [this page](./metrics-pretty-pictures.md) (note you'll need to replace all mention of `medalla` with `spadina`).
For instructions on how to spin up a beautiful and useful monitoring dashboard for your validator and beacon node, see [this page](./metrics-pretty-pictures.md) (note you'll need to replace all mention of `medalla` with `zinken`).
## Advanced options
N.B. All the options you're used to running with `medalla` should work as expected with `spadina`.
N.B. All the options you're used to running with `medalla` should work as expected with `zinken`.
### Change the TCP and UDP ports
@ -150,7 +161,7 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
To change the TCP and UDP ports from their default value of 9000 to 9100, say, run:
```
make BASE_PORT=9100 spadina
make BASE_PORT=9100 zinken
```
You may need to do this if you are running another client.
@ -162,21 +173,21 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
You can customise your beacon node's parameters using the `NODE_PARAMS` option:
```
make NODE_PARAMS="--tcp-port=9100 --udp-port=9100" spadina
make NODE_PARAMS="--tcp-port=9100 --udp-port=9100" zinken
```
>**Note:** the above command has exactly the same effect as `make BASE_PORT=9100 spadina`
>**Note:** the above command has exactly the same effect as `make BASE_PORT=9100 zinken`
A complete list of the available parameters can be found [here](https://github.com/status-im/nim-beacon-chain/blob/devel/beacon_chain/conf.nim#L92-L210) (use a parameter's `name` field to set it).
### Logs
Log files are saved in `build/data/shared_spadina_0/`.
Log files are saved in `build/data/shared_zinken_0/`.
### Makefile
If you are comfortable reading [Makefiles](https://en.wikipedia.org/wiki/Makefile#:~:text=A%20makefile%20is%20a%20file,to%20generate%20a%20target%2Fgoal), you can see the commands that `make spadina` executes under the hood, [here](https://github.com/status-im/nim-beacon-chain/blob/23bec993414df904e9d7ea9d26e65005b981aee0/Makefile#L184-L197).
If you are comfortable reading [Makefiles](https://en.wikipedia.org/wiki/Makefile#:~:text=A%20makefile%20is%20a%20file,to%20generate%20a%20target%2Fgoal), you can see the commands that `make zinken` executes under the hood, [here](https://github.com/status-im/nim-beacon-chain/blob/23bec993414df904e9d7ea9d26e65005b981aee0/Makefile#L184-L197).
Some of the provided options (such as `--network`) are essential while others (such as the ones controlling logging, metrics, ports, and the RPC service) are optional and included for the sake of convenience.
@ -186,10 +197,8 @@ This tutorial assumes basic knowledge of the [command line](https://www.learneno
- [ethstaker discord](https://discord.com/invite/e84CFep): great place for tips and discussions
- [Validator launchpad](https://spadina.launchpad.ethereum.org): to send Spadina deposits
- [Validator launchpad](https://zinken.launchpad.ethereum.org): to send Zinken deposits
- [Beacon chain explorer](https://spadina.beaconcha.in/) : to monitor testnet health
- [Reddit /r/ethstaker community](https://www.reddit.com/r/ethstaker/comments/iy3c6t/announcing_the_spadina_testnet_launch_viewing/) : to attend the launch viewing party (rumour has it that there will be a POAP for viewing the live stream of this call)
- [Beacon chain explorer](https://zinken.beaconcha.in/) : to monitor testnet health
- [Nimbus discord](https://discord.com/invite/XRxWahP) : best place to ask questions and to stay up-to-date with critical updates

View File

@ -54,7 +54,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
db = BeaconChainDB.init(kvStore SqStoreRef.init(".", "block_sim").tryGet())
defer: db.close()
ChainDAGRef.preInit(db, state[].data, genesisBlock)
ChainDAGRef.preInit(db, state[].data, state[].data, genesisBlock)
var
chainDag = init(ChainDAGRef, defaultRuntimePreset, db)

View File

@ -161,6 +161,8 @@ if [[ "$REUSE_EXISTING_DATA_DIR" == "0" ]]; then
rm -rf "${DATA_DIR}"
fi
mkdir -m 0750 -p "${DATA_DIR}"
DEPOSITS_FILE="${DATA_DIR}/deposits.json"
VALIDATORS_DIR="${DATA_DIR}/validators"
@ -211,6 +213,7 @@ if [[ $USE_GANACHE == "0" ]]; then
BOOTSTRAP_IP="127.0.0.1"
./build/beacon_node createTestnet \
--data-dir="${DATA_DIR}" \
--deposits-file="${DEPOSITS_FILE}" \
--total-validators=${TOTAL_VALIDATORS} \
--last-user-validator=${USER_VALIDATORS} \
@ -218,9 +221,11 @@ if [[ $USE_GANACHE == "0" ]]; then
--output-bootstrap-file="${NETWORK_DIR}/bootstrap_nodes.txt" \
--bootstrap-address=${BOOTSTRAP_IP} \
--bootstrap-port=${BASE_PORT} \
--netkey-file=network_key.json \
--insecure-netkey-password=true \
--genesis-offset=${GENESIS_OFFSET} # Delay in seconds
STATE_SNAPSHOT_ARG="--state-snapshot=${NETWORK_DIR}/genesis.ssz"
STATE_SNAPSHOT_ARG="--finalized-checkpoint-state=${NETWORK_DIR}/genesis.ssz"
else
echo "Launching ganache"
ganache-cli --blockTime 17 --gasLimit 100000000 -e 100000 --verbose > "${DATA_DIR}/log_ganache.txt" 2>&1 &
@ -312,10 +317,11 @@ fi
VALIDATORS_PER_VALIDATOR=$(( (SYSTEM_VALIDATORS / NODES_WITH_VALIDATORS) / 2 ))
VALIDATOR_OFFSET=$((SYSTEM_VALIDATORS / 2))
BOOTSTRAP_ENR="${DATA_DIR}/node${BOOTSTRAP_NODE}/beacon_node.enr"
NETWORK_KEYFILE="../network_key.json"
for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
if [[ ${NUM_NODE} == ${BOOTSTRAP_NODE} ]]; then
BOOTSTRAP_ARG=""
BOOTSTRAP_ARG="--netkey-file=${NETWORK_KEYFILE} --insecure-netkey-password=true"
else
BOOTSTRAP_ARG="--bootstrap-file=${BOOTSTRAP_ENR}"
# Wait for the master node to write out its address file
@ -335,6 +341,7 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
# The first $NODES_WITH_VALIDATORS nodes split them equally between them, after skipping the first $USER_VALIDATORS.
NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}"
rm -rf "${NODE_DATA_DIR}"
mkdir -m 0750 -p "${NODE_DATA_DIR}"
mkdir -p "${NODE_DATA_DIR}/validators"
mkdir -p "${NODE_DATA_DIR}/secrets"

View File

@ -0,0 +1,29 @@
import os except dirExists
import
sequtils, strformat,
confutils, testutils/fuzzing_engines
const
gitRoot = thisDir() / ".."
fuzzingTestsDir = gitRoot / "tests" / "fuzzing"
cli do (testname {.argument.}: string,
fuzzer = defaultFuzzingEngine):
let fuzzingTestDir = fuzzingTestsDir / testname
if not dirExists(fuzzingTestDir):
echo "Cannot find a fuzz test directory named '", testname, "' in ", fuzzingTestsDir
quit 1
let nimFiles = listFiles(fuzzingTestDir).filterIt(splitFile(it).ext == ".nim")
if nimFiles.len != 1:
echo "The fuzzing test dir '" & fuzzingTestDir & "' should contain exactly one Nim file"
quit 1
let
corpusDir = fuzzingTestDir / "corpus"
testProgram = nimFiles[0]
exec &"""ntu fuzz --fuzzer={fuzzer} --corpus="{corpusDir}" "{testProgram}" """

View File

@ -9,8 +9,6 @@ const
fuzzingTestsDir = gitRoot / "tests" / "fuzzing"
fuzzingCorpusesDir = fuzzingTestsDir / "corpus"
fuzzNims = gitRoot / "vendor" / "nim-testutils" / "testutils" / "fuzzing" / "fuzz.nims"
cli do (testname {.argument.}: string,
fuzzer = defaultFuzzingEngine):

View File

@ -0,0 +1 @@
--help

View File

@ -0,0 +1 @@
deposits create --network=spadina --new-wallet-file=build/data/shared_spadina_0/wallet.json --out-validators-dir=build/data/shared_spadina_0/validators --out-secrets-dir=build/data/shared_spadina_0/secrets --out-deposits-file=spadina-deposits_data-20201001212925.json --count=1

View File

@ -0,0 +1 @@
--network=spadina --log-level=INFO --log-file=build/data/shared_spadina_0/nbc_bn_20201001212647.log --data-dir=build/data/shared_spadina_0 --web3-url=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a --tcp-port=9000 --udp-port=9000 --metrics --metrics-port=8008 --rpc --rpc-port=9190

View File

@ -0,0 +1 @@
--version

View File

@ -0,0 +1 @@
wallets create --name:"my wallet" --next-account:10 --out:/tmp/wallet.json

View File

@ -0,0 +1 @@
wallets restore --name:"some wallet name" --deposits:10 --out:"some wallet name.json"

View File

@ -0,0 +1,10 @@
import
# TODO These imports shouldn't be necessary here
# (this is a variation of the sandwich problem)
stew/shims/net, chronicles,
confutils/cli_parsing_fuzzer,
../../../beacon_chain/conf, ../../../beacon_chain/spec/network
fuzzCliParsing BeaconNodeConf

View File

@ -1,3 +1,5 @@
# clang complains that -flto=auto is not a supported option when creating libFuzzer builds
-d:disableLTO
-d:ssz_testing
-d:"const_preset=mainnet"

View File

@ -29,6 +29,8 @@ BOOTSTRAP_ADDRESS_FILE="${SIMULATION_DIR}/node-${BOOTSTRAP_NODE_ID}/beacon_node.
if [[ "$NODE_ID" != "$BOOTSTRAP_NODE" ]]; then
BOOTSTRAP_ARG="--bootstrap-file=$BOOTSTRAP_ADDRESS_FILE"
else
BOOTSTRAP_ARG="--netkey-file=network_key.json --insecure-netkey-password"
fi
# set up the environment
@ -48,11 +50,13 @@ if [ "${NAT:-}" == "1" ]; then
NAT_ARG="--nat:any"
fi
mkdir -m 0700 -p "$NODE_DATA_DIR"
rm -rf "$NODE_VALIDATORS_DIR"
mkdir -p "$NODE_VALIDATORS_DIR"
mkdir -m 0700 "$NODE_VALIDATORS_DIR"
rm -rf "$NODE_SECRETS_DIR"
mkdir -p "$NODE_SECRETS_DIR"
mkdir -m 0700 "$NODE_SECRETS_DIR"
VALIDATORS_PER_NODE=$(( NUM_VALIDATORS / (TOTAL_NODES - 1) ))
if [ "${USE_BN_VC_VALIDATOR_SPLIT:-}" == "yes" ]; then
@ -72,11 +76,11 @@ if [[ $NODE_ID -lt $BOOTSTRAP_NODE ]]; then
fi
rm -rf "$NODE_DATA_DIR/dump"
mkdir -p "$NODE_DATA_DIR/dump"
mkdir -m 0700 "$NODE_DATA_DIR/dump"
SNAPSHOT_ARG=""
if [ -f "${SNAPSHOT_FILE}" ]; then
SNAPSHOT_ARG="--state-snapshot=${SNAPSHOT_FILE}"
SNAPSHOT_ARG="--finalized-checkpoint-state=${SNAPSHOT_FILE}"
fi
cd "$NODE_DATA_DIR"

View File

@ -143,6 +143,8 @@ if [ ! -f "${SNAPSHOT_FILE}" ]; then
--output-bootstrap-file="${NETWORK_BOOTSTRAP_FILE}" \
--bootstrap-address=127.0.0.1 \
--bootstrap-port=$(( BASE_P2P_PORT + BOOTSTRAP_NODE )) \
--netkey-file=network_key.json \
--insecure-netkey-password=true \
--genesis-offset=30 # Delay in seconds
fi
fi

View File

@ -20,8 +20,6 @@ import
# Test utilies
../testutil
static: doAssert UseSlashingProtection, "The test was compiled without slashing protection, pass -d:UseSlashingProtection=true"
template wrappedTimedTest(name: string, body: untyped) =
# `check` macro takes a copy of whatever it's checking, on the stack!
block: # Symbol namespacing

View File

@ -19,8 +19,6 @@ import
# Test utilies
../testutil
static: doAssert UseSlashingProtection, "The test was compiled without slashing protection, pass -d:UseSlashingProtection=true"
template wrappedTimedTest(name: string, body: untyped) =
# `check` macro takes a copy of whatever it's checking, on the stack!
block: # Symbol namespacing

View File

@ -10,6 +10,8 @@
import
json, unittest,
stew/byteutils, blscurve, eth/keys, json_serialization,
libp2p/crypto/crypto as lcrypto,
nimcrypto/utils as ncrutils,
../beacon_chain/spec/[crypto, keystore],
./testutil
@ -89,38 +91,121 @@ const
"version": 4
}"""
pbkdf2NetVector = """{
"crypto":{
"kdf":{
"function":"pbkdf2",
"params":{
"dklen":32,
"c":262144,
"prf":"hmac-sha256",
"salt":"d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
},
"message":""
},
"checksum":{
"function":"sha256",
"params":{
},
"message":"3aaebceb5e81cce464d62287414befaa03522eb8f56cad4296c0dc9301e5f091"
},
"cipher":{
"function":"aes-128-ctr",
"params":{
"iv":"264daa3f303d7259501c93d997d84fe6"
},
"message":"c6e22dfed4aec458af6e46efff72937972a9360a8b4dc32c8c266de73a90b421d8892db3"
}
},
"description":"PBKDF2 Network private key storage",
"pubkey":"08021221031873e6f4e1bf837b93493d570653cb219743d4fab0ff468d4e005e1679730b0b",
"uuid":"7a053160-1cdf-4faf-a2bb-331e1bc2eb5f",
"version":1
}"""
scryptNetVector = """{
"crypto":{
"kdf":{
"function":"scrypt",
"params":{
"dklen":32,
"n":262144,
"p":1,
"r":8,
"salt":"d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
},
"message":""
},
"checksum":{
"function":"sha256",
"params":{
},
"message":"9a7d03a3f2107a11b6e34a081fb13d551012ff081efb81fc94ec114381fa707f"
},
"cipher":{
"function":"aes-128-ctr",
"params":{
"iv":"264daa3f303d7259501c93d997d84fe6"
},
"message":"0eac82f5a1bd53f81df688970ffeea8425ad7b8f877bcba5a74b87f090c340836cd52095"
}
},
"description":"SCRYPT Network private key storage",
"pubkey":"08021221031873e6f4e1bf837b93493d570653cb219743d4fab0ff468d4e005e1679730b0b",
"uuid":"83d77fa3-86cb-466a-af11-eeb338b0e258",
"version":1
}"""
password = string.fromBytes hexToSeqByte("7465737470617373776f7264f09f9491")
secretBytes = hexToSeqByte "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
secretNetBytes = hexToSeqByte "08021220fe442379443d6e2d7d75d3a58f96fbb35f0a9c7217796825fc9040e3b89c5736"
salt = hexToSeqByte "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
iv = hexToSeqByte "264daa3f303d7259501c93d997d84fe6"
let
rng = newRng()
rng = keys.newRng()
suiteReport "Keystore":
suiteReport "KeyStorage testing suite":
setup:
let secret = ValidatorPrivKey.fromRaw(secretBytes).get
let nsecret = init(lcrypto.PrivateKey, secretNetBytes).get
timedTest "Pbkdf2 decryption":
timedTest "[PBKDF2] Keystore decryption":
let
keystore = Json.decode(pbkdf2Vector, Keystore)
decrypt = decryptKeystore(keystore, KeystorePass password)
decrypt = decryptKeystore(keystore, KeystorePass.init password)
check decrypt.isOk
check secret.isEqual(decrypt.get())
timedTest "Scrypt decryption":
timedTest "[SCRYPT] Keystore decryption":
let
keystore = Json.decode(scryptVector, Keystore)
decrypt = decryptKeystore(keystore, KeystorePass password)
decrypt = decryptKeystore(keystore, KeystorePass.init password)
check decrypt.isOk
check secret.isEqual(decrypt.get())
timedTest "Pbkdf2 encryption":
timedTest "[PBKDF2] Network Keystore decryption":
let
keystore = Json.decode(pbkdf2NetVector, NetKeystore)
decrypt = decryptNetKeystore(keystore, KeystorePass.init password)
check decrypt.isOk
check nsecret == decrypt.get()
timedTest "[SCRYPT] Network Keystore decryption":
let
keystore = Json.decode(scryptNetVector, NetKeystore)
decrypt = decryptNetKeystore(keystore, KeystorePass.init password)
check decrypt.isOk
check nsecret == decrypt.get()
timedTest "[PBKDF2] Keystore encryption":
let keystore = createKeystore(kdfPbkdf2, rng[], secret,
KeystorePass password,
KeystorePass.init password,
salt=salt, iv=iv,
description = "This is a test keystore that uses PBKDF2 to secure the secret.",
path = validateKeyPath("m/12381/60/0/0").expect("Valid Keypath"))
@ -132,9 +217,22 @@ suiteReport "Keystore":
check encryptJson == pbkdf2Json
timedTest "Scrypt encryption":
timedTest "[PBKDF2] Network Keystore encryption":
let nkeystore = createNetKeystore(kdfPbkdf2, rng[], nsecret,
KeystorePass.init password,
salt = salt, iv = iv,
description =
"PBKDF2 Network private key storage")
var
encryptJson = parseJson Json.encode(nkeystore)
pbkdf2Json = parseJson(pbkdf2NetVector)
encryptJson{"uuid"} = %""
pbkdf2Json{"uuid"} = %""
check encryptJson == pbkdf2Json
timedTest "[SCRYPT] Keystore encryption":
let keystore = createKeystore(kdfScrypt, rng[], secret,
KeystorePass password,
KeystorePass.init password,
salt=salt, iv=iv,
description = "This is a test keystore that uses scrypt to secure the secret.",
path = validateKeyPath("m/12381/60/3141592653/589793238").expect("Valid keypath"))
@ -146,6 +244,19 @@ suiteReport "Keystore":
check encryptJson == scryptJson
timedTest "[SCRYPT] Network Keystore encryption":
let nkeystore = createNetKeystore(kdfScrypt, rng[], nsecret,
KeystorePass.init password,
salt = salt, iv = iv,
description =
"SCRYPT Network private key storage")
var
encryptJson = parseJson Json.encode(nkeystore)
pbkdf2Json = parseJson(scryptNetVector)
encryptJson{"uuid"} = %""
pbkdf2Json{"uuid"} = %""
check encryptJson == pbkdf2Json
timedTest "Pbkdf2 errors":
expect Defect:
echo createKeystore(kdfPbkdf2, rng[], secret, salt = [byte 1])
@ -154,20 +265,20 @@ suiteReport "Keystore":
echo createKeystore(kdfPbkdf2, rng[], secret, iv = [byte 1])
check decryptKeystore(JsonString pbkdf2Vector,
KeystorePass "wrong pass").isErr
KeystorePass.init "wrong pass").isErr
check decryptKeystore(JsonString pbkdf2Vector,
KeystorePass "").isErr
KeystorePass.init "").isErr
check decryptKeystore(JsonString "{\"a\": 0}",
KeystorePass "").isErr
KeystorePass.init "").isErr
check decryptKeystore(JsonString "",
KeystorePass "").isErr
KeystorePass.init "").isErr
template checkVariant(remove): untyped =
check decryptKeystore(JsonString pbkdf2Vector.replace(remove, "1234"),
KeystorePass password).isErr
KeystorePass.init password).isErr
checkVariant "f876" # salt
checkVariant "75ea" # checksum
@ -177,4 +288,4 @@ suiteReport "Keystore":
badKdf{"crypto", "kdf", "function"} = %"invalid"
check decryptKeystore(JsonString $badKdf,
KeystorePass password).iserr
KeystorePass.init password).iserr

View File

@ -99,7 +99,7 @@ template timedTest*(name, body) =
proc makeTestDB*(tailState: BeaconState, tailBlock: SignedBeaconBlock): BeaconChainDB =
result = init(BeaconChainDB, kvStore MemStoreRef.init())
ChainDAGRef.preInit(result, tailState, tailBlock)
ChainDAGRef.preInit(result, tailState, tailState, tailBlock)
proc makeTestDB*(validators: Natural): BeaconChainDB =
let

@ -1 +1 @@
Subproject commit f04512f15adccfc01b68f15c6216d681d08a1f83
Subproject commit d7cada91d64fea01adf9c932353ca95789da0b6a

@ -1 +1 @@
Subproject commit b60f70718f8039c5c86dfc2a4680d8c1e37cbce2
Subproject commit b9a7c6bb0733ce8c76f0ac8c7889c5e48e7c924f

@ -1 +1 @@
Subproject commit 2e8040ec5e6e5416846e008b4d455bd026394fb0
Subproject commit 5c81aab54d00bb4cc0921fc9d3ace828b1fcc09f

2
vendor/nim-eth vendored

@ -1 +1 @@
Subproject commit 3ddb498f2a41e1e470d780757faeedc0b8cb3a21
Subproject commit de2d43a7e7afb7b094ca251bdbbd58fbf47df031

2
vendor/nim-json-rpc vendored

@ -1 +1 @@
Subproject commit 6406c96b27b23fc270be03e4f0e4db7412adbb9c
Subproject commit 99455437ba3d83d5af1c38007fedeeff295e959e

2
vendor/nim-libp2p vendored

@ -1 +1 @@
Subproject commit 8ecef46738bd01ecf68a4cfb744c1f777cdfb4bb
Subproject commit 98d82fce5c0e18cc1c1be3e95635fd6ad3b15d7f

@ -1 +1 @@
Subproject commit 76219157a0afda125de9778efe485c6da627e718
Subproject commit 18828a7da26a10fff25b12be0a6581ca0f61d49b

1
vendor/nim-normalize vendored Submodule

@ -0,0 +1 @@
Subproject commit db9a74ad6a301f991c477fc2d90894957f640654

2
vendor/nim-stew vendored

@ -1 +1 @@
Subproject commit 47ff49aae7fdcf9fb7b4af44c6ab2377f04c731e
Subproject commit 529517d84837d8848dde769eea4d93a1a657a279

@ -1 +1 @@
Subproject commit 61e5e1ec817cc73fc43585acae4def287180e78e
Subproject commit cc5d6e46123e0cf5dfd14f5fc32f0d6f58a20645

@ -1 +1 @@
Subproject commit 79469de15653d40fc2751f984cf52d0ef6084e7a
Subproject commit 4b662df1e95b5bebac43b6d2faa297f135bc90d5

2
vendor/nimcrypto vendored

@ -1 +1 @@
Subproject commit a95a27e7b5e1ac6083fe5115971027b79e15b494
Subproject commit a065c1741836462762d18d2fced1fedd46095b02