Use a separate process for the private keys (off by default). A new signing_process binary loads all of the beacon node's validators; the beacon node dictates, through the signing process's stdin, what is to be signed and when, and reads the resulting signatures back from the process's stdout.
parent 96e1a5d70e
commit bb83817c2a
Makefile (23 changed lines)
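For orientation, here is a minimal sketch, from the parent's side, of the stdin/stdout protocol this commit introduces. It is not part of the commit: the binary path, the validators/secrets directories and the all-zero digest are placeholder assumptions, and in the real code the beacon node drives the same exchange through addRemoteValidators/signWithRemoteValidator shown in the diff below.

  import os, osproc, streams, strutils

  when isMainModule:
    # start the signer: signing_process <validatorsDir> <secretsDir>
    let p = startProcess("build/signing_process", getCurrentDir(),
                         ["build/data/validators", "build/data/secrets"])
    # phase 1: the child prints every loaded public key, one per line, then "end"
    var pubkeys: seq[string]
    var line = ""
    while p.outputStream.readLine(line) and line != "end":
      pubkeys.add line
    # phase 2: each request is "<pubkey> <eth2digest_to_sign>" on the child's stdin;
    # the reply is one line containing the hex-encoded BLS signature
    if pubkeys.len > 0:
      let digest = "00".repeat(32)  # placeholder 32-byte digest in hex
      p.inputStream.writeLine(pubkeys[0] & " " & digest)
      p.inputStream.flush()
      discard p.outputStream.readLine(line)
      echo "signature: ", line
    p.close()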
@@ -45,7 +45,8 @@ TOOLS := \
 	process_dashboard \
 	stack_sizes \
 	state_sim \
-	validator_client
+	validator_client \
+	signing_process
 
 	# bench_bls_sig_agggregation TODO reenable after bls v0.10.1 changes
 
@@ -173,14 +174,14 @@ clean-testnet0:
 clean-testnet1:
 	rm -rf build/data/testnet1*
 
-testnet0 testnet1: | beacon_node
+testnet0 testnet1: | beacon_node signing_process
 	build/beacon_node \
 		--network=$@ \
 		--log-level="$(LOG_LEVEL)" \
 		--data-dir=build/data/$@_$(NODE_ID) \
 		$(GOERLI_TESTNETS_PARAMS) $(NODE_PARAMS)
 
-medalla: | beacon_node
+medalla: | beacon_node signing_process
 	mkdir -p build/data/shared_medalla_$(NODE_ID)
 
 	scripts/make_prometheus_config.sh \
@@ -195,7 +196,7 @@ medalla: | beacon_node
 		--data-dir=build/data/shared_medalla_$(NODE_ID) \
 		$(GOERLI_TESTNETS_PARAMS) $(NODE_PARAMS)
 
-medalla-vc: | beacon_node validator_client
+medalla-vc: | beacon_node signing_process validator_client
 	# if launching a VC as well - send the BN looking nowhere for validators/secrets
 	mkdir -p build/data/shared_medalla_$(NODE_ID)/empty_dummy_folder
 
@@ -225,7 +226,7 @@ ifneq ($(LOG_LEVEL), TRACE)
 medalla-dev:
 	+ $(MAKE) LOG_LEVEL=TRACE $@
 else
-medalla-dev: | beacon_node
+medalla-dev: | beacon_node signing_process
 	mkdir -p build/data/shared_medalla_$(NODE_ID)
 
 	scripts/make_prometheus_config.sh \
@@ -240,7 +241,7 @@ medalla-dev: | beacon_node
 		$(GOERLI_TESTNETS_PARAMS) --dump $(NODE_PARAMS)
 endif
 
-medalla-deposit-data: | beacon_node deposit_contract
+medalla-deposit-data: | beacon_node signing_process deposit_contract
 	build/beacon_node deposits create \
 		--network=medalla \
 		--new-wallet-file=build/data/shared_medalla_$(NODE_ID)/wallet.json \
@@ -249,7 +250,7 @@ medalla-deposit-data: | beacon_node deposit_contract
 		--out-deposits-file=medalla-deposits_data-$$(date +"%Y%m%d%H%M%S").json \
 		--count=$(VALIDATORS)
 
-medalla-deposit: | beacon_node deposit_contract
+medalla-deposit: | beacon_node signing_process deposit_contract
 	build/beacon_node deposits create \
 		--network=medalla \
 		--out-deposits-file=nbc-medalla-deposits.json \
@@ -270,7 +271,7 @@ clean-medalla:
 	rm -rf build/data/shared_medalla*/dump
 	rm -rf build/data/shared_medalla*/*.log
 
-altona: | beacon_node
+altona: | beacon_node signing_process
 	$(CPU_LIMIT_CMD) build/beacon_node \
 		--network=altona \
 		--log-level="$(LOG_LEVEL)" \
@@ -278,7 +279,7 @@ altona: | beacon_node
 		--data-dir=build/data/shared_altona_$(NODE_ID) \
 		$(GOERLI_TESTNETS_PARAMS) $(NODE_PARAMS)
 
-altona-vc: | beacon_node validator_client
+altona-vc: | beacon_node signing_process validator_client
 	# if launching a VC as well - send the BN looking nowhere for validators/secrets
 	mkdir -p build/data/shared_altona_$(NODE_ID)/empty_dummy_folder
 	$(CPU_LIMIT_CMD) build/beacon_node \
@@ -296,14 +297,14 @@ altona-vc: | beacon_node validator_client
 		--data-dir=build/data/shared_altona_$(NODE_ID) \
 		--rpc-port=$$(( $(BASE_RPC_PORT) +$(NODE_ID) ))
 
-altona-dev: | beacon_node
+altona-dev: | beacon_node signing_process
 	$(CPU_LIMIT_CMD) build/beacon_node \
 		--network=altona \
 		--log-level="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" \
 		--data-dir=build/data/shared_altona_$(NODE_ID) \
 		$(GOERLI_TESTNETS_PARAMS) --dump $(NODE_PARAMS)
 
-altona-deposit: | beacon_node deposit_contract
+altona-deposit: | beacon_node signing_process deposit_contract
 	build/beacon_node deposits create \
 		--network=altona \
 		--out-deposits-file=nbc-altona-deposits.json \
@@ -8,7 +8,7 @@
 {.push raises: [Defect].}
 
 import
-  options, chronicles,
+  options, chronos, chronicles,
   ./spec/[
     beaconstate, datatypes, crypto, digest, helpers, network, validator,
     signatures],
@@ -32,13 +32,10 @@ func is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex,
 
 proc aggregate_attestations*(
     pool: AttestationPool, state: BeaconState, index: CommitteeIndex,
-    validatorIndex: ValidatorIndex, privkey: ValidatorPrivKey,
-    cache: var StateCache):
-    Option[AggregateAndProof] =
+    validatorIndex: ValidatorIndex, slot_signature: ValidatorSig,
+    cache: var StateCache): Option[AggregateAndProof] =
   let
     slot = state.slot
-    slot_signature = get_slot_signature(
-      state.fork, state.genesis_validators_root, slot, privkey)
 
   doAssert validatorIndex in get_beacon_committee(state, slot, index, cache)
   doAssert index.uint64 < get_committee_count_per_slot(state, slot.epoch, cache)
@@ -8,7 +8,7 @@
 import
   # Standard library
   std/[algorithm, os, tables, strutils, sequtils, times, math, terminal],
-  std/random,
+  std/[osproc, random],
 
   # Nimble packages
   stew/[objects, byteutils, endians2], stew/shims/macros,
@@ -277,7 +277,13 @@ proc init*(T: type BeaconNode,
   res.requestManager = RequestManager.init(
     network, res.processor.blocksQueue)
 
-  res.addLocalValidators()
+  if res.config.inProcessValidators:
+    res.addLocalValidators()
+  else:
+    res.vcProcess = startProcess(getAppDir() & "/signing_process".addFileExt(ExeExt),
+                                 getCurrentDir(), [$res.config.validatorsDir,
+                                                   $res.config.secretsDir])
+    res.addRemoteValidators()
 
   # This merely configures the BeaconSync
   # The traffic will be started when we join the network.
@@ -787,6 +793,8 @@ proc removeMessageHandlers(node: BeaconNode) =
 proc stop*(node: BeaconNode) =
   status = BeaconNodeStatus.Stopping
   info "Graceful shutdown"
+  if not node.config.inProcessValidators:
+    node.vcProcess.close()
   waitFor node.network.stop()
 
 proc run*(node: BeaconNode) =
@@ -9,7 +9,7 @@
 
 import
   # Standard library
-  tables,
+  tables, osproc,
 
   # Nimble packages
   chronos, json_rpc/rpcserver, metrics,
@@ -46,6 +46,7 @@ type
     mainchainMonitor*: MainchainMonitor
     beaconClock*: BeaconClock
     rpcServer*: RpcServer
+    vcProcess*: Process
    forkDigest*: ForkDigest
     requestManager*: RequestManager
     syncManager*: SyncManager[Peer, PeerID]
@@ -1,7 +1,7 @@
 {.push raises: [Defect].}
 
 import
-  deques, tables,
+  deques, tables, streams,
  stew/endians2,
   spec/[datatypes, crypto],
   block_pools/block_pools_types,
@@ -74,6 +74,9 @@ type
     remote
 
   ValidatorConnection* = object
+    inStream*: Stream
+    outStream*: Stream
+    pubKeyStr*: string
 
   AttachedValidator* = ref object
     pubKey*: ValidatorPubKey
@@ -204,6 +204,11 @@ type
       desc: "Listening address of the RPC server"
       name: "rpc-address" }: ValidIpAddress
 
+    inProcessValidators* {.
+      defaultValue: true # the use of the signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process.
+      desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself"
+      name: "in-process-validators" }: bool
+
     discv5Enabled* {.
       defaultValue: true
       desc: "Enable Discovery v5"
@@ -356,10 +361,24 @@ type
       desc: "Do not display interative prompts. Quit on missing configuration"
       name: "non-interactive" }: bool
 
+    validators* {.
+      required
+      desc: "Attach a validator by supplying a keystore path"
+      abbr: "v"
+      name: "validator" }: seq[ValidatorKeyPath]
+
+    validatorsDirFlag* {.
+      desc: "A directory containing validator keystores"
+      name: "validators-dir" }: Option[InputDir]
+
+    secretsDirFlag* {.
+      desc: "A directory containing validator keystore passwords"
+      name: "secrets-dir" }: Option[InputDir]
+
     case cmd* {.
       command
       defaultValue: VCNoCommand }: VCStartUpCmd
 
     of VCNoCommand:
       graffiti* {.
         desc: "The graffiti value that will appear in proposed blocks. " &
@@ -373,27 +392,18 @@ type
 
       rpcPort* {.
         defaultValue: defaultEth2RpcPort
-        desc: "HTTP port of the server to connect to for RPC"
+        desc: "HTTP port of the server to connect to for RPC - for the validator duties in the pull model"
         name: "rpc-port" }: Port
 
       rpcAddress* {.
         defaultValue: defaultAdminListenAddress(config)
-        desc: "Address of the server to connect to for RPC"
+        desc: "Address of the server to connect to for RPC - for the validator duties in the pull model"
         name: "rpc-address" }: ValidIpAddress
 
-      validators* {.
-        required
-        desc: "Attach a validator by supplying a keystore path"
-        abbr: "v"
-        name: "validator" }: seq[ValidatorKeyPath]
-
-      validatorsDirFlag* {.
-        desc: "A directory containing validator keystores"
-        name: "validators-dir" }: Option[InputDir]
-
-      secretsDirFlag* {.
-        desc: "A directory containing validator keystore passwords"
-        name: "secrets-dir" }: Option[InputDir]
+      retryDelay* {.
+        defaultValue: 10
+        desc: "Delay in seconds between retries after unsuccessful attempts to connect to a beacon node"
+        name: "retry-delay" }: int
 
 proc defaultDataDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
   let dataDir = when defined(windows):
@@ -451,7 +461,7 @@ func validatorsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
 func secretsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
   string conf.secretsDirFlag.get(InputDir(conf.dataDir / "secrets"))
 
-func walletsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
+func walletsDir*(conf: BeaconNodeConf): string =
   if conf.walletsDirFlag.isSome:
     conf.walletsDirFlag.get.string
   else:
@@ -22,8 +22,8 @@ type
     walletPath*: WalletPathPair
     mnemonic*: Mnemonic
 
-proc loadKeystore(conf: BeaconNodeConf|ValidatorClientConf,
-                  validatorsDir, keyName: string): Option[ValidatorPrivKey] =
+proc loadKeystore(validatorsDir, secretsDir, keyName: string,
+                  nonInteractive: bool): Option[ValidatorPrivKey] =
   let
     keystorePath = validatorsDir / keyName / keystoreFileName
     keystore =
@@ -35,7 +35,7 @@ proc loadKeystore(conf: BeaconNodeConf|ValidatorClientConf,
       error "Invalid keystore", err = err.formatMsg(keystorePath)
       return
 
-  let passphrasePath = conf.secretsDir / keyName
+  let passphrasePath = secretsDir / keyName
   if fileExists(passphrasePath):
     let
       passphrase = KeystorePass:
@@ -51,9 +51,9 @@ proc loadKeystore(conf: BeaconNodeConf|ValidatorClientConf,
       error "Failed to decrypt keystore", keystorePath, passphrasePath
       return
 
-  if conf.nonInteractive:
+  if nonInteractive:
     error "Unable to load validator key store. Please ensure matching passphrase exists in the secrets dir",
-      keyName, validatorsDir, secretsDir = conf.secretsDir
+      keyName, validatorsDir, secretsDir = secretsDir
     return
 
   var remainingAttempts = 3
@@ -72,6 +72,19 @@ proc loadKeystore(conf: BeaconNodeConf|ValidatorClientConf,
       prompt = "Keystore decryption failed. Please try again"
       dec remainingAttempts
 
+iterator validatorKeysFromDirs*(validatorsDir, secretsDir: string): ValidatorPrivKey =
+  try:
+    for kind, file in walkDir(validatorsDir):
+      if kind == pcDir:
+        let keyName = splitFile(file).name
+        let key = loadKeystore(validatorsDir, secretsDir, keyName, true)
+        if key.isSome:
+          yield key.get
+        else:
+          quit 1
+  except OSError:
+    quit 1
+
 iterator validatorKeys*(conf: BeaconNodeConf|ValidatorClientConf): ValidatorPrivKey =
   for validatorKeyFile in conf.validators:
     try:
@@ -86,7 +99,7 @@ iterator validatorKeys*(conf: BeaconNodeConf|ValidatorClientConf): ValidatorPrivKey =
     for kind, file in walkDir(validatorsDir):
       if kind == pcDir:
         let keyName = splitFile(file).name
-        let key = loadKeystore(conf, validatorsDir, keyName)
+        let key = loadKeystore(validatorsDir, conf.secretsDir, keyName, conf.nonInteractive)
         if key.isSome:
           yield key.get
         else:
@@ -0,0 +1,32 @@
+# beacon_chain
+# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import
+  # Standard library
+  os, strutils, tables,
+
+  # Local modules
+  spec/[digest, crypto],
+  keystore_management
+
+programMain:
+  var validators: Table[ValidatorPubKey, ValidatorPrivKey]
+  # load and send all public keys so the BN knows for which ones to ping us
+  doAssert paramCount() == 2
+  for curr in validatorKeysFromDirs(paramStr(1), paramStr(2)):
+    validators[curr.toPubKey.initPubKey] = curr
+    echo curr.toPubKey
+  echo "end"
+
+  # simple format: `<pubkey> <eth2digest_to_sign>` => `<signature>`
+  while true:
+    let args = stdin.readLine.split(" ")
+    doAssert args.len == 2
+
+    let privKey = validators[ValidatorPubKey.fromHex(args[0]).get().initPubKey()]
+
+    echo blsSign(privKey, Eth2Digest.fromHex(args[1]).data)
@@ -108,6 +108,9 @@ proc toRealPubKey(pubkey: ValidatorPubKey): Option[ValidatorPubKey] =
     none ValidatorPubKey
   return validatorKeyCache.mGetOrPut(pubkey.blob, maybeRealKey)
 
+# TODO this needs a massive comment explaining the reasoning along with every
+# seemingly ad-hoc place where it's called - one shouldn't have to git-blame
+# commits and PRs for information which ought to be inplace here in the code
 proc initPubKey*(pubkey: ValidatorPubKey): ValidatorPubKey =
   let key = toRealPubKey(pubkey)
   if key.isNone:
@@ -17,17 +17,20 @@ template withTrust(sig: SomeSig, body: untyped): bool =
   else:
     body
 
-# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#aggregation-selection
-func get_slot_signature*(
-    fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
-    privkey: ValidatorPrivKey): ValidatorSig =
+func compute_slot_root*(
+    fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot
+    ): Eth2Digest =
   let
     epoch = compute_epoch_at_slot(slot)
     domain = get_domain(
       fork, DOMAIN_SELECTION_PROOF, epoch, genesis_validators_root)
-    signing_root = compute_signing_root(slot, domain)
+  result = compute_signing_root(slot, domain)
 
-  blsSign(privKey, signing_root.data)
+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#aggregation-selection
+func get_slot_signature*(
+    fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
+    privkey: ValidatorPrivKey): ValidatorSig =
+  blsSign(privKey, compute_slot_root(fork, genesis_validators_root, slot).data)
 
 proc verify_slot_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
@@ -41,15 +44,18 @@ proc verify_slot_signature*(
 
   blsVerify(pubkey, signing_root.data, signature)
 
+func compute_epoch_root*(
+    fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch
+    ): Eth2Digest =
+  let
+    domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
+  result = compute_signing_root(epoch, domain)
+
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#randao-reveal
 func get_epoch_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
     privkey: ValidatorPrivKey): ValidatorSig =
-  let
-    domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
-    signing_root = compute_signing_root(epoch, domain)
-
-  blsSign(privKey, signing_root.data)
+  blsSign(privKey, compute_epoch_root(fork, genesis_validators_root, epoch).data)
 
 proc verify_epoch_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
@@ -61,17 +67,20 @@ proc verify_epoch_signature*(
 
   blsVerify(pubkey, signing_root.data, signature)
 
-# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#signature
-func get_block_signature*(
+func compute_block_root*(
     fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
-    root: Eth2Digest, privkey: ValidatorPrivKey): ValidatorSig =
+    root: Eth2Digest): Eth2Digest =
   let
     epoch = compute_epoch_at_slot(slot)
     domain = get_domain(
       fork, DOMAIN_BEACON_PROPOSER, epoch, genesis_validators_root)
-    signing_root = compute_signing_root(root, domain)
+  result = compute_signing_root(root, domain)
 
-  blsSign(privKey, signing_root.data)
+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#signature
+func get_block_signature*(
+    fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
+    root: Eth2Digest, privkey: ValidatorPrivKey): ValidatorSig =
+  blsSign(privKey, compute_block_root(fork, genesis_validators_root, slot, root).data)
 
 proc verify_block_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
@@ -87,17 +96,21 @@ proc verify_block_signature*(
 
   blsVerify(pubKey, signing_root.data, signature)
 
-# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#broadcast-aggregate
-func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
-                                        aggregate_and_proof: AggregateAndProof,
-                                        privKey: ValidatorPrivKey): ValidatorSig =
+func compute_aggregate_and_proof_root*(fork: Fork, genesis_validators_root: Eth2Digest,
+                                       aggregate_and_proof: AggregateAndProof,
+                                       ): Eth2Digest =
   let
     epoch = compute_epoch_at_slot(aggregate_and_proof.aggregate.data.slot)
     domain = get_domain(
       fork, DOMAIN_AGGREGATE_AND_PROOF, epoch, genesis_validators_root)
-    signing_root = compute_signing_root(aggregate_and_proof, domain)
+  result = compute_signing_root(aggregate_and_proof, domain)
 
-  blsSign(privKey, signing_root.data)
+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#broadcast-aggregate
+func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
+                                        aggregate_and_proof: AggregateAndProof,
+                                        privKey: ValidatorPrivKey): ValidatorSig =
+  blsSign(privKey, compute_aggregate_and_proof_root(fork, genesis_validators_root,
+    aggregate_and_proof).data)
 
 proc verify_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
                                            aggregate_and_proof: AggregateAndProof,
@@ -111,18 +124,23 @@ proc verify_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root:
 
   blsVerify(pubKey, signing_root.data, signature)
 
+func compute_attestation_root*(
+    fork: Fork, genesis_validators_root: Eth2Digest,
+    attestation_data: AttestationData
+    ): Eth2Digest =
+  let
+    epoch = attestation_data.target.epoch
+    domain = get_domain(
+      fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
+  result = compute_signing_root(attestation_data, domain)
+
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#aggregate-signature
 func get_attestation_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest,
     attestation_data: AttestationData,
     privkey: ValidatorPrivKey): ValidatorSig =
-  let
-    epoch = attestation_data.target.epoch
-    domain = get_domain(
-      fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
-    signing_root = compute_signing_root(attestation_data, domain)
-
-  blsSign(privKey, signing_root.data)
+  blsSign(privKey, compute_attestation_root(fork, genesis_validators_root,
+    attestation_data).data)
 
 proc verify_attestation_signature*(
     fork: Fork, genesis_validators_root: Eth2Digest,
@@ -308,7 +308,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
       raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
     let valInfo = ValidatorInfoForMakeBeaconBlock(kind: viRandao_reveal,
                                                   randao_reveal: randao_reveal)
-    let res = makeBeaconBlockForHeadAndSlot(
+    let res = await makeBeaconBlockForHeadAndSlot(
       node, valInfo, proposer.get()[0], graffiti, head, slot)
     if res.message.isNone():
       raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
@@ -7,7 +7,7 @@
 
 import
   # Standard library
-  os, strutils, json, times,
+  os, strutils, json,
 
   # Nimble packages
   stew/shims/[tables, macros],
@@ -52,7 +52,7 @@ template attemptUntilSuccess(vc: ValidatorClient, body: untyped) =
       break
     except CatchableError as err:
       warn "Caught an unexpected error", err = err.msg
-      waitFor sleepAsync(chronos.seconds(1)) # 1 second before retrying
+      waitFor sleepAsync(chronos.seconds(vc.config.retryDelay))
 
 proc getValidatorDutiesForEpoch(vc: ValidatorClient, epoch: Epoch) {.gcsafe, async.} =
   info "Getting validator duties for epoch", epoch = epoch
@@ -136,7 +136,7 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, async.} =
 
       info "Proposing block", slot = slot, public_key = public_key
 
-      let randao_reveal = validator.genRandaoReveal(
+      let randao_reveal = await validator.genRandaoReveal(
         vc.fork, vc.beaconGenesis.genesis_validators_root, slot)
 
       var newBlock = SignedBeaconBlock(
@@ -7,7 +7,7 @@
 
 import
   # Standard library
-  std/[os, tables, strutils],
+  std/[os, tables, strutils, sequtils, osproc, streams],
 
   # Nimble packages
   stew/[objects], stew/shims/macros,
@@ -41,25 +41,43 @@ proc saveValidatorKey*(keyName, key: string, conf: BeaconNodeConf) =
   writeFile(outputFile, key)
   info "Imported validator key", file = outputFile
 
-proc addLocalValidator*(node: BeaconNode,
-                        state: BeaconState,
-                        privKey: ValidatorPrivKey) =
-  let pubKey = privKey.toPubKey()
-
+proc checkValidatorInRegistry(state: BeaconState,
+                              pubKey: ValidatorPubKey) =
   let idx = state.validators.asSeq.findIt(it.pubKey == pubKey)
   if idx == -1:
     # We allow adding a validator even if its key is not in the state registry:
     # it might be that the deposit for this validator has not yet been processed
     warn "Validator not in registry (yet?)", pubKey
 
+proc addLocalValidator*(node: BeaconNode,
+                        state: BeaconState,
+                        privKey: ValidatorPrivKey) =
+  let pubKey = privKey.toPubKey()
+  state.checkValidatorInRegistry(pubKey)
   node.attachedValidators.addLocalValidator(pubKey, privKey)
 
 proc addLocalValidators*(node: BeaconNode) =
   for validatorKey in node.config.validatorKeys:
     node.addLocalValidator node.chainDag.headState.data.data, validatorKey
 
   info "Local validators attached ", count = node.attachedValidators.count
 
+proc addRemoteValidators*(node: BeaconNode) =
+  # load all the validators from the child process - loop until `end`
+  var line = newStringOfCap(120).TaintedString
+  while line != "end" and running(node.vcProcess):
+    if node.vcProcess.outputStream.readLine(line) and line != "end":
+      let key = ValidatorPubKey.fromHex(line).get().initPubKey()
+      node.chainDag.headState.data.data.checkValidatorInRegistry(key)
+
+      let v = AttachedValidator(pubKey: key,
+                                kind: ValidatorKind.remote,
+                                connection: ValidatorConnection(
+                                  inStream: node.vcProcess.inputStream,
+                                  outStream: node.vcProcess.outputStream,
+                                  pubKeyStr: $key))
+      node.attachedValidators.addRemoteValidator(key, v)
+  info "Remote validators attached ", count = node.attachedValidators.count
+
 proc getAttachedValidator*(node: BeaconNode,
                            pubkey: ValidatorPubKey): AttachedValidator =
   node.attachedValidators.getValidator(pubkey)
@@ -172,7 +190,8 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
                                     graffiti: GraffitiBytes,
                                     head: BlockRef,
                                     slot: Slot):
-    tuple[message: Option[BeaconBlock], fork: Fork, genesis_validators_root: Eth2Digest] =
+    Future[tuple[message: Option[BeaconBlock], fork: Fork,
+                 genesis_validators_root: Eth2Digest]] {.async.} =
   # Advance state to the slot that we're proposing for - this is the equivalent
   # of running `process_slots` up to the slot of the new block.
   node.chainDag.withState(
@@ -189,9 +208,11 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
   # need for the discriminated union)... but we need the `state` from `withState`
   # in order to get the fork/root for the specific head/slot for the randao_reveal
   # and it's causing problems when the function becomes a generic for 2 types...
-  proc getRandaoReveal(val_info: ValidatorInfoForMakeBeaconBlock): ValidatorSig =
+  proc getRandaoReveal(val_info: ValidatorInfoForMakeBeaconBlock):
+      Future[ValidatorSig] {.async.} =
     if val_info.kind == viValidator:
-      return val_info.validator.genRandaoReveal(state.fork, state.genesis_validators_root, slot)
+      return await val_info.validator.genRandaoReveal(
+        state.fork, state.genesis_validators_root, slot)
     elif val_info.kind == viRandao_reveal:
       return val_info.randao_reveal
 
@@ -210,7 +231,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
       hashedState,
       validator_index,
       head.root,
-      getRandaoReveal(val_info),
+      await getRandaoReveal(val_info),
       eth1data,
       graffiti,
       node.attestationPool[].getAttestationsForBlock(state),
@@ -280,7 +301,8 @@ proc proposeBlock(node: BeaconNode,
     return head
 
   let valInfo = ValidatorInfoForMakeBeaconBlock(kind: viValidator, validator: validator)
-  let beaconBlockTuple = makeBeaconBlockForHeadAndSlot(node, valInfo, validator_index, node.graffitiBytes, head, slot)
+  let beaconBlockTuple = await makeBeaconBlockForHeadAndSlot(
+    node, valInfo, validator_index, node.graffitiBytes, head, slot)
   if not beaconBlockTuple.message.isSome():
     return head # already logged elsewhere!
   var
@@ -388,7 +410,7 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
   return head
 
 proc broadcastAggregatedAttestations(
-    node: BeaconNode, aggregationHead: BlockRef, aggregationSlot: Slot) =
+    node: BeaconNode, aggregationHead: BlockRef, aggregationSlot: Slot) {.async.} =
   # The index is via a
   # locally attested validator. Unlike in handleAttestations(...) there's a
   # single one at most per slot (because that's how aggregation attestation
@@ -402,6 +424,13 @@ proc broadcastAggregatedAttestations(
     let
       committees_per_slot =
         get_committee_count_per_slot(state, aggregationSlot.epoch, cache)
 
+    var
+      slotSigs: seq[Future[ValidatorSig]] = @[]
+      slotSigsData: seq[tuple[committee_index: uint64,
+                              validator_idx: ValidatorIndex,
+                              v: AttachedValidator]] = @[]
+
     for committee_index in 0'u64..<committees_per_slot:
       let committee = get_beacon_committee(
         state, aggregationSlot, committee_index.CommitteeIndex, cache)
@@ -409,32 +438,33 @@ proc broadcastAggregatedAttestations(
       for index_in_committee, validatorIdx in committee:
         let validator = node.getAttachedValidator(state, validatorIdx)
         if validator != nil:
-          # This is slightly strange/inverted control flow, since really it's
-          # going to happen once per slot, but this is the best way to get at
-          # the validator index and private key pair. TODO verify it only has
-          # one isSome() with test.
-          let aggregateAndProof =
-            aggregate_attestations(node.attestationPool[], state,
-              committee_index.CommitteeIndex,
-              # TODO https://github.com/status-im/nim-beacon-chain/issues/545
-              # this assumes in-process private keys
-              validatorIdx,
-              validator.privKey,
-              cache)
-
-          # Don't broadcast when, e.g., this node isn't aggregator
-          if aggregateAndProof.isSome:
-            var signedAP = SignedAggregateAndProof(
-              message: aggregateAndProof.get,
-              # TODO Make the signing async here
-              signature: validator.signAggregateAndProof(
-                aggregateAndProof.get, state.fork,
-                state.genesis_validators_root))
-
-            node.network.broadcast(node.topicAggregateAndProofs, signedAP)
-            info "Aggregated attestation sent",
-              attestation = shortLog(signedAP.message.aggregate),
-              validator = shortLog(validator)
+          # the validator index and private key pair.
+          slotSigs.add getSlotSig(validator, state.fork,
+            state.genesis_validators_root, state.slot)
+          slotSigsData.add (committee_index, validatorIdx, validator)
+
+    await allFutures(slotSigs)
+
+    for curr in zip(slotSigsData, slotSigs):
+      let aggregateAndProof =
+        aggregate_attestations(node.attestationPool[], state,
+          curr[0].committee_index.CommitteeIndex,
+          curr[0].validator_idx,
+          curr[1].read, cache)
+
+      # Don't broadcast when, e.g., this node isn't aggregator
+      # TODO verify there is only one isSome() with test.
+      if aggregateAndProof.isSome:
+        let sig = await signAggregateAndProof(curr[0].v,
+          aggregateAndProof.get, state.fork,
+          state.genesis_validators_root)
+        var signedAP = SignedAggregateAndProof(
+          message: aggregateAndProof.get,
+          signature: sig)
+        node.network.broadcast(node.topicAggregateAndProofs, signedAP)
+        info "Aggregated attestation sent",
+          attestation = shortLog(signedAP.message.aggregate),
+          validator = shortLog(curr[0].v)
 
 proc handleValidatorDuties*(
     node: BeaconNode, lastSlot, slot: Slot) {.async.} =
@@ -531,4 +561,4 @@ proc handleValidatorDuties*(
       aggregationSlot = slot - TRAILING_DISTANCE
       aggregationHead = get_ancestor(head, aggregationSlot)
 
-    broadcastAggregatedAttestations(node, aggregationHead, aggregationSlot)
+    await broadcastAggregatedAttestations(node, aggregationHead, aggregationSlot)
@@ -1,8 +1,9 @@
 import
-  tables,
+  tables, strutils, json, streams,
   chronos, chronicles,
   spec/[datatypes, crypto, digest, signatures, helpers],
-  beacon_node_types
+  beacon_node_types,
+  json_serialization/std/[sets, net]
 
 func init*(T: type ValidatorPool): T =
   result.validators = initTable[ValidatorPubKey, AttachedValidator]()
@@ -17,45 +18,51 @@ proc addLocalValidator*(pool: var ValidatorPool,
     kind: inProcess,
     privKey: privKey)
   pool.validators[pubKey] = v
 
   info "Local validator attached", pubKey, validator = shortLog(v)
 
+proc addRemoteValidator*(pool: var ValidatorPool,
+                         pubKey: ValidatorPubKey,
+                         v: AttachedValidator) =
+  pool.validators[pubKey] = v
+  info "Remote validator attached", pubKey, validator = shortLog(v)
+
 proc getValidator*(pool: ValidatorPool,
                    validatorKey: ValidatorPubKey): AttachedValidator =
   pool.validators.getOrDefault(validatorKey.initPubKey)
 
+proc signWithRemoteValidator(v: AttachedValidator, data: Eth2Digest):
+    Future[ValidatorSig] {.async.} =
+  v.connection.inStream.writeLine(v.connection.pubKeyStr, " ", $data)
+  v.connection.inStream.flush()
+  var line = newStringOfCap(120).TaintedString
+  discard v.connection.outStream.readLine(line)
+  result = ValidatorSig.fromHex(line).get()
+  # TODO this is an ugly hack to fake a delay and subsequent async reordering
+  # for the purpose of testing the external validator delay - to be
+  # replaced by something more sensible
+  await sleepAsync(chronos.milliseconds(1))
+
 # TODO: Honest validator - https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md
 proc signBlockProposal*(v: AttachedValidator, fork: Fork,
                         genesis_validators_root: Eth2Digest, slot: Slot,
                         blockRoot: Eth2Digest): Future[ValidatorSig] {.async.} =
 
   if v.kind == inProcess:
-    # TODO this is an ugly hack to fake a delay and subsequent async reordering
-    # for the purpose of testing the external validator delay - to be
-    # replaced by something more sensible
-    await sleepAsync(chronos.milliseconds(1))
-
     result = get_block_signature(
       fork, genesis_validators_root, slot, blockRoot, v.privKey)
   else:
-    error "Unimplemented"
-    quit 1
+    let root = compute_block_root(fork, genesis_validators_root, slot, blockRoot)
+    result = await signWithRemoteValidator(v, root)
 
 proc signAttestation*(v: AttachedValidator,
                       attestation: AttestationData,
                       fork: Fork, genesis_validators_root: Eth2Digest):
                       Future[ValidatorSig] {.async.} =
   if v.kind == inProcess:
-    # TODO this is an ugly hack to fake a delay and subsequent async reordering
-    # for the purpose of testing the external validator delay - to be
-    # replaced by something more sensible
-    await sleepAsync(chronos.milliseconds(1))
-
     result = get_attestation_signature(
       fork, genesis_validators_root, attestation, v.privKey)
   else:
-    error "Unimplemented"
-    quit 1
+    let root = compute_attestation_root(fork, genesis_validators_root, attestation)
+    result = await signWithRemoteValidator(v, root)
 
 proc produceAndSignAttestation*(validator: AttachedValidator,
                                 attestationData: AttestationData,
@@ -72,13 +79,15 @@ proc produceAndSignAttestation*(validator: AttachedValidator,
 
 proc signAggregateAndProof*(v: AttachedValidator,
                             aggregate_and_proof: AggregateAndProof,
-                            fork: Fork, genesis_validators_root: Eth2Digest): ValidatorSig =
+                            fork: Fork, genesis_validators_root: Eth2Digest):
+                            Future[ValidatorSig] {.async.} =
   if v.kind == inProcess:
     result = get_aggregate_and_proof_signature(
       fork, genesis_validators_root, aggregate_and_proof, v.privKey)
   else:
-    error "Out of process signAggregateAndProof not implemented"
-    quit 1
+    let root = compute_aggregate_and_proof_root(
+      fork, genesis_validators_root, aggregate_and_proof)
+    result = await signWithRemoteValidator(v, root)
 
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#randao-reveal
 func genRandaoReveal*(k: ValidatorPrivKey, fork: Fork,
@@ -86,6 +95,22 @@ func genRandaoReveal*(k: ValidatorPrivKey, fork: Fork,
   get_epoch_signature(
     fork, genesis_validators_root, slot.compute_epoch_at_slot, k)
 
-func genRandaoReveal*(v: AttachedValidator, fork: Fork,
-                      genesis_validators_root: Eth2Digest, slot: Slot): ValidatorSig =
-  genRandaoReveal(v.privKey, fork, genesis_validators_root, slot)
+proc genRandaoReveal*(v: AttachedValidator, fork: Fork,
+                      genesis_validators_root: Eth2Digest, slot: Slot):
+                      Future[ValidatorSig] {.async.} =
+  if v.kind == inProcess:
+    return genRandaoReveal(v.privKey, fork, genesis_validators_root, slot)
+  else:
+    let root = compute_epoch_root(
+      fork, genesis_validators_root, slot.compute_epoch_at_slot)
+    result = await signWithRemoteValidator(v, root)
+
+proc getSlotSig*(v: AttachedValidator, fork: Fork,
+                 genesis_validators_root: Eth2Digest, slot: Slot
+                 ): Future[ValidatorSig] {.async.} =
+  if v.kind == inProcess:
+    result = get_slot_signature(
+      fork, genesis_validators_root, slot, v.privKey)
+  else:
+    let root = compute_slot_root(fork, genesis_validators_root, slot)
+    result = await signWithRemoteValidator(v, root)
@@ -91,7 +91,7 @@ if [[ "$BUILD" == "1" ]]; then
   git pull
   # don't use too much RAM
   make update
-  make LOG_LEVEL="TRACE" NIMFLAGS="-d:insecure -d:testnet_servers_image --parallelBuild:1" beacon_node
+  make LOG_LEVEL="TRACE" NIMFLAGS="-d:insecure -d:testnet_servers_image --parallelBuild:1" beacon_node signing_process
 fi
 
 #######
@@ -128,6 +128,9 @@ The following options are available:
      --rpc                     Enable the JSON-RPC server.
      --rpc-port                HTTP port for the JSON-RPC service.
      --rpc-address             Listening address of the RPC server.
+     --in-process-validators   Disable the push model (the beacon node tells a signing process with the private
+                               keys of the validators what to sign and when) and load the validators in the
+                               beacon node itself.
      --dump                    Write SSZ dumps of blocks, attestations and states to data dir.
 
 Available sub-commands:
@@ -184,7 +184,7 @@ else
 fi
 
 NETWORK_NIM_FLAGS=$(scripts/load-testnet-nim-flags.sh "${NETWORK}")
-$MAKE -j2 LOG_LEVEL="${LOG_LEVEL}" NIMFLAGS="${NIMFLAGS} -d:insecure -d:testnet_servers_image -d:local_testnet ${NETWORK_NIM_FLAGS}" beacon_node validator_client deposit_contract
+$MAKE -j2 LOG_LEVEL="${LOG_LEVEL}" NIMFLAGS="${NIMFLAGS} -d:insecure -d:testnet_servers_image -d:local_testnet ${NETWORK_NIM_FLAGS}" beacon_node signing_process validator_client deposit_contract
 if [[ "$ENABLE_LOGTRACE" == "1" ]]; then
   $MAKE LOG_LEVEL="${LOG_LEVEL}" NIMFLAGS="${NIMFLAGS} -d:insecure -d:testnet_servers_image -d:local_testnet ${NETWORK_NIM_FLAGS}" logtrace
 fi
@@ -66,7 +66,7 @@ if [ "$ETH1_PRIVATE_KEY" != "" ]; then
 fi
 
 echo "Building a local beacon_node instance for 'deposits create' and 'createTestnet'"
-make -j2 NIMFLAGS="-d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node process_dashboard
+make -j2 NIMFLAGS="-d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node signing_process process_dashboard
 
 echo "Generating Grafana dashboards for remote testnet servers"
 for testnet in 0 1; do
@@ -106,7 +106,7 @@ if [[ "$USE_PROMETHEUS" == "yes" ]]; then
   fi
 fi
 
-$MAKE -j2 --no-print-directory NIMFLAGS="$CUSTOM_NIMFLAGS $DEFS" LOG_LEVEL="${LOG_LEVEL:-DEBUG}" beacon_node validator_client
+$MAKE -j2 --no-print-directory NIMFLAGS="$CUSTOM_NIMFLAGS $DEFS" LOG_LEVEL="${LOG_LEVEL:-DEBUG}" beacon_node signing_process validator_client
 
 EXISTING_VALIDATORS=0
 if [[ -f "$DEPOSITS_FILE" ]]; then