Merge branch 'devel'

Jacek Sieka 2020-11-11 13:27:26 +01:00
commit ef00ff271d
No known key found for this signature in database
GPG Key ID: A1B09461ABB656B8
27 changed files with 112 additions and 113 deletions

@@ -293,28 +293,30 @@ endef
###
### medalla
###
medalla-build: | nimbus_beacon_node_spec_0_12_3 nimbus_signing_process
# https://www.gnu.org/software/make/manual/html_node/Call-Function.html#Call-Function
medalla: | nimbus_beacon_node_spec_0_12_3 nimbus_signing_process
medalla: | medalla-build
$(call CONNECT_TO_NETWORK,medalla,nimbus_beacon_node_spec_0_12_3)
medalla-vc: | nimbus_beacon_node_spec_0_12_3 nimbus_signing_process nimbus_validator_client
medalla-vc: | medalla-build nimbus_validator_client
$(call CONNECT_TO_NETWORK_WITH_VALIDATOR_CLIENT,medalla,nimbus_beacon_node_spec_0_12_3)
medalla-fast-sync: | nimbus_beacon_node_spec_0_12_3 nimbus_signing_process
medalla-fast-sync: | medalla-build
$(call CONNECT_TO_NETWORK,medalla,nimbus_beacon_node_spec_0_12_3,FastSync)
ifneq ($(LOG_LEVEL), TRACE)
medalla-dev:
+ "$(MAKE)" LOG_LEVEL=TRACE $@
else
medalla-dev: | nimbus_beacon_node_spec_0_12_3 nimbus_signing_process
medalla-dev: | medalla-build
$(call CONNECT_TO_NETWORK_IN_DEV_MODE,medalla,nimbus_beacon_node_spec_0_12_3)
endif
medalla-deposit-data: | nimbus_beacon_node_spec_0_12_3 nimbus_signing_process deposit_contract
medalla-deposit-data: | medalla-build deposit_contract
$(call MAKE_DEPOSIT_DATA,medalla)
medalla-deposit: | nimbus_beacon_node_spec_0_12_3 nimbus_signing_process deposit_contract
medalla-deposit: | medalla-build deposit_contract
$(call MAKE_DEPOSIT,medalla)
clean-medalla:
@@ -323,18 +325,20 @@ clean-medalla:
###
### toledo
###
toledo-build: | nimbus_beacon_node nimbus_signing_process
# https://www.gnu.org/software/make/manual/html_node/Call-Function.html#Call-Function
toledo: | nimbus_beacon_node nimbus_signing_process
toledo: | toledo-build
$(call CONNECT_TO_NETWORK,toledo,nimbus_beacon_node)
toledo-vc: | nimbus_beacon_node nimbus_signing_process nimbus_validator_client
toledo-vc: | toledo-build nimbus_validator_client
$(call CONNECT_TO_NETWORK_WITH_VALIDATOR_CLIENT,toledo,nimbus_beacon_node)
ifneq ($(LOG_LEVEL), TRACE)
toledo-dev:
+ "$(MAKE)" LOG_LEVEL=TRACE $@
else
toledo-dev: | nimbus_beacon_node nimbus_signing_process
toledo-dev: | toledo-build
$(call CONNECT_TO_NETWORK_IN_DEV_MODE,toledo,nimbus_beacon_node)
endif

@@ -129,8 +129,8 @@ func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
ret
template panic =
# TODO: Could we recover from a corrupted database?
# Review all usages.
# TODO(zah): Could we recover from a corrupted database?
# Review all usages.
raiseAssert "The database should not be corrupted"
proc init*[T](Seq: type DbSeq[T], db: SqStoreRef, name: string): Seq =
@@ -232,15 +232,16 @@ proc init*(T: type BeaconChainDB,
inMemory = false): BeaconChainDB =
if inMemory:
# TODO
# The inMemory store shuold offer the complete functionality
# of the database-backed one (i.e. tracking of deposits and validators)
# To support testing, the inMemory store should offer the complete
# functionality of the database-backed one (i.e. tracking of deposits
# and validators)
T(backend: kvStore MemStoreRef.init(),
preset: preset,
finalizedEth1DepositsMerkleizer: init DepositsMerkleizer,
finalizedEth2DepositsMerkleizer: init DepositsMerkleizer)
else:
let s = secureCreatePath(dir)
doAssert s.isOk # TODO Handle this in a better way
doAssert s.isOk # TODO(zah) Handle this in a better way
let sqliteStore = SqStoreRef.init(dir, "nbc", Keyspaces).expect(
"working database")

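Since the hunk above is about giving the in-memory store feature parity for testing, a minimal usage sketch may help; the parameter list is assumed from the context lines (`preset`, `dir`, `inMemory`), and `testPreset` is a hypothetical value:

```
# Hypothetical test setup - argument names and order assumed, not the
# project's actual helper. An in-memory BeaconChainDB needs no directory
# and disappears with the test.
let db = BeaconChainDB.init(testPreset, dir = "", inMemory = true)
```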
@@ -110,10 +110,6 @@ type
heads*: seq[BlockRef] ##\
## Candidate heads of candidate chains
head*: BlockRef ##\
## The latest block we know about, that's been chosen as a head by the fork
## choice rule
finalizedHead*: BlockSlot ##\
## The latest block that was finalized according to the block in head
## Ancestors of this block are guaranteed to have 1 child only.
@@ -122,14 +118,18 @@ type
# Rewinder - Mutable state processing
headState*: StateData ##\
## State given by the head block; only update in `updateHead`, not anywhere
## else via `withState`
## State given by the head block - must only be updated in `updateHead` -
## always matches dag.head
tmpState*: StateData ## Scratchpad - may be any state
epochRefState*: StateData ##\
## State used to produce epochRef instances - must only be used in
## `getEpochRef`
clearanceState*: StateData ##\
## Cached state used during block clearance - should only be used in the
## clearance module to avoid the risk of modifying it in a callback
## Cached state used during block clearance - must only be used in the
## clearance module
tmpState*: StateData ## Scratchpad - may be any state
updateFlags*: UpdateFlags
@@ -202,6 +202,8 @@ type
template validator_keys*(e: EpochRef): untyped = e.validator_key_store[1][]
template head*(v: ChainDagRef): BlockRef = v.headState.blck
func shortLog*(v: BlockSlot): string =
if v.blck.slot == v.slot:
&"{v.blck.root.data.toOpenArray(0, 3).toHex()}:{v.blck.slot}"

@@ -389,13 +389,13 @@ proc init*(T: type ChainDAGRef,
let res = ChainDAGRef(
blocks: blocks,
tail: tailRef,
head: headRef,
genesis: genesisRef,
db: db,
heads: @[headRef],
headState: tmpState[],
tmpState: tmpState[],
epochRefState: tmpState[],
clearanceState: tmpState[],
tmpState: tmpState[],
# The only allowed flag right now is verifyFinalization, as the others all
# allow skipping some validation.
@@ -446,7 +446,7 @@ proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
let
ancestor = blck.epochAncestor(epoch)
dag.withState(dag.tmpState, ancestor):
dag.withState(dag.epochRefState, ancestor):
let
prevEpochRef = if dag.tail.slot.epoch >= epoch: nil
else: blck.findEpochRef(epoch - 1)
@@ -702,6 +702,11 @@ proc updateStateData*(
found = true
break
if canAdvance(dag.epochRefState, cur):
assign(state, dag.epochRefState)
found = true
break
if cur.slot == cur.blck.slot:
# This is not an empty slot, so the block will need to be applied to
# eventually reach bs
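`canAdvance` itself is not shown in this hunk; a plausible sketch of the predicate (an assumption, not the actual implementation) is that a cached state is reusable when it sits on the target block at or before the target slot, so plain slot advancement suffices:

```
# Assumed shape of the predicate used above - illustrative only.
func canAdvance(state: StateData, bs: BlockSlot): bool =
  # same block and not past the target slot => no rewind needed
  state.blck == bs.blck and state.data.data.slot <= bs.slot
```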
@@ -812,8 +817,6 @@ proc updateHead*(
updateStateData(
dag, dag.headState, newHead.atSlot(newHead.slot), false, cache)
dag.head = newHead
if not lastHead.isAncestorOf(newHead):
notice "Updated head block with chain reorg",
lastHead = shortLog(lastHead),
@@ -961,7 +964,7 @@ proc preInit*(
proc setTailState*(dag: ChainDAGRef,
checkpointState: BeaconState,
checkpointBlock: SignedBeaconBlock) =
# TODO
# TODO(zah)
# Delete all records up to the tail node. If the tail node is not
# in the database, init the database in a way similar to `preInit`.
discard

@@ -315,7 +315,8 @@ proc getBlockProposalData*(m: Eth1Monitor,
var pendingDepositsCount = state.eth1_data.deposit_count -
state.eth1_deposit_index
# TODO To make block proposal cheaper, we can perform this action more regularly
# TODO(zah)
# To make block proposal cheaper, we can perform this action more regularly
# (e.g. in BeaconNode.onSlot). But keep in mind that this action needs to be
# performed only when there are validators attached to the node.
let ourDepositsCount = m.db.deposits.len
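As a worked example of the subtraction above (values hypothetical): if the state's eth1 vote reports a deposit_count of 1000 while eth1_deposit_index is 984, the proposer has 16 pending deposits to consider:

```
let
  depositCount = 1000'u64  # hypothetical state.eth1_data.deposit_count
  depositIndex = 984'u64   # hypothetical state.eth1_deposit_index
doAssert depositCount - depositIndex == 16
```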
@@ -573,7 +574,7 @@ proc syncBlockRange(m: Eth1Monitor, fromBlock, toBlock: Eth1BlockNumber) {.async
let eth1Blocks = depositEventsToBlocks(depositLogs)
for i in 0 ..< eth1Blocks.len:
# TODO: The DB operations should be executed as a transaction here
# TODO(zah): The DB operations should be executed as a transaction here
let blk = eth1Blocks[i]
for deposit in blk.deposits:

@@ -68,16 +68,6 @@ proc loadBootstrapFile*(bootstrapFile: string,
except IOError as e:
error "Could not read bootstrap file", msg = e.msg
quit 1
elif cmpIgnoreCase(ext, ".yaml") == 0:
# TODO. This is very ugly, but let's try to negotiate the
# removal of YAML metadata.
try:
for ln in strippedLines(bootstrapFile):
addBootstrapNode(string(ln.strip()[3..^2]), bootstrapEnrs)
except IOError as e:
error "Could not read bootstrap file", msg = e.msg
quit 1
else:
error "Unknown bootstrap file format", ext
quit 1
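The `[3..^2]` slice in the removed branch assumed each YAML entry had the shape `- "enr:..."`: dropping the first three characters and the trailing quote leaves the bare ENR. A self-contained illustration (the ENR value is made up):

```
import std/strutils

# What the removed `ln.strip()[3..^2]` did to a YAML bootstrap line:
let ln = "- \"enr:-LK3abc\""
doAssert ln.strip()[3..^2] == "enr:-LK3abc"
```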

@@ -57,7 +57,6 @@ type
peerId*: PeerID
stamp*: chronos.Moment
# TODO Is this really needed?
Eth2Node* = ref object of RootObj
switch*: Switch
pubsub*: PubSub
@@ -323,8 +322,8 @@ proc getPeer*(node: Eth2Node, peerId: PeerID): Peer =
return node.peers.mGetOrPut(peerId, peer)
proc peerFromStream(network: Eth2Node, conn: Connection): Peer =
# TODO: Can this be `nil`?
return network.getPeer(conn.peerInfo.peerId)
result = network.getPeer(conn.peerInfo.peerId)
result.info = conn.peerInfo
proc getKey*(peer: Peer): PeerID {.inline.} =
peer.info.peerId
@@ -427,7 +426,7 @@ proc addSeen*(network: ETh2Node, peerId: PeerID,
proc disconnect*(peer: Peer, reason: DisconnectionReason,
notifyOtherPeer = false) {.async.} =
# TODO: How should we notify the other peer?
# TODO(zah): How should we notify the other peer?
try:
if peer.connectionState notin {Disconnecting, Disconnected}:
peer.connectionState = Disconnecting
@@ -655,11 +654,6 @@ proc handleIncomingStream(network: Eth2Node,
let peer = peerFromStream(network, conn)
try:
# TODO peer connection setup is broken, update info in some better place
# whenever the race is fixed:
# https://github.com/status-im/nimbus-eth2/issues/1157
peer.info = conn.peerInfo
template returnInvalidRequest(msg: ErrorMsg) =
peer.updateScore(PeerScoreInvalidRequest)
await sendErrorResponse(peer, conn, InvalidRequest, msg)
@@ -676,7 +670,7 @@ proc handleIncomingStream(network: Eth2Node,
fs
else:
# TODO The TTFB timeout is not implemented in LibP2P streams back-end
# TODO(zah) The TTFB timeout is not implemented in LibP2P streams back-end
conn
let deadline = sleepAsync RESP_TIMEOUT
@@ -807,7 +801,7 @@ proc dialPeer*(node: Eth2Node, peerAddr: PeerAddr, index = 0) {.async.} =
deadline.cancel()
inc nbc_successful_dials
else:
# TODO: As soon as `nim-libp2p` will be able to handle cancellation
# TODO(cheatfate): As soon as `nim-libp2p` is able to handle cancellation
# properly and has cancellation tests, we need to add cancellation
# of `workfut` here.
# workfut.cancel()
@@ -1448,7 +1442,7 @@ proc addValidator*[MsgType](node: Eth2Node,
if decompressed.len > 0:
return msgValidator(SSZ.decode(decompressed, MsgType))
else:
# TODO penalize peer?
# TODO(zah) penalize peer?
debug "Failed to decompress gossip payload"
except CatchableError as err:
debug "Gossip validation error",

@@ -19,8 +19,6 @@ import
../../beacon_chain/spec/[datatypes, digest],
../../beacon_chain/ssz/merkleization
# TODO All tests need to be moved to the test suite.
const depositContractLimit* = Limit(1'u64 shl DEPOSIT_CONTRACT_TREE_DEPTH)
func attachMerkleProofs*(deposits: var openArray[Deposit]) =

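For a sense of scale, the spec fixes `DEPOSIT_CONTRACT_TREE_DEPTH` at 32, so the limit above works out to 2^32 leaves:

```
# The deposit contract's Merkle tree capacity at depth 32.
doAssert 1'u64 shl 32 == 4_294_967_296'u64
```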
@@ -13,7 +13,7 @@ import
# binary). It makes sense to keep the file small and separated from the rest
# of the module in order to gain maximum efficiency in incremental compilation.
#
# TODO:
# TODO(zah):
# We can compress the embedded states with snappy before embedding them here.
{.push raises: [Defect].}
@@ -151,7 +151,7 @@ const
# that there are no constant overrides
eth1Network: some mainnet,
runtimePreset: mainnetRuntimePreset,
# TODO Add bootstrap nodes for mainnet
# TODO(zah) Add bootstrap nodes for mainnet
bootstrapNodes: @[],
depositContractAddress: Eth1Address.fromHex "0x00000000219ab540356cBB839Cbe05303d7705Fa",
depositContractDeployedAt: "11052984",

@@ -204,7 +204,7 @@ proc init*(T: type BeaconNode,
error "Failed to initialize database", err = e.msg
quit 1
# TODO check that genesis given on command line (if any) matches database
# TODO(zah) check that genesis given on command line (if any) matches database
let
chainDagFlags = if conf.verifyFinalization: {verifyFinalization}
else: {}
@@ -234,8 +234,8 @@ proc init*(T: type BeaconNode,
conf.web3Url.len > 0 and
conf.depositContractAddress.isSome and
conf.depositContractDeployedAt.isSome:
# TODO if we don't have any validators attached,
# we don't need a mainchain monitor
# TODO(zah) if we don't have any validators attached,
# we don't need a mainchain monitor
eth1Monitor = await startEth1Monitor(db, eth1Network, conf)
let rpcServer = if conf.rpcEnabled:

@@ -33,7 +33,6 @@ proc parsePubkey(str: string): ValidatorPubKey =
raise newException(CatchableError, "Not a valid public key")
return pubkeyRes[]
# TODO currently this function throws if the validator isn't found - is this OK?
proc getValidatorInfoFromValidatorId(
state: BeaconState,
current_epoch: Epoch,

@@ -17,10 +17,10 @@ template unimplemented() =
proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
rpcServer.rpc("get_v1_node_identity") do () -> NodeIdentityTuple:
# TODO rest of fields
return (
peer_id: node.network.peerId(),
enr: node.network.enrRecord(),
# TODO rest of fields
p2p_addresses: newSeq[MultiAddress](0),
discovery_addresses: newSeq[MultiAddress](0),
metadata: (0'u64, "")

@@ -163,6 +163,6 @@ const
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/configs/mainnet/phase0.yaml#L52
# Ethereum PoW Mainnet
# TODO These violate the spec (this is a temporary change to allow `make medalla` to work)
# TODO(zah) These violate the spec (this is a temporary change to allow `make medalla` to work)
DEPOSIT_CHAIN_ID* = 5
DEPOSIT_NETWORK_ID* = 5

@@ -171,6 +171,6 @@ const
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/configs/mainnet/phase0.yaml#L51
# Ethereum PoW Mainnet
# TODO These violate the spec (this is a temporary change to allow `make medalla` to work)
# TODO(zah) These violate the spec (this is a temporary change to allow `make medalla` to work)
DEPOSIT_CHAIN_ID* = 5
DEPOSIT_NETWORK_ID* = 5

@@ -159,12 +159,9 @@ func readSszValue*[T](input: openArray[byte],
ex.elementSize = elemSize
raise ex
val.setOutputSize input.len div elemSize
trs "READING LIST WITH LEN ", val.len
for i in 0 ..< val.len:
trs "TRYING TO READ LIST ELEM ", i
let offset = i * elemSize
readSszValue(input.toOpenArray(offset, offset + elemSize - 1), val[i])
trs "LIST READING COMPLETE"
else:
if input.len == 0:
@@ -175,10 +172,7 @@ func readSszValue*[T](input: openArray[byte],
raise newException(MalformedSszError, "SSZ input of insufficient size")
var offset = readOffset 0
trs "GOT OFFSET ", offset
let resultLen = offset div offsetSize
trs "LEN ", resultLen
if resultLen == 0:
# If there are too many elements, other constraints detect problems
@@ -197,11 +191,8 @@ func readSszValue*[T](input: openArray[byte],
readSszValue(input.toOpenArray(offset, input.len - 1), val[resultLen - 1])
# TODO: Should be possible to remove BitArray from here
elif val is UintN|bool:
trs "READING BASIC TYPE ", typetraits.name(T), " input=", input.len
val = fromSszBytes(T, input)
trs "RESULT WAS ", repr(val)
elif val is BitArray:
if sizeof(val) != input.len:
@@ -218,7 +209,6 @@ func readSszValue*[T](input: openArray[byte],
enumInstanceSerializedFields(val, fieldName, field):
const boundingOffsets = getFieldBoundingOffsets(T, fieldName)
trs "BOUNDING OFFSET FOR FIELD ", fieldName, " = ", boundingOffsets
# type FieldType = type field # buggy
# For some reason, Nim gets confused about the alias here. This could be a
@@ -233,7 +223,6 @@ func readSszValue*[T](input: openArray[byte],
const
startOffset = boundingOffsets[0]
endOffset = boundingOffsets[1]
trs "FIXED FIELD ", startOffset, "-", endOffset
else:
let
startOffset = readOffsetUnchecked(boundingOffsets[0])
@@ -244,7 +233,6 @@ func readSszValue*[T](input: openArray[byte],
if startOffset != minimallyExpectedSize:
raise newException(MalformedSszError, "SSZ object dynamic portion starts at invalid offset")
trs "VAR FIELD ", startOffset, "-", endOffset
if startOffset > endOffset:
raise newException(MalformedSszError, "SSZ field offsets are not monotonically increasing")
elif endOffset > inputLen:
@@ -254,16 +242,10 @@ func readSszValue*[T](input: openArray[byte],
# TODO The extra type escaping here is a work-around for a Nim issue:
when type(field) is type(SszType):
trs "READING NATIVE ", fieldName, ": ", name(SszType)
# TODO passing in `FieldType` instead of `type(field)` triggers a
# bug in the compiler
readSszValue(
input.toOpenArray(int(startOffset), int(endOffset - 1)),
field)
trs "READING COMPLETE ", fieldName
else:
trs "READING FOREIGN ", fieldName, ": ", name(SszType)
field = fromSszBytes(
type(field),
input.toOpenArray(int(startOffset), int(endOffset - 1)))
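For readers outside the codebase, the offset arithmetic these hunks touch is standard SSZ: the elements of a variable-size list are located through a table of 4-byte little-endian offsets, and the first offset doubles as the end of that table, fixing the element count (the `resultLen = offset div offsetSize` step above). A standalone sketch under those rules, with helper names of my own rather than this module's API:

```
func readOffset(input: openArray[byte], pos: int): uint32 =
  # 4-byte little-endian offset, as SSZ mandates
  uint32(input[pos]) or (uint32(input[pos + 1]) shl 8) or
    (uint32(input[pos + 2]) shl 16) or (uint32(input[pos + 3]) shl 24)

func elementRanges(input: openArray[byte]): seq[(int, int)] =
  # (start, end) byte ranges of each variable-size element
  if input.len == 0:
    return
  let count = int(readOffset(input, 0)) div 4
  for i in 0 ..< count:
    let start = int(readOffset(input, i * 4))
    let stop =
      if i == count - 1: input.len
      else: int(readOffset(input, (i + 1) * 4))
    result.add((start, stop))
```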

@@ -462,8 +462,6 @@ func chunkedHashTreeRootForBasicTypes[T](merkleizer: var SszMerkleizerImpl,
func bitListHashTreeRoot(merkleizer: var SszMerkleizerImpl, x: BitSeq): Eth2Digest =
# TODO: Switch to a simpler BitList representation and
# replace this with `chunkedHashTreeRoot`
trs "CHUNKIFYING BIT SEQ WITH TOP INDEX ", merkleizer.topIndex
var
totalBytes = bytes(x).len
lastCorrectedByte = bytes(x)[^1]

@@ -233,6 +233,6 @@ proc readValue*[T](r: var SszReader, val: var T) {.raises: [Defect, MalformedSsz
else:
raise newException(MalformedSszError, "SSZ input of insufficient size")
else:
# TODO Read the fixed portion first and precisely measure the size of
# the dynamic portion to consume the right number of bytes.
# TODO(zah) Read the fixed portion first and precisely measure the
# size of the dynamic portion to consume the right number of bytes.
readSszValue(r.stream.read(r.stream.len.get), val, r.updateRoot)

@@ -987,13 +987,35 @@ proc getWorkersStats[A, B](man: SyncManager[A, B]): tuple[map: string,
map[i] = ch
(map, sleeping, waiting, pending)
proc guardTask[A, B](man: SyncManager[A, B]) {.async.} =
var pending: array[SyncWorkersCount, Future[void]]
# Starting all the synchronization workers.
for i in 0 ..< len(man.workers):
let future = syncWorker[A, B](man, i)
man.workers[i].future = future
pending[i] = future
# Wait for synchronization worker's failure and replace it with new one.
while true:
let failFuture = await one(pending)
let index = pending.find(failFuture)
if failFuture.failed():
warn "Synchronization worker stopped working unexpectedly with an error",
index = index, errMsg = failFuture.error.msg
else:
warn "Synchronization worker stopped working unexpectedly without error",
index = index
let future = syncWorker[A, B](man, index)
man.workers[index].future = future
pending[index] = future
proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
mixin getKey, getScore
var pauseTime = 0
# Starting all sync workers
for i in 0 ..< len(man.workers):
man.workers[i].future = syncWorker[A, B](man, i)
asyncSpawn man.guardTask()
debug "Synchronization loop started", topics = "syncman"
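The new `guardTask` is a small supervisor: park on `one(...)`, find which worker future finished, and respawn it under the same index. A stripped-down restatement using only chronos, where `worker` stands in for `syncWorker` and the error logging is omitted:

```
import chronos

type Pool = ref object
  workers: seq[Future[void]]

proc worker(id: int) {.async.} =
  # stand-in for syncWorker: do some work, then stop unexpectedly
  await sleepAsync(1.seconds)
  raise newException(ValueError, "worker " & $id & " stopped")

proc guard(pool: Pool) {.async.} =
  while true:
    let stopped = await one(pool.workers)  # first future to finish
    for i in 0 ..< pool.workers.len:
      if pool.workers[i] == stopped:
        pool.workers[i] = worker(i)        # respawn in the same slot
        break

# usage sketch:
#   let pool = Pool(workers: @[worker(0), worker(1)])
#   asyncSpawn pool.guard()
```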

@@ -99,7 +99,8 @@ p2pProtocol BeaconSync(version = 1,
# makes the incoming flag unreliable / obsolete by the time we get to
# this point - instead of making assumptions, we'll just send a status
# message redundantly.
# TODO the spec does not prohibit sending the extra status message on
# TODO(zah)
# the spec does not prohibit sending the extra status message on
# incoming connections, but it should not be necessary - this would
# need a dedicated flow in libp2p that resolves the race conditions -
# this needs more thinking around the ordering of events and the

@@ -35,15 +35,6 @@ declareCounter beacon_blocks_proposed,
logScope: topics = "beacval"
# # TODO: This procedure follows insecure scheme of creating directory without
# # any permissions and writing file without any permissions.
# proc saveValidatorKey*(keyName, key: string, conf: BeaconNodeConf) =
# let validatorsDir = conf.validatorsDir
# let outputFile = validatorsDir / keyName
# createDir validatorsDir
# writeFile(outputFile, key)
# notice "Imported validator key", file = outputFile
proc checkValidatorInRegistry(state: BeaconState,
pubKey: ValidatorPubKey) =
let idx = state.validators.asSeq.findIt(it.pubKey == pubKey)

@@ -13,20 +13,21 @@
# Volume 2
- [Command line options](./options.md)
- [Troubleshooting](./troubleshooting.md)
- [Useful pre-genesis checks]()
- [Nimbus binaries](./binaries.md)
- [Metrics and pretty pictures](./metrics-pretty-pictures.md)
- [Network stats + monitoring](./eth2-stats.md)
- [Run your node on a Raspberry Pi](./pi-guide.md)
- [Graffiti the blockchain](./graffiti.md)
- [Your own Infura endpoint](infura-guide.md)
- [Run just the beacon node]()
- [Switch between clients]()
- [Your own Infura endpoint](infura-guide.md)
- [Useful pre-genesis checks]()
# Volume 3
- [Advanced options]()
- [Windows users]()
- [API](./api.md)
- [For developers](./developers.md)
- [Set up a systemd service](./beacon-node-systemd.md)
- [Windows users]()
- [FAQ](./faq.md)
- [Contribute](./contribute.md)
- [Resources](./resources.md)

@@ -0,0 +1,17 @@
# Nimbus binaries
Nimbus binaries exist for Linux -- initially `x86 64-bit`, but Windows, MacOS and additional Linux binaries will be added shortly.
You can find the latest release --`hope`-- here: [https://github.com/status-im/nimbus-eth2/releases/tag/v0.6.0](https://github.com/status-im/nimbus-eth2/releases/tag/v0.6.0)
Scroll to the bottom and click on `Assets`. You should see the following assets appear.
![](https://i.imgur.com/4FBhUpk.png)
Click on the first option: `nimbus-eth2_Linux_amd64_0.6.0_64838720.tar.gz`
And follow the instructions [here](https://github.com/status-im/nimbus-eth2/blob/master/docker/dist/README.md#running-a-medalla-node).
We've designed this binary to be reproducible: in practice, this means that anyone who wishes to can verify that no vulnerabilities or backdoors have been introduced during the compilation process. For more on the philosophy and importance of reproducible builds [see here](https://reproducible-builds.org/).
For instructions on how to reproduce the build, [see here](https://reproducible-builds.org/).

@@ -2,7 +2,7 @@
*Todo*
```
make NODE_PARAMS="--graffiti='<YOUR_GRAFFITI>'" medalla
./run-medalla-beacon-node.sh --graffiti="<YOUR_GRAFFITI>"
```

@@ -7,7 +7,7 @@
To import your signing key(s) into Nimbus, from the `nimbus-eth2` directory run:
```
build/nimbus_beacon_node deposits import --data-dir=build/data/medalla <YOUR VALIDATOR KEYS DIRECTORY>
build/nimbus_beacon_node_spec_0_12_3 deposits import --data-dir=build/data/shared_medalla_0 <YOUR VALIDATOR KEYS DIRECTORY>
```
@@ -21,7 +21,7 @@ To import your signing key(s) into Nimbus, from the `nimbus-eth2` directory run:
## Storage
When you import your keys into Nimbus, your validator signing key(s) are stored in the `build/data/medalla/` folder, under `secrets` and `validators` - **make sure you keep these folders backed up somewhere safe.**
When you import your keys into Nimbus, your validator signing key(s) are stored in the `build/data/shared_medalla_0/` folder, under `secrets` and `validators` - **make sure you keep these folders backed up somewhere safe.**
The `secrets` folder contains the common secret that gives you access to all your validator keys.

@@ -275,7 +275,7 @@ As usual, replace `195.177.101.93` with your Pi's IP address, and `<VALIDATOR_KE
To import your signing key into Nimbus, from the `nimbus-eth2` directory run:
```
build/nimbus_beacon_node deposits import --data-dir=build/data/medalla ../validator_keys
build/nimbus_beacon_node_spec_0_12_3 deposits import --data-dir=build/data/shared_medalla_0 ../validator_keys
```

@@ -21,12 +21,6 @@ import
from ../../beacon_chain/spec/beaconstate import process_registry_updates
# XXX: move to state_transition_epoch?
# TODO: parsing SSZ
# can overwrite the calling function stack
# https://github.com/status-im/nimbus-eth2/issues/369
#
# We store the state on the heap to avoid that
template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, useCache: static bool): untyped =
# We wrap the tests in a proc to avoid running out of globals
# in the future: Nim supports up to 3500 globals
@@ -39,6 +33,7 @@ template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, us
let unitTestName = testDir.rsplit(DirSep, 1)[1]
timedTest testName & " - " & unitTestName & preset():
# BeaconState objects are stored on the heap to avoid stack overflow
var preState = newClone(parseTest(testDir/"pre.ssz", SSZ, BeaconState))
let postState = newClone(parseTest(testDir/"post.ssz", SSZ, BeaconState))

@@ -1 +1 @@
Subproject commit 5f939fe1c91df330b2ea660fe35c5856e4de84c6
Subproject commit dbe50949196c2b259cc4971773bd6f7de9ddda99