Merge pull request #1259 from status-im/devel

merge "devel" into "master"
Ștefan Talpalaru 2020-06-30 00:45:04 +02:00 committed by GitHub
commit 4a2e180653
130 changed files with 3721 additions and 2906 deletions

.editorconfig (new file, 14 lines)

@ -0,0 +1,14 @@
root = true
[*.nim]
indent_style = space
indent_size = 2
[*.sh]
indent_style = space
indent_size = 2
[Makefile]
indent_size = 2
indent_style = tab

.gitmodules (vendored, 25 lines added)

@ -153,3 +153,28 @@
url = https://github.com/status-im/nim-rocksdb.git
ignore = dirty
branch = master
[submodule "vendor/asynctools"]
path = vendor/asynctools
url = https://github.com/cheatfate/asynctools.git
ignore = dirty
branch = master
[submodule "vendor/karax"]
path = vendor/karax
url = https://github.com/pragmagic/karax.git
ignore = dirty
branch = master
[submodule "vendor/jswebsockets"]
path = vendor/jswebsockets
url = https://github.com/stisa/jswebsockets.git
ignore = dirty
branch = master
[submodule "vendor/websocket.nim"]
path = vendor/websocket.nim
url = https://github.com/niv/websocket.nim.git
ignore = dirty
branch = master
[submodule "vendor/nim-chronicles-tail"]
path = vendor/nim-chronicles-tail
url = https://github.com/status-im/nim-chronicles-tail.git
ignore = dirty
branch = master
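The five vendored dependencies added above (asynctools, karax, jswebsockets, websocket.nim, nim-chronicles-tail) are ordinary Git submodules. Per the README change further down in this diff, the first `make` invocation (or `make update` after each `git pull`) takes care of fetching and updating them; a rough manual equivalent, assuming a plain git checkout, would be:

```bash
# fetch/refresh everything under vendor/, roughly what `make update` does
git submodule update --init --recursive
```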

.nvmrc (new file, 1 line)

@ -0,0 +1 @@
v13.12.0
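`.nvmrc` pins a Node.js version for whatever Node-based tooling the repository uses. With nvm installed, contributors can pick the pinned version up automatically; a minimal sketch, assuming `nvm` is available in the shell:

```bash
# run from the repository root; both commands read .nvmrc
nvm install   # installs v13.12.0 if it is missing
nvm use       # activates it for the current shell
```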

@ -1,268 +0,0 @@
AllTests-minimal
===
## Attestation pool processing [Preset: minimal]
```diff
+ Attestations may arrive in any order [Preset: minimal] OK
+ Attestations may overlap, bigger first [Preset: minimal] OK
+ Attestations may overlap, smaller first [Preset: minimal] OK
+ Attestations should be combined [Preset: minimal] OK
+ Can add and retrieve simple attestation [Preset: minimal] OK
+ Fork choice returns block with attestation OK
+ Fork choice returns latest block with no attestations OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## Beacon chain DB [Preset: minimal]
```diff
+ empty database [Preset: minimal] OK
+ find ancestors [Preset: minimal] OK
+ sanity check blocks [Preset: minimal] OK
+ sanity check genesis roundtrip [Preset: minimal] OK
+ sanity check states [Preset: minimal] OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
## Beacon node
```diff
+ Compile OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Beacon state [Preset: minimal]
```diff
+ Smoke test initialize_beacon_state_from_eth1 [Preset: minimal] OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Block pool processing [Preset: minimal]
```diff
+ Can add same block twice [Preset: minimal] OK
+ Reverse order block add & get [Preset: minimal] OK
+ Simple block add&get [Preset: minimal] OK
+ getRef returns nil for missing blocks OK
+ loadTailState gets genesis block on first load [Preset: minimal] OK
+ updateHead updates head and headState [Preset: minimal] OK
+ updateStateData sanity [Preset: minimal] OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## Block processing [Preset: minimal]
```diff
+ Attestation gets processed at epoch [Preset: minimal] OK
+ Passes from genesis state, empty block [Preset: minimal] OK
+ Passes from genesis state, no block [Preset: minimal] OK
+ Passes through epoch update, empty block [Preset: minimal] OK
+ Passes through epoch update, no block [Preset: minimal] OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
## BlockPool finalization tests [Preset: minimal]
```diff
+ init with gaps [Preset: minimal] OK
+ prune heads on finalization [Preset: minimal] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## BlockRef and helpers [Preset: minimal]
```diff
+ getAncestorAt sanity [Preset: minimal] OK
+ isAncestorOf sanity [Preset: minimal] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## BlockSlot and helpers [Preset: minimal]
```diff
+ atSlot sanity [Preset: minimal] OK
+ parent sanity [Preset: minimal] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Fork Choice + Finality [Preset: minimal]
```diff
+ fork_choice - testing finality #01 OK
+ fork_choice - testing finality #02 OK
+ fork_choice - testing no votes OK
+ fork_choice - testing with votes OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## Honest validator
```diff
+ General pubsub topics: OK
+ Mainnet attestation topics OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Interop
```diff
+ Interop genesis OK
+ Interop signatures OK
+ Mocked start private key OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Keystore
```diff
+ Pbkdf2 decryption OK
+ Pbkdf2 encryption OK
+ Pbkdf2 errors OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Mocking utilities
```diff
+ merkle_minimal OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Official - constants & config [Preset: minimal]
```diff
+ BASE_REWARD_FACTOR 64 [Preset: minimal] OK
+ BLS_WITHDRAWAL_PREFIX "0x00" [Preset: minimal] OK
+ CHURN_LIMIT_QUOTIENT 65536 [Preset: minimal] OK
+ CUSTODY_PERIOD_TO_RANDAO_PADDING 2048 [Preset: minimal] OK
DEPOSIT_CONTRACT_ADDRESS "0x1234567890123456789012345678901234567 Skip
+ DOMAIN_AGGREGATE_AND_PROOF "0x06000000" [Preset: minimal] OK
+ DOMAIN_BEACON_ATTESTER "0x01000000" [Preset: minimal] OK
+ DOMAIN_BEACON_PROPOSER "0x00000000" [Preset: minimal] OK
+ DOMAIN_CUSTODY_BIT_SLASHING "0x83000000" [Preset: minimal] OK
+ DOMAIN_DEPOSIT "0x03000000" [Preset: minimal] OK
+ DOMAIN_LIGHT_CLIENT "0x82000000" [Preset: minimal] OK
+ DOMAIN_RANDAO "0x02000000" [Preset: minimal] OK
+ DOMAIN_SELECTION_PROOF "0x05000000" [Preset: minimal] OK
+ DOMAIN_SHARD_COMMITTEE "0x81000000" [Preset: minimal] OK
+ DOMAIN_SHARD_PROPOSAL "0x80000000" [Preset: minimal] OK
+ DOMAIN_VOLUNTARY_EXIT "0x04000000" [Preset: minimal] OK
+ EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS 4096 [Preset: minimal] OK
+ EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE 2 [Preset: minimal] OK
+ EFFECTIVE_BALANCE_INCREMENT 1000000000 [Preset: minimal] OK
+ EJECTION_BALANCE 16000000000 [Preset: minimal] OK
+ EPOCHS_PER_CUSTODY_PERIOD 2048 [Preset: minimal] OK
+ EPOCHS_PER_ETH1_VOTING_PERIOD 4 [Preset: minimal] OK
+ EPOCHS_PER_HISTORICAL_VECTOR 64 [Preset: minimal] OK
+ EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION 256 [Preset: minimal] OK
+ EPOCHS_PER_SLASHINGS_VECTOR 64 [Preset: minimal] OK
+ ETH1_FOLLOW_DISTANCE 16 [Preset: minimal] OK
+ GASPRICE_ADJUSTMENT_COEFFICIENT 8 [Preset: minimal] OK
+ GENESIS_DELAY 300 [Preset: minimal] OK
GENESIS_FORK_VERSION "0x00000001" [Preset: minimal] Skip
+ HISTORICAL_ROOTS_LIMIT 16777216 [Preset: minimal] OK
+ HYSTERESIS_DOWNWARD_MULTIPLIER 1 [Preset: minimal] OK
+ HYSTERESIS_QUOTIENT 4 [Preset: minimal] OK
+ HYSTERESIS_UPWARD_MULTIPLIER 5 [Preset: minimal] OK
+ INACTIVITY_PENALTY_QUOTIENT 16777216 [Preset: minimal] OK
+ INITIAL_ACTIVE_SHARDS 4 [Preset: minimal] OK
+ LIGHT_CLIENT_COMMITTEE_PERIOD 256 [Preset: minimal] OK
+ LIGHT_CLIENT_COMMITTEE_SIZE 128 [Preset: minimal] OK
+ MAX_ATTESTATIONS 128 [Preset: minimal] OK
+ MAX_ATTESTER_SLASHINGS 2 [Preset: minimal] OK
+ MAX_COMMITTEES_PER_SLOT 4 [Preset: minimal] OK
+ MAX_CUSTODY_KEY_REVEALS 256 [Preset: minimal] OK
+ MAX_CUSTODY_SLASHINGS 1 [Preset: minimal] OK
+ MAX_DEPOSITS 16 [Preset: minimal] OK
+ MAX_EARLY_DERIVED_SECRET_REVEALS 1 [Preset: minimal] OK
+ MAX_EFFECTIVE_BALANCE 32000000000 [Preset: minimal] OK
+ MAX_EPOCHS_PER_CROSSLINK 4 [Preset: minimal] OK
+ MAX_GASPRICE 16384 [Preset: minimal] OK
+ MAX_PROPOSER_SLASHINGS 16 [Preset: minimal] OK
+ MAX_REVEAL_LATENESS_DECREMENT 128 [Preset: minimal] OK
+ MAX_SEED_LOOKAHEAD 4 [Preset: minimal] OK
+ MAX_SHARDS 8 [Preset: minimal] OK
+ MAX_SHARD_BLOCKS_PER_ATTESTATION 12 [Preset: minimal] OK
+ MAX_SHARD_BLOCK_CHUNKS 4 [Preset: minimal] OK
+ MAX_VALIDATORS_PER_COMMITTEE 2048 [Preset: minimal] OK
+ MAX_VOLUNTARY_EXITS 16 [Preset: minimal] OK
+ MINOR_REWARD_QUOTIENT 256 [Preset: minimal] OK
+ MIN_ATTESTATION_INCLUSION_DELAY 1 [Preset: minimal] OK
+ MIN_DEPOSIT_AMOUNT 1000000000 [Preset: minimal] OK
+ MIN_EPOCHS_TO_INACTIVITY_PENALTY 4 [Preset: minimal] OK
+ MIN_GASPRICE 8 [Preset: minimal] OK
+ MIN_GENESIS_ACTIVE_VALIDATOR_COUNT 64 [Preset: minimal] OK
+ MIN_GENESIS_TIME 1578009600 [Preset: minimal] OK
+ MIN_PER_EPOCH_CHURN_LIMIT 4 [Preset: minimal] OK
+ MIN_SEED_LOOKAHEAD 1 [Preset: minimal] OK
+ MIN_SLASHING_PENALTY_QUOTIENT 32 [Preset: minimal] OK
+ MIN_VALIDATOR_WITHDRAWABILITY_DELAY 256 [Preset: minimal] OK
+ ONLINE_PERIOD 8 [Preset: minimal] OK
+ PHASE_1_FORK_VERSION "0x01000001" [Preset: minimal] OK
+ PHASE_1_GENESIS_SLOT 8 [Preset: minimal] OK
+ PROPOSER_REWARD_QUOTIENT 8 [Preset: minimal] OK
+ RANDAO_PENALTY_EPOCHS 2 [Preset: minimal] OK
+ RANDOM_SUBNETS_PER_VALIDATOR 1 [Preset: minimal] OK
+ SAFE_SLOTS_TO_UPDATE_JUSTIFIED 2 [Preset: minimal] OK
+ SECONDS_PER_ETH1_BLOCK 14 [Preset: minimal] OK
+ SECONDS_PER_SLOT 6 [Preset: minimal] OK
+ SHARD_BLOCK_CHUNK_SIZE 262144 [Preset: minimal] OK
SHARD_BLOCK_OFFSETS [1,2,3,5,8,13,21,34,55,89,144,233] [Pres Skip
+ SHARD_COMMITTEE_PERIOD 64 [Preset: minimal] OK
+ SHUFFLE_ROUND_COUNT 10 [Preset: minimal] OK
+ SLOTS_PER_EPOCH 8 [Preset: minimal] OK
+ SLOTS_PER_HISTORICAL_ROOT 64 [Preset: minimal] OK
+ TARGET_AGGREGATORS_PER_COMMITTEE 16 [Preset: minimal] OK
+ TARGET_COMMITTEE_SIZE 4 [Preset: minimal] OK
+ TARGET_SHARD_BLOCK_SIZE 196608 [Preset: minimal] OK
+ VALIDATOR_REGISTRY_LIMIT 1099511627776 [Preset: minimal] OK
+ WHISTLEBLOWER_REWARD_QUOTIENT 512 [Preset: minimal] OK
```
OK: 83/86 Fail: 0/86 Skip: 3/86
## PeerPool testing suite
```diff
+ Access peers by key test OK
+ Acquire from empty pool OK
+ Acquire/Sorting and consistency test OK
+ Iterators test OK
+ Peer lifetime test OK
+ Safe/Clear test OK
+ Score check test OK
+ addPeer() test OK
+ addPeerNoWait() test OK
+ deletePeer() test OK
```
OK: 10/10 Fail: 0/10 Skip: 0/10
## SSZ dynamic navigator
```diff
+ navigating fields OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## SSZ navigator
```diff
+ basictype OK
+ lists with max size OK
+ simple object fields OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Spec helpers
```diff
+ integer_squareroot OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Sync protocol
```diff
+ Compile OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Zero signature sanity checks
```diff
+ SSZ serialization roundtrip of SignedBeaconBlockHeader OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## [Unit - Spec - Block processing] Attestations [Preset: minimal]
```diff
+ Valid attestation OK
+ Valid attestation from previous epoch OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## [Unit - Spec - Block processing] Deposits [Preset: minimal]
```diff
+ Deposit at MAX_EFFECTIVE_BALANCE balance (32 ETH) OK
+ Deposit over MAX_EFFECTIVE_BALANCE balance (32 ETH) OK
+ Deposit under MAX_EFFECTIVE_BALANCE balance (32 ETH) OK
+ Validator top-up OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## [Unit - Spec - Epoch processing] Justification and Finalization [Preset: minimal]
```diff
+ Rule I - 234 finalization with enough support OK
+ Rule I - 234 finalization without support OK
+ Rule II - 23 finalization with enough support OK
+ Rule II - 23 finalization without support OK
+ Rule III - 123 finalization with enough support OK
+ Rule III - 123 finalization without support OK
+ Rule IV - 12 finalization with enough support OK
+ Rule IV - 12 finalization without support OK
```
OK: 8/8 Fail: 0/8 Skip: 0/8
## hash
```diff
+ HashArray OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 160/163 Fail: 0/163 Skip: 3/163

@ -1,183 +0,0 @@
FixtureAll-minimal
===
## Official - Epoch Processing - Final updates [Preset: minimal]
```diff
+ Final updates - effective_balance_hysteresis [Preset: minimal] OK
+ Final updates - eth1_vote_no_reset [Preset: minimal] OK
+ Final updates - eth1_vote_reset [Preset: minimal] OK
+ Final updates - historical_root_accumulator [Preset: minimal] OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## Official - Epoch Processing - Justification & Finalization [Preset: minimal]
```diff
+ Justification & Finalization - 123_ok_support [Preset: minimal] OK
+ Justification & Finalization - 123_poor_support [Preset: minimal] OK
+ Justification & Finalization - 12_ok_support [Preset: minimal] OK
+ Justification & Finalization - 12_ok_support_messed_target [Preset: minimal] OK
+ Justification & Finalization - 12_poor_support [Preset: minimal] OK
+ Justification & Finalization - 234_ok_support [Preset: minimal] OK
+ Justification & Finalization - 234_poor_support [Preset: minimal] OK
+ Justification & Finalization - 23_ok_support [Preset: minimal] OK
+ Justification & Finalization - 23_poor_support [Preset: minimal] OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
## Official - Epoch Processing - Registry updates [Preset: minimal]
```diff
+ Registry updates - activation_queue_activation_and_ejection [Preset: minimal] OK
+ Registry updates - activation_queue_efficiency [Preset: minimal] OK
+ Registry updates - activation_queue_no_activation_no_finality [Preset: minimal] OK
+ Registry updates - activation_queue_sorting [Preset: minimal] OK
+ Registry updates - activation_queue_to_activated_if_finalized [Preset: minimal] OK
+ Registry updates - add_to_activation_queue [Preset: minimal] OK
+ Registry updates - ejection [Preset: minimal] OK
+ Registry updates - ejection_past_churn_limit [Preset: minimal] OK
```
OK: 8/8 Fail: 0/8 Skip: 0/8
## Official - Epoch Processing - Slashings [Preset: minimal]
```diff
+ Slashings - max_penalties [Preset: minimal] OK
+ Slashings - scaled_penalties [Preset: minimal] OK
+ Slashings - small_penalty [Preset: minimal] OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Official - Operations - Attestations [Preset: minimal]
```diff
+ [Invalid] after_epoch_slots OK
+ [Invalid] bad_source_root OK
+ [Invalid] before_inclusion_delay OK
+ [Invalid] empty_participants_seemingly_valid_sig OK
+ [Invalid] empty_participants_zeroes_sig OK
+ [Invalid] future_target_epoch OK
+ [Invalid] invalid_attestation_signature OK
+ [Invalid] invalid_current_source_root OK
+ [Invalid] invalid_index OK
+ [Invalid] mismatched_target_and_slot OK
+ [Invalid] new_source_epoch OK
+ [Invalid] old_source_epoch OK
+ [Invalid] old_target_epoch OK
+ [Invalid] source_root_is_target_root OK
+ [Invalid] too_few_aggregation_bits OK
+ [Invalid] too_many_aggregation_bits OK
+ [Invalid] wrong_index_for_committee_signature OK
+ [Invalid] wrong_index_for_slot OK
+ [Valid] success OK
+ [Valid] success_multi_proposer_index_iterations OK
+ [Valid] success_previous_epoch OK
```
OK: 21/21 Fail: 0/21 Skip: 0/21
## Official - Operations - Attester slashing [Preset: minimal]
```diff
+ [Invalid] att1_bad_extra_index OK
+ [Invalid] att1_bad_replaced_index OK
+ [Invalid] att1_duplicate_index_double_signed OK
+ [Invalid] att1_duplicate_index_normal_signed OK
+ [Invalid] att2_bad_extra_index OK
+ [Invalid] att2_bad_replaced_index OK
+ [Invalid] att2_duplicate_index_double_signed OK
+ [Invalid] att2_duplicate_index_normal_signed OK
+ [Invalid] invalid_sig_1 OK
+ [Invalid] invalid_sig_1_and_2 OK
+ [Invalid] invalid_sig_2 OK
+ [Invalid] no_double_or_surround OK
+ [Invalid] participants_already_slashed OK
+ [Invalid] same_data OK
+ [Invalid] unsorted_att_1 OK
+ [Invalid] unsorted_att_2 OK
+ [Valid] success_already_exited_long_ago OK
+ [Valid] success_already_exited_recent OK
+ [Valid] success_double OK
+ [Valid] success_surround OK
```
OK: 20/20 Fail: 0/20 Skip: 0/20
## Official - Operations - Block header [Preset: minimal]
```diff
+ [Invalid] invalid_multiple_blocks_single_slot OK
+ [Invalid] invalid_parent_root OK
+ [Invalid] invalid_proposer_index OK
+ [Invalid] invalid_slot_block_header OK
+ [Invalid] proposer_slashed OK
+ [Valid] success_block_header OK
```
OK: 6/6 Fail: 0/6 Skip: 0/6
## Official - Operations - Deposits [Preset: minimal]
```diff
+ [Invalid] bad_merkle_proof OK
+ [Invalid] wrong_deposit_for_deposit_count OK
+ [Valid] invalid_sig_new_deposit OK
+ [Valid] invalid_sig_other_version OK
+ [Valid] invalid_sig_top_up OK
+ [Valid] invalid_withdrawal_credentials_top_up OK
+ [Valid] new_deposit_max OK
+ [Valid] new_deposit_over_max OK
+ [Valid] new_deposit_under_max OK
+ [Valid] success_top_up OK
+ [Valid] valid_sig_but_forked_state OK
```
OK: 11/11 Fail: 0/11 Skip: 0/11
## Official - Operations - Proposer slashing [Preset: minimal]
```diff
+ [Invalid] identifier OK
+ [Valid] identifier OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Official - Operations - Voluntary exit [Preset: minimal]
```diff
+ [Invalid] invalid_signature OK
+ [Invalid] validator_already_exited OK
+ [Invalid] validator_exit_in_future OK
+ [Invalid] validator_invalid_validator_index OK
+ [Invalid] validator_not_active OK
+ [Invalid] validator_not_active_long_enough OK
+ [Valid] default_exit_epoch_subsequent_exit OK
+ [Valid] success OK
+ [Valid] success_exit_queue OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
## Official - Sanity - Blocks [Preset: minimal]
```diff
+ [Invalid] double_same_proposer_slashings_same_block OK
+ [Invalid] double_similar_proposer_slashings_same_block OK
+ [Invalid] double_validator_exit_same_block OK
+ [Invalid] duplicate_attester_slashing OK
+ [Invalid] expected_deposit_in_block OK
+ [Invalid] invalid_block_sig OK
+ [Invalid] invalid_proposer_index_sig_from_expected_proposer OK
+ [Invalid] invalid_proposer_index_sig_from_proposer_index OK
+ [Invalid] invalid_state_root OK
+ [Invalid] parent_from_same_slot OK
+ [Invalid] prev_slot_block_transition OK
+ [Invalid] proposal_for_genesis_slot OK
+ [Invalid] same_slot_block_transition OK
+ [Invalid] zero_block_sig OK
+ [Valid] attestation OK
+ [Valid] attester_slashing OK
+ [Valid] balance_driven_status_transitions OK
+ [Valid] deposit_in_block OK
+ [Valid] deposit_top_up OK
+ [Valid] empty_block_transition OK
+ [Valid] empty_epoch_transition OK
+ [Valid] empty_epoch_transition_not_finalizing OK
+ [Valid] high_proposer_index OK
+ [Valid] historical_batch OK
+ [Valid] multiple_attester_slashings_no_overlap OK
+ [Valid] multiple_attester_slashings_partial_overlap OK
+ [Valid] multiple_different_proposer_slashings_same_block OK
+ [Valid] multiple_different_validator_exits_same_block OK
+ [Valid] proposer_after_inactive_index OK
+ [Valid] proposer_slashing OK
+ [Valid] skipped_slots OK
+ [Valid] voluntary_exit OK
```
OK: 32/32 Fail: 0/32 Skip: 0/32
## Official - Sanity - Slots [Preset: minimal]
```diff
+ Slots - double_empty_epoch OK
+ Slots - empty_epoch OK
+ Slots - over_epoch_boundary OK
+ Slots - slots_1 OK
+ Slots - slots_2 OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
---TOTAL---
OK: 130/130 Fail: 0/130 Skip: 0/130

@ -1,36 +0,0 @@
FixtureSSZConsensus-minimal
===
## Official - SSZ consensus objects [Preset: minimal]
```diff
+ Testing AggregateAndProof OK
+ Testing Attestation OK
+ Testing AttestationData OK
+ Testing AttesterSlashing OK
+ Testing BeaconBlock OK
+ Testing BeaconBlockBody OK
+ Testing BeaconBlockHeader OK
+ Testing BeaconState OK
+ Testing Checkpoint OK
+ Testing Deposit OK
+ Testing DepositData OK
+ Testing DepositMessage OK
+ Testing Eth1Block OK
+ Testing Eth1Data OK
+ Testing Fork OK
+ Testing ForkData OK
+ Testing HistoricalBatch OK
+ Testing IndexedAttestation OK
+ Testing PendingAttestation OK
+ Testing ProposerSlashing OK
+ Testing SignedAggregateAndProof OK
+ Testing SignedBeaconBlock OK
+ Testing SignedBeaconBlockHeader OK
+ Testing SignedVoluntaryExit OK
+ Testing SigningData OK
+ Testing Validator OK
+ Testing VoluntaryExit OK
```
OK: 27/27 Fail: 0/27 Skip: 0/27
---TOTAL---
OK: 27/27 Fail: 0/27 Skip: 0/27

@ -1,21 +0,0 @@
FixtureSSZGeneric-minimal
===
## Official - SSZ generic types
```diff
Testing basic_vector inputs - invalid - skipping Vector[uint128, N] and Vector[uint256, N] Skip
+ Testing basic_vector inputs - valid - skipping Vector[uint128, N] and Vector[uint256, N] OK
+ Testing bitlist inputs - invalid OK
+ Testing bitlist inputs - valid OK
Testing bitvector inputs - invalid Skip
+ Testing bitvector inputs - valid OK
+ Testing boolean inputs - invalid OK
+ Testing boolean inputs - valid OK
+ Testing containers inputs - invalid - skipping BitsStruct OK
+ Testing containers inputs - valid - skipping BitsStruct OK
+ Testing uints inputs - invalid - skipping uint128 and uint256 OK
+ Testing uints inputs - valid - skipping uint128 and uint256 OK
```
OK: 10/12 Fail: 0/12 Skip: 2/12
---TOTAL---
OK: 10/12 Fail: 0/12 Skip: 2/12

@ -15,23 +15,25 @@ BUILD_SYSTEM_DIR := vendor/nimbus-build-system
# unconditionally built by the default Make target
TOOLS := \
validator_client \
beacon_node \
block_sim \
deposit_contract \
inspector \
logtrace \
deposit_contract \
nbench \
nbench_spec_scenarios \
ncli_db \
ncli_hash_tree_root \
ncli_pretty \
ncli_query \
ncli_transition \
ncli_db \
process_dashboard \
stack_sizes \
state_sim \
block_sim \
nbench \
nbench_spec_scenarios
# bench_bls_sig_agggregation TODO reenable after bls v0.10.1 changes
validator_client
# bench_bls_sig_agggregation TODO reenable after bls v0.10.1 changes
TOOLS_DIRS := \
beacon_chain \
benchmarks \
@ -47,7 +49,7 @@ TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS))
update \
test \
$(TOOLS) \
clean_eth2_network_simulation_files \
clean_eth2_network_simulation_all \
eth2_network_simulation \
clean-testnet0 \
testnet0 \
@ -55,12 +57,6 @@ TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS))
testnet1 \
clean \
libbacktrace \
clean-schlesi \
schlesi \
schlesi-dev \
clean-witti \
witti \
witti-dev \
book \
publish-book
@ -99,7 +95,7 @@ else
NIM_PARAMS := $(NIM_PARAMS) -d:release
endif
deps: | deps-common beacon_chain.nims
deps: | deps-common nat-libs beacon_chain.nims
ifneq ($(USE_LIBBACKTRACE), 0)
deps: | libbacktrace
endif
@ -131,11 +127,14 @@ $(TOOLS): | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim c -o:build/$@ $(NIM_PARAMS) "$${TOOL_DIR}/$@.nim"
clean_eth2_network_simulation_files:
clean_eth2_network_simulation_data:
rm -rf tests/simulation/data
clean_eth2_network_simulation_all:
rm -rf tests/simulation/{data,validators}
eth2_network_simulation: | build deps clean_eth2_network_simulation_files
+ GIT_ROOT="$$PWD" NIMFLAGS="$(NIMFLAGS)" LOG_LEVEL="$(LOG_LEVEL)" tests/simulation/start.sh
eth2_network_simulation: | build deps clean_eth2_network_simulation_data
+ GIT_ROOT="$$PWD" NIMFLAGS="$(NIMFLAGS)" LOG_LEVEL="$(LOG_LEVEL)" tests/simulation/start-in-tmux.sh
clean-testnet0:
rm -rf build/data/testnet0*
@ -147,25 +146,24 @@ clean-testnet1:
# - try SCRIPT_PARAMS="--skipGoerliKey"
testnet0 testnet1: | build deps
source scripts/$@.env; \
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) --const-preset=$$CONST_PRESET --dev-build $@
NIM_PARAMS="$(NIM_PARAMS)" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) --const-preset=$$CONST_PRESET --dev-build $@
clean-schlesi:
rm -rf build/data/shared_schlesi*
clean-altona:
rm -rf build/data/shared_altona*
schlesi: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/schlesi
altona: | build deps
NIM_PARAMS="$(NIM_PARAMS)" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/altona
schlesi-dev: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/schlesi
altona-dev: | build deps
NIM_PARAMS="$(NIM_PARAMS)" LOG_LEVEL="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/altona
clean-witti:
rm -rf build/data/shared_witti*
ctail: | build deps
mkdir -p vendor/.nimble/bin/
$(ENV_SCRIPT) nim -d:danger -o:vendor/.nimble/bin/ctail c vendor/nim-chronicles-tail/ctail.nim
witti: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/witti
witti-dev: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/witti
ntu: | build deps
mkdir -p vendor/.nimble/bin/
$(ENV_SCRIPT) nim -d:danger -o:vendor/.nimble/bin/ntu c vendor/nim-testutils/ntu.nim
clean: | clean-common
rm -rf build/{$(TOOLS_CSV),all_tests,*_node,*ssz*,beacon_node_*,block_sim,state_sim,transition*}
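With the Schlesi and Witti targets removed in favour of Altona, connecting to the new testnet should reduce to the targets added above; a sketch, assuming dependencies have already been built:

```bash
make altona        # connect to the shared Altona testnet
make altona-dev    # same, with TRACE logging for discv5/networking
make clean-altona  # wipe build/data/shared_altona*
```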

@ -1,4 +1,5 @@
# Nimbus Eth2 (Beacon Chain)
[![Build Status (Travis)](https://img.shields.io/travis/status-im/nim-beacon-chain/master.svg?label=Linux%20/%20macOS "Linux/macOS build status (Travis)")](https://travis-ci.org/status-im/nim-beacon-chain)
[![Build Status (Azure)](https://dev.azure.com/nimbus-dev/nim-beacon-chain/_apis/build/status/status-im.nim-beacon-chain?branchName=master)](https://dev.azure.com/nimbus-dev/nim-beacon-chain/_build/latest?definitionId=3&branchName=master)
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
@ -6,46 +7,52 @@
![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)
[![Discord: Nimbus](https://img.shields.io/badge/discord-nimbus-orange.svg)](https://discord.gg/XRxWahP)
[![Gitter: #status-im/nimbus](https://img.shields.io/badge/gitter-status--im%2Fnimbus-orange.svg)](https://gitter.im/status-im/nimbus)
[![Status: #nimbus-general](https://img.shields.io/badge/status-nimbus--general-orange.svg)](https://join.status.im/nimbus-general)
Welcome to Nimbus for Ethereum 2.0.
Nimbus beacon chain is a research implementation of the beacon chain component of the upcoming Ethereum Serenity upgrade, aka Eth2.
## Related
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Documentation](#documentation)
- [Related](#related)
- [Prerequisites for everyone](#prerequisites-for-everyone)
- [Linux](#linux)
- [MacOS](#macos)
- [Windows](#windows)
- [Android](#android)
- [For users](#for-users)
- [Connecting to testnets](#connecting-to-testnets)
- [Getting metrics from a local testnet client](#getting-metrics-from-a-local-testnet-client)
- [Interop (for other Eth2 clients)](#interop-for-other-eth2-clients)
- [For researchers](#for-researchers)
- [State transition simulation](#state-transition-simulation)
- [Local network simulation](#local-network-simulation)
- [Visualising simulation metrics](#visualising-simulation-metrics)
- [Network inspection](#network-inspection)
- [For developers](#for-developers)
- [Windows dev environment](#windows-dev-environment)
- [Linux, MacOS](#linux-macos)
- [Raspberry Pi](#raspberry-pi)
- [Makefile tips and tricks for developers](#makefile-tips-and-tricks-for-developers)
- [CI setup](#ci-setup)
- [License](#license)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Documentation
You can find complete information about running a beacon node and operating as a validator in [The Book](https://status-im.github.io/nim-beacon-chain/).
## Related projects
* [status-im/nimbus](https://github.com/status-im/nimbus/): Nimbus for Ethereum 1
* [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/tree/v0.12.1#phase-0): Serenity specification that this project implements
You can check where the beacon chain fits in the Ethereum ecosystem in our Two-Point-Oh series: https://our.status.im/tag/two-point-oh/
## Table of Contents
- [Nimbus Eth2 (Beacon Chain)](#nimbus-eth2-beacon-chain)
- [Related](#related)
- [Table of Contents](#table-of-contents)
- [Prerequisites for everyone](#prerequisites-for-everyone)
- [Linux](#linux)
- [MacOS](#macos)
- [Windows](#windows)
- [For users](#for-users)
- [Connecting to testnets](#connecting-to-testnets)
- [Getting metrics from a local testnet client](#getting-metrics-from-a-local-testnet-client)
- [Interop (for other Eth2 clients)](#interop-for-other-eth2-clients)
- [For researchers](#for-researchers)
- [State transition simulation](#state-transition-simulation)
- [Local network simulation](#local-network-simulation)
- [Visualising simulation metrics](#visualising-simulation-metrics)
- [Network inspection](#network-inspection)
- [For developers](#for-developers)
- [Windows dev environment](#windows-dev-environment)
- [Linux, MacOS](#linux-macos)
- [Raspberry Pi](#raspberry-pi)
- [Makefile tips and tricks for developers](#makefile-tips-and-tricks-for-developers)
- [CI setup](#ci-setup)
- [License](#license)
## Prerequisites for everyone
At the moment, Nimbus has to be built from source.
@ -191,6 +198,21 @@ make VALIDATORS=192 NODES=6 USER_NODES=1 eth2_network_simulation
# looks like from a single nodes' perspective.
```
By default, all validators are loaded within the beacon nodes. If you want to use
external processes as validator clients instead, pass `BN_VC_VALIDATOR_SPLIT=yes` as an
additional argument to the `make eth2_network_simulation` command; this splits the
`VALIDATORS` between beacon nodes and validator clients. For example, with `192`
validators and `6` nodes you end up with 6 beacon node and 6 validator client
processes, each handling 16 validators (see the sketch below).
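For instance, a split simulation run with those numbers might look like this (all variables come from the Makefile and simulation scripts referenced earlier):

```bash
# 6 beacon nodes + 6 validator clients, 16 validators per process
make VALIDATORS=192 NODES=6 USER_NODES=1 BN_VC_VALIDATOR_SPLIT=yes eth2_network_simulation
```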
By default, the simulation starts from a pre-generated genesis state. If you wish to
simulate the bootstrap process with an Ethereum 1.0 validator deposit contract, start the
simulation with `WAIT_GENESIS=yes`:
```bash
make eth2_network_simulation WAIT_GENESIS=yes
```
You can also separate the output from each beacon node in its own panel, using [multitail](http://www.vanheusden.com/multitail/):
```bash
@ -296,7 +318,9 @@ After cloning the repo:
```bash
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date.
make
# Build beacon_node and all the tools, using 4 parallel Make jobs
make -j4
# Run tests
make test
@ -411,4 +435,4 @@ or
* Apache License, Version 2.0, ([LICENSE-APACHEv2](LICENSE-APACHEv2) or http://www.apache.org/licenses/LICENSE-2.0)
at your option. These files may not be copied, modified, or distributed except according to those terms.
at your option. These files may not be copied, modified, or distributed except according to those terms.

@ -1,7 +1,7 @@
jobs:
- job: Windows
timeoutInMinutes: 80
timeoutInMinutes: 90
pool:
vmImage: windows-latest

@ -49,7 +49,7 @@ proc buildAndRunBinary(name: string, srcDir = "./", params = "", cmdParams = "",
task moduleTests, "Run all module tests":
buildAndRunBinary "beacon_node", "beacon_chain/",
"-d:chronicles_log_level=TRACE " &
"-d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\" " &
"-d:const_preset=minimal " &
"-d:testutils_test_build"
### tasks
@ -58,27 +58,23 @@ task test, "Run all tests":
# pieces of code get tested regularly. Increased test output verbosity is the
# price we pay for that.
# Just the part of minimal config which explicitly differs from mainnet
buildAndRunBinary "test_fixture_const_sanity_check", "tests/official/", "-d:const_preset=minimal"
# Mainnet config
buildAndRunBinary "proto_array", "beacon_chain/fork_choice/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "fork_choice", "beacon_chain/fork_choice/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "all_tests", "tests/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "proto_array", "beacon_chain/fork_choice/", "-d:const_preset=mainnet"
buildAndRunBinary "fork_choice", "beacon_chain/fork_choice/", "-d:const_preset=mainnet"
buildAndRunBinary "all_tests", "tests/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet"
# Generic SSZ test, doesn't use consensus objects minimal/mainnet presets
buildAndRunBinary "test_fixture_ssz_generic_types", "tests/official/", "-d:chronicles_log_level=TRACE"
# Consensus object SSZ tests
# 0.11.3
buildAndRunBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.11.3\""
buildAndRunBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet"
# 0.12.1
buildAndRunBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
# 0.11.3
buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.11.3\""
# 0.12.1
buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
# State sim; getting into 4th epoch useful to trigger consensus checks
buildAndRunBinary "state_sim", "research/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\"", "--validators=2000 --slots=128"
buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet"
# State and block sims; getting to 4th epoch triggers consensus checks
buildAndRunBinary "state_sim", "research/", "-d:const_preset=mainnet", "--validators=2000 --slots=128"
buildAndRunBinary "block_sim", "research/", "-d:const_preset=mainnet", "--validators=2000 --slots=128"

@ -10,8 +10,10 @@
import
options, chronicles,
./spec/[
beaconstate, datatypes, crypto, digest, helpers, validator, signatures],
./block_pool, ./attestation_pool, ./beacon_node_types, ./ssz
beaconstate, datatypes, crypto, digest, helpers, network, validator,
signatures],
./block_pool, ./block_pools/candidate_chains, ./attestation_pool,
./beacon_node_types, ./ssz
logScope:
topics = "att_aggr"
@ -73,22 +75,14 @@ proc aggregate_attestations*(
none(AggregateAndProof)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#attestation-subnets
proc isValidAttestation*(
pool: AttestationPool, attestation: Attestation, current_slot: Slot,
pool: var AttestationPool, attestation: Attestation, current_slot: Slot,
topicCommitteeIndex: uint64): bool =
logScope:
topics = "att_aggr valid_att"
received_attestation = shortLog(attestation)
# The attestation's committee index (attestation.data.index) is for the
# correct subnet.
if attestation.data.index != topicCommitteeIndex:
debug "attestation's committee index not for the correct subnet",
topicCommitteeIndex = topicCommitteeIndex
return false
if not (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >=
current_slot and current_slot >= attestation.data.slot):
debug "attestation.data.slot not within ATTESTATION_PROPAGATION_SLOT_RANGE"
@ -133,19 +127,36 @@ proc isValidAttestation*(
# TODO: consider a "slush pool" of attestations whose blocks have not yet
# propagated - i.e. imagine that attestations are smaller than blocks and
# therefore propagate faster, thus reordering their arrival in some nodes
if pool.blockPool.get(attestation.data.beacon_block_root).isNone():
let attestationBlck = pool.blockPool.getRef(attestation.data.beacon_block_root)
if attestationBlck.isNil:
debug "block doesn't exist in block pool"
pool.blockPool.addMissing(attestation.data.beacon_block_root)
return false
# The signature of attestation is valid.
# TODO need to know above which validator anyway, and this is too general
# as it supports aggregated attestations (which this can't be)
var cache = get_empty_per_epoch_cache()
if not is_valid_indexed_attestation(
pool.blockPool.headState.data.data,
get_indexed_attestation(
pool.blockPool.headState.data.data, attestation, cache), {}):
debug "signature verification failed"
return false
pool.blockPool.withState(
pool.blockPool.tmpState,
BlockSlot(blck: attestationBlck, slot: attestation.data.slot)):
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#attestation-subnets
# [REJECT] The attestation is for the correct subnet (i.e.
# compute_subnet_for_attestation(state, attestation) == subnet_id).
let
epochInfo = blck.getEpochInfo(state)
requiredSubnetIndex =
compute_subnet_for_attestation(
epochInfo.shuffled_active_validator_indices.len.uint64, attestation)
if requiredSubnetIndex != topicCommitteeIndex:
debug "isValidAttestation: attestation's committee index not for the correct subnet",
topicCommitteeIndex = topicCommitteeIndex,
attestation_data_index = attestation.data.index,
requiredSubnetIndex = requiredSubnetIndex
return false
# The signature of attestation is valid.
var cache = getEpochCache(blck, state)
if not is_valid_indexed_attestation(
state, get_indexed_attestation(state, attestation, cache), {}):
debug "signature verification failed"
return false
true

@ -61,7 +61,7 @@ proc slotIndex(
doAssert attestationSlot >= pool.startingSlot,
"""
We should have checked in validate that attestation is newer than
We should have checked in addResolved that attestation is newer than
finalized_slot and we never prune things before that, per below condition!
""" &
", attestationSlot: " & $shortLog(attestationSlot) &
@ -145,6 +145,16 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
attestation = shortLog(attestation),
blockSlot = shortLog(blck.slot)
return
if attestation.data.slot < pool.startingSlot:
# It can happen that attestations in blocks for example are included even
# though they no longer are relevant for finalization - let's clear
# these out
debug "Old attestation",
attestation = shortLog(attestation),
startingSlot = pool.startingSlot
return
# if not isValidAttestationSlot(attestation.data.slot, blck.slot):
# # Logging in isValidAttestationSlot
# return
@ -161,9 +171,7 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
# on the state and those that don't to cheaply
# discard invalid attestations before rewinding state.
# TODO: stateCache usage
var stateCache = get_empty_per_epoch_cache()
if not isValidAttestationTargetEpoch(state, attestation):
if not isValidAttestationTargetEpoch(state, attestation.data):
notice "Invalid attestation",
attestation = shortLog(attestation),
current_epoch = get_current_epoch(state),

@ -4,8 +4,8 @@ import
typetraits, stew/[results, objects, endians2],
serialization, chronicles, snappy,
eth/db/kvstore,
./spec/[datatypes, digest, crypto],
./ssz/[ssz_serialization, merkleization], ./state_transition
./spec/[datatypes, digest, crypto, state_transition],
./ssz/[ssz_serialization, merkleization]
type
BeaconChainDB* = ref object
@ -94,24 +94,31 @@ proc get(db: BeaconChainDB, key: openArray[byte], T: type Eth2Digest): Opt[T] =
res
proc get(db: BeaconChainDB, key: openArray[byte], T: typedesc): Opt[T] =
var res: Opt[T]
proc get(db: BeaconChainDB, key: openArray[byte], res: var auto): bool =
var found = false
# TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later
var resPtr = unsafeAddr res # callback is local, ptr wont escape
proc decode(data: openArray[byte]) =
try:
res.ok SSZ.decode(snappy.decode(data), T)
resPtr[] = SSZ.decode(snappy.decode(data), type res)
found = true
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(T), dataLen = data.len
err = e.msg, typ = name(type res), dataLen = data.len
discard
discard db.backend.get(key, decode).expect("working database")
res
found
proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: SignedBeaconBlock) =
db.put(subkey(type value, key), value)
proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: TrustedSignedBeaconBlock) =
db.put(subkey(SignedBeaconBlock, key), value)
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
# TODO prune old states - this is less easy than it seems as we never know
@ -126,7 +133,9 @@ proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
value: Eth2Digest) =
db.put(subkey(root, slot), value)
proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
proc putBlock*(db: BeaconChainDB, value: SomeSignedBeaconBlock) =
# TODO this should perhaps be a TrustedSignedBeaconBlock, but there's no
# trivial way to coerce one type into the other, as it stands..
db.putBlock(hash_tree_root(value.message), value)
proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
@ -145,8 +154,11 @@ proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.put(subkey(kTailBlock), key)
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[SignedBeaconBlock] =
db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock())
if not db.get(subkey(SignedBeaconBlock, key), result.get):
result.err()
proc getState*(
db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
@ -159,20 +171,11 @@ proc getState*(
# https://github.com/nim-lang/Nim/issues/14126
# TODO RVO is inefficient for large objects:
# https://github.com/nim-lang/Nim/issues/13879
# TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later
let outputAddr = unsafeAddr output # callback is local
proc decode(data: openArray[byte]) =
try:
# TODO can't write to output directly..
assign(outputAddr[], SSZ.decode(snappy.decode(data), BeaconState))
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?", err = e.msg
rollback(outputAddr[])
db.backend.get(subkey(BeaconState, key), decode).expect("working database")
if not db.get(subkey(BeaconState, key), output):
rollback(output)
false
else:
true
proc getStateRoot*(db: BeaconChainDB,
root: Eth2Digest,
@ -192,14 +195,14 @@ proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
db.backend.contains(subkey(BeaconState, key)).expect("working database")
iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
tuple[root: Eth2Digest, blck: SignedBeaconBlock] =
tuple[root: Eth2Digest, blck: TrustedSignedBeaconBlock] =
## Load a chain of ancestors for blck - returns a list of blocks with the
## oldest block last (blck will be at result[0]).
##
## The search will go on until the ancestor cannot be found.
var root = root
while (let blck = db.getBlock(root); blck.isOk()):
yield (root, blck.get())
root = blck.get().message.parent_root
var res: tuple[root: Eth2Digest, blck: TrustedSignedBeaconBlock]
res.root = root
while db.get(subkey(SignedBeaconBlock, res.root), res.blck):
yield res
res.root = res.blck.message.parent_root

@ -7,7 +7,7 @@
import
# Standard library
os, tables, random, strutils, times, math,
algorithm, os, tables, strutils, times, math, terminal,
# Nimble packages
stew/[objects, byteutils], stew/shims/macros,
@ -19,19 +19,17 @@ import
# Local modules
spec/[datatypes, digest, crypto, beaconstate, helpers, network],
spec/presets/custom,
spec/state_transition, spec/presets/custom,
conf, time, beacon_chain_db, validator_pool, extras,
attestation_pool, block_pool, eth2_network, eth2_discovery,
beacon_node_common, beacon_node_types, block_pools/block_pools_types,
nimbus_binary_common,
mainchain_monitor, version, ssz/[merkleization], sszdump,
sync_protocol, request_manager, keystore_management, interop, statusbar,
sync_manager, state_transition,
validator_duties, validator_api, attestation_aggregation
sync_manager, validator_duties, validator_api, attestation_aggregation
const
genesisFile* = "genesis.ssz"
timeToInitNetworkingBeforeGenesis = chronos.seconds(10)
hasPrompt = not defined(withoutPrompt)
type
@ -95,12 +93,14 @@ proc getStateFromSnapshot(conf: BeaconNodeConf): NilableBeaconStateRef =
genesisPath, dataDir = conf.dataDir.string
writeGenesisFile = true
genesisPath = snapshotPath
else:
try:
snapshotContents = readFile(genesisPath)
elif fileExists(genesisPath):
try: snapshotContents = readFile(genesisPath)
except CatchableError as err:
error "Failed to read genesis file", err = err.msg
quit 1
else:
# No snapshot was provided. We should wait for genesis.
return nil
result = try:
newClone(SSZ.decode(snapshotContents, BeaconState))
@ -144,20 +144,34 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
# Try file from command line first
if genesisState.isNil:
# Didn't work, try creating a genesis state using main chain monitor
# TODO Could move this to a separate "GenesisMonitor" process or task
# that would do only this - see Paul's proposal for this.
if conf.web3Url.len > 0 and conf.depositContractAddress.len > 0:
mainchainMonitor = MainchainMonitor.init(
web3Provider(conf.web3Url),
conf.depositContractAddress,
Eth2Digest())
mainchainMonitor.start()
else:
error "No initial state, need genesis state or deposit contract address"
if conf.web3Url.len == 0:
fatal "Web3 URL not specified"
quit 1
genesisState = await mainchainMonitor.getGenesis()
if conf.depositContractAddress.len == 0:
fatal "Deposit contract address not specified"
quit 1
if conf.depositContractDeployedAt.isNone:
# When we don't have a known genesis state, the network metadata
# must specify the deployment block of the contract.
fatal "Deposit contract deployment block not specified"
quit 1
# TODO Could move this to a separate "GenesisMonitor" process or task
# that would do only this - see Paul's proposal for this.
mainchainMonitor = MainchainMonitor.init(
web3Provider(conf.web3Url),
conf.depositContractAddress,
Eth1Data(block_hash: conf.depositContractDeployedAt.get, deposit_count: 0))
mainchainMonitor.start()
genesisState = await mainchainMonitor.waitGenesis()
info "Eth2 genesis state detected",
genesisTime = genesisState.genesisTime,
eth1Block = genesisState.eth1_data.block_hash,
totalDeposits = genesisState.eth1_data.deposit_count
# This is needed to prove the not nil property from here on
if genesisState == nil:
@ -193,7 +207,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
mainchainMonitor = MainchainMonitor.init(
web3Provider(conf.web3Url),
conf.depositContractAddress,
blockPool.headState.data.data.eth1_data.block_hash)
blockPool.headState.data.data.eth1_data)
# TODO if we don't have any validators attached, we don't need a mainchain
# monitor
mainchainMonitor.start()
@ -213,7 +227,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
nickname: nickname,
network: network,
netKeys: netKeys,
requestManager: RequestManager.init(network),
db: db,
config: conf,
attachedValidators: ValidatorPool.init(),
@ -227,7 +240,12 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
topicAggregateAndProofs: topicAggregateAndProofs,
)
traceAsyncErrors res.addLocalValidators()
res.requestManager = RequestManager.init(network,
proc(signedBlock: SignedBeaconBlock) =
onBeaconBlock(res, signedBlock)
)
await res.addLocalValidators()
# This merely configures the BeaconSync
# The traffic will be started when we join the network.
@ -322,11 +340,12 @@ proc storeBlock(
# The block we received contains attestations, and we might not yet know about
# all of them. Let's add them to the attestation pool.
let currentSlot = node.beaconClock.now.toSlot
if currentSlot.afterGenesis and
signedBlock.message.slot.epoch + 1 >= currentSlot.slot.epoch:
for attestation in signedBlock.message.body.attestations:
node.onAttestation(attestation)
for attestation in signedBlock.message.body.attestations:
debug "Attestation from block",
attestation = shortLog(attestation),
cat = "consensus" # Tag "consensus|attestation"?
node.attestationPool.add(attestation)
ok()
proc onBeaconBlock(node: BeaconNode, signedBlock: SignedBeaconBlock) =
@ -356,7 +375,7 @@ func verifyFinalization(node: BeaconNode, slot: Slot) =
proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, async.} =
## Called at the beginning of a slot - usually every slot, but sometimes might
## skip a few in case we're running late.
## lastSlot: the last slot that we sucessfully processed, so we know where to
## lastSlot: the last slot that we successfully processed, so we know where to
## start work from
## scheduledSlot: the slot that we were aiming for, in terms of timing
@ -501,21 +520,8 @@ proc handleMissingBlocks(node: BeaconNode) =
let missingBlocks = node.blockPool.checkMissing()
if missingBlocks.len > 0:
var left = missingBlocks.len
info "Requesting detected missing blocks", missingBlocks
node.requestManager.fetchAncestorBlocks(missingBlocks) do (b: SignedBeaconBlock):
onBeaconBlock(node, b)
# TODO instead of waiting for a full second to try the next missing block
# fetching, we'll do it here again in case we get all blocks we asked
# for (there might be new parents to fetch). of course, this is not
# good because the onSecond fetching also kicks in regardless but
# whatever - this is just a quick fix for making the testnet easier
# work with while the sync problem is dealt with more systematically
# dec left
# if left == 0:
# discard setTimer(Moment.now()) do (p: pointer):
# handleMissingBlocks(node)
info "Requesting detected missing blocks", blocks = shortLog(missingBlocks)
node.requestManager.fetchAncestorBlocks(missingBlocks)
proc onSecond(node: BeaconNode) {.async.} =
## This procedure will be called once per second.
@ -569,7 +575,7 @@ proc runForwardSyncLoop(node: BeaconNode) {.async.} =
# We doing round manually because stdlib.round is deprecated
storeSpeed = round(v * 10000) / 10000
info "Forward sync blocks got imported sucessfully", count = len(list),
info "Forward sync blocks got imported successfully", count = len(list),
local_head_slot = getLocalHeadSlot(), store_speed = storeSpeed
ok()
@ -705,6 +711,22 @@ proc installDebugApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
return res
rpcServer.rpc("peers") do () -> JsonNode:
var res = newJObject()
var peers = newJArray()
for id, peer in node.network.peerPool:
peers.add(
%(
info: shortLog(peer.info),
wasDialed: peer.wasDialed,
connectionState: $peer.connectionState,
score: peer.score,
)
)
res.add("peers", peers)
return res
proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) =
rpcServer.installValidatorApiHandlers(node)
rpcServer.installBeaconApiHandlers(node)
@ -723,7 +745,7 @@ proc installAttestationHandlers(node: BeaconNode) =
proc attestationValidator(attestation: Attestation,
committeeIndex: uint64): bool =
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#attestation-subnets
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#attestation-subnets
let (afterGenesis, slot) = node.beaconClock.now().toSlot()
if not afterGenesis:
return false
@ -731,27 +753,17 @@ proc installAttestationHandlers(node: BeaconNode) =
var attestationSubscriptions: seq[Future[void]] = @[]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#mainnet-3
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
for it in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
closureScope:
let ci = it
attestationSubscriptions.add(node.network.subscribe(
getMainnetAttestationTopic(node.forkDigest, ci), attestationHandler,
getAttestationTopic(node.forkDigest, ci), attestationHandler,
# This proc needs to be within closureScope; don't lift out of loop.
proc(attestation: Attestation): bool =
attestationValidator(attestation, ci)
))
when ETH2_SPEC == "v0.11.3":
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#interop-3
attestationSubscriptions.add(node.network.subscribe(
getInteropAttestationTopic(node.forkDigest), attestationHandler,
proc(attestation: Attestation): bool =
# isValidAttestation checks attestation.data.index == topicCommitteeIndex
# which doesn't make sense here, so rig that check to vacuously pass.
attestationValidator(attestation, attestation.data.index)
))
waitFor allFutures(attestationSubscriptions)
proc stop*(node: BeaconNode) =
@ -799,6 +811,8 @@ proc run*(node: BeaconNode) =
node.onSecondLoop = runOnSecondLoop(node)
node.forwardSyncLoop = runForwardSyncLoop(node)
node.requestManager.start()
# main event loop
while status == BeaconNodeStatus.Running:
try:
@ -819,21 +833,20 @@ proc createPidFile(filename: string) =
proc initializeNetworking(node: BeaconNode) {.async.} =
node.network.startListening()
let addressFile = node.config.dataDir / "beacon_node.address"
let addressFile = node.config.dataDir / "beacon_node.enr"
writeFile(addressFile, node.network.announcedENR.toURI)
await node.network.startLookingForPeers()
info "Networking initialized",
enr = node.network.announcedENR.toURI,
libp2p = shortLog(node.network.switch.peerInfo)
proc start(node: BeaconNode) =
let
head = node.blockPool.head
finalizedHead = node.blockPool.finalizedHead
let genesisTime = node.beaconClock.fromNow(toBeaconTime(Slot 0))
if genesisTime.inFuture and genesisTime.offset > timeToInitNetworkingBeforeGenesis:
info "Waiting for the genesis event", genesisIn = genesisTime.offset
waitFor sleepAsync(genesisTime.offset - timeToInitNetworkingBeforeGenesis)
genesisTime = node.beaconClock.fromNow(toBeaconTime(Slot 0))
info "Starting beacon node",
version = fullVersionStr,
@ -851,6 +864,9 @@ proc start(node: BeaconNode) =
cat = "init",
pcs = "start_beacon_node"
if genesisTime.inFuture:
notice "Waiting for genesis", genesisIn = genesisTime.offset
waitFor node.initializeNetworking()
node.run()
@ -871,7 +887,7 @@ func formatGwei(amount: uint64): string =
when hasPrompt:
from unicode import Rune
import terminal, prompt
import prompt
proc providePromptCompletions*(line: seq[Rune], cursorPos: int): seq[string] =
# TODO
@ -998,6 +1014,104 @@ when hasPrompt:
# var t: Thread[ptr Prompt]
# createThread(t, processPromptCommands, addr p)
proc createWalletInteractively(conf: BeaconNodeConf): OutFile {.raises: [Defect].} =
if conf.nonInteractive:
fatal "Wallets can be created only in interactive mode"
quit 1
var mnemonic = generateMnemonic()
defer: keystore_management.burnMem(mnemonic)
template readLine: string =
try: stdin.readLine()
except IOError:
fatal "Failed to read data from stdin"
quit 1
echo "The created wallet will be protected with a password " &
"that applies only to the current Nimbus installation. " &
"In case you lose your wallet and you need to restore " &
"it on a different machine, you must use the following " &
"seed recovery phrase: \n"
echo $mnemonic
echo "Please back up the seed phrase now to a safe location as " &
"if you are protecting a sensitive password. The seed phrase " &
"be used to withdrawl funds from your wallet.\n"
echo "Did you back up your seed recovery phrase? (please type 'yes' to continue or press enter to quit)"
while true:
let answer = readLine()
if answer == "":
quit 1
elif answer != "yes":
echo "To continue, please type 'yes' (without the quotes) or press enter to quit"
else:
break
echo "When you perform operations with your wallet such as withdrawals " &
"and additional deposits, you'll be asked to enter a password. " &
"Please note that this password is local to the current Nimbus " &
"installation and can be changed at any time."
while true:
var password, confirmedPassword: TaintedString
try:
let status = try:
readPasswordFromStdin("Please enter a password:", password) and
readPasswordFromStdin("Please repeat the password:", confirmedPassword)
except IOError:
fatal "Failed to read password interactively"
quit 1
if status:
if password != confirmedPassword:
echo "Passwords don't match, please try again"
else:
var name: WalletName
if conf.createdWalletName.isSome:
name = conf.createdWalletName.get
else:
echo "For your convenience, the wallet can be identified with a name " &
"of your choice. Please enter a wallet name below or press ENTER " &
"to continue with a machine-generated name."
while true:
var enteredName = readLine()
if enteredName.len > 0:
name = try: WalletName.parseCmdArg(enteredName)
except CatchableError as err:
echo err.msg & ". Please try again."
continue
break
let (uuid, walletContent) = KdfPbkdf2.createWalletContent(mnemonic, name)
try:
var outWalletFile: OutFile
if conf.createdWalletFile.isSome:
outWalletFile = conf.createdWalletFile.get
createDir splitFile(string outWalletFile).dir
else:
let walletsDir = conf.walletsDir
createDir walletsDir
outWalletFile = OutFile(walletsDir / addFileExt(string uuid, "json"))
writeFile(string outWalletFile, string walletContent)
return outWalletFile
except CatchableError as err:
fatal "Failed to write wallet file", err = err.msg
quit 1
if not status:
fatal "Failed to read a password from stdin"
quit 1
finally:
keystore_management.burnMem(password)
keystore_management.burnMem(confirmedPassword)
programMain:
let config = makeBannerAndConfig(clientId, BeaconNodeConf)
@ -1005,8 +1119,10 @@ programMain:
case config.cmd
of createTestnet:
var deposits: seq[Deposit]
var i = -1
var
depositDirs: seq[string]
deposits: seq[Deposit]
i = -1
for kind, dir in walkDir(config.testnetDepositsDir.string):
if kind != pcDir:
continue
@ -1015,13 +1131,19 @@ programMain:
if i < config.firstValidator.int:
continue
depositDirs.add dir
# Add deposits, in order, to pass Merkle validation
sort(depositDirs, system.cmp)
for dir in depositDirs:
let depositFile = dir / "deposit.json"
try:
deposits.add Json.loadFile(depositFile, Deposit)
except SerializationError as err:
stderr.write "Error while loading a deposit file:\n"
stderr.write err.formatMsg(depositFile), "\n"
stderr.write "Please regenerate the deposit files by running makeDeposits again\n"
stderr.write "Please regenerate the deposit files by running 'beacon_node deposits create' again\n"
quit 1
let
@ -1031,7 +1153,7 @@ programMain:
else: waitFor getLatestEth1BlockHash(config.web3Url)
var
initialState = initialize_beacon_state_from_eth1(
eth1Hash, startTime, deposits, {skipBlsValidation, skipMerkleValidation})
eth1Hash, startTime, deposits, {skipBlsValidation})
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
initialState.genesis_time = startTime
@ -1064,22 +1186,6 @@ programMain:
writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
echo "Wrote ", bootstrapFile
of importValidator:
template reportFailureFor(keyExpr) =
error "Failed to import validator key", key = keyExpr
programResult = 1
if config.keyFiles.len == 0:
stderr.write "Please specify at least one keyfile to import."
quit 1
for keyFile in config.keyFiles:
try:
saveValidatorKey(keyFile.string.extractFilename,
readFile(keyFile.string), config)
except:
reportFailureFor keyFile.string
of noCommand:
debug "Launching beacon node",
version = fullVersionStr,
@ -1109,34 +1215,44 @@ programMain:
else:
node.start()
of makeDeposits:
createDir(config.outValidatorsDir)
createDir(config.outSecretsDir)
of deposits:
case config.depositsCmd
of DepositsCmd.create:
createDir(config.outValidatorsDir)
createDir(config.outSecretsDir)
let
deposits = generateDeposits(
let deposits = generateDeposits(
config.totalDeposits,
config.outValidatorsDir,
config.outSecretsDir).tryGet
config.outSecretsDir)
if config.web3Url.len > 0 and config.depositContractAddress.len > 0:
if deposits.isErr:
fatal "Failed to generate deposits", err = deposits.error
quit 1
if not config.dontSend:
waitFor sendDeposits(config, deposits.value)
of DepositsCmd.send:
if config.minDelay > config.maxDelay:
echo "The minimum delay should not be larger than the maximum delay"
quit 1
var delayGenerator: DelayGenerator
if config.maxDelay > 0.0:
delayGenerator = proc (): chronos.Duration {.gcsafe.} =
chronos.milliseconds (rand(config.minDelay..config.maxDelay)*1000).int
let deposits = loadDeposits(config.depositsDir)
waitFor sendDeposits(config, deposits)
info "Sending deposits",
web3 = config.web3Url,
depositContract = config.depositContractAddress
waitFor sendDeposits(
deposits,
config.web3Url,
config.depositContractAddress,
config.depositPrivateKey,
delayGenerator)
of DepositsCmd.status:
# TODO
echo "The status command is not implemented yet"
quit 1
of wallets:
case config.walletsCmd:
of WalletsCmd.create:
let walletFile = createWalletInteractively(config)
of WalletsCmd.list:
# TODO
discard
of WalletsCmd.restore:
# TODO
discard

View File

@ -35,7 +35,7 @@ type
# Quarantine dispatch
# --------------------------------------------
func checkMissing*(pool: var BlockPool): seq[FetchRecord] {.noInit.} =
func checkMissing*(pool: var BlockPool): seq[FetchRecord] =
checkMissing(pool.quarantine)
# CandidateChains
@ -127,6 +127,9 @@ proc latestJustifiedBlock*(pool: BlockPool): BlockSlot =
## as the latest finalized block
latestJustifiedBlock(pool.dag)
proc addMissing*(pool: var BlockPool, broot: Eth2Digest) {.inline.} =
pool.quarantine.addMissing(broot)
proc isInitialized*(T: type BlockPool, db: BeaconChainDB): bool =
isInitialized(CandidateChains, db)
@ -152,7 +155,8 @@ template justifiedState*(pool: BlockPool): StateData =
pool.dag.justifiedState
template withState*(
pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped): untyped =
pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped):
untyped =
## Helper template that updates state to a particular BlockSlot - usage of
## cache is unsafe outside of block.
## TODO async transformations will lead to a race where cache gets updated
@ -160,6 +164,19 @@ template withState*(
withState(pool.dag, cache, blockSlot, body)
template withEpochState*(
pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped):
untyped =
## Helper template that updates state to a state with an epoch matching the
## epoch of blockSlot. This aims to be at least as fast as withState, quick
## enough to expose to unauthenticated, remote use, but with the trade-off that
## it may decide that finding a state from a matching epoch is too expensive for
## such use cases.
##
## cache is unsafe outside of block.
withEpochState(pool.dag, cache, blockSlot, body)
proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =
## Rewind or advance state such that it matches the given block and slot -
## this may include replaying from an earlier snapshot if blck is on a

View File

@ -37,7 +37,7 @@ type
##
## Invalid blocks are dropped immediately.
pending*: Table[Eth2Digest, SignedBeaconBlock] ##\
orphans*: Table[Eth2Digest, SignedBeaconBlock] ##\
## Blocks that have passed validation but that we lack a link back to tail
## for - when we receive a "missing link", we can use this data to build
## an entire branch
@ -49,12 +49,10 @@ type
inAdd*: bool
MissingBlock* = object
slots*: uint64 # number of slots that are suspected missing
tries*: int
FetchRecord* = object
root*: Eth2Digest
historySlots*: uint64
CandidateChains* = ref object
## Pool of blocks responsible for keeping a DAG of resolved blocks.
@ -145,7 +143,7 @@ type
BlockData* = object
## Body and graph in one
data*: SignedBeaconBlock
data*: TrustedSignedBeaconBlock # We trust all blocks we have a ref for
refs*: BlockRef
StateData* = object

View File

@ -10,8 +10,8 @@
import
chronicles, options, sequtils, tables,
metrics,
../ssz/merkleization, ../beacon_chain_db, ../state_transition, ../extras,
../spec/[crypto, datatypes, digest, helpers, validator],
../ssz/merkleization, ../beacon_chain_db, ../extras,
../spec/[crypto, datatypes, digest, helpers, validator, state_transition],
block_pools_types
declareCounter beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # On fork choice
@ -20,7 +20,8 @@ declareCounter beacon_state_data_cache_misses, "dag.cachedStates misses"
logScope: topics = "hotdb"
proc putBlock*(dag: var CandidateChains, blockRoot: Eth2Digest, signedBlock: SignedBeaconBlock) {.inline.} =
proc putBlock*(
dag: var CandidateChains, blockRoot: Eth2Digest, signedBlock: SignedBeaconBlock) =
dag.db.putBlock(blockRoot, signedBlock)
proc updateStateData*(
@ -53,7 +54,7 @@ func parent*(bs: BlockSlot): BlockSlot =
slot: bs.slot - 1
)
func populateEpochCache*(state: BeaconState, epoch: Epoch): EpochRef =
func populateEpochCache(state: BeaconState, epoch: Epoch): EpochRef =
result = (EpochRef)(
epoch: state.slot.compute_epoch_at_slot,
shuffled_active_validator_indices:
@ -129,6 +130,22 @@ func get_ancestor*(blck: BlockRef, slot: Slot): BlockRef =
blck = blck.parent
iterator get_ancestors_in_epoch(blockSlot: BlockSlot): BlockSlot =
let min_slot =
blockSlot.slot.compute_epoch_at_slot.compute_start_slot_at_epoch
var blockSlot = blockSlot
while true:
for slot in countdown(blockSlot.slot, max(blockSlot.blck.slot, min_slot)):
yield BlockSlot(blck: blockSlot.blck, slot: slot)
if blockSlot.blck.parent.isNil or blockSlot.blck.slot <= min_slot:
break
doAssert blockSlot.blck.slot > blockSlot.blck.parent.slot
blockSlot =
BlockSlot(blck: blockSlot.blck.parent, slot: blockSlot.blck.slot - 1)
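As a rough orientation for the iterator above: for each block it walks the slots from the requested slot down to the first slot of that slot's epoch, then steps to the parent block. A minimal standalone sketch of the per-block slot range, assuming SLOTS_PER_EPOCH = 8 purely for brevity (not a preset from this change):

```nim
# Illustrative only: which slots are scanned for a single block.
const SLOTS_PER_EPOCH = 8'u64

let requested = 21'u64
let minSlot = (requested div SLOTS_PER_EPOCH) * SLOTS_PER_EPOCH  # epoch start: 16

for slot in countdown(requested, minSlot):
  echo slot  # 21, 20, 19, 18, 17, 16
```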
func atSlot*(blck: BlockRef, slot: Slot): BlockSlot =
## Return a BlockSlot at a given slot, with the block set to the closest block
## available. If slot comes from before the block, a suitable block ancestor
@ -148,7 +165,7 @@ func getEpochInfo*(blck: BlockRef, state: BeaconState): EpochRef =
if matching_epochinfo.len == 0:
let cache = populateEpochCache(state, state_epoch)
blck.epochsInfo.add(cache)
trace "candidate_chains.skipAndUpdateState(): back-filling parent.epochInfo",
trace "candidate_chains.getEpochInfo: back-filling parent.epochInfo",
state_slot = state.slot
cache
elif matching_epochinfo.len == 1:
@ -169,7 +186,7 @@ func init(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
slot: slot
)
func init*(T: type BlockRef, root: Eth2Digest, blck: BeaconBlock): BlockRef =
func init*(T: type BlockRef, root: Eth2Digest, blck: SomeBeaconBlock): BlockRef =
BlockRef.init(root, blck.slot)
proc init*(T: type CandidateChains, db: BeaconChainDB,
@ -492,10 +509,10 @@ proc skipAndUpdateState(
ok
proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
seq[BlockData] =
seq[BlockRef] =
logScope: pcs = "replay_state"
var ancestors = @[dag.get(bs.blck)]
var ancestors = @[bs.blck]
# Common case: the last block applied is the parent of the block to apply:
if not bs.blck.parent.isNil and state.blck.root == bs.blck.parent.root and
state.data.data.slot < bs.blck.slot:
@ -522,7 +539,7 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
break # Bug probably!
if parBs.blck != curBs.blck:
ancestors.add(dag.get(parBs.blck))
ancestors.add(parBs.blck)
# TODO investigate replacing with getStateCached, by refactoring whole
# function. Empirically, this becomes pretty rare once good caches are
@ -531,12 +548,12 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
if idx >= 0:
assign(state.data, dag.cachedStates[idx].state[])
let ancestor = ancestors.pop()
state.blck = ancestor.refs
state.blck = ancestor
beacon_state_data_cache_hits.inc()
trace "Replaying state transitions via in-memory cache",
stateSlot = shortLog(state.data.data.slot),
ancestorStateRoot = shortLog(ancestor.data.message.state_root),
ancestorStateRoot = shortLog(state.data.root),
ancestorStateSlot = shortLog(state.data.data.slot),
slot = shortLog(bs.slot),
blockRoot = shortLog(bs.blck.root),
@ -568,7 +585,7 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
let
ancestor = ancestors.pop()
root = stateRoot.get()
found = dag.getState(dag.db, root, ancestor.refs, state)
found = dag.getState(dag.db, root, ancestor, state)
if not found:
# TODO this should only happen if the database is corrupt - we walked the
@ -584,7 +601,6 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
trace "Replaying state transitions",
stateSlot = shortLog(state.data.data.slot),
ancestorStateRoot = shortLog(ancestor.data.message.state_root),
ancestorStateSlot = shortLog(state.data.data.slot),
slot = shortLog(bs.slot),
blockRoot = shortLog(bs.blck.root),
@ -618,6 +634,24 @@ proc getStateDataCached(dag: CandidateChains, state: var StateData, bs: BlockSlo
false
template withEpochState*(
dag: CandidateChains, cache: var StateData, blockSlot: BlockSlot, body: untyped): untyped =
## Helper template that updates state to a particular BlockSlot - usage of
## cache is unsafe outside of block.
## TODO async transformations will lead to a race where cache gets updated
## while waiting for future to complete - catch this here somehow?
for ancestor in get_ancestors_in_epoch(blockSlot):
if getStateDataCached(dag, cache, ancestor):
break
template hashedState(): HashedBeaconState {.inject, used.} = cache.data
template state(): BeaconState {.inject, used.} = cache.data.data
template blck(): BlockRef {.inject, used.} = cache.blck
template root(): Eth2Digest {.inject, used.} = cache.data.root
body
proc updateStateData*(dag: CandidateChains, state: var StateData, bs: BlockSlot) =
## Rewind or advance state such that it matches the given block and slot -
## this may include replaying from an earlier snapshot if blck is on a
@ -655,10 +689,7 @@ proc updateStateData*(dag: CandidateChains, state: var StateData, bs: BlockSlot)
# no state root calculation will take place here, because we can load
# the final state root from the block itself.
let ok =
dag.skipAndUpdateState(
state, ancestors[i],
{skipBlsValidation, skipStateRootValidation},
false)
dag.skipAndUpdateState(state, dag.get(ancestors[i]), {}, false)
doAssert ok, "Blocks in database should never fail to apply.."
# We save states here - blocks were guaranteed to have passed through the save

View File

@ -10,9 +10,9 @@
import
chronicles, sequtils, tables,
metrics, stew/results,
../ssz/merkleization, ../state_transition, ../extras,
../spec/[crypto, datatypes, digest, helpers, signatures],
block_pools_types, candidate_chains
../ssz/merkleization, ../extras,
../spec/[crypto, datatypes, digest, helpers, signatures, state_transition],
block_pools_types, candidate_chains, quarantine
export results
@ -32,7 +32,7 @@ func getOrResolve*(dag: CandidateChains, quarantine: var Quarantine, root: Eth2D
result = dag.getRef(root)
if result.isNil:
quarantine.missing[root] = MissingBlock(slots: 1)
quarantine.missing[root] = MissingBlock()
proc add*(
dag: var CandidateChains, quarantine: var Quarantine,
@ -99,12 +99,12 @@ proc addResolvedBlock(
defer: quarantine.inAdd = false
var keepGoing = true
while keepGoing:
let retries = quarantine.pending
let retries = quarantine.orphans
for k, v in retries:
discard add(dag, quarantine, k, v)
# Keep going for as long as the pending dag is shrinking
# TODO inefficient! so what?
keepGoing = quarantine.pending.len < retries.len
keepGoing = quarantine.orphans.len < retries.len
blockRef
proc add*(
@ -165,9 +165,9 @@ proc add*(
return err Invalid
# The block might have been in either of pending or missing - we don't want
# any more work done on its behalf
quarantine.pending.del(blockRoot)
# The block might have been in either of `orphans` or `missing` - we don't
# want any more work done on its behalf
quarantine.orphans.del(blockRoot)
# The block is resolved, now it's time to validate it to ensure that the
# blocks we add to the database are clean for the given state
@ -209,7 +209,7 @@ proc add*(
# the pending dag calls this function back later in a loop, so as long
# as dag.add(...) requires a SignedBeaconBlock, easier to keep them in
# pending too.
quarantine.pending[blockRoot] = signedBlock
quarantine.add(dag, signedBlock, some(blockRoot))
# TODO possibly, it makes sense to check the database - that would allow sync
# to simply fill up the database with random blocks the other clients
@ -217,7 +217,7 @@ proc add*(
# junk that's not part of the block graph
if blck.parent_root in quarantine.missing or
blck.parent_root in quarantine.pending:
blck.parent_root in quarantine.orphans:
return err MissingParent
# This is an unresolved block - put its parent on the missing list for now...
@ -232,24 +232,11 @@ proc add*(
# filter.
# TODO when we receive the block, we don't know how many others we're missing
# from that branch, so right now, we'll just do a blind guess
let parentSlot = blck.slot - 1
quarantine.missing[blck.parent_root] = MissingBlock(
slots:
# The block is at least two slots ahead - try to grab whole history
if parentSlot > dag.head.blck.slot:
parentSlot - dag.head.blck.slot
else:
# It's a sibling block from a branch that we're missing - fetch one
# epoch at a time
max(1.uint64, SLOTS_PER_EPOCH.uint64 -
(parentSlot.uint64 mod SLOTS_PER_EPOCH.uint64))
)
debug "Unresolved block (parent missing)",
blck = shortLog(blck),
blockRoot = shortLog(blockRoot),
pending = quarantine.pending.len,
orphans = quarantine.orphans.len,
missing = quarantine.missing.len,
cat = "filtering"
@ -345,8 +332,7 @@ proc isValidBeaconBlock*(
# not specific to this, but by the pending dag keying on the htr of the
# BeaconBlock, not SignedBeaconBlock, opens up certain spoofing attacks.
debug "parent unknown, putting block in quarantine"
quarantine.pending[hash_tree_root(signed_beacon_block.message)] =
signed_beacon_block
quarantine.add(dag, signed_beacon_block)
return err(MissingParent)
# The proposer signature, signed_beacon_block.signature, is valid with

View File

@ -6,13 +6,15 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
chronicles, tables,
chronicles, tables, options,
stew/bitops2,
metrics,
../spec/digest,
../spec/[datatypes, digest],
../ssz/merkleization,
block_pools_types
export options
logScope: topics = "quarant"
{.push raises: [Defect].}
@ -35,4 +37,21 @@ func checkMissing*(quarantine: var Quarantine): seq[FetchRecord] =
# simple (simplistic?) exponential backoff for retries..
for k, v in quarantine.missing.pairs():
if countOnes(v.tries.uint64) == 1:
result.add(FetchRecord(root: k, historySlots: v.slots))
result.add(FetchRecord(root: k))
func addMissing*(quarantine: var Quarantine, broot: Eth2Digest) {.inline.} =
discard quarantine.missing.hasKeyOrPut(broot, MissingBlock())
func add*(quarantine: var Quarantine, dag: CandidateChains,
sblck: SignedBeaconBlock,
broot: Option[Eth2Digest] = none[Eth2Digest]()) =
## Adds block to quarantine's `orphans` and `missing` lists.
let blockRoot = if broot.isSome():
broot.get()
else:
hash_tree_root(sblck.message)
quarantine.orphans[blockRoot] = sblck
let parentRoot = sblck.message.parent_root
quarantine.addMissing(parentRoot)
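A side note on the retry policy referenced in checkMissing above: a missing block is re-requested only when its tries counter is a power of two, which gives a crude exponential backoff. A self-contained illustration (not code from this change):

```nim
# Re-request happens when `tries` has exactly one bit set (1, 2, 4, 8, ...),
# mirroring `countOnes(v.tries.uint64) == 1` in checkMissing.
import std/bitops

func shouldRetry(tries: uint64): bool =
  tries > 0 and countSetBits(tries) == 1

when isMainModule:
  for tries in 1'u64 .. 10:
    echo tries, " -> ", shouldRetry(tries)  # true for 1, 2, 4, 8
```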

View File

@ -5,7 +5,7 @@ import
chronicles, confutils, json_serialization,
confutils/defs, confutils/std/net,
chronicles/options as chroniclesOptions,
spec/[crypto, keystore]
spec/[crypto, keystore, digest]
export
defs, enabledLogLevel, parseCmdArg, completeCmdArg
@ -15,9 +15,19 @@ type
BNStartUpCmd* = enum
noCommand
importValidator
createTestnet
makeDeposits
deposits
wallets
WalletsCmd* {.pure.} = enum
create = "Creates a new EIP-2386 wallet"
restore = "Restores a wallet from cold storage"
list = "Lists details about all wallets"
DepositsCmd* {.pure.} = enum
create = "Creates validator keystores and deposits"
send = "Sends prepared deposits to the validator deposit contract"
status = "Displays status information about all deposits"
VCStartUpCmd* = enum
VCNoCommand
@ -31,32 +41,36 @@ type
BeaconNodeConf* = object
logLevel* {.
defaultValue: "DEBUG"
desc: "Sets the log level."
desc: "Sets the log level"
name: "log-level" }: string
eth1Network* {.
defaultValue: goerli
desc: "The Eth1 network tracked by the beacon node."
desc: "The Eth1 network tracked by the beacon node"
name: "eth1-network" }: Eth1Network
dataDir* {.
defaultValue: config.defaultDataDir()
desc: "The directory where nimbus will store all blockchain data."
desc: "The directory where nimbus will store all blockchain data"
abbr: "d"
name: "data-dir" }: OutDir
web3Url* {.
defaultValue: ""
desc: "URL of the Web3 server to observe Eth1."
desc: "URL of the Web3 server to observe Eth1"
name: "web3-url" }: string
depositContractAddress* {.
defaultValue: ""
desc: "Address of the deposit contract."
desc: "Address of the deposit contract"
name: "deposit-contract" }: string
depositContractDeployedAt* {.
desc: "The Eth1 block hash where the deposit contract has been deployed"
name: "deposit-contract-block" }: Option[Eth2Digest]
nonInteractive* {.
desc: "Do not display interative prompts. Quit on missing configuration."
desc: "Do not display interative prompts. Quit on missing configuration"
name: "non-interactive" }: bool
case cmd* {.
@ -65,28 +79,28 @@ type
of noCommand:
bootstrapNodes* {.
desc: "Specifies one or more bootstrap nodes to use when connecting to the network."
desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
abbr: "b"
name: "bootstrap-node" }: seq[string]
bootstrapNodesFile* {.
defaultValue: ""
desc: "Specifies a line-delimited file of bootsrap Ethereum network addresses."
desc: "Specifies a line-delimited file of bootstrap Ethereum network addresses"
name: "bootstrap-file" }: InputFile
libp2pAddress* {.
defaultValue: defaultListenAddress(config)
desc: "Listening address for the Ethereum LibP2P traffic."
desc: "Listening address for the Ethereum LibP2P traffic"
name: "listen-address" }: ValidIpAddress
tcpPort* {.
defaultValue: defaultEth2TcpPort
desc: "Listening TCP port for Ethereum LibP2P traffic."
desc: "Listening TCP port for Ethereum LibP2P traffic"
name: "tcp-port" }: Port
udpPort* {.
defaultValue: defaultEth2TcpPort
desc: "Listening UDP port for node discovery."
desc: "Listening UDP port for node discovery"
name: "udp-port" }: Port
maxPeers* {.
@ -96,62 +110,66 @@ type
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>."
"Must be one of: any, none, upnp, pmp, extip:<IP>"
defaultValue: "any" }: string
validators* {.
required
desc: "Path to a validator private key, as generated by makeDeposits."
desc: "Path to a validator keystore"
abbr: "v"
name: "validator" }: seq[ValidatorKeyPath]
validatorsDirFlag* {.
desc: "A directory containing validator keystores."
desc: "A directory containing validator keystores"
name: "validators-dir" }: Option[InputDir]
secretsDirFlag* {.
desc: "A directory containing validator keystore passwords."
desc: "A directory containing validator keystore passwords"
name: "secrets-dir" }: Option[InputDir]
walletsDirFlag* {.
desc: "A directory containing wallet files"
name: "wallets-dir" }: Option[InputDir]
stateSnapshot* {.
desc: "Json file specifying a recent state snapshot."
desc: "Json file specifying a recent state snapshot"
abbr: "s"
name: "state-snapshot" }: Option[InputFile]
nodeName* {.
defaultValue: ""
desc: "A name for this node that will appear in the logs. " &
"If you set this to 'auto', a persistent automatically generated ID will be seleceted for each --dataDir folder."
"If you set this to 'auto', a persistent automatically generated ID will be selected for each --data-dir folder"
name: "node-name" }: string
verifyFinalization* {.
defaultValue: false
desc: "Specify whether to verify finalization occurs on schedule, for testing."
desc: "Specify whether to verify finalization occurs on schedule, for testing"
name: "verify-finalization" }: bool
stopAtEpoch* {.
defaultValue: 0
desc: "A positive epoch selects the epoch at which to stop."
desc: "A positive epoch selects the epoch at which to stop"
name: "stop-at-epoch" }: uint64
metricsEnabled* {.
defaultValue: false
desc: "Enable the metrics server."
desc: "Enable the metrics server"
name: "metrics" }: bool
metricsAddress* {.
defaultValue: defaultAdminListenAddress(config)
desc: "Listening address of the metrics server."
desc: "Listening address of the metrics server"
name: "metrics-address" }: ValidIpAddress
metricsPort* {.
defaultValue: 8008
desc: "Listening HTTP port of the metrics server."
desc: "Listening HTTP port of the metrics server"
name: "metrics-port" }: Port
statusBarEnabled* {.
defaultValue: true
desc: "Display a status bar at the bottom of the terminal screen."
desc: "Display a status bar at the bottom of the terminal screen"
name: "status-bar" }: bool
statusBarContents* {.
@ -160,7 +178,7 @@ type
"head: $head_root:$head_epoch:$head_epoch_slot;" &
"time: $epoch:$epoch_slot ($slot)|" &
"ETH: $attached_validators_balance"
desc: "Textual template for the contents of the status bar."
desc: "Textual template for the contents of the status bar"
name: "status-bar-contents" }: string
rpcEnabled* {.
@ -170,7 +188,7 @@ type
rpcPort* {.
defaultValue: defaultEth2RpcPort
desc: "HTTP port for the JSON-RPC service."
desc: "HTTP port for the JSON-RPC service"
name: "rpc-port" }: Port
rpcAddress* {.
@ -185,86 +203,137 @@ type
of createTestnet:
testnetDepositsDir* {.
desc: "Directory containing validator keystores."
desc: "Directory containing validator keystores"
name: "validators-dir" }: InputDir
totalValidators* {.
desc: "The number of validator deposits in the newly created chain."
desc: "The number of validator deposits in the newly created chain"
name: "total-validators" }: uint64
firstValidator* {.
defaultValue: 0
desc: "Index of first validator to add to validator list."
desc: "Index of first validator to add to validator list"
name: "first-validator" }: uint64
lastUserValidator* {.
defaultValue: config.totalValidators - 1,
desc: "The last validator index that will free for taking from a testnet participant."
desc: "The last validator index that will free for taking from a testnet participant"
name: "last-user-validator" }: uint64
bootstrapAddress* {.
defaultValue: ValidIpAddress.init("127.0.0.1")
desc: "The public IP address that will be advertised as a bootstrap node for the testnet."
desc: "The public IP address that will be advertised as a bootstrap node for the testnet"
name: "bootstrap-address" }: ValidIpAddress
bootstrapPort* {.
defaultValue: defaultEth2TcpPort
desc: "The TCP/UDP port that will be used by the bootstrap node."
desc: "The TCP/UDP port that will be used by the bootstrap node"
name: "bootstrap-port" }: Port
genesisOffset* {.
defaultValue: 5
desc: "Seconds from now to add to genesis time."
desc: "Seconds from now to add to genesis time"
name: "genesis-offset" }: int
outputGenesis* {.
desc: "Output file where to write the initial state snapshot."
desc: "Output file where to write the initial state snapshot"
name: "output-genesis" }: OutFile
withGenesisRoot* {.
defaultValue: false
desc: "Include a genesis root in 'network.json'."
desc: "Include a genesis root in 'network.json'"
name: "with-genesis-root" }: bool
outputBootstrapFile* {.
desc: "Output file with list of bootstrap nodes for the network."
desc: "Output file with list of bootstrap nodes for the network"
name: "output-bootstrap-file" }: OutFile
of importValidator:
keyFiles* {.
desc: "File with validator key to be imported (in hex form)."
name: "keyfile" }: seq[ValidatorKeyPath]
of wallets:
case walletsCmd* {.command.}: WalletsCmd
of WalletsCmd.create:
createdWalletName* {.
desc: "An easy-to-remember name for the wallet of your choice"
name: "name"}: Option[WalletName]
of makeDeposits:
totalDeposits* {.
defaultValue: 1
desc: "Number of deposits to generate."
name: "count" }: int
nextAccount* {.
desc: "Initial value for the 'nextaccount' property of the wallet"
name: "next-account" }: Option[Natural]
outValidatorsDir* {.
defaultValue: "validators"
desc: "Output folder for validator keystores and deposits."
name: "out-validators-dir" }: string
createdWalletFile* {.
desc: "Output wallet file"
name: "out" }: Option[OutFile]
outSecretsDir* {.
defaultValue: "secrets"
desc: "Output folder for randomly generated keystore passphrases."
name: "out-secrets-dir" }: string
of WalletsCmd.restore:
restoredWalletName* {.
desc: "An easy-to-remember name for the wallet of your choice"
name: "name"}: Option[WalletName]
restoredDepositsCount* {.
desc: "Expected number of deposits to recover. If not specified, " &
"Nimbus will try to guess the number by inspecting the latest " &
"beacon state"
name: "deposits".}: Option[Natural]
restoredWalletFile* {.
desc: "Output wallet file"
name: "out" }: Option[OutFile]
of WalletsCmd.list:
discard
of deposits:
depositPrivateKey* {.
defaultValue: ""
desc: "Private key of the controlling (sending) account.",
desc: "Private key of the controlling (sending) account",
name: "deposit-private-key" }: string
minDelay* {.
defaultValue: 0.0
desc: "Minimum possible delay between making two deposits (in seconds)."
name: "min-delay" }: float
case depositsCmd* {.command.}: DepositsCmd
of DepositsCmd.create:
totalDeposits* {.
defaultValue: 1
desc: "Number of deposits to generate"
name: "count" }: int
maxDelay* {.
defaultValue: 0.0
desc: "Maximum possible delay between making two deposits (in seconds)."
name: "max-delay" }: float
existingWalletId* {.
desc: "An existing wallet ID. If not specified, a new wallet will be created"
name: "wallet" }: Option[WalletName]
outValidatorsDir* {.
defaultValue: "validators"
desc: "Output folder for validator keystores and deposits"
name: "out-deposits-dir" }: string
outSecretsDir* {.
defaultValue: "secrets"
desc: "Output folder for randomly generated keystore passphrases"
name: "out-secrets-dir" }: string
dontSend* {.
defaultValue: false,
desc: "By default, all created deposits are also immediately sent " &
"to the validator deposit contract. You can use this option to " &
"prevent this behavior. Use the `deposits send` command to send " &
"the deposit transactions at your convenience later"
name: "dont-send" .}: bool
of DepositsCmd.send:
depositsDir* {.
defaultValue: "validators"
desc: "A folder with validator metadata created by the `deposits create` command"
name: "deposits-dir" }: string
minDelay* {.
defaultValue: 0.0
desc: "Minimum possible delay between making two deposits (in seconds)"
name: "min-delay" }: float
maxDelay* {.
defaultValue: 0.0
desc: "Maximum possible delay between making two deposits (in seconds)"
name: "max-delay" }: float
of DepositsCmd.status:
discard
ValidatorClientConf* = object
logLevel* {.
@ -274,12 +343,12 @@ type
dataDir* {.
defaultValue: config.defaultDataDir()
desc: "The directory where nimbus will store all blockchain data."
desc: "The directory where nimbus will store all blockchain data"
abbr: "d"
name: "data-dir" }: OutDir
nonInteractive* {.
desc: "Do not display interative prompts. Quit on missing configuration."
desc: "Do not display interative prompts. Quit on missing configuration"
name: "non-interactive" }: bool
case cmd* {.
@ -289,26 +358,26 @@ type
of VCNoCommand:
rpcPort* {.
defaultValue: defaultEth2RpcPort
desc: "HTTP port of the server to connect to for RPC."
desc: "HTTP port of the server to connect to for RPC"
name: "rpc-port" }: Port
rpcAddress* {.
defaultValue: defaultAdminListenAddress(config)
desc: "Address of the server to connect to for RPC."
desc: "Address of the server to connect to for RPC"
name: "rpc-address" }: ValidIpAddress
validators* {.
required
desc: "Path to a validator key store, as generated by makeDeposits."
desc: "Attach a validator by supplying a keystore path"
abbr: "v"
name: "validator" }: seq[ValidatorKeyPath]
validatorsDirFlag* {.
desc: "A directory containing validator keystores."
desc: "A directory containing validator keystores"
name: "validators-dir" }: Option[InputDir]
secretsDirFlag* {.
desc: "A directory containing validator keystore passwords."
desc: "A directory containing validator keystore passwords"
name: "secrets-dir" }: Option[InputDir]
proc defaultDataDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
@ -343,12 +412,33 @@ proc createDumpDirs*(conf: BeaconNodeConf) =
# Dumping is mainly a debugging feature, so ignore these..
warn "Cannot create dump directories", msg = err.msg
func parseCmdArg*(T: type Eth2Digest, input: TaintedString): T
{.raises: [ValueError, Defect].} =
fromHex(T, string input)
func completeCmdArg*(T: type Eth2Digest, input: TaintedString): seq[string] =
return @[]
func parseCmdArg*(T: type WalletName, input: TaintedString): T
{.raises: [ValueError, Defect].} =
if input.len == 0:
raise newException(ValueError, "The wallet name should not be empty")
if input[0] == '_':
raise newException(ValueError, "The wallet name should not start with an underscore")
return T(input)
func completeCmdArg*(T: type WalletName, input: TaintedString): seq[string] =
return @[]
func validatorsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
string conf.validatorsDirFlag.get(InputDir(conf.dataDir / "validators"))
func secretsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
string conf.secretsDirFlag.get(InputDir(conf.dataDir / "secrets"))
func walletsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
string conf.walletsDirFlag.get(InputDir(conf.dataDir / "wallets"))
func databaseDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
conf.dataDir / "db"

View File

@ -40,7 +40,7 @@ type
contract(Deposit):
proc drain()
proc deployContract*(web3: Web3, code: string): Future[Address] {.async.} =
proc deployContract*(web3: Web3, code: string): Future[ReceiptObject] {.async.} =
var code = code
if code[1] notin {'x', 'X'}:
code = "0x" & code
@ -51,8 +51,7 @@ proc deployContract*(web3: Web3, code: string): Future[Address] {.async.} =
gasPrice: 1.some)
let r = await web3.send(tr)
let receipt = await web3.getMinedTransactionReceipt(r)
result = receipt.contractAddress.get
result = await web3.getMinedTransactionReceipt(r)
proc sendEth(web3: Web3, to: string, valueEth: int): Future[TxHash] =
let tr = EthSend(
@ -67,7 +66,7 @@ proc main() {.async.} =
let cfg = CliConfig.load()
let web3 = await newWeb3(cfg.web3Url)
if cfg.privateKey.len != 0:
web3.privateKey = PrivateKey.fromHex(cfg.privateKey)[]
web3.privateKey = some(PrivateKey.fromHex(cfg.privateKey)[])
else:
let accounts = await web3.provider.eth_accounts()
doAssert(accounts.len > 0)
@ -75,8 +74,8 @@ proc main() {.async.} =
case cfg.cmd
of StartUpCommand.deploy:
let contractAddress = await web3.deployContract(contractCode)
echo "0x", contractAddress
let receipt = await web3.deployContract(contractCode)
echo "0x", receipt.contractAddress.get, ";", receipt.blockHash
of StartUpCommand.drain:
let sender = web3.contractSender(Deposit, Address.fromHex(cfg.contractAddress))
discard await sender.drain().send(gasPrice = 1)

View File

@ -51,7 +51,7 @@ proc loadBootstrapFile*(bootstrapFile: string,
localPubKey: PublicKey) =
if bootstrapFile.len == 0: return
let ext = splitFile(bootstrapFile).ext
if cmpIgnoreCase(ext, ".txt") == 0:
if cmpIgnoreCase(ext, ".txt") == 0 or cmpIgnoreCase(ext, ".enr") == 0 :
try:
for ln in lines(bootstrapFile):
addBootstrapNode(ln, bootstrapEnrs, localPubKey)
@ -64,7 +64,7 @@ proc loadBootstrapFile*(bootstrapFile: string,
# removal of YAML metadata.
try:
for ln in lines(bootstrapFile):
addBootstrapNode(string(ln[3..^2]), bootstrapEnrs, localPubKey)
addBootstrapNode(string(ln.strip()[3..^2]), bootstrapEnrs, localPubKey)
except IOError as e:
error "Could not read bootstrap file", msg = e.msg
quit 1
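To make the slicing above concrete: each YAML-style line carries a quoted ENR, and the added strip() makes the fixed slice robust to surrounding whitespace. A tiny standalone sketch (the sample line is hypothetical):

```nim
import std/strutils

# A YAML list entry as it might appear in the bootstrap file (made-up record).
let ln = "  - \"enr:-IS4QExample\"  "

# Drop the leading '- "' and the trailing '"'.
echo ln.strip()[3..^2]  # -> enr:-IS4QExample
```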
@ -75,7 +75,7 @@ proc loadBootstrapFile*(bootstrapFile: string,
proc new*(T: type Eth2DiscoveryProtocol,
conf: BeaconNodeConf,
ip: Option[ValidIpAddress], tcpPort, udpPort: Port,
rawPrivKeyBytes: openarray[byte],
pk: PrivateKey,
enrFields: openarray[(string, seq[byte])]):
T {.raises: [Exception, Defect].} =
# TODO
@ -83,8 +83,7 @@ proc new*(T: type Eth2DiscoveryProtocol,
# * for setting up a specific key
# * for using a persistent database
let
pk = PrivateKey.fromRaw(rawPrivKeyBytes).expect("Valid private key")
ourPubKey = pk.toPublicKey().expect("Public key from valid private key")
ourPubKey = pk.toPublicKey()
# TODO: `newMemoryDB()` causes raises: [Exception]
db = DiscoveryDB.init(newMemoryDB())

View File

@ -4,18 +4,18 @@ import
options as stdOptions,
# Status libs
stew/[varints, base58, endians2, results, byteutils],
stew/[varints, base58, base64, endians2, results, byteutils],
stew/shims/net as stewNet,
stew/shims/[macros, tables],
faststreams/[inputs, outputs, buffers], snappy, snappy/framing,
json_serialization, json_serialization/std/[net, options],
chronos, chronicles, metrics,
# TODO: create simpler to use libp2p modules that use re-exports
libp2p/[switch, standard_setup, peerinfo, peer, connection, errors,
libp2p/[switch, standard_setup, peerinfo, peer, errors,
multiaddress, multicodec, crypto/crypto, crypto/secp,
protocols/identify, protocols/protocol],
libp2p/protocols/secure/[secure, secio],
libp2p/protocols/pubsub/[pubsub, floodsub, rpc/messages],
libp2p/protocols/pubsub/[pubsub, floodsub, rpc/message, rpc/messages],
libp2p/transports/tcptransport,
libp2p/stream/lpstream,
eth/[keys, async_utils], eth/p2p/p2p_protocol_dsl,
@ -63,6 +63,7 @@ type
connQueue: AsyncQueue[PeerInfo]
seenTable: Table[PeerID, SeenItem]
connWorkers: seq[Future[void]]
connTable: Table[PeerID, PeerInfo]
forkId: ENRForkID
EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers
@ -144,9 +145,10 @@ type
MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
DisconnectionReason* = enum
ClientShutDown
IrrelevantNetwork
FaultOrError
# might see other values on the wire!
ClientShutDown = 1
IrrelevantNetwork = 2
FaultOrError = 3
PeerDisconnected* = object of CatchableError
reason*: DisconnectionReason
@ -192,8 +194,6 @@ const
TTFB_TIMEOUT* = 5.seconds
RESP_TIMEOUT* = 10.seconds
readTimeoutErrorMsg = "Exceeded read timeout for a request"
NewPeerScore* = 200
## Score which will be assigned to new connected Peer
PeerScoreLowLimit* = 0
@ -274,10 +274,6 @@ proc openStream(node: Eth2Node,
else:
raise
func peerId(conn: Connection): PeerID =
# TODO: Can this be `nil`?
conn.peerInfo.peerId
proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.}
proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer {.gcsafe.} =
@ -454,17 +450,6 @@ when useNativeSnappy:
else:
include libp2p_streams_backend
template awaitWithTimeout[T](operation: Future[T],
deadline: Future[void],
onTimeout: untyped): T =
let f = operation
await f or deadline
if not f.finished:
cancel f
onTimeout
else:
f.read
proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
ResponseMsg: type,
timeout: Duration): Future[NetRes[ResponseMsg]]
@ -495,7 +480,7 @@ proc init*[MsgType](T: type SingleChunkResponse[MsgType],
peer: Peer, conn: Connection, noSnappy: bool): T =
T(UntypedResponse(peer: peer, stream: conn, noSnappy: noSnappy))
template write*[M](r: MultipleChunksResponse[M], val: M): untyped =
template write*[M](r: MultipleChunksResponse[M], val: auto): untyped =
sendResponseChunkObj(UntypedResponse(r), val)
template send*[M](r: SingleChunkResponse[M], val: auto): untyped =
@ -572,6 +557,11 @@ proc handleIncomingStream(network: Eth2Node,
try:
let peer = peerFromStream(network, conn)
# TODO peer connection setup is broken, update info in some better place
# whenever the race is fixed:
# https://github.com/status-im/nim-beacon-chain/issues/1157
peer.info = conn.peerInfo
template returnInvalidRequest(msg: ErrorMsg) =
await sendErrorResponse(peer, conn, noSnappy, InvalidRequest, msg)
return
@ -735,30 +725,35 @@ proc connectWorker(network: Eth2Node) {.async.} =
let pi = await network.connQueue.popFirst()
let r1 = network.peerPool.hasPeer(pi.peerId)
let r2 = network.isSeen(pi)
let r3 = network.connTable.hasKey(pi.peerId)
if not(r1) and not(r2):
# We trying to connect to peers which are not present in our PeerPool and
# not present in our SeenTable.
var fut = network.dialPeer(pi)
# We discarding here just because we going to check future state, to avoid
# condition where connection happens and timeout reached.
let res = await withTimeout(fut, network.connectTimeout)
# We handling only timeout and errors, because successfull connections
# will be stored in PeerPool.
if fut.finished():
if fut.failed() and not(fut.cancelled()):
debug "Unable to establish connection with peer", peer = pi.id,
errMsg = fut.readError().msg
inc nbc_failed_dials
network.addSeen(pi, SeenTableTimeDeadPeer)
continue
debug "Connection to remote peer timed out", peer = pi.id
inc nbc_timeout_dials
network.addSeen(pi, SeenTableTimeTimeout)
if not(r1) and not(r2) and not(r3):
network.connTable[pi.peerId] = pi
try:
# We only try to connect to peers that are not already in the PeerPool,
# the SeenTable or the ConnTable.
var fut = network.dialPeer(pi)
# We discard the result here because we only inspect the future's state below,
# to avoid a race where the connection succeeds just as the timeout is reached.
let res = await withTimeout(fut, network.connectTimeout)
# We handle only timeouts and errors here, because successful connections
# are stored in the PeerPool.
if fut.finished():
if fut.failed() and not(fut.cancelled()):
debug "Unable to establish connection with peer", peer = pi.id,
errMsg = fut.readError().msg
inc nbc_failed_dials
network.addSeen(pi, SeenTableTimeDeadPeer)
continue
debug "Connection to remote peer timed out", peer = pi.id
inc nbc_timeout_dials
network.addSeen(pi, SeenTableTimeTimeout)
finally:
network.connTable.del(pi.peerId)
else:
trace "Peer is already connected or already seen", peer = pi.id,
peer_pool_has_peer = $r1, seen_table_has_peer = $r2,
seen_table_size = len(network.seenTable)
trace "Peer is already connected, connecting or already seen",
peer = pi.id, peer_pool_has_peer = $r1, seen_table_has_peer = $r2,
connecting_peer = $r3, seen_table_size = len(network.seenTable)
proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
debug "Starting discovery loop"
@ -809,11 +804,12 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, enrForkId: ENRForkID,
result.connectTimeout = 10.seconds
result.seenThreshold = 10.minutes
result.seenTable = initTable[PeerID, SeenItem]()
result.connTable = initTable[PeerID, PeerInfo]()
result.connQueue = newAsyncQueue[PeerInfo](ConcurrentConnections)
result.metadata = getPersistentNetMetadata(conf)
result.forkId = enrForkId
result.discovery = Eth2DiscoveryProtocol.new(
conf, ip, tcpPort, udpPort, privKey.toRaw,
conf, ip, tcpPort, udpPort, privKey,
{"eth2": SSZ.encode(result.forkId), "attnets": SSZ.encode(result.metadata.attnets)})
newSeq result.protocolStates, allProtocols.len
@ -826,7 +822,7 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, enrForkId: ENRForkID,
msg.protocolMounter result
template publicKey*(node: Eth2Node): keys.PublicKey =
node.discovery.privKey.toPublicKey.tryGet()
node.discovery.privKey.toPublicKey
template addKnownPeer*(node: Eth2Node, peer: enr.Record) =
node.discovery.addNode peer
@ -1083,6 +1079,13 @@ proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair =
KeyPair(seckey: privKey, pubkey: privKey.getKey().tryGet())
func gossipId(data: openArray[byte]): string =
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#topics-and-messages
base64.encode(Base64Url, sha256.digest(data).data)
func msgIdProvider(m: messages.Message): string =
gossipId(m.data)
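A rough standalone restatement of the derivation above, assuming the same stew/base64 and nimcrypto modules imported at the top of this file: the message id is the URL-safe base64 encoding of the SHA-256 of the raw message data, per the linked spec section.

```nim
import stew/base64, nimcrypto

func exampleGossipId(data: openArray[byte]): string =
  # URL-safe base64 of sha256(data), as in gossipId above.
  base64.encode(Base64Url, sha256.digest(data).data)

when isMainModule:
  echo exampleGossipId([byte 1, 2, 3])
```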
proc createEth2Node*(conf: BeaconNodeConf, enrForkId: ENRForkID): Future[Eth2Node] {.async, gcsafe.} =
var
(extIp, extTcpPort, extUdpPort) = setupNat(conf)
@ -1100,7 +1103,8 @@ proc createEth2Node*(conf: BeaconNodeConf, enrForkId: ENRForkID): Future[Eth2Nod
var switch = newStandardSwitch(some keys.seckey, hostAddress,
triggerSelf = true, gossip = true,
sign = false, verifySignature = false,
transportFlags = {ServerFlags.ReuseAddr})
transportFlags = {ServerFlags.ReuseAddr},
msgIdProvider = msgIdProvider)
result = Eth2Node.init(conf, enrForkId, switch,
extIp, extTcpPort, extUdpPort,
keys.seckey.asEthKey)
@ -1138,69 +1142,47 @@ proc subscribe*[MsgType](node: Eth2Node,
topic: string,
msgHandler: proc(msg: MsgType) {.gcsafe.},
msgValidator: proc(msg: MsgType): bool {.gcsafe.} ) {.async, gcsafe.} =
template execMsgHandler(peerExpr, gossipBytes, gossipTopic, useSnappy) =
proc execMsgHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
inc nbc_gossip_messages_received
trace "Incoming pubsub message received",
peer = peerExpr, len = gossipBytes.len, topic = gossipTopic,
message_id = `$`(sha256.digest(gossipBytes))
when useSnappy:
msgHandler SSZ.decode(snappy.decode(gossipBytes), MsgType)
else:
msgHandler SSZ.decode(gossipBytes, MsgType)
# All message types which are subscribed to should be validated; putting
# this in subscribe(...) ensures that the default approach is correct.
template execMsgValidator(gossipBytes, gossipTopic, useSnappy): bool =
trace "Incoming pubsub message received for validation",
len = gossipBytes.len, topic = gossipTopic,
message_id = `$`(sha256.digest(gossipBytes))
when useSnappy:
msgValidator SSZ.decode(snappy.decode(gossipBytes), MsgType)
else:
msgValidator SSZ.decode(gossipBytes, MsgType)
len = data.len, topic, msgId = gossipId(data)
try:
msgHandler SSZ.decode(snappy.decode(data), MsgType)
except CatchableError as err:
debug "Gossip msg handler error",
msg = err.msg, len = data.len, topic, msgId = gossipId(data)
# Validate messages as soon as subscribed
let incomingMsgValidator = proc(topic: string,
message: GossipMsg): Future[bool]
{.async, gcsafe.} =
return execMsgValidator(message.data, topic, false)
let incomingMsgValidatorSnappy = proc(topic: string,
message: GossipMsg): Future[bool]
{.async, gcsafe.} =
return execMsgValidator(message.data, topic, true)
proc execValidator(
topic: string, message: GossipMsg): Future[bool] {.async, gcsafe.} =
trace "Validating incoming gossip message",
len = message.data.len, topic, msgId = gossipId(message.data)
try:
return msgValidator SSZ.decode(snappy.decode(message.data), MsgType)
except CatchableError as err:
debug "Gossip validation error",
msg = err.msg, msgId = gossipId(message.data)
return false
node.switch.addValidator(topic, incomingMsgValidator)
node.switch.addValidator(topic & "_snappy", incomingMsgValidatorSnappy)
node.switch.addValidator(topic & "_snappy", execValidator)
let incomingMsgHandler = proc(topic: string,
data: seq[byte]) {.async, gcsafe.} =
execMsgHandler "unknown", data, topic, false
let incomingMsgHandlerSnappy = proc(topic: string,
data: seq[byte]) {.async, gcsafe.} =
execMsgHandler "unknown", data, topic, true
await node.switch.subscribe(topic & "_snappy", execMsgHandler)
var switchSubscriptions: seq[Future[void]] = @[]
switchSubscriptions.add(node.switch.subscribe(topic, incomingMsgHandler))
switchSubscriptions.add(node.switch.subscribe(topic & "_snappy", incomingMsgHandlerSnappy))
await allFutures(switchSubscriptions)
proc traceMessage(fut: FutureBase, digest: MDigest[256]) =
proc traceMessage(fut: FutureBase, msgId: string) =
fut.addCallback do (arg: pointer):
if not(fut.failed):
trace "Outgoing pubsub message sent", message_id = `$`(digest)
trace "Outgoing pubsub message sent", msgId
elif fut.error != nil:
debug "Gossip message not sent", msgId, err = fut.error.msg
else:
debug "Unexpected future state for gossip", msgId, state = fut.state
proc broadcast*(node: Eth2Node, topic: string, msg: auto) =
inc nbc_gossip_messages_sent
let broadcastBytes = SSZ.encode(msg)
var fut = node.switch.publish(topic, broadcastBytes)
traceMessage(fut, sha256.digest(broadcastBytes))
traceAsyncErrors(fut)
# also publish to the snappy-compressed topics
let snappyEncoded = snappy.encode(broadcastBytes)
var futSnappy = node.switch.publish(topic & "_snappy", snappyEncoded)
traceMessage(futSnappy, sha256.digest(snappyEncoded))
traceAsyncErrors(futSnappy)
let
data = snappy.encode(SSZ.encode(msg))
var futSnappy = node.switch.publish(topic & "_snappy", data)
traceMessage(futSnappy, gossipId(data))
# TODO:
# At the moment, this is just a compatibility shim for the existing RLPx functionality.

View File

@ -20,17 +20,12 @@
type
UpdateFlag* = enum
skipMerkleValidation ##\
## When processing deposits, skip verifying the Merkle proof trees of each
## deposit.
skipBlsValidation ##\
## Skip verification of BLS signatures in block processing.
## Predominantly intended for use in testing, e.g. to allow extra coverage.
## Also useful to avoid unnecessary work when replaying known, good blocks.
skipStateRootValidation ##\
## Skip verification of block state root.
skipBlockParentRootValidation ##\
## Skip verification that the block's parent root matches the previous block header.
verifyFinalization
UpdateFlags* = set[UpdateFlag]

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import strutils, os, tables, options
import confutils, chronicles, chronos
import libp2p/[switch, standard_setup, connection, multiaddress, multicodec,
import libp2p/[switch, standard_setup, multiaddress, multicodec,
peer, peerinfo, peer]
import libp2p/crypto/crypto as lcrypto
import libp2p/crypto/secp as lsecp
@ -52,10 +52,8 @@ type
next_fork_version*: Version
next_fork_epoch*: Epoch
# TODO remove InteropAttestations when Altona launches
TopicFilter* {.pure.} = enum
Blocks, Attestations, Exits, ProposerSlashing, AttesterSlashings,
InteropAttestations
Blocks, Attestations, Exits, ProposerSlashing, AttesterSlashings
BootstrapKind* {.pure.} = enum
Enr, MultiAddr
@ -197,18 +195,12 @@ func getTopics(forkDigest: ForkDigest,
of TopicFilter.AttesterSlashings:
let topic = getAttesterSlashingsTopic(forkDigest)
@[topic, topic & "_snappy"]
of TopicFilter.InteropAttestations:
when ETH2_SPEC == "v0.11.3":
let topic = getInteropAttestationTopic(forkDigest)
@[topic, topic & "_snappy"]
else:
@[]
of TopicFilter.Attestations:
var topics = newSeq[string](ATTESTATION_SUBNET_COUNT * 2)
var offset = 0
for i in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
topics[offset] = getMainnetAttestationTopic(forkDigest, i)
topics[offset + 1] = getMainnetAttestationTopic(forkDigest, i) & "_snappy"
topics[offset] = getAttestationTopic(forkDigest, i)
topics[offset + 1] = getAttestationTopic(forkDigest, i) & "_snappy"
offset += 2
topics
@ -537,10 +529,6 @@ proc pubsubLogger(conf: InspectorConf, switch: Switch,
elif topic.endsWith(topicAggregateAndProofsSuffix) or
topic.endsWith(topicAggregateAndProofsSuffix & "_snappy"):
info "AggregateAndProof", msg = SSZ.decode(buffer, AggregateAndProof)
when ETH2_SPEC == "v0.11.3":
if topic.endsWith(topicInteropAttestationSuffix) or
topic.endsWith(topicInteropAttestationSuffix & "_snappy"):
info "Attestation", msg = SSZ.decode(buffer, Attestation)
except CatchableError as exc:
info "Unable to decode message", errMsg = exc.msg
@ -708,8 +696,6 @@ proc run(conf: InspectorConf) {.async.} =
topics.incl({TopicFilter.Blocks, TopicFilter.Attestations,
TopicFilter.Exits, TopicFilter.ProposerSlashing,
TopicFilter.AttesterSlashings})
when ETH2_SPEC == "v0.11.3":
topics.incl({TopicFilter.AttesterSlashings})
break
elif lcitem == "a":
topics.incl(TopicFilter.Attestations)
@ -723,16 +709,10 @@ proc run(conf: InspectorConf) {.async.} =
topics.incl(TopicFilter.AttesterSlashings)
else:
discard
when ETH2_SPEC == "v0.11.3":
if lcitem == "ia":
topics.incl(TopicFilter.InteropAttestations)
else:
topics.incl({TopicFilter.Blocks, TopicFilter.Attestations,
TopicFilter.Exits, TopicFilter.ProposerSlashing,
TopicFilter.AttesterSlashings})
when ETH2_SPEC == "v0.11.3":
topics.incl({TopicFilter.AttesterSlashings})
proc pubsubTrampoline(topic: string,
data: seq[byte]): Future[void] {.gcsafe.} =

View File

@ -1,9 +1,12 @@
import
os, strutils, terminal,
os, strutils, terminal, random,
chronicles, chronos, blscurve, nimcrypto, json_serialization, serialization,
web3, stint, eth/keys, confutils,
spec/[datatypes, digest, crypto, keystore], conf, ssz/merkleization, merkle_minimal
export
keystore
contract(DepositContract):
proc deposit(pubkey: Bytes48, withdrawalCredentials: Bytes32, signature: Bytes96, deposit_data_root: FixedBytes[32])
@ -109,7 +112,7 @@ proc generateDeposits*(totalValidators: int,
let credentials = generateCredentials(password = password)
let
keyName = $(credentials.signingKey.toPubKey)
keyName = intToStr(i, 6) & "_" & $(credentials.signingKey.toPubKey)
validatorDir = validatorsDir / keyName
passphraseFile = secretsDir / keyName
depositFile = validatorDir / depositFileName
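The zero-padded index prefix gives the validator directories a stable lexical order, presumably so that a plain sort (as done in createTestnet above) recovers the original deposit order for Merkle validation. A quick illustration of the new naming (the public key below is a placeholder):

```nim
import std/strutils

let i = 3
let pubkey = "0xabcdef"  # placeholder, not a real validator key
echo intToStr(i, 6) & "_" & pubkey  # -> 000003_0xabcdef
```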
@ -139,14 +142,34 @@ proc generateDeposits*(totalValidators: int,
ok deposits
proc loadDeposits*(depositsDir: string): seq[Deposit] =
try:
for kind, dir in walkDir(depositsDir):
if kind == pcDir:
let depositFile = dir / depositFileName
try:
result.add Json.loadFile(depositFile, Deposit)
except IOError as err:
error "Failed to open deposit file", depositFile, err = err.msg
quit 1
except SerializationError as err:
error "Invalid deposit file", error = formatMsg(err, depositFile)
quit 1
except OSError as err:
error "Deposits directory not accessible",
path = depositsDir, err = err.msg
quit 1
{.pop.}
# TODO: async functions should not take `seq` inputs because
# this leads to full copies.
proc sendDeposits*(deposits: seq[Deposit],
web3Url, depositContractAddress, privateKey: string,
delayGenerator: DelayGenerator = nil) {.async.} =
var web3 = await newWeb3(web3Url)
if privateKey.len != 0:
web3.privateKey = PrivateKey.fromHex(privateKey).tryGet
web3.privateKey = some(PrivateKey.fromHex(privateKey).tryGet)
else:
let accounts = await web3.provider.eth_accounts()
if accounts.len == 0:
@ -158,12 +181,32 @@ proc sendDeposits*(deposits: seq[Deposit],
let depositContract = web3.contractSender(DepositContract, contractAddress)
for i, dp in deposits:
discard await depositContract.deposit(
let status = await depositContract.deposit(
Bytes48(dp.data.pubKey.toRaw()),
Bytes32(dp.data.withdrawal_credentials.data),
Bytes96(dp.data.signature.toRaw()),
FixedBytes[32](hash_tree_root(dp.data).data)).send(value = 32.u256.ethToWei, gasPrice = 1)
info "Deposit sent", status = $status
if delayGenerator != nil:
await sleepAsync(delayGenerator())
proc sendDeposits*(config: BeaconNodeConf,
deposits: seq[Deposit]) {.async.} =
var delayGenerator: DelayGenerator
if config.maxDelay > 0.0:
delayGenerator = proc (): chronos.Duration {.gcsafe.} =
chronos.milliseconds (rand(config.minDelay..config.maxDelay)*1000).int
info "Sending deposits",
web3 = config.web3Url,
depositContract = config.depositContractAddress
await sendDeposits(
deposits,
config.web3Url,
config.depositContractAddress,
config.depositPrivateKey,
delayGenerator)

View File

@ -1,7 +1,13 @@
import
deques, tables, hashes, options,
chronos, web3, json, chronicles,
spec/[datatypes, digest, crypto, beaconstate, helpers]
deques, tables, hashes, options, strformat,
chronos, web3, web3/ethtypes, json, chronicles, eth/async_utils,
spec/[datatypes, digest, crypto, beaconstate, helpers],
merkle_minimal
from times import epochTime
export
ethtypes
contract(DepositContract):
proc deposit(pubkey: Bytes48,
@ -17,7 +23,6 @@ contract(DepositContract):
amount: Bytes8,
signature: Bytes96,
index: Bytes8) {.event.}
# TODO
# The raises list of this module are still not usable due to general
# Exceptions being reported from Chronos's asyncfutures2.
@ -31,30 +36,29 @@ type
timestamp*: Eth1BlockTimestamp
deposits*: seq[Deposit]
voteData*: Eth1Data
knownGoodDepositsCount*: Option[uint64]
Eth1Chain* = object
knownStart: Eth1Data
knownStartBlockNum: Option[Eth1BlockNumber]
blocks: Deque[Eth1Block]
blocksByHash: Table[BlockHash, Eth1Block]
allDeposits*: seq[Deposit]
MainchainMonitor* = ref object
startBlock: BlockHash
depositContractAddress: Address
dataProviderFactory*: DataProviderFactory
genesisState: NilableBeaconStateRef
genesisStateFut: Future[void]
genesisMonitoringFut: Future[void]
eth1Chain: Eth1Chain
depositQueue: AsyncQueue[DepositQueueElem]
depositQueue: AsyncQueue[BlockHeader]
runFut: Future[void]
Web3EventType = enum
NewEvent
RemovedEvent
DepositQueueElem = (BlockHash, Web3EventType)
DataProvider* = object of RootObj
DataProviderRef* = ref DataProvider
@ -69,7 +73,7 @@ type
url: string
web3: Web3
ns: Sender[DepositContract]
subscription: Subscription
blockHeadersSubscription: Subscription
Web3DataProviderRef* = ref Web3DataProvider
@ -86,11 +90,21 @@ type
const
reorgDepthLimit = 1000
web3Timeouts = 5.seconds
followDistanceInSeconds = uint64(SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE)
totalDepositsNeededForGenesis = uint64 max(SLOTS_PER_EPOCH,
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT)
# TODO Nim's analysis on the lock level of the methods in this
# module seems broken. Investigate and file this as an issue.
{.push warning[LockLevel]: off.}
static:
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#genesis
when SPEC_VERSION == "0.12.1":
doAssert SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE < GENESIS_DELAY,
"Invalid configuration: GENESIS_DELAY is set too low"
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#get_eth1_data
func compute_time_at_slot(state: BeaconState, slot: Slot): uint64 =
state.genesis_time + slot * SECONDS_PER_SLOT
@ -106,12 +120,15 @@ func is_candidate_block(blk: Eth1Block, period_start: uint64): bool =
(blk.timestamp + SECONDS_PER_ETH1_BLOCK.uint64 * ETH1_FOLLOW_DISTANCE.uint64 <= period_start) and
(blk.timestamp + SECONDS_PER_ETH1_BLOCK.uint64 * ETH1_FOLLOW_DISTANCE.uint64 * 2 >= period_start)
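For intuition, the two bounds above define a window of eth1 timestamps exactly one follow distance wide. A standalone sketch, assuming mainnet-style constants (14-second eth1 blocks, follow distance 1024) purely for illustration:

```nim
const
  SECONDS_PER_ETH1_BLOCK = 14'u64   # illustrative assumption
  ETH1_FOLLOW_DISTANCE = 1024'u64   # illustrative assumption

func isCandidate(timestamp, periodStart: uint64): bool =
  timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= periodStart and
    timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 >= periodStart

when isMainModule:
  let periodStart = 1_600_000_000'u64
  # Candidates lie in [periodStart - 28672, periodStart - 14336].
  echo isCandidate(periodStart - 20_000, periodStart)  # true
  echo isCandidate(periodStart - 10_000, periodStart)  # false: too recent
```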
func asEth2Digest(x: BlockHash): Eth2Digest =
func asEth2Digest*(x: BlockHash): Eth2Digest =
Eth2Digest(data: array[32, byte](x))
template asBlockHash(x: Eth2Digest): BlockHash =
BlockHash(x.data)
func shortLog(b: Eth1Block): string =
&"{b.number}:{shortLog b.voteData.block_hash}"
func getDepositsInRange(eth1Chain: Eth1Chain,
sinceBlock, latestBlock: Eth1BlockNumber): seq[Deposit] =
## Returns all deposits that happened AFTER the block `sinceBlock` (not inclusive).
@ -147,57 +164,46 @@ proc findParent*(eth1Chain: Eth1Chain, blk: BlockObject): Eth1Block =
parentHash = blk.parentHash.toHex, parentNumber = result.number
result = nil
when false:
func getCacheIdx(eth1Chain: Eth1Chain, blockNumber: Eth1BlockNumber): int =
if eth1Chain.blocks.len == 0:
return -1
let idx = blockNumber - eth1Chain.blocks[0].number
if idx < 0 or idx >= eth1Chain.blocks.len:
return -1
idx
func `{}`*(eth1Chain: Eth1Chain, blockNumber: Eth1BlockNumber): Eth1Block =
## Finds a block in our cache that corresponds to a particular Eth block
## number. May return `nil` if we don't have such a block in the cache.
let idx = eth1Chain.getCacheIdx(blockNumber)
if idx != -1: eth1Chain.blocks[idx] else: nil
func latestCandidateBlock(eth1Chain: Eth1Chain, periodStart: uint64): Eth1Block =
for i in countdown(eth1Chain.blocks.len - 1, 0):
let blk = eth1Chain.blocks[i]
if is_candidate_block(blk, periodStart):
return blk
func popBlock(eth1Chain: var Eth1Chain) =
let removed = eth1Chain.blocks.popLast
eth1Chain.blocksByHash.del removed.voteData.block_hash.asBlockHash
func trimHeight(eth1Chain: var Eth1Chain, blockNumber: Eth1BlockNumber) =
  ## Removes all blocks above the given `blockNumber`
if eth1Chain.blocks.len == 0:
return
while eth1Chain.blocks.len > 0:
if eth1Chain.blocks.peekLast.number > blockNumber:
eth1Chain.popBlock()
else:
break
let newLen = max(0, int(blockNumber - eth1Chain.blocks[0].number + 1))
for i in newLen ..< eth1Chain.blocks.len:
let removed = eth1Chain.blocks.popLast
eth1Chain.blocksByHash.del removed.voteData.block_hash.asBlockHash
template purgeChain*(eth1Chain: var Eth1Chain, blk: Eth1Block) =
## This is used when we discover that a previously considered block
  ## is no longer part of the selected chain (due to a reorg). We can
  ## then remove it from our chain together with all blocks that follow it.
trimHeight(eth1Chain, blk.number - 1)
func purgeChain*(eth1Chain: var Eth1Chain, blockHash: BlockHash) =
let blk = eth1Chain.findBlock(blockHash)
if blk != nil: eth1Chain.purgeChain(blk)
template purgeDescendants*(eth1CHain: Eth1Chain, blk: Eth1Block) =
trimHeight(eth1Chain, blk.number)
func addBlock*(eth1Chain: var Eth1Chain, newBlock: Eth1Block) =
if eth1Chain.blocks.len > 0:
doAssert eth1Chain.blocks.peekLast.number + 1 == newBlock.number
eth1Chain.blocks.addLast newBlock
eth1Chain.blocksByHash[newBlock.voteData.block_hash.asBlockHash] = newBlock
eth1Chain.allDeposits.setLen(eth1Chain.blocks[^1].voteData.deposit_count)
else:
eth1Chain.allDeposits.setLen(0)
func isSuccessorBlock(eth1Chain: Eth1Chain, newBlock: Eth1Block): bool =
let currentDepositCount = if eth1Chain.blocks.len == 0:
eth1Chain.knownStart.deposit_count
else:
let lastBlock = eth1Chain.blocks.peekLast
if lastBlock.number >= newBlock.number: return false
lastBlock.voteData.deposit_count
(currentDepositCount + newBlock.deposits.len.uint64) == newBlock.voteData.deposit_count
func addSuccessorBlock*(eth1Chain: var Eth1Chain, newBlock: Eth1Block): bool =
result = isSuccessorBlock(eth1Chain, newBlock)
if result:
eth1Chain.allDeposits.add newBlock.deposits
reset newBlock.deposits
eth1Chain.blocks.addLast newBlock
eth1Chain.blocksByHash[newBlock.voteData.block_hash.asBlockHash] = newBlock
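The successor check above is purely arithmetic: a new block is accepted only if the deposit count it reports equals the chain's current count plus the number of deposit log entries the block itself carries. A tiny sanity check with invented numbers (none of them come from this change):

```nim
# Invented values, for illustration only.
let
  currentDepositCount = 10'u64   # deposit_count at the current chain tip
  newBlockDeposits    = 3        # deposit log entries carried by the new block
  reportedCount       = 13'u64   # deposit_count the new block claims

# This is the condition isSuccessorBlock evaluates:
doAssert currentDepositCount + newBlockDeposits.uint64 == reportedCount
```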
func totalDeposits*(eth1Chain: Eth1Chain): int =
for blk in eth1Chain.blocks:
@ -207,6 +213,9 @@ func allDeposits*(eth1Chain: Eth1Chain): seq[Deposit] =
for blk in eth1Chain.blocks:
result.add blk.deposits
func clear*(eth1Chain: var Eth1Chain) =
eth1Chain = default(Eth1Chain)
template hash*(x: Eth1Block): Hash =
hash(x.voteData.block_hash.data)
@ -219,8 +228,15 @@ method getBlockByHash*(p: DataProviderRef, hash: BlockHash): Future[BlockObject]
locks: 0
# raises: [Defect]
.} =
discard
# notImplemented
notImplemented
method getBlockByNumber*(p: DataProviderRef, hash: Eth1BlockNumber): Future[BlockObject] {.
base
gcsafe
locks: 0
# raises: [Defect]
.} =
notImplemented
method onDisconnect*(p: DataProviderRef, handler: DisconnectHandler) {.
base
@ -230,9 +246,9 @@ method onDisconnect*(p: DataProviderRef, handler: DisconnectHandler) {.
.} =
notImplemented
method onDepositEvent*(p: DataProviderRef,
startBlock: Eth1BlockNumber,
handler: DepositEventHandler): Future[void] {.
method onBlockHeaders*(p: DataProviderRef,
blockHeaderHandler: BlockHeaderHandler,
errorHandler: SubscriptionErrorHandler): Future[void] {.
base
gcsafe
locks: 0
@ -249,7 +265,7 @@ method close*(p: DataProviderRef): Future[void] {.
notImplemented
method fetchDepositData*(p: DataProviderRef,
web3Block: BlockObject): Future[Eth1Block] {.
fromBlock, toBlock: Eth1BlockNumber): Future[seq[Eth1Block]] {.
base
gcsafe
locks: 0
@ -257,6 +273,153 @@ method fetchDepositData*(p: DataProviderRef,
.} =
notImplemented
method fetchBlockDetails(p: DataProviderRef, blk: Eth1Block): Future[void] {.
base
gcsafe
locks: 0
# raises: [Defect, CatchableError]
.} =
notImplemented
proc new*(T: type Web3DataProvider,
web3Url: string,
depositContractAddress: Address): Future[ref Web3DataProvider] {.
async
# raises: [Defect]
.} =
try:
type R = ref T
let
web3 = await newWeb3(web3Url)
ns = web3.contractSender(DepositContract, depositContractAddress)
return R(url: web3Url, web3: web3, ns: ns)
except CatchableError:
return nil
func web3Provider*(web3Url: string): DataProviderFactory =
proc factory(depositContractAddress: Address): Future[DataProviderRef] {.async.} =
result = await Web3DataProvider.new(web3Url, depositContractAddress)
DataProviderFactory(desc: "web3(" & web3Url & ")", new: factory)
method close*(p: Web3DataProviderRef): Future[void] {.async, locks: 0.} =
if p.blockHeadersSubscription != nil:
await p.blockHeadersSubscription.unsubscribe()
await p.web3.close()
method getBlockByHash*(p: Web3DataProviderRef, hash: BlockHash): Future[BlockObject] =
return p.web3.provider.eth_getBlockByHash(hash, false)
method getBlockByNumber*(p: Web3DataProviderRef, number: Eth1BlockNumber): Future[BlockObject] =
return p.web3.provider.eth_getBlockByNumber(&"0x{number:X}", false)
proc getBlockNumber(p: DataProviderRef, hash: BlockHash): Future[Eth1BlockNumber] {.async.} =
try:
let blk = awaitWithTimeout(p.getBlockByHash(hash), web3Timeouts):
return 0
return Eth1BlockNumber(blk.number)
except CatchableError as exc:
notice "Failed to get Eth1 block number from hash",
hash = $hash, err = exc.msg
raise
template readJsonField(j: JsonNode,
fieldName: string,
ValueType: type): untyped =
var res: ValueType
fromJson(j[fieldName], fieldName, res)
res
proc readJsonDeposits(depositsList: JsonNode): seq[Eth1Block] =
if depositsList.kind != JArray:
raise newException(CatchableError,
"Web3 provider didn't return a list of deposit events")
var lastEth1Block: Eth1Block
for logEvent in depositsList:
let
blockNumber = Eth1BlockNumber readJsonField(logEvent, "blockNumber", Quantity)
blockHash = readJsonField(logEvent, "blockHash", BlockHash)
logData = strip0xPrefix(logEvent["data"].getStr)
if lastEth1Block == nil or lastEth1Block.number != blockNumber:
lastEth1Block = Eth1Block(
number: blockNumber,
voteData: Eth1Data(block_hash: blockHash.asEth2Digest))
result.add lastEth1Block
var
pubkey: Bytes48
withdrawalCredentials: Bytes32
amount: Bytes8
signature: Bytes96
index: Bytes8
var offset = 0
offset += decode(logData, offset, pubkey)
offset += decode(logData, offset, withdrawalCredentials)
offset += decode(logData, offset, amount)
offset += decode(logData, offset, signature)
offset += decode(logData, offset, index)
lastEth1Block.deposits.add Deposit(
data: DepositData(
pubkey: ValidatorPubKey.init(array[48, byte](pubkey)),
withdrawal_credentials: Eth2Digest(data: array[32, byte](withdrawalCredentials)),
amount: bytes_to_int(array[8, byte](amount)),
signature: ValidatorSig.init(array[96, byte](signature))))
method fetchDepositData*(p: Web3DataProviderRef,
fromBlock, toBlock: Eth1BlockNumber): Future[seq[Eth1Block]]
{.async, locks: 0.} =
info "Obtaining deposit log events", fromBlock, toBlock
return readJsonDeposits(await p.ns.getJsonLogs(DepositEvent,
fromBlock = some blockId(fromBlock),
toBlock = some blockId(toBlock)))
method fetchBlockDetails(p: Web3DataProviderRef, blk: Eth1Block) {.async.} =
let
web3Block = p.getBlockByNumber(blk.number)
depositRoot = p.ns.get_deposit_root.call(blockNumber = blk.number)
rawCount = p.ns.get_deposit_count.call(blockNumber = blk.number)
discard await web3Block
discard await depositRoot
discard await rawCount
let depositCount = bytes_to_int(array[8, byte](rawCount.read))
blk.timestamp = Eth1BlockTimestamp(web3Block.read.timestamp)
blk.voteData.deposit_count = depositCount
blk.voteData.deposit_root = depositRoot.read.asEth2Digest
method onDisconnect*(p: Web3DataProviderRef, handler: DisconnectHandler) {.
gcsafe
locks: 0
# raises: []
.} =
p.web3.onDisconnect = handler
method onBlockHeaders*(p: Web3DataProviderRef,
blockHeaderHandler: BlockHeaderHandler,
errorHandler: SubscriptionErrorHandler): Future[void] {.
async
gcsafe
locks: 0
# raises: []
.} =
if p.blockHeadersSubscription != nil:
await p.blockHeadersSubscription.unsubscribe()
info "Waiting for new Eth1 block headers"
let options = newJObject()
p.blockHeadersSubscription = await p.web3.subscribeForBlockHeaders(
options, blockHeaderHandler, errorHandler)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#get_eth1_data
func getBlockProposalData*(eth1Chain: Eth1Chain,
state: BeaconState): (Eth1Data, seq[Deposit]) =
@ -296,147 +459,163 @@ template getBlockProposalData*(m: MainchainMonitor, state: BeaconState): untyped
proc init*(T: type MainchainMonitor,
dataProviderFactory: DataProviderFactory,
depositContractAddress: string,
startBlock: Eth2Digest): T =
T(depositContractAddress: Address.fromHex(depositContractAddress),
depositQueue: newAsyncQueue[DepositQueueElem](),
startBlock: BlockHash(startBlock.data),
dataProviderFactory: dataProviderFactory)
startPosition: Eth1Data): T =
T(depositQueue: newAsyncQueue[BlockHeader](),
dataProviderFactory: dataProviderFactory,
depositContractAddress: Address.fromHex(depositContractAddress),
eth1Chain: Eth1Chain(knownStart: startPosition))
const MIN_GENESIS_TIME = 0
proc isCandidateForGenesis(timeNow: float, blk: Eth1Block): bool =
if float(blk.timestamp + followDistanceInSeconds) > timeNow:
return false
proc readJsonDeposits(json: JsonNode): seq[Deposit] =
if json.kind != JArray:
raise newException(CatchableError,
"Web3 provider didn't return a list of deposit events")
if genesis_time_from_eth1_timestamp(blk.timestamp) < MIN_GENESIS_TIME:
return false
for logEvent in json:
var logData = strip0xPrefix(json["data"].getStr)
var
pubkey: Bytes48
withdrawalCredentials: Bytes32
amount: Bytes8
signature: Bytes96
index: Bytes8
if blk.knownGoodDepositsCount.isSome:
blk.knownGoodDepositsCount.get >= totalDepositsNeededForGenesis
else:
blk.voteData.deposit_count >= totalDepositsNeededForGenesis
var offset = 0
offset = decode(logData, offset, pubkey)
offset = decode(logData, offset, withdrawalCredentials)
offset = decode(logData, offset, amount)
offset = decode(logData, offset, signature)
offset = decode(logData, offset, index)
result.add Deposit(
# proof: TODO
data: DepositData(
pubkey: ValidatorPubKey.init(array[48, byte](pubkey)),
withdrawal_credentials: Eth2Digest(data: array[32, byte](withdrawalCredentials)),
amount: bytes_to_int(array[8, byte](amount)),
signature: ValidatorSig.init(array[96, byte](signature))))
proc checkForGenesisEvent(m: MainchainMonitor) =
if not m.genesisState.isNil:
proc minGenesisCandidateBlockIdx(eth1Chain: Eth1Chain): Option[int]
{.raises: [Defect].} =
if eth1Chain.blocks.len == 0:
return
let lastBlock = m.eth1Chain.blocks.peekLast
const totalDepositsNeeded = max(SLOTS_PER_EPOCH,
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT)
let now = epochTime()
if not isCandidateForGenesis(now, eth1Chain.blocks.peekLast):
return
if lastBlock.timestamp.uint64 >= MIN_GENESIS_TIME.uint64 and
m.eth1Chain.totalDeposits >= totalDepositsNeeded:
# This block is a genesis candidate
let startTime = lastBlock.timestamp.uint64
var s = initialize_beacon_state_from_eth1(lastBlock.voteData.block_hash,
startTime, m.eth1Chain.allDeposits, {})
if is_valid_genesis_state(s[]):
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
s.genesis_time = startTime
var candidatePos = eth1Chain.blocks.len - 1
while candidatePos > 1:
if not isCandidateForGenesis(now, eth1Chain.blocks[candidatePos - 1]):
break
dec candidatePos
m.genesisState = s
if not m.genesisStateFut.isNil:
m.genesisStateFut.complete()
m.genesisStateFut = nil
return some(candidatePos)
proc processDeposits(m: MainchainMonitor, dataProvider: DataProviderRef) {.
async
# raises: [Defect]
.} =
# ATTENTION!
# Please note that this code is using a queue to guarantee the
# strict serial order of processing of deposits. If we had the
  # same code embedded in the deposit contract's event handler,
  # it could easily re-order the steps due to the interruptible,
  # interleaved execution of async code.
proc createBeaconStateAux(eth1Block: Eth1Block,
deposits: var openarray[Deposit]): BeaconStateRef =
attachMerkleProofs deposits
result = initialize_beacon_state_from_eth1(eth1Block.voteData.block_hash,
eth1Block.timestamp.uint64,
deposits, {})
let activeValidators = get_active_validator_indices(result[], GENESIS_EPOCH)
eth1Block.knownGoodDepositsCount = some len(activeValidators).uint64
proc createBeaconState(eth1Chain: var Eth1Chain, eth1Block: Eth1Block): BeaconStateRef =
createBeaconStateAux(
eth1Block,
eth1Chain.allDeposits.toOpenArray(0, int(eth1Block.voteData.deposit_count - 1)))
proc signalGenesis(m: MainchainMonitor, genesisState: BeaconStateRef) =
m.genesisState = genesisState
if not m.genesisStateFut.isNil:
m.genesisStateFut.complete()
m.genesisStateFut = nil
proc findGenesisBlockInRange(m: MainchainMonitor,
startBlock, endBlock: Eth1Block): Future[Eth1Block]
{.async.} =
let dataProvider = await m.dataProviderFactory.new(m.depositContractAddress)
if dataProvider == nil:
error "Failed to initialize Eth1 data provider",
provider = m.dataProviderFactory.desc
raise newException(CatchableError, "Failed to initialize Eth1 data provider")
var
startBlock = startBlock
endBlock = endBlock
depositData = startBlock.voteData
while startBlock.number + 1 < endBlock.number:
let
startBlockTime = genesis_time_from_eth1_timestamp(startBlock.timestamp)
secondsPerBlock = float(endBlock.timestamp - startBlock.timestamp) /
float(endBlock.number - startBlock.number)
blocksToJump = max(float(MIN_GENESIS_TIME - startBlockTime) / secondsPerBlock, 1.0)
candidateNumber = min(endBlock.number - 1, startBlock.number + blocksToJump.uint64)
candidateBlock = await dataProvider.getBlockByNumber(candidateNumber)
var candidateAsEth1Block = Eth1Block(number: candidateBlock.number.uint64,
timestamp: candidateBlock.timestamp.uint64,
voteData: depositData)
candidateAsEth1Block.voteData.block_hash = candidateBlock.hash.asEth2Digest
info "Probing possible genesis block",
`block` = candidateBlock.number.uint64,
timestamp = genesis_time_from_eth1_timestamp(candidateBlock.timestamp.uint64)
if genesis_time_from_eth1_timestamp(candidateBlock.timestamp.uint64) < MIN_GENESIS_TIME:
startBlock = candidateAsEth1Block
else:
endBlock = candidateAsEth1Block
return endBlock
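The loop above behaves like an interpolation search over block timestamps: it estimates how many blocks to jump from the lower bound using the average block time of the current range, then narrows the range around the probed block. A rough standalone sketch of the jump computation, using made-up numbers rather than values from this change:

```nim
# Made-up inputs: a 1000-block range averaging 150 s per block.
let
  minGenesisTime  = 1_590_000_000.0   # hypothetical MIN_GENESIS_TIME
  startBlockTime  = 1_589_900_000.0   # genesis-mapped time of the lower bound
  startNumber     = 1_000'u64
  endNumber       = 2_000'u64
  startTimestamp  = 1_589_800_000.0
  endTimestamp    = 1_589_950_000.0
  secondsPerBlock = (endTimestamp - startTimestamp) / float(endNumber - startNumber)
  blocksToJump    = max((minGenesisTime - startBlockTime) / secondsPerBlock, 1.0)
  candidateNumber = min(endNumber - 1, startNumber + blocksToJump.uint64)

echo secondsPerBlock   # 150.0
echo blocksToJump      # ~666.7 blocks forward from the lower bound
echo candidateNumber   # 1666, the next block fetched with getBlockByNumber
```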
proc checkForGenesisLoop(m: MainchainMonitor) {.async.} =
while true:
let (blockHash, eventType) = await m.depositQueue.popFirst()
if not m.genesisState.isNil:
return
if eventType == RemovedEvent:
m.eth1Chain.purgeChain(blockHash)
continue
let cachedBlock = m.eth1Chain.findBlock(blockHash)
if cachedBlock == nil:
try:
try:
let genesisCandidateIdx = m.eth1Chain.minGenesisCandidateBlockIdx
if genesisCandidateIdx.isSome:
let
web3Block = await dataProvider.getBlockByHash(blockHash)
eth1Block = await dataProvider.fetchDepositData(web3Block)
genesisCandidateIdx = genesisCandidateIdx.get
genesisCandidate = m.eth1Chain.blocks[genesisCandidateIdx]
candidateState = m.eth1Chain.createBeaconState(genesisCandidate)
if m.eth1Chain.blocks.len > 0:
var cachedParent = m.eth1Chain.findParent(web3Block)
if cachedParent == nil:
# We are missing the parent block.
          # This shouldn't be happening if the deposit events are reported in
# proper order, but nevertheless let's try to repair our chain:
var chainOfParents = newSeq[Eth1Block]()
var parentHash = web3Block.parentHash
var expectedParentBlockNumber = web3Block.number.uint64 - 1
warn "Eth1 parent block missing. Attempting to request from the network",
parentHash = parentHash.toHex
if genesisCandidate.knownGoodDepositsCount.get >= totalDepositsNeededForGenesis:
# We have a candidate state on our hands, but our current Eth1Chain
# may consist only of blocks that have deposits attached to them
# while the real genesis may have happened in a block without any
# deposits (triggered by MIN_GENESIS_TIME).
#
# This can happen when the beacon node is launched after the genesis
# event. We take a short cut when constructing the initial Eth1Chain
# by downloading only deposit log entries. Thus, we'll see all the
# blocks with deposits, but not the regular blocks in between.
#
          # We'll handle this special case below by examining whether we are in
          # this potential scenario and we'll use a fast guessing algorithm to
          # discover the Eth1 block with minimal valid genesis time.
if genesisCandidateIdx > 0:
let preceedingEth1Block = m.eth1Chain.blocks[genesisCandidateIdx - 1]
if preceedingEth1Block.voteData.deposit_root == genesisCandidate.voteData.deposit_root:
preceedingEth1Block.knownGoodDepositsCount = genesisCandidate.knownGoodDepositsCount
else:
discard m.eth1Chain.createBeaconState(preceedingEth1Block)
while true:
if chainOfParents.len > reorgDepthLimit:
error "Detected Eth1 re-org exceeded the maximum depth limit",
headBlockHash = web3Block.hash.toHex,
ourHeadHash = m.eth1Chain.blocks.peekLast.voteData.block_hash
raise newException(ReorgDepthLimitExceeded, "Reorg depth limit exceeded")
if preceedingEth1Block.knownGoodDepositsCount.get >= totalDepositsNeededForGenesis and
genesisCandidate.number - preceedingEth1Block.number > 1:
let genesisBlock = await m.findGenesisBlockInRange(preceedingEth1Block, genesisCandidate)
if genesisBlock.number != genesisCandidate.number:
m.signalGenesis m.eth1Chain.createBeaconState(genesisBlock)
return
let parentWeb3Block = await dataProvider.getBlockByHash(parentHash)
if parentWeb3Block.number.uint64 != expectedParentBlockNumber:
error "Eth1 data provider supplied invalid parent block",
parentBlockNumber = parentWeb3Block.number.uint64,
expectedParentBlockNumber, parentHash = parentHash.toHex
raise newException(CorruptDataProvider,
"Parent block with incorrect number")
m.signalGenesis candidateState
return
else:
info "Eth2 genesis candidate block rejected",
`block` = shortLog(genesisCandidate),
validDeposits = genesisCandidate.knownGoodDepositsCount.get,
needed = totalDepositsNeededForGenesis
else:
# TODO: check for a stale monitor
discard
except CatchableError as err:
debug "Unexpected error in checkForGenesisLoop", err = err.msg
chainOfParents.add(await dataProvider.fetchDepositData(parentWeb3Block))
let localParent = m.eth1Chain.findParent(parentWeb3Block)
if localParent != nil:
m.eth1Chain.purgeDescendants(localParent)
for i in countdown(chainOfParents.len - 1, 0):
m.eth1Chain.addBlock chainOfParents[i]
cachedParent = m.eth1Chain.blocks.peekLast
break
await sleepAsync(1.seconds)
dec expectedParentBlockNumber
parentHash = parentWeb3Block.parentHash
m.eth1Chain.purgeDescendants(cachedParent)
m.eth1Chain.addBlock eth1Block
m.checkForGenesisEvent()
except CatchableError:
# Connection problem? Put the unprocessed deposit back to queue.
# Raising the exception here will lead to a restart of the whole monitor.
m.depositQueue.addFirstNoWait((blockHash, eventType))
raise
proc isRunning*(m: MainchainMonitor): bool =
not m.runFut.isNil
proc getGenesis*(m: MainchainMonitor): Future[BeaconStateRef] {.async.} =
proc waitGenesis*(m: MainchainMonitor): Future[BeaconStateRef] {.async.} =
if m.genesisState.isNil:
if m.genesisStateFut.isNil:
m.genesisStateFut = newFuture[void]("getGenesis")
m.genesisStateFut = newFuture[void]("waitGenesis")
m.genesisMonitoringFut = m.checkForGenesisLoop()
await m.genesisStateFut
m.genesisStateFut = nil
@ -446,85 +625,105 @@ proc getGenesis*(m: MainchainMonitor): Future[BeaconStateRef] {.async.} =
result = new BeaconStateRef # make the compiler happy
raiseAssert "Unreachable code"
method getBlockByHash*(p: Web3DataProviderRef, hash: BlockHash): Future[BlockObject] =
discard
# p.web3.provider.eth_getBlockByHash(hash, false)
func totalNonFinalizedBlocks(eth1Chain: Eth1Chain): Natural =
# TODO: implement this precisely
eth1Chain.blocks.len
method close*(p: Web3DataProviderRef): Future[void] {.async, locks: 0.} =
if p.subscription != nil:
await p.subscription.unsubscribe()
await p.web3.close()
func latestEth1Data(eth1Chain: Eth1Chain): Eth1Data =
if eth1Chain.blocks.len > 0:
eth1Chain.blocks[^1].voteData
else:
eth1Chain.knownStart
method fetchDepositData*(p: Web3DataProviderRef,
web3Block: BlockObject): Future[Eth1Block] {.async, locks: 0.} =
let
blockHash = web3Block.hash
depositRoot = await p.ns.get_deposit_root.call(blockNumber = web3Block.number.uint64)
rawCount = await p.ns.get_deposit_count.call(blockNumber = web3Block.number.uint64)
depositCount = bytes_to_int(array[8, byte](rawCount))
depositsJson = await p.ns.getJsonLogs(DepositEvent, blockHash = some(blockHash))
deposits = readJsonDeposits(depositsJson)
func knownInvalidDepositsCount(eth1Chain: Eth1Chain): uint64 =
for i in countdown(eth1Chain.blocks.len - 1, 0):
let blk = eth1Chain.blocks[i]
if blk.knownGoodDepositsCount.isSome:
return blk.voteData.deposit_count - blk.knownGoodDepositsCount.get
return Eth1Block(
number: Eth1BlockNumber(web3Block.number),
timestamp: Eth1BlockTimestamp(web3Block.timestamp),
deposits: deposits,
voteData: Eth1Data(deposit_root: depositRoot.asEth2Digest,
deposit_count: depositCount,
block_hash: blockHash.asEth2Digest))
return 0
method onDisconnect*(p: Web3DataProviderRef, handler: DisconnectHandler) {.
gcsafe
locks: 0
# raises: []
.} =
p.web3.onDisconnect = handler
func maxValidDeposits(eth1Chain: Eth1Chain): uint64 =
if eth1Chain.blocks.len > 0:
let lastBlock = eth1Chain.blocks[^1]
lastBlock.knownGoodDepositsCount.get(
lastBlock.voteData.deposit_count - eth1Chain.knownInvalidDepositsCount)
else:
0
method onDepositEvent*(p: Web3DataProviderRef,
startBlock: Eth1BlockNumber,
handler: DepositEventHandler): Future[void] {.
async
gcsafe
locks: 0
# raises: []
.} =
if p.subscription != nil:
await p.subscription.unsubscribe()
proc processDeposits(m: MainchainMonitor,
dataProvider: DataProviderRef) {.async.} =
# ATTENTION!
# Please note that this code is using a queue to guarantee the
# strict serial order of processing of deposits. If we had the
  # same code embedded in the deposit contract's event handler,
  # it could easily re-order the steps due to the interruptible,
  # interleaved execution of async code.
while true:
let blk = await m.depositQueue.popFirst()
m.eth1Chain.trimHeight(Eth1BlockNumber(blk.number) - 1)
p.subscription = await p.ns.subscribe(
DepositEvent, %*{"fromBlock": startBlock}, handler)
let latestKnownBlock = if m.eth1Chain.blocks.len > 0:
m.eth1Chain.blocks[^1].number
elif m.eth1Chain.knownStartBlockNum.isSome:
m.eth1Chain.knownStartBlockNum.get
else:
m.eth1Chain.knownStartBlockNum = some(
await dataProvider.getBlockNumber(m.eth1Chain.knownStart.block_hash.asBlockHash))
m.eth1Chain.knownStartBlockNum.get
proc getBlockNumber(p: DataProviderRef, hash: BlockHash): Future[Quantity] {.async.} =
debug "Querying block number", hash = $hash
let eth1Blocks = await dataProvider.fetchDepositData(latestKnownBlock + 1,
Eth1BlockNumber blk.number)
if eth1Blocks.len == 0:
if m.eth1Chain.maxValidDeposits > totalDepositsNeededForGenesis and
m.eth1Chain.knownStart.deposit_count == 0:
let latestEth1Data = m.eth1Chain.latestEth1Data
try:
let blk = await p.getBlockByHash(hash)
return blk.number
except CatchableError as exc:
notice "Failed to get Eth1 block number from hash",
hash = $hash, err = exc.msg
raise
for missingBlockNum in latestKnownBlock + 1 ..< Eth1BlockNumber(blk.number):
let missingBlock = await dataProvider.getBlockByNumber(missingBlockNum)
doAssert m.eth1Chain.addSuccessorBlock Eth1Block(
number: Eth1BlockNumber(missingBlock.number),
timestamp: Eth1BlockTimestamp(missingBlock.timestamp),
voteData: latestEth1Data)
proc new*(T: type Web3DataProvider,
web3Url: string,
depositContractAddress: Address): Future[ref Web3DataProvider] {.
async
# raises: [Defect]
.} =
try:
type R = ref T
let
web3 = await newWeb3(web3Url)
ns = web3.contractSender(DepositContract, depositContractAddress)
return R(url: web3Url, web3: web3, ns: ns)
except CatchableError:
return nil
doAssert m.eth1Chain.addSuccessorBlock Eth1Block(
number: Eth1BlockNumber(blk.number),
timestamp: Eth1BlockTimestamp(blk.timestamp),
voteData: latestEth1Data)
else:
template logBlockProcessed(blk) =
info "Eth1 block processed",
`block` = shortLog(blk), totalDeposits = blk.voteData.deposit_count
func web3Provider*(web3Url: string): DataProviderFactory =
proc factory(depositContractAddress: Address): Future[DataProviderRef] {.async.} =
result = await Web3DataProvider.new(web3Url, depositContractAddress)
await dataProvider.fetchBlockDetails(eth1Blocks[0])
if m.eth1Chain.addSuccessorBlock(eth1Blocks[0]):
logBlockProcessed eth1Blocks[0]
DataProviderFactory(desc: "web3(" & web3Url & ")", new: factory)
for i in 1 ..< eth1Blocks.len:
await dataProvider.fetchBlockDetails(eth1Blocks[i])
if m.eth1Chain.addSuccessorBlock(eth1Blocks[i]):
logBlockProcessed eth1Blocks[i]
else:
raise newException(CorruptDataProvider,
"A non-successor Eth1 block reported")
else:
# A non-continuous chain detected.
# This could be the result of a deeper fork that was not reported
# properly by the web3 provider. Since this should be an extremely
# rare event we can afford to handle it in a relatively inefficient
# manner. Let's delete half of our non-finalized chain and try again.
let blocksToPop = max(1, m.eth1Chain.totalNonFinalizedBlocks div 2)
warn "Web3 provider responded with a non-continous chain of deposits.",
backtrackedDeposits = blocksToPop
for i in 0 ..< blocksToPop:
m.eth1Chain.popBlock()
m.depositQueue.addFirstNoWait blk
proc isRunning*(m: MainchainMonitor): bool =
not m.runFut.isNil
func `===`(json: JsonNode, boolean: bool): bool =
json.kind == JBool and json.bval == boolean
proc run(m: MainchainMonitor, delayBeforeStart: Duration) {.async.} =
if delayBeforeStart != ZeroDuration:
@ -535,44 +734,37 @@ proc run(m: MainchainMonitor, delayBeforeStart: Duration) {.async.} =
error "Failed to initialize Eth1 data provider",
provider = m.dataProviderFactory.desc
raise newException(CatchableError, "Failed to initialize Eth1 data provider")
defer: await close(dataProvider)
let processFut = m.processDeposits(dataProvider)
defer: await processFut
try:
info "Starting Eth1 deposit contract monitoring",
contract = $m.depositContractAddress,
url = m.dataProviderFactory.desc
dataProvider.onDisconnect do:
error "Eth1 data provider disconnected",
provider = m.dataProviderFactory.desc
processFut.cancel()
await dataProvider.onBlockHeaders do (blk: BlockHeader)
{.raises: [Defect], gcsafe}:
try:
m.depositQueue.addLastNoWait(blk)
except AsyncQueueFullError:
raiseAssert "The depositQueue has no size limit"
except Exception:
# TODO Investigate why this exception is being raised
raiseAssert "queue.addLastNoWait should not raise exceptions"
do (err: CatchableError):
debug "Error while processing Eth1 block headers subscription", err = err.msg
let startBlkNum = await dataProvider.getBlockNumber(m.startBlock)
notice "Monitoring eth1 deposits",
fromBlock = startBlkNum.uint64,
contract = $m.depositContractAddress,
url = m.dataProviderFactory.desc
await m.processDeposits(dataProvider)
await dataProvider.onDepositEvent(Eth1BlockNumber(startBlkNum)) do (
pubkey: Bytes48,
withdrawalCredentials: Bytes32,
amount: Bytes8,
signature: Bytes96, merkleTreeIndex: Bytes8, j: JsonNode)
{.raises: [Defect], gcsafe.}:
try:
let
blockHash = BlockHash.fromHex(j["blockHash"].getStr())
eventType = if j.hasKey("removed"): RemovedEvent
else: NewEvent
finally:
await close(dataProvider)
m.depositQueue.addLastNoWait((blockHash, eventType))
proc safeCancel(fut: var Future[void]) =
if not fut.isNil and not fut.finished:
fut.cancel()
fut = nil
except CatchableError as exc:
warn "Received invalid deposit", err = exc.msg, j
except Exception as err:
# chronos still raises exceptions which inherit directly from Exception
if err[] of Defect:
raise (ref Defect)(err)
else:
warn "Received invalid deposit", err = err.msg, j
proc stop*(m: MainchainMonitor) =
safeCancel m.runFut
safeCancel m.genesisMonitoringFut
proc start(m: MainchainMonitor, delayBeforeStart: Duration) =
if m.runFut.isNil:
@ -583,7 +775,7 @@ proc start(m: MainchainMonitor, delayBeforeStart: Duration) =
if runFut.error[] of CatchableError:
if runFut == m.runFut:
error "Mainchain monitor failure, restarting", err = runFut.error.msg
m.runFut = nil
m.stop()
m.start(5.seconds)
else:
fatal "Fatal exception reached", err = runFut.error.msg
@ -592,11 +784,6 @@ proc start(m: MainchainMonitor, delayBeforeStart: Duration) =
proc start*(m: MainchainMonitor) {.inline.} =
m.start(0.seconds)
proc stop*(m: MainchainMonitor) =
if not m.runFut.isNil:
m.runFut.cancel()
m.runFut = nil
proc getLatestEth1BlockHash*(url: string): Future[Eth2Digest] {.async.} =
let web3 = await newWeb3(url)
try:

View File

@ -83,7 +83,7 @@ proc getMerkleProof*[Depth: static int](tree: SparseMerkleTree[Depth],
else:
result[depth] = zeroHashes[depth]
proc attachMerkleProofs*(deposits: var seq[Deposit]) =
func attachMerkleProofs*(deposits: var openarray[Deposit]) =
let deposit_data_roots = mapIt(deposits, it.data.hash_tree_root)
var
deposit_data_sums: seq[Eth2Digest]

View File

@ -1,71 +1,133 @@
import
options, random,
chronos, chronicles,
spec/datatypes,
eth2_network, beacon_node_types, sync_protocol,
eth/async_utils
import options, sequtils, strutils
import chronos, chronicles
import spec/[datatypes, digest], eth2_network, beacon_node_types, sync_protocol,
sync_manager, ssz/merkleization
logScope:
topics = "requman"
const
MAX_REQUEST_BLOCKS* = 4 # Specification's value is 1024.
    ## Maximum number of blocks that can be requested by beaconBlocksByRoot.
  PARALLEL_REQUESTS* = 2
    ## Number of peers we use to resolve our request.
type
RequestManager* = object
network*: Eth2Node
queue*: AsyncQueue[FetchRecord]
responseHandler*: FetchAncestorsResponseHandler
loopFuture: Future[void]
proc init*(T: type RequestManager, network: Eth2Node): T =
T(network: network)
type
FetchAncestorsResponseHandler = proc (b: SignedBeaconBlock) {.gcsafe.}
proc fetchAncestorBlocksFromPeer(
peer: Peer,
rec: FetchRecord,
responseHandler: FetchAncestorsResponseHandler) {.async.} =
# TODO: It's not clear if this function follows the intention of the
# FetchRecord data type. Perhaps it is supposed to get a range of blocks
# instead. In order to do this, we'll need the slot number of the known
# block to be stored in the FetchRecord, so we can ask for a range of
# blocks starting N positions before this slot number.
try:
let blocks = await peer.beaconBlocksByRoot(BlockRootsList @[rec.root])
if blocks.isOk:
for b in blocks.get:
responseHandler(b)
except CatchableError as err:
debug "Error while fetching ancestor blocks",
err = err.msg, root = rec.root, peer = peer
func shortLog*(x: seq[Eth2Digest]): string =
"[" & x.mapIt(shortLog(it)).join(", ") & "]"
proc fetchAncestorBlocksFromNetwork(
network: Eth2Node,
rec: FetchRecord,
responseHandler: FetchAncestorsResponseHandler) {.async.} =
func shortLog*(x: seq[FetchRecord]): string =
"[" & x.mapIt(shortLog(it.root)).join(", ") & "]"
proc init*(T: type RequestManager, network: Eth2Node,
responseCb: FetchAncestorsResponseHandler): T =
T(
network: network, queue: newAsyncQueue[FetchRecord](),
responseHandler: responseCb
)
proc checkResponse(roots: openArray[Eth2Digest],
blocks: openArray[SignedBeaconBlock]): bool =
  ## This procedure checks the peer's response.
var checks = @roots
if len(blocks) > len(roots):
return false
for blk in blocks:
let blockRoot = hash_tree_root(blk.message)
let res = checks.find(blockRoot)
if res == -1:
return false
else:
checks.del(res)
return true
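checkResponse accepts a response only when it contains no more blocks than were requested and every returned block hashes back to a requested root that has not been matched yet. A reduced sketch of the same matching rule over plain strings (the real code recomputes each root with hash_tree_root):

```nim
# Simplified stand-in for checkResponse; `roots` are the requested roots,
# `answered` are the roots recomputed from the returned blocks.
proc checkResponseSketch(roots, answered: openArray[string]): bool =
  if answered.len > roots.len:
    return false                 # more blocks than we asked for
  var remaining = @roots
  for r in answered:
    let idx = remaining.find(r)
    if idx == -1:
      return false               # a block we never asked for, or a duplicate
    remaining.del(idx)           # each requested root may be answered once
  return true

doAssert checkResponseSketch(["a", "b"], ["b"])           # a subset is fine
doAssert not checkResponseSketch(["a", "b"], ["b", "b"])  # duplicates rejected
doAssert not checkResponseSketch(["a"], ["c"])            # unknown root rejected
```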
proc fetchAncestorBlocksFromNetwork(rman: RequestManager,
items: seq[Eth2Digest]) {.async.} =
var peer: Peer
try:
peer = await network.peerPool.acquire()
let blocks = await peer.beaconBlocksByRoot(BlockRootsList @[rec.root])
peer = await rman.network.peerPool.acquire()
debug "Requesting blocks by root", peer = peer, blocks = shortLog(items),
peer_score = peer.getScore()
let blocks = await peer.beaconBlocksByRoot(BlockRootsList items)
if blocks.isOk:
for b in blocks.get:
responseHandler(b)
except CatchableError as err:
debug "Error while fetching ancestor blocks",
err = err.msg, root = rec.root, peer = peer
let ublocks = blocks.get()
if checkResponse(items, ublocks):
for b in ublocks:
rman.responseHandler(b)
peer.updateScore(PeerScoreGoodBlocks)
else:
peer.updateScore(PeerScoreBadResponse)
else:
peer.updateScore(PeerScoreNoBlocks)
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Error while fetching ancestor blocks", exc = exc.msg,
items = shortLog(items), peer = peer, peer_score = peer.getScore()
raise exc
finally:
if not(isNil(peer)):
network.peerPool.release(peer)
rman.network.peerPool.release(peer)
proc fetchAncestorBlocks*(requestManager: RequestManager,
roots: seq[FetchRecord],
responseHandler: FetchAncestorsResponseHandler) =
# TODO: we could have some fancier logic here:
#
# * Keeps track of what was requested
# (this would give a little bit of time for the asked peer to respond)
#
# * Keep track of the average latency of each peer
# (we can give priority to peers with better latency)
#
const ParallelRequests = 2
proc requestManagerLoop(rman: RequestManager) {.async.} =
var rootList = newSeq[Eth2Digest]()
var workers = newSeq[Future[void]](PARALLEL_REQUESTS)
while true:
try:
rootList.setLen(0)
let req = await rman.queue.popFirst()
rootList.add(req.root)
for i in 0 ..< ParallelRequests:
traceAsyncErrors fetchAncestorBlocksFromNetwork(requestManager.network,
roots.sample(),
responseHandler)
var count = min(MAX_REQUEST_BLOCKS - 1, len(rman.queue))
while count > 0:
rootList.add(rman.queue.popFirstNoWait().root)
dec(count)
let start = SyncMoment.now(Slot(0))
for i in 0 ..< PARALLEL_REQUESTS:
workers[i] = rman.fetchAncestorBlocksFromNetwork(rootList)
      # We do not care about individual worker failures here; we just wait
      # for all of them to finish and tally the successes below.
await allFutures(workers)
let finish = SyncMoment.now(Slot(0) + uint64(len(rootList)))
var succeed = 0
for worker in workers:
if worker.finished() and not(worker.failed()):
inc(succeed)
debug "Request manager tick", blocks_count = len(rootList),
succeed = succeed,
failed = (len(workers) - succeed),
queue_size = len(rman.queue),
sync_speed = speed(start, finish)
except CatchableError as exc:
debug "Got a problem in request manager", exc = exc.msg
proc start*(rman: var RequestManager) =
## Start Request Manager's loop.
rman.loopFuture = requestManagerLoop(rman)
proc stop*(rman: RequestManager) =
## Stop Request Manager's loop.
if not(isNil(rman.loopFuture)):
rman.loopFuture.cancel()
proc fetchAncestorBlocks*(rman: RequestManager, roots: seq[FetchRecord]) =
  ## Enqueue a list of missing block roots ``roots`` for download by
  ## Request Manager ``rman``.
for item in roots:
rman.queue.addLastNoWait(item)
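requestManagerLoop drains its queue in batches: it awaits the first record, then pops up to MAX_REQUEST_BLOCKS - 1 more without waiting before dispatching the workers. A self-contained sketch of just that draining step, assuming the project's chronos dependency and using invented string roots in place of FetchRecord:

```nim
import chronos

const MAX_REQUEST_BLOCKS = 4   # mirrors the constant above

proc drainDemo() {.async.} =
  let queue = newAsyncQueue[string]()
  for r in ["r1", "r2", "r3", "r4", "r5"]:
    queue.addLastNoWait(r)

  var batch = @[await queue.popFirst()]     # block until something is queued
  var count = min(MAX_REQUEST_BLOCKS - 1, len(queue))
  while count > 0:
    batch.add(queue.popFirstNoWait())       # drain the rest without waiting
    dec(count)

  echo batch       # @["r1", "r2", "r3", "r4"]; "r5" stays queued for the next tick
  echo len(queue)  # 1

waitFor drainDemo()
```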

View File

@ -14,7 +14,7 @@ import
./crypto, ./datatypes, ./digest, ./helpers, ./signatures, ./validator,
../../nbench/bench_lab
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_valid_merkle_branch
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_valid_merkle_branch
func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool {.nbench.}=
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
## ``branch``.
@ -32,37 +32,37 @@ func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], de
value = eth2digest(buf)
value == root
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#increase_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#increase_balance
func increase_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
# Increase the validator balance at index ``index`` by ``delta``.
state.balances[index] += delta
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#decrease_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#decrease_balance
func decrease_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
## Decrease the validator balance at index ``index`` by ``delta``, with
## underflow protection.
# Decrease the validator balance at index ``index`` by ``delta``, with
# underflow protection.
state.balances[index] =
if delta > state.balances[index]:
0'u64
else:
state.balances[index] - delta
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#deposits
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#deposits
proc process_deposit*(
state: var BeaconState, deposit: Deposit, flags: UpdateFlags = {}): bool {.nbench.}=
# Process an Eth1 deposit, registering a validator or increasing its balance.
# Verify the Merkle branch
if skipMerkleValidation notin flags and not is_valid_merkle_branch(
if not is_valid_merkle_branch(
hash_tree_root(deposit.data),
deposit.proof,
DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the `List` length mix-in
state.eth1_deposit_index,
state.eth1_data.deposit_root,
):
notice "Deposit merkle validation failed",
notice "Deposit Merkle validation failed",
proof = deposit.proof, deposit_root = state.eth1_data.deposit_root,
deposit_index = state.eth1_deposit_index
return false
@ -83,7 +83,9 @@ proc process_deposit*(
if not verify_deposit_signature(deposit.data):
# It's ok that deposits fail - they get included in blocks regardless
# TODO spec test?
debug "Skipping deposit with invalid signature",
# TODO: This is temporary set to trace level in order to deal with the
# large number of invalid deposits on Altona
trace "Skipping deposit with invalid signature",
deposit = shortLog(deposit.data)
return true
@ -111,7 +113,7 @@ func compute_activation_exit_epoch(epoch: Epoch): Epoch =
## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_validator_churn_limit
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit(state: BeaconState, cache: var StateCache):
uint64 =
# Return the validator churn limit for the current epoch.
@ -119,7 +121,7 @@ func get_validator_churn_limit(state: BeaconState, cache: var StateCache):
len(cache.shuffled_active_validator_indices) div
CHURN_LIMIT_QUOTIENT).uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#initiate_validator_exit
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#initiate_validator_exit
func initiate_validator_exit*(state: var BeaconState,
index: ValidatorIndex, cache: var StateCache) =
# Initiate the exit of the validator with index ``index``.
@ -148,7 +150,7 @@ func initiate_validator_exit*(state: var BeaconState,
validator.withdrawable_epoch =
validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#slash_validator
proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
cache: var StateCache) =
# Slash the validator with index ``index``.
@ -192,12 +194,20 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
increase_balance(
state, whistleblower_index, whistleblowing_reward - proposer_reward)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#genesis
func genesis_time_from_eth1_timestamp*(eth1_timestamp: uint64): uint64 =
# TODO: remove once we switch completely to v0.12.1
when SPEC_VERSION == "0.12.1":
eth1_timestamp + GENESIS_DELAY
else:
const SECONDS_PER_DAY = uint64(60*60*24)
eth1_timestamp + 2'u64 * SECONDS_PER_DAY - (eth1_timestamp mod SECONDS_PER_DAY)
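To make the two branches concrete, a small worked example with an arbitrary Eth1 timestamp; the constant values below are illustrative stand-ins, not the preset values:

```nim
const
  SECONDS_PER_DAY = uint64(60*60*24)   # 86_400
  GENESIS_DELAY   = 172_800'u64        # assumed value for the v0.12.1 branch

let eth1_timestamp = 1_590_000_123'u64 # arbitrary Eth1 block timestamp

# v0.12.1 branch: a fixed delay after the Eth1 block
echo eth1_timestamp + GENESIS_DELAY                       # 1590172923

# pre-0.12.1 branch: two days ahead, rounded down to midnight UTC
echo eth1_timestamp + 2'u64 * SECONDS_PER_DAY -
     (eth1_timestamp mod SECONDS_PER_DAY)                 # 1590105600
```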
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#genesis
proc initialize_beacon_state_from_eth1*(
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[Deposit],
flags: UpdateFlags = {}): BeaconStateRef {.nbench.}=
flags: UpdateFlags = {}): BeaconStateRef {.nbench.} =
## Get the genesis ``BeaconState``.
##
## Before the beacon chain starts, validators will register in the Eth1 chain
@ -214,20 +224,20 @@ proc initialize_beacon_state_from_eth1*(
# at that point :)
doAssert deposits.len >= SLOTS_PER_EPOCH
const SECONDS_PER_DAY = uint64(60*60*24)
var state = BeaconStateRef(
fork: Fork(
previous_version: Version(GENESIS_FORK_VERSION),
current_version: Version(GENESIS_FORK_VERSION),
epoch: GENESIS_EPOCH),
genesis_time:
eth1_timestamp + 2'u64 * SECONDS_PER_DAY -
(eth1_timestamp mod SECONDS_PER_DAY),
genesis_time: genesis_time_from_eth1_timestamp(eth1_timestamp),
eth1_data:
Eth1Data(block_hash: eth1_block_hash, deposit_count: uint64(len(deposits))),
latest_block_header:
BeaconBlockHeader(
body_root: hash_tree_root(BeaconBlockBody(
# This differs from the spec intentionally.
# We must specify the default value for `ValidatorSig`
# in order to get a correct `hash_tree_root`.
randao_reveal: ValidatorSig(kind: OpaqueBlob)
))
)
@ -273,11 +283,11 @@ proc initialize_hashed_beacon_state_from_eth1*(
eth1_block_hash, eth1_timestamp, deposits, flags)
HashedBeaconState(data: genesisState[], root: hash_tree_root(genesisState[]))
func is_valid_genesis_state*(state: BeaconState): bool =
func is_valid_genesis_state*(state: BeaconState, active_validator_indices: seq[ValidatorIndex]): bool =
if state.genesis_time < MIN_GENESIS_TIME:
return false
# This is an okay get_active_validator_indices(...) for the time being.
if len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
if len(active_validator_indices) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
return false
return true
@ -303,12 +313,12 @@ func get_block_root_at_slot*(state: BeaconState,
doAssert slot < state.slot
state.block_roots[slot mod SLOTS_PER_HISTORICAL_ROOT]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_block_root
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_block_root
func get_block_root*(state: BeaconState, epoch: Epoch): Eth2Digest =
# Return the block root at the start of a recent ``epoch``.
get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_total_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_total_balance
func get_total_balance*(state: BeaconState, validators: auto): Gwei =
## Return the combined effective balance of the ``indices``.
## ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
@ -317,15 +327,13 @@ func get_total_balance*(state: BeaconState, validators: auto): Gwei =
foldl(validators, a + state.validators[b].effective_balance, 0'u64)
)
# XXX: Move to state_transition_epoch.nim?
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue
func is_eligible_for_activation_queue(validator: Validator): bool =
# Check if ``validator`` is eligible to be placed into the activation queue.
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
validator.effective_balance == MAX_EFFECTIVE_BALANCE
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_eligible_for_activation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_eligible_for_activation
func is_eligible_for_activation(state: BeaconState, validator: Validator):
bool =
# Check if ``validator`` is eligible for activation.
@ -394,21 +402,31 @@ proc process_registry_updates*(state: var BeaconState,
validator.activation_epoch =
compute_activation_exit_epoch(get_current_epoch(state))
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
proc is_valid_indexed_attestation*(
state: BeaconState, indexed_attestation: IndexedAttestation,
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
func is_valid_indexed_attestation*(
state: BeaconState, indexed_attestation: SomeIndexedAttestation,
flags: UpdateFlags): bool =
# Check if ``indexed_attestation`` has sorted and unique indices and a valid
# aggregate signature.
# TODO: this is noSideEffect besides logging
# https://github.com/status-im/nim-chronicles/issues/62
# Check if ``indexed_attestation`` is not empty, has sorted and unique
# indices and has a valid aggregate signature.
template is_sorted_and_unique(s: untyped): bool =
for i in 1 ..< s.len:
if s[i - 1].uint64 >= s[i].uint64:
return false
true
# Not from spec, but this function gets used in front-line roles, not just
  # behind a firewall.
let num_validators = state.validators.len.uint64
if anyIt(indexed_attestation.attesting_indices, it >= num_validators):
trace "indexed attestation: not all indices valid validators"
return false
# Verify indices are sorted and unique
  # TODO: A simple loop can verify that the indices are monotonically
# increasing and non-repeating here!
let indices = indexed_attestation.attesting_indices
if indices.asSeq != sorted(toHashSet(indices.asSeq).toSeq, system.cmp):
notice "indexed attestation: indices not sorted"
let indices = indexed_attestation.attesting_indices.asSeq
if len(indices) == 0 or not is_sorted_and_unique(indices):
trace "indexed attestation: indices not sorted and unique"
return false
# Verify aggregate signature
@ -418,7 +436,7 @@ proc is_valid_indexed_attestation*(
if not verify_attestation_signature(
state.fork, state.genesis_validators_root, indexed_attestation.data,
pubkeys, indexed_attestation.signature):
notice "indexed attestation: signature verification failure"
trace "indexed attestation: signature verification failure"
return false
true
@ -449,7 +467,7 @@ func get_attesting_indices*(state: BeaconState,
if bits[i]:
result.incl index
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_indexed_attestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_indexed_attestation
func get_indexed_attestation*(state: BeaconState, attestation: Attestation,
stateCache: var StateCache): IndexedAttestation =
# Return the indexed attestation corresponding to ``attestation``.
@ -466,6 +484,22 @@ func get_indexed_attestation*(state: BeaconState, attestation: Attestation,
signature: attestation.signature
)
func get_indexed_attestation*(state: BeaconState, attestation: TrustedAttestation,
stateCache: var StateCache): TrustedIndexedAttestation =
# Return the indexed attestation corresponding to ``attestation``.
let
attesting_indices =
get_attesting_indices(
state, attestation.data, attestation.aggregation_bits, stateCache)
TrustedIndexedAttestation(
attesting_indices:
List[uint64, MAX_VALIDATORS_PER_COMMITTEE].init(
sorted(mapIt(attesting_indices.toSeq, it.uint64), system.cmp)),
data: attestation.data,
signature: attestation.signature
)
# Attestation validation
# ------------------------------------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#attestations
@ -496,7 +530,7 @@ proc isValidAttestationSlot*(attestationSlot, stateSlot: Slot): bool =
# TODO remove/merge with p2p-interface validation
proc isValidAttestationTargetEpoch*(
state: BeaconState, attestation: Attestation): bool =
state: BeaconState, data: AttestationData): bool =
# TODO what constitutes a valid attestation when it's about to be added to
# the pool? we're interested in attestations that will become viable
# for inclusion in blocks in the future and on any fork, so we need to
@ -509,7 +543,6 @@ proc isValidAttestationTargetEpoch*(
# include an attestation in a block even if the corresponding validator
# was slashed in the same epoch - there's no penalty for doing this and
# the vote counting logic will take care of any ill effects (TODO verify)
let data = attestation.data
# TODO re-enable check
#if not (data.crosslink.shard < SHARD_COUNT):
# notice "Attestation shard too high",
@ -539,7 +572,7 @@ proc isValidAttestationTargetEpoch*(
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase0/beacon-chain.md#attestations
proc check_attestation*(
state: BeaconState, attestation: Attestation, flags: UpdateFlags,
state: BeaconState, attestation: SomeAttestation, flags: UpdateFlags,
stateCache: var StateCache): bool =
## Check that an attestation follows the rules of being included in the state
## at the current slot. When acting as a proposer, the same rules need to
@ -564,7 +597,7 @@ proc check_attestation*(
committee_count = get_committee_count_at_slot(state, data.slot))
return
if not isValidAttestationTargetEpoch(state, attestation):
if not isValidAttestationTargetEpoch(state, data):
# Logging in isValidAttestationTargetEpoch
return
@ -602,7 +635,7 @@ proc check_attestation*(
true
proc process_attestation*(
state: var BeaconState, attestation: Attestation, flags: UpdateFlags,
state: var BeaconState, attestation: SomeAttestation, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
# In the spec, attestation validation is mixed with state mutation, so here
# we've split it into two functions so that the validation logic can be

View File

@ -71,6 +71,11 @@ type
RandomSourceDepleted* = object of CatchableError
TrustedSig* = object
data*: array[RawSigSize, byte]
SomeSig* = TrustedSig | ValidatorSig
func `==`*(a, b: BlsValue): bool =
if a.kind != b.kind: return false
if a.kind == Real:
@ -86,7 +91,7 @@ template `==`*[N, T](a: T, b: BlsValue[N, T]): bool =
# API
# ----------------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#bls-signatures
func toPubKey*(privkey: ValidatorPrivKey): ValidatorPubKey =
  ## Derive a public key from a private key
@ -218,6 +223,9 @@ func toRaw*(x: BlsValue): auto =
else:
x.blob
func toRaw*(x: TrustedSig): auto =
x.data
func toHex*(x: BlsCurveType): string =
toHex(toRaw(x))
@ -260,30 +268,47 @@ template hash*(x: BlsCurveType): Hash =
# Serialization
# ----------------------------------------------------------------------
{.pragma: serializationRaises, raises: [SerializationError, IOError, Defect].}
proc writeValue*(writer: var JsonWriter, value: ValidatorPubKey) {.
inline, raises: [IOError, Defect].} =
writer.writeValue(value.toHex())
proc readValue*(reader: var JsonReader, value: var ValidatorPubKey) {.
inline, raises: [Exception].} =
value = ValidatorPubKey.fromHex(reader.readValue(string)).tryGet()
proc readValue*(reader: var JsonReader, value: var ValidatorPubKey)
{.serializationRaises.} =
let key = ValidatorPubKey.fromHex(reader.readValue(string))
if key.isOk:
value = key.get
else:
    # TODO: Can we provide a better diagnostic?
raiseUnexpectedValue(reader, "Valid hex-encoded public key expected")
proc writeValue*(writer: var JsonWriter, value: ValidatorSig) {.
inline, raises: [IOError, Defect].} =
# Workaround: https://github.com/status-im/nim-beacon-chain/issues/374
writer.writeValue(value.toHex())
proc readValue*(reader: var JsonReader, value: var ValidatorSig) {.
inline, raises: [Exception].} =
value = ValidatorSig.fromHex(reader.readValue(string)).tryGet()
proc readValue*(reader: var JsonReader, value: var ValidatorSig)
{.serializationRaises.} =
let sig = ValidatorSig.fromHex(reader.readValue(string))
if sig.isOk:
value = sig.get
else:
    # TODO: Can we provide a better diagnostic?
raiseUnexpectedValue(reader, "Valid hex-encoded signature expected")
proc writeValue*(writer: var JsonWriter, value: ValidatorPrivKey) {.
inline, raises: [IOError, Defect].} =
writer.writeValue(value.toHex())
proc readValue*(reader: var JsonReader, value: var ValidatorPrivKey) {.
inline, raises: [Exception].} =
value = ValidatorPrivKey.fromHex(reader.readValue(string)).tryGet()
proc readValue*(reader: var JsonReader, value: var ValidatorPrivKey)
{.serializationRaises.} =
let key = ValidatorPrivKey.fromHex(reader.readValue(string))
if key.isOk:
value = key.get
else:
    # TODO: Can we provide a better diagnostic?
raiseUnexpectedValue(reader, "Valid hex-encoded private key expected")
template fromSszBytes*(T: type BlsValue, bytes: openArray[byte]): auto =
let v = fromRaw(T, bytes)
@ -308,6 +333,9 @@ func shortLog*(x: ValidatorPrivKey): string =
## Logging for raw unwrapped BLS types
x.toRaw()[0..3].toHex()
func shortLog*(x: TrustedSig): string =
x.data[0..3].toHex()
# Initialization
# ----------------------------------------------------------------------

View File

@ -46,27 +46,15 @@ export
# Eventually, we could also differentiate between user/tainted data and
# internal state that's gone through sanity checks already.
const ETH2_SPEC* {.strdefine.} = "v0.11.3"
static: doAssert: ETH2_SPEC == "v0.11.3" or ETH2_SPEC == "v0.12.1"
# Constant presets
const const_preset* {.strdefine.} = "mainnet"
when const_preset == "mainnet":
when ETH2_SPEC == "v0.12.1":
import ./presets/mainnet
export mainnet
else:
import ./presets/mainnet_v0_11_3
export mainnet_v0_11_3
import ./presets/v0_12_1/mainnet
export mainnet
elif const_preset == "minimal":
when ETH2_SPEC == "v0.12.1":
import ./presets/minimal
export minimal
else:
import ./presets/minimal_v0_11_3
export minimal_v0_11_3
import ./presets/v0_12_1/minimal
export minimal
else:
type
Slot* = distinct uint64
@ -76,11 +64,7 @@ else:
loadCustomPreset const_preset
const
SPEC_VERSION* =
when ETH2_SPEC == "v0.12.1":
"0.12.1"
else:
"0.11.3" ## \
SPEC_VERSION* = "0.12.1" ## \
## Spec version we're aiming to be compatible with, right now
GENESIS_SLOT* = Slot(0)
@ -129,12 +113,12 @@ type
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase1/custody-game.md#signature-domain-types
DOMAIN_CUSTODY_BIT_SLASHING = 0x83
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#custom-types
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#custom-types
Domain* = array[32, byte]
# https://github.com/nim-lang/Nim/issues/574 and be consistent across
# 32-bit and 64-bit word platforms.
# TODO VALIDATOR_REGISTRY_LIMIT is 1 shl 40 in 0.8.3, and
# TODO VALIDATOR_REGISTRY_LIMIT is 1 shl 40 in 0.12.1, and
# proc newSeq(typ: PNimType, len: int): pointer {.compilerRtl.}
  # in Nim/lib/system/gc.nim quite tightly ties seq addressability
# to the system wordsize. This lifts smaller, and now incorrect,
@ -160,6 +144,12 @@ type
data*: AttestationData
signature*: ValidatorSig
TrustedIndexedAttestation* = object
# TODO ValidatorIndex, but that doesn't serialize properly
attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
data*: AttestationData
signature*: TrustedSig
CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#attestation
@ -168,10 +158,15 @@ type
data*: AttestationData
signature*: ValidatorSig
TrustedAttestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
signature*: TrustedSig
Version* = distinct array[4, byte]
ForkDigest* = distinct array[4, byte]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#forkdata
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#forkdata
ForkData* = object
current_version*: Version
genesis_validators_root*: Eth2Digest
@ -203,7 +198,7 @@ type
data*: DepositData
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#depositmessage
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#depositmessage
DepositMessage* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
@ -214,6 +209,8 @@ type
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: Gwei
    # Cannot use TrustedSig here, as invalid signatures are possible and they
    # determine whether the deposit should be added or not during processing
signature*: ValidatorSig # Signing over DepositMessage
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#voluntaryexit
@ -242,6 +239,29 @@ type
body*: BeaconBlockBody
TrustedBeaconBlock* = object
## When we receive blocks from outside sources, they are untrusted and go
## through several layers of validation. Blocks that have gone through
## validations can be trusted to be well-formed, with a correct signature,
## having a parent and applying cleanly to the state that their parent
## left them with.
##
## When loading such blocks from the database, to rewind states for example,
## it is expensive to redo the validations (in particular, the signature
## checks), thus `TrustedBlock` uses a `TrustedSig` type to mark that these
## checks can be skipped.
##
## TODO this could probably be solved with some type trickery, but there
## too many bugs in nim around generics handling, and we've used up
## the trickery budget in the serialization library already. Until
## then, the type must be manually kept compatible with its untrusted
## cousin.
slot*: Slot
proposer_index*: uint64
parent_root*: Eth2Digest ##\
state_root*: Eth2Digest ##\
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconblockheader
BeaconBlockHeader* = object
slot*: Slot
@ -250,6 +270,11 @@ type
state_root*: Eth2Digest
body_root*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#signingdata
SigningData* = object
object_root*: Eth2Digest
domain*: Domain
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
@ -263,8 +288,26 @@ type
deposits*: List[Deposit, MAX_DEPOSITS]
voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
TrustedBeaconBlockBody* = object
randao_reveal*: TrustedSig
eth1_data*: Eth1Data
graffiti*: Eth2Digest # TODO make that raw bytes
# Operations
proposer_slashings*: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
attester_slashings*: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
attestations*: List[TrustedAttestation, MAX_ATTESTATIONS]
deposits*: List[Deposit, MAX_DEPOSITS]
voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
SomeSignedBeaconBlock* = SignedBeaconBlock | TrustedSignedBeaconBlock
SomeBeaconBlock* = BeaconBlock | TrustedBeaconBlock
SomeBeaconBlockBody* = BeaconBlockBody | TrustedBeaconBlockBody
SomeAttestation* = Attestation | TrustedAttestation
SomeIndexedAttestation* = IndexedAttestation | TrustedIndexedAttestation
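A brief, self-contained sketch (toy types, not the project's) of what these `Some*` union aliases buy: one generic routine serves both the trusted and untrusted variant, so the otherwise-duplicated object definitions share every non-cryptographic code path.
```nim
type
  Blck = object         # stand-in for BeaconBlock
    slot: int
  TrustedBlck = object  # stand-in for TrustedBeaconBlock
    slot: int
  SomeBlck = Blck | TrustedBlck

func describe(b: SomeBlck): string =
  # written once in source, instantiated per concrete type
  "block at slot " & $b.slot

echo describe(Blck(slot: 10))
echo describe(TrustedBlck(slot: 11))
```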
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate
BeaconStateObj* = object
BeaconState* = object
# Versioning
genesis_time*: uint64
genesis_validators_root*: Eth2Digest
@ -316,11 +359,10 @@ type
current_justified_checkpoint*: Checkpoint
finalized_checkpoint*: Checkpoint
BeaconState* = BeaconStateObj
BeaconStateRef* = ref BeaconStateObj not nil
NilableBeaconStateRef* = ref BeaconStateObj
BeaconStateRef* = ref BeaconState not nil
NilableBeaconStateRef* = ref BeaconState
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#validator
Validator* = object
pubkey*: ValidatorPubKey
@ -373,6 +415,13 @@ type
deposit_count*: uint64
block_hash*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#eth1block
Eth1Block* = object
timestamp*: uint64
deposit_root*: Eth2Digest
deposit_count*: uint64
# All other eth1 block fields
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#signedvoluntaryexit
SignedVoluntaryExit* = object
message*: VoluntaryExit
@ -383,6 +432,10 @@ type
message*: BeaconBlock
signature*: ValidatorSig
TrustedSignedBeaconBlock* = object
message*: TrustedBeaconBlock
signature*: TrustedSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#signedbeaconblockheader
SignedBeaconBlockHeader* = object
message*: BeaconBlockHeader
@ -410,31 +463,6 @@ type
committee_count_cache*: Table[Epoch, uint64]
beacon_proposer_indices*: Table[Slot, Option[ValidatorIndex]]
JsonError = jsonTypes.JsonError
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#signingdata
# TODO move back into big `type` block
when ETH2_SPEC == "v0.12.1":
type SigningData* = object
object_root*: Eth2Digest
domain*: Domain
else:
type SigningRoot* = object
object_root*: Eth2Digest
domain*: Domain
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#eth1block
when ETH2_SPEC == "v0.12.1":
type Eth1Block* = object
timestamp*: uint64
deposit_root*: Eth2Digest
deposit_count*: uint64
# All other eth1 block fields
else:
type Eth1Block* = object
timestamp*: uint64
# All other eth1 block fields
func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
($state.validators[validatorIdx].pubkey)[0..7]
@ -487,7 +515,7 @@ template ethTimeUnit(typ: type) {.dirty.} =
writeValue(writer, uint64 value)
proc readValue*(reader: var JsonReader, value: var typ)
{.raises: [IOError, JsonError, Defect].} =
{.raises: [IOError, SerializationError, Defect].} =
value = typ reader.readValue(uint64)
proc writeValue*(writer: var JsonWriter, value: ValidatorIndex)
@ -495,14 +523,14 @@ proc writeValue*(writer: var JsonWriter, value: ValidatorIndex)
writeValue(writer, uint32 value)
proc readValue*(reader: var JsonReader, value: var ValidatorIndex)
{.raises: [IOError, JsonError, Defect].} =
{.raises: [IOError, SerializationError, Defect].} =
value = ValidatorIndex reader.readValue(uint32)
template writeValue*(writer: var JsonWriter, value: Version | ForkDigest) =
writeValue(writer, $value)
proc readValue*(reader: var JsonReader, value: var Version)
{.raises: [IOError, JsonError, Defect].} =
{.raises: [IOError, SerializationError, Defect].} =
let hex = reader.readValue(string)
try:
hexToByteArray(hex, array[4, byte](value))
@ -510,7 +538,7 @@ proc readValue*(reader: var JsonReader, value: var Version)
raiseUnexpectedValue(reader, "Hex string of 4 bytes expected")
proc readValue*(reader: var JsonReader, value: var ForkDigest)
{.raises: [IOError, JsonError, Defect].} =
{.raises: [IOError, SerializationError, Defect].} =
let hex = reader.readValue(string)
try:
hexToByteArray(hex, array[4, byte](value))
@ -518,23 +546,23 @@ proc readValue*(reader: var JsonReader, value: var ForkDigest)
raiseUnexpectedValue(reader, "Hex string of 4 bytes expected")
# `ValidatorIndex` seq handling.
proc max*(a: ValidatorIndex, b: int) : auto =
func max*(a: ValidatorIndex, b: int) : auto =
max(a.int, b)
proc `[]`*[T](a: var seq[T], b: ValidatorIndex): var T =
func `[]`*[T](a: var seq[T], b: ValidatorIndex): var T =
a[b.int]
proc `[]`*[T](a: seq[T], b: ValidatorIndex): auto =
func `[]`*[T](a: seq[T], b: ValidatorIndex): auto =
a[b.int]
proc `[]=`*[T](a: var seq[T], b: ValidatorIndex, c: T) =
func `[]=`*[T](a: var seq[T], b: ValidatorIndex, c: T) =
a[b.int] = c
# `ValidatorIndex` Nim integration
proc `==`*(x, y: ValidatorIndex) : bool {.borrow.}
proc `<`*(x, y: ValidatorIndex) : bool {.borrow.}
proc hash*(x: ValidatorIndex): Hash {.borrow.}
proc `$`*(x: ValidatorIndex): auto = $(x.int64)
func `$`*(x: ValidatorIndex): auto = $(x.int64)
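As a quick illustration of why these forwarding operators exist (a minimal sketch with a made-up index type): a `distinct` index cannot index a `seq` directly, so small `[]`/`[]=` helpers bridge the gap, mirroring the definitions above.
```nim
type MyIndex = distinct uint32   # stands in for ValidatorIndex

func `[]`[T](a: seq[T], b: MyIndex): T = a[b.int]
func `[]=`[T](a: var seq[T], b: MyIndex, c: T) = a[b.int] = c

var balances = @[32'u64, 31'u64, 30'u64]
balances[MyIndex(2)] = 29'u64
echo balances[MyIndex(2)]   # 29
```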
ethTimeUnit Slot
ethTimeUnit Epoch
@ -603,7 +631,7 @@ func shortLog*(s: Slot): uint64 =
func shortLog*(e: Epoch): uint64 =
e - GENESIS_EPOCH
func shortLog*(v: BeaconBlock): auto =
func shortLog*(v: SomeBeaconBlock): auto =
(
slot: shortLog(v.slot),
proposer_index: v.proposer_index,
@ -616,7 +644,7 @@ func shortLog*(v: BeaconBlock): auto =
voluntary_exits_len: v.body.voluntary_exits.len(),
)
func shortLog*(v: SignedBeaconBlock): auto =
func shortLog*(v: SomeSignedBeaconBlock): auto =
(
blck: shortLog(v.message),
signature: shortLog(v.signature)
@ -641,7 +669,7 @@ func shortLog*(v: AttestationData): auto =
target_root: shortLog(v.target.root)
)
func shortLog*(v: Attestation): auto =
func shortLog*(v: SomeAttestation): auto =
(
aggregation_bits: v.aggregation_bits,
data: shortLog(v.data),

View File

@ -1,6 +1,20 @@
import
options,
../datatypes
../[datatypes, digest, crypto],
json_rpc/jsonmarshal,
callsigs_types
proc get_v1_beacon_genesis(): BeaconGenesisTuple
# TODO stateId is part of the REST path
proc get_v1_beacon_states_root(stateId: string): Eth2Digest
# TODO stateId is part of the REST path
proc get_v1_beacon_states_fork(stateId: string): Fork
# TODO: delete old stuff
# https://github.com/ethereum/eth2.0-APIs/blob/master/apis/beacon/basic.md
#

View File

@ -0,0 +1,23 @@
import
# Standard library
options,
# Local modules
# TODO for some reason "../[datatypes, digest, crypto]" results in "Error: cannot open file"
../datatypes,
../digest,
../crypto
type
AttesterDuties* = tuple
public_key: ValidatorPubKey
committee_index: CommitteeIndex
committee_length: uint64
validator_committee_index: uint64
slot: Slot
ValidatorPubkeySlotPair* = tuple[public_key: ValidatorPubKey, slot: Slot]
BeaconGenesisTuple* = tuple
genesis_time: uint64
genesis_validators_root: Eth2Digest
genesis_fork_version: Version

View File

@ -4,24 +4,17 @@ import
# Local modules
../[datatypes, digest, crypto],
json_rpc/jsonmarshal,
validator_callsigs_types
# TODO check which arguments are part of the path in the REST API
callsigs_types
# calls that return a bool are actually without a return type in the main REST API
# spec but nim-json-rpc requires that all RPC calls have a return type.
# TODO this doesn't have "validator" in its path but is used by the validators nonetheless
proc get_v1_beacon_states_fork(stateId: string): Fork
# TODO this doesn't have "validator" in its path but is used by the validators nonetheless
proc get_v1_beacon_genesis(): BeaconGenesisTuple
# TODO returns a bool even though in the API there is no return type - because of nim-json-rpc
proc post_v1_beacon_pool_attestations(attestation: Attestation): bool
# TODO slot is part of the REST path
proc get_v1_validator_blocks(slot: Slot, graffiti: Eth2Digest, randao_reveal: ValidatorSig): BeaconBlock
# TODO returns a bool even though in the API there is no return type - because of nim-json-rpc
proc post_v1_beacon_blocks(body: SignedBeaconBlock): bool
proc get_v1_validator_attestation_data(slot: Slot, committee_index: CommitteeIndex): AttestationData
@ -31,16 +24,17 @@ proc get_v1_validator_attestation_data(slot: Slot, committee_index: CommitteeInd
# https://docs.google.com/spreadsheets/d/1kVIx6GvzVLwNYbcd-Fj8YUlPf4qGrWUlS35uaTnIAVg/edit?disco=AAAAGh7r_fQ
proc get_v1_validator_aggregate_attestation(attestation_data: AttestationData): Attestation
# TODO returns a bool even though in the API there is no return type - because of nim-json-rpc
proc post_v1_validator_aggregate_and_proof(payload: SignedAggregateAndProof): bool
# this is a POST instead of a GET because of this: https://docs.google.com/spreadsheets/d/1kVIx6GvzVLwNYbcd-Fj8YUlPf4qGrWUlS35uaTnIAVg/edit?disco=AAAAJk5rbKA
# TODO epoch is part of the REST path
proc post_v1_validator_duties_attester(epoch: Epoch, public_keys: seq[ValidatorPubKey]): seq[AttesterDuties]
# TODO epoch is part of the REST path
proc get_v1_validator_duties_proposer(epoch: Epoch): seq[ValidatorPubkeySlotPair]
proc post_v1_validator_beacon_committee_subscription(committee_index: CommitteeIndex,
slot: Slot,
aggregator: bool,
validator_pubkey: ValidatorPubKey,
slot_signature: ValidatorSig)
proc post_v1_validator_beacon_committee_subscriptions(committee_index: CommitteeIndex,
slot: Slot,
aggregator: bool,
validator_pubkey: ValidatorPubKey,
slot_signature: ValidatorSig): bool

View File

@ -1,27 +0,0 @@
import
# Standard library
options,
# Local modules
# TODO for some reason "../[datatypes, digest, crypto]" results in "Error: cannot open file"
../datatypes,
../digest,
../crypto
type
AttesterDuties* = object
public_key*: ValidatorPubKey
committee_index*: CommitteeIndex
committee_length*: uint64
validator_committee_index*: uint64
slot*: Slot
# TODO do we even need this? how about a simple tuple (alias)?
ValidatorPubkeySlotPair* = object
public_key*: ValidatorPubKey
slot*: Slot
# TODO do we even need this? how about a simple tuple (alias)?
BeaconGenesisTuple* = object
genesis_time*: uint64
genesis_validators_root*: Eth2Digest
genesis_fork_version*: Version

View File

@ -64,7 +64,13 @@ func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
if is_active_validator(val, epoch):
result.add idx.ValidatorIndex
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_committee_count_at_slot
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_committee_count_at_slot
func get_committee_count_at_slot*(num_active_validators: auto):
uint64 =
clamp(
num_active_validators div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE,
1, MAX_COMMITTEES_PER_SLOT).uint64
func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
# Return the number of committees at ``slot``.
@ -74,10 +80,7 @@ func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
# CommitteeIndex return type here.
let epoch = compute_epoch_at_slot(slot)
let active_validator_indices = get_active_validator_indices(state, epoch)
let committees_per_slot = clamp(
len(active_validator_indices) div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE,
1, MAX_COMMITTEES_PER_SLOT).uint64
result = committees_per_slot
result = get_committee_count_at_slot(len(active_validator_indices))
# Otherwise, get_beacon_committee(...) cannot access some committees.
doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT).uint64 >= result
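To make the clamp above concrete, a small arithmetic check with illustrative mainnet-style constants (32 slots per epoch, target committee size 128, at most 64 committees per slot):
```nim
const
  SLOTS_PER_EPOCH = 32
  TARGET_COMMITTEE_SIZE = 128
  MAX_COMMITTEES_PER_SLOT = 64

func committeeCountAtSlot(numActiveValidators: int): uint64 =
  clamp(numActiveValidators div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE,
        1, MAX_COMMITTEES_PER_SLOT).uint64

echo committeeCountAtSlot(1_000)      # 1: too few validators, clamped up
echo committeeCountAtSlot(100_000)    # 24
echo committeeCountAtSlot(1_000_000)  # 64: clamped to the maximum
```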
@ -163,7 +166,7 @@ func compute_domain*(
result[0..3] = int_to_bytes4(domain_type.uint64)
result[4..31] = fork_data_root.data[0..27]
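The two assignments above build the 32-byte domain as 4 bytes of domain type followed by the first 28 bytes of the fork data root; a stand-alone sketch of that layout with placeholder bytes:
```nim
var domain: array[32, byte]
let domainType = [0x01'u8, 0x00, 0x00, 0x00]  # e.g. a 4-byte domain constant
var forkDataRoot: array[32, byte]             # placeholder for hash_tree_root(ForkData(...))
for i in 0 ..< 32: forkDataRoot[i] = byte(i)

domain[0 .. 3] = domainType
domain[4 .. 31] = forkDataRoot[0 .. 27]
echo domain[0], " ", domain[4], " ", domain[31]  # 1 0 27
```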
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_domain
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_domain
func get_domain*(
fork: Fork, domain_type: DomainType, epoch: Epoch, genesis_validators_root: Eth2Digest): Domain =
## Return the signature domain (fork version concatenated with domain type)
@ -185,16 +188,10 @@ func get_domain*(
func compute_signing_root*(ssz_object: auto, domain: Domain): Eth2Digest =
# Return the signing root of an object by calculating the root of the
# object-domain tree.
when ETH2_SPEC == "v0.12.1":
let domain_wrapped_object = SigningData(
object_root: hash_tree_root(ssz_object),
domain: domain
)
else:
let domain_wrapped_object = SigningRoot(
object_root: hash_tree_root(ssz_object),
domain: domain
)
let domain_wrapped_object = SigningData(
object_root: hash_tree_root(ssz_object),
domain: domain
)
hash_tree_root(domain_wrapped_object)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_seed

View File

@ -6,10 +6,10 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
json, math, strutils, strformat,
json, math, strutils, strformat, typetraits,
stew/[results, byteutils, bitseqs, bitops2], stew/shims/macros,
eth/keyfile/uuid, blscurve,
nimcrypto/[sha2, rijndael, pbkdf2, bcmode, hash, sysrand],
eth/keyfile/uuid, blscurve, json_serialization,
nimcrypto/[sha2, rijndael, pbkdf2, bcmode, hash, sysrand, utils],
./datatypes, ./crypto, ./digest, ./signatures
export
@ -44,6 +44,17 @@ type
prf: string
salt: string
# https://github.com/ethereum/EIPs/blob/4494da0966afa7318ec0157948821b19c4248805/EIPS/eip-2386.md#specification
Wallet* = object
uuid*: UUID
name*: WalletName
version*: uint
walletType* {.serializedFieldName: "type"}: string
# TODO: The use of `JsonString` can be removed once we
# solve the serialization problem for `Crypto[T]`
crypto*: JsonString
nextAccount* {.serializedFieldName: "nextaccount".}: Natural
KdfParams = KdfPbkdf2 | KdfScrypt
Kdf[T: KdfParams] = object
@ -69,12 +80,18 @@ type
signingKeyKind # Also known as voting key
withdrawalKeyKind
UUID* = distinct string
WalletName* = distinct string
Mnemonic* = distinct string
KeyPath* = distinct string
KeyStorePass* = distinct string
KeyStoreContent* = distinct JsonString
KeySeed* = distinct seq[byte]
KeyStoreContent* = distinct JsonString
WalletContent* = distinct JsonString
SensitiveData = Mnemonic|KeyStorePass|KeySeed
Credentials* = object
mnemonic*: Mnemonic
keyStore*: KeyStoreContent
@ -104,6 +121,17 @@ const
# https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md
wordListLen = 2048
UUID.serializesAsBaseIn Json
WalletName.serializesAsBaseIn Json
template `$`*(m: Mnemonic): string =
string(m)
template burnMem*(m: var (SensitiveData|TaintedString)) =
# TODO: `burnMem` in nimcrypto could use distinctBase
# to make its usage less error-prone.
utils.burnMem(string m)
macro wordListArray(filename: static string): array[wordListLen, cstring] =
result = newTree(nnkBracket)
var words = slurp(filename).split()
@ -152,7 +180,7 @@ func getSeed*(mnemonic: Mnemonic, password: KeyStorePass): KeySeed =
let salt = "mnemonic-" & password.string
KeySeed sha512.pbkdf2(mnemonic.string, salt, 2048, 64)
proc generateMnemonic*(words: openarray[cstring],
proc generateMnemonic*(words: openarray[cstring] = englishWords,
entropyParam: openarray[byte] = @[]): Mnemonic =
# https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#generating-the-mnemonic
doAssert words.len == wordListLen
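For context on `getSeed` just above, a hedged, stand-alone sketch of the same BIP-39 seed derivation. The call shape mirrors the one used in this file; the import list and mnemonic are illustrative assumptions, and the passphrase is empty.
```nim
import nimcrypto/[pbkdf2, sha2]

let
  mnemonic = "legal winner thank year wave sausage worth useful " &
             "legal winner thank yellow"
  salt = "mnemonic-" & ""                       # "mnemonic-" & passphrase
  seed = sha512.pbkdf2(mnemonic, salt, 2048, 64)

echo seed.len   # 64-byte seed, as BIP-39 specifies
```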
@ -226,9 +254,9 @@ proc shaChecksum(key, cipher: openarray[byte]): array[32, byte] =
result = ctx.finish().data
ctx.clear()
template tryJsonToCrypto(ks: JsonNode; crypto: typedesc): untyped =
template tryJsonToCrypto(json: JsonNode; crypto: typedesc): untyped =
try:
ks{"crypto"}.to(Crypto[crypto])
json.to(Crypto[crypto])
except Exception:
return err "ks: failed to parse crypto"
@ -238,12 +266,8 @@ template hexToBytes(data, name: string): untyped =
except ValueError:
return err "ks: failed to parse " & name
proc decryptKeystore*(data: KeyStoreContent,
password: KeyStorePass): KsResult[ValidatorPrivKey] =
# TODO: `parseJson` can raise a general `Exception`
let ks = try: parseJson(data.string)
except Exception: return err "ks: failed to parse keystore"
proc decryptoCryptoField*(json: JsonNode,
password: KeyStorePass): KsResult[seq[byte]] =
var
decKey: seq[byte]
salt: seq[byte]
@ -251,15 +275,15 @@ proc decryptKeystore*(data: KeyStoreContent,
cipherMsg: seq[byte]
checksumMsg: seq[byte]
let kdf = ks{"crypto", "kdf", "function"}.getStr
let kdf = json{"kdf", "function"}.getStr
case kdf
of "scrypt":
let crypto = tryJsonToCrypto(ks, KdfScrypt)
let crypto = tryJsonToCrypto(json, KdfScrypt)
return err "ks: scrypt not supported"
of "pbkdf2":
let
crypto = tryJsonToCrypto(ks, KdfPbkdf2)
crypto = tryJsonToCrypto(json, KdfPbkdf2)
kdfParams = crypto.kdf.params
salt = hexToBytes(kdfParams.salt, "salt")
@ -288,34 +312,41 @@ proc decryptKeystore*(data: KeyStoreContent,
aesCipher.decrypt(cipherMsg, secret)
aesCipher.clear()
ValidatorPrivKey.fromRaw(secret)
ok secret
proc decryptKeystore*(data: KeyStoreContent,
password: KeyStorePass): KsResult[ValidatorPrivKey] =
# TODO: `parseJson` can raise a general `Exception`
let
ks = try: parseJson(data.string)
except Exception: return err "ks: failed to parse keystore"
secret = decryptoCryptoField(ks{"crypto"}, password)
ValidatorPrivKey.fromRaw(? secret)
proc createCryptoField(T: type[KdfParams],
secret: openarray[byte],
password = KeyStorePass "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[]): Crypto[T] =
type AES = aes128
proc encryptKeystore*(T: type[KdfParams],
privKey: ValidatorPrivkey,
password = KeyStorePass "",
path = KeyPath "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[],
pretty = true): KeyStoreContent =
var
secret = privKey.toRaw[^32..^1]
decKey: seq[byte]
aesCipher: CTR[aes128]
aesIv = newSeq[byte](aes128.sizeBlock)
kdfSalt = newSeq[byte](saltSize)
aesCipher: CTR[AES]
cipherMsg = newSeq[byte](secret.len)
if salt.len > 0:
let kdfSalt = if salt.len > 0:
doAssert salt.len == saltSize
kdfSalt = @salt
@salt
else:
getRandomBytesOrPanic(kdfSalt)
getRandomBytesOrPanic(saltSize)
if iv.len > 0:
doAssert iv.len == aes128.sizeBlock
aesIv = @iv
let aesIv = if iv.len > 0:
doAssert iv.len == AES.sizeBlock
@iv
else:
getRandomBytesOrPanic(aesIv)
getRandomBytesOrPanic(AES.sizeBlock)
when T is KdfPbkdf2:
decKey = sha256.pbkdf2(password.string, kdfSalt, pbkdf2Params.c,
@ -324,39 +355,83 @@ proc encryptKeystore*(T: type[KdfParams],
var kdf = Kdf[KdfPbkdf2](function: "pbkdf2", params: pbkdf2Params, message: "")
kdf.params.salt = byteutils.toHex(kdfSalt)
else:
return
{.fatal: "Other KDFs are supported yet".}
aesCipher.init(decKey.toOpenArray(0, 15), aesIv)
aesCipher.encrypt(secret, cipherMsg)
aesCipher.clear()
let pubkey = privKey.toPubKey()
let sum = shaChecksum(decKey.toOpenArray(16, 31), cipherMsg)
Crypto[T](
kdf: kdf,
checksum: Checksum(
function: "sha256",
message: byteutils.toHex(sum)),
cipher: Cipher(
function: "aes-128-ctr",
params: CipherParams(iv: byteutils.toHex(aesIv)),
message: byteutils.toHex(cipherMsg)))
proc encryptKeystore*(T: type[KdfParams],
privKey: ValidatorPrivkey,
password = KeyStorePass "",
path = KeyPath "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[],
pretty = true): KeyStoreContent =
let
sum = shaChecksum(decKey.toOpenArray(16, 31), cipherMsg)
uuid = uuidGenerate().get
secret = privKey.toRaw[^32..^1]
cryptoField = createCryptoField(T, secret, password, salt, iv)
pubkey = privKey.toPubKey()
uuid = uuidGenerate().expect("Random bytes should be available")
keystore = Keystore[T](
crypto: Crypto[T](
kdf: kdf,
checksum: Checksum(
function: "sha256",
message: byteutils.toHex(sum)
),
cipher: Cipher(
function: "aes-128-ctr",
params: CipherParams(iv: byteutils.toHex(aesIv)),
message: byteutils.toHex(cipherMsg)
)
),
crypto: cryptoField,
pubkey: toHex(pubkey),
path: path.string,
uuid: $uuid,
version: 4)
KeyStoreContent if pretty: json.pretty(%keystore, indent=4)
KeyStoreContent if pretty: json.pretty(%keystore)
else: $(%keystore)
proc createWallet*(T: type[KdfParams],
mnemonic: Mnemonic,
name = WalletName "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[],
password = KeyStorePass "",
nextAccount = none(Natural),
pretty = true): Wallet =
let
uuid = UUID $(uuidGenerate().expect("Random bytes should be available"))
# Please note that we are passing an empty password here because
# we want the wallet restoration procedure to depend only on the
# mnemonic (the user is asked to treat the mnemonic as a password).
seed = getSeed(mnemonic, KeyStorePass"")
cryptoField = %createCryptoField(T, distinctBase seed, password, salt, iv)
Wallet(
uuid: uuid,
name: if name.string.len > 0: name
else: WalletName(uuid),
version: 1,
walletType: "hierarchical deterministic",
crypto: JsonString(if pretty: json.pretty(cryptoField)
else: $cryptoField),
nextAccount: nextAccount.get(0))
proc createWalletContent*(T: type[KdfParams],
mnemonic: Mnemonic,
name = WalletName "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[],
password = KeyStorePass "",
nextAccount = none(Natural),
pretty = true): (UUID, WalletContent) =
let wallet = createWallet(T, mnemonic, name, salt, iv, password, nextAccount, pretty)
(wallet.uuid, WalletContent Json.encode(wallet, pretty = pretty))
proc restoreCredentials*(mnemonic: Mnemonic,
password = KeyStorePass ""): Credentials =
let

View File

@ -9,17 +9,20 @@
import
strformat,
datatypes
datatypes, helpers
const
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#topics-and-messages
topicBeaconBlocksSuffix* = "beacon_block/ssz"
topicMainnetAttestationsSuffix* = "_beacon_attestation/ssz"
topicVoluntaryExitsSuffix* = "voluntary_exit/ssz"
topicProposerSlashingsSuffix* = "proposer_slashing/ssz"
topicAttesterSlashingsSuffix* = "attester_slashing/ssz"
topicAggregateAndProofsSuffix* = "beacon_aggregate_and_proof/ssz"
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#configuration
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/p2p-interface.md#topics-and-messages
topicMainnetAttestationsSuffix* = "_beacon_attestation/ssz"
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#misc
ATTESTATION_SUBNET_COUNT* = 64
defaultEth2TcpPort* = 9000
@ -27,55 +30,63 @@ const
# This is not part of the spec yet!
defaultEth2RpcPort* = 9090
when ETH2_SPEC == "v0.11.3":
const topicInteropAttestationSuffix* = "beacon_attestation/ssz"
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getBeaconBlocksTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicBeaconBlocksSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getVoluntaryExitsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicVoluntaryExitsSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getProposerSlashingsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicProposerSlashingsSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicAttesterSlashingsSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicAggregateAndProofsSuffix}"
except ValueError as e:
raiseAssert e.msg
when ETH2_SPEC == "v0.11.3":
func getInteropAttestationTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicInteropAttestationSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-attestation
func compute_subnet_for_attestation*(
num_active_validators: uint64, attestation: Attestation): uint64 =
# Compute the correct subnet for an attestation for Phase 0.
# Note, this mimics expected Phase 1 behavior where attestations will be
# mapped to their shard subnet.
#
# The spec version has params (state: BeaconState, attestation: Attestation),
# but it's only to call get_committee_count_at_slot(), which needs only epoch
# and the number of active validators.
let
slots_since_epoch_start = attestation.data.slot mod SLOTS_PER_EPOCH
committees_since_epoch_start =
get_committee_count_at_slot(num_active_validators) * slots_since_epoch_start
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#mainnet-3
func getMainnetAttestationTopic*(forkDigest: ForkDigest, committeeIndex: uint64): string =
(committees_since_epoch_start + attestation.data.index) mod ATTESTATION_SUBNET_COUNT
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-attestation
func getAttestationTopic*(forkDigest: ForkDigest, subnetIndex: uint64):
string =
# This is for subscribing or broadcasting manually to a known index.
try:
let topicIndex = committeeIndex mod ATTESTATION_SUBNET_COUNT
&"/eth2/{$forkDigest}/committee_index{topicIndex}{topicMainnetAttestationsSuffix}"
&"/eth2/{$forkDigest}/beacon_attestation_{subnetIndex}/ssz"
except ValueError as e:
raiseAssert e.msg
func getAttestationTopic*(forkDigest: ForkDigest, attestation: Attestation, num_active_validators: uint64): string =
getAttestationTopic(
forkDigest,
compute_subnet_for_attestation(num_active_validators, attestation))
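A worked example of the subnet computation above with illustrative numbers (8-slot epochs, 4 committees per slot, 64 subnets, and a made-up fork digest in the resulting topic name):
```nim
const
  SLOTS_PER_EPOCH = 8'u64
  ATTESTATION_SUBNET_COUNT = 64'u64

let
  slot = 13'u64
  committeeIndex = 2'u64
  committeesPerSlot = 4'u64              # would come from get_committee_count_at_slot
  slotsSinceEpochStart = slot mod SLOTS_PER_EPOCH                       # 5
  committeesSinceEpochStart = committeesPerSlot * slotsSinceEpochStart  # 20
  subnet = (committeesSinceEpochStart + committeeIndex) mod
           ATTESTATION_SUBNET_COUNT                                     # 22

echo "/eth2/0x01020304/beacon_attestation_", subnet, "/ssz"
```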

View File

@ -73,14 +73,12 @@ type
MIN_EPOCHS_TO_INACTIVITY_PENALTY
MIN_GASPRICE
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
MIN_GENESIS_DELAY # TODO BLS_SPEC == "v0.11.3" remove
MIN_GENESIS_TIME
MIN_PER_EPOCH_CHURN_LIMIT
MIN_SEED_LOOKAHEAD
MIN_SLASHING_PENALTY_QUOTIENT
MIN_VALIDATOR_WITHDRAWABILITY_DELAY
ONLINE_PERIOD
PERSISTENT_COMMITTEE_PERIOD # TODO BLS_SPEC == "v0.11.3" remove
PHASE_1_FORK_VERSION
PHASE_1_GENESIS_SLOT
PROPOSER_REWARD_QUOTIENT
@ -142,7 +140,6 @@ const
MIN_DEPOSIT_AMOUNT: "'u64",
MIN_EPOCHS_TO_INACTIVITY_PENALTY: "'u64",
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: "'u64",
PERSISTENT_COMMITTEE_PERIOD: "'u64",
PHASE_1_FORK_VERSION: forkVersionConversionFn,
PROPOSER_REWARD_QUOTIENT: "'u64",
SECONDS_PER_SLOT: "'u64",

View File

@ -1,211 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# This file contains constants that are part of the spec and thus subject to
# serialization and spec updates.
import
math
type
Slot* = distinct uint64
Epoch* = distinct uint64
{.experimental: "codeReordering".} # SLOTS_PER_EPOCH is used before being defined in the spec
const
# Misc
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L6
MAX_COMMITTEES_PER_SLOT* {.intdefine.} = 64
TARGET_COMMITTEE_SIZE* = 2^7 ##\
## Number of validators in the committee attesting to one shard
## Per spec:
## For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds
## [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf);
## with sufficient active validators (at least
## `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures
## committee sizes at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness
## with a Verifiable Delay Function (VDF) will improve committee robustness
## and lower the safe minimum committee size.)
MAX_VALIDATORS_PER_COMMITTEE* = 2048 ##\
## votes
MIN_PER_EPOCH_CHURN_LIMIT* = 4
CHURN_LIMIT_QUOTIENT* = 2^16
SHUFFLE_ROUND_COUNT* = 90
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT* {.intdefine.} = 16384
MIN_GENESIS_TIME* {.intdefine.} = 1578009600
HYSTERESIS_QUOTIENT* = 4
HYSTERESIS_DOWNWARD_MULTIPLIER* = 1
HYSTERESIS_UPWARD_MULTIPLIER* = 5
# Gwei values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L58
MIN_DEPOSIT_AMOUNT* = 2'u64^0 * 10'u64^9 ##\
## Minimum amount of ETH that can be deposited in one call - deposits can
## be used either to top up an existing validator or commit to a new one
MAX_EFFECTIVE_BALANCE* = 2'u64^5 * 10'u64^9 ##\
## Maximum amount of ETH that can be deposited in one call
EJECTION_BALANCE* = 2'u64^4 * 10'u64^9 ##\
## Once the balance of a validator drops below this, it will be ejected from
## the validator pool
EFFECTIVE_BALANCE_INCREMENT* = 2'u64^0 * 10'u64^9
# Initial values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L70
GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8]
BLS_WITHDRAWAL_PREFIX* = 0'u8
# Time parameters
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L77
MIN_GENESIS_DELAY* = 86400 # 86400 seconds (1 day)
SECONDS_PER_SLOT*{.intdefine.} = 12'u64 # Compile with -d:SECONDS_PER_SLOT=1 for 12x faster slots
## TODO consistent time unit across projects, similar to C++ chrono?
MIN_ATTESTATION_INCLUSION_DELAY* = 1 ##\
## (12 seconds)
## Number of slots that attestations stay in the attestation
## pool before being added to a block.
## The attestation delay exists so that there is time for attestations to
## propagate before the block is created.
## When creating an attestation, the validator will look at the best
## information known to at that time, and may not revise it during the same
## slot (see `is_double_vote`) - the delay gives the validator a chance to
## wait towards the end of the slot and still have time to publish the
## attestation.
SLOTS_PER_EPOCH* {.intdefine.} = 32 ##\
## (~6.4 minutes)
## slots that make up an epoch, at the end of which more heavy
## processing is done
## Compile with -d:SLOTS_PER_EPOCH=4 for shorter epochs
MIN_SEED_LOOKAHEAD* = 1 ##\
## epochs (~6.4 minutes)
MAX_SEED_LOOKAHEAD* = 4 ##\
## epochs (~25.6 minutes)
EPOCHS_PER_ETH1_VOTING_PERIOD* = 32 ##\
## epochs (~3.4 hours)
SLOTS_PER_HISTORICAL_ROOT* = 8192 ##\
## slots (13 hours)
MIN_VALIDATOR_WITHDRAWABILITY_DELAY* = 2'u64^8 ##\
## epochs (~27 hours)
PERSISTENT_COMMITTEE_PERIOD* = 2'u64^11 ##\
## epochs (9 days)
MAX_EPOCHS_PER_CROSSLINK* = 2'u64^6 ##\
## epochs (~7 hours)
MIN_EPOCHS_TO_INACTIVITY_PENALTY* = 2'u64^2 ##\
## epochs (25.6 minutes)
# State vector lengths
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L105
EPOCHS_PER_HISTORICAL_VECTOR* = 65536 ##\
## epochs (~0.8 years)
EPOCHS_PER_SLASHINGS_VECTOR* = 8192 ##\
## epochs (~36 days)
HISTORICAL_ROOTS_LIMIT* = 16777216 ##\
## epochs (~26,131 years)
VALIDATOR_REGISTRY_LIMIT* = 1099511627776
# Reward and penalty quotients
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L117
BASE_REWARD_FACTOR* = 2'u64^6
WHISTLEBLOWER_REWARD_QUOTIENT* = 2'u64^9
PROPOSER_REWARD_QUOTIENT* = 2'u64^3
INACTIVITY_PENALTY_QUOTIENT* = 2'u64^25
MIN_SLASHING_PENALTY_QUOTIENT* = 32 # 2^5
# Max operations per block
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L131
MAX_PROPOSER_SLASHINGS* = 2^4
MAX_ATTESTER_SLASHINGS* = 2^0
MAX_ATTESTATIONS* = 2^7
MAX_DEPOSITS* = 2^4
MAX_VOLUNTARY_EXITS* = 2^4
# Fork choice
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L32
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 8 # 96 seconds
# Validators
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L38
ETH1_FOLLOW_DISTANCE* = 1024 # blocks ~ 4 hours
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION* = 256 # epochs ~ 27 hours
SECONDS_PER_ETH1_BLOCK* = 14 # (estimate from Eth1 mainnet)
# Phase 1: Upgrade from Phase 0
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L161
PHASE_1_FORK_VERSION* = 1
INITIAL_ACTIVE_SHARDS* = 64
# Phase 1: General
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L166
MAX_SHARDS* = 1024
ONLINE_PERIOD* = 8 # epochs (~51 min)
LIGHT_CLIENT_COMMITTEE_SIZE* = 128
LIGHT_CLIENT_COMMITTEE_PERIOD* = 256 # epochs (~27 hours)
SHARD_COMMITTEE_PERIOD* = 256 # epochs (~27 hours)
SHARD_BLOCK_CHUNK_SIZE* = 262144
MAX_SHARD_BLOCK_CHUNKS* = 4
TARGET_SHARD_BLOCK_SIZE* = 196608
MAX_SHARD_BLOCKS_PER_ATTESTATION* = 12
MAX_GASPRICE* = 16384 # Gwei
MIN_GASPRICE* = 8 # Gwei
GASPRICE_ADJUSTMENT_COEFFICIENT* = 8
# Phase 1: Custody game
# ---------------------------------------------------------------
# Time parameters
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L199
RANDAO_PENALTY_EPOCHS* = 2 # epochs (12.8 minutes)
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 16384 # epochs (~73 days)
EPOCHS_PER_CUSTODY_PERIOD* = 2048 # epochs (~9 days)
CUSTODY_PERIOD_TO_RANDAO_PADDING* = 2048 # epochs (~9 days)
MAX_REVEAL_LATENESS_DECREMENT* = 128 # epochs (~14 hours)
# Max operations
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L211
MAX_CUSTODY_KEY_REVEALS* = 256
MAX_EARLY_DERIVED_SECRET_REVEALS* = 1
MAX_CUSTODY_SLASHINGS* = 1
# Reward and penalty quotients
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L217
EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE* = 2
MINOR_REWARD_QUOTIENT* = 256

View File

@ -1,190 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# This file contains constants that are part of the spec and thus subject to
# serialization and spec updates.
import
math
type
Slot* = distinct uint64
Epoch* = distinct uint64
{.experimental: "codeReordering".} # SLOTS_PER_EPOCH is used before being defined in the spec
const
# Misc
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L4
# Changed
MAX_COMMITTEES_PER_SLOT* = 4
TARGET_COMMITTEE_SIZE* = 4
# Unchanged
MAX_VALIDATORS_PER_COMMITTEE* = 2048
MIN_PER_EPOCH_CHURN_LIMIT* = 4
CHURN_LIMIT_QUOTIENT* = 2^16
# Changed
SHUFFLE_ROUND_COUNT* = 10
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT* {.intdefine.} = 64
MIN_GENESIS_TIME* {.intdefine.} = 1578009600 # 3 Jan, 2020
# Unchanged
HYSTERESIS_QUOTIENT* = 4
HYSTERESIS_DOWNWARD_MULTIPLIER* = 1
HYSTERESIS_UPWARD_MULTIPLIER* = 5
# Gwei values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L58
# Unchanged
MIN_DEPOSIT_AMOUNT* = 2'u64^0 * 10'u64^9
MAX_EFFECTIVE_BALANCE* = 2'u64^5 * 10'u64^9
EJECTION_BALANCE* = 2'u64^4 * 10'u64^9
EFFECTIVE_BALANCE_INCREMENT* = 2'u64^0 * 10'u64^9
# Initial values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L70
GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 1'u8]
BLS_WITHDRAWAL_PREFIX* = 0'u8
# Time parameters
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L77
# Changed: Faster to spin up testnets, but does not give validator
# reasonable warning time for genesis
MIN_GENESIS_DELAY* = 300
# Unchanged
SECONDS_PER_SLOT*{.intdefine.} = 6'u64
# Unchanged
MIN_ATTESTATION_INCLUSION_DELAY* = 1
# Changed
SLOTS_PER_EPOCH* {.intdefine.} = 8
# Unchanged
MIN_SEED_LOOKAHEAD* = 1
MAX_SEED_LOOKAHEAD* = 4
# Changed
EPOCHS_PER_ETH1_VOTING_PERIOD* = 2
SLOTS_PER_HISTORICAL_ROOT* = 64
# Unchanged
MIN_VALIDATOR_WITHDRAWABILITY_DELAY* = 2'u64^8
# Changed
PERSISTENT_COMMITTEE_PERIOD* = 128
# Unchanged
MAX_EPOCHS_PER_CROSSLINK* = 4
# Changed
MIN_EPOCHS_TO_INACTIVITY_PENALTY* = 2'u64^2
# State vector lengths
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L105
# Changed
EPOCHS_PER_HISTORICAL_VECTOR* = 64
EPOCHS_PER_SLASHINGS_VECTOR* = 64
# Unchanged
HISTORICAL_ROOTS_LIMIT* = 16777216
VALIDATOR_REGISTRY_LIMIT* = 1099511627776
# Reward and penalty quotients
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L117
BASE_REWARD_FACTOR* = 2'u64^6
WHISTLEBLOWER_REWARD_QUOTIENT* = 2'u64^9
PROPOSER_REWARD_QUOTIENT* = 2'u64^3
INACTIVITY_PENALTY_QUOTIENT* = 2'u64^25
MIN_SLASHING_PENALTY_QUOTIENT* = 32 # 2^5
# Max operations per block
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L131
MAX_PROPOSER_SLASHINGS* = 2^4
MAX_ATTESTER_SLASHINGS* = 2^0
MAX_ATTESTATIONS* = 2^7
MAX_DEPOSITS* = 2^4
MAX_VOLUNTARY_EXITS* = 2^4
# Fork choice
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L32
# Changed
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 2
# Validators
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L38
# Changed
ETH1_FOLLOW_DISTANCE* = 16 # blocks
# Unchanged
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION* = 256 # epochs ~ 27 hours
SECONDS_PER_ETH1_BLOCK* = 14 # (estimate from Eth1 mainnet)
# Phase 1: Upgrade from Phase 0
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L161
PHASE_1_FORK_VERSION* = 16777217
INITIAL_ACTIVE_SHARDS* = 4
# Phase 1: General
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L169
MAX_SHARDS* = 8
ONLINE_PERIOD* = 8 # epochs ~ 51 minutes
LIGHT_CLIENT_COMMITTEE_SIZE* = 128
LIGHT_CLIENT_COMMITTEE_PERIOD* = 256 # epochs
SHARD_COMMITTEE_PERIOD* = 256 # epochs
SHARD_BLOCK_CHUNK_SIZE* = 262144
MAX_SHARD_BLOCK_CHUNKS* = 4
TARGET_SHARD_BLOCK_SIZE* = 196608
MAX_SHARD_BLOCKS_PER_ATTESTATION* = 12
MAX_GASPRICE* = 16384 # Gwei
MIN_GASPRICE* = 8 # Gwei
GASPRICE_ADJUSTMENT_COEFFICIENT* = 8
# Phase 1 - Custody game
# ---------------------------------------------------------------
# Time parameters
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L202
RANDAO_PENALTY_EPOCHS* = 2
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
EPOCHS_PER_CUSTODY_PERIOD* = 2048
CUSTODY_PERIOD_TO_RANDAO_PADDING* = 2048
MAX_REVEAL_LATENESS_DECREMENT* = 128
# Max operations
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L214
MAX_CUSTODY_KEY_REVEALS* = 256
MAX_EARLY_DERIVED_SECRET_REVEALS* = 1
MAX_CUSTODY_SLASHINGS* = 1
# Reward and penalty quotients
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L220
EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE* = 2
MINOR_REWARD_QUOTIENT* = 256

View File

@ -24,7 +24,7 @@ const
MAX_COMMITTEES_PER_SLOT* {.intdefine.} = 64
TARGET_COMMITTEE_SIZE* = 2^7 ##\
TARGET_COMMITTEE_SIZE* = 128 ##\
## Number of validators in the committee attesting to one shard
## Per spec:
## For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds
@ -74,9 +74,9 @@ const
# Time parameters
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L77
GENESIS_DELAY* = 172800 # 172800 seconds (2 days)
GENESIS_DELAY* {.intdefine.} = 172800 # 172800 seconds (2 days)
SECONDS_PER_SLOT*{.intdefine.} = 12'u64 # Compile with -d:SECONDS_PER_SLOT=1 for 12x faster slots
SECONDS_PER_SLOT* {.intdefine.} = 12'u64 # Compile with -d:SECONDS_PER_SLOT=1 for 12x faster slots
## TODO consistent time unit across projects, similar to C++ chrono?
MIN_ATTESTATION_INCLUSION_DELAY* = 1 ##\
@ -161,11 +161,11 @@ const
# Validators
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L38
ETH1_FOLLOW_DISTANCE* = 1024 # blocks ~ 4 hours
ETH1_FOLLOW_DISTANCE* {.intdefine.} = 1024 # blocks ~ 4 hours
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION* = 256 # epochs ~ 27 hours
SECONDS_PER_ETH1_BLOCK* = 14 # (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK* {.intdefine.} = 14 # (estimate from Eth1 mainnet)
# Phase 1: Upgrade from Phase 0
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/mainnet.yaml#L161
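The `{.intdefine.}` pragmas added in this hunk make these constants overridable at build time without touching the source; a minimal sketch of the mechanism (the constant name is reused here for illustration only):
```nim
# Compile with e.g. `nim c -d:ETH1_FOLLOW_DISTANCE=16 myprog.nim`
# to override the default below.
const ETH1_FOLLOW_DISTANCE {.intdefine.} = 1024
echo ETH1_FOLLOW_DISTANCE   # 1024 by default, 16 with the -d override
```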

View File

@ -20,7 +20,7 @@ type
const
# Misc
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L4
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L4
# Changed
MAX_COMMITTEES_PER_SLOT* = 4
@ -63,7 +63,7 @@ const
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L77
# Changed: Faster to spin up testnets, but does not give validator
# reasonable warning time for genesis
GENESIS_DELAY* = 300
GENESIS_DELAY* {.intdefine.} = 300
# Unchanged
SECONDS_PER_SLOT*{.intdefine.} = 6'u64
@ -95,7 +95,7 @@ const
# State vector lengths
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L105
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L105
# Changed
EPOCHS_PER_HISTORICAL_VECTOR* = 64
@ -127,7 +127,7 @@ const
# Fork choice
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L32
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L32
# Changed
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 2
@ -137,13 +137,13 @@ const
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L38
# Changed
ETH1_FOLLOW_DISTANCE* = 16 # blocks
ETH1_FOLLOW_DISTANCE* {.intdefine.} = 16 # blocks
# Unchanged
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION* = 256 # epochs ~ 27 hours
SECONDS_PER_ETH1_BLOCK* = 14 # estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK* {.intdefine.} = 14 # estimate from Eth1 mainnet)
# Phase 1: Upgrade from Phase 0
# ---------------------------------------------------------------

View File

@ -10,6 +10,12 @@
import
./crypto, ./digest, ./datatypes, ./helpers, ../ssz/merkleization
template withTrust(sig: SomeSig, body: untyped): bool =
when sig is TrustedSig:
true
else:
body
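A minimal stand-alone illustration (placeholder types, no real BLS) of what `withTrust` does: when the signature's type says it was already verified, the check collapses to `true` at compile time.
```nim
type
  Sig = object          # stands in for ValidatorSig
  TrustedSig = object
  SomeSig = Sig | TrustedSig

template withTrust(sig: SomeSig, body: untyped): bool =
  when sig is TrustedSig:
    true
  else:
    body

proc expensiveVerify(sig: Sig): bool = true  # stand-in for blsVerify

proc verify(sig: SomeSig): bool =
  withTrust(sig):
    expensiveVerify(sig)

echo verify(Sig())         # runs the (stand-in) cryptographic check
echo verify(TrustedSig())  # statically true, no crypto work at all
```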
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#aggregation-selection
func get_slot_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
@ -34,12 +40,13 @@ func get_epoch_signature*(
func verify_epoch_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
pubkey: ValidatorPubKey, signature: ValidatorSig): bool =
let
domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
signing_root = compute_signing_root(epoch, domain)
pubkey: ValidatorPubKey, signature: SomeSig): bool =
withTrust(signature):
let
domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
signing_root = compute_signing_root(epoch, domain)
blsVerify(pubkey, signing_root.data, signature)
blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#signature
func get_block_signature*(
@ -55,15 +62,17 @@ func get_block_signature*(
func verify_block_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
blck: Eth2Digest | BeaconBlock | BeaconBlockHeader, pubkey: ValidatorPubKey,
signature: ValidatorSig): bool =
let
epoch = compute_epoch_at_slot(slot)
domain = get_domain(
fork, DOMAIN_BEACON_PROPOSER, epoch, genesis_validators_root)
signing_root = compute_signing_root(blck, domain)
blck: Eth2Digest | SomeBeaconBlock | BeaconBlockHeader,
pubkey: ValidatorPubKey,
signature: SomeSig): bool =
withTrust(signature):
let
epoch = compute_epoch_at_slot(slot)
domain = get_domain(
fork, DOMAIN_BEACON_PROPOSER, epoch, genesis_validators_root)
signing_root = compute_signing_root(blck, domain)
blsVerify(pubKey, signing_root.data, signature)
blsVerify(pubKey, signing_root.data, signature)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-aggregate
func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
@ -94,14 +103,15 @@ func verify_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
attestation_data: AttestationData,
pubkeys: openArray[ValidatorPubKey],
signature: ValidatorSig): bool =
let
epoch = attestation_data.target.epoch
domain = get_domain(
fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
signing_root = compute_signing_root(attestation_data, domain)
signature: SomeSig): bool =
withTrust(signature):
let
epoch = attestation_data.target.epoch
domain = get_domain(
fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
signing_root = compute_signing_root(attestation_data, domain)
blsFastAggregateVerify(pubkeys, signing_root.data, signature)
blsFastAggregateVerify(pubkeys, signing_root.data, signature)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#deposits
func get_deposit_signature*(
@ -128,10 +138,11 @@ func verify_deposit_signature*(deposit: DepositData): bool =
func verify_voluntary_exit_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
voluntary_exit: VoluntaryExit,
pubkey: ValidatorPubKey, signature: ValidatorSig): bool =
let
domain = get_domain(
fork, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch, genesis_validators_root)
signing_root = compute_signing_root(voluntary_exit, domain)
pubkey: ValidatorPubKey, signature: SomeSig): bool =
withTrust(signature):
let
domain = get_domain(
fork, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch, genesis_validators_root)
signing_root = compute_signing_root(voluntary_exit, domain)
blsVerify(pubkey, signing_root.data, signature)
blsVerify(pubkey, signing_root.data, signature)

View File

@ -31,10 +31,10 @@ import
tables,
chronicles,
stew/results,
./extras, ./ssz/merkleization, metrics,
./spec/[datatypes, crypto, digest, helpers, signatures, validator],
./spec/[state_transition_block, state_transition_epoch],
../nbench/bench_lab
../extras, ../ssz/merkleization, metrics,
datatypes, crypto, digest, helpers, signatures, validator,
state_transition_block, state_transition_epoch,
../../nbench/bench_lab
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
declareGauge beacon_current_validators, """Number of status="pending|active|exited|withdrawable" validators in current epoch""" # On epoch transition
@ -64,7 +64,7 @@ func get_epoch_validator_count(state: BeaconState): int64 {.nbench.} =
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
proc verify_block_signature*(
state: BeaconState, signed_block: SignedBeaconBlock): bool {.nbench.} =
state: BeaconState, signed_block: SomeSignedBeaconBlock): bool {.nbench.} =
let
proposer_index = signed_block.message.proposer_index
if proposer_index >= state.validators.len.uint64:
@ -88,11 +88,15 @@ proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
let state_root = hash_tree_root(state)
if state_root != blck.state_root:
notice "Block: root verification failed",
block_state_root = blck.state_root, state_root
block_state_root = shortLog(blck.state_root), state_root = shortLog(state_root)
false
else:
true
proc verifyStateRoot(state: BeaconState, blck: TrustedBeaconBlock): bool =
# This is inlined in state_transition(...) in spec.
true
type
RollbackProc* = proc(v: var BeaconState) {.gcsafe, raises: [Defect].}
@ -170,7 +174,7 @@ proc noRollback*(state: var HashedBeaconState) =
trace "Skipping rollback of broken state"
proc state_transition*(
state: var HashedBeaconState, signedBlock: SignedBeaconBlock,
state: var HashedBeaconState, signedBlock: SomeSignedBeaconBlock,
# TODO this is ... okay, but not perfect; align with EpochRef
stateCache: var StateCache,
flags: UpdateFlags, rollback: RollbackHashedProc): bool {.nbench.} =
@ -225,7 +229,7 @@ proc state_transition*(
trace "in state_transition: processing block, signature passed",
signature = signedBlock.signature,
blockRoot = hash_tree_root(signedBlock.message)
if processBlock(state.data, signedBlock.message, flags, stateCache):
if process_block(state.data, signedBlock.message, flags, stateCache):
if skipStateRootValidation in flags or verifyStateRoot(state.data, signedBlock.message):
# State root is what it should be - we're done!
@ -245,7 +249,7 @@ proc state_transition*(
false
proc state_transition*(
state: var HashedBeaconState, signedBlock: SignedBeaconBlock,
state: var HashedBeaconState, signedBlock: SomeSignedBeaconBlock,
flags: UpdateFlags, rollback: RollbackHashedProc): bool {.nbench.} =
# TODO consider moving this to testutils or similar, since non-testing
# and fuzzing code should always be coming from blockpool which should

View File

@ -44,12 +44,13 @@ declareGauge beacon_processed_deposits_total, "Number of total deposits included
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#block-header
proc process_block_header*(
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
state: var BeaconState, blck: SomeBeaconBlock, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.} =
logScope:
blck = shortLog(blck)
# Verify that the slots match
if not (blck.slot == state.slot):
notice "Block header: slot mismatch",
block_slot = shortLog(blck.slot),
state_slot = shortLog(state.slot)
return false
@ -66,16 +67,13 @@ proc process_block_header*(
if not (blck.proposer_index.ValidatorIndex == proposer_index.get):
notice "Block header: proposer index incorrect",
block_proposer_index = blck.proposer_index.ValidatorIndex,
proposer_index = proposer_index.get
return false
# Verify that the parent matches
if skipBlockParentRootValidation notin flags and not (blck.parent_root ==
hash_tree_root(state.latest_block_header)):
if not (blck.parent_root == hash_tree_root(state.latest_block_header)):
notice "Block header: previous block root mismatch",
latest_block_header = state.latest_block_header,
blck = shortLog(blck),
latest_block_header_root = shortLog(hash_tree_root(state.latest_block_header))
return false
@ -100,10 +98,10 @@ proc `xor`[T: array](a, b: T): T =
for i in 0..<result.len:
result[i] = a[i] xor b[i]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#randao
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#randao
proc process_randao(
state: var BeaconState, body: BeaconBlockBody, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
state: var BeaconState, body: SomeBeaconBlockBody, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.} =
let
proposer_index = get_beacon_proposer_index(state, stateCache)
@ -138,7 +136,7 @@ proc process_randao(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1-data
func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) {.nbench.}=
func process_eth1_data(state: var BeaconState, body: SomeBeaconBlockBody) {.nbench.}=
state.eth1_data_votes.add body.eth1_data
if state.eth1_data_votes.asSeq.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD.int:
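The condition above implements simple majority voting over the eth1-data voting period; a quick arithmetic check with mainnet-style numbers (assuming 32 epochs × 32 slots = 1024 voting slots):
```nim
const SLOTS_PER_ETH1_VOTING_PERIOD = 1024
echo 513 * 2 > SLOTS_PER_ETH1_VOTING_PERIOD   # true: 513 of 1024 is a strict majority
echo 512 * 2 > SLOTS_PER_ETH1_VOTING_PERIOD   # false: exactly half is not enough
```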
@ -201,7 +199,7 @@ proc process_proposer_slashing*(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_slashable_attestation_data
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_slashable_attestation_data
func is_slashable_attestation_data(
data_1: AttestationData, data_2: AttestationData): bool =
## Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG
@ -285,16 +283,10 @@ proc process_voluntary_exit*(
return false
# Verify the validator has been active long enough
when ETH2_SPEC == "v0.12.1":
if not (get_current_epoch(state) >= validator.activation_epoch +
SHARD_COMMITTEE_PERIOD):
notice "Exit: not in validator set long enough"
return false
else:
if not (get_current_epoch(state) >= validator.activation_epoch +
PERSISTENT_COMMITTEE_PERIOD):
notice "Exit: not in validator set long enough"
return false
if not (get_current_epoch(state) >= validator.activation_epoch +
SHARD_COMMITTEE_PERIOD):
notice "Exit: not in validator set long enough"
return false
# Verify signature
if skipBlsValidation notin flags:
@ -321,7 +313,7 @@ proc process_voluntary_exit*(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#operations
proc process_operations(state: var BeaconState, body: BeaconBlockBody,
proc process_operations(state: var BeaconState, body: SomeBeaconBlockBody,
flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.} =
# Verify that outstanding deposits are processed up to the maximum number of
# deposits
@ -357,7 +349,7 @@ proc process_operations(state: var BeaconState, body: BeaconBlockBody,
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#block-processing
proc process_block*(
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
state: var BeaconState, blck: SomeBeaconBlock, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
## When there's a new block, we need to verify that the block is sane and
## update the state accordingly
@ -383,8 +375,8 @@ proc process_block*(
notice "Block header not valid", slot = shortLog(state.slot)
return false
if not processRandao(state, blck.body, flags, stateCache):
debug "[Block processing] Randao failure", slot = shortLog(state.slot)
if not process_randao(state, blck.body, flags, stateCache):
debug "Randao failure", slot = shortLog(state.slot)
return false
process_eth1_data(state, blck.body)
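`process_block` applies its sub-transitions in a fixed order and stops at the first failure; a minimal control-flow sketch with stand-in procs (not the real signatures):
```nim
proc processHeader(): bool = true      # stand-ins for the procs above
proc processRandao(): bool = true
proc processEth1Data() = discard       # cannot fail
proc processOperations(): bool = true

proc processBlock(): bool =
  if not processHeader(): return false
  if not processRandao(): return false
  processEth1Data()
  processOperations()

echo processBlock()   # true only when every stage succeeds
```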

View File

@ -173,7 +173,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
root: get_block_root(state, previous_epoch))
state.justification_bits.setBit 1
info "Justified with previous epoch",
debug "Justified with previous epoch",
current_epoch = current_epoch,
checkpoint = shortLog(state.current_justified_checkpoint),
cat = "justification"
@ -187,7 +187,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
root: get_block_root(state, current_epoch))
state.justification_bits.setBit 0
info "Justified with current epoch",
debug "Justified with current epoch",
current_epoch = current_epoch,
checkpoint = shortLog(state.current_justified_checkpoint),
cat = "justification"
@ -201,7 +201,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_previous_justified_checkpoint.epoch + 3 == current_epoch:
state.finalized_checkpoint = old_previous_justified_checkpoint
info "Finalized with rule 234",
debug "Finalized with rule 234",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"
@ -212,7 +212,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_previous_justified_checkpoint.epoch + 2 == current_epoch:
state.finalized_checkpoint = old_previous_justified_checkpoint
info "Finalized with rule 23",
debug "Finalized with rule 23",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"
@ -223,7 +223,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_current_justified_checkpoint.epoch + 2 == current_epoch:
state.finalized_checkpoint = old_current_justified_checkpoint
info "Finalized with rule 123",
debug "Finalized with rule 123",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"
@ -234,7 +234,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_current_justified_checkpoint.epoch + 1 == current_epoch:
state.finalized_checkpoint = old_current_justified_checkpoint
info "Finalized with rule 12",
debug "Finalized with rule 12",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"
@ -393,124 +393,27 @@ func get_inactivity_penalty_deltas(state: BeaconState, cache: var StateCache):
# Spec constructs rewards anyway; this doesn't
penalties
when ETH2_SPEC == "v0.12.1":
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_attestation_deltas
func get_attestation_deltas(state: BeaconState, cache: var StateCache): tuple[a: seq[Gwei], b: seq[Gwei]] =
# Return attestation reward/penalty deltas for each validator.
let
(source_rewards, source_penalties) = get_source_deltas(state, cache)
(target_rewards, target_penalties) = get_target_deltas(state, cache)
(head_rewards, head_penalties) = get_head_deltas(state, cache)
inclusion_delay_rewards = get_inclusion_delay_deltas(state, cache)
inactivity_penalties = get_inactivity_penalty_deltas(state, cache)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_attestation_deltas
func get_attestation_deltas(state: BeaconState, cache: var StateCache): tuple[a: seq[Gwei], b: seq[Gwei]] =
# Return attestation reward/penalty deltas for each validator.
let
(source_rewards, source_penalties) = get_source_deltas(state, cache)
(target_rewards, target_penalties) = get_target_deltas(state, cache)
(head_rewards, head_penalties) = get_head_deltas(state, cache)
inclusion_delay_rewards = get_inclusion_delay_deltas(state, cache)
inactivity_penalties = get_inactivity_penalty_deltas(state, cache)
let rewards = mapIt(0 ..< len(state.validators),
source_rewards[it] + target_rewards[it] + head_rewards[it] +
inclusion_delay_rewards[it])
let rewards = mapIt(0 ..< len(state.validators),
source_rewards[it] + target_rewards[it] + head_rewards[it] +
inclusion_delay_rewards[it])
let penalties = mapIt(0 ..< len(state.validators),
source_penalties[it] + target_penalties[it] + head_penalties[it] +
inactivity_penalties[it])
let penalties = mapIt(0 ..< len(state.validators),
source_penalties[it] + target_penalties[it] + head_penalties[it] +
inactivity_penalties[it])
(rewards, penalties)
else:
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#rewards-and-penalties-1
func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
tuple[a: seq[Gwei], b: seq[Gwei]] {.nbench.}=
let
previous_epoch = get_previous_epoch(state)
total_balance = get_total_active_balance(state, stateCache)
var
rewards = repeat(0'u64, len(state.validators))
penalties = repeat(0'u64, len(state.validators))
eligible_validator_indices : seq[ValidatorIndex] = @[]
(rewards, penalties)
for index, v in state.validators:
if is_active_validator(v, previous_epoch) or
(v.slashed and previous_epoch + 1 < v.withdrawable_epoch):
eligible_validator_indices.add index.ValidatorIndex
# Micro-incentives for matching FFG source, FFG target, and head
let
matching_source_attestations =
get_matching_source_attestations(state, previous_epoch)
matching_target_attestations =
get_matching_target_attestations(state, previous_epoch)
matching_head_attestations =
get_matching_head_attestations(state, previous_epoch)
for attestations in
[matching_source_attestations, matching_target_attestations,
matching_head_attestations]:
let
unslashed_attesting_indices =
get_unslashed_attesting_indices(state, attestations, stateCache)
attesting_balance = get_total_balance(state, unslashed_attesting_indices)
for index in eligible_validator_indices:
if index in unslashed_attesting_indices:
# Factored out from balance totals to avoid uint64 overflow
const increment = EFFECTIVE_BALANCE_INCREMENT
let reward_numerator = get_base_reward(state, index, total_balance) *
(attesting_balance div increment)
rewards[index] += reward_numerator div (total_balance div increment)
else:
penalties[index] += get_base_reward(state, index, total_balance)
# Proposer and inclusion delay micro-rewards
## This depends on matching_source_attestations being an indexable seq, not a
## set, hash table, etc.
let source_attestation_attesting_indices =
mapIt(
matching_source_attestations,
get_attesting_indices(state, it.data, it.aggregation_bits, stateCache))
for index in get_unslashed_attesting_indices(
state, matching_source_attestations, stateCache):
# Translation of attestation = min([...])
doAssert matching_source_attestations.len > 0
# Start by filtering the right attestations
var filtered_matching_source_attestations: seq[PendingAttestation]
for source_attestation_index, a in matching_source_attestations:
if index notin
source_attestation_attesting_indices[source_attestation_index]:
continue
filtered_matching_source_attestations.add a
# The first filtered attestation serves as min until we find something
# better
var attestation = filtered_matching_source_attestations[0]
for source_attestation_index, a in filtered_matching_source_attestations:
if a.inclusion_delay < attestation.inclusion_delay:
attestation = a
let
base_reward = get_base_reward(state, index, total_balance)
proposer_reward = (base_reward div PROPOSER_REWARD_QUOTIENT).Gwei
rewards[attestation.proposer_index.int] += proposer_reward
let max_attester_reward = base_reward - proposer_reward
rewards[index] += max_attester_reward div attestation.inclusion_delay
# Inactivity penalty
let finality_delay = previous_epoch - state.finalized_checkpoint.epoch
if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY:
let matching_target_attesting_indices =
get_unslashed_attesting_indices(
state, matching_target_attestations, stateCache)
for index in eligible_validator_indices:
penalties[index] +=
BASE_REWARDS_PER_EPOCH.uint64 * get_base_reward(state, index, total_balance)
if index notin matching_target_attesting_indices:
penalties[index] +=
state.validators[index].effective_balance *
finality_delay div INACTIVITY_PENALTY_QUOTIENT
(rewards, penalties)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#rewards-and-penalties-1
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#process_rewards_and_penalties
func process_rewards_and_penalties(
state: var BeaconState, cache: var StateCache) {.nbench.}=
if get_current_epoch(state) == GENESIS_EPOCH:

View File

@ -12,8 +12,8 @@ import
algorithm, options, sequtils, math, tables,
./datatypes, ./digest, ./helpers
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_committee
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_committee
func get_shuffled_seq*(seed: Eth2Digest,
list_size: uint64,
): seq[ValidatorIndex] =
@ -99,7 +99,7 @@ func get_previous_epoch*(state: BeaconState): Epoch =
else:
current_epoch - 1
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_committee
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_committee
func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
index: uint64, count: uint64): seq[ValidatorIndex] =
## Return the committee corresponding to ``indices``, ``seed``, ``index``,
@ -123,7 +123,7 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
except KeyError:
raiseAssert("Cached entries are added before use")
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee*(
state: BeaconState, slot: Slot, index: CommitteeIndex,
cache: var StateCache): seq[ValidatorIndex] =
@ -162,7 +162,7 @@ func get_empty_per_epoch_cache*(): StateCache =
initTable[Epoch, seq[ValidatorIndex]]()
result.committee_count_cache = initTable[Epoch, uint64]()
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_proposer_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_proposer_index
func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex],
seed: Eth2Digest): Option[ValidatorIndex] =
# Return from ``indices`` a random index sampled by effective balance.
@ -193,7 +193,7 @@ func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex],
return some(candidate_index)
i += 1
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache, slot: Slot):
Option[ValidatorIndex] =
try:
@ -227,7 +227,7 @@ func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache, slot:
except KeyError:
raiseAssert("Cached entries are added before use")
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache):
Option[ValidatorIndex] =
get_beacon_proposer_index(state, cache, state.slot)
@ -257,7 +257,7 @@ func get_committee_assignment*(
return some((committee, idx, slot))
none(tuple[a: seq[ValidatorIndex], b: CommitteeIndex, c: Slot])
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/validator.md#validator-assignments
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#validator-assignments
func is_proposer(
state: BeaconState, validator_index: ValidatorIndex): bool {.used.} =
var cache = get_empty_per_epoch_cache()

View File

@ -57,9 +57,6 @@ func fromSszBytes*(T: type Version, bytes: openarray[byte]): T {.raisesssz.} =
raiseIncorrectSize T
copyMem(result.addr, unsafeAddr bytes[0], sizeof(result))
template fromSszBytes*(T: type enum, bytes: openarray[byte]): auto =
T fromSszBytes(uint64, bytes)
template fromSszBytes*(T: type BitSeq, bytes: openarray[byte]): auto =
BitSeq @bytes

View File

@ -10,7 +10,9 @@ template toSszType*(x: auto): auto =
# Please note that BitArray doesn't need any special treatment here
# because it can be considered a regular fixed-size object type.
when x is Slot|Epoch|ValidatorIndex|enum: uint64(x)
# enum should not be added here as nim will raise Defect when value is out
# of range
when x is Slot|Epoch|ValidatorIndex: uint64(x)
elif x is Eth2Digest: x.data
elif x is BlsCurveType: toRaw(x)
elif x is ForkDigest|Version: distinctBase(x)

View File

@ -22,7 +22,11 @@ proc dump*(dir: string, v: SignedBeaconBlock, root: Eth2Digest) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)
proc dump*(dir: string, v: SignedBeaconBlock, blck: BlockRef) =
proc dump*(dir: string, v: TrustedSignedBeaconBlock, root: Eth2Digest) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)
proc dump*(dir: string, v: SomeSignedBeaconBlock, blck: BlockRef) =
dump(dir, v, blck.root)
proc dump*(dir: string, v: HashedBeaconState, blck: BlockRef) =

View File

@ -59,6 +59,13 @@ proc shortLog*(s: StatusMsg): auto =
)
chronicles.formatIt(StatusMsg): shortLog(it)
func disconnectReasonName(reason: uint64): string =
# haha, nim doesn't support uint64 in `case`!
if reason == uint64(ClientShutDown): "Client shutdown"
elif reason == uint64(IrrelevantNetwork): "Irrelevant network"
elif reason == uint64(FaultOrError): "Fault or error"
else: "Disconnected (" & $reason & ")"
proc importBlocks(state: BeaconSyncNetworkState,
blocks: openarray[SignedBeaconBlock]) {.gcsafe.} =
for blk in blocks:
@ -121,12 +128,13 @@ p2pProtocol BeaconSync(version = 1,
{.libp2pProtocol("metadata", 1).} =
return peer.network.metadata
proc beaconBlocksByRange(peer: Peer,
startSlot: Slot,
count: uint64,
step: uint64,
response: MultipleChunksResponse[SignedBeaconBlock])
{.async, libp2pProtocol("beacon_blocks_by_range", 1).} =
proc beaconBlocksByRange(
peer: Peer,
startSlot: Slot,
count: uint64,
step: uint64,
response: MultipleChunksResponse[SignedBeaconBlock])
{.async, libp2pProtocol("beacon_blocks_by_range", 1).} =
trace "got range request", peer, startSlot, count, step
if count > 0'u64:
@ -149,10 +157,11 @@ p2pProtocol BeaconSync(version = 1,
debug "Block range request done",
peer, startSlot, count, step, found = count - startIndex
proc beaconBlocksByRoot(peer: Peer,
blockRoots: BlockRootsList,
response: MultipleChunksResponse[SignedBeaconBlock])
{.async, libp2pProtocol("beacon_blocks_by_root", 1).} =
proc beaconBlocksByRoot(
peer: Peer,
blockRoots: BlockRootsList,
response: MultipleChunksResponse[SignedBeaconBlock])
{.async, libp2pProtocol("beacon_blocks_by_root", 1).} =
let
pool = peer.networkState.blockPool
count = blockRoots.len
@ -169,9 +178,9 @@ p2pProtocol BeaconSync(version = 1,
peer, roots = blockRoots.len, count, found
proc goodbye(peer: Peer,
reason: DisconnectionReason)
reason: uint64)
{.async, libp2pProtocol("goodbye", 1).} =
debug "Received Goodbye message", reason, peer
debug "Received Goodbye message", reason = disconnectReasonName(reason), peer
proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) =
debug "Peer status", peer, statusMsg

View File

@ -1,5 +1,5 @@
## Generated at line 87
## Generated at line 94
type
BeaconSync* = object
template State*(PROTO: type BeaconSync): type =
@ -69,15 +69,15 @@ template RecType*(MSG: type beaconBlocksByRootObj): untyped =
BlockRootsList
type
goodbyeObj* = distinct DisconnectionReason
goodbyeObj* = distinct uint64
template goodbye*(PROTO: type BeaconSync): type =
DisconnectionReason
uint64
template msgProtocol*(MSG: type goodbyeObj): type =
BeaconSync
template RecType*(MSG: type goodbyeObj): untyped =
DisconnectionReason
uint64
var BeaconSyncProtocolObj = initProtocol("BeaconSync", createPeerState[Peer,
ref[BeaconSyncPeerState:ObjectType]], createNetworkState[Eth2Node,
@ -136,7 +136,7 @@ proc beaconBlocksByRoot*(peer: Peer; blockRoots: BlockRootsList;
makeEth2Request(peer, "/eth2/beacon_chain/req/beacon_blocks_by_root/1/",
msgBytes, seq[SignedBeaconBlock], timeout)
proc goodbye*(peer: Peer; reason: DisconnectionReason): Future[void] {.gcsafe,
proc goodbye*(peer: Peer; reason: uint64): Future[void] {.gcsafe,
libp2pProtocol("goodbye", 1).} =
var outputStream = memoryOutput()
var writer = init(WriterType(SSZ), outputStream)
@ -238,7 +238,7 @@ proc beaconBlocksByRootUserHandler(peer: Peer; blockRoots: BlockRootsList; respo
inc found
debug "Block root request done", peer, roots = blockRoots.len, count, found
proc goodbyeUserHandler(peer: Peer; reason: DisconnectionReason) {.async,
proc goodbyeUserHandler(peer: Peer; reason: uint64) {.async,
libp2pProtocol("goodbye", 1), gcsafe.} =
type
CurrentProtocol = BeaconSync
@ -249,7 +249,7 @@ proc goodbyeUserHandler(peer: Peer; reason: DisconnectionReason) {.async,
cast[ref[BeaconSyncNetworkState:ObjectType]](getNetworkState(peer.network,
BeaconSyncProtocol))
debug "Received Goodbye message", reason, peer
debug "Received Goodbye message", reason = disconnectReasonName(reason), peer
template callUserHandler(MSG: type statusObj; peer: Peer; stream: Connection;
noSnappy: bool; msg: StatusMsg): untyped =
@ -338,7 +338,7 @@ proc beaconBlocksByRootMounter(network: Eth2Node) =
"ssz_snappy", handler: snappyThunk)
template callUserHandler(MSG: type goodbyeObj; peer: Peer; stream: Connection;
noSnappy: bool; msg: DisconnectionReason): untyped =
noSnappy: bool; msg: uint64): untyped =
goodbyeUserHandler(peer, msg)
proc goodbyeMounter(network: Eth2Node) =

View File

@ -7,7 +7,7 @@
import
# Standard library
tables, strutils,
tables, strutils, parseutils,
# Nimble packages
stew/[objects],
@ -19,7 +19,7 @@ import
block_pool, ssz/merkleization,
beacon_node_common, beacon_node_types,
validator_duties, eth2_network,
spec/eth2_apis/validator_callsigs_types,
spec/eth2_apis/callsigs_types,
eth2_json_rpc_serialization
type
@ -27,64 +27,102 @@ type
logScope: topics = "valapi"
# TODO Probably the `beacon` ones should be defined elsewhere...?
proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
# TODO Probably the `beacon` ones (and not `validator`) should be defined elsewhere...
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
notice "== get_v1_beacon_states_fork", stateId = stateId
template withStateForSlot(stateId: string, body: untyped): untyped =
var res: BiggestInt
if parseBiggestInt(stateId, res) == stateId.len:
raise newException(CatchableError, "Not a valid slot number")
let head = node.updateHead()
let blockSlot = head.atSlot(res.Slot)
node.blockPool.withState(node.blockPool.tmpState, blockSlot):
body
rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
debug "get_v1_beacon_genesis"
return (genesis_time: node.blockPool.headState.data.data.genesis_time,
genesis_validators_root:
node.blockPool.headState.data.data.genesis_validators_root,
genesis_fork_version: Version(GENESIS_FORK_VERSION))
rpcServer.rpc("get_v1_beacon_states_root") do (stateId: string) -> Eth2Digest:
debug "get_v1_beacon_states_root", stateId = stateId
# TODO do we need to call node.updateHead() before using headState?
result = case stateId:
of "head":
node.blockPool.headState.blck.root
of "genesis":
node.blockPool.headState.data.data.genesis_validators_root
of "finalized":
node.blockPool.headState.data.data.finalized_checkpoint.root
of "justified":
node.blockPool.headState.data.data.current_justified_checkpoint.root
else:
if stateId.startsWith("0x"):
# TODO not sure if `fromHex` is the right thing here...
# https://github.com/ethereum/eth2.0-APIs/issues/37#issuecomment-638566144
# we return whatever was passed to us (this is a nonsense request)
fromHex(Eth2Digest, stateId[2..<stateId.len]) # skip first 2 chars
else:
withStateForSlot(stateId):
hashedState.root
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
debug "get_v1_beacon_states_fork", stateId = stateId
result = case stateId:
of "head":
discard node.updateHead() # TODO do we need this?
node.blockPool.headState.data.data.fork
of "genesis":
Fork(previous_version: Version(GENESIS_FORK_VERSION),
current_version: Version(GENESIS_FORK_VERSION),
epoch: GENESIS_EPOCH)
of "finalized":
# TODO
Fork()
node.blockPool.withState(node.blockPool.tmpState, node.blockPool.finalizedHead):
state.fork
of "justified":
# TODO
Fork()
node.blockPool.justifiedState.data.data.fork
else:
# TODO parse `stateId` as either a number (slot) or a hash (stateRoot)
Fork()
if stateId.startsWith("0x"):
# TODO not sure if `fromHex` is the right thing here...
# https://github.com/ethereum/eth2.0-APIs/issues/37#issuecomment-638566144
let blckRoot = fromHex(Eth2Digest, stateId[2..<stateId.len]) # skip first 2 chars
let blckRef = node.blockPool.getRef(blckRoot)
if blckRef.isNil:
raise newException(CatchableError, "Block not found")
let blckSlot = blckRef.atSlot(blckRef.slot)
node.blockPool.withState(node.blockPool.tmpState, blckSlot):
state.fork
else:
withStateForSlot(stateId):
state.fork
# TODO Probably the `beacon` ones (and not `validator`) should be defined elsewhere...
rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
notice "== get_v1_beacon_genesis"
return BeaconGenesisTuple(genesis_time: node.blockPool.headState.data.data.genesis_time,
genesis_validators_root: node.blockPool.headState.data.data.genesis_validators_root,
genesis_fork_version: Version(GENESIS_FORK_VERSION))
rpcServer.rpc("post_v1_beacon_pool_attestations") do (attestation: Attestation) -> bool:
#notice "== post_v1_beacon_pool_attestations"
rpcServer.rpc("post_v1_beacon_pool_attestations") do (
attestation: Attestation) -> bool:
node.sendAttestation(attestation)
return true
rpcServer.rpc("get_v1_validator_blocks") do (
slot: Slot, graffiti: Eth2Digest, randao_reveal: ValidatorSig) -> BeaconBlock:
notice "== get_v1_validator_blocks", slot = slot
debug "get_v1_validator_blocks", slot = slot
let head = node.updateHead()
let proposer = node.blockPool.getProposer(head, slot)
# TODO how do we handle the case when we cannot return a meaningful block? 404...
doAssert(proposer.isSome())
if proposer.isNone():
raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
let valInfo = ValidatorInfoForMakeBeaconBlock(kind: viRandao_reveal,
randao_reveal: randao_reveal)
let res = makeBeaconBlockForHeadAndSlot(
node, valInfo, proposer.get()[0], graffiti, head, slot)
# TODO how do we handle the case when we cannot return a meaningful block? 404...
doAssert(res.message.isSome())
return res.message.get(BeaconBlock()) # returning a default if empty
if res.message.isNone():
raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
return res.message.get()
rpcServer.rpc("post_v1_beacon_blocks") do (body: SignedBeaconBlock) -> bool:
notice "== post_v1_beacon_blocks"
debug "post_v1_beacon_blocks",
slot = body.message.slot,
prop_idx = body.message.proposer_index
logScope: pcs = "block_proposal"
let head = node.updateHead()
if head.slot >= body.message.slot:
warn "Skipping proposal, have newer head already",
@ -92,14 +130,15 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
headBlockRoot = shortLog(head.root),
slot = shortLog(body.message.slot),
cat = "fastforward"
return false
return head != await proposeSignedBlock(node, head, AttachedValidator(),
body, hash_tree_root(body.message))
raise newException(CatchableError,
"Proposal is for a past slot: " & $body.message.slot)
if head == await proposeSignedBlock(node, head, AttachedValidator(),
body, hash_tree_root(body.message)):
raise newException(CatchableError, "Could not propose block")
return true
rpcServer.rpc("get_v1_validator_attestation_data") do (
slot: Slot, committee_index: CommitteeIndex) -> AttestationData:
#notice "== get_v1_validator_attestation_data"
# Obtain the data to form an attestation
let head = node.updateHead()
let attestationHead = head.atSlot(slot)
node.blockPool.withState(node.blockPool.tmpState, attestationHead):
@ -107,45 +146,43 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
rpcServer.rpc("get_v1_validator_aggregate_attestation") do (
attestation_data: AttestationData)-> Attestation:
notice "== get_v1_validator_aggregate_attestation"
debug "get_v1_validator_aggregate_attestation"
rpcServer.rpc("post_v1_validator_aggregate_and_proof") do (
payload: SignedAggregateAndProof) -> bool:
notice "== post_v1_validator_aggregate_and_proof"
# TODO is this enough?
node.network.broadcast(node.topicAggregateAndProofs, payload)
return true
rpcServer.rpc("post_v1_validator_duties_attester") do (
epoch: Epoch, public_keys: seq[ValidatorPubKey]) -> seq[AttesterDuties]:
notice "== post_v1_validator_duties_attester", epoch = epoch
discard node.updateHead() # TODO do we need this?
for pubkey in public_keys:
let idx = node.blockPool.headState.data.data.validators.asSeq.findIt(it.pubKey == pubkey)
if idx != -1:
# TODO this might crash if the requested epoch is further than the BN epoch
# because of this: `doAssert epoch <= next_epoch`
let res = node.blockPool.headState.data.data.get_committee_assignment(
epoch, idx.ValidatorIndex)
if res.isSome:
result.add(AttesterDuties(public_key: pubkey,
committee_index: res.get.b,
committee_length: res.get.a.len.uint64,
validator_committee_index: res.get.a.find(idx.ValidatorIndex).uint64,
slot: res.get.c))
debug "post_v1_validator_duties_attester", epoch = epoch
let head = node.updateHead()
let attestationHead = head.atSlot(compute_start_slot_at_epoch(epoch))
node.blockPool.withState(node.blockPool.tmpState, attestationHead):
for pubkey in public_keys:
let idx = state.validators.asSeq.findIt(it.pubKey == pubkey)
if idx == -1:
continue
let ca = state.get_committee_assignment(epoch, idx.ValidatorIndex)
if ca.isSome:
result.add((public_key: pubkey,
committee_index: ca.get.b,
committee_length: ca.get.a.len.uint64,
validator_committee_index: ca.get.a.find(idx.ValidatorIndex).uint64,
slot: ca.get.c))
rpcServer.rpc("get_v1_validator_duties_proposer") do (
epoch: Epoch) -> seq[ValidatorPubkeySlotPair]:
notice "== get_v1_validator_duties_proposer", epoch = epoch
debug "get_v1_validator_duties_proposer", epoch = epoch
let head = node.updateHead()
for i in 0 ..< SLOTS_PER_EPOCH:
let currSlot = (compute_start_slot_at_epoch(epoch).int + i).Slot
let proposer = node.blockPool.getProposer(head, currSlot)
if proposer.isSome():
result.add(ValidatorPubkeySlotPair(public_key: proposer.get()[1], slot: currSlot))
result.add((public_key: proposer.get()[1], slot: currSlot))
rpcServer.rpc("post_v1_validator_beacon_committee_subscription") do (
rpcServer.rpc("post_v1_validator_beacon_committee_subscriptions") do (
committee_index: CommitteeIndex, slot: Slot, aggregator: bool,
validator_pubkey: ValidatorPubKey, slot_signature: ValidatorSig):
notice "== post_v1_validator_beacon_committee_subscription"
# TODO
validator_pubkey: ValidatorPubKey, slot_signature: ValidatorSig) -> bool:
debug "post_v1_validator_beacon_committee_subscriptions"
raise newException(CatchableError, "Not implemented")

View File

@ -22,7 +22,7 @@ import
nimbus_binary_common,
version, ssz/merkleization,
sync_manager, keystore_management,
spec/eth2_apis/validator_callsigs_types,
spec/eth2_apis/callsigs_types,
eth2_json_rpc_serialization
logScope: topics = "vc"
@ -31,6 +31,7 @@ template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
## Generate client convenience marshalling wrappers from forward declarations
createRpcSigs(RpcClient, sourceDir / "spec" / "eth2_apis" / "validator_callsigs.nim")
createRpcSigs(RpcClient, sourceDir / "spec" / "eth2_apis" / "beacon_callsigs.nim")
type
ValidatorClient = ref object
@ -39,31 +40,66 @@ type
beaconClock: BeaconClock
attachedValidators: ValidatorPool
fork: Fork
proposalsForEpoch: Table[Slot, ValidatorPubKey]
attestationsForEpoch: Table[Slot, seq[AttesterDuties]]
proposalsForCurrentEpoch: Table[Slot, ValidatorPubKey]
attestationsForEpoch: Table[Epoch, Table[Slot, seq[AttesterDuties]]]
beaconGenesis: BeaconGenesisTuple
proc connectToBN(vc: ValidatorClient) {.gcsafe, async.} =
while true:
try:
await vc.client.connect($vc.config.rpcAddress, Port(vc.config.rpcPort))
info "Connected to BN",
port = vc.config.rpcPort,
address = vc.config.rpcAddress
return
except CatchableError as err:
warn "Could not connect to the BN - retrying!", err = err.msg
await sleepAsync(chronos.seconds(1)) # 1 second before retrying
template attemptUntilSuccess(vc: ValidatorClient, body: untyped) =
while true:
try:
body
break
except CatchableError as err:
warn "Caught an unexpected error", err = err.msg
waitFor vc.connectToBN()
proc getValidatorDutiesForEpoch(vc: ValidatorClient, epoch: Epoch) {.gcsafe, async.} =
let proposals = await vc.client.get_v1_validator_duties_proposer(epoch)
# update the block proposal duties this VC should do during this epoch
vc.proposalsForEpoch.clear()
vc.proposalsForCurrentEpoch.clear()
for curr in proposals:
if vc.attachedValidators.validators.contains curr.public_key:
vc.proposalsForEpoch.add(curr.slot, curr.public_key)
vc.proposalsForCurrentEpoch.add(curr.slot, curr.public_key)
# couldn't use mapIt in ANY shape or form so reverting to raw loops - sorry Sean Parent :|
var validatorPubkeys: seq[ValidatorPubKey]
for key in vc.attachedValidators.validators.keys:
validatorPubkeys.add key
# update the attestation duties this VC should do during this epoch
let attestations = await vc.client.post_v1_validator_duties_attester(
epoch, validatorPubkeys)
vc.attestationsForEpoch.clear()
for a in attestations:
if vc.attestationsForEpoch.hasKeyOrPut(a.slot, @[a]):
vc.attestationsForEpoch[a.slot].add(a)
proc getAttesterDutiesForEpoch(epoch: Epoch) {.gcsafe, async.} =
let attestations = await vc.client.post_v1_validator_duties_attester(
epoch, validatorPubkeys)
# make sure there's an entry
if not vc.attestationsForEpoch.contains epoch:
vc.attestationsForEpoch.add(epoch, Table[Slot, seq[AttesterDuties]]())
for a in attestations:
if vc.attestationsForEpoch[epoch].hasKeyOrPut(a.slot, @[a]):
vc.attestationsForEpoch[epoch][a.slot].add(a)
# obtain the attestation duties this VC should do during the next epoch
await getAttesterDutiesForEpoch(epoch + 1)
# also get the attestation duties for the current epoch if missing
if not vc.attestationsForEpoch.contains epoch:
await getAttesterDutiesForEpoch(epoch)
# cleanup old epoch attestation duties
vc.attestationsForEpoch.del(epoch - 1)
# TODO handle subscriptions to beacon committees for both the next epoch and
# for the current if missing (beacon_committee_subscriptions from the REST api)
# for now we will get the fork each time we update the validator duties for each epoch
# TODO should poll occasionally `/v1/config/fork_schedule`
vc.fork = await vc.client.get_v1_beacon_states_fork("head")
proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, async.} =
@ -76,6 +112,7 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
let
slot = wallSlot.slot # afterGenesis == true!
nextSlot = slot + 1
epoch = slot.compute_epoch_at_slot
info "Slot start",
lastSlot = shortLog(lastSlot),
@ -91,11 +128,11 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
# could take up time for attesting... Perhaps this should be called more
# than once per epoch because of forks & other events...
if slot.isEpoch:
await getValidatorDutiesForEpoch(vc, slot.compute_epoch_at_slot)
await getValidatorDutiesForEpoch(vc, epoch)
# check if we have a validator which needs to propose on this slot
if vc.proposalsForEpoch.contains slot:
let public_key = vc.proposalsForEpoch[slot]
if vc.proposalsForCurrentEpoch.contains slot:
let public_key = vc.proposalsForCurrentEpoch[slot]
let validator = vc.attachedValidators.validators[public_key]
let randao_reveal = validator.genRandaoReveal(
@ -121,8 +158,8 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
seconds(int64(SECONDS_PER_SLOT)) div 3, slot, "Waiting to send attestations")
# check if we have validators which need to attest on this slot
if vc.attestationsForEpoch.contains slot:
for a in vc.attestationsForEpoch[slot]:
if vc.attestationsForEpoch[epoch].contains slot:
for a in vc.attestationsForEpoch[epoch][slot]:
let validator = vc.attachedValidators.validators[a.public_key]
let ad = await vc.client.get_v1_validator_attestation_data(slot, a.committee_index)
@ -135,7 +172,8 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
discard await vc.client.post_v1_beacon_pool_attestations(attestation)
except CatchableError as err:
error "Caught an unexpected error", err = err.msg
warn "Caught an unexpected error", err = err.msg, slot = shortLog(slot)
await vc.connectToBN()
let
nextSlotStart = saturate(vc.beaconClock.fromNow(nextSlot))
@ -177,34 +215,27 @@ programMain:
var vc = ValidatorClient(
config: config,
client: newRpcHttpClient(),
attachedValidators: ValidatorPool.init()
client: newRpcHttpClient()
)
vc.proposalsForEpoch.init()
vc.attestationsForEpoch.init()
# load all the validators from the data dir into memory
for curr in vc.config.validatorKeys:
vc.attachedValidators.addLocalValidator(curr.toPubKey, curr)
# TODO perhaps we should handle the case if the BN is down and try to connect to it
# untill success, and also later on disconnets we should continue trying to reconnect
waitFor vc.client.connect("localhost", Port(config.rpcPort)) # TODO: use config.rpcAddress
info "Connected to beacon node", port = config.rpcPort
waitFor vc.connectToBN()
# init the beacon clock
vc.beaconGenesis = waitFor vc.client.get_v1_beacon_genesis()
vc.beaconClock = BeaconClock.init(vc.beaconGenesis.genesis_time)
vc.attemptUntilSuccess:
# init the beacon clock
vc.beaconGenesis = waitFor vc.client.get_v1_beacon_genesis()
vc.beaconClock = BeaconClock.init(vc.beaconGenesis.genesis_time)
let
curSlot = vc.beaconClock.now().slotOrZero()
nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1
fromNow = saturate(vc.beaconClock.fromNow(nextSlot))
# onSlotStart() requests the validator duties only on the start of each epoch
# so we should request the duties here when the VC binary boots up in order
# to handle the case when in the middle of an epoch. Also for the genesis slot.
waitFor vc.getValidatorDutiesForEpoch(curSlot.compute_epoch_at_slot)
vc.attemptUntilSuccess:
waitFor vc.getValidatorDutiesForEpoch(curSlot.compute_epoch_at_slot)
info "Scheduling first slot action",
beaconTime = shortLog(vc.beaconClock.now()),

View File

@ -19,10 +19,11 @@ import
# Local modules
spec/[datatypes, digest, crypto, beaconstate, helpers, validator, network],
conf, time, validator_pool, state_transition,
attestation_pool, block_pool, eth2_network, keystore_management,
beacon_node_common, beacon_node_types, nimbus_binary_common,
mainchain_monitor, version, ssz/merkleization, interop,
spec/state_transition,
conf, time, validator_pool,
attestation_pool, block_pool, block_pools/candidate_chains, eth2_network,
keystore_management, beacon_node_common, beacon_node_types,
nimbus_binary_common, mainchain_monitor, version, ssz/merkleization, interop,
attestation_aggregation, sync_manager, sszdump
# Metrics for tracking attestation and beacon block loss
@ -98,28 +99,45 @@ proc isSynced(node: BeaconNode, head: BlockRef): bool =
else:
true
proc sendAttestation*(node: BeaconNode, attestation: Attestation) =
proc sendAttestation*(
node: BeaconNode, attestation: Attestation, num_active_validators: uint64) =
logScope: pcs = "send_attestation"
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#broadcast-attestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
node.network.broadcast(
getMainnetAttestationTopic(node.forkDigest, attestation.data.index),
getAttestationTopic(node.forkDigest, attestation, num_active_validators),
attestation)
beacon_attestations_sent.inc()
proc sendAttestation*(node: BeaconNode, attestation: Attestation) =
# For the validator API, which doesn't supply num_active_validators.
let attestationBlck =
node.blockPool.getRef(attestation.data.beacon_block_root)
if attestationBlck.isNil:
debug "Attempt to send attestation without corresponding block"
return
node.blockPool.withEpochState(
node.blockPool.tmpState,
BlockSlot(blck: attestationBlck, slot: attestation.data.slot)):
node.sendAttestation(
attestation,
blck.getEpochInfo(state).shuffled_active_validator_indices.len.uint64)
proc createAndSendAttestation(node: BeaconNode,
fork: Fork,
genesis_validators_root: Eth2Digest,
validator: AttachedValidator,
attestationData: AttestationData,
committeeLen: int,
indexInCommittee: int) {.async.} =
indexInCommittee: int,
num_active_validators: uint64) {.async.} =
logScope: pcs = "send_attestation"
var attestation = await validator.produceAndSignAttestation(attestationData, committeeLen, indexInCommittee, fork, genesis_validators_root)
node.sendAttestation(attestation)
node.sendAttestation(attestation, num_active_validators)
if node.config.dumpEnabled:
dump(node.config.dumpDirOutgoing, attestation.data, validator.pubKey)
@ -311,8 +329,15 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
# the complexity of handling forks correctly - instead, we use an adapted
# version here that calculates the committee for a single slot only
node.blockPool.withState(node.blockPool.tmpState, attestationHead):
var cache = get_empty_per_epoch_cache()
let committees_per_slot = get_committee_count_at_slot(state, slot)
var cache = getEpochCache(attestationHead.blck, state)
let
committees_per_slot = get_committee_count_at_slot(state, slot)
num_active_validators =
try:
cache.shuffled_active_validator_indices[
slot.compute_epoch_at_slot].len.uint64
except KeyError:
raiseAssert "getEpochCache(...) didn't fill cache"
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
@ -327,7 +352,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
for a in attestations:
traceAsyncErrors createAndSendAttestation(
node, state.fork, state.genesis_validators_root, a.validator, a.data,
a.committeeLen, a.indexInCommittee)
a.committeeLen, a.indexInCommittee, num_active_validators)
proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
Future[BlockRef] {.async.} =

View File

@ -41,6 +41,11 @@ else:
# for heap-usage-by-instance-type metrics and object base-type strings
--define:nimTypeNames
# switch("define", "snappy_implementation=libp2p")
const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)]
switch("define", "nim_compiler_path=" & currentDir & "env.sh nim")
switch("import", "testutils/moduletests")
const useLibStackTrace = not defined(macosx) and

View File

@ -3,7 +3,7 @@ FROM debian:bullseye-slim AS build
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \
&& apt-get -qq -y install build-essential make wget libpcre3-dev golang-go git &>/dev/null \
&& apt-get -qq -y install build-essential libpcre3-dev git &>/dev/null \
&& apt-get -qq clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
@ -11,8 +11,7 @@ RUN apt-get -qq update \
RUN cd /root \
&& git clone https://github.com/status-im/nim-beacon-chain.git \
&& cd nim-beacon-chain \
&& make -j$(nproc) update \
&& make deps
&& make -j$(nproc) update
# Please note that the commands above have the goal of caching the
# compilation of Nim, but don't depend on the current git revision.
@ -47,5 +46,7 @@ COPY --from=build /root/nim-beacon-chain/build/beacon_node /usr/bin/
MAINTAINER Zahary Karadjov <zahary@status.im>
LABEL description="Nimbus installation that can act as an ETH2 network bootstrap node."
STOPSIGNAL SIGINT
ENTRYPOINT ["/usr/bin/beacon_node"]

View File

@ -1,5 +1,5 @@
import
strformat, os, confutils, algorithm
strformat, os, confutils, algorithm, sequtils
type
Command = enum
@ -9,15 +9,15 @@ type
CliConfig = object
network: string
depositsDir {.
defaultValue: "deposits"
name: "deposits-dir" }: string
case cmd {.command.}: Command
of restart_nodes:
discard
of reset_network:
depositsDir {.
defaultValue: "deposits"
name: "deposits-dir" }: string
secretsDir {.
defaultValue: "secrets"
name: "secrets-dir" }: string
@ -38,9 +38,9 @@ type
var conf = load CliConfig
var
serverCount = 10
instancesCount = 2
validators = listDirs(conf.depositsDir)
serverCount = 6
instancesCount = 1
validators = listDirs(conf.depositsDir).mapIt(splitPath(it)[1])
sort(validators)
@ -115,6 +115,7 @@ of reset_network:
for n, firstValidator, lastValidator in validatorAssignments():
var
validatorDirs = ""
secretFiles = ""
networkDataFiles = conf.networkDataDir & "/{genesis.ssz,bootstrap_nodes.txt}"
for i in firstValidator ..< lastValidator:
@ -125,15 +126,14 @@ of reset_network:
let dockerPath = &"/docker/{n.container}/data/BeaconNode"
echo &"echo Syncing {lastValidator - firstValidator} keys starting from {firstValidator} to container {n.container}@{n.server} ... && \\"
echo &" ssh {n.server} 'sudo rm -rf /tmp/nimbus && mkdir -p /tmp/nimbus/{{validators,secrets}}' && \\"
echo &" ssh {n.server} 'sudo rm -rf /tmp/nimbus && mkdir -p /tmp/nimbus/{{net-data,validators,secrets}}' && \\"
echo &" rsync -a -zz {networkDataFiles} {n.server}:/tmp/nimbus/net-data/ && \\"
if validator.len > 0:
if validators.len > 0:
echo &" rsync -a -zz {validatorDirs} {n.server}:/tmp/nimbus/validators/ && \\"
echo &" rsync -a -zz {secretFiles} {n.server}:/tmp/nimbus/secrets/ && \\"
echo &" ssh {n.server} 'sudo docker container stop {n.container}; " &
&"sudo rm -rf {dockerPath}/{{db,validators,secrets}}* && " &
(if validators.len > 0: &"sudo mv /tmp/nimbus/* {dockerPath}/ && " else: "") &
&"sudo mv /tmp/nimbus/net-data/* {dockerPath}/ && " &
&"sudo rm -rf {dockerPath}/{{db,validators,secrets,net-data}}* && " &
&"sudo mv /tmp/nimbus/* {dockerPath}/ && " &
&"sudo chown dockremap:docker -R {dockerPath}'"

View File

@ -0,0 +1,17 @@
FROM debian:bullseye-slim
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \
&& apt-get -qq -y install build-essential libpcre3-dev git &>/dev/null \
&& apt-get -qq clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
ARG NETWORK
ENV NETWORK=${NETWORK}
STOPSIGNAL SIGINT
COPY "entry_point.sh" "/root/"
ENTRYPOINT ["/root/entry_point.sh"]

View File

@ -0,0 +1,28 @@
SHELL := bash # the shell used internally by "make"
# These default settings can be overridden by exporting env variables
NETWORK ?= witti
IMAGE_TAG ?= testnet2
IMAGE_NAME ?= statusteam/nimbus_beacon_node:$(IMAGE_TAG)
CURRENT_BRANCH = $(shell git rev-parse --abbrev-ref HEAD)
COMPUTER_SAYS_NO = { echo "I'm sorry, Dave. I'm afraid I can't do that."; exit 1; }
.PHONY: build push push-last
build:
@ DOCKER_BUILDKIT=1 \
docker build \
--build-arg="NETWORK=$(NETWORK)" \
-t $(IMAGE_NAME) \
--progress=plain \
.
push: build
+@ $(MAKE) push-last
push-last:
@ [[ "$(CURRENT_BRANCH)" != "devel" ]] && $(COMPUTER_SAYS_NO) || true
docker push $(IMAGE_NAME)

View File

@ -0,0 +1,57 @@
## local testing
From the "nim-beacon-chain" repo (top-level dir):
```text
make -C docker/shared_testnet NETWORK=witti build
mkdir tmp
docker run --rm --mount type=bind,source="$(pwd)"/tmp,target=/root/.cache/nimbus --name testnet2 statusteam/nimbus_beacon_node:testnet2 --build
ls -l tmp/nim-beacon-chain/build
docker run --rm --mount type=bind,source="$(pwd)"/tmp,target=/root/.cache/nimbus --name testnet2 -p 127.0.0.1:8008:8008 -p 9000:9000 statusteam/nimbus_beacon_node:testnet2 --run -- --metrics-address=0.0.0.0
# from another terminal
docker ps
docker stop testnet2
# when you're happy with the Docker image:
make -C docker/shared_testnet NETWORK=witti push
```
## setting up remote servers
From the "infra-nimbus" repo:
```text
git pull
ansible-galaxy install -g -f -r ansible/requirements.yml
ansible-playbook ansible/nimbus.yml -i ansible/inventory/test -t beacon-node -u YOUR_USER -K -l nimbus-slaves[5:8]
# faster way to pull the Docker image and recreate the containers (this also stops any running container)
ansible nimbus-slaves[5:8] -i ansible/inventory/test -u YOUR_USER -o -m shell -a "echo; cd /docker/beacon-node-testnet2-1; docker-compose --compatibility pull; docker-compose --compatibility up --no-start; echo '---'" | sed 's/\\n/\n/g'
# build beacon_node in an external volume
ansible nimbus-slaves[5:8] -i ansible/inventory/test -u YOUR_USER -o -m shell -a "echo; cd /docker/beacon-node-testnet2-1; docker-compose --compatibility run --rm beacon_node --build; echo '---'" | sed 's/\\n/\n/g'
```
### create and copy validator keys
Back up "build/data/shared\_witti\_0", if you need to. It will be deleted.
From the nim-beacon-chain repo:
```bash
# If you have "ignorespace" or "ignoreboth" in HISTCONTROL in your ".bashrc", you can prevent
# the key from being stored in your command history by prefixing it with a space.
# See https://www.linuxjournal.com/content/using-bash-history-more-efficiently-histcontrol
./docker/shared_testnet/validator_keys.sh 0xYOUR_ETH1_PRIVATE_GOERLI_KEY
```
### start the containers
From the "infra-nimbus" repo:
```bash
ansible nimbus-slaves[5:8] -i ansible/inventory/test -u YOUR_USER -o -m shell -a "echo; cd /docker/beacon-node-testnet2-1; docker-compose --compatibility up -d; echo '---'" | sed 's/\\n/\n/g'
```

View File

@ -0,0 +1,32 @@
#!/bin/bash
set -e
cd "$(dirname "${BASH_SOURCE[0]}")/../.."
GROUP=0
TOTAL=$(ls -d ../nimbus-private/altona_deposits/validators/* | wc -l)
#echo "TOTAL=${TOTAL}"
PER_GROUP=$((TOTAL / 4))
#echo "PER_GROUP=${PER_GROUP}"
for N in $(seq 6 9); do
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo rm -rf /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_altona_0/secrets"
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo rm -rf /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_altona_0/validators"
#echo GROUP="${GROUP}"
for TARGET in "validators" "secrets"; do
DIR_NO=0
ls -d ../nimbus-private/altona_deposits/${TARGET}/* | while read DIR; do
if [[ $DIR_NO -ge $((GROUP * PER_GROUP)) && $DIR_NO -lt $(( (GROUP + 1) * PER_GROUP )) ]]; then
#echo "DIR_NO=${DIR_NO}"
#echo "$DIR"
rsync -a -zz --rsync-path="sudo rsync" "$DIR" node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net:/docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_altona_0/${TARGET}/
fi
DIR_NO=$((DIR_NO + 1))
done
done
GROUP=$((GROUP + 1))
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo chown -R dockremap:dockremap /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_altona_0/secrets"
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo chown -R dockremap:dockremap /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_altona_0/validators"
done

View File

@ -0,0 +1,106 @@
#!/bin/bash
set -e
cd "$(dirname "${BASH_SOURCE[0]}")"
####################
# argument parsing #
####################
! getopt --test > /dev/null
if [ ${PIPESTATUS[0]} != 4 ]; then
echo '`getopt --test` failed in this environment.'
exit 1
fi
OPTS="h"
LONGOPTS="help,network:,build,run"
# default values
NETWORK="altona"
BUILD=0
RUN=0
print_help() {
cat <<EOF
Usage: $(basename $0) <options> -- <beacon_node options>
-h, --help this help message
--network default: ${NETWORK}
--build build the beacon_node
--run run the beacon_node
EOF
}
! PARSED=$(getopt --options=${OPTS} --longoptions=${LONGOPTS} --name "$0" -- "$@")
if [ ${PIPESTATUS[0]} != 0 ]; then
# getopt has complained about wrong arguments to stdout
exit 1
fi
# read getopt's output this way to handle the quoting right
eval set -- "$PARSED"
while true; do
case "$1" in
-h|--help)
print_help
exit
;;
--network)
NETWORK="$2"
shift 2
;;
--build)
BUILD=1
shift
;;
--run)
RUN=1
shift
;;
--)
shift
break
;;
*)
echo "argument parsing error"
print_help
exit 1
esac
done
# docker-compose.yml inserts newlines in our options
if [[ "$(echo $1 | tr -d '[:space:]')" == "--" ]]; then
shift
fi
EXTRA_ARGS="$@"
#########
# build #
#########
if [[ "$BUILD" == "1" ]]; then
# "/root/.cache/nimbus" is the external bind-mounted dir, preserved between runs
cd /root/.cache/nimbus
[[ -d nim-beacon-chain ]] || git clone https://github.com/status-im/nim-beacon-chain.git
cd nim-beacon-chain
git config pull.rebase false
git checkout devel
git pull
# don't use too much RAM
make update
make LOG_LEVEL="TRACE" NIMFLAGS="-d:insecure -d:testnet_servers_image --parallelBuild:1" SCRIPT_PARAMS="--skipGoerliKey --writeLogFile=false --buildOnly" ${NETWORK}
fi
#######
# run #
#######
if [[ "$RUN" == "1" ]]; then
cd /root/.cache/nimbus/nim-beacon-chain
echo $(make SCRIPT_PARAMS="--skipGoerliKey --writeLogFile=false --runOnly --printCmdOnly" ${NETWORK} | tail -n 1) ${EXTRA_ARGS}
# make sure Docker's SIGINT reaches the beacon_node binary
eval $(make SCRIPT_PARAMS="--skipGoerliKey --writeLogFile=false --runOnly --printCmdOnly" ${NETWORK} | tail -n 1) ${EXTRA_ARGS}
fi

View File

@ -0,0 +1,31 @@
#!/bin/bash
# This script creates validator keys and uploads them to remote servers,
# assuming your local username is the same as the remote one.
set -e
cd "$(dirname "${BASH_SOURCE[0]}")/../.."
[[ -z "$1" ]] && { echo "Usage: $(basename $0) YOUR_ETH1_PRIVATE_GOERLI_KEY"; exit 1; }
# TODO: make "witti" a parameter
echo -ne "About to delete \"build/data/shared_witti_0\".\nMake a backup, if you need to, then press Enter. >"
read TMP
make clean-witti
for N in $(seq 6 9); do
make SCRIPT_PARAMS="--becomeValidatorOnly --privateGoerliKey=$1" witti && \
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo rm -rf /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_witti_0/secrets" && \
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo rm -rf /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_witti_0/validators" && \
rsync -a -zz --rsync-path="sudo rsync" build/data/shared_witti_0/{secrets,validators} node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net:/docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_witti_0/ && \
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo chown -R dockremap:dockremap /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_witti_0/secrets" && \
ssh node-0${N}.aws-eu-central-1a.nimbus.test.statusim.net "sudo chown -R dockremap:dockremap /docker/beacon-node-testnet2-1/data/nim-beacon-chain/build/data/shared_witti_0/validators"
rm -rf build/data/shared_witti_0/{secrets,validators}
# if we're doing it too fast, we get {"code":-32000,"message":"replacement transaction underpriced"}
# or {"code":-32000,"message":"nonce too low"}
echo "Sleeping..."
sleep 120
done

View File

@ -1,10 +1,9 @@
# Summary
- [Introduction](./intro.md)
- [What is Beacon Chain?](./beacon-chain.md)
- [What is Nimbus?](./nimbus.md)
- [Become a Validator](./validator.md)
- [Installation](./install.md)
- [Become a Validator](./validator.md)
- [Command-line Options](./cli.md)
- [API](./api.md)
- [Advanced Usage for Developers](./advanced.md)
- [FAQs](./faq.md)

View File

@ -8,7 +8,7 @@ The following sections explain how to setup your build environment on your platf
Install Mingw-w64 for your architecture using the "[MinGW-W64 Online
Installer](https://sourceforge.net/projects/mingw-w64/files/)" (first link
under the directory listing). Run it and select your architecture in the setup
menu ("i686" on 32-bit, "x86\_64" on 64-bit), set the threads to "win32" and
menu (`i686` on 32-bit, `x86_64` on 64-bit), set the threads to `win32` and
the exceptions to "dwarf" on 32-bit and "seh" on 64-bit. Change the
installation directory to "C:\mingw-w64" and add it to your system PATH in "My
Computer"/"This PC" -> Properties -> Advanced system settings -> Environment
@ -17,6 +17,7 @@ Variables -> Path -> Edit -> New -> C:\mingw-w64\mingw64\bin (it's "C:\mingw-w64
Install [Git for Windows](https://gitforwindows.org/) and use a "Git Bash" shell to clone and build nim-beacon-chain.
If you don't want to compile PCRE separately, you can fetch pre-compiled DLLs with:
```bash
mingw32-make # this first invocation will update the Git submodules
mingw32-make fetch-dlls # this will place the right DLLs for your architecture in the "build/" directory
@ -30,14 +31,13 @@ You can now follow those instructions in the previous section by replacing `make
mingw32-make test # run the test suite
```
### Linux, MacOS
### Linux, macOS
After cloning the repo:
```bash
make # The first `make` invocation will update all Git submodules and prompt you to run `make` again.
# It's only required once per Git clone. You'll run `make update` after each `git pull`, in the future,
# to keep those submodules up to date.
# Build beacon_node and all the tools, using 4 parallel Make jobs
make -j4
# Run tests
make test
@ -48,6 +48,7 @@ make update
```
To run a command that might use binaries from the Status Nim fork:
```bash
./env.sh bash # start a new interactive shell with the right env vars set
which nim
@ -62,9 +63,9 @@ nim --version # Nimbus is tested and supported on 1.0.2 at the moment
We recommend you remove any cover or use a fan; the Raspberry Pi will get hot (85°C) and throttle.
* Raspberry PI 3b+ or Raspberry Pi 4b.
* 64gb SD Card (less might work too, but the default recommended 4-8GB will probably be too small)
* [Rasbian Buster Lite](https://www.raspberrypi.org/downloads/raspbian/) - Lite version is enough to get going and will save some disk space!
- Raspberry PI 3b+ or Raspberry Pi 4b.
- 64gb SD Card (less might work too, but the default recommended 4-8GB will probably be too small)
- [Raspbian Buster Lite](https://www.raspberrypi.org/downloads/raspbian/) - Lite version is enough to get going and will save some disk space!
Assuming you're working with a freshly written image:
@ -129,4 +130,3 @@ make -j$(nproc) NIMFLAGS="-d:release" USE_MULTITAIL=yes eth2_network_simulation
```bash
make USE_LIBBACKTRACE=0 # expect the resulting binaries to be 2-3 times slower
```

View File

@ -1 +1,82 @@
# API
NBC exposes APIs for querying the state of the application at runtime.
:note: Where applicable, this API mimics https://github.com/ethereum/eth2.0-APIs, with the exception that JSON-RPC is used instead of HTTP REST: method names, parameters and results are equivalent except for the encoding / access method.
## Introduction
The NBC API is implemented using JSON-RPC 2.0. To query it, you can use a JSON-RPC library in the language of your choice, or a tool like `curl` to access it from the command line. A tool like [jq](https://stedolan.github.io/jq/) is helpful to pretty-print the responses.
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"peers","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```
Before you can access the API, make sure it's enabled using the RPC flag (`beacon_node --rpc`):
```
--rpc Enable the JSON-RPC server.
--rpc-port HTTP port for the JSON-RPC service.
--rpc-address Listening address of the RPC server.
```
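For example, starting the node with the flags below would serve the JSON-RPC API on the address and port used by the `curl` examples on this page (the values are illustrative, not verified defaults):
```
./build/beacon_node --rpc --rpc-address=127.0.0.1 --rpc-port=9190
```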
## Beacon Node API
### getBeaconHead
The latest head slot, as chosen by the latest fork choice.
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"getBeaconHead","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```
### getChainHead
Show chain head information, including head, justified and finalized checkpoints.
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"getChainHead","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```
### getSyncing
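Reports whether the node is currently syncing. The query below is a sketch that assumes the method takes no parameters, mirroring the other examples on this page:
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"getSyncing","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```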
### getBeaconBlock
### getBeaconState
### getNetworkPeerId
### getNetworkPeers
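A sketch of querying the connected peers, again assuming the method takes no parameters, like `peers` further below:
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"getNetworkPeers","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```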
### getNetworkEnr
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"getNetworkEnr","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```
## Validator API
## Administrative / Debug API
### getNodeVersion
Show version of the software
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"getNodeVersion","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```
### getSpecPreset
Show spec constants in use.
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"getSpecPreset","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```
### peers
Show a list of peers that the beacon node is connected to.
```
curl -d '{"jsonrpc":"2.0","id":"id","method":"peers","params":[] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```

View File

@ -1,18 +0,0 @@
# What is Beacon Chain?
A complete introduction about the beacon chain can be found in the [Ethereum 2.0 blog series](https://our.status.im/two-point-oh-the-beacon-chain/).
In short, the beacon chain is a **new type of blockchain** to help the Ethereum blockchain to smoothly transfer its consensus algorithm from PoW (Proof of Work) to PoS (Proof of Stake), aka Ethereum 2.0. You can also see it as a hybrid PoS + PoW solution.
## Differences Compared to Ethereum 1.0
In traditional PoW, those that propose new blocks are called **_miners_**, whereas in PoS, they are called **_validators_**. In essence, _miners_ rely on actual hardware (such as some specifically manufactured mining machines), while _validators_ rely on just software.
## What it is Like to Be a Validator?
It is obvious that you must have enough computing power or dedicated hardware in order to be a miner, but how about being a validator? Here is a brief overview:
1. A special smart contract named **_deposit contract_** is deployed on the original Ethereum blockchain. Note that in this case, the new beacon chain and the original blockchain co-exists.
2. To "register" as a validator, you have to first deposit **_32 Ether_** from your account to this smart contract.
3. Run the beacon node and wait for the network to sync before your validator is activated.
4. That's all! Remember to stay connected to the network, or you may lose your deposit. :P

109
docs/src/cli.md Normal file
View File

@ -0,0 +1,109 @@
# Command-line Options
You can run your customized beacon node using the `beacon_node` executable. The available options are shown below - you can also run `beacon_node --help` for a reminder.
## Prerequisites
Specifying a genesis file is mandatory to run this executable. You can either download one from the official eth2 testnets repository [here](https://github.com/eth2-clients/eth2-testnets/blob/master/shared/witti/genesis.ssz) or generate your own, as done [here](https://github.com/status-im/nim-beacon-chain/blob/db92c2f2549a339be60896c3907cefdb394b5e11/scripts/launch_local_testnet.sh#L154) when starting a local testnet. The path to the genesis file is then passed to the node, as shown [here](https://github.com/status-im/nim-beacon-chain/blob/db92c2f2549a339be60896c3907cefdb394b5e11/scripts/launch_local_testnet.sh#L229).
For example, download a genesis file and then run the following command to start the node:
<img src="./img/beacon_node_example.PNG" alt="" style="margin: 0 40 0 40"/>
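A minimal sketch of that flow, assuming the genesis file has already been downloaded next to the binary (the `--state-snapshot` flag is listed in the usage output below; adjust paths for your setup):
```bash
# Hypothetical local file name for the downloaded genesis state.
./build/beacon_node --state-snapshot=genesis.ssz
```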
## Usage
```
$ ./build/beacon_node --help
Nimbus beacon node v0.3.0 (e537ed9)
Copyright (c) 2019-2020 Status Research & Development GmbH
Nim Compiler Version 1.3.1 [Windows: amd64]
Compiled at 2020-04-16
Copyright (c) 2006-2020 by Andreas Rumpf
git hash: b4e9f8e814373fc38741736197d88475663ce758
active boot switches: -d:release
Usage:
beacon_node [OPTIONS]... command
The following options are available:
--log-level Sets the log level.
--eth1-network The Eth1 network tracked by the beacon node.
--quick-start Run in quickstart mode
-d, --data-dir The directory where nimbus will store all blockchain data.
--web3-url URL of the Web3 server to observe Eth1.
--deposit-contract Address of the deposit contract.
-b, --bootstrap-node Specifies one or more bootstrap nodes to use when connecting to the
network.
--bootstrap-file Specifies a line-delimited file of bootstrap Ethereum network addresses.
--listen-address Listening address for the Ethereum LibP2P traffic.
--tcp-port Listening TCP port for Ethereum LibP2P traffic.
--udp-port Listening UDP port for node discovery.
--max-peers The maximum number of peers to connect to
--nat Specify method to use for determining public address. Must be one of: any,
none, upnp, pmp, extip:<IP>.
-v, --validator Path to a validator private key, as generated by makeDeposits.
-s, --state-snapshot Json file specifying a recent state snapshot.
--node-name A name for this node that will appear in the logs. If you set this to
'auto', a persistent automatically generated ID will be selected for each
--dataDir folder.
--verify-finalization Specify whether to verify finalization occurs on schedule, for testing.
--stop-at-epoch A positive epoch selects the epoch at which to stop.
--metrics Enable the metrics server.
--metrics-address Listening address of the metrics server.
--metrics-port Listening HTTP port of the metrics server.
--status-bar Display a status bar at the bottom of the terminal screen.
--status-bar-contents Textual template for the contents of the status bar.
--rpc Enable the JSON-RPC server
--rpc-port HTTP port for the JSON-RPC service.
--rpc-address Listening address of the RPC server
--dump Write SSZ dumps of blocks, attestations and states to data dir
Available sub-commands:
beacon_node importValidator [OPTIONS]...
The following options are available:
--keyfile File with validator key to be imported (in hex form).
beacon_node createTestnet [OPTIONS]...
The following options are available:
-d, --validators-dir Directory containing validator descriptors named 'vXXXXXXX.deposit.json'.
--total-validators The number of validators in the newly created chain.
--first-validator Index of first validator to add to validator list.
--last-user-validator The last validator index that will free for taking from a testnet
participant.
--bootstrap-address The public IP address that will be advertised as a bootstrap node for the
testnet.
--bootstrap-port The TCP/UDP port that will be used by the bootstrap node.
-g, --genesis-offset Seconds from now to add to genesis time.
--output-genesis Output file where to write the initial state snapshot.
--with-genesis-root Include a genesis root in 'network.json'.
--output-bootstrap-file Output file with list of bootstrap nodes for the network.
beacon_node makeDeposits [OPTIONS]...
The following options are available:
--quickstart-deposits Number of quick-start deposits to generate.
--random-deposits Number of secure random deposits to generate.
--deposits-dir Folder to write deposits to.
--deposit-private-key Private key of the controlling (sending) account
--min-delay Minimum possible delay between making two deposits (in seconds)
--max-delay Maximum possible delay between making two deposits (in seconds)
beacon_node query command
Available sub-commands:
beacon_node query get <getQueryPath>
<getQueryPath> REST API path to evaluate
```
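As an illustration only, a local run combining several of the options above might look like the following - every flag is taken from the list, but the values are placeholders to adapt to your setup:
```bash
./build/beacon_node \
  --data-dir=build/data/my_node \
  --state-snapshot=genesis.ssz \
  --metrics --metrics-port=8008 \
  --rpc --rpc-port=9190 \
  --log-level=DEBUG
```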

View File

@ -1,7 +1,6 @@
# Contribute
Follow these steps to contribute to this book!
Follow these steps to contribute to this book!
We use a utility tool called mdBook to create online books from Markdown files.
@ -18,7 +17,27 @@ We use an utility tool called mdBook to create online books from Markdown files.
## Build and Deploy
1. `mdbook build`
2. `make publish-book`
The first step is to submit a pull request to the [devel branch](https://github.com/status-im/nim-beacon-chain/tree/devel).
Then, after it is merged, do the following under our main repository:
1. `cd nim-beacon-chain`
2. `git checkout devel`
3. `git pull`
4. `make update` (This is to update the submodules to the latest version)
5. `make publish-book`
## Troubleshooting
If you see file conflicts in the pull request, this may be because you created your new branch from an old version of the `devel` branch. Update your branch using the following commands:
```
git checkout devel
git pull
make update
git checkout readme
git merge devel
# use something like `git mergetool` to resolve conflicts, then read the instructions for completing the merge (usually just a `git commit`)
# check the output of `git diff devel`
```
Thank you so much for helping the decentralized and open source community. :)

View File

@ -1 +1,35 @@
# Frequently Asked Questions
## 1. What is Beacon Chain?
A complete introduction about the beacon chain can be found in the [Ethereum 2.0 blog series](https://our.status.im/two-point-oh-the-beacon-chain/).
In short, the beacon chain is a **new type of blockchain** that helps the Ethereum blockchain smoothly transition its consensus algorithm from PoW (Proof of Work) to PoS (Proof of Stake), aka Ethereum 2.0.
## 2. Differences Between Beacon Chain and Ethereum 1.0
In traditional PoW, those that propose new blocks are called **_miners_**, whereas in PoS, they are called **_validators_**. In essence, _miners_ rely on actual hardware (such as some specifically manufactured mining machines), while _validators_ rely on just software and a good network connection.
## 3. What is it Like to Be a Validator?
It is obvious that you must have enough computing power or dedicated hardware in order to be a miner, but how about being a validator? Here is a brief overview:
1. A special smart contract named **_deposit contract_** is deployed on the original Ethereum blockchain. Note that in this case, the new beacon chain and the original blockchain co-exist.
2. To "register" as a validator, you have to first deposit **_32 Ether_** from your account to this smart contract.
3. Run the beacon node and wait for the network to sync before your validator is activated.
4. That's all! Remember to stay connected to the network, or you may lose some of your deposit, as punishment, depending on how long you're offline. :P
## 4. What is Nimbus?
In a sentence, Nimbus is an Ethereum 1.0 & 2.0 Client for Resource-Restricted Devices.
It is open sourced at [github.com/status-im/nimbus](https://github.com/status-im/nimbus). Development progress and updates can be viewed at the [Nimbus blog](https://our.status.im/tag/nimbus/).
## Why are metrics not working?
Metrics are currently implemented using an HTTP server that hasn't been hardened sufficiently to be exposed as a public endpoint - it must thus be enabled explicitly at build time:
```
make NIMFLAGS="-d:insecure"
beacon_node --metrics ...
```

Binary file not shown (new image, 98 KiB).

View File

@ -1,15 +1,18 @@
# Installation
Nimbus beacon chain can run on Linux, MacOS, Windows, and Andriod. At the moment, Nimbus has to be built from source.
Beacon chain can run on Linux, macOS, Windows, and Android. At the moment, Nimbus has to be built from source.
## External Dependencies
## External Dependencies
- Developer tools (C compiler, Make, Bash, Git)
- PCRE
Nim is not an external dependency; Nimbus will build its own local copy.
## Linux
On common Linux distributions the dependencies can be installed with:
```sh
# Debian and Ubuntu
sudo apt-get install build-essential git libpcre3-dev
@ -21,7 +24,7 @@ dnf install @development-tools pcre
yourAURmanager -S base-devel pcre-static
```
### MacOS
### macOS
Assuming you use [Homebrew](https://brew.sh/) to manage packages
@ -36,12 +39,16 @@ It also provides a downloading script for prebuilt PCRE.
### Android
* Install the [Termux](https://termux.com) app from FDroid or the Google Play store
* Install a [PRoot](https://wiki.termux.com/wiki/PRoot) of your choice following the instructions for your preferred distribution.
Note, the Ubuntu PRoot is known to contain all Nimbus prerequisites compiled on Arm64 architecture (common architecture for Android devices).
- Install the [Termux](https://termux.com) app from FDroid or the Google Play store
- Install a [PRoot](https://wiki.termux.com/wiki/PRoot) of your choice following the instructions for your preferred distribution.
Note, the Ubuntu PRoot is known to contain all Nimbus prerequisites compiled on Arm64 architecture (common architecture for Android devices).
*Assuming Ubuntu PRoot is used*
_Assuming Ubuntu PRoot is used_
```sh
apt install build-essential git libpcre3-dev
```
## Next steps
Once you've installed the build tools, you're ready to move on to launching the beacon node and becoming a [validator](./validator.md).

View File

@ -1,23 +1,24 @@
# Nimbus Beacon Chain Book
# The nim-beacon-chain Book
_Documentation for Nimbus Beacon Chain users and developers._
Nimbus beacon chain is a research implementation of the beacon chain component of the upcoming Ethereum Serenity upgrade, aka Eth2.
- Open sourced at [github.com/status-im/nim-beacon-chain/docs](github.com/status-im/nim-beacon-chain/docs).
- Specification of our implementation can be found at [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/tree/v0.11.1#phase-0).
- Open sourced at [github.com/status-im/nim-beacon-chain](https://github.com/status-im/nim-beacon-chain/tree/master).
- Specification of our implementation can be found at [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/tree/v0.12.1#phase-0).
## Overview
In this book, we will cover:
1. [What is beacon chain](./beacon-chain.md) and [what is Nimbus](./nimbus.md) to equip you with some basic knowledge.
2. How to [become a validator](./validator.md) in Ethereum as a user.
3. [Installation steps](./install.md) for nimbus beacon chain.
4. The [api documentation](./api.md) for interested developers.
5. [Advanced usage](./advanced.md) for developers.
6. Common [questions and answers](./faq.md) to satisfy your curiosity.
7. How to [contribute](./contribute.md) to this book.
1. [What is beacon chain](./faq.md#1-what-is-beacon-chain) and [what is Nimbus](./faq.md#4-what-is-nimbus) to equip you with some basic knowledge.
2. [Installation steps](./install.md) outline the prerequisites to get started.
3. How to [become a validator](./validator.md) in Ethereum 2.0 as a user, for example on the Altona testnet.
4. [CLI](./cli.md) for running your customized nimbus beacon node.
5. [API](./api.md) for monitoring your node through `http`.
6. [Advanced usage](./advanced.md) for developers.
7. Common [questions and answers](./faq.md) to satisfy your curiosity.
8. How to [contribute](./contribute.md) to this book.
Feel free to give us feedback on how to improve as well as contribute to our book on github. :)

View File

@ -1,6 +0,0 @@
# What is Nimbus?
In a sentence, Nimbus is an Ethereum 1.0 & 2.0 Client for Resource-Restricted Devices.
It is open sourced at [github.com/status-im/nimbus](github.com/status-im/nimbus). Development progress and updates can be viewed at the [Nimbus blog](https://our.status.im/tag/nimbus/).
## Why should you choose Nimbus?

View File

@ -1,26 +1,29 @@
# Become a Validator
To become a validator, you have to first connect to a testnet, deposit your Ethers, and sync with the network.
To become a validator, you need to install the beacon chain software, acquire 32 ETH, set up your validator account and register with the deposit contract on Ethereum.
There is currently no Eth2 mainnet - all networks are testnets.
## Recommended Testnets
Though Nimbus can connect to any of the testnets published in the [eth2-clients/eth2-testnets repo](https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus), below are the recommended ones:
- Public Testnet: [witti](https://github.com/goerli/witti) ([explorer](https://witti.beaconcha.in))
- Local Testnet: testnet0
- Multi-client Testnet: [altona](https://github.com/goerli/altona) ([explorer](https://altona.beaconcha.in))
- Nimbus Testnet: testnet0 (experimental, not always active)
## Connecting to Testnets
## Altona
Before we start, we have to obtain 32 Ethers on the Goerli testnet. Then, we can deposit 32 Ethers to the registration smart contract to become a validator.
### Initial setup
1. Open your [MetaMask](https://metamask.io/) wallet, switch to the `Goerli Test Network` option from the top right cornor.
Before we start, we have to obtain 32 ETH on the Goerli testnet. Then, we can deposit the 32 ETH to the registration smart contract to become a validator.
1. Open your [MetaMask](https://metamask.io/) wallet, switch to the `Goerli Test Network` option from the top right corner.
2. Copy your account address by clicking on one of your accounts.
3. Post your account address on a social media platform (Twitter or Facebook). Copy the url to the post.
4. Paste your post URL on the [Goerli faucet](https://faucet.goerli.mudit.blog/) and select `Give me Ether > 37.5 Ethers` from the top right corner of the page.
5. Wait for a few seconds and return to your MetaMask wallet to check whether you have successfully received the test ETH.
6. Once the [prerequisites](./install.md) are installed, you can connect to testnet0 with the following commands: <br>
6. Once the [prerequisites](./install.md) are installed, you can connect to the altona testnet with the following commands: <br>
- Change `testnet0` to `witti` to connect to the witti testnet.
- **_Remember to replace `make` with `mingw32-make` if using Windows._**
```bash
@ -29,13 +32,13 @@ cd nim-beacon-chain
git checkout devel
git pull
make update
make testnet0 # This will build Nimbus and all other dependencies
# and connect you to testnet0
make altona # This will build Nimbus and all other dependencies
# and connect you to altona
```
<img src="./img/connect_testnet.PNG" alt="" style="margin: 0 40 0 40"/>
7. The testnet should now be up and running. Then, you will be prompted to enter your private key of the account you want to deposit the 32 Ether from. Find your private key from MetaMask as below:
7. You will be prompted to enter the private key of the account you want to deposit the 32 ETH from. Find your private key in MetaMask as shown below:
<img src="./img/export_pkey.PNG" alt="" width="200" style="margin: 0 40 0 40"/>
@ -45,16 +48,47 @@ make testnet0 # This will build Nimbus and all other dependencies
<img src="./img/deposit_sent.PNG" alt="" style="margin: 0 40 0 40"/>
9. Now you should be syncing with the network. It may take a while (may be quite a few hours). You can know that you are synced if you see the following output.
9. The beacon chain client will start syncing the network while your deposit is being processed. As soon as the deposit has been added, the client will start performing validation duties.
<img src="./img/success.PNG" alt="" style="margin: 0 40 0 40"/>
You can also get a brief estimate of the time remaining until your network gets synced by comparing the output `epoch` value and the one in the blockchain explorer (the [witti explorer](https://witti.beaconcha.in) for example).
You can also get a rough estimate of the time remaining until your node is synced by comparing the `epoch` value in the output with the one shown in a blockchain explorer (the [altona explorer](https://altona.beaconcha.in), for example).
## Trouble Shooting
### Upgrading
1. The directory that stores the blockchain data of the testnet is `build/data/testnet0` (replace `testnet0` with other testnet names). Delete this folder if you want to start over. For example, you can start over with a fresh storage if you entered a wrong private key.
When restarting the beacon node, the software will resume from where it left off, using your previous deposits.
```
cd nim-beacon-chain
git pull
make update # Update dependencies
make altona # Restart using same keys as last run
```
## Key management
Keys are stored in the `build/data/testnet_name/` folder, under `secrets` and `validators` - make sure to keep these folders backed up.
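One simple way to back them up is a plain `tar` archive; the testnet directory name below is just the altona example used in the troubleshooting section:
```bash
tar czf nbc-keys-backup.tar.gz \
  build/data/shared_altona_0/secrets \
  build/data/shared_altona_0/validators
```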
## Metrics
Metrics are not included in the binary by default - to enable them, use the following options when starting the client:
```
make NIMFLAGS="-d:insecure" altona
```
You can now browse the metrics using a browser and connecting to:
http://localhost:8008/metrics
Make sure to protect this port as the http server used is not considered secure and should not be used by untrusted peers.
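To quickly verify that the metrics server is responding, you can fetch the endpoint locally:
```bash
curl -s http://localhost:8008/metrics | head -n 20
```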
## Troubleshooting
1. The directory that stores the blockchain data of the testnet is `build/data/shared_altona_0` (replace `altona` with other testnet names). Delete this folder if you want to start over. For example, you can start over with a fresh storage if you entered a wrong private key.
2. Currently, you have to switch to the devel branch in order to run the validator node successfully.
3. Everytime you want to update your node to the latest version, run `git pull`, `make update`, and then `make testnet0`.
3. Every time you want to update your node to the latest version, run `git pull`, `make update`, and then `make altona` (see the sketch after this list).
4. If `make update` has been running for too long, you can use `make update V=1` or `make update V=2` for verbose output.
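The update flow from item 3, as a single copy-pasteable sequence:
```bash
cd nim-beacon-chain
git pull
make update   # update dependencies
make altona   # rebuild and reconnect to the altona testnet
```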

17
env.sh
View File

@ -4,5 +4,22 @@
# and we fall back to a Zsh-specific special var to also support Zsh.
REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
ABS_PATH="$(cd ${REL_PATH}; pwd)"
# Activate nvm only when this file is sourced without arguments:
if [ -z "$*" ]; then
if command -v nvm > /dev/null; then
nvm use
command -v ganache-cli > /dev/null || { npm install -g ganache-cli; }
else
cat <<EOF
In order to use Ganache (a development ETH1 chain), please install NVM with:
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash
For more info:
https://github.com/nvm-sh/nvm
EOF
fi
fi
source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh
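A minimal usage sketch for the snippet above - assuming NVM is already installed, sourcing the file with no arguments activates it and installs `ganache-cli` on first use:
```bash
source ./env.sh
ganache-cli --version   # should print a version once the install has completed
```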

View File

@ -12,7 +12,7 @@
},
{
"datasource": "Prometheus",
"enable": true,
"enable": false,
"expr": "changes(beacon_current_epoch{node=\"0\"}[2s])",
"hide": false,
"iconColor": "#FFA6B0",
@ -44,7 +44,8 @@
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 13,
"id": 23,
"iteration": 1593300421721,
"links": [],
"panels": [
{
@ -210,14 +211,10 @@
"steppedLine": false,
"targets": [
{
"expr": "libp2p_open_bufferstream{node=\"${node}\"}",
"legendFormat": "BufferStream",
"expr": "libp2p_open_streams{node=\"${node}\"}",
"interval": "",
"legendFormat": "{{type}}",
"refId": "A"
},
{
"expr": "libp2p_open_connection{node=\"${node}\"}",
"legendFormat": "Connection",
"refId": "B"
}
],
"thresholds": [],
@ -1742,7 +1739,6 @@
{
"allValue": null,
"current": {
"tags": [],
"text": "0",
"value": "0"
},
@ -1792,5 +1788,5 @@
"variables": {
"list": []
},
"version": 38
}
"version": 3
}

View File

@ -9,12 +9,12 @@ import
# Standard library
os, tables,
# Status libraries
confutils/defs, serialization,
confutils/defs, serialization, chronicles,
# Beacon-chain
../beacon_chain/spec/[
datatypes, crypto, helpers, beaconstate, validator,
state_transition_block, state_transition_epoch],
../beacon_chain/[state_transition, extras],
state_transition_block, state_transition_epoch, state_transition],
../beacon_chain/extras,
../beacon_chain/ssz/[merkleization, ssz_serialization]
# Nimbus Bench - Scenario configuration

View File

@ -1,9 +1,9 @@
import
confutils, stats, chronicles, strformat, tables,
stew/byteutils,
../beacon_chain/[beacon_chain_db, block_pool, extras, state_transition],
../beacon_chain/spec/[crypto, datatypes, digest, helpers],
../beacon_chain/sszdump,
../beacon_chain/[beacon_chain_db, block_pool, extras],
../beacon_chain/spec/[crypto, datatypes, digest, helpers, state_transition, validator],
../beacon_chain/sszdump, ../beacon_chain/ssz/merkleization,
../research/simutils,
eth/db/[kvstore, kvstore_sqlite3]
@ -18,6 +18,7 @@ type
DbCmd* = enum
bench
dumpState
dumpBlock
rewindState
DbConf = object
@ -32,15 +33,20 @@ type
.}: DbCmd
of bench:
validate* {.
defaultValue: true
desc: "Enable BLS validation" }: bool
slots* {.
defaultValue: 50000
desc: "Number of slots to run benchmark for".}: uint64
of dumpState:
stateRoot* {.
argument
desc: "State roots to save".}: seq[string]
of dumpBlock:
blockRootx* {.
argument
desc: "Block roots to save".}: seq[string]
of rewindState:
blockRoot* {.
argument
@ -70,7 +76,7 @@ proc cmdBench(conf: DbConf) =
var
blockRefs: seq[BlockRef]
blocks: seq[SignedBeaconBlock]
blocks: seq[TrustedSignedBeaconBlock]
cur = pool.head.blck
while cur != nil:
@ -78,6 +84,9 @@ proc cmdBench(conf: DbConf) =
cur = cur.parent
for b in 1..<blockRefs.len: # Skip genesis block
if blockRefs[blockRefs.len - b - 1].slot > conf.slots:
break
withTimer(timers[tLoadBlock]):
blocks.add db.getBlock(blockRefs[blockRefs.len - b - 1].root).get()
@ -88,15 +97,17 @@ proc cmdBench(conf: DbConf) =
withTimer(timers[tLoadState]):
discard db.getState(state[].root, state[].data, noRollback)
let flags = if conf.validate: {} else: {skipBlsValidation}
for b in blocks:
let
isEpoch = state[].data.slot.compute_epoch_at_slot !=
b.message.slot.compute_epoch_at_slot
withTimer(timers[if isEpoch: tApplyEpochBlock else: tApplyBlock]):
discard state_transition(state[], b, flags, noRollback)
if not state_transition(state[], b, {}, noRollback):
dump("./", b, hash_tree_root(b.message))
echo "State transition failed (!)"
quit 1
printTimers(conf.validate, timers)
printTimers(false, timers)
proc cmdDumpState(conf: DbConf) =
let
@ -114,6 +125,21 @@ proc cmdDumpState(conf: DbConf) =
except CatchableError as e:
echo "Couldn't load ", stateRoot, ": ", e.msg
proc cmdDumpBlock(conf: DbConf) =
let
db = BeaconChainDB.init(
kvStore SqStoreRef.init(conf.databaseDir.string, "nbc").tryGet())
for blockRoot in conf.blockRootx:
try:
let root = Eth2Digest(data: hexToByteArray[32](blockRoot))
if (let blck = db.getBlock(root); blck.isSome):
dump("./", blck.get(), root)
else:
echo "Couldn't load ", root
except CatchableError as e:
echo "Couldn't load ", blockRoot, ": ", e.msg
proc cmdRewindState(conf: DbConf) =
echo "Opening database..."
let
@ -145,5 +171,7 @@ when isMainModule:
cmdBench(conf)
of dumpState:
cmdDumpState(conf)
of dumpBlock:
cmdDumpBlock(conf)
of rewindState:
cmdRewindState(conf)

View File

@ -1,16 +1,16 @@
import
confutils, chronicles,
../beacon_chain/spec/[crypto, datatypes],
../beacon_chain/[extras, state_transition],
../beacon_chain/spec/[crypto, datatypes, state_transition],
../beacon_chain/extras,
../beacon_chain/ssz/[merkleization, ssz_serialization]
cli do(pre: string, blck: string, post: string, verifyStateRoot = false):
cli do(pre: string, blck: string, post: string, verifyStateRoot = true):
let
stateY = (ref HashedBeaconState)(
data: SSZ.loadFile(pre, BeaconState),
)
blckX = SSZ.loadFile(blck, SignedBeaconBlock)
flags = if verifyStateRoot: {skipStateRootValidation} else: {}
flags = if not verifyStateRoot: {skipStateRootValidation} else: {}
stateY.root = hash_tree_root(stateY.data)

View File

@ -2,10 +2,10 @@
# https://github.com/nim-lang/Nim/issues/11225
import
stew/ptrops, stew/ranges/ptr_arith,
../beacon_chain/[extras, state_transition],
stew/ptrops, stew/ranges/ptr_arith, chronicles,
../beacon_chain/extras,
../beacon_chain/spec/[crypto, datatypes, digest, validator, beaconstate,
state_transition_block],
state_transition_block, state_transition],
../beacon_chain/ssz/[merkleization, ssz_serialization]
type
@ -33,8 +33,8 @@ type
FuzzCrashError = object of CatchableError
# TODO: change ptr uint to ptr csize_t when available in newer Nim version.
proc copyState(state: BeaconState, output: ptr byte,
output_size: ptr uint): bool {.raises: [FuzzCrashError, Defect].} =
proc copyState(state: BeaconState, xoutput: ptr byte,
xoutput_size: ptr uint): bool {.raises: [FuzzCrashError, Defect].} =
var resultState =
try:
SSZ.encode(state)
@ -42,18 +42,18 @@ proc copyState(state: BeaconState, output: ptr byte,
# Shouldn't occur as the writer isn't a file
raise newException(FuzzCrashError, "Unexpected failure to serialize.", e)
if unlikely(resultState.len.uint > output_size[]):
if unlikely(resultState.len.uint > xoutput_size[]):
let msg = (
"Not enough output buffer provided to nimbus harness. Provided: " &
$(output_size[]) &
"Not enough xoutput buffer provided to nimbus harness. Provided: " &
$(xoutput_size[]) &
"Required: " &
$resultState.len.uint
)
raise newException(FuzzCrashError, msg)
output_size[] = resultState.len.uint
# TODO: improvement might be to write directly to buffer with OutputStream
xoutput_size[] = resultState.len.uint
# TODO: improvement might be to write directly to buffer with xoutputStream
# and SszWriter (but then need to ensure length doesn't overflow)
copyMem(output, unsafeAddr resultState[0], output_size[])
copyMem(xoutput, unsafeAddr resultState[0], xoutput_size[])
result = true
template decodeAndProcess(typ, process: untyped): bool =
@ -90,22 +90,22 @@ template decodeAndProcess(typ, process: untyped): bool =
raise newException(FuzzCrashError, "Unexpected Exception in state transition", e)
if processOk:
copyState(data.state, output, output_size)
copyState(data.state, xoutput, xoutput_size)
else:
false
proc nfuzz_attestation(input: openArray[byte], output: ptr byte,
output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
proc nfuzz_attestation(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
decodeAndProcess(AttestationInput):
process_attestation(data.state, data.attestation, flags, cache)
proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
proc nfuzz_attester_slashing(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
decodeAndProcess(AttesterSlashingInput):
process_attester_slashing(data.state, data.attesterSlashing, flags, cache)
proc nfuzz_block(input: openArray[byte], output: ptr byte,
output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
# There's not a perfect approach here, but it's not worth switching the rest
# and requiring HashedBeaconState (yet). So to keep consistent, puts wrapper
# only in one function.
@ -120,35 +120,35 @@ proc nfuzz_block(input: openArray[byte], output: ptr byte,
decodeAndProcess(BlockInput):
state_transition(data, data.beaconBlock, flags, noRollback)
proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
proc nfuzz_block_header(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
decodeAndProcess(BlockHeaderInput):
process_block_header(data.state, data.beaconBlock.message, flags, cache)
proc nfuzz_deposit(input: openArray[byte], output: ptr byte,
output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
proc nfuzz_deposit(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
decodeAndProcess(DepositInput):
process_deposit(data.state, data.deposit, flags)
proc nfuzz_proposer_slashing(input: openArray[byte], output: ptr byte,
output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
proc nfuzz_proposer_slashing(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
decodeAndProcess(ProposerSlashingInput):
process_proposer_slashing(data.state, data.proposerSlashing, flags, cache)
proc nfuzz_voluntary_exit(input: openArray[byte], output: ptr byte,
output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
proc nfuzz_voluntary_exit(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
decodeAndProcess(VoluntaryExitInput):
process_voluntary_exit(data.state, data.exit, flags)
# Note: Could also accept raw input pointer and access list_size + seed here.
# However, list_size needs to be known also outside this proc to allocate output.
# However, list_size needs to be known also outside this proc to allocate xoutput.
# TODO: rework to copy immediatly in an uint8 openArray, considering we have to
# go over the list anyhow?
proc nfuzz_shuffle(input_seed: ptr byte, output: var openArray[uint64]): bool
proc nfuzz_shuffle(input_seed: ptr byte, xoutput: var openArray[uint64]): bool
{.exportc, raises: [Defect].} =
var seed: Eth2Digest
# Should be OK as max 2 bytes are passed by the framework.
let list_size = output.len.uint64
let list_size = xoutput.len.uint64
copyMem(addr(seed.data), input_seed, sizeof(seed.data))
@ -162,8 +162,8 @@ proc nfuzz_shuffle(input_seed: ptr byte, output: var openArray[uint64]): bool
for i in 0..<list_size:
# ValidatorIndex is currently wrongly uint32 so we copy this 1 by 1,
# assumes passed output is zeroed.
copyMem(offset(addr output, i.int), shuffled_seq[i.int].unsafeAddr,
# assumes passed xoutput is zeroed.
copyMem(offset(addr xoutput, i.int), shuffled_seq[i.int].unsafeAddr,
sizeof(ValidatorIndex))
result = true

View File

@ -20,10 +20,11 @@ import
options, random, tables,
../tests/[testblockutil],
../beacon_chain/spec/[
beaconstate, crypto, datatypes, digest, helpers, validator, signatures],
beaconstate, crypto, datatypes, digest, helpers, validator, signatures,
state_transition],
../beacon_chain/[
attestation_pool, block_pool, beacon_node_types, beacon_chain_db,
interop, state_transition, validator_pool],
interop, validator_pool],
eth/db/[kvstore, kvstore_sqlite3],
../beacon_chain/ssz/[merkleization, ssz_serialization],
./simutils

View File

@ -6,10 +6,11 @@ const
bootstrapTxtFileName = "bootstrap_nodes.txt"
bootstrapYamlFileName = "boot_enr.yaml"
depositContractFileName = "deposit_contract.txt"
depositContractBlockFileName = "deposit_contract_block.txt"
genesisFile = "genesis.ssz"
configFile = "config.yaml"
testnetsRepo = "eth2-testnets"
web3Url = "wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a"
web3Url = "wss://goerli.infura.io/ws/v3/6224f3c792cc443fafb64e70a98f871e"
let
testnetsOrg = getEnv("ETH2_TESTNETS_ORG", "eth2-clients")
@ -23,9 +24,108 @@ proc validateTestnetName(parts: openarray[string]): auto =
quit 1
(parts[0], parts[1])
# reduces the error output when interrupting an external command with Ctrl+C
proc execIgnoringExitCode(s: string) =
try:
exec s
except OsError:
discard
proc updateTestnetsRepo(allTestnetsDir, buildDir: string) =
rmDir(allTestnetsDir)
let cwd = system.getCurrentDir()
cd buildDir
exec &"git clone --quiet --depth=1 {testnetsGitUrl}"
cd cwd
proc makePrometheusConfig(nodeID, baseMetricsPort: int, dataDir: string) =
# macOS may not have gnu-getopts installed and in the PATH
execIgnoringExitCode &"""./scripts/make_prometheus_config.sh --nodes """ & $(1 + nodeID) & &""" --base-metrics-port {baseMetricsPort} --config-file "{dataDir}/prometheus.yml""""
proc buildNode(nimFlags, preset, beaconNodeBinary: string) =
exec &"""nim c {nimFlags} -d:"const_preset={preset}" -o:"{beaconNodeBinary}" beacon_chain/beacon_node.nim"""
proc becomeValidator(validatorsDir, beaconNodeBinary, secretsDir, depositContractOpt, privateGoerliKey: string,
becomeValidatorOnly: bool) =
mode = Silent
var privKey = privateGoerliKey
if privKey.len == 0:
echo "\nPlease enter your Goerli Eth1 private key in hex form (e.g. 0x1a2...f3c) in order to become a validator (you'll need access to 32 GoETH)."
echo "Hit Enter to skip this."
# is there no other way to print without a trailing newline?
exec "printf '> '"
privKey = readLineFromStdin()
if privKey.len > 0:
mkDir validatorsDir
mode = Verbose
exec replace(&"""{beaconNodeBinary} deposits create
--count=1
--out-deposits-dir="{validatorsDir}"
--out-secrets-dir="{secretsDir}"
--deposit-private-key={privKey}
--web3-url={web3Url}
{depositContractOpt}
""", "\n", " ")
mode = Silent
if becomeValidatorOnly:
echo "\nDeposit sent."
else:
echo "\nDeposit sent, wait for confirmation then press enter to continue"
discard readLineFromStdin()
proc runNode(dataDir, beaconNodeBinary, bootstrapFileOpt, depositContractOpt,
genesisFileOpt, natConfig: string,
basePort, nodeID, baseMetricsPort, baseRpcPort: int,
printCmdOnly: bool) =
let logLevel = getEnv("LOG_LEVEL")
var logLevelOpt = ""
if logLevel.len > 0:
logLevelOpt = &"""--log-level="{logLevel}" """
mode = Verbose
var cmd: string
if printCmdOnly:
# When you reinvent getopt() and you forget to support repeating the same
# option to overwrite the old value...
cmd = replace(&"""{beaconNodeBinary}
--data-dir="{dataDir}"
--web3-url={web3Url}
{bootstrapFileOpt}
{depositContractOpt}
{genesisFileOpt} """, "\n", " ")
echo &"cd {dataDir}; exec {cmd}"
else:
cd dataDir
cmd = replace(&"""{beaconNodeBinary}
--data-dir="{dataDir}"
--dump
--web3-url={web3Url}
--nat={natConfig}
--tcp-port=""" & $(basePort + nodeID) & &"""
--udp-port=""" & $(basePort + nodeID) & &"""
--metrics
--metrics-port=""" & $(baseMetricsPort + nodeID) & &"""
--rpc
--rpc-port=""" & $(baseRpcPort + nodeID) & &"""
{bootstrapFileOpt}
{logLevelOpt}
{depositContractOpt}
{genesisFileOpt} """, "\n", " ")
execIgnoringExitCode cmd
cli do (skipGoerliKey {.
desc: "Don't prompt for an Eth1 Goerli key to become a validator" .}: bool,
privateGoerliKey {.
desc: "Use this private Eth1 Goerli key to become a validator (careful with this option, the private key will end up in your shell's command history)" .} = "",
specVersion {.
desc: "Spec version"
name: "spec" .} = "v0.11.3",
constPreset {.
desc: "The Ethereum 2.0 const preset of the network (optional)"
name: "const-preset" .} = "",
@ -42,6 +142,26 @@ cli do (skipGoerliKey {.
baseRpcPort {.
desc: "Base rpc port (nodeID will be added to it)" .} = 9190.int,
natConfig {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>",
name: "nat" .} = "any",
writeLogFile {.
desc: "Write a log file in dataDir" .} = true,
buildOnly {.
desc: "Just the build, please." .} = false,
becomeValidatorOnly {.
desc: "Just become a validator." .} = false,
runOnly {.
desc: "Just run it." .} = false,
printCmdOnly {.
desc: "Just print the commands (suitable for passing to 'eval'; might replace current shell)." .} = false,
testnetName {.argument .}: string):
let
nameParts = testnetName.split "/"
@ -52,27 +172,38 @@ cli do (skipGoerliKey {.
buildDir = rootDir / "build"
allTestnetsDir = buildDir / testnetsRepo
rmDir(allTestnetsDir)
cd buildDir
exec &"git clone --quiet --depth=1 {testnetsGitUrl}"
if not (runOnly or becomeValidatorOnly):
updateTestnetsRepo(allTestnetsDir, buildDir)
var
depositContractOpt = ""
bootstrapFileOpt = ""
genesisFileOpt = ""
doBuild, doBecomeValidator, doRun = true
# step-skipping logic
if skipGoerliKey:
doBecomeValidator = false
if buildOnly:
doBecomeValidator = false
doRun = false
if becomeValidatorOnly:
doBuild = false
doRun = false
if runOnly:
doBuild = false
doBecomeValidator = false
let
testnetDir = allTestnetsDir / team / testnet
genesisFilePath = testnetDir / genesisFile
let testnetDir = allTestnetsDir / team / testnet
if not system.dirExists(testnetDir):
echo &"No metadata files exists for the '{testnetName}' testnet"
quit 1
proc checkRequiredFile(fileName: string) =
let filePath = testnetDir / fileName
if not system.fileExists(filePath):
echo &"The required file {fileName} is not present in '{testnetDir}'."
quit 1
checkRequiredFile genesisFile
if system.fileExists(genesisFilePath):
genesisFileOpt = &"--state-snapshot=\"{genesisFilePath}\""
let bootstrapTxtFile = testnetDir / bootstrapTxtFileName
if system.fileExists(bootstrapTxtFile):
@ -80,7 +211,7 @@ cli do (skipGoerliKey {.
else:
let bootstrapYamlFile = testnetDir / bootstrapYamlFileName
if system.fileExists(bootstrapYamlFile):
bootstrapFileOpt = &"--enr-bootstrap-file=\"{bootstrapYamlFile}\""
bootstrapFileOpt = &"--bootstrap-file=\"{bootstrapYamlFile}\""
else:
echo "Warning: the network metadata doesn't include a bootstrap file"
@ -89,6 +220,8 @@ cli do (skipGoerliKey {.
preset = constPreset
if preset.len == 0: preset = "minimal"
doAssert specVersion in ["v0.11.3", "v0.12.1"]
let
dataDirName = testnetName.replace("/", "_")
.replace("(", "_")
@ -98,84 +231,31 @@ cli do (skipGoerliKey {.
secretsDir = dataDir / "secrets"
beaconNodeBinary = buildDir / "beacon_node_" & dataDirName
var
nimFlags = "-d:chronicles_log_level=TRACE " & getEnv("NIM_PARAMS")
nimFlags = &"-d:chronicles_log_level=TRACE " & getEnv("NIM_PARAMS")
# write the logs to a file
nimFlags.add """ -d:"chronicles_sinks=textlines,json[file(nbc""" & staticExec("date +\"%Y%m%d%H%M%S\"") & """.log)]" """
if writeLogFile:
# write the logs to a file
nimFlags.add """ -d:"chronicles_sinks=textlines,json[file(nbc""" & staticExec("date +\"%Y%m%d%H%M%S\"") & """.log)]" """
let depositContractFile = testnetDir / depositContractFileName
if system.fileExists(depositContractFile):
depositContractOpt = "--deposit-contract=" & readFile(depositContractFile).strip
if system.dirExists(dataDir):
block resetDataDir:
# We reset the testnet data dir if the existing data dir is
# incomplete (it misses a genesis file) or if it has a genesis
# file from an older testnet:
if system.fileExists(dataDir/genesisFile):
let localGenesisContent = readFile(dataDir/genesisFile)
let testnetGenesisContent = readFile(testnetDir/genesisFile)
if localGenesisContent == testnetGenesisContent:
break
echo "Detected testnet restart. Deleting previous database..."
rmDir dataDir
proc execIgnoringExitCode(s: string) =
# reduces the error output when interrupting an external command with Ctrl+C
try:
exec s
except OsError:
discard
let depositContractBlockFile = testnetDir / depositContractBlockFileName
if system.fileExists(depositContractBlockFile):
depositContractOpt.add " --deposit-contract-block=" & readFile(depositContractBlockFile).strip
cd rootDir
mkDir dataDir
# macOS may not have gnu-getopts installed and in the PATH
execIgnoringExitCode &"""./scripts/make_prometheus_config.sh --nodes """ & $(1 + nodeID) & &""" --base-metrics-port {baseMetricsPort} --config-file "{dataDir}/prometheus.yml""""
if doBuild:
makePrometheusConfig(nodeID, baseMetricsPort, dataDir)
buildNode(nimFlags, preset, beaconNodeBinary)
exec &"""nim c {nimFlags} -d:"const_preset={preset}" -o:"{beaconNodeBinary}" beacon_chain/beacon_node.nim"""
if not skipGoerliKey and depositContractOpt.len > 0 and not system.dirExists(validatorsDir):
mode = Silent
echo "\nPlease enter your Goerli Eth1 private key in hex form (e.g. 0x1a2...f3c) in order to become a validator (you'll need access to 32 GoETH)."
echo "Hit Enter to skip this."
# is there no other way to print without a trailing newline?
exec "printf '> '"
let privKey = readLineFromStdin()
if privKey.len > 0:
mkDir validatorsDir
mode = Verbose
exec replace(&"""{beaconNodeBinary} makeDeposits
--count=1
--out-validators-dir="{validatorsDir}"
--out-secrets-dir="{secretsDir}"
--deposit-private-key={privKey}
--web3-url={web3Url}
{depositContractOpt}
""", "\n", " ")
mode = Silent
echo "\nDeposit sent, wait for confirmation then press enter to continue"
discard readLineFromStdin()
let logLevel = getEnv("LOG_LEVEL")
var logLevelOpt = ""
if logLevel.len > 0:
logLevelOpt = &"""--log-level="{logLevel}" """
mode = Verbose
cd dataDir
execIgnoringExitCode replace(&"""{beaconNodeBinary}
--data-dir="{dataDir}"
--dump
--web3-url={web3Url}
--tcp-port=""" & $(basePort + nodeID) & &"""
--udp-port=""" & $(basePort + nodeID) & &"""
--metrics
--metrics-port=""" & $(baseMetricsPort + nodeID) & &"""
--rpc
--rpc-port=""" & $(baseRpcPort + nodeID) & &"""
{bootstrapFileOpt}
{logLevelOpt}
{depositContractOpt}
--state-snapshot="{testnetDir/genesisFile}" """, "\n", " ")
if doBecomeValidator and depositContractOpt.len > 0 and not system.dirExists(validatorsDir):
becomeValidator(validatorsDir, beaconNodeBinary, secretsDir, depositContractOpt, privateGoerliKey, becomeValidatorOnly)
if doRun:
runNode(dataDir, beaconNodeBinary, bootstrapFileOpt, depositContractOpt,
genesisFileOpt, natConfig, basePort, nodeID, baseMetricsPort,
baseRpcPort, printCmdOnly)

View File

@ -23,14 +23,15 @@ if [ ${PIPESTATUS[0]} != 4 ]; then
exit 1
fi
OPTS="ht:n:d:"
LONGOPTS="help,testnet:,nodes:,data-dir:,disable-htop,log-level:,base-port:,base-metrics-port:"
OPTS="hgt:n:d:"
LONGOPTS="help,testnet:,nodes:,data-dir:,disable-htop,log-level:,base-port:,base-metrics-port:,with-ganache"
# default values
TESTNET="1"
NUM_NODES="10"
DATA_DIR="local_testnet_data"
USE_HTOP="1"
USE_GANACHE="0"
LOG_LEVEL="DEBUG"
BASE_PORT="9000"
BASE_METRICS_PORT="8008"
@ -44,8 +45,9 @@ CI run: $(basename $0) --disable-htop -- --verify-finalization --stop-at-epoch=5
-h, --help this help message
-t, --testnet testnet number (default: ${TESTNET})
-n, --nodes number of nodes to launch (default: ${NUM_NODES})
-g, --with-ganache simulate a genesis event based on a deposit contract
-d, --data-dir directory where all the node data and logs will end up
(default: "${DATA_DIR}")
(default: "${DATA_DIR}")
--base-port bootstrap node's Eth2 traffic port (default: ${BASE_PORT})
--base-metrics-port bootstrap node's metrics server port (default: ${BASE_METRICS_PORT})
--disable-htop don't use "htop" to see the beacon_node processes
@ -83,6 +85,10 @@ while true; do
USE_HTOP="0"
shift
;;
-g|--with-ganache)
USE_GANACHE="1"
shift
;;
--log-level)
LOG_LEVEL="$2"
shift 2
@ -138,29 +144,66 @@ fi
NETWORK_NIM_FLAGS=$(scripts/load-testnet-nim-flags.sh ${NETWORK})
$MAKE LOG_LEVEL="${LOG_LEVEL}" NIMFLAGS="-d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node
./build/beacon_node makeDeposits \
PIDS=""
WEB3_ARG=""
DEPOSIT_CONTRACT_ARG=""
STATE_SNAPSHOT_ARG=""
BOOTSTRAP_TIMEOUT=10 # in seconds
./build/beacon_node deposits create \
--count=${TOTAL_VALIDATORS} \
--out-validators-dir="${DEPOSITS_DIR}" \
--out-secrets-dir="${SECRETS_DIR}"
--out-deposits-dir="${DEPOSITS_DIR}" \
--out-secrets-dir="${SECRETS_DIR}" \
--dont-send
GENESIS_OFFSET=30
if [[ $USE_GANACHE == "0" ]]; then
GENESIS_OFFSET=30
BOOTSTRAP_IP="127.0.0.1"
BOOTSTRAP_IP="127.0.0.1"
./build/beacon_node createTestnet \
--data-dir="${DATA_DIR}/node0" \
--validators-dir="${DEPOSITS_DIR}" \
--total-validators=${TOTAL_VALIDATORS} \
--last-user-validator=${USER_VALIDATORS} \
--output-genesis="${NETWORK_DIR}/genesis.ssz" \
--output-bootstrap-file="${NETWORK_DIR}/bootstrap_nodes.txt" \
--bootstrap-address=${BOOTSTRAP_IP} \
--bootstrap-port=${BASE_PORT} \
--genesis-offset=${GENESIS_OFFSET} # Delay in seconds
./build/beacon_node createTestnet \
--data-dir="${DATA_DIR}/node0" \
--validators-dir="${DEPOSITS_DIR}" \
--total-validators=${TOTAL_VALIDATORS} \
--last-user-validator=${USER_VALIDATORS} \
--output-genesis="${NETWORK_DIR}/genesis.ssz" \
--output-bootstrap-file="${NETWORK_DIR}/bootstrap_nodes.txt" \
--bootstrap-address=${BOOTSTRAP_IP} \
--bootstrap-port=${BASE_PORT} \
--genesis-offset=${GENESIS_OFFSET} # Delay in seconds
STATE_SNAPSHOT_ARG="--state-snapshot=${NETWORK_DIR}/genesis.ssz"
else
make deposit_contract
echo "Launching ganache"
ganache-cli --blockTime 17 --gasLimit 100000000 -e 100000 --verbose > "${DATA_DIR}/log_ganache.txt" 2>&1 &
PIDS="${PIDS},$!"
echo "Deploying deposit contract"
WEB3_ARG="--web3-url=ws://localhost:8545"
DEPOSIT_CONTRACT_ADDRESS=$(./build/deposit_contract deploy $WEB3_ARG)
DEPOSIT_CONTRACT_ARG="--deposit-contract=$DEPOSIT_CONTRACT_ADDRESS"
MIN_DELAY=1
MAX_DELAY=5
BOOTSTRAP_TIMEOUT=$(( MAX_DELAY * TOTAL_VALIDATORS ))
./build/beacon_node deposits send \
--non-interactive \
--deposits-dir="${DEPOSITS_DIR}" \
--min-delay=$MIN_DELAY --max-delay=$MAX_DELAY \
$WEB3_ARG \
$DEPOSIT_CONTRACT_ARG > "${DATA_DIR}/log_deposit_maker.txt" 2>&1 &
PIDS="${PIDS},$!"
fi
./scripts/make_prometheus_config.sh \
--nodes ${NUM_NODES} \
--base-metrics-port ${BASE_METRICS_PORT} \
--config-file "${DATA_DIR}/prometheus.yml"
--nodes ${NUM_NODES} \
--base-metrics-port ${BASE_METRICS_PORT} \
--config-file "${DATA_DIR}/prometheus.yml" || true # TODO: this currently fails on macOS,
# but it can be considered non-critical
# Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell
# instance as the parent and the target process name as a pattern to the
@ -181,20 +224,20 @@ dump_logs() {
done
}
PIDS=""
NODES_WITH_VALIDATORS=${NODES_WITH_VALIDATORS:-4}
BOOTSTRAP_NODE=$(( NUM_NODES - 1 ))
SYSTEM_VALIDATORS=$(( TOTAL_VALIDATORS - USER_VALIDATORS ))
VALIDATORS_PER_NODE=$(( SYSTEM_VALIDATORS / NODES_WITH_VALIDATORS ))
BOOTSTRAP_TIMEOUT=10 # in seconds
for NUM_NODE in $(seq 0 $((NUM_NODES - 1))); do
if [[ ${NUM_NODE} == 0 ]]; then
for NUM_NODE in $(seq $BOOTSTRAP_NODE -1 0); do
if [[ ${NUM_NODE} == ${BOOTSTRAP_NODE} ]]; then
BOOTSTRAP_ARG=""
else
BOOTSTRAP_ARG="--bootstrap-file=${NETWORK_DIR}/bootstrap_nodes.txt"
BOOTSTRAP_ENR="${DATA_DIR}/node${BOOTSTRAP_NODE}/beacon_node.enr"
BOOTSTRAP_ARG="--bootstrap-file=${BOOTSTRAP_ENR}"
# Wait for the master node to write out its address file
START_TIMESTAMP=$(date +%s)
while [ ! -f "${DATA_DIR}/node0/beacon_node.address" ]; do
while [ ! -f "${BOOTSTRAP_ENR}" ]; do
sleep 0.1
NOW_TIMESTAMP=$(date +%s)
if [[ "$(( NOW_TIMESTAMP - START_TIMESTAMP - GENESIS_OFFSET ))" -ge "$BOOTSTRAP_TIMEOUT" ]]; then
@ -220,13 +263,15 @@ for NUM_NODE in $(seq 0 $((NUM_NODES - 1))); do
./build/beacon_node \
--non-interactive \
--nat:extip:127.0.0.1 \
--nat:extip:127.0.0.1 \
--log-level="${LOG_LEVEL}" \
--tcp-port=$(( BASE_PORT + NUM_NODE )) \
--udp-port=$(( BASE_PORT + NUM_NODE )) \
--data-dir="${NODE_DATA_DIR}" \
${BOOTSTRAP_ARG} \
--state-snapshot="${NETWORK_DIR}/genesis.ssz" \
${STATE_SNAPSHOT_ARG} \
${WEB3_ARG} \
${DEPOSIT_CONTRACT_ARG} \
--metrics \
--metrics-address="127.0.0.1" \
--metrics-port="$(( BASE_METRICS_PORT + NUM_NODE ))" \

View File

@ -65,7 +65,7 @@ if [ "$ETH1_PRIVATE_KEY" != "" ]; then
echo "Done: $DEPOSIT_CONTRACT_ADDRESS"
fi
echo "Building a local beacon_node instance for 'makeDeposits' and 'createTestnet'"
echo "Building a local beacon_node instance for 'deposits create' and 'createTestnet'"
make -j2 NIMFLAGS="-d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node process_dashboard
echo "Generating Grafana dashboards for remote testnet servers"
@ -83,10 +83,11 @@ echo "Building Docker image..."
# in docker/Makefile, and are enabled by default.
make build
../build/beacon_node makeDeposits \
../build/beacon_node deposits create \
--count=$TOTAL_VALIDATORS \
--out-validators-dir="$DEPOSITS_DIR_ABS" \
--out-secrets-dir="$SECRETS_DIR_ABS"
--out-deposits-dir="$DEPOSITS_DIR_ABS" \
--out-secrets-dir="$SECRETS_DIR_ABS" \
--dont-send
../build/beacon_node createTestnet \
--data-dir="$DATA_DIR_ABS" \

View File

@ -1,5 +1,6 @@
import os except dirExists
import strformat, confutils
import testutils/fuzzing_engines
const
gitRoot = thisDir() / ".."
@ -10,13 +11,8 @@ const
fuzzNims = gitRoot / "vendor" / "nim-testutils" / "testutils" / "fuzzing" / "fuzz.nims"
type
FuzzerKind = enum
libFuzzer
afl
cli do (testname {.argument.}: string,
fuzzer = libFuzzer):
fuzzer = defaultFuzzingEngine):
if not dirExists(fixturesDir):
echo "Please run `make test` first in order to download the official ETH2 test vectors"
@ -43,5 +39,5 @@ cli do (testname {.argument.}: string,
let testProgram = fuzzingTestsDir / &"ssz_decode_{testname}.nim"
exec &"""nim "{fuzzNims}" "{fuzzer}" "{testProgram}" "{corpusDir}" """
exec &"""ntu fuzz --fuzzer={fuzzer} --corpus="{corpusDir}" "{testProgram}" """

View File

@ -1,2 +1,3 @@
-d:ssz_testing
-d:"const_preset=mainnet"

View File

@ -13,9 +13,9 @@ import
sets,
# Specs
../../beacon_chain/spec/[datatypes, beaconstate, helpers, validator, crypto,
signatures],
signatures, state_transition],
# Internals
../../beacon_chain/[ssz, extras, state_transition],
../../beacon_chain/[ssz, extras],
# Mocking procs
./mock_blocks,
./mock_validator_keys

View File

@ -10,9 +10,7 @@
import
# Specs
../../beacon_chain/spec/[datatypes],
# Internals
../../beacon_chain/state_transition
../../beacon_chain/spec/[datatypes, state_transition]
proc nextEpoch*(state: var HashedBeaconState) =
## Transition to the start of the next epoch

View File

@ -80,12 +80,14 @@ macro parseNumConsts(file: static string): untyped =
result = quote do: `constsToCheck`
const datatypesConsts = @(parseNumConsts(SpecDir/"datatypes.nim"))
const
datatypesConsts = @(parseNumConsts(SpecDir/"datatypes.nim"))
specVersionPresetDir = SpecDir/"presets"/("v"&replace(SPEC_VERSION, ".", "_"))
when const_preset == "minimal":
const minimalConsts = @(parseNumConsts(SpecDir/"presets"/"minimal.nim"))
const minimalConsts = @(parseNumConsts(specVersionPresetDir/"minimal.nim"))
else:
const mainnetConsts = @(parseNumConsts(SpecDir/"presets"/"mainnet.nim"))
const mainnetConsts = @(parseNumConsts(specVersionPresetDir/"mainnet.nim"))
const IgnoreKeys = [
# Ignore all non-numeric types

View File

@ -11,8 +11,8 @@ import
# Standard library
os, sequtils, unittest,
# Beacon chain internals
../../beacon_chain/spec/[crypto, datatypes],
../../beacon_chain/[ssz, state_transition],
../../beacon_chain/spec/[crypto, datatypes, state_transition],
../../beacon_chain/ssz,
# Test utilities
../testutil,
./fixtures_utils

View File

@ -11,8 +11,7 @@ import
# Standard library
os, strutils, unittest,
# Beacon chain internals
../../beacon_chain/spec/datatypes,
../../beacon_chain/state_transition,
../../beacon_chain/spec/[datatypes, state_transition],
# Test utilities
../testutil,
./fixtures_utils,

View File

@ -117,11 +117,7 @@ proc runSSZtests() =
checkSSZ(SignedBeaconBlockHeader, path, hash)
of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash)
of "SigningData":
when ETH2_SPEC == "v0.12.1":
checkSSZ(SigningData, path, hash)
of "SigningRoot":
when ETH2_SPEC == "v0.11.3":
checkSSZ(SigningRoot, path, hash)
checkSSZ(SigningData, path, hash)
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:

View File

@ -0,0 +1,189 @@
import
strformat, jsconsole, jsffi,
karax/[karax, kdom, karaxdsl, vdom],
chronicles_tail/jsplugins
# Make sure that the Karax instance in the plugin is the same one
# as the Karax instance in the enclosing chronicle-tail page.
kxi = getKarax()
type EventsTable = ref object of VComponent
proc renderNetworkEvents(page: VComponent): VNode =
result = buildHtml:
table:
tr:
th: text "Time"
th: text "Nodes"
const
columnWidth = 320
timestampsHeight = 50
eventsMargin = 10
var
eventsTable = newComponent(EventsTable, renderNetworkEvents)
protocolMessages = newJsAssoc[cstring, JsAssoc[cstring, cstring]]()
pendingEvents = newSeq[TailEvent]()
freedColumns = newSeq[int]()
columnBottoms = newSeq[int]()
peerToColumnTable = newJsAssoc[cstring, int]()
lastTimestampBottom = timestampsHeight
proc startsWith*(a, b: cstring): bool {.importcpp: "startsWith", nodecl.}
proc getMsgName(protocol: cstring, msgId: int): cstring =
protocolMessages[protocol][cast[cstring](msgId)]
proc renderEvent(ev: TailEvent): cstring =
var res = newStringOfCap(1024)
let eventType = ev.msg
res.add &"""<div class="event {eventType}">"""
template addField(class, value) =
res.add "<div class=\"" & class & "\">"
res.addEscaped $value
res.add "</div>"
if eventType.startsWith(cstring("peer_")):
addField "peer", ev.peer
addField "port", ev.port
else:
addField "msgName", getMsgName(ev.protocol, ev.msgId)
res.addAsHtml ev.data
res.add """</div>"""
return cstring(res)
proc selectColumn(ev: TailEvent): int =
let key = cast[cstring](ev.port)# & ev.peer
kout ev.msg, key
if ev.msg in [cstring"peer_accepted", "peer_connected"]:
if freedColumns.len > 0:
result = freedColumns.pop()
else:
result = columnBottoms.len
columnBottoms.add(timestampsHeight)
peerToColumnTable[key] = result
elif ev.msg == cstring("peer_disconnected"):
result = peerToColumnTable[key]
discard jsDelete peerToColumnTable[key]
freedColumns.add result
else:
result = peerToColumnTable[key]
template pixels(n: int): cstring =
cast[cstring](n) & "px"
proc addEvent(ev: TailEvent) =
var
row = document.createElement("tr")
timeElem = document.createElement("td")
eventElem = document.createElement("td")
eventsTable = eventsTable.dom
eventsCount = eventsTable.children.len
lastEventRow = eventsTable.children[eventsCount - 1]
row.class = if eventsCount mod 2 == 0: "even" else: "odd"
# Hide the element initially, so we can safely measure its size.
# It has to be added to the DOM before it can be measured.
row.style.visibility = "hidden"
row.appendChild(timeElem)
row.appendChild(eventElem)
timeElem.innerHtml = ev.ts
timeElem.class = "time"
eventElem.innerHTML = renderEvent(ev)
eventsTable.appendChild(row)
let rowHeight = row.offsetHeight
let eventColumn = selectColumn(ev)
let timestampOffset = max(lastTimestampBottom, columnBottoms[eventColumn])
let prevTimestampOffset = lastTimestampBottom - timestampsHeight
lastTimestampBottom = timestampOffset + timestampsHeight
columnBottoms[eventColumn] += rowHeight + eventsMargin
# Make sure the event data is in the right column and that it
# can overflow past the row height:
eventElem.style.paddingLeft = pixels(eventColumn * columnWidth)
# Position the row in its right place and show it:
lastEventRow.style.height = pixels(timestampOffset - prevTimestampOffset)
row.style.top = pixels(timestampOffset)
row.style.visibility = ""
proc networkSectionContent: VNode =
result = buildHtml(tdiv(id = "network")):
text "Network section"
eventsTable
proc tailEventFilter(ev: TailEvent): bool =
if ev.topics != "p2pdump":
return false
if ev.msg == "p2p_protocols":
protocolMessages = cast[type(protocolMessages)](ev.data)
else:
if eventsTable.dom == nil:
pendingEvents.add ev
else:
addEvent ev
return true
proc addPending =
if eventsTable.dom != nil and pendingEvents.len > 0:
defer: pendingEvents.setLen(0)
for ev in pendingEvents:
addEvent ev
let interval = window.setInterval(addPending, 1000)
proc addStyles(styles: cstring) =
var s = document.createElement("style")
s.appendChild document.createTextNode(styles)
document.head.appendChild(s)
once:
  addStyles cstring"""
    #network > table {
      position: relative;
    }

    #network .event {
      border: 1px solid blue;
    }

    #network .event table {
      width: 100%;
    }

    #network > table > tr {
      position: absolute;
      display: flex;
      flex-direction: row;
      border-left: 1px solid red;
    }

    #network .time {
      width: 160px;
    }

    #network .event {
      width: 320px;
    }
  """

addSection("Network", networkSectionContent)
addEventFilter(tailEventFilter)
kxi.redraw()

View File

@@ -16,13 +16,19 @@ else
ADDITIONAL_BEACON_NODE_ARGS=""
fi
BOOTSTRAP_ARG=""
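# An optional first argument selects the node to bootstrap from; otherwise the default bootstrap node is used.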
if [[ ! -z "$1" ]]; then
BOOTSTRAP_NODE_ID=$1
BOOTSTRAP_ADDRESS_FILE="${SIMULATION_DIR}/node-${BOOTSTRAP_NODE_ID}/beacon_node.address"
shift
else
BOOTSTRAP_NODE_ID=$MASTER_NODE
BOOTSTRAP_ADDRESS_FILE=$NETWORK_BOOTSTRAP_FILE
BOOTSTRAP_NODE_ID=$BOOTSTRAP_NODE
fi
BOOTSTRAP_ADDRESS_FILE="${SIMULATION_DIR}/node-${BOOTSTRAP_NODE_ID}/beacon_node.enr"
if [[ "$NODE_ID" != "$BOOTSTRAP_NODE" ]]; then
BOOTSTRAP_ARG="--bootstrap-file=$BOOTSTRAP_ADDRESS_FILE"
fi
# set up the environment
@@ -48,12 +54,12 @@ mkdir -p "$NODE_VALIDATORS_DIR"
rm -rf "$NODE_SECRETS_DIR"
mkdir -p "$NODE_SECRETS_DIR"
VALIDATORS_PER_NODE=$((NUM_VALIDATORS / TOTAL_NODES))
VALIDATORS_PER_NODE=$(( NUM_VALIDATORS / (TOTAL_NODES - 1) ))
if [[ $NODE_ID -lt $TOTAL_NODES ]]; then
if [[ $NODE_ID -lt $BOOTSTRAP_NODE ]]; then
# if using validator client binaries in addition to beacon nodes
# we will split the keys for this instance in half between the BN and the VC
if [ "${SPLIT_VALIDATORS_BETWEEN_BN_AND_VC:-}" == "yes" ]; then
if [ "${BN_VC_VALIDATOR_SPLIT:-}" == "yes" ]; then
ATTACHED_VALIDATORS=$((VALIDATORS_PER_NODE / 2))
else
ATTACHED_VALIDATORS=$VALIDATORS_PER_NODE
@@ -75,12 +81,18 @@ if [ -f "${SNAPSHOT_FILE}" ]; then
SNAPSHOT_ARG="--state-snapshot=${SNAPSHOT_FILE}"
fi
DEPOSIT_CONTRACT_ARGS=""
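# Pass the web3 and deposit contract options only when a deposit contract file was generated for the simulation.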
if [ -f "${DEPOSIT_CONTRACT_FILE}" ]; then
DEPOSIT_CONTRACT_ARGS="$WEB3_ARG \
--deposit-contract=$(cat $DEPOSIT_CONTRACT_FILE) \
--deposit-contract-block=$(cat $DEPOSIT_CONTRACT_BLOCK_FILE)"
fi
cd "$NODE_DATA_DIR"
# if you want tracing messages, add "--log-level=TRACE" below
$BEACON_NODE_BIN \
--log-level=${LOG_LEVEL:-DEBUG} \
--bootstrap-file=$BOOTSTRAP_ADDRESS_FILE \
$BOOTSTRAP_ARG \
--data-dir=$NODE_DATA_DIR \
--secrets-dir=$NODE_SECRETS_DIR \
--node-name=$NODE_ID \
@@ -88,8 +100,7 @@ $BEACON_NODE_BIN \
--udp-port=$PORT \
$SNAPSHOT_ARG \
$NAT_ARG \
$WEB3_ARG \
--deposit-contract=$DEPOSIT_CONTRACT_ADDRESS \
$DEPOSIT_CONTRACT_ARGS \
--rpc \
--rpc-address="127.0.0.1" \
--rpc-port="$(( $BASE_RPC_PORT + $NODE_ID ))" \

View File

@@ -15,26 +15,34 @@ source "${SIM_ROOT}/../../env.sh"
cd "$GIT_ROOT"
VC_DATA_DIR="${SIMULATION_DIR}/validator-$NODE_ID"
NODE_DATA_DIR="${SIMULATION_DIR}/validator-$NODE_ID"
NODE_VALIDATORS_DIR=$NODE_DATA_DIR/validators/
NODE_SECRETS_DIR=$NODE_DATA_DIR/secrets/
mkdir -p "$VC_DATA_DIR/validators"
rm -f $VC_DATA_DIR/validators/*
rm -rf "$NODE_VALIDATORS_DIR"
mkdir -p "$NODE_VALIDATORS_DIR"
rm -rf "$NODE_SECRETS_DIR"
mkdir -p "$NODE_SECRETS_DIR"
VALIDATORS_PER_NODE=$((NUM_VALIDATORS / TOTAL_NODES))
if [[ $NODE_ID -lt $TOTAL_NODES ]]; then
# we will split the keys for this instance in half between the BN and the VC
VALIDATORS_PER_NODE=$((NUM_VALIDATORS / TOTAL_NODES))
VALIDATORS_PER_NODE_HALF=$((VALIDATORS_PER_NODE / 2))
FIRST_VALIDATOR_IDX=$(( VALIDATORS_PER_NODE * NODE_ID + VALIDATORS_PER_NODE_HALF))
LAST_VALIDATOR_IDX=$(( FIRST_VALIDATOR_IDX + VALIDATORS_PER_NODE_HALF - 1 ))
ATTACHED_VALIDATORS=$((VALIDATORS_PER_NODE / 2))
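# Copy the second half of this node's keys (and matching secrets) into the validator client's data directory; the beacon node keeps the first half.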
pushd "$VALIDATORS_DIR" >/dev/null
cp $(seq -s " " -f v%07g.privkey $FIRST_VALIDATOR_IDX $LAST_VALIDATOR_IDX) "$VC_DATA_DIR/validators"
for VALIDATOR in $(ls | tail -n +$(( ($VALIDATORS_PER_NODE * $NODE_ID) + 1 + $ATTACHED_VALIDATORS )) | head -n $ATTACHED_VALIDATORS); do
cp -ar "$VALIDATOR" "$NODE_VALIDATORS_DIR"
cp -a "$SECRETS_DIR/$VALIDATOR" "$NODE_SECRETS_DIR"
done
popd >/dev/null
fi
cd "$VC_DATA_DIR"
cd "$NODE_DATA_DIR"
$VALIDATOR_CLIENT_BIN \
--log-level=${LOG_LEVEL:-DEBUG} \
--data-dir=$VC_DATA_DIR \
--data-dir=$NODE_DATA_DIR \
--secrets-dir=$NODE_SECRETS_DIR \
--rpc-port="$(( $BASE_RPC_PORT + $NODE_ID ))"

Some files were not shown because too many files have changed in this diff.