Merge branch 'devel' into readme

This commit is contained in:
李婷婷 2020-06-19 22:11:12 +08:00
commit 04e1a61f58
104 changed files with 2946 additions and 2385 deletions

.gitignore vendored

@@ -33,6 +33,8 @@ build/
/local_testnet_data*/
# Prometheus db
/data
# Grafana dashboards
/docker/*.json


@@ -1,268 +0,0 @@
AllTests-minimal
===
## Attestation pool processing [Preset: minimal]
```diff
+ Attestations may arrive in any order [Preset: minimal] OK
+ Attestations may overlap, bigger first [Preset: minimal] OK
+ Attestations may overlap, smaller first [Preset: minimal] OK
+ Attestations should be combined [Preset: minimal] OK
+ Can add and retrieve simple attestation [Preset: minimal] OK
+ Fork choice returns block with attestation OK
+ Fork choice returns latest block with no attestations OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## Beacon chain DB [Preset: minimal]
```diff
+ empty database [Preset: minimal] OK
+ find ancestors [Preset: minimal] OK
+ sanity check blocks [Preset: minimal] OK
+ sanity check genesis roundtrip [Preset: minimal] OK
+ sanity check states [Preset: minimal] OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
## Beacon node
```diff
+ Compile OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Beacon state [Preset: minimal]
```diff
+ Smoke test initialize_beacon_state_from_eth1 [Preset: minimal] OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Block pool processing [Preset: minimal]
```diff
+ Can add same block twice [Preset: minimal] OK
+ Reverse order block add & get [Preset: minimal] OK
+ Simple block add&get [Preset: minimal] OK
+ getRef returns nil for missing blocks OK
+ loadTailState gets genesis block on first load [Preset: minimal] OK
+ updateHead updates head and headState [Preset: minimal] OK
+ updateStateData sanity [Preset: minimal] OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## Block processing [Preset: minimal]
```diff
+ Attestation gets processed at epoch [Preset: minimal] OK
+ Passes from genesis state, empty block [Preset: minimal] OK
+ Passes from genesis state, no block [Preset: minimal] OK
+ Passes through epoch update, empty block [Preset: minimal] OK
+ Passes through epoch update, no block [Preset: minimal] OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
## BlockPool finalization tests [Preset: minimal]
```diff
+ init with gaps [Preset: minimal] OK
+ prune heads on finalization [Preset: minimal] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## BlockRef and helpers [Preset: minimal]
```diff
+ getAncestorAt sanity [Preset: minimal] OK
+ isAncestorOf sanity [Preset: minimal] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## BlockSlot and helpers [Preset: minimal]
```diff
+ atSlot sanity [Preset: minimal] OK
+ parent sanity [Preset: minimal] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Fork Choice + Finality [Preset: minimal]
```diff
+ fork_choice - testing finality #01 OK
+ fork_choice - testing finality #02 OK
+ fork_choice - testing no votes OK
+ fork_choice - testing with votes OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## Honest validator
```diff
+ General pubsub topics: OK
+ Mainnet attestation topics OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Interop
```diff
+ Interop genesis OK
+ Interop signatures OK
+ Mocked start private key OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Keystore
```diff
+ Pbkdf2 decryption OK
+ Pbkdf2 encryption OK
+ Pbkdf2 errors OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Mocking utilities
```diff
+ merkle_minimal OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Official - constants & config [Preset: minimal]
```diff
+ BASE_REWARD_FACTOR 64 [Preset: minimal] OK
+ BLS_WITHDRAWAL_PREFIX "0x00" [Preset: minimal] OK
+ CHURN_LIMIT_QUOTIENT 65536 [Preset: minimal] OK
+ CUSTODY_PERIOD_TO_RANDAO_PADDING 2048 [Preset: minimal] OK
DEPOSIT_CONTRACT_ADDRESS "0x1234567890123456789012345678901234567 Skip
+ DOMAIN_AGGREGATE_AND_PROOF "0x06000000" [Preset: minimal] OK
+ DOMAIN_BEACON_ATTESTER "0x01000000" [Preset: minimal] OK
+ DOMAIN_BEACON_PROPOSER "0x00000000" [Preset: minimal] OK
+ DOMAIN_CUSTODY_BIT_SLASHING "0x83000000" [Preset: minimal] OK
+ DOMAIN_DEPOSIT "0x03000000" [Preset: minimal] OK
+ DOMAIN_LIGHT_CLIENT "0x82000000" [Preset: minimal] OK
+ DOMAIN_RANDAO "0x02000000" [Preset: minimal] OK
+ DOMAIN_SELECTION_PROOF "0x05000000" [Preset: minimal] OK
+ DOMAIN_SHARD_COMMITTEE "0x81000000" [Preset: minimal] OK
+ DOMAIN_SHARD_PROPOSAL "0x80000000" [Preset: minimal] OK
+ DOMAIN_VOLUNTARY_EXIT "0x04000000" [Preset: minimal] OK
+ EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS 4096 [Preset: minimal] OK
+ EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE 2 [Preset: minimal] OK
+ EFFECTIVE_BALANCE_INCREMENT 1000000000 [Preset: minimal] OK
+ EJECTION_BALANCE 16000000000 [Preset: minimal] OK
+ EPOCHS_PER_CUSTODY_PERIOD 2048 [Preset: minimal] OK
+ EPOCHS_PER_ETH1_VOTING_PERIOD 4 [Preset: minimal] OK
+ EPOCHS_PER_HISTORICAL_VECTOR 64 [Preset: minimal] OK
+ EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION 256 [Preset: minimal] OK
+ EPOCHS_PER_SLASHINGS_VECTOR 64 [Preset: minimal] OK
+ ETH1_FOLLOW_DISTANCE 16 [Preset: minimal] OK
+ GASPRICE_ADJUSTMENT_COEFFICIENT 8 [Preset: minimal] OK
+ GENESIS_DELAY 300 [Preset: minimal] OK
GENESIS_FORK_VERSION "0x00000001" [Preset: minimal] Skip
+ HISTORICAL_ROOTS_LIMIT 16777216 [Preset: minimal] OK
+ HYSTERESIS_DOWNWARD_MULTIPLIER 1 [Preset: minimal] OK
+ HYSTERESIS_QUOTIENT 4 [Preset: minimal] OK
+ HYSTERESIS_UPWARD_MULTIPLIER 5 [Preset: minimal] OK
+ INACTIVITY_PENALTY_QUOTIENT 16777216 [Preset: minimal] OK
+ INITIAL_ACTIVE_SHARDS 4 [Preset: minimal] OK
+ LIGHT_CLIENT_COMMITTEE_PERIOD 256 [Preset: minimal] OK
+ LIGHT_CLIENT_COMMITTEE_SIZE 128 [Preset: minimal] OK
+ MAX_ATTESTATIONS 128 [Preset: minimal] OK
+ MAX_ATTESTER_SLASHINGS 2 [Preset: minimal] OK
+ MAX_COMMITTEES_PER_SLOT 4 [Preset: minimal] OK
+ MAX_CUSTODY_KEY_REVEALS 256 [Preset: minimal] OK
+ MAX_CUSTODY_SLASHINGS 1 [Preset: minimal] OK
+ MAX_DEPOSITS 16 [Preset: minimal] OK
+ MAX_EARLY_DERIVED_SECRET_REVEALS 1 [Preset: minimal] OK
+ MAX_EFFECTIVE_BALANCE 32000000000 [Preset: minimal] OK
+ MAX_EPOCHS_PER_CROSSLINK 4 [Preset: minimal] OK
+ MAX_GASPRICE 16384 [Preset: minimal] OK
+ MAX_PROPOSER_SLASHINGS 16 [Preset: minimal] OK
+ MAX_REVEAL_LATENESS_DECREMENT 128 [Preset: minimal] OK
+ MAX_SEED_LOOKAHEAD 4 [Preset: minimal] OK
+ MAX_SHARDS 8 [Preset: minimal] OK
+ MAX_SHARD_BLOCKS_PER_ATTESTATION 12 [Preset: minimal] OK
+ MAX_SHARD_BLOCK_CHUNKS 4 [Preset: minimal] OK
+ MAX_VALIDATORS_PER_COMMITTEE 2048 [Preset: minimal] OK
+ MAX_VOLUNTARY_EXITS 16 [Preset: minimal] OK
+ MINOR_REWARD_QUOTIENT 256 [Preset: minimal] OK
+ MIN_ATTESTATION_INCLUSION_DELAY 1 [Preset: minimal] OK
+ MIN_DEPOSIT_AMOUNT 1000000000 [Preset: minimal] OK
+ MIN_EPOCHS_TO_INACTIVITY_PENALTY 4 [Preset: minimal] OK
+ MIN_GASPRICE 8 [Preset: minimal] OK
+ MIN_GENESIS_ACTIVE_VALIDATOR_COUNT 64 [Preset: minimal] OK
+ MIN_GENESIS_TIME 1578009600 [Preset: minimal] OK
+ MIN_PER_EPOCH_CHURN_LIMIT 4 [Preset: minimal] OK
+ MIN_SEED_LOOKAHEAD 1 [Preset: minimal] OK
+ MIN_SLASHING_PENALTY_QUOTIENT 32 [Preset: minimal] OK
+ MIN_VALIDATOR_WITHDRAWABILITY_DELAY 256 [Preset: minimal] OK
+ ONLINE_PERIOD 8 [Preset: minimal] OK
+ PHASE_1_FORK_VERSION "0x01000001" [Preset: minimal] OK
+ PHASE_1_GENESIS_SLOT 8 [Preset: minimal] OK
+ PROPOSER_REWARD_QUOTIENT 8 [Preset: minimal] OK
+ RANDAO_PENALTY_EPOCHS 2 [Preset: minimal] OK
+ RANDOM_SUBNETS_PER_VALIDATOR 1 [Preset: minimal] OK
+ SAFE_SLOTS_TO_UPDATE_JUSTIFIED 2 [Preset: minimal] OK
+ SECONDS_PER_ETH1_BLOCK 14 [Preset: minimal] OK
+ SECONDS_PER_SLOT 6 [Preset: minimal] OK
+ SHARD_BLOCK_CHUNK_SIZE 262144 [Preset: minimal] OK
SHARD_BLOCK_OFFSETS [1,2,3,5,8,13,21,34,55,89,144,233] [Pres Skip
+ SHARD_COMMITTEE_PERIOD 64 [Preset: minimal] OK
+ SHUFFLE_ROUND_COUNT 10 [Preset: minimal] OK
+ SLOTS_PER_EPOCH 8 [Preset: minimal] OK
+ SLOTS_PER_HISTORICAL_ROOT 64 [Preset: minimal] OK
+ TARGET_AGGREGATORS_PER_COMMITTEE 16 [Preset: minimal] OK
+ TARGET_COMMITTEE_SIZE 4 [Preset: minimal] OK
+ TARGET_SHARD_BLOCK_SIZE 196608 [Preset: minimal] OK
+ VALIDATOR_REGISTRY_LIMIT 1099511627776 [Preset: minimal] OK
+ WHISTLEBLOWER_REWARD_QUOTIENT 512 [Preset: minimal] OK
```
OK: 83/86 Fail: 0/86 Skip: 3/86
## PeerPool testing suite
```diff
+ Access peers by key test OK
+ Acquire from empty pool OK
+ Acquire/Sorting and consistency test OK
+ Iterators test OK
+ Peer lifetime test OK
+ Safe/Clear test OK
+ Score check test OK
+ addPeer() test OK
+ addPeerNoWait() test OK
+ deletePeer() test OK
```
OK: 10/10 Fail: 0/10 Skip: 0/10
## SSZ dynamic navigator
```diff
+ navigating fields OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## SSZ navigator
```diff
+ basictype OK
+ lists with max size OK
+ simple object fields OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Spec helpers
```diff
+ integer_squareroot OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Sync protocol
```diff
+ Compile OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Zero signature sanity checks
```diff
+ SSZ serialization roundtrip of SignedBeaconBlockHeader OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## [Unit - Spec - Block processing] Attestations [Preset: minimal]
```diff
+ Valid attestation OK
+ Valid attestation from previous epoch OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## [Unit - Spec - Block processing] Deposits [Preset: minimal]
```diff
+ Deposit at MAX_EFFECTIVE_BALANCE balance (32 ETH) OK
+ Deposit over MAX_EFFECTIVE_BALANCE balance (32 ETH) OK
+ Deposit under MAX_EFFECTIVE_BALANCE balance (32 ETH) OK
+ Validator top-up OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## [Unit - Spec - Epoch processing] Justification and Finalization [Preset: minimal]
```diff
+ Rule I - 234 finalization with enough support OK
+ Rule I - 234 finalization without support OK
+ Rule II - 23 finalization with enough support OK
+ Rule II - 23 finalization without support OK
+ Rule III - 123 finalization with enough support OK
+ Rule III - 123 finalization without support OK
+ Rule IV - 12 finalization with enough support OK
+ Rule IV - 12 finalization without support OK
```
OK: 8/8 Fail: 0/8 Skip: 0/8
## hash
```diff
+ HashArray OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 160/163 Fail: 0/163 Skip: 3/163


@@ -1,183 +0,0 @@
FixtureAll-minimal
===
## Official - Epoch Processing - Final updates [Preset: minimal]
```diff
+ Final updates - effective_balance_hysteresis [Preset: minimal] OK
+ Final updates - eth1_vote_no_reset [Preset: minimal] OK
+ Final updates - eth1_vote_reset [Preset: minimal] OK
+ Final updates - historical_root_accumulator [Preset: minimal] OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## Official - Epoch Processing - Justification & Finalization [Preset: minimal]
```diff
+ Justification & Finalization - 123_ok_support [Preset: minimal] OK
+ Justification & Finalization - 123_poor_support [Preset: minimal] OK
+ Justification & Finalization - 12_ok_support [Preset: minimal] OK
+ Justification & Finalization - 12_ok_support_messed_target [Preset: minimal] OK
+ Justification & Finalization - 12_poor_support [Preset: minimal] OK
+ Justification & Finalization - 234_ok_support [Preset: minimal] OK
+ Justification & Finalization - 234_poor_support [Preset: minimal] OK
+ Justification & Finalization - 23_ok_support [Preset: minimal] OK
+ Justification & Finalization - 23_poor_support [Preset: minimal] OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
## Official - Epoch Processing - Registry updates [Preset: minimal]
```diff
+ Registry updates - activation_queue_activation_and_ejection [Preset: minimal] OK
+ Registry updates - activation_queue_efficiency [Preset: minimal] OK
+ Registry updates - activation_queue_no_activation_no_finality [Preset: minimal] OK
+ Registry updates - activation_queue_sorting [Preset: minimal] OK
+ Registry updates - activation_queue_to_activated_if_finalized [Preset: minimal] OK
+ Registry updates - add_to_activation_queue [Preset: minimal] OK
+ Registry updates - ejection [Preset: minimal] OK
+ Registry updates - ejection_past_churn_limit [Preset: minimal] OK
```
OK: 8/8 Fail: 0/8 Skip: 0/8
## Official - Epoch Processing - Slashings [Preset: minimal]
```diff
+ Slashings - max_penalties [Preset: minimal] OK
+ Slashings - scaled_penalties [Preset: minimal] OK
+ Slashings - small_penalty [Preset: minimal] OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Official - Operations - Attestations [Preset: minimal]
```diff
+ [Invalid] after_epoch_slots OK
+ [Invalid] bad_source_root OK
+ [Invalid] before_inclusion_delay OK
+ [Invalid] empty_participants_seemingly_valid_sig OK
+ [Invalid] empty_participants_zeroes_sig OK
+ [Invalid] future_target_epoch OK
+ [Invalid] invalid_attestation_signature OK
+ [Invalid] invalid_current_source_root OK
+ [Invalid] invalid_index OK
+ [Invalid] mismatched_target_and_slot OK
+ [Invalid] new_source_epoch OK
+ [Invalid] old_source_epoch OK
+ [Invalid] old_target_epoch OK
+ [Invalid] source_root_is_target_root OK
+ [Invalid] too_few_aggregation_bits OK
+ [Invalid] too_many_aggregation_bits OK
+ [Invalid] wrong_index_for_committee_signature OK
+ [Invalid] wrong_index_for_slot OK
+ [Valid] success OK
+ [Valid] success_multi_proposer_index_iterations OK
+ [Valid] success_previous_epoch OK
```
OK: 21/21 Fail: 0/21 Skip: 0/21
## Official - Operations - Attester slashing [Preset: minimal]
```diff
+ [Invalid] att1_bad_extra_index OK
+ [Invalid] att1_bad_replaced_index OK
+ [Invalid] att1_duplicate_index_double_signed OK
+ [Invalid] att1_duplicate_index_normal_signed OK
+ [Invalid] att2_bad_extra_index OK
+ [Invalid] att2_bad_replaced_index OK
+ [Invalid] att2_duplicate_index_double_signed OK
+ [Invalid] att2_duplicate_index_normal_signed OK
+ [Invalid] invalid_sig_1 OK
+ [Invalid] invalid_sig_1_and_2 OK
+ [Invalid] invalid_sig_2 OK
+ [Invalid] no_double_or_surround OK
+ [Invalid] participants_already_slashed OK
+ [Invalid] same_data OK
+ [Invalid] unsorted_att_1 OK
+ [Invalid] unsorted_att_2 OK
+ [Valid] success_already_exited_long_ago OK
+ [Valid] success_already_exited_recent OK
+ [Valid] success_double OK
+ [Valid] success_surround OK
```
OK: 20/20 Fail: 0/20 Skip: 0/20
## Official - Operations - Block header [Preset: minimal]
```diff
+ [Invalid] invalid_multiple_blocks_single_slot OK
+ [Invalid] invalid_parent_root OK
+ [Invalid] invalid_proposer_index OK
+ [Invalid] invalid_slot_block_header OK
+ [Invalid] proposer_slashed OK
+ [Valid] success_block_header OK
```
OK: 6/6 Fail: 0/6 Skip: 0/6
## Official - Operations - Deposits [Preset: minimal]
```diff
+ [Invalid] bad_merkle_proof OK
+ [Invalid] wrong_deposit_for_deposit_count OK
+ [Valid] invalid_sig_new_deposit OK
+ [Valid] invalid_sig_other_version OK
+ [Valid] invalid_sig_top_up OK
+ [Valid] invalid_withdrawal_credentials_top_up OK
+ [Valid] new_deposit_max OK
+ [Valid] new_deposit_over_max OK
+ [Valid] new_deposit_under_max OK
+ [Valid] success_top_up OK
+ [Valid] valid_sig_but_forked_state OK
```
OK: 11/11 Fail: 0/11 Skip: 0/11
## Official - Operations - Proposer slashing [Preset: minimal]
```diff
+ [Invalid] identifier OK
+ [Valid] identifier OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Official - Operations - Voluntary exit [Preset: minimal]
```diff
+ [Invalid] invalid_signature OK
+ [Invalid] validator_already_exited OK
+ [Invalid] validator_exit_in_future OK
+ [Invalid] validator_invalid_validator_index OK
+ [Invalid] validator_not_active OK
+ [Invalid] validator_not_active_long_enough OK
+ [Valid] default_exit_epoch_subsequent_exit OK
+ [Valid] success OK
+ [Valid] success_exit_queue OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
## Official - Sanity - Blocks [Preset: minimal]
```diff
+ [Invalid] double_same_proposer_slashings_same_block OK
+ [Invalid] double_similar_proposer_slashings_same_block OK
+ [Invalid] double_validator_exit_same_block OK
+ [Invalid] duplicate_attester_slashing OK
+ [Invalid] expected_deposit_in_block OK
+ [Invalid] invalid_block_sig OK
+ [Invalid] invalid_proposer_index_sig_from_expected_proposer OK
+ [Invalid] invalid_proposer_index_sig_from_proposer_index OK
+ [Invalid] invalid_state_root OK
+ [Invalid] parent_from_same_slot OK
+ [Invalid] prev_slot_block_transition OK
+ [Invalid] proposal_for_genesis_slot OK
+ [Invalid] same_slot_block_transition OK
+ [Invalid] zero_block_sig OK
+ [Valid] attestation OK
+ [Valid] attester_slashing OK
+ [Valid] balance_driven_status_transitions OK
+ [Valid] deposit_in_block OK
+ [Valid] deposit_top_up OK
+ [Valid] empty_block_transition OK
+ [Valid] empty_epoch_transition OK
+ [Valid] empty_epoch_transition_not_finalizing OK
+ [Valid] high_proposer_index OK
+ [Valid] historical_batch OK
+ [Valid] multiple_attester_slashings_no_overlap OK
+ [Valid] multiple_attester_slashings_partial_overlap OK
+ [Valid] multiple_different_proposer_slashings_same_block OK
+ [Valid] multiple_different_validator_exits_same_block OK
+ [Valid] proposer_after_inactive_index OK
+ [Valid] proposer_slashing OK
+ [Valid] skipped_slots OK
+ [Valid] voluntary_exit OK
```
OK: 32/32 Fail: 0/32 Skip: 0/32
## Official - Sanity - Slots [Preset: minimal]
```diff
+ Slots - double_empty_epoch OK
+ Slots - empty_epoch OK
+ Slots - over_epoch_boundary OK
+ Slots - slots_1 OK
+ Slots - slots_2 OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
---TOTAL---
OK: 130/130 Fail: 0/130 Skip: 0/130


@@ -1,36 +0,0 @@
FixtureSSZConsensus-minimal
===
## Official - SSZ consensus objects [Preset: minimal]
```diff
+ Testing AggregateAndProof OK
+ Testing Attestation OK
+ Testing AttestationData OK
+ Testing AttesterSlashing OK
+ Testing BeaconBlock OK
+ Testing BeaconBlockBody OK
+ Testing BeaconBlockHeader OK
+ Testing BeaconState OK
+ Testing Checkpoint OK
+ Testing Deposit OK
+ Testing DepositData OK
+ Testing DepositMessage OK
+ Testing Eth1Block OK
+ Testing Eth1Data OK
+ Testing Fork OK
+ Testing ForkData OK
+ Testing HistoricalBatch OK
+ Testing IndexedAttestation OK
+ Testing PendingAttestation OK
+ Testing ProposerSlashing OK
+ Testing SignedAggregateAndProof OK
+ Testing SignedBeaconBlock OK
+ Testing SignedBeaconBlockHeader OK
+ Testing SignedVoluntaryExit OK
+ Testing SigningData OK
+ Testing Validator OK
+ Testing VoluntaryExit OK
```
OK: 27/27 Fail: 0/27 Skip: 0/27
---TOTAL---
OK: 27/27 Fail: 0/27 Skip: 0/27


@@ -1,21 +0,0 @@
FixtureSSZGeneric-minimal
===
## Official - SSZ generic types
```diff
Testing basic_vector inputs - invalid - skipping Vector[uint128, N] and Vector[uint256, N] Skip
+ Testing basic_vector inputs - valid - skipping Vector[uint128, N] and Vector[uint256, N] OK
+ Testing bitlist inputs - invalid OK
+ Testing bitlist inputs - valid OK
Testing bitvector inputs - invalid Skip
+ Testing bitvector inputs - valid OK
+ Testing boolean inputs - invalid OK
+ Testing boolean inputs - valid OK
+ Testing containers inputs - invalid - skipping BitsStruct OK
+ Testing containers inputs - valid - skipping BitsStruct OK
+ Testing uints inputs - invalid - skipping uint128 and uint256 OK
+ Testing uints inputs - valid - skipping uint128 and uint256 OK
```
OK: 10/12 Fail: 0/12 Skip: 2/12
---TOTAL---
OK: 10/12 Fail: 0/12 Skip: 2/12

Jenkinsfile vendored

@@ -40,8 +40,8 @@ def runStages() {
// EXECUTOR_NUMBER will be 0 or 1, since we have 2 executors per Jenkins node
sh """#!/bin/bash
set -e
timeout -k 20s 10m ./scripts/launch_local_testnet.sh --testnet 0 --nodes 4 --log-level INFO --disable-htop --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --stop-at-epoch=5
timeout -k 20s 40m ./scripts/launch_local_testnet.sh --testnet 1 --nodes 4 --log-level INFO --disable-htop --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --stop-at-epoch=5
timeout -k 20s 10m ./scripts/launch_local_testnet.sh --testnet 0 --nodes 4 --log-level INFO --disable-htop --data-dir local_testnet0_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --stop-at-epoch=5
timeout -k 20s 40m ./scripts/launch_local_testnet.sh --testnet 1 --nodes 4 --log-level INFO --disable-htop --data-dir local_testnet1_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --stop-at-epoch=5
"""
}
}
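As an aside, the `--base-port`/`--base-metrics-port` arithmetic in these commands keeps the two executors' testnets from colliding; a minimal Nim sketch of the same arithmetic (the 0-or-1 range is the assumption stated in the comment above):

```nim
# Worked example of the Jenkins port scheme: each executor gets its own
# 100-port window so two concurrent testnets never collide.
for executorNumber in 0 .. 1:                     # EXECUTOR_NUMBER is 0 or 1
  let basePort = 9000 + executorNumber * 100      # 9000 or 9100
  let metricsPort = 8008 + executorNumber * 100   # 8008 or 8108
  echo "executor ", executorNumber, ": base-port=", basePort,
       " metrics-port=", metricsPort
```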
@@ -49,10 +49,19 @@ def runStages() {
)
}
} catch(e) {
echo "'${env.STAGE_NAME}' stage failed"
// we need to rethrow the exception here
throw e
} finally {
// archive testnet logs
if ("${NODE_NAME}" ==~ /linux.*/) {
sh """#!/bin/bash
for D in local_testnet0_data local_testnet1_data; do
[[ -d "\$D" ]] && tar cjf "\${D}.tar.bz2" "\${D}"/*.txt
done
"""
archiveArtifacts("*.tar.bz2")
}
// clean the workspace
cleanWs(disableDeferredWipeout: true, deleteDirs: true)
}
}


@@ -38,7 +38,7 @@ TOOLS_DIRS := \
ncli \
nbench \
research \
tests/simulation
tools
TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS))
.PHONY: \
@@ -85,6 +85,13 @@ all: | $(TOOLS) libnfuzz.so libnfuzz.a
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
ifeq ($(OS), Windows_NT)
ifeq ($(ARCH), x86)
# 32-bit Windows is not supported by libbacktrace/libunwind
USE_LIBBACKTRACE := 0
endif
endif
# "--define:release" implies "--stacktrace:off" and it cannot be added to config.nims
ifeq ($(USE_LIBBACKTRACE), 0)
NIM_PARAMS := $(NIM_PARAMS) -d:debug -d:disable_libbacktrace
@@ -131,10 +138,10 @@ eth2_network_simulation: | build deps clean_eth2_network_simulation_files
+ GIT_ROOT="$$PWD" NIMFLAGS="$(NIMFLAGS)" LOG_LEVEL="$(LOG_LEVEL)" tests/simulation/start.sh
clean-testnet0:
rm -rf build/data/testnet0
rm -rf build/data/testnet0*
clean-testnet1:
rm -rf build/data/testnet1
rm -rf build/data/testnet1*
# - we're getting the preset from a testnet-specific .env file
# - try SCRIPT_PARAMS="--skipGoerliKey"
@@ -143,7 +150,7 @@ testnet0 testnet1: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) --const-preset=$$CONST_PRESET --dev-build $@
clean-schlesi:
rm -rf build/data/shared_schlesi
rm -rf build/data/shared_schlesi*
schlesi: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/schlesi
@@ -152,7 +159,7 @@ schlesi-dev: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/schlesi
clean-witti:
rm -rf build/data/shared_witti
rm -rf build/data/shared_witti*
witti: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="$(LOG_LEVEL)" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/witti
@@ -161,7 +168,7 @@ witti-dev: | build deps
NIM_PARAMS="$(subst ",\",$(NIM_PARAMS))" LOG_LEVEL="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" $(ENV_SCRIPT) nim $(NIM_PARAMS) scripts/connect_to_testnet.nims $(SCRIPT_PARAMS) shared/witti
clean: | clean-common
rm -rf build/{$(TOOLS_CSV),all_tests,*_node,*ssz*,beacon_node_testnet*,block_sim,state_sim,transition*}
rm -rf build/{$(TOOLS_CSV),all_tests,*_node,*ssz*,beacon_node_*,block_sim,state_sim,transition*}
ifneq ($(USE_LIBBACKTRACE), 0)
+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
endif

README.md

@@ -14,28 +14,411 @@ Welcome to Nimbus for Ethereum 2.0.
Nimbus beacon chain is a research implementation of the beacon chain component of the upcoming Ethereum Serenity upgrade, aka Eth2.
## Manual
Please see the complete guide at [The nim-beacon-chain Book](https://status-im.github.io/nim-beacon-chain/).
Please see the complete documentation at [The nim-beacon-chain Book](https://status-im.github.io/nim-beacon-chain/).
## Quick Start
As a user, you can connect to testnets and become a validator as follows:
## Related
* [status-im/nimbus](https://github.com/status-im/nimbus/): Nimbus for Ethereum 1
* [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/tree/v0.12.1#phase-0): Serenity specification that this project implements
You can check where the beacon chain fits in the Ethereum ecosystem in our Two-Point-Oh series: https://our.status.im/tag/two-point-oh/
## Table of Contents
- [Nimbus Eth2 (Beacon Chain)](#nimbus-eth2-beacon-chain)
- [Manual](#manual)
- [Related](#related)
- [Table of Contents](#table-of-contents)
- [Prerequisites for everyone](#prerequisites-for-everyone)
- [Linux](#linux)
- [MacOS](#macos)
- [Windows](#windows)
- [For users](#for-users)
- [Connecting to testnets](#connecting-to-testnets)
- [Getting metrics from a local testnet client](#getting-metrics-from-a-local-testnet-client)
- [Interop (for other Eth2 clients)](#interop-for-other-eth2-clients)
- [For researchers](#for-researchers)
- [State transition simulation](#state-transition-simulation)
- [Local network simulation](#local-network-simulation)
- [Visualising simulation metrics](#visualising-simulation-metrics)
- [Network inspection](#network-inspection)
- [For developers](#for-developers)
- [Windows dev environment](#windows-dev-environment)
- [Linux, MacOS](#linux-macos)
- [Raspberry Pi](#raspberry-pi)
- [Makefile tips and tricks for developers](#makefile-tips-and-tricks-for-developers)
- [CI setup](#ci-setup)
- [License](#license)
## Prerequisites for everyone
At the moment, Nimbus has to be built from source.
Nimbus has the following external dependencies:
* Developer tools (C compiler, Make, Bash, Git)
* PCRE
Nim is not an external dependency: Nimbus will build its own local copy.
### Linux
On common Linux distributions the dependencies can be installed with:
```sh
# Debian and Ubuntu
sudo apt-get install build-essential git libpcre3-dev
# Fedora
dnf install @development-tools pcre
# Archlinux, using an AUR manager for pcre-static
yourAURmanager -S base-devel pcre-static
```
### MacOS
Assuming you use [Homebrew](https://brew.sh/) to manage packages:
```sh
brew install pcre
```
Make sure you have [CMake](https://cmake.org/) installed, to be able to build libunwind (used for [lightweight stack traces](https://github.com/status-im/nim-libbacktrace)).
### Windows
You can install the developer tools by following the instructions in our [Windows dev environment section](#windows-dev-environment).
It also provides a download script for prebuilt PCRE.
### Android
* Install the [Termux](https://termux.com) app from FDroid or the Google Play store
* Install a [PRoot](https://wiki.termux.com/wiki/PRoot) of your choice following the instructions for your preferred distribution.
Note that the Ubuntu PRoot is known to include all Nimbus prerequisites, compiled for the Arm64 architecture (the common architecture for Android devices).
*Assuming Ubuntu PRoot is used*
```sh
apt install build-essential git libpcre3-dev
```
## For users
### Connecting to testnets
Nimbus connects to any of the testnets published in the [eth2-clients/eth2-testnets repo](https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus).
Once the [prerequisites](#prerequisites) are installed you can connect to the [Witti testnet](https://github.com/goerli/witti) with the following commands:
```bash
git clone https://github.com/status-im/nim-beacon-chain
cd nim-beacon-chain
make witti
make witti # This will build Nimbus and all other dependencies
# and connect you to Witti
```
Sometimes, you may want to disable the interactive prompt asking you for a Goerli key in order to become a validator:
```bash
make SCRIPT_PARAMS="--skipGoerliKey" witti # not a validator
```
You can also start multiple local nodes, in different terminal windows/tabs, by specifying their numeric IDs:
```bash
make SCRIPT_PARAMS="--nodeID=0" witti # the default
make SCRIPT_PARAMS="--nodeID=1" witti
make SCRIPT_PARAMS="--nodeID=2" witti
```
### Getting metrics from a local testnet client
```bash
# the primitive HTTP server started to serve the metrics is considered insecure
make NIMFLAGS="-d:insecure" witti
```
You can now see the raw metrics on http://127.0.0.1:8008/metrics but they're not very useful like this, so let's feed them to a Prometheus instance:
```bash
prometheus --config.file=build/data/shared_witti_0/prometheus.yml
# when starting multiple nodes at the same time, just use the config file from the one with the highest ID
```
For some pretty pictures, get [Grafana](https://grafana.com/) up and running, then import the dashboard definition in "grafana/beacon\_nodes\_Grafana\_dashboard.json".
## Interop (for other Eth2 clients)
After installing the [prerequisites](#prerequisites), you can run the Nimbus state transition with the `ncli` tool:
* [ncli](ncli)
The interop scripts have been moved to a common repo. They relied on the 0.8.3 specs, which have since seen significant changes; the interop branch still exists but is unmaintained.
* [multinet](https://github.com/status-im/nim-beacon-chain/tree/master/multinet) - a set of scripts to build and run several Eth2 clients locally
* [interop branch](https://github.com/status-im/nim-beacon-chain/tree/interop) (unmaintained)
## For researchers
### State transition simulation
The state transition simulator can quickly run the Beacon chain state transition function in isolation and output JSON snapshots of the state. The simulation runs without networking and blocks are processed without slot time delays.
```bash
# build and run the state simulator, then display its help ("-d:release" speeds it
# up substantially, allowing the simulation of longer runs in reasonable time)
make NIMFLAGS="-d:release" state_sim
build/state_sim --help
```
### Local network simulation
The local network simulation will create a full peer-to-peer network of beacon nodes and validators on a single machine, and run the beacon chain in real time.
Parameters such as the shard and validator counts, as well as the data folders, are configured in [vars.sh](tests/simulation/vars.sh). They can be set as environment variables before launching the simulation.
```bash
# Clear data files from your last run and start the simulation with a new genesis block:
make VALIDATORS=192 NODES=6 USER_NODES=1 eth2_network_simulation
# In another terminal, get a shell with the right environment variables set:
./env.sh bash
# In the above example, the network is prepared for 7 beacon nodes but one of
# them is not started by default (`USER_NODES`) - this is useful to test
# catching up to the consensus. The following command will start the missing node.
./tests/simulation/run_node.sh 0 # (or the index (0-based) of the missing node)
# Running a separate node allows you to test sync as well as see what the action
# looks like from a single node's perspective.
```
By default all validators are loaded within the beacon nodes, but if you want to use external processes as validator clients you can pass `BN_VC_VALIDATOR_SPLIT=yes` as an additional argument to the `make eth2_network_simulation` command. This will split the `VALIDATORS` between beacon nodes and validator clients: for example, with `192` validators and `6` nodes you will end up with 6 beacon node and 6 validator client processes, each handling 16 validators, as the sketch below shows.
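To make the split concrete, here is a minimal Nim sketch of that arithmetic (the variable names are illustrative, not part of the build system):

```nim
# Hypothetical illustration of BN_VC_VALIDATOR_SPLIT=yes with the numbers above.
let
  validators = 192
  nodes = 6
  processes = nodes * 2                  # 6 beacon nodes + 6 validator clients
  validatorsPerProcess = validators div processes
doAssert validatorsPerProcess == 16      # each process handles 16 validators
```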
You can also separate the output from each beacon node in its own panel, using [multitail](http://www.vanheusden.com/multitail/):
```bash
make USE_MULTITAIL="yes" eth2_network_simulation
```
You can find out more about it in the [development update](https://our.status.im/nimbus-development-update-2018-12-2/).
_Alternatively, fire up our [experimental Vagrant instance with Nim pre-installed](https://our.status.im/setting-up-a-local-vagrant-environment-for-nim-development/) and give us your feedback about the process!_
### Visualising simulation metrics
The [generic instructions from the Nimbus repo](https://github.com/status-im/nimbus/#metric-visualisation) apply here as well.
Specific steps:
```bash
# This will generate the Prometheus config on the fly, based on the number of
# nodes (which you can control by passing something like NODES=6 to `make`).
# The `-d:insecure` flag starts an HTTP server from which the Prometheus daemon will pull the metrics.
make VALIDATORS=192 NODES=6 USER_NODES=0 NIMFLAGS="-d:insecure" eth2_network_simulation
# In another terminal tab, after the sim started:
cd tests/simulation/prometheus
prometheus
```
The dashboard you need to import in Grafana is "grafana/beacon\_nodes\_Grafana\_dashboard.json".
![monitoring dashboard](./media/monitoring.png)
### Network inspection
The [inspector tool](./beacon_chain/inspector.nim) can help monitor the libp2p network and the various channels where blocks and attestations are being transmitted, showing message and connectivity metadata. By default, it will monitor all ethereum 2 gossip traffic.
```bash
. ./env.sh
# Build inspector for minimal config:
./env.sh nim c -d:const_preset=minimal -o:build/inspector_minimal beacon_chain/inspector.nim
# Build inspector for mainnet config:
./env.sh nim c -d:const_preset=mainnet -o:build/inspector_mainnet beacon_chain/inspector.nim
# See available options
./env.sh build/inspector_minimal --help
# Connect to a network using a bootstrap file from the eth2-testnets repo; the --decode option attempts to decode the messages as well
./env.sh build/inspector_minimal --decode -b:$(curl -s https://raw.githubusercontent.com/eth2-clients/eth2-testnets/master/nimbus/testnet0/bootstrap_nodes.txt | head -n1)
```
## For developers
Latest updates happen in the `devel` branch, which is merged into `master` every week on Tuesday before deploying new testnets.
Interesting Make variables and targets are documented in the [nimbus-build-system](https://github.com/status-im/nimbus-build-system) repo.
The following sections explain how to set up your build environment on your platform.
### Windows dev environment
Install Mingw-w64 for your architecture using the "[MinGW-W64 Online
Installer](https://sourceforge.net/projects/mingw-w64/files/)" (first link
under the directory listing). Run it and select your architecture in the setup
menu ("i686" on 32-bit, "x86\_64" on 64-bit), set the threads to "win32" and
the exceptions to "dwarf" on 32-bit and "seh" on 64-bit. Change the
installation directory to "C:\mingw-w64" and add it to your system PATH in "My
Computer"/"This PC" -> Properties -> Advanced system settings -> Environment
Variables -> Path -> Edit -> New -> C:\mingw-w64\mingw64\bin (it's "C:\mingw-w64\mingw32\bin" on 32-bit)
Install [Git for Windows](https://gitforwindows.org/) and use a "Git Bash" shell to clone and build nim-beacon-chain.
Install [CMake](https://cmake.org/) to be able to build libunwind (used for [lightweight stack traces](https://github.com/status-im/nim-libbacktrace)).
When running the tests, you might hit some Windows path length limits. Increase them by editing the Registry in a PowerShell instance with administrator privileges:
```powershell
Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem' -Name 'LongPathsEnabled' -Value 1
```
and run this in a "Git Bash" terminal:
```bash
git config --global core.longpaths true
```
If you don't want to compile PCRE separately, you can fetch pre-compiled DLLs with:
```bash
mingw32-make fetch-dlls # this will place the right DLLs for your architecture in the "build/" directory
```
> If you were following the Windows testnet instructions, you can jump back to [Connecting to testnets](#connecting-to-testnets) now
You can now follow those instructions in the previous section by replacing `make` with `mingw32-make` (regardless of your 32-bit or 64-bit architecture):
```bash
mingw32-make test # run the test suite
```
### Linux, MacOS
After cloning the repo:
```bash
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date.
make
# Run tests
make test
# Update to latest version
git pull
make update
```
To run a command that might use binaries from the Status Nim fork:
```bash
./env.sh bash # start a new interactive shell with the right env vars set
which nim
nim --version # Nimbus is tested and supported on 1.0.2 at the moment
# or without starting a new interactive shell:
./env.sh which nim
./env.sh nim --version
```
### Raspberry Pi
We recommend you remove any cover or use a fan; the Raspberry Pi will get hot (85°C) and throttle.
* Raspberry Pi 3b+ or Raspberry Pi 4b.
* 64GB SD card (less might work too, but the default recommended 4-8GB will probably be too small)
* [Raspbian Buster Lite](https://www.raspberrypi.org/downloads/raspbian/) - Lite version is enough to get going and will save some disk space!
Assuming you're working with a freshly written image:
```bash
# Start by increasing the swap size to 2GB:
sudo vi /etc/dphys-swapfile
# Set CONF_SWAPSIZE=2048
# :wq
sudo reboot
# Install prerequisites
sudo apt-get install git libgflags-dev libsnappy-dev libpcre3-dev
# Then you can follow instructions for Linux.
```
### Makefile tips and tricks for developers
- build all those tools known to the Makefile:
```bash
# $(nproc) corresponds to the number of cores you have
make -j$(nproc)
```
- build a specific tool:
```bash
make state_sim
```
- you can control the Makefile's verbosity with the V variable (defaults to 0):
```bash
make V=1 # verbose
make V=2 test # even more verbose
```
- same for the [Chronicles log level](https://github.com/status-im/nim-chronicles#chronicles_log_level):
```bash
make LOG_LEVEL=DEBUG bench_bls_sig_agggregation # this is the default
make LOG_LEVEL=TRACE beacon_node # log everything
```
- pass arbitrary parameters to the Nim compiler:
```bash
make NIMFLAGS="-d:release"
```
- you can freely combine those variables on the `make` command line:
```bash
make -j$(nproc) NIMFLAGS="-d:release" USE_MULTITAIL=yes eth2_network_simulation
```
- don't use the [lightweight stack tracing implementation from nim-libbacktrace](https://github.com/status-im/nim-beacon-chain/pull/745):
```bash
make USE_LIBBACKTRACE=0 # expect the resulting binaries to be 2-3 times slower
```
- publish a book using [mdBook](https://github.com/rust-lang/mdBook) from sources in "docs/" to GitHub pages:
```bash
make publish-book
```
### CI setup
Local testnets run for 4 epochs each, to test finalization. That happens only on Linux Jenkins hosts, and their logs are available for download as artifacts from the job's page. Don't expect these artifacts to be kept for more than a day after the corresponding branch is deleted.
![Jenkins artifacts](./media/jenkins_artifacts.png)
## License
Licensed and distributed under either of
- MIT license: [LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT
* MIT license: [LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT
or
- Apache License, Version 2.0, ([LICENSE-APACHEv2](LICENSE-APACHEv2) or http://www.apache.org/licenses/LICENSE-2.0)
* Apache License, Version 2.0, ([LICENSE-APACHEv2](LICENSE-APACHEv2) or http://www.apache.org/licenses/LICENSE-2.0)
at your option. These files may not be copied, modified, or distributed except according to those terms.


@@ -16,7 +16,7 @@ jobs:
- task: CacheBeta@1
displayName: 'cache Nim binaries'
inputs:
key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" | "v4"
key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" | "v7"
path: NimBinaries
- task: CacheBeta@1
@@ -65,10 +65,14 @@
mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update
mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls
mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE
mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image"
if [[ $PLATFORM == "x64" ]]; then
# everything builds more slowly on 32-bit, since there's no libbacktrace support
mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image"
fi
file build/beacon_node
# fail fast
export NIMTEST_ABORT_ON_ERROR=1
scripts/setup_official_tests.sh jsonTestsCache
mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test
displayName: 'build and test'


@@ -37,7 +37,7 @@ requires "nim >= 0.19.0",
"yaml"
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", cmdParams = "", lang = "c") =
proc buildAndRunBinary(name: string, srcDir = "./", params = "", cmdParams = "", lang = "c") =
if not dirExists "build":
mkDir "build"
# allow something like "nim test --verbosity:0 --hints:off beacon_chain.nims"
@@ -47,7 +47,7 @@ proc buildBinary(name: string, srcDir = "./", params = "", cmdParams = "", lang
exec "nim " & lang & " --out:./build/" & name & " -r " & extra_params & " " & srcDir & name & ".nim" & " " & cmdParams
task moduleTests, "Run all module tests":
buildBinary "beacon_node", "beacon_chain/",
buildAndRunBinary "beacon_node", "beacon_chain/",
"-d:chronicles_log_level=TRACE " &
"-d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\" " &
"-d:testutils_test_build"
@@ -58,35 +58,27 @@ task test, "Run all tests":
# pieces of code get tested regularly. Increased test output verbosity is the
# price we pay for that.
# Minimal config
buildBinary "proto_array", "beacon_chain/fork_choice/", "-d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildBinary "fork_choice", "beacon_chain/fork_choice/", "-d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildBinary "all_tests", "tests/", "-d:chronicles_log_level=TRACE -d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
# Mainnet config
buildBinary "proto_array", "beacon_chain/fork_choice/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildBinary "fork_choice", "beacon_chain/fork_choice/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildBinary "all_tests", "tests/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "proto_array", "beacon_chain/fork_choice/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "fork_choice", "beacon_chain/fork_choice/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "all_tests", "tests/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
# Generic SSZ test, doesn't use consensus objects minimal/mainnet presets
buildBinary "test_fixture_ssz_generic_types", "tests/official/", "-d:chronicles_log_level=TRACE"
buildAndRunBinary "test_fixture_ssz_generic_types", "tests/official/", "-d:chronicles_log_level=TRACE"
# Consensus object SSZ tests
# 0.11.3
buildBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=minimal -d:ETH2_SPEC=\"v0.11.3\""
buildBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.11.3\""
buildAndRunBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.11.3\""
# 0.12.1
buildBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "test_fixture_ssz_consensus_objects", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
# 0.11.3
buildBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=minimal -d:ETH2_SPEC=\"v0.11.3\""
buildBinary "all_fixtures_require_ssz", "tests/official/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.11.3\""
buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.11.3\""
# 0.12.1
buildBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildBinary "all_fixtures_require_ssz", "tests/official/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\""
# State sim; getting into 4th epoch useful to trigger consensus checks
buildBinary "state_sim", "research/", "-d:const_preset=minimal -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\"", "--validators=2000 --slots=32"
buildBinary "state_sim", "research/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\"", "--validators=2000 --slots=128"
buildAndRunBinary "state_sim", "research/", "-d:const_preset=mainnet -d:ETH2_SPEC=\"v0.12.1\" -d:BLS_ETH2_SPEC=\"v0.12.x\"", "--validators=2000 --slots=128"


@@ -9,10 +9,13 @@
import
options, chronicles,
./spec/[beaconstate, datatypes, crypto, digest, helpers, validator,
state_transition_block],
./spec/[
beaconstate, datatypes, crypto, digest, helpers, validator, signatures],
./block_pool, ./attestation_pool, ./beacon_node_types, ./ssz
logScope:
topics = "att_aggr"
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregation-selection
func is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex,
slot_signature: ValidatorSig): bool =
@@ -21,7 +24,7 @@ func is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex,
let
committee = get_beacon_committee(state, slot, index, cache)
modulo = max(1, len(committee) div TARGET_AGGREGATORS_PER_COMMITTEE).uint64
bytes_to_int(eth2hash(slot_signature.toRaw()).data[0..7]) mod modulo == 0
bytes_to_int(eth2digest(slot_signature.toRaw()).data[0..7]) mod modulo == 0
proc aggregate_attestations*(
pool: AttestationPool, state: BeaconState, index: CommitteeIndex,
@@ -75,17 +78,20 @@ proc aggregate_attestations*(
proc isValidAttestation*(
pool: AttestationPool, attestation: Attestation, current_slot: Slot,
topicCommitteeIndex: uint64): bool =
logScope:
topics = "att_aggr valid_att"
received_attestation = shortLog(attestation)
# The attestation's committee index (attestation.data.index) is for the
# correct subnet.
if attestation.data.index != topicCommitteeIndex:
debug "isValidAttestation: attestation's committee index not for the correct subnet",
topicCommitteeIndex = topicCommitteeIndex,
attestation_data_index = attestation.data.index
debug "attestation's committee index not for the correct subnet",
topicCommitteeIndex = topicCommitteeIndex
return false
if not (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >=
current_slot and current_slot >= attestation.data.slot):
debug "isValidAttestation: attestation.data.slot not within ATTESTATION_PROPAGATION_SLOT_RANGE"
debug "attestation.data.slot not within ATTESTATION_PROPAGATION_SLOT_RANGE"
return false
# The attestation is unaggregated -- that is, it has exactly one
@@ -100,11 +106,10 @@ proc isValidAttestation*(
continue
onesCount += 1
if onesCount > 1:
debug "isValidAttestation: attestation has too many aggregation bits",
aggregation_bits = attestation.aggregation_bits
debug "attestation has too many aggregation bits"
return false
if onesCount != 1:
debug "isValidAttestation: attestation has too few aggregation bits"
debug "attestation has too few aggregation bits"
return false
# The attestation is the first valid attestation received for the
@@ -117,9 +122,7 @@ proc isValidAttestation*(
# Attestations might be aggregated eagerly or lazily; allow for both.
for validation in attestationEntry.validations:
if attestation.aggregation_bits.isSubsetOf(validation.aggregation_bits):
debug "isValidAttestation: attestation already exists at slot",
attestation_data_slot = attestation.data.slot,
attestation_aggregation_bits = attestation.aggregation_bits,
debug "attestation already exists at slot",
attestation_pool_validation = validation.aggregation_bits
return false
@@ -131,8 +134,7 @@ proc isValidAttestation*(
# propagated - i.e. imagine that attestations are smaller than blocks and
# therefore propagate faster, thus reordering their arrival in some nodes
if pool.blockPool.get(attestation.data.beacon_block_root).isNone():
debug "isValidAttestation: block doesn't exist in block pool",
attestation_data_beacon_block_root = attestation.data.beacon_block_root
debug "block doesn't exist in block pool"
return false
# The signature of attestation is valid.
@@ -143,7 +145,7 @@ proc isValidAttestation*(
pool.blockPool.headState.data.data,
get_indexed_attestation(
pool.blockPool.headState.data.data, attestation, cache), {}):
debug "isValidAttestation: signature verification failed"
debug "signature verification failed"
return false
true
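For a sense of how selective `is_aggregator` is, here is a hedged worked example of the modulo computed above (the committee size is assumed; `TARGET_AGGREGATORS_PER_COMMITTEE = 16` matches the minimal-preset constant listed earlier in this commit):

```nim
# Worked example of the aggregator-selection modulo from is_aggregator.
const TARGET_AGGREGATORS_PER_COMMITTEE = 16  # minimal-preset value (assumed here)
let committeeLen = 64                        # assumed committee size
let modulo = max(1, committeeLen div TARGET_AGGREGATORS_PER_COMMITTEE)
# A validator aggregates when the first 8 bytes of eth2digest(slot_signature),
# read as an integer, are divisible by `modulo` - a 1-in-4 chance here, giving
# ~16 expected aggregators per committee.
echo modulo  # 4
```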


@@ -1,8 +1,8 @@
{.push raises: [Defect].}
import
typetraits, stew/[results, endians2],
serialization, chronicles,
typetraits, stew/[results, objects, endians2],
serialization, chronicles, snappy,
eth/db/kvstore,
./spec/[datatypes, digest, crypto],
./ssz/[ssz_serialization, merkleization], ./state_transition
@@ -66,14 +66,39 @@ func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
proc init*(T: type BeaconChainDB, backend: KVStoreRef): BeaconChainDB =
T(backend: backend)
proc snappyEncode(inp: openArray[byte]): seq[byte] =
try:
snappy.encode(inp)
except CatchableError as err:
raiseAssert err.msg
proc put(db: BeaconChainDB, key: openArray[byte], v: Eth2Digest) =
db.backend.put(key, v.data).expect("working database")
proc put(db: BeaconChainDB, key: openArray[byte], v: auto) =
db.backend.put(key, SSZ.encode(v)).expect("working database")
db.backend.put(key, snappyEncode(SSZ.encode(v))).expect("working database")
proc get(db: BeaconChainDB, key: openArray[byte], T: type Eth2Digest): Opt[T] =
var res: Opt[T]
proc decode(data: openArray[byte]) =
if data.len == 32:
res.ok Eth2Digest(data: toArray(32, data))
else:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
typ = name(T), dataLen = data.len
discard
discard db.backend.get(key, decode).expect("working database")
res
proc get(db: BeaconChainDB, key: openArray[byte], T: typedesc): Opt[T] =
var res: Opt[T]
proc decode(data: openArray[byte]) =
try:
res.ok SSZ.decode(data, T)
res.ok SSZ.decode(snappy.decode(data), T)
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
@@ -99,8 +124,7 @@ proc putState*(db: BeaconChainDB, value: BeaconState) =
proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
value: Eth2Digest) =
db.backend.put(subkey(root, slot), value.data).expect(
"working database")
db.put(subkey(root, slot), value)
proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
db.putBlock(hash_tree_root(value.message), value)
@@ -116,10 +140,10 @@ proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
db.backend.del(subkey(root, slot)).expect("working database")
proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.put(subkey(kHeadBlock), key.data).expect("working database")
db.put(subkey(kHeadBlock), key)
proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.put(subkey(kTailBlock), key.data).expect("working database")
db.put(subkey(kTailBlock), key)
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[SignedBeaconBlock] =
db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)
@@ -141,7 +165,7 @@ proc getState*(
proc decode(data: openArray[byte]) =
try:
# TODO can't write to output directly..
outputAddr[] = SSZ.decode(data, BeaconState)
assign(outputAddr[], SSZ.decode(snappy.decode(data), BeaconState))
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
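The net effect of this file's changes is that every stored value is now Snappy-compressed SSZ; a minimal roundtrip sketch using the same `SSZ.encode`/`snappy.decode` calls as the diff (it assumes compilation inside this repo so the SSZ module resolves, and `Foo` is a hypothetical type):

```nim
import snappy
import ./ssz/ssz_serialization   # provides SSZ.encode / SSZ.decode

type Foo = object                # hypothetical value type, for illustration only
  a: uint64

let value = Foo(a: 42'u64)
let stored = snappy.encode(SSZ.encode(value))        # what `put` now writes
let loaded = SSZ.decode(snappy.decode(stored), Foo)  # what `get` now reads
doAssert loaded == value
```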


@@ -7,7 +7,7 @@
import
# Standard library
os, tables, random, strutils, times, math,
algorithm, os, tables, random, strutils, times, math,
# Nimble packages
stew/[objects, byteutils], stew/shims/macros,
@@ -25,12 +25,13 @@ import
beacon_node_common, beacon_node_types, block_pools/block_pools_types,
nimbus_binary_common,
mainchain_monitor, version, ssz/[merkleization], sszdump,
sync_protocol, request_manager, validator_keygen, interop, statusbar,
sync_protocol, request_manager, keystore_management, interop, statusbar,
sync_manager, state_transition,
validator_duties, validator_api, attestation_aggregation
const
genesisFile* = "genesis.ssz"
timeToInitNetworkingBeforeGenesis = chronos.seconds(10)
hasPrompt = not defined(withoutPrompt)
type
@@ -145,7 +146,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
if genesisState.isNil:
# Didn't work, try creating a genesis state using main chain monitor
# TODO Could move this to a separate "GenesisMonitor" process or task
# that would do only this - see
# that would do only this - see Paul's proposal for this.
if conf.web3Url.len > 0 and conf.depositContractAddress.len > 0:
mainchainMonitor = MainchainMonitor.init(
web3Provider(conf.web3Url),
@@ -212,7 +213,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
nickname: nickname,
network: network,
netKeys: netKeys,
requestManager: RequestManager.init(network),
db: db,
config: conf,
attachedValidators: ValidatorPool.init(),
@@ -226,8 +226,15 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
topicAggregateAndProofs: topicAggregateAndProofs,
)
# TODO sync is called when a remote peer is connected - is that the right
# time to do so?
res.requestManager = RequestManager.init(network,
proc(signedBlock: SignedBeaconBlock) =
onBeaconBlock(res, signedBlock)
)
traceAsyncErrors res.addLocalValidators()
# This merely configures the BeaconSync
# The traffic will be started when we join the network.
network.initBeaconSync(blockPool, enrForkId.forkDigest,
proc(signedBlock: SignedBeaconBlock) =
if signedBlock.message.slot.isEpoch:
@@ -251,12 +258,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
return res
proc connectToNetwork(node: BeaconNode) {.async.} =
await node.network.connectToNetwork()
let addressFile = node.config.dataDir / "beacon_node.address"
writeFile(addressFile, node.network.announcedENR.toURI)
proc onAttestation(node: BeaconNode, attestation: Attestation) =
# We received an attestation from the network but don't know much about it
# yet - in particular, we haven't verified that it belongs to particular chain
@@ -290,6 +291,22 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
node.attestationPool.add(attestation)
proc dumpBlock[T](
node: BeaconNode, signedBlock: SignedBeaconBlock,
res: Result[T, BlockError]) =
if node.config.dumpEnabled and res.isErr:
case res.error
of Invalid:
dump(
node.config.dumpDirInvalid, signedBlock,
hash_tree_root(signedBlock.message))
of MissingParent:
dump(
node.config.dumpDirIncoming, signedBlock,
hash_tree_root(signedBlock.message))
else:
discard
proc storeBlock(
node: BeaconNode, signedBlock: SignedBeaconBlock): Result[void, BlockError] =
let blockRoot = hash_tree_root(signedBlock.message)
@@ -299,27 +316,16 @@ proc storeBlock(
cat = "block_listener",
pcs = "receive_block"
if node.config.dumpEnabled:
dump(node.config.dumpDir / "incoming", signedBlock, blockRoot)
beacon_blocks_received.inc()
let blck = node.blockPool.add(blockRoot, signedBlock)
if blck.isErr:
if blck.error == Invalid and node.config.dumpEnabled:
let parent = node.blockPool.getRef(signedBlock.message.parent_root)
if parent != nil:
node.blockPool.withState(
node.blockPool.tmpState, parent.atSlot(signedBlock.message.slot - 1)):
dump(node.config.dumpDir / "invalid", hashedState, parent)
dump(node.config.dumpDir / "invalid", signedBlock, blockRoot)
node.dumpBlock(signedBlock, blck)
if blck.isErr:
return err(blck.error)
# The block we received contains attestations, and we might not yet know about
# all of them. Let's add them to the attestation pool - in case the block
# is not yet resolved, neither will the attestations be!
# But please note that we only care about recent attestations.
# TODO shouldn't add attestations if the block turns out to be invalid..
# all of them. Let's add them to the attestation pool.
let currentSlot = node.beaconClock.now.toSlot
if currentSlot.afterGenesis and
signedBlock.message.slot.epoch + 1 >= currentSlot.slot.epoch:
@@ -499,21 +505,8 @@ proc handleMissingBlocks(node: BeaconNode) =
let missingBlocks = node.blockPool.checkMissing()
if missingBlocks.len > 0:
var left = missingBlocks.len
info "Requesting detected missing blocks", missingBlocks
node.requestManager.fetchAncestorBlocks(missingBlocks) do (b: SignedBeaconBlock):
onBeaconBlock(node, b)
# TODO instead of waiting for a full second to try the next missing block
# fetching, we'll do it here again in case we get all blocks we asked
# for (there might be new parents to fetch). Of course, this is not
# good because the onSecond fetching also kicks in regardless but
# whatever - this is just a quick fix for making the testnet easier to
# work with while the sync problem is dealt with more systematically
# dec left
# if left == 0:
# discard setTimer(Moment.now()) do (p: pointer):
# handleMissingBlocks(node)
info "Requesting detected missing blocks", blocks = shortLog(missingBlocks)
node.requestManager.fetchAncestorBlocks(missingBlocks)
proc onSecond(node: BeaconNode) {.async.} =
## This procedure will be called once per second.
@ -538,10 +531,14 @@ proc runForwardSyncLoop(node: BeaconNode) {.async.} =
result = node.blockPool.head.blck.slot
proc getLocalWallSlot(): Slot {.gcsafe.} =
let epoch = node.beaconClock.now().toSlot().slot.compute_epoch_at_slot() +
let epoch = node.beaconClock.now().slotOrZero.compute_epoch_at_slot() +
1'u64
result = epoch.compute_start_slot_at_epoch()
proc getFirstSlotAtFinalizedEpoch(): Slot {.gcsafe.} =
let fepoch = node.blockPool.headState.data.data.finalized_checkpoint.epoch
compute_start_slot_at_epoch(fepoch)
proc updateLocalBlocks(list: openarray[SignedBeaconBlock]): Result[void, BlockError] =
debug "Forward sync imported blocks", count = len(list),
local_head_slot = getLocalHeadSlot()
@ -583,7 +580,7 @@ proc runForwardSyncLoop(node: BeaconNode) {.async.} =
node.syncManager = newSyncManager[Peer, PeerID](
node.network.peerPool, getLocalHeadSlot, getLocalWallSlot,
updateLocalBlocks,
getFirstSlotAtFinalizedEpoch, updateLocalBlocks,
# 4 blocks per chunk is the optimal value right now, because our current
# syncing speed is around 4 blocks per second. So there is no need to request
# more than 4 blocks right now. As soon as `store_speed` value becomes
@ -598,7 +595,7 @@ proc currentSlot(node: BeaconNode): Slot =
node.beaconClock.now.slotOrZero
proc connectedPeersCount(node: BeaconNode): int =
libp2p_peers.value.int
nbc_peers.value.int
proc fromJson(n: JsonNode; argName: string; result: var Slot) =
var i: int
@ -699,6 +696,22 @@ proc installDebugApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
return res
rpcServer.rpc("peers") do () -> JsonNode:
var res = newJObject()
var peers = newJArray()
for id, peer in node.network.peerPool:
peers.add(
%(
info: shortLog(peer.info),
wasDialed: peer.wasDialed,
connectionState: $peer.connectionState,
score: peer.score,
)
)
res.add("peers", peers)
return res
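For illustration, a standalone snippet that builds the same response shape with invented values (the peer id, connection state and score below are made up):

```nim
import json
# Invented example data showing the shape of the "peers" RPC response:
let res = %*{
  "peers": [
    {
      "info": "16Uiu2HAm...",          # shortLog of the peer's libp2p info
      "wasDialed": true,
      "connectionState": "Connected",
      "score": 200
    }
  ]
}
echo res.pretty
```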
proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) =
rpcServer.installValidatorApiHandlers(node)
rpcServer.installBeaconApiHandlers(node)
@ -729,12 +742,20 @@ proc installAttestationHandlers(node: BeaconNode) =
for it in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
closureScope:
let ci = it
attestationSubscriptions.add(node.network.subscribe(
getMainnetAttestationTopic(node.forkDigest, ci), attestationHandler,
# This proc needs to be within closureScope; don't lift out of loop.
proc(attestation: Attestation): bool =
attestationValidator(attestation, ci)
))
when ETH2_SPEC == "v0.12.1":
attestationSubscriptions.add(node.network.subscribe(
getAttestationTopic(node.forkDigest, ci), attestationHandler,
# This proc needs to be within closureScope; don't lift out of loop.
proc(attestation: Attestation): bool =
attestationValidator(attestation, ci)
))
else:
attestationSubscriptions.add(node.network.subscribe(
getMainnetAttestationTopic(node.forkDigest, ci), attestationHandler,
# This proc needs to be within closureScope; don't lift out of loop.
proc(attestation: Attestation): bool =
attestationValidator(attestation, ci)
))
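A self-contained illustration (not part of this change) of the capture pitfall those comments guard against: without `closureScope`, every closure would share the single loop variable and observe its final value.

```nim
# Each iteration gets a fresh `ci` binding thanks to closureScope.
var handlers: seq[proc (): uint64 {.closure.}]
for it in 0'u64 ..< 4'u64:
  closureScope:
    let ci = it
    handlers.add(proc (): uint64 = ci)
doAssert handlers[0]() == 0'u64   # without closureScope this would be 3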
when ETH2_SPEC == "v0.11.3":
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#interop-3
@ -768,7 +789,11 @@ proc run*(node: BeaconNode) =
let (afterGenesis, slot) = node.beaconClock.now.toSlot()
if not afterGenesis:
return false
node.blockPool.isValidBeaconBlock(signedBlock, slot, {})
let blck = node.blockPool.isValidBeaconBlock(signedBlock, slot, {})
node.dumpBlock(signedBlock, blck)
blck.isOk
installAttestationHandlers(node)
@ -789,6 +814,8 @@ proc run*(node: BeaconNode) =
node.onSecondLoop = runOnSecondLoop(node)
node.forwardSyncLoop = runForwardSyncLoop(node)
node.requestManager.start()
# main event loop
while status == BeaconNodeStatus.Running:
try:
@ -806,17 +833,25 @@ proc createPidFile(filename: string) =
gPidFile = filename
addQuitProc proc {.noconv.} = removeFile gPidFile
proc start(node: BeaconNode) =
# TODO: while it's nice to cheat by waiting for connections here, we
# actually need to make this part of normal application flow -
# losing all connections might happen at any time and we should be
# prepared to handle it.
waitFor node.connectToNetwork()
proc initializeNetworking(node: BeaconNode) {.async.} =
node.network.startListening()
let addressFile = node.config.dataDir / "beacon_node.address"
writeFile(addressFile, node.network.announcedENR.toURI)
await node.network.startLookingForPeers()
proc start(node: BeaconNode) =
let
head = node.blockPool.head
finalizedHead = node.blockPool.finalizedHead
let genesisTime = node.beaconClock.fromNow(toBeaconTime(Slot 0))
if genesisTime.inFuture and genesisTime.offset > timeToInitNetworkingBeforeGenesis:
info "Waiting for the genesis event", genesisIn = genesisTime.offset
waitFor sleepAsync(genesisTime.offset - timeToInitNetworkingBeforeGenesis)
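The sleep above is plain duration arithmetic; a sketch with invented figures (the actual `timeToInitNetworkingBeforeGenesis` constant is defined elsewhere and not shown here):

```nim
import chronos
# If genesis is 10 minutes away and, say, 30 s are reserved for bringing
# networking up, the node sleeps 9 min 30 s before continuing with startup.
let
  offset = 10.minutes    # invented stand-in for genesisTime.offset
  reserve = 30.seconds   # stand-in for timeToInitNetworkingBeforeGenesis
echo offset - reserve    # 9 minutes and 30 seconds
```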
info "Starting beacon node",
version = fullVersionStr,
timeSinceFinalization =
@ -833,12 +868,7 @@ proc start(node: BeaconNode) =
cat = "init",
pcs = "start_beacon_node"
let
bs = BlockSlot(blck: head.blck, slot: head.blck.slot)
node.blockPool.withState(node.blockPool.tmpState, bs):
node.addLocalValidators(state)
waitFor node.initializeNetworking()
node.run()
func formatGwei(amount: uint64): string =
@ -992,10 +1022,25 @@ programMain:
case config.cmd
of createTestnet:
var deposits: seq[Deposit]
for i in config.firstValidator.int ..< config.totalValidators.int:
let depositFile = config.validatorsDir /
validatorFileBaseName(i) & ".deposit.json"
var
depositDirs: seq[string]
deposits: seq[Deposit]
i = -1
for kind, dir in walkDir(config.testnetDepositsDir.string):
if kind != pcDir:
continue
inc i
if i < config.firstValidator.int:
continue
depositDirs.add dir
# Add deposits, in order, to pass Merkle validation
sort(depositDirs, system.cmp)
for dir in depositDirs:
let depositFile = dir / "deposit.json"
try:
deposits.add Json.loadFile(depositFile, Deposit)
except SerializationError as err:
@ -1011,7 +1056,7 @@ programMain:
else: waitFor getLatestEth1BlockHash(config.web3Url)
var
initialState = initialize_beacon_state_from_eth1(
eth1Hash, startTime, deposits, {skipBlsValidation, skipMerkleValidation})
eth1Hash, startTime, deposits, {skipBlsValidation})
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
initialState.genesis_time = startTime
@ -1068,9 +1113,7 @@ programMain:
createPidFile(config.dataDir.string / "beacon_node.pid")
if config.dumpEnabled:
createDir(config.dumpDir)
createDir(config.dumpDir / "incoming")
config.createDumpDirs()
var node = waitFor BeaconNode.init(config)
@ -1092,15 +1135,14 @@ programMain:
node.start()
of makeDeposits:
createDir(config.depositsDir)
createDir(config.outValidatorsDir)
createDir(config.outSecretsDir)
let
quickstartDeposits = generateDeposits(
config.totalQuickstartDeposits, config.depositsDir, false)
randomDeposits = generateDeposits(
config.totalRandomDeposits, config.depositsDir, true,
firstIdx = config.totalQuickstartDeposits)
deposits = generateDeposits(
config.totalDeposits,
config.outValidatorsDir,
config.outSecretsDir).tryGet
if config.web3Url.len > 0 and config.depositContractAddress.len > 0:
if config.minDelay > config.maxDelay:
@ -1117,7 +1159,7 @@ programMain:
depositContract = config.depositContractAddress
waitFor sendDeposits(
quickstartDeposits & randomDeposits,
deposits,
config.web3Url,
config.depositContractAddress,
config.depositPrivateKey,

View File

@ -171,7 +171,8 @@ proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =
proc loadTailState*(pool: BlockPool): StateData =
loadTailState(pool.dag)
proc isValidBeaconBlock*(pool: var BlockPool,
signed_beacon_block: SignedBeaconBlock,
current_slot: Slot, flags: UpdateFlags): bool =
isValidBeaconBlock(pool.dag, pool.quarantine, signed_beacon_block, current_slot, flags)
proc isValidBeaconBlock*(
pool: var BlockPool, signed_beacon_block: SignedBeaconBlock,
current_slot: Slot, flags: UpdateFlags): Result[void, BlockError] =
isValidBeaconBlock(
pool.dag, pool.quarantine, signed_beacon_block, current_slot, flags)

View File

@ -37,7 +37,7 @@ type
##
## Invalid blocks are dropped immediately.
pending*: Table[Eth2Digest, SignedBeaconBlock] ##\
orphans*: Table[Eth2Digest, SignedBeaconBlock] ##\
## Blocks that have passed validation but that we lack a link back to the tail
## for - when we receive a "missing link", we can use this data to build
## an entire branch
@ -49,12 +49,10 @@ type
inAdd*: bool
MissingBlock* = object
slots*: uint64 # number of slots that are suspected missing
tries*: int
FetchRecord* = object
root*: Eth2Digest
historySlots*: uint64
CandidateChains* = ref object
## Pool of blocks responsible for keeping a DAG of resolved blocks.

View File

@ -53,7 +53,7 @@ func parent*(bs: BlockSlot): BlockSlot =
slot: bs.slot - 1
)
func populateEpochCache*(state: BeaconState, epoch: Epoch): EpochRef =
func populateEpochCache(state: BeaconState, epoch: Epoch): EpochRef =
result = (EpochRef)(
epoch: state.slot.compute_epoch_at_slot,
shuffled_active_validator_indices:
@ -148,7 +148,7 @@ func getEpochInfo*(blck: BlockRef, state: BeaconState): EpochRef =
if matching_epochinfo.len == 0:
let cache = populateEpochCache(state, state_epoch)
blck.epochsInfo.add(cache)
trace "candidate_chains.skipAndUpdateState(): back-filling parent.epochInfo",
trace "candidate_chains.getEpochInfo: back-filling parent.epochInfo",
state_slot = state.slot
cache
elif matching_epochinfo.len == 1:
@ -301,7 +301,7 @@ proc getState(
# Nonetheless, this is an ugly workaround that needs to go away
doAssert false, "Cannot alias headState"
outputAddr[] = dag.headState
assign(outputAddr[], dag.headState)
if not db.getState(stateRoot, output.data.data, restore):
return false
@ -313,12 +313,50 @@ proc getState(
func getStateCacheIndex(dag: CandidateChains, blockRoot: Eth2Digest, slot: Slot): int =
for i, cachedState in dag.cachedStates:
let (cacheBlockRoot, cacheSlot, state) = cachedState
let (cacheBlockRoot, cacheSlot, _) = cachedState
if cacheBlockRoot == blockRoot and cacheSlot == slot:
return i
-1
func putStateCache(
dag: CandidateChains, state: HashedBeaconState, blck: BlockRef) =
# Need to be able to efficiently access states for both attestation
# aggregation and to process block proposals going back to the last
# finalized slot. Ideally to avoid potential combinatorial forking
# storage and/or memory constraints could CoW, up to and including,
# in particular, hash_tree_root() which is expensive to do 30 times
# since the previous epoch, to efficiently state_transition back to
# desired slot. However, none of that's in place, so there are both
# expensive, repeated BeaconState copies as well as computationally
# time-consuming-near-end-of-epoch hash tree roots. The latter are,
# effectively, naïvely O(n^2) in slot number otherwise, so when the
# slots become in the mid-to-high-20s it's spending all its time in
# pointlessly repeated calculations of prefix-state-transitions. An
# intermediate time/memory workaround involves storing only mapping
# between BlockRefs, or BlockSlots, and the BeaconState tree roots,
# but that still involves tens of megabytes worth of copying, along
# with the concomitant memory allocator and GC load. Instead, use a
# more memory-intensive (but more conceptually straightforward, and
# faster) strategy of just storing full states for the most recent slots.
let stateCacheIndex = dag.getStateCacheIndex(blck.root, state.data.slot)
if stateCacheIndex == -1:
# Could use a deque or similar, but want simpler structure, and the data
# items are small and few.
const MAX_CACHE_SIZE = 32
let cacheLen = dag.cachedStates.len
doAssert cacheLen <= MAX_CACHE_SIZE
let entry =
if dag.cachedStates.len == MAX_CACHE_SIZE: dag.cachedStates.pop().state
else: (ref HashedBeaconState)()
assign(entry[], state)
insert(dag.cachedStates, (blck.root, state.data.slot, entry))
trace "CandidateChains.putState(): state cache updated",
cacheLen, root = shortLog(blck.root), slot = state.data.slot
proc putState*(dag: CandidateChains, state: HashedBeaconState, blck: BlockRef) =
# TODO we save state at every epoch start but never remove them - we also
# potentially save multiple states per slot if reorgs happen, meaning
@ -344,35 +382,7 @@ proc putState*(dag: CandidateChains, state: HashedBeaconState, blck: BlockRef) =
if not rootWritten:
dag.db.putStateRoot(blck.root, state.data.slot, state.root)
# Need to be able to efficiently access states for both attestation
# aggregation and to process block proposals going back to the last
# finalized slot. Ideally to avoid potential combinatorial forking
# storage and/or memory constraints could CoW, up to and including,
# in particular, hash_tree_root() which is expensive to do 30 times
# since the previous epoch, to efficiently state_transition back to
# desired slot. However, none of that's in place, so there are both
# expensive, repeated BeaconState copies as well as computationally
# time-consuming-near-end-of-epoch hash tree roots. The latter are,
# effectively, naïvely O(n^2) in slot number otherwise, so when the
# slots become in the mid-to-high-20s it's spending all its time in
# pointlessly repeated calculations of prefix-state-transitions. An
# intermediate time/memory workaround involves storing only mapping
# between BlockRefs, or BlockSlots, and the BeaconState tree roots,
# but that still involves tens of megabytes worth of copying, along
# with the concomitant memory allocator and GC load. Instead, use a
# more memory-intensive (but more conceptually straightforward, and
# faster) strategy of just storing full states for the most recent slots.
let stateCacheIndex = dag.getStateCacheIndex(blck.root, state.data.slot)
if stateCacheIndex == -1:
# Could use a deque or similar, but want simpler structure, and the data
# items are small and few.
const MAX_CACHE_SIZE = 32
insert(dag.cachedStates, (blck.root, state.data.slot, newClone(state)))
while dag.cachedStates.len > MAX_CACHE_SIZE:
discard dag.cachedStates.pop()
let cacheLen = dag.cachedStates.len
trace "CandidateChains.putState(): state cache updated", cacheLen
doAssert cacheLen > 0 and cacheLen <= MAX_CACHE_SIZE
putStateCache(dag, state, blck)
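The switch from plain `=` to `assign` throughout this file is about copy cost; a rough sketch of the semantics, assuming `stew/assign2` (an assumption; the import is not visible in this hunk):

```nim
import stew/assign2
# assign(dst, src) copies src into the existing dst location, avoiding the
# temporary that a plain `dst = src` may materialize for large value types
# such as HashedBeaconState. Shown here with a seq for brevity.
var dst: seq[int]
assign(dst, @[1, 2, 3])
doAssert dst == @[1, 2, 3]
```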
func getRef*(dag: CandidateChains, root: Eth2Digest): BlockRef =
## Retrieve a resolved block reference, if available
@ -519,7 +529,7 @@ proc rewindState(dag: CandidateChains, state: var StateData, bs: BlockSlot):
# used in the front-end.
let idx = dag.getStateCacheIndex(parBs.blck.root, parBs.slot)
if idx >= 0:
state.data = dag.cachedStates[idx].state[]
assign(state.data, dag.cachedStates[idx].state[])
let ancestor = ancestors.pop()
state.blck = ancestor.refs
@ -595,7 +605,7 @@ proc getStateDataCached(dag: CandidateChains, state: var StateData, bs: BlockSlo
let idx = dag.getStateCacheIndex(bs.blck.root, bs.slot)
if idx >= 0:
state.data = dag.cachedStates[idx].state[]
assign(state.data, dag.cachedStates[idx].state[])
state.blck = bs.blck
beacon_state_data_cache_hits.inc()
return true
@ -647,7 +657,7 @@ proc updateStateData*(dag: CandidateChains, state: var StateData, bs: BlockSlot)
let ok =
dag.skipAndUpdateState(
state, ancestors[i],
{skipBlsValidation, skipMerkleValidation, skipStateRootValidation},
{skipBlsValidation, skipStateRootValidation},
false)
doAssert ok, "Blocks in database should never fail to apply.."

View File

@ -5,13 +5,14 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
chronicles, sequtils, tables,
metrics, stew/results,
../ssz/merkleization, ../state_transition, ../extras,
../spec/[crypto, datatypes, digest, helpers],
block_pools_types, candidate_chains
../spec/[crypto, datatypes, digest, helpers, signatures],
block_pools_types, candidate_chains, quarantine
export results
@ -22,8 +23,8 @@ export results
# "quarantined" network blocks
# pass the firewall and be stored in the blockpool
logScope: topics = "clearblk"
{.push raises: [Defect].}
logScope:
topics = "clearance"
func getOrResolve*(dag: CandidateChains, quarantine: var Quarantine, root: Eth2Digest): BlockRef =
## Fetch a block ref, or nil if not found (will be added to list of
@ -31,7 +32,7 @@ func getOrResolve*(dag: CandidateChains, quarantine: var Quarantine, root: Eth2D
result = dag.getRef(root)
if result.isNil:
quarantine.missing[root] = MissingBlock(slots: 1)
quarantine.missing[root] = MissingBlock()
proc add*(
dag: var CandidateChains, quarantine: var Quarantine,
@ -98,12 +99,12 @@ proc addResolvedBlock(
defer: quarantine.inAdd = false
var keepGoing = true
while keepGoing:
let retries = quarantine.pending
let retries = quarantine.orphans
for k, v in retries:
discard add(dag, quarantine, k, v)
# Keep going for as long as the pending dag is shrinking
# TODO inefficient! so what?
keepGoing = quarantine.pending.len < retries.len
keepGoing = quarantine.orphans.len < retries.len
blockRef
proc add*(
@ -164,9 +165,9 @@ proc add*(
return err Invalid
# The block might have been in either of pending or missing - we don't want
# any more work done on its behalf
quarantine.pending.del(blockRoot)
# The block might have been in either of `orphans` or `missing` - we don't
# want any more work done on its behalf
quarantine.orphans.del(blockRoot)
# The block is resolved, now it's time to validate it to ensure that the
# blocks we add to the database are clean for the given state
@ -183,7 +184,7 @@ proc add*(
# `state_transition` that takes a `StateData` instead and updates
# the block as well
doAssert v.addr == addr poolPtr.tmpState.data
poolPtr.tmpState = poolPtr.headState
assign(poolPtr.tmpState, poolPtr.headState)
var stateCache = getEpochCache(parent, dag.tmpState.data.data)
if not state_transition(
@ -208,7 +209,7 @@ proc add*(
# the pending dag calls this function back later in a loop, so as long
# as dag.add(...) requires a SignedBeaconBlock, easier to keep them in
# pending too.
quarantine.pending[blockRoot] = signedBlock
quarantine.add(dag, signedBlock, some(blockRoot))
# TODO possibly, it makes sense to check the database - that would allow sync
# to simply fill up the database with random blocks the other clients
@ -216,7 +217,7 @@ proc add*(
# junk that's not part of the block graph
if blck.parent_root in quarantine.missing or
blck.parent_root in quarantine.pending:
blck.parent_root in quarantine.orphans:
return err MissingParent
# This is an unresolved block - put its parent on the missing list for now...
@ -231,24 +232,11 @@ proc add*(
# filter.
# TODO when we receive the block, we don't know how many others we're missing
# from that branch, so right now, we'll just do a blind guess
let parentSlot = blck.slot - 1
quarantine.missing[blck.parent_root] = MissingBlock(
slots:
# The block is at least two slots ahead - try to grab whole history
if parentSlot > dag.head.blck.slot:
parentSlot - dag.head.blck.slot
else:
# It's a sibling block from a branch that we're missing - fetch one
# epoch at a time
max(1.uint64, SLOTS_PER_EPOCH.uint64 -
(parentSlot.uint64 mod SLOTS_PER_EPOCH.uint64))
)
debug "Unresolved block (parent missing)",
blck = shortLog(blck),
blockRoot = shortLog(blockRoot),
pending = quarantine.pending.len,
orphans = quarantine.orphans.len,
missing = quarantine.missing.len,
cat = "filtering"
@ -258,7 +246,11 @@ proc add*(
proc isValidBeaconBlock*(
dag: CandidateChains, quarantine: var Quarantine,
signed_beacon_block: SignedBeaconBlock, current_slot: Slot,
flags: UpdateFlags): bool =
flags: UpdateFlags): Result[void, BlockError] =
logScope:
topics = "clearance valid_blck"
received_block = shortLog(signed_beacon_block.message)
# In general, checks are ordered from cheap to expensive. Especially, crypto
# verification could be quite a bit more expensive than the rest. This is an
# externally easy-to-invoke function by tossing network packets at the node.
@ -266,25 +258,26 @@ proc isValidBeaconBlock*(
# The block is not from a future slot
# TODO allow `MAXIMUM_GOSSIP_CLOCK_DISPARITY` leniency, especially towards
# seemingly future slots.
if not (signed_beacon_block.message.slot <= current_slot):
debug "isValidBeaconBlock: block is from a future slot",
signed_beacon_block_message_slot = signed_beacon_block.message.slot,
current_slot = current_slot
return false
# TODO using +1 here while this is being sorted - should queue these until
# they're within the DISPARITY limit
if not (signed_beacon_block.message.slot <= current_slot + 1):
debug "block is from a future slot",
current_slot
return err(Invalid)
# The block is from a slot greater than the latest finalized slot (with a
# MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. validate that
# signed_beacon_block.message.slot >
# compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
if not (signed_beacon_block.message.slot > dag.finalizedHead.slot):
debug "isValidBeaconBlock: block is not from a slot greater than the latest finalized slot"
return false
debug "block is not from a slot greater than the latest finalized slot"
return err(Invalid)
# The block is the first block with valid signature received for the proposer
# for the slot, signed_beacon_block.message.slot.
#
# While this condition is similar to the proposer slashing condition at
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#proposer-slashing
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#proposer-slashing
# it's not identical, and this check does not address slashing:
#
# (1) The beacon blocks must be conflicting, i.e. different, for the same
@ -315,12 +308,10 @@ proc isValidBeaconBlock*(
signed_beacon_block.message.proposer_index and
blck.message.slot == signed_beacon_block.message.slot and
blck.signature.toRaw() != signed_beacon_block.signature.toRaw():
debug "isValidBeaconBlock: block isn't first block with valid signature received for the proposer",
signed_beacon_block_message_slot = signed_beacon_block.message.slot,
debug "block isn't first block with valid signature received for the proposer",
blckRef = slotBlockRef,
received_block = shortLog(signed_beacon_block.message),
existing_block = shortLog(dag.get(slotBlockRef).data.message)
return false
existing_block = shortLog(blck.message)
return err(Invalid)
# If this block doesn't have a parent we know about, we can't/don't really
# trace it back to a known-good state/checkpoint to verify its provenance;
@ -340,27 +331,35 @@ proc isValidBeaconBlock*(
# CandidateChains.add(...) directly, with no additional validity checks. TODO,
# not specific to this, but by the pending dag keying on the htr of the
# BeaconBlock, not SignedBeaconBlock, opens up certain spoofing attacks.
quarantine.pending[hash_tree_root(signed_beacon_block.message)] =
signed_beacon_block
return false
debug "parent unknown, putting block in quarantine"
quarantine.add(dag, signed_beacon_block)
return err(MissingParent)
# The proposer signature, signed_beacon_block.signature, is valid with
# respect to the proposer_index pubkey.
let bs =
BlockSlot(blck: parent_ref, slot: dag.get(parent_ref).data.message.slot)
dag.withState(dag.tmpState, bs):
let
blockRoot = hash_tree_root(signed_beacon_block.message)
domain = get_domain(dag.headState.data.data, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(signed_beacon_block.message.slot))
signing_root = compute_signing_root(blockRoot, domain)
proposer_index = signed_beacon_block.message.proposer_index
let
proposer = getProposer(dag, parent_ref, signed_beacon_block.message.slot)
if proposer_index >= dag.headState.data.data.validators.len.uint64:
return false
if not blsVerify(dag.headState.data.data.validators[proposer_index].pubkey,
signing_root.data, signed_beacon_block.signature):
debug "isValidBeaconBlock: block failed signature verification"
return false
if proposer.isNone:
notice "cannot compute proposer for message"
return err(Invalid)
true
if proposer.get()[0] !=
ValidatorIndex(signed_beacon_block.message.proposer_index):
debug "block had unexpected proposer",
expected_proposer = proposer.get()[0]
return err(Invalid)
if not verify_block_signature(
dag.headState.data.data.fork,
dag.headState.data.data.genesis_validators_root,
signed_beacon_block.message.slot,
signed_beacon_block.message,
proposer.get()[1],
signed_beacon_block.signature):
debug "block failed signature verification",
signature = shortLog(signed_beacon_block.signature)
return err(Invalid)
ok()

View File

@ -6,13 +6,15 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
chronicles, tables,
chronicles, tables, options,
stew/bitops2,
metrics,
../spec/digest,
../spec/[datatypes, digest],
../ssz/merkleization,
block_pools_types
export options
logScope: topics = "quarant"
{.push raises: [Defect].}
@ -35,4 +37,19 @@ func checkMissing*(quarantine: var Quarantine): seq[FetchRecord] =
# simple (simplistic?) exponential backoff for retries..
for k, v in quarantine.missing.pairs():
if countOnes(v.tries.uint64) == 1:
result.add(FetchRecord(root: k, historySlots: v.slots))
result.add(FetchRecord(root: k))
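The `countOnes(v.tries) == 1` test is the whole backoff policy: it holds exactly when `tries` is a power of two, so a missing block is re-requested on attempts 1, 2, 4, 8 and so on. A standalone check:

```nim
import stew/bitops2
# Prints 1, 2, 4, 8, 16 - the retry counts that trigger a new fetch.
for tries in 1'u64 .. 16'u64:
  if countOnes(tries) == 1:
    echo tries
```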
func add*(quarantine: var Quarantine, dag: CandidateChains,
sblck: SignedBeaconBlock,
broot: Option[Eth2Digest] = none[Eth2Digest]()) =
## Adds block to quarantine's `orphans` and `missing` lists.
let blockRoot = if broot.isSome():
broot.get()
else:
hash_tree_root(sblck.message)
quarantine.orphans[blockRoot] = sblck
let parentRoot = sblck.message.parent_root
if parentRoot notin quarantine.missing:
quarantine.missing[parentRoot] = MissingBlock()

View File

@ -1,11 +1,11 @@
{.push raises: [Defect].}
import
os, options, strformat, strutils,
os, options,
chronicles, confutils, json_serialization,
confutils/defs, confutils/std/net,
chronicles/options as chroniclesOptions,
spec/[crypto]
spec/[crypto, keystore]
export
defs, enabledLogLevel, parseCmdArg, completeCmdArg
@ -39,11 +39,6 @@ type
desc: "The Eth1 network tracked by the beacon node."
name: "eth1-network" }: Eth1Network
quickStart* {.
defaultValue: false
desc: "Run in quickstart mode"
name: "quick-start" }: bool
dataDir* {.
defaultValue: config.defaultDataDir()
desc: "The directory where nimbus will store all blockchain data."
@ -60,6 +55,10 @@ type
desc: "Address of the deposit contract."
name: "deposit-contract" }: string
nonInteractive* {.
desc: "Do not display interative prompts. Quit on missing configuration."
name: "non-interactive" }: bool
case cmd* {.
command
defaultValue: noCommand }: BNStartUpCmd
@ -106,6 +105,14 @@ type
abbr: "v"
name: "validator" }: seq[ValidatorKeyPath]
validatorsDirFlag* {.
desc: "A directory containing validator keystores."
name: "validators-dir" }: Option[InputDir]
secretsDirFlag* {.
desc: "A directory containing validator keystore passwords."
name: "secrets-dir" }: Option[InputDir]
stateSnapshot* {.
desc: "Json file specifying a recent state snapshot."
abbr: "s"
@ -177,13 +184,12 @@ type
name: "dump" }: bool
of createTestnet:
validatorsDir* {.
desc: "Directory containing validator descriptors named 'vXXXXXXX.deposit.json'."
abbr: "d"
testnetDepositsDir* {.
desc: "Directory containing validator keystores."
name: "validators-dir" }: InputDir
totalValidators* {.
desc: "The number of validators in the newly created chain."
desc: "The number of validator deposits in the newly created chain."
name: "total-validators" }: uint64
firstValidator* {.
@ -209,7 +215,6 @@ type
genesisOffset* {.
defaultValue: 5
desc: "Seconds from now to add to genesis time."
abbr: "g"
name: "genesis-offset" }: int
outputGenesis* {.
@ -231,34 +236,34 @@ type
name: "keyfile" }: seq[ValidatorKeyPath]
of makeDeposits:
totalQuickstartDeposits* {.
defaultValue: 0
desc: "Number of quick-start deposits to generate."
name: "quickstart-deposits" }: int
totalDeposits* {.
defaultValue: 1
desc: "Number of deposits to generate."
name: "count" }: int
totalRandomDeposits* {.
defaultValue: 0
desc: "Number of secure random deposits to generate."
name: "random-deposits" }: int
depositsDir* {.
outValidatorsDir* {.
defaultValue: "validators"
desc: "Folder to write deposits to."
name: "deposits-dir" }: string
desc: "Output folder for validator keystores and deposits."
name: "out-validators-dir" }: string
outSecretsDir* {.
defaultValue: "secrets"
desc: "Output folder for randomly generated keystore passphrases."
name: "out-secrets-dir" }: string
depositPrivateKey* {.
defaultValue: ""
desc: "Private key of the controlling (sending) account",
desc: "Private key of the controlling (sending) account.",
name: "deposit-private-key" }: string
minDelay* {.
defaultValue: 0.0
desc: "Minimum possible delay between making two deposits (in seconds)"
desc: "Minimum possible delay between making two deposits (in seconds)."
name: "min-delay" }: float
maxDelay* {.
defaultValue: 0.0
desc: "Maximum possible delay between making two deposits (in seconds)"
desc: "Maximum possible delay between making two deposits (in seconds)."
name: "max-delay" }: float
ValidatorClientConf* = object
@ -273,6 +278,10 @@ type
abbr: "d"
name: "data-dir" }: OutDir
nonInteractive* {.
desc: "Do not display interative prompts. Quit on missing configuration."
name: "non-interactive" }: bool
case cmd* {.
command
defaultValue: VCNoCommand }: VCStartUpCmd
@ -290,10 +299,18 @@ type
validators* {.
required
desc: "Path to a validator private key, as generated by makeDeposits."
desc: "Path to a validator key store, as generated by makeDeposits."
abbr: "v"
name: "validator" }: seq[ValidatorKeyPath]
validatorsDirFlag* {.
desc: "A directory containing validator keystores."
name: "validators-dir" }: Option[InputDir]
secretsDirFlag* {.
desc: "A directory containing validator keystore passwords."
name: "secrets-dir" }: Option[InputDir]
proc defaultDataDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
let dataDir = when defined(windows):
"AppData" / "Roaming" / "Nimbus"
@ -304,18 +321,33 @@ proc defaultDataDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
getHomeDir() / dataDir / "BeaconNode"
proc validatorFileBaseName*(validatorIdx: int): string =
# there can apparently be at most 4M validators, so we use 7 digits..
try:
fmt"v{validatorIdx:07}"
except ValueError as e:
raiseAssert e.msg
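For illustration, the 7-digit zero-padded scheme produces names like this:

```nim
# Index 7 maps to "v0000007" under the fmt"v{validatorIdx:07}" format.
doAssert validatorFileBaseName(7) == "v0000007"
```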
func dumpDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
conf.dataDir / "dump"
func localValidatorsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
conf.dataDir / "validators"
func dumpDirInvalid*(conf: BeaconNodeConf|ValidatorClientConf): string =
conf.dumpDir / "invalid" # things that failed validation
func dumpDirIncoming*(conf: BeaconNodeConf|ValidatorClientConf): string =
conf.dumpDir / "incoming" # things that couldn't be validated (missingparent etc)
func dumpDirOutgoing*(conf: BeaconNodeConf|ValidatorClientConf): string =
conf.dumpDir / "outgoing" # things we produced
proc createDumpDirs*(conf: BeaconNodeConf) =
if conf.dumpEnabled:
try:
createDir(conf.dumpDirInvalid)
createDir(conf.dumpDirIncoming)
createDir(conf.dumpDirOutgoing)
except CatchableError as err:
# Dumping is mainly a debugging feature, so ignore these..
warn "Cannot create dump directories", msg = err.msg
func validatorsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
string conf.validatorsDirFlag.get(InputDir(conf.dataDir / "validators"))
func secretsDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
string conf.secretsDirFlag.get(InputDir(conf.dataDir / "secrets"))
func databaseDir*(conf: BeaconNodeConf|ValidatorClientConf): string =
conf.dataDir / "db"
@ -328,26 +360,6 @@ func defaultListenAddress*(conf: BeaconNodeConf|ValidatorClientConf): ValidIpAdd
func defaultAdminListenAddress*(conf: BeaconNodeConf|ValidatorClientConf): ValidIpAddress =
(static ValidIpAddress.init("127.0.0.1"))
iterator validatorKeys*(conf: BeaconNodeConf|ValidatorClientConf): ValidatorPrivKey =
for validatorKeyFile in conf.validators:
try:
yield validatorKeyFile.load
except CatchableError as err:
warn "Failed to load validator private key",
file = validatorKeyFile.string, err = err.msg
try:
for kind, file in walkDir(conf.localValidatorsDir):
if kind in {pcFile, pcLinkToFile} and
cmpIgnoreCase(".privkey", splitFile(file).ext) == 0:
try:
yield ValidatorPrivKey.init(readFile(file).string)
except CatchableError as err:
warn "Failed to load a validator private key", file, err = err.msg
except OSError as err:
warn "Cannot load validator keys",
dir = conf.localValidatorsDir, err = err.msg
template writeValue*(writer: var JsonWriter,
value: TypedInputFile|InputFile|InputDir|OutPath|OutDir|OutFile) =
writer.writeValue(string value)

View File

@ -63,6 +63,7 @@ type
connQueue: AsyncQueue[PeerInfo]
seenTable: Table[PeerID, SeenItem]
connWorkers: seq[Future[void]]
forkId: ENRForkID
EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers
@ -124,8 +125,8 @@ type
# Private fields:
peerStateInitializer*: PeerStateInitializer
networkStateInitializer*: NetworkStateInitializer
handshake*: HandshakeStep
disconnectHandler*: DisconnectionHandler
onPeerConnected*: OnPeerConnectedHandler
onPeerDisconnected*: OnPeerDisconnectedHandler
ProtocolInfo* = ptr ProtocolInfoObj
@ -136,8 +137,8 @@ type
PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.}
NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.}
HandshakeStep* = proc(peer: Peer, conn: Connection): Future[void] {.gcsafe.}
DisconnectionHandler* = proc(peer: Peer): Future[void] {.gcsafe.}
OnPeerConnectedHandler* = proc(peer: Peer, conn: Connection): Future[void] {.gcsafe.}
OnPeerDisconnectedHandler* = proc(peer: Peer): Future[void] {.gcsafe.}
ThunkProc* = LPProtoHandler
MounterProc* = proc(network: Eth2Node) {.gcsafe.}
MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
@ -191,8 +192,6 @@ const
TTFB_TIMEOUT* = 5.seconds
RESP_TIMEOUT* = 10.seconds
readTimeoutErrorMsg = "Exceeded read timeout for a request"
NewPeerScore* = 200
## Score which will be assigned to new connected Peer
PeerScoreLowLimit* = 0
@ -218,22 +217,22 @@ template neterr(kindParam: Eth2NetworkingErrorKind): auto =
err(type(result), Eth2NetworkingError(kind: kindParam))
# Metrics for tracking attestation and beacon block loss
declareCounter gossip_messages_sent,
declareCounter nbc_gossip_messages_sent,
"Number of gossip messages sent by this peer"
declareCounter gossip_messages_received,
declareCounter nbc_gossip_messages_received,
"Number of gossip messages received by this peer"
declarePublicGauge libp2p_successful_dials,
declarePublicCounter nbc_successful_dials,
"Number of successfully dialed peers"
declarePublicGauge libp2p_failed_dials,
declarePublicCounter nbc_failed_dials,
"Number of dialing attempts that failed"
declarePublicGauge libp2p_timeout_dials,
declarePublicCounter nbc_timeout_dials,
"Number of dialing attempts that exceeded timeout"
declarePublicGauge libp2p_peers,
declarePublicGauge nbc_peers,
"Number of active libp2p peers"
proc safeClose(conn: Connection) {.async.} =
@ -273,10 +272,6 @@ proc openStream(node: Eth2Node,
else:
raise
func peerId(conn: Connection): PeerID =
# TODO: Can this be `nil`?
conn.peerInfo.peerId
proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.}
proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer {.gcsafe.} =
@ -504,14 +499,11 @@ template send*[M](r: SingleChunkResponse[M], val: auto): untyped =
proc performProtocolHandshakes*(peer: Peer) {.async.} =
var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len)
for protocol in allProtocols:
if protocol.handshake != nil:
subProtocolsHandshakes.add((protocol.handshake)(peer, nil))
if protocol.onPeerConnected != nil:
subProtocolsHandshakes.add protocol.onPeerConnected(peer, nil)
await allFuturesThrowing(subProtocolsHandshakes)
template initializeConnection*(peer: Peer): auto =
performProtocolHandshakes(peer)
proc initProtocol(name: string,
peerInit: PeerStateInitializer,
networkInit: NetworkStateInitializer): ProtocolInfoObj =
@ -528,10 +520,10 @@ proc registerProtocol(protocol: ProtocolInfo) =
gProtocols[i].index = i
proc setEventHandlers(p: ProtocolInfo,
handshake: HandshakeStep,
disconnectHandler: DisconnectionHandler) =
p.handshake = handshake
p.disconnectHandler = disconnectHandler
onPeerConnected: OnPeerConnectedHandler,
onPeerDisconnected: OnPeerDisconnectedHandler) =
p.onPeerConnected = onPeerConnected
p.onPeerDisconnected = onPeerDisconnected
proc implementSendProcBody(sendProc: SendProc) =
let
@ -574,6 +566,11 @@ proc handleIncomingStream(network: Eth2Node,
try:
let peer = peerFromStream(network, conn)
# TODO peer connection setup is broken, update info in some better place
# whenever the race is fixed:
# https://github.com/status-im/nim-beacon-chain/issues/1157
peer.info = conn.peerInfo
template returnInvalidRequest(msg: ErrorMsg) =
await sendErrorResponse(peer, conn, noSnappy, InvalidRequest, msg)
return
@ -654,33 +651,33 @@ proc handleOutgoingPeer*(peer: Peer): Future[bool] {.async.} =
let network = peer.network
proc onPeerClosed(udata: pointer) {.gcsafe.} =
debug "Peer (outgoing) lost", peer = $peer.info
libp2p_peers.set int64(len(network.peerPool))
debug "Peer (outgoing) lost", peer
nbc_peers.set int64(len(network.peerPool))
let res = await network.peerPool.addOutgoingPeer(peer)
if res:
peer.updateScore(NewPeerScore)
debug "Peer (outgoing) has been added to PeerPool", peer = $peer.info
debug "Peer (outgoing) has been added to PeerPool", peer
peer.getFuture().addCallback(onPeerClosed)
result = true
libp2p_peers.set int64(len(network.peerPool))
nbc_peers.set int64(len(network.peerPool))
proc handleIncomingPeer*(peer: Peer): Future[bool] {.async.} =
let network = peer.network
proc onPeerClosed(udata: pointer) {.gcsafe.} =
debug "Peer (incoming) lost", peer = $peer.info
libp2p_peers.set int64(len(network.peerPool))
debug "Peer (incoming) lost", peer
nbc_peers.set int64(len(network.peerPool))
let res = await network.peerPool.addIncomingPeer(peer)
if res:
peer.updateScore(NewPeerScore)
debug "Peer (incoming) has been added to PeerPool", peer = $peer.info
debug "Peer (incoming) has been added to PeerPool", peer
peer.getFuture().addCallback(onPeerClosed)
result = true
libp2p_peers.set int64(len(network.peerPool))
nbc_peers.set int64(len(network.peerPool))
proc toPeerInfo*(r: enr.TypedRecord): PeerInfo =
if r.secp256k1.isSome:
@ -713,7 +710,7 @@ proc toPeerInfo(r: Option[enr.TypedRecord]): PeerInfo =
return r.get.toPeerInfo
proc dialPeer*(node: Eth2Node, peerInfo: PeerInfo) {.async.} =
logScope: peer = $peerInfo
logScope: peer = peerInfo.id
debug "Connecting to discovered peer"
await node.switch.connect(peerInfo)
@ -726,9 +723,9 @@ proc dialPeer*(node: Eth2Node, peerInfo: PeerInfo) {.async.} =
#debug "Supported protocols", ls
debug "Initializing connection"
await initializeConnection(peer)
await performProtocolHandshakes(peer)
inc libp2p_successful_dials
inc nbc_successful_dials
debug "Network handshakes completed"
proc connectWorker(network: Eth2Node) {.async.} =
@ -749,28 +746,30 @@ proc connectWorker(network: Eth2Node) {.async.} =
# will be stored in PeerPool.
if fut.finished():
if fut.failed() and not(fut.cancelled()):
debug "Unable to establish connection with peer", peer = $pi,
debug "Unable to establish connection with peer", peer = pi.id,
errMsg = fut.readError().msg
inc libp2p_failed_dials
inc nbc_failed_dials
network.addSeen(pi, SeenTableTimeDeadPeer)
continue
debug "Connection to remote peer timed out", peer = $pi
inc libp2p_timeout_dials
debug "Connection to remote peer timed out", peer = pi.id
inc nbc_timeout_dials
network.addSeen(pi, SeenTableTimeTimeout)
else:
trace "Peer is already connected or already seen", peer = $pi,
trace "Peer is already connected or already seen", peer = pi.id,
peer_pool_has_peer = $r1, seen_table_has_peer = $r2,
seen_table_size = len(network.seenTable)
proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
debug "Starting discovery loop"
let enrField = ("eth2", SSZ.encode(node.forkId))
while true:
let currentPeerCount = node.peerPool.len
if currentPeerCount < node.wantedPeers:
try:
let discoveredPeers =
node.discovery.randomNodes(node.wantedPeers - currentPeerCount)
node.discovery.randomNodes(node.wantedPeers - currentPeerCount,
enrField)
for peer in discoveredPeers:
try:
let peerRecord = peer.record.toTypedRecord
@ -811,9 +810,10 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, enrForkId: ENRForkID,
result.seenTable = initTable[PeerID, SeenItem]()
result.connQueue = newAsyncQueue[PeerInfo](ConcurrentConnections)
result.metadata = getPersistentNetMetadata(conf)
result.forkId = enrForkId
result.discovery = Eth2DiscoveryProtocol.new(
conf, ip, tcpPort, udpPort, privKey.toRaw,
{"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(result.metadata.attnets)})
{"eth2": SSZ.encode(result.forkId), "attnets": SSZ.encode(result.metadata.attnets)})
newSeq result.protocolStates, allProtocols.len
for proto in allProtocols:
@ -824,19 +824,21 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, enrForkId: ENRForkID,
if msg.protocolMounter != nil:
msg.protocolMounter result
for i in 0 ..< ConcurrentConnections:
result.connWorkers.add(connectWorker(result))
template publicKey*(node: Eth2Node): keys.PublicKey =
node.discovery.privKey.toPublicKey.tryGet()
template addKnownPeer*(node: Eth2Node, peer: enr.Record) =
node.discovery.addNode peer
proc start*(node: Eth2Node) {.async.} =
proc startListening*(node: Eth2Node) =
node.discovery.open()
node.discovery.start()
proc start*(node: Eth2Node) {.async.} =
for i in 0 ..< ConcurrentConnections:
node.connWorkers.add connectWorker(node)
node.libp2pTransportLoops = await node.switch.start()
node.discovery.start()
node.discoveryLoop = node.runDiscoveryLoop()
traceAsyncErrors node.discoveryLoop
@ -1116,19 +1118,17 @@ proc announcedENR*(node: Eth2Node): enr.Record =
proc shortForm*(id: KeyPair): string =
$PeerID.init(id.pubkey)
proc connectToNetwork*(node: Eth2Node) {.async.} =
proc startLookingForPeers*(node: Eth2Node) {.async.} =
await node.start()
proc checkIfConnectedToBootstrapNode {.async.} =
await sleepAsync(30.seconds)
if node.discovery.bootstrapRecords.len > 0 and libp2p_successful_dials.value == 0:
if node.discovery.bootstrapRecords.len > 0 and nbc_successful_dials.value == 0:
fatal "Failed to connect to any bootstrap node. Quitting",
bootstrapEnrs = node.discovery.bootstrapRecords
quit 1
# TODO: The initial sync forces this to time out.
# Revisit when the new Sync manager is integrated.
# traceAsyncErrors checkIfConnectedToBootstrapNode()
traceAsyncErrors checkIfConnectedToBootstrapNode()
func peersCount*(node: Eth2Node): int =
len(node.peerPool)
@ -1138,7 +1138,7 @@ proc subscribe*[MsgType](node: Eth2Node,
msgHandler: proc(msg: MsgType) {.gcsafe.},
msgValidator: proc(msg: MsgType): bool {.gcsafe.} ) {.async, gcsafe.} =
template execMsgHandler(peerExpr, gossipBytes, gossipTopic, useSnappy) =
inc gossip_messages_received
inc nbc_gossip_messages_received
trace "Incoming pubsub message received",
peer = peerExpr, len = gossipBytes.len, topic = gossipTopic,
message_id = `$`(sha256.digest(gossipBytes))
@ -1190,7 +1190,7 @@ proc traceMessage(fut: FutureBase, digest: MDigest[256]) =
trace "Outgoing pubsub message sent", message_id = `$`(digest)
proc broadcast*(node: Eth2Node, topic: string, msg: auto) =
inc gossip_messages_sent
inc nbc_gossip_messages_sent
let broadcastBytes = SSZ.encode(msg)
var fut = node.switch.publish(topic, broadcastBytes)
traceMessage(fut, sha256.digest(broadcastBytes))

View File

@ -20,17 +20,12 @@
type
UpdateFlag* = enum
skipMerkleValidation ##\
## When processing deposits, skip verifying the Merkle proof trees of each
## deposit.
skipBlsValidation ##\
## Skip verification of BLS signatures in block processing.
## Predominantly intended for use in testing, e.g. to allow extra coverage.
## Also useful to avoid unnecessary work when replaying known, good blocks.
skipStateRootValidation ##\
## Skip verification of block state root.
skipBlockParentRootValidation ##\
## Skip verification that the block's parent root matches the previous block header.
verifyFinalization
UpdateFlags* = set[UpdateFlag]

View File

@ -161,15 +161,6 @@ type
proc `==`*(a, b: ENRFieldPair): bool {.inline.} =
result = (a.eth2 == b.eth2)
proc shortLog*(a: PeerInfo): string =
for ma in a.addrs:
if TCP.match(ma):
return $ma & "/" & $a.peerId
for ma in a.addrs:
if UDP.match(ma):
return $ma & "/" & $a.peerId
result = $a
proc hasTCP(a: PeerInfo): bool =
for ma in a.addrs:
if TCP.match(ma):
@ -188,7 +179,7 @@ proc toNodeId(a: PeerID): Option[NodeId] =
chronicles.formatIt PeerInfo: it.shortLog
chronicles.formatIt seq[PeerInfo]:
var res = newSeq[string]()
for item in it.items(): res.add(item.shortLog())
for item in it.items(): res.add($item.shortLog())
"[" & res.join(", ") & "]"
func getTopics(forkDigest: ForkDigest,
@ -216,8 +207,12 @@ func getTopics(forkDigest: ForkDigest,
var topics = newSeq[string](ATTESTATION_SUBNET_COUNT * 2)
var offset = 0
for i in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
topics[offset] = getMainnetAttestationTopic(forkDigest, i)
topics[offset + 1] = getMainnetAttestationTopic(forkDigest, i) & "_snappy"
when ETH2_SPEC == "v0.12.1":
topics[offset] = getAttestationTopic(forkDigest, i)
topics[offset + 1] = getAttestationTopic(forkDigest, i) & "_snappy"
else:
topics[offset] = getMainnetAttestationTopic(forkDigest, i)
topics[offset + 1] = getMainnetAttestationTopic(forkDigest, i) & "_snappy"
offset += 2
topics

View File

@ -3,7 +3,7 @@
import
stew/endians2, stint,
./extras, ./ssz/merkleization,
spec/[crypto, datatypes, digest, helpers]
spec/[crypto, datatypes, digest, keystore, signatures]
func get_eth1data_stub*(deposit_count: uint64, current_epoch: Epoch): Eth1Data =
# https://github.com/ethereum/eth2.0-pm/blob/e596c70a19e22c7def4fd3519e20ae4022349390/interop/mocked_eth1data/README.md
@ -16,7 +16,7 @@ func get_eth1data_stub*(deposit_count: uint64, current_epoch: Epoch): Eth1Data =
block_hash: hash_tree_root(hash_tree_root(voting_period).data),
)
func makeInteropPrivKey*(i: int): BlsResult[ValidatorPrivKey] =
func makeInteropPrivKey*(i: int): ValidatorPrivKey =
var bytes: array[32, byte]
bytes[0..7] = uint64(i).toBytesLE()
@ -25,22 +25,16 @@ func makeInteropPrivKey*(i: int): BlsResult[ValidatorPrivKey] =
curveOrder =
"52435875175126190479447740508185965837690552500527637822603658699938581184513".parse(UInt256)
privkeyBytes = eth2hash(bytes)
privkeyBytes = eth2digest(bytes)
key = (UInt256.fromBytesLE(privkeyBytes.data) mod curveOrder).toBytesBE()
ValidatorPrivKey.fromRaw(key)
ValidatorPrivKey.fromRaw(key).get
const eth1BlockHash* = block:
var x: Eth2Digest
for v in x.data.mitems: v = 0x42
x
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/deposit-contract.md#withdrawal-credentials
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2hash(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
bytes
func makeDeposit*(
pubkey: ValidatorPubKey, privkey: ValidatorPrivKey, epoch = 0.Epoch,
amount: Gwei = MAX_EFFECTIVE_BALANCE.Gwei,
@ -53,9 +47,6 @@ func makeDeposit*(
withdrawal_credentials: makeWithdrawalCredentials(pubkey)))
if skipBLSValidation notin flags:
let domain = compute_domain(DOMAIN_DEPOSIT)
let signing_root = compute_signing_root(ret.getDepositMessage, domain)
ret.data.signature = bls_sign(privkey, signing_root.data)
ret.data.signature = get_deposit_signature(ret.data, privkey)
ret

View File

@ -0,0 +1,169 @@
import
os, strutils, terminal,
chronicles, chronos, blscurve, nimcrypto, json_serialization, serialization,
web3, stint, eth/keys, confutils,
spec/[datatypes, digest, crypto, keystore], conf, ssz/merkleization, merkle_minimal
contract(DepositContract):
proc deposit(pubkey: Bytes48, withdrawalCredentials: Bytes32, signature: Bytes96, deposit_data_root: FixedBytes[32])
const
keystoreFileName* = "keystore.json"
depositFileName* = "deposit.json"
type
DelayGenerator* = proc(): chronos.Duration {.closure, gcsafe.}
{.push raises: [Defect].}
proc ethToWei(eth: UInt256): UInt256 =
eth * 1000000000000000000.u256
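A quick sanity check of the conversion, using the 32 ETH deposit amount that `sendDeposits` below sends:

```nim
# 32 ETH = 32 * 10^18 wei.
doAssert ethToWei(32.u256) == "32000000000000000000".parse(UInt256)
```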
proc loadKeyStore(conf: BeaconNodeConf|ValidatorClientConf,
validatorsDir, keyName: string): Option[ValidatorPrivKey] =
let
keystorePath = validatorsDir / keyName / keystoreFileName
keystoreContents = KeyStoreContent:
try: readFile(keystorePath)
except IOError as err:
error "Failed to read keystore", err = err.msg, path = keystorePath
return
let passphrasePath = conf.secretsDir / keyName
if fileExists(passphrasePath):
let
passphrase = KeyStorePass:
try: readFile(passphrasePath)
except IOError as err:
error "Failed to read passphrase file", err = err.msg, path = passphrasePath
return
let res = decryptKeystore(keystoreContents, passphrase)
if res.isOk:
return res.get.some
else:
error "Failed to decrypt keystore", keystorePath, passphrasePath
return
if conf.nonInteractive:
error "Unable to load validator key store. Please ensure matching passphrase exists in the secrets dir",
keyName, validatorsDir, secretsDir = conf.secretsDir
return
var remainingAttempts = 3
var prompt = "Please enter passphrase for key \"" & validatorsDir/keyName & "\"\n"
while remainingAttempts > 0:
let passphrase = KeyStorePass:
try: readPasswordFromStdin(prompt)
except IOError:
error "STDIN not readable. Cannot obtain KeyStore password"
return
let decrypted = decryptKeystore(keystoreContents, passphrase)
if decrypted.isOk:
return decrypted.get.some
else:
prompt = "Keystore decryption failed. Please try again"
dec remainingAttempts
iterator validatorKeys*(conf: BeaconNodeConf|ValidatorClientConf): ValidatorPrivKey =
for validatorKeyFile in conf.validators:
try:
yield validatorKeyFile.load
except CatchableError as err:
error "Failed to load validator private key",
file = validatorKeyFile.string, err = err.msg
quit 1
let validatorsDir = conf.validatorsDir
try:
for kind, file in walkDir(validatorsDir):
if kind == pcDir:
let keyName = splitFile(file).name
let key = loadKeyStore(conf, validatorsDir, keyName)
if key.isSome:
yield key.get
else:
quit 1
except OSError as err:
error "Validator keystores directory not accessible",
path = validatorsDir, err = err.msg
quit 1
type
GenerateDepositsError = enum
RandomSourceDepleted,
FailedToCreateValidatorDir
FailedToCreateSecretFile
FailedToCreateKeystoreFile
FailedToCreateDepositFile
proc generateDeposits*(totalValidators: int,
validatorsDir: string,
secretsDir: string): Result[seq[Deposit], GenerateDepositsError] =
var deposits: seq[Deposit]
info "Generating deposits", totalValidators, validatorsDir, secretsDir
for i in 0 ..< totalValidators:
let password = KeyStorePass getRandomBytesOrPanic(32).toHex
let credentials = generateCredentials(password = password)
let
keyName = intToStr(i, 6) & "_" & $(credentials.signingKey.toPubKey)
validatorDir = validatorsDir / keyName
passphraseFile = secretsDir / keyName
depositFile = validatorDir / depositFileName
keystoreFile = validatorDir / keystoreFileName
if existsDir(validatorDir) and existsFile(depositFile):
continue
try: createDir validatorDir
except OSError, IOError: return err FailedToCreateValidatorDir
try: writeFile(secretsDir / keyName, password.string)
except IOError: return err FailedToCreateSecretFile
try: writeFile(keystoreFile, credentials.keyStore.string)
except IOError: return err FailedToCreateKeystoreFile
deposits.add credentials.prepareDeposit()
# Does quadratic additional work, but fast enough, and otherwise more
# cleanly allows free intermixing of pre-existing and newly generated
# deposit and private key files. TODO: only generate new Merkle proof
# for the most recent deposit if this becomes a bottleneck.
attachMerkleProofs(deposits)
try: Json.saveFile(depositFile, deposits[^1], pretty = true)
except: return err FailedToCreateDepositFile
ok deposits
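A hypothetical invocation, mirroring how the `makeDeposits` command wires this up (the paths and count below are invented):

```nim
# Creates 4 keystores under ./validators, passphrases under ./secrets,
# and returns the deposits with Merkle proofs attached.
let deposits = generateDeposits(4, "validators", "secrets").tryGet
echo "generated ", deposits.len, " deposits"
```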
{.pop.}
proc sendDeposits*(deposits: seq[Deposit],
web3Url, depositContractAddress, privateKey: string,
delayGenerator: DelayGenerator = nil) {.async.} =
var web3 = await newWeb3(web3Url)
if privateKey.len != 0:
web3.privateKey = PrivateKey.fromHex(privateKey).tryGet
else:
let accounts = await web3.provider.eth_accounts()
if accounts.len == 0:
error "No account offered by the web3 provider", web3Url
return
web3.defaultAccount = accounts[0]
let contractAddress = Address.fromHex(depositContractAddress)
let depositContract = web3.contractSender(DepositContract, contractAddress)
for i, dp in deposits:
discard await depositContract.deposit(
Bytes48(dp.data.pubKey.toRaw()),
Bytes32(dp.data.withdrawal_credentials.data),
Bytes96(dp.data.signature.toRaw()),
FixedBytes[32](hash_tree_root(dp.data).data)).send(value = 32.u256.ethToWei, gasPrice = 1)
if delayGenerator != nil:
await sleepAsync(delayGenerator())

View File

@ -91,16 +91,17 @@ const
# module seems broken. Investigate and file this as an issue.
{.push warning[LockLevel]: off.}
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#get_eth1_data
func compute_time_at_slot(state: BeaconState, slot: Slot): uint64 =
return state.genesis_time + slot * SECONDS_PER_SLOT
state.genesis_time + slot * SECONDS_PER_SLOT
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#get_eth1_data
func voting_period_start_time*(state: BeaconState): uint64 =
let eth1_voting_period_start_slot = state.slot - state.slot mod SLOTS_PER_ETH1_VOTING_PERIOD.uint64
return compute_time_at_slot(state, eth1_voting_period_start_slot)
let eth1_voting_period_start_slot =
state.slot - state.slot mod SLOTS_PER_ETH1_VOTING_PERIOD.uint64
compute_time_at_slot(state, eth1_voting_period_start_slot)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#get_eth1_data
func is_candidate_block(blk: Eth1Block, period_start: uint64): bool =
(blk.timestamp + SECONDS_PER_ETH1_BLOCK.uint64 * ETH1_FOLLOW_DISTANCE.uint64 <= period_start) and
(blk.timestamp + SECONDS_PER_ETH1_BLOCK.uint64 * ETH1_FOLLOW_DISTANCE.uint64 * 2 >= period_start)
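Worked numbers for the candidate window, assuming the mainnet presets `SECONDS_PER_ETH1_BLOCK = 14` and `ETH1_FOLLOW_DISTANCE = 1024` (assumptions; the presets are defined elsewhere):

```nim
# followTime = 14 * 1024 = 14336 s, roughly 4 hours, so a candidate
# block's timestamp must lie between ~8 h and ~4 h before period_start:
#   blk.timestamp + followTime     <= period_start   (old enough)
#   blk.timestamp + 2 * followTime >= period_start   (not too old)
const followTime = 14'u64 * 1024'u64
echo followTime
```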

View File

@ -5,7 +5,7 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# Merkle tree helpers
# ---------------------------------------------------------------
@ -18,6 +18,14 @@ import
../../beacon_chain/spec/[beaconstate, datatypes, digest, helpers],
../../beacon_chain/ssz/merkleization
# TODO
#
# This module currently represents a direct translation of the Python
# code appearing in the spec. We need to review it to ensure that it
# doesn't duplicate any code defined in ssz.nim already.
#
# All tests need to be moved to the test suite.
func round_step_down*(x: Natural, step: static Natural): int {.inline.} =
## Round the input to the previous multiple of "step"
when (step and (step - 1)) == 0:
@ -26,16 +34,6 @@ func round_step_down*(x: Natural, step: static Natural): int {.inline.} =
else:
result = x - x mod step
let ZeroHashes = block:
# hashes for a merkle tree full of zeros for leaves
var zh = @[Eth2Digest()]
for i in 1 ..< DEPOSIT_CONTRACT_TREE_DEPTH:
let nodehash = withEth2Hash:
h.update zh[i-1]
h.update zh[i-1]
zh.add nodehash
zh
type SparseMerkleTree*[Depth: static int] = object
## Sparse Merkle tree
# There is an extra "depth" layer to store leaf nodes
@ -67,13 +65,11 @@ proc merkleTreeFromLeaves*(
# with the zeroHash corresponding to the current depth
let nodeHash = withEth2Hash:
h.update result.nnznodes[depth-1][^1]
h.update ZeroHashes[depth-1]
h.update zeroHashes[depth-1]
result.nnznodes[depth].add nodeHash
proc getMerkleProof*[Depth: static int](
tree: SparseMerkleTree[Depth],
index: int,
): array[Depth, Eth2Digest] =
proc getMerkleProof*[Depth: static int](tree: SparseMerkleTree[Depth],
index: int): array[Depth, Eth2Digest] =
# Descend down the tree according to the bit representation
# of the index:
@ -85,7 +81,7 @@ proc getMerkleProof*[Depth: static int](
if nodeIdx < tree.nnznodes[depth].len:
result[depth] = tree.nnznodes[depth][nodeIdx]
else:
result[depth] = ZeroHashes[depth]
result[depth] = zeroHashes[depth]
proc attachMerkleProofs*(deposits: var seq[Deposit]) =
let deposit_data_roots = mapIt(deposits, it.data.hash_tree_root)

View File

@ -1,71 +1,133 @@
import
options, random,
chronos, chronicles,
spec/datatypes,
eth2_network, beacon_node_types, sync_protocol,
eth/async_utils
import options, sequtils, strutils
import chronos, chronicles
import spec/[datatypes, digest], eth2_network, beacon_node_types, sync_protocol,
sync_manager, ssz/merkleization
logScope:
topics = "requman"
const
MAX_REQUEST_BLOCKS* = 4 # Specification's value is 1024.
## Maximum number of blocks that can be requested by beaconBlocksByRoot.
PARALLEL_REQUESTS* = 2
## Number of peers we use to resolve our request.
type
RequestManager* = object
network*: Eth2Node
queue*: AsyncQueue[FetchRecord]
responseHandler*: FetchAncestorsResponseHandler
loopFuture: Future[void]
proc init*(T: type RequestManager, network: Eth2Node): T =
T(network: network)
type
FetchAncestorsResponseHandler = proc (b: SignedBeaconBlock) {.gcsafe.}
proc fetchAncestorBlocksFromPeer(
peer: Peer,
rec: FetchRecord,
responseHandler: FetchAncestorsResponseHandler) {.async.} =
# TODO: It's not clear if this function follows the intention of the
# FetchRecord data type. Perhaps it is supposed to get a range of blocks
# instead. In order to do this, we'll need the slot number of the known
# block to be stored in the FetchRecord, so we can ask for a range of
# blocks starting N positions before this slot number.
try:
let blocks = await peer.beaconBlocksByRoot(BlockRootsList @[rec.root])
if blocks.isOk:
for b in blocks.get:
responseHandler(b)
except CatchableError as err:
debug "Error while fetching ancestor blocks",
err = err.msg, root = rec.root, peer = peer
func shortLog*(x: seq[Eth2Digest]): string =
"[" & x.mapIt(shortLog(it)).join(", ") & "]"
proc fetchAncestorBlocksFromNetwork(
network: Eth2Node,
rec: FetchRecord,
responseHandler: FetchAncestorsResponseHandler) {.async.} =
func shortLog*(x: seq[FetchRecord]): string =
"[" & x.mapIt(shortLog(it.root)).join(", ") & "]"
proc init*(T: type RequestManager, network: Eth2Node,
responseCb: FetchAncestorsResponseHandler): T =
T(
network: network, queue: newAsyncQueue[FetchRecord](),
responseHandler: responseCb
)
proc checkResponse(roots: openArray[Eth2Digest],
blocks: openArray[SignedBeaconBlock]): bool =
## Check the peer's response: every block must match a distinct requested root.
var checks = @roots
if len(blocks) > len(roots):
return false
for blk in blocks:
let blockRoot = hash_tree_root(blk.message)
let res = checks.find(blockRoot)
if res == -1:
return false
else:
checks.del(res)
return true
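A hedged illustration of the contract this check enforces, using a default-initialized block (the types come from the imports above; `checkResponse` is module-private, so this is a sketch rather than test code):

```nim
var blk: SignedBeaconBlock              # default-initialized
let root = hash_tree_root(blk.message)
doAssert checkResponse([root], [blk])   # returned block matches a requested root
doAssert not checkResponse([], [blk])   # more blocks than requested roots
```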
proc fetchAncestorBlocksFromNetwork(rman: RequestManager,
items: seq[Eth2Digest]) {.async.} =
var peer: Peer
try:
peer = await network.peerPool.acquire()
let blocks = await peer.beaconBlocksByRoot(BlockRootsList @[rec.root])
peer = await rman.network.peerPool.acquire()
debug "Requesting blocks by root", peer = peer, blocks = shortLog(items),
peer_score = peer.getScore()
let blocks = await peer.beaconBlocksByRoot(BlockRootsList items)
if blocks.isOk:
for b in blocks.get:
responseHandler(b)
except CatchableError as err:
debug "Error while fetching ancestor blocks",
err = err.msg, root = rec.root, peer = peer
let ublocks = blocks.get()
if checkResponse(items, ublocks):
for b in ublocks:
rman.responseHandler(b)
peer.updateScore(PeerScoreGoodBlocks)
else:
peer.updateScore(PeerScoreBadResponse)
else:
peer.updateScore(PeerScoreNoBlocks)
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Error while fetching ancestor blocks", exc = exc.msg,
items = shortLog(items), peer = peer, peer_score = peer.getScore()
raise exc
finally:
if not(isNil(peer)):
network.peerPool.release(peer)
rman.network.peerPool.release(peer)
proc fetchAncestorBlocks*(requestManager: RequestManager,
roots: seq[FetchRecord],
responseHandler: FetchAncestorsResponseHandler) =
# TODO: we could have some fancier logic here:
#
# * Keep track of what was requested
# (this would give the asked peer a little time to respond)
#
# * Keep track of the average latency of each peer
# (we can give priority to peers with better latency)
#
const ParallelRequests = 2
proc requestManagerLoop(rman: RequestManager) {.async.} =
var rootList = newSeq[Eth2Digest]()
var workers = newSeq[Future[void]](PARALLEL_REQUESTS)
while true:
try:
rootList.setLen(0)
let req = await rman.queue.popFirst()
rootList.add(req.root)
for i in 0 ..< ParallelRequests:
traceAsyncErrors fetchAncestorBlocksFromNetwork(requestManager.network,
roots.sample(),
responseHandler)
var count = min(MAX_REQUEST_BLOCKS - 1, len(rman.queue))
while count > 0:
rootList.add(rman.queue.popFirstNoWait().root)
dec(count)
let start = SyncMoment.now(Slot(0))
for i in 0 ..< PARALLEL_REQUESTS:
workers[i] = rman.fetchAncestorBlocksFromNetwork(rootList)
# We do not care about individual worker failures here; they are tallied below.
await allFutures(workers)
let finish = SyncMoment.now(Slot(0) + uint64(len(rootList)))
var succeed = 0
for worker in workers:
if worker.finished() and not(worker.failed()):
inc(succeed)
debug "Request manager tick", blocks_count = len(rootList),
succeed = succeed,
failed = (len(workers) - succeed),
queue_size = len(rman.queue),
sync_speed = speed(start, finish)
except CatchableError as exc:
debug "Got a problem in request manager", exc = exc.msg
proc start*(rman: var RequestManager) =
## Start Request Manager's loop.
rman.loopFuture = requestManagerLoop(rman)
proc stop*(rman: RequestManager) =
## Stop Request Manager's loop.
if not(isNil(rman.loopFuture)):
rman.loopFuture.cancel()
proc fetchAncestorBlocks*(rman: RequestManager, roots: seq[FetchRecord]) =
## Enqueue the list of missing block roots ``roots`` for download by
## Request Manager ``rman``.
for item in roots:
rman.queue.addLastNoWait(item)
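Putting the reworked API together, a hedged usage sketch (`node` and `missingRoot` are hypothetical, and the handler body is a stand-in):

```nim
proc onBlock(b: SignedBeaconBlock) {.gcsafe.} =
  discard # hand the block to the block pool in real code

var rman = RequestManager.init(node.network, onBlock)
rman.start()                                        # spawns requestManagerLoop
rman.fetchAncestorBlocks(@[FetchRecord(root: missingRoot)])
# ... on shutdown:
rman.stop()
```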

View File

@ -11,10 +11,10 @@ import
tables, algorithm, math, sequtils, options,
json_serialization/std/sets, chronicles,
../extras, ../ssz/merkleization,
./crypto, ./datatypes, ./digest, ./helpers, ./validator,
./crypto, ./datatypes, ./digest, ./helpers, ./signatures, ./validator,
../../nbench/bench_lab
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_valid_merkle_branch
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_valid_merkle_branch
func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool {.nbench.}=
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
## ``branch``.
@ -29,40 +29,40 @@ func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], de
else:
buf[0..31] = value.data
buf[32..63] = branch[i.int].data
value = eth2hash(buf)
value = eth2digest(buf)
value == root
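A minimal depth-1 check of this proc (a sketch; `eth2digest` is the renamed hash function, see the digest module below). With index 0 the leaf is a left child, so the root is the hash of the leaf followed by its sibling:

```nim
let
  leaf    = eth2digest([byte 1])
  sibling = eth2digest([byte 2])
var buf: array[64, byte]
buf[0..31]  = leaf.data
buf[32..63] = sibling.data
let root = eth2digest(buf)
doAssert is_valid_merkle_branch(leaf, [sibling], depth = 1, index = 0, root = root)
```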
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#increase_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#increase_balance
func increase_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
# Increase the validator balance at index ``index`` by ``delta``.
state.balances[index] += delta
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#decrease_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#decrease_balance
func decrease_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
## Decrease the validator balance at index ``index`` by ``delta``, with
## underflow protection.
# Decrease the validator balance at index ``index`` by ``delta``, with
# underflow protection.
state.balances[index] =
if delta > state.balances[index]:
0'u64
else:
state.balances[index] - delta
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#deposits
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#deposits
proc process_deposit*(
state: var BeaconState, deposit: Deposit, flags: UpdateFlags = {}): bool {.nbench.}=
# Process an Eth1 deposit, registering a validator or increasing its balance.
# Verify the Merkle branch
if skipMerkleValidation notin flags and not is_valid_merkle_branch(
if not is_valid_merkle_branch(
hash_tree_root(deposit.data),
deposit.proof,
DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the `List` length mix-in
state.eth1_deposit_index,
state.eth1_data.deposit_root,
):
notice "Deposit merkle validation failed",
notice "Deposit Merkle validation failed",
proof = deposit.proof, deposit_root = state.eth1_data.deposit_root,
deposit_index = state.eth1_deposit_index
return false
@ -79,19 +79,13 @@ proc process_deposit*(
if index == -1:
# Verify the deposit signature (proof of possession) which is not checked
# by the deposit contract
# Fork-agnostic domain since deposits are valid across forks
let domain = compute_domain(DOMAIN_DEPOSIT)
let signing_root = compute_signing_root(deposit.getDepositMessage, domain)
if skipBLSValidation notin flags and not bls_verify(
pubkey, signing_root.data,
deposit.data.signature):
# It's ok that deposits fail - they get included in blocks regardless
# TODO spec test?
debug "Skipping deposit with invalid signature",
pubkey, signing_root, signature = deposit.data.signature
return true
if skipBLSValidation notin flags:
if not verify_deposit_signature(deposit.data):
# It's ok that deposits fail - they get included in blocks regardless
# TODO spec test?
debug "Skipping deposit with invalid signature",
deposit = shortLog(deposit.data)
return true
# Add validator and balance entries
state.validators.add(Validator(
@ -111,13 +105,13 @@ proc process_deposit*(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_activation_exit_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_activation_exit_epoch
func compute_activation_exit_epoch(epoch: Epoch): Epoch =
## Return the epoch during which validator activations and exits initiated in
## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_validator_churn_limit
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit(state: BeaconState, cache: var StateCache):
uint64 =
# Return the validator churn limit for the current epoch.
@ -125,7 +119,7 @@ func get_validator_churn_limit(state: BeaconState, cache: var StateCache):
len(cache.shuffled_active_validator_indices) div
CHURN_LIMIT_QUOTIENT).uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#initiate_validator_exit
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#initiate_validator_exit
func initiate_validator_exit*(state: var BeaconState,
index: ValidatorIndex, cache: var StateCache) =
# Initiate the exit of the validator with index ``index``.
@ -154,7 +148,7 @@ func initiate_validator_exit*(state: var BeaconState,
validator.withdrawable_epoch =
validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#slash_validator
proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
cache: var StateCache) =
# Slash the validator with index ``index``.
@ -174,7 +168,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
validator.slashed = true
validator.withdrawable_epoch =
max(validator.withdrawable_epoch, epoch + EPOCHS_PER_SLASHINGS_VECTOR)
state.slashings[epoch mod EPOCHS_PER_SLASHINGS_VECTOR] +=
state.slashings[int(epoch mod EPOCHS_PER_SLASHINGS_VECTOR)] +=
validator.effective_balance
decrease_balance(state, slashed_index,
validator.effective_balance div MIN_SLASHING_PENALTY_QUOTIENT)
@ -300,7 +294,7 @@ func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_block_root_at_slot
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: BeaconState,
slot: Slot): Eth2Digest =
# Return the block root at a recent ``slot``.
@ -309,12 +303,12 @@ func get_block_root_at_slot*(state: BeaconState,
doAssert slot < state.slot
state.block_roots[slot mod SLOTS_PER_HISTORICAL_ROOT]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_block_root
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_block_root
func get_block_root*(state: BeaconState, epoch: Epoch): Eth2Digest =
# Return the block root at the start of a recent ``epoch``.
get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_total_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_total_balance
func get_total_balance*(state: BeaconState, validators: auto): Gwei =
## Return the combined effective balance of the ``indices``.
## ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
@ -323,15 +317,13 @@ func get_total_balance*(state: BeaconState, validators: auto): Gwei =
foldl(validators, a + state.validators[b].effective_balance, 0'u64)
)
# XXX: Move to state_transition_epoch.nim?
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue
func is_eligible_for_activation_queue(validator: Validator): bool =
# Check if ``validator`` is eligible to be placed into the activation queue.
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
validator.effective_balance == MAX_EFFECTIVE_BALANCE
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_eligible_for_activation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_eligible_for_activation
func is_eligible_for_activation(state: BeaconState, validator: Validator):
bool =
# Check if ``validator`` is eligible for activation.
@ -341,7 +333,7 @@ func is_eligible_for_activation(state: BeaconState, validator: Validator):
# Has not yet been activated
validator.activation_epoch == FAR_FUTURE_EPOCH
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#registry-updates
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#registry-updates
proc process_registry_updates*(state: var BeaconState,
cache: var StateCache) {.nbench.}=
## Process activation eligibility and ejections
@ -418,15 +410,14 @@ proc is_valid_indexed_attestation*(
return false
# Verify aggregate signature
let pubkeys = mapIt(indices, state.validators[it.int].pubkey) # TODO: fuse loops with blsFastAggregateVerify
let domain = state.get_domain(DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch)
let signing_root = compute_signing_root(indexed_attestation.data, domain)
if skipBLSValidation notin flags and
not blsFastAggregateVerify(
pubkeys, signing_root.data, indexed_attestation.signature
):
notice "indexed attestation: signature verification failure"
return false
if skipBLSValidation notin flags:
# TODO: fuse loops with blsFastAggregateVerify
let pubkeys = mapIt(indices, state.validators[it.int].pubkey)
if not verify_attestation_signature(
state.fork, state.genesis_validators_root, indexed_attestation.data,
pubkeys, indexed_attestation.signature):
notice "indexed attestation: signature verification failure"
return false
true
@ -456,7 +447,7 @@ func get_attesting_indices*(state: BeaconState,
if bits[i]:
result.incl index
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_indexed_attestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_indexed_attestation
func get_indexed_attestation*(state: BeaconState, attestation: Attestation,
stateCache: var StateCache): IndexedAttestation =
# Return the indexed attestation corresponding to ``attestation``.

View File

@ -69,6 +69,8 @@ type
BlsResult*[T] = Result[T, cstring]
RandomSourceDepleted* = object of CatchableError
func `==`*(a, b: BlsValue): bool =
if a.kind != b.kind: return false
if a.kind == Real:
@ -84,7 +86,7 @@ template `==`*[N, T](a: T, b: BlsValue[N, T]): bool =
# API
# ----------------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#bls-signatures
func toPubKey*(privkey: ValidatorPrivKey): ValidatorPubKey =
## Derive a public key from a private key
@ -97,22 +99,12 @@ func toPubKey*(privkey: ValidatorPrivKey): ValidatorPubKey =
else:
privkey.getKey
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#bls-signatures
func aggregate*[T](values: openarray[ValidatorSig]): ValidatorSig =
## Aggregate an array of Validator Signatures
## This assumes that they are real signatures
result = BlsValue[T](kind: Real, blsValue: values[0].BlsValue)
for i in 1 ..< values.len:
result.blsValue.aggregate(values[i].blsValue)
func aggregate*(x: var ValidatorSig, other: ValidatorSig) =
## Aggregate 2 Validator Signatures
## This assumes that they are real signatures
x.blsValue.aggregate(other.blsValue)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#bls-signatures
func blsVerify*(
pubkey: ValidatorPubKey, message: openArray[byte],
signature: ValidatorSig): bool =
@ -141,13 +133,13 @@ func blsVerify*(
# return true
pubkey.blsValue.verify(message, signature.blsValue)
func blsSign*(privkey: ValidatorPrivKey, message: openarray[byte]): ValidatorSig =
func blsSign*(privkey: ValidatorPrivKey, message: openArray[byte]): ValidatorSig =
## Computes a signature from a secret key and a message
ValidatorSig(kind: Real, blsValue: SecretKey(privkey).sign(message))
func blsFastAggregateVerify*[T: byte|char](
publicKeys: openarray[ValidatorPubKey],
message: openarray[T],
func blsFastAggregateVerify*(
publicKeys: openArray[ValidatorPubKey],
message: openArray[byte],
signature: ValidatorSig
): bool =
## Verify the aggregate of multiple signatures on the same message
@ -175,7 +167,8 @@ func blsFastAggregateVerify*[T: byte|char](
if pubkey.kind != Real:
return false
unwrapped.add pubkey.blsValue
return fastAggregateVerify(unwrapped, message, signature.blsValue)
fastAggregateVerify(unwrapped, message, signature.blsValue)
proc newKeyPair*(): BlsResult[tuple[pub: ValidatorPubKey, priv: ValidatorPrivKey]] =
## Generates a new public-private keypair
@ -228,14 +221,14 @@ func toRaw*(x: BlsValue): auto =
func toHex*(x: BlsCurveType): string =
toHex(toRaw(x))
func fromRaw*(T: type ValidatorPrivKey, bytes: openarray[byte]): BlsResult[T] =
func fromRaw*(T: type ValidatorPrivKey, bytes: openArray[byte]): BlsResult[T] =
var val: SecretKey
if val.fromBytes(bytes):
ok ValidatorPrivKey(val)
else:
err "bls: invalid private key"
func fromRaw*[N, T](BT: type BlsValue[N, T], bytes: openarray[byte]): BlsResult[BT] =
func fromRaw*[N, T](BT: type BlsValue[N, T], bytes: openArray[byte]): BlsResult[BT] =
# This is a workaround, so that we can deserialize the serialization of a
# default-initialized BlsValue without raising an exception
when defined(ssz_testing):
@ -292,7 +285,7 @@ proc readValue*(reader: var JsonReader, value: var ValidatorPrivKey) {.
inline, raises: [Exception].} =
value = ValidatorPrivKey.fromHex(reader.readValue(string)).tryGet()
template fromSszBytes*(T: type BlsValue, bytes: openarray[byte]): auto =
template fromSszBytes*(T: type BlsValue, bytes: openArray[byte]): auto =
let v = fromRaw(T, bytes)
if v.isErr:
raise newException(MalformedSszError, $v.error)
@ -341,3 +334,16 @@ func init*(T: typedesc[ValidatorSig], data: array[RawSigSize, byte]): T {.noInit
if v.isErr:
raise (ref ValueError)(msg: $v.error)
return v[]
proc getRandomBytes*(n: Natural): seq[byte]
{.raises: [RandomSourceDepleted, Defect].} =
result = newSeq[byte](n)
if randomBytes(result) != result.len:
raise newException(RandomSourceDepleted, "Failed to generate random bytes")
proc getRandomBytesOrPanic*(output: var openArray[byte]) =
doAssert randomBytes(output) == output.len
proc getRandomBytesOrPanic*(n: Natural): seq[byte] =
result = newSeq[byte](n)
getRandomBytesOrPanic(result)

View File

@ -22,7 +22,7 @@
{.push raises: [Defect].}
import
macros, hashes, json, strutils, tables,
macros, hashes, json, strutils, tables, typetraits,
stew/[byteutils], chronicles,
json_serialization/types as jsonTypes,
../ssz/types as sszTypes, ./crypto, ./digest
@ -98,7 +98,7 @@ const
# TODO: This needs revisiting.
# Why was the validator WITHDRAWAL_PERIOD altered in the spec?
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase0/p2p-interface.md#configuration
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration
ATTESTATION_PROPAGATION_SLOT_RANGE* = 32
SLOTS_PER_ETH1_VOTING_PERIOD* = Slot(EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH)
@ -111,7 +111,7 @@ template maxSize*(n: int) {.pragma.}
type
# Domains
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#domain-types
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#domain-types
DomainType* = enum
DOMAIN_BEACON_PROPOSER = 0
DOMAIN_BEACON_ATTESTER = 1
@ -121,15 +121,15 @@ type
DOMAIN_SELECTION_PROOF = 5
DOMAIN_AGGREGATE_AND_PROOF = 6
# Phase 1 - Sharding
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase1/beacon-chain.md#misc
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase1/beacon-chain.md#misc
DOMAIN_SHARD_PROPOSAL = 128
DOMAIN_SHARD_COMMITTEE = 129
DOMAIN_LIGHT_CLIENT = 130
# Phase 1 - Custody game
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase1/custody-game.md#signature-domain-types
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase1/custody-game.md#signature-domain-types
DOMAIN_CUSTODY_BIT_SLASHING = 0x83
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#custom-types
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#custom-types
Domain* = array[32, byte]
# https://github.com/nim-lang/Nim/issues/574 and be consistent across
@ -143,17 +143,17 @@ type
Gwei* = uint64
CommitteeIndex* = distinct uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#proposerslashing
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#proposerslashing
ProposerSlashing* = object
signed_header_1*: SignedBeaconBlockHeader
signed_header_2*: SignedBeaconBlockHeader
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#attesterslashing
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#attesterslashing
AttesterSlashing* = object
attestation_1*: IndexedAttestation
attestation_2*: IndexedAttestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#indexedattestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#indexedattestation
IndexedAttestation* = object
# TODO ValidatorIndex, but that doesn't serialize properly
attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
@ -162,7 +162,7 @@ type
CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#attestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#attestation
Attestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
@ -171,17 +171,17 @@ type
Version* = distinct array[4, byte]
ForkDigest* = distinct array[4, byte]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#forkdata
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#forkdata
ForkData* = object
current_version*: Version
genesis_validators_root*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#checkpoint
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#checkpoint
Checkpoint* = object
epoch*: Epoch
root*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#AttestationData
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#AttestationData
AttestationData* = object
slot*: Slot
@ -196,27 +196,27 @@ type
source*: Checkpoint
target*: Checkpoint
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#deposit
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#deposit
Deposit* = object
proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\
## Merkle path to deposit root
data*: DepositData
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#depositmessage
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#depositmessage
DepositMessage* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: Gwei
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#depositdata
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#depositdata
DepositData* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: uint64
amount*: Gwei
signature*: ValidatorSig # Signing over DepositMessage
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#voluntaryexit
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#voluntaryexit
VoluntaryExit* = object
epoch*: Epoch ##\
## Earliest epoch when voluntary exit can be processed
@ -242,7 +242,7 @@ type
body*: BeaconBlockBody
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#beaconblockheader
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconblockheader
BeaconBlockHeader* = object
slot*: Slot
proposer_index*: uint64
@ -263,7 +263,7 @@ type
deposits*: List[Deposit, MAX_DEPOSITS]
voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#beaconstate
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate
BeaconStateObj* = object
# Versioning
genesis_time*: uint64
@ -320,7 +320,7 @@ type
BeaconStateRef* = ref BeaconStateObj not nil
NilableBeaconStateRef* = ref BeaconStateObj
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#validator
Validator* = object
pubkey*: ValidatorPubKey
@ -342,7 +342,7 @@ type
withdrawable_epoch*: Epoch ##\
## When validator can withdraw or transfer funds
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#pendingattestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#pendingattestation
PendingAttestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
@ -352,12 +352,12 @@ type
proposer_index*: uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#historicalbatch
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#historicalbatch
HistoricalBatch* = object
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#fork
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#fork
Fork* = object
# TODO: Spec introduced an alias for Version = array[4, byte]
# and a default parameter to compute_domain
@ -367,13 +367,13 @@ type
epoch*: Epoch ##\
## Epoch of latest fork
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#eth1data
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1data
Eth1Data* = object
deposit_root*: Eth2Digest
deposit_count*: uint64
block_hash*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#signedvoluntaryexit
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#signedvoluntaryexit
SignedVoluntaryExit* = object
message*: VoluntaryExit
signature*: ValidatorSig
@ -388,13 +388,13 @@ type
message*: BeaconBlockHeader
signature*: ValidatorSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase0/validator.md#aggregateandproof
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#aggregateandproof
AggregateAndProof* = object
aggregator_index*: uint64
aggregate*: Attestation
selection_proof*: ValidatorSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase0/validator.md#signedaggregateandproof
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#signedaggregateandproof
SignedAggregateAndProof* = object
message*: AggregateAndProof
signature*: ValidatorSig
@ -622,6 +622,14 @@ func shortLog*(v: SignedBeaconBlock): auto =
signature: shortLog(v.signature)
)
func shortLog*(v: DepositData): auto =
(
pubkey: shortLog(v.pubkey),
withdrawal_credentials: shortlog(v.withdrawal_credentials),
amount: v.amount,
signature: shortLog(v.signature)
)
func shortLog*(v: AttestationData): auto =
(
slot: shortLog(v.slot),
@ -649,3 +657,39 @@ chronicles.formatIt Attestation: it.shortLog
import json_serialization
export json_serialization
export writeValue, readValue
static:
# Sanity checks - these types should be trivial enough to copy with memcpy
doAssert supportsCopyMem(Validator)
doAssert supportsCopyMem(Eth2Digest)
func assign*[T](tgt: var T, src: T) =
# The default `genericAssignAux` that gets generated for assignments in nim
# is ridiculously slow. When syncing, the application was spending 50%+ CPU
# time in it - `assign`, in the same test, doesn't even show in the perf trace
when supportsCopyMem(T):
when sizeof(src) <= sizeof(int):
tgt = src
else:
copyMem(addr tgt, unsafeAddr src, sizeof(tgt))
elif T is object|tuple:
for t, s in fields(tgt, src):
when supportsCopyMem(type s) and sizeof(s) <= sizeof(int) * 2:
t = s # Shortcut
else:
assign(t, s)
elif T is List|BitList:
assign(distinctBase tgt, distinctBase src)
elif T is seq:
tgt.setLen(src.len)
when supportsCopyMem(type(tgt[0])):
if tgt.len > 0:
copyMem(addr tgt[0], unsafeAddr src[0], sizeof(tgt[0]) * tgt.len)
else:
for i in 0..<tgt.len:
assign(tgt[i], src[i])
elif T is ref:
tgt = src
else:
unsupported T
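A hedged usage sketch of `assign` (types from this module; arrays of digests take the `copyMem` fast path):

```nim
var a, b: array[8, Eth2Digest]
a[0] = eth2digest([byte 1])
assign(b, a)
doAssert b[0] == a[0]
```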

View File

@ -7,7 +7,7 @@
# Serenity hash function / digest
#
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#hash
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#hash
#
# In Phase 0 the beacon chain is deployed with SHA256 (SHA2-256).
# Note that it is different from Keccak256 (often mistakenly called SHA3-256)
@ -17,7 +17,7 @@
#
# In our code base, to enable a smooth transition
# (already did Blake2b --> Keccak256 --> SHA2-256),
# we call this function `eth2hash`, and it outputs an `Eth2Digest`. Easy to sed :)
# we call this function `eth2digest`, and it outputs an `Eth2Digest`. Easy to sed :)
{.push raises: [Defect].}
@ -44,7 +44,7 @@ chronicles.formatIt Eth2Digest:
# TODO: expose an in-place digest function
# when hashing in loop or into a buffer
# See: https://github.com/cheatfate/nimcrypto/blob/b90ba3abd/nimcrypto/sha2.nim#L570
func eth2hash*(v: openArray[byte]): Eth2Digest {.inline.} =
func eth2digest*(v: openArray[byte]): Eth2Digest {.inline.} =
# We use the init-update-finish interface to avoid
# the expensive burning/clearing memory (20~30% perf)
# TODO: security implication?
@ -63,8 +63,7 @@ template withEth2Hash*(body: untyped): Eth2Digest =
var h {.inject.}: sha256
init(h)
body
var res = finish(h)
res
finish(h)
func hash*(x: Eth2Digest): Hash =
## Hash for digests for Nim hash tables

View File

@ -1,6 +1,20 @@
import
options,
../datatypes
../[datatypes, digest, crypto],
json_rpc/jsonmarshal,
callsigs_types
proc get_v1_beacon_genesis(): BeaconGenesisTuple
# TODO stateId is part of the REST path
proc get_v1_beacon_states_root(stateId: string): Eth2Digest
# TODO stateId is part of the REST path
proc get_v1_beacon_states_fork(stateId: string): Fork
# TODO: delete old stuff
# https://github.com/ethereum/eth2.0-APIs/blob/master/apis/beacon/basic.md
#

View File

@ -0,0 +1,23 @@
import
# Standard library
options,
# Local modules
# TODO for some reason "../[datatypes, digest, crypto]" results in "Error: cannot open file"
../datatypes,
../digest,
../crypto
type
AttesterDuties* = tuple
public_key: ValidatorPubKey
committee_index: CommitteeIndex
committee_length: uint64
validator_committee_index: uint64
slot: Slot
ValidatorPubkeySlotPair* = tuple[public_key: ValidatorPubKey, slot: Slot]
BeaconGenesisTuple* = tuple
genesis_time: uint64
genesis_validators_root: Eth2Digest
genesis_fork_version: Version

View File

@ -4,24 +4,17 @@ import
# Local modules
../[datatypes, digest, crypto],
json_rpc/jsonmarshal,
validator_callsigs_types
# TODO check which arguments are part of the path in the REST API
callsigs_types
# calls that return a bool actually have no return type in the main REST API
# spec, but nim-json-rpc requires that all RPC calls have a return type.
# TODO this doesn't have "validator" in its path but is used by the validators nonetheless
proc get_v1_beacon_states_fork(stateId: string): Fork
# TODO this doesn't have "validator" in its path but is used by the validators nonetheless
proc get_v1_beacon_genesis(): BeaconGenesisTuple
# TODO returns a bool even though in the API there is no return type - because of nim-json-rpc
proc post_v1_beacon_pool_attestations(attestation: Attestation): bool
# TODO slot is part of the REST path
proc get_v1_validator_blocks(slot: Slot, graffiti: Eth2Digest, randao_reveal: ValidatorSig): BeaconBlock
# TODO returns a bool even though in the API there is no return type - because of nim-json-rpc
proc post_v1_beacon_blocks(body: SignedBeaconBlock): bool
proc get_v1_validator_attestation_data(slot: Slot, committee_index: CommitteeIndex): AttestationData
@ -31,16 +24,17 @@ proc get_v1_validator_attestation_data(slot: Slot, committee_index: CommitteeInd
# https://docs.google.com/spreadsheets/d/1kVIx6GvzVLwNYbcd-Fj8YUlPf4qGrWUlS35uaTnIAVg/edit?disco=AAAAGh7r_fQ
proc get_v1_validator_aggregate_attestation(attestation_data: AttestationData): Attestation
# TODO returns a bool even though in the API there is no return type - because of nim-json-rpc
proc post_v1_validator_aggregate_and_proof(payload: SignedAggregateAndProof): bool
# this is a POST instead of a GET because of this: https://docs.google.com/spreadsheets/d/1kVIx6GvzVLwNYbcd-Fj8YUlPf4qGrWUlS35uaTnIAVg/edit?disco=AAAAJk5rbKA
# TODO epoch is part of the REST path
proc post_v1_validator_duties_attester(epoch: Epoch, public_keys: seq[ValidatorPubKey]): seq[AttesterDuties]
# TODO epoch is part of the REST path
proc get_v1_validator_duties_proposer(epoch: Epoch): seq[ValidatorPubkeySlotPair]
proc post_v1_validator_beacon_committee_subscription(committee_index: CommitteeIndex,
slot: Slot,
aggregator: bool,
validator_pubkey: ValidatorPubKey,
slot_signature: ValidatorSig)
proc post_v1_validator_beacon_committee_subscriptions(committee_index: CommitteeIndex,
slot: Slot,
aggregator: bool,
validator_pubkey: ValidatorPubKey,
slot_signature: ValidatorSig): bool

View File

@ -1,27 +0,0 @@
import
# Standard library
options,
# Local modules
# TODO for some reason "../[datatypes, digest, crypto]" results in "Error: cannot open file"
../datatypes,
../digest,
../crypto
type
AttesterDuties* = object
public_key*: ValidatorPubKey
committee_index*: CommitteeIndex
committee_length*: uint64
validator_committee_index*: uint64
slot*: Slot
# TODO do we even need this? how about a simple tuple (alias)?
ValidatorPubkeySlotPair* = object
public_key*: ValidatorPubKey
slot*: Slot
# TODO do we even need this? how about a simple tuple (alias)?
BeaconGenesisTuple* = object
genesis_time*: uint64
genesis_validators_root*: Eth2Digest
genesis_fork_version*: Version

View File

@ -22,7 +22,7 @@ type
# (other candidate is nativesockets.Domain)
Domain = datatypes.Domain
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#integer_squareroot
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#integer_squareroot
func integer_squareroot*(n: SomeInteger): SomeInteger =
# Return the largest integer ``x`` such that ``x**2 <= n``.
doAssert n >= 0'u64
@ -35,7 +35,7 @@ func integer_squareroot*(n: SomeInteger): SomeInteger =
y = (x + n div x) div 2
x
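A few concrete values (a sketch):

```nim
doAssert integer_squareroot(0'u64) == 0'u64
doAssert integer_squareroot(15'u64) == 3'u64   # 3*3 = 9 <= 15 < 16 = 4*4
doAssert integer_squareroot(16'u64) == 4'u64
```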
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_epoch_at_slot
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_epoch_at_slot
func compute_epoch_at_slot*(slot: Slot|uint64): Epoch =
# Return the epoch number at ``slot``.
(slot div SLOTS_PER_EPOCH).Epoch
@ -46,12 +46,12 @@ template epoch*(slot: Slot): Epoch =
template isEpoch*(slot: Slot): bool =
(slot mod SLOTS_PER_EPOCH) == 0
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
func compute_start_slot_at_epoch*(epoch: Epoch): Slot =
# Return the start slot of ``epoch``.
(epoch * SLOTS_PER_EPOCH).Slot
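The slot/epoch helpers compose as expected; a sketch assuming the minimal preset's SLOTS_PER_EPOCH = 8:

```nim
doAssert compute_epoch_at_slot(17'u64) == Epoch(2)           # 17 div 8
doAssert compute_start_slot_at_epoch(Epoch(2)) == Slot(16)
doAssert Slot(16).isEpoch and not Slot(17).isEpoch
```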
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_active_validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_active_validator
func is_active_validator*(validator: Validator, epoch: Epoch): bool =
## Check if ``validator`` is active
validator.activation_epoch <= epoch and epoch < validator.exit_epoch
@ -64,7 +64,13 @@ func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
if is_active_validator(val, epoch):
result.add idx.ValidatorIndex
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_committee_count_at_slot
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_committee_count_at_slot
func get_committee_count_at_slot*(num_active_validators: auto):
uint64 =
clamp(
num_active_validators div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE,
1, MAX_COMMITTEES_PER_SLOT).uint64
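For intuition, with mainnet-style numbers (SLOTS_PER_EPOCH = 32, TARGET_COMMITTEE_SIZE = 128, MAX_COMMITTEES_PER_SLOT = 64; assumptions for the sketch, not this file's preset):

```nim
# 100_000 active validators -> 100_000 div 32 div 128 = 24, inside [1, 64]
doAssert clamp(100_000 div 32 div 128, 1, 64) == 24
```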
func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
# Return the number of committees at ``slot``.
@ -74,10 +80,7 @@ func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
# CommitteeIndex return type here.
let epoch = compute_epoch_at_slot(slot)
let active_validator_indices = get_active_validator_indices(state, epoch)
let committees_per_slot = clamp(
len(active_validator_indices) div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE,
1, MAX_COMMITTEES_PER_SLOT).uint64
result = committees_per_slot
result = get_committee_count_at_slot(len(active_validator_indices))
# Otherwise, get_beacon_committee(...) cannot access some committees.
doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT).uint64 >= result
@ -152,7 +155,7 @@ func compute_fork_digest*(current_version: Version,
compute_fork_data_root(
current_version, genesis_validators_root).data.toOpenArray(0, 3)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_domain
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_domain
func compute_domain*(
domain_type: DomainType,
fork_version: Version = Version(GENESIS_FORK_VERSION),
@ -212,4 +215,4 @@ func get_seed*(state: BeaconState, epoch: Epoch, domain_type: DomainType): Eth2D
seed_input[12..43] =
get_randao_mix(state, # Avoid underflow
epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1).data
eth2hash(seed_input)
eth2digest(seed_input)

View File

@ -6,13 +6,14 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
json, math, strutils,
eth/keyfile/uuid,
stew/[results, byteutils],
json, math, strutils, strformat,
stew/[results, byteutils, bitseqs, bitops2], stew/shims/macros,
eth/keyfile/uuid, blscurve,
nimcrypto/[sha2, rijndael, pbkdf2, bcmode, hash, sysrand],
./crypto
./datatypes, ./crypto, ./digest, ./signatures
export results
export
results
{.push raises: [Defect].}
@ -64,6 +65,22 @@ type
KsResult*[T] = Result[T, cstring]
Eth2KeyKind* = enum
signingKeyKind # Also known as voting key
withdrawalKeyKind
Mnemonic* = distinct string
KeyPath* = distinct string
KeyStorePass* = distinct string
KeyStoreContent* = distinct JsonString
KeySeed* = distinct seq[byte]
Credentials* = object
mnemonic*: Mnemonic
keyStore*: KeyStoreContent
signingKey*: ValidatorPrivKey
withdrawalKey*: ValidatorPrivKey
const
saltSize = 32
@ -80,6 +97,127 @@ const
prf: "hmac-sha256"
)
# https://eips.ethereum.org/EIPS/eip-2334
eth2KeyPurpose = 12381
eth2CoinType* = 3600
# https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md
wordListLen = 2048
macro wordListArray(filename: static string): array[wordListLen, cstring] =
result = newTree(nnkBracket)
var words = slurp(filename).split()
words.setLen wordListLen
for word in words:
result.add newCall("cstring", newLit(word))
const
englishWords = wordListArray "english_word_list.txt"
iterator pathNodesImpl(path: string): Natural
{.raises: [ValueError].} =
for elem in path.split("/"):
if elem == "m": continue
yield parseInt(elem)
func append*(path: KeyPath, pathNode: Natural): KeyPath =
KeyPath(path.string & "/" & $pathNode)
func validateKeyPath*(path: TaintedString): KeyPath
{.raises: [ValueError].} =
for elem in pathNodesImpl(path.string): discard elem
KeyPath path
iterator pathNodes(path: KeyPath): Natural =
try:
for elem in pathNodesImpl(path.string):
yield elem
except ValueError:
doAssert false, "Make sure you've validated the key path with `validateKeyPath`"
func makeKeyPath*(validatorIdx: Natural,
keyType: Eth2KeyKind): KeyPath =
# https://eips.ethereum.org/EIPS/eip-2334
let use = case keyType
of withdrawalKeyKind: "0"
of signingKeyKind: "0/0"
try:
KeyPath &"m/{eth2KeyPurpose}/{eth2CoinType}/{validatorIdx}/{use}"
except ValueError:
raiseAssert "All values above can be converted successfully to strings"
func getSeed*(mnemonic: Mnemonic, password: KeyStorePass): KeySeed =
# https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#from-mnemonic-to-seed
let salt = "mnemonic-" & password.string
KeySeed sha512.pbkdf2(mnemonic.string, salt, 2048, 64)
proc generateMnemonic*(words: openarray[cstring],
entropyParam: openarray[byte] = @[]): Mnemonic =
# https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#generating-the-mnemonic
doAssert words.len == wordListLen
var entropy: seq[byte]
if entropyParam.len == 0:
entropy = getRandomBytesOrPanic(32)
else:
    # BIP-39 allows 128..256 bits of entropy; `entropyParam` is in bytes
    doAssert entropyParam.len >= 16 and
             entropyParam.len <= 32 and
             entropyParam.len mod 4 == 0
entropy = @entropyParam
let
checksumBits = entropy.len div 4 # ranges from 4 to 8
mnemonicWordCount = 12 + (checksumBits - 4) * 3
checksum = sha256.digest(entropy)
entropy.add byte(checksum.data.getBitsBE(0 ..< checksumBits))
var res = ""
res.add words[entropy.getBitsBE(0..10)]
for i in 1 ..< mnemonicWordCount:
let
firstBit = i*11
lastBit = firstBit + 10
res.add " "
res.add words[entropy.getBitsBE(firstBit..lastBit)]
Mnemonic res
proc deriveChildKey*(parentKey: ValidatorPrivKey,
index: Natural): ValidatorPrivKey =
let success = derive_child_secretKey(SecretKey result,
SecretKey parentKey,
uint32 index)
# TODO `derive_child_secretKey` is reporting pre-condition
# failures with return values. We should turn the checks
# into asserts inside the function.
doAssert success
proc deriveMasterKey*(seed: KeySeed): ValidatorPrivKey =
let success = derive_master_secretKey(SecretKey result,
seq[byte] seed)
# TODO `derive_master_secretKey` is reporting pre-condition
# failures with return values. We should turn the checks
# into asserts inside the function.
doAssert success
proc deriveMasterKey*(mnemonic: Mnemonic,
password: KeyStorePass): ValidatorPrivKey =
deriveMasterKey(getSeed(mnemonic, password))
proc deriveChildKey*(masterKey: ValidatorPrivKey,
path: KeyPath): ValidatorPrivKey =
result = masterKey
for idx in pathNodes(path):
result = deriveChildKey(result, idx)
proc keyFromPath*(mnemonic: Mnemonic,
password: KeyStorePass,
path: KeyPath): ValidatorPrivKey =
deriveChildKey(deriveMasterKey(mnemonic, password), path)
proc shaChecksum(key, cipher: openarray[byte]): array[32, byte] =
var ctx: sha256
ctx.init()
@ -100,12 +238,11 @@ template hexToBytes(data, name: string): untyped =
except ValueError:
return err "ks: failed to parse " & name
proc decryptKeystore*(data, passphrase: string): KsResult[seq[byte]] =
let ks =
try:
parseJson(data)
except Exception:
return err "ks: failed to parse keystore"
proc decryptKeystore*(data: KeyStoreContent,
password: KeyStorePass): KsResult[ValidatorPrivKey] =
# TODO: `parseJson` can raise a general `Exception`
let ks = try: parseJson(data.string)
except Exception: return err "ks: failed to parse keystore"
var
decKey: seq[byte]
@ -126,7 +263,7 @@ proc decryptKeystore*(data, passphrase: string): KsResult[seq[byte]] =
kdfParams = crypto.kdf.params
salt = hexToBytes(kdfParams.salt, "salt")
decKey = sha256.pbkdf2(passphrase, salt, kdfParams.c, kdfParams.dklen)
decKey = sha256.pbkdf2(password.string, salt, kdfParams.c, kdfParams.dklen)
iv = hexToBytes(crypto.cipher.params.iv, "iv")
cipherMsg = hexToBytes(crypto.cipher.message, "cipher")
checksumMsg = hexToBytes(crypto.checksum.message, "checksum")
@ -151,41 +288,41 @@ proc decryptKeystore*(data, passphrase: string): KsResult[seq[byte]] =
aesCipher.decrypt(cipherMsg, secret)
aesCipher.clear()
result = ok secret
ValidatorPrivKey.fromRaw(secret)
proc encryptKeystore*[T: KdfParams](secret: openarray[byte];
passphrase: string;
path="";
salt: openarray[byte] = @[];
iv: openarray[byte] = @[];
ugly=true): KsResult[string] =
proc encryptKeystore*(T: type[KdfParams],
privKey: ValidatorPrivkey,
password = KeyStorePass "",
path = KeyPath "",
salt: openarray[byte] = @[],
iv: openarray[byte] = @[],
pretty = true): KeyStoreContent =
var
secret = privKey.toRaw[^32..^1]
decKey: seq[byte]
aesCipher: CTR[aes128]
aesIv = newSeq[byte](aes128.sizeBlock)
kdfSalt = newSeq[byte](saltSize)
cipherMsg = newSeq[byte](secret.len)
if salt.len == saltSize:
if salt.len > 0:
doAssert salt.len == saltSize
kdfSalt = @salt
elif salt.len > 0:
return err "ks: invalid salt"
elif randomBytes(kdfSalt) != saltSize:
return err "ks: no random bytes for salt"
else:
getRandomBytesOrPanic(kdfSalt)
if iv.len == aes128.sizeBlock:
if iv.len > 0:
doAssert iv.len == aes128.sizeBlock
aesIv = @iv
elif iv.len > 0:
return err "ks: invalid iv"
elif randomBytes(aesIv) != aes128.sizeBlock:
return err "ks: no random bytes for iv"
else:
getRandomBytesOrPanic(aesIv)
when T is KdfPbkdf2:
decKey = sha256.pbkdf2(passphrase, kdfSalt, pbkdf2Params.c,
decKey = sha256.pbkdf2(password.string, kdfSalt, pbkdf2Params.c,
pbkdf2Params.dklen)
var kdf = Kdf[KdfPbkdf2](function: "pbkdf2", params: pbkdf2Params, message: "")
kdf.params.salt = kdfSalt.toHex()
kdf.params.salt = byteutils.toHex(kdfSalt)
else:
return
@ -193,29 +330,72 @@ proc encryptKeystore*[T: KdfParams](secret: openarray[byte];
aesCipher.encrypt(secret, cipherMsg)
aesCipher.clear()
let pubkey = (? ValidatorPrivkey.fromRaw(secret)).toPubKey()
let pubkey = privKey.toPubKey()
let
sum = shaChecksum(decKey.toOpenArray(16, 31), cipherMsg)
uuid = uuidGenerate().get
keystore = Keystore[T](
crypto: Crypto[T](
kdf: kdf,
checksum: Checksum(
function: "sha256",
message: sum.toHex()
message: byteutils.toHex(sum)
),
cipher: Cipher(
function: "aes-128-ctr",
params: CipherParams(iv: aesIv.toHex()),
message: cipherMsg.toHex()
params: CipherParams(iv: byteutils.toHex(aesIv)),
message: byteutils.toHex(cipherMsg)
)
),
pubkey: pubkey.toHex(),
path: path,
uuid: $(? uuidGenerate()),
version: 4
)
pubkey: toHex(pubkey),
path: path.string,
uuid: $uuid,
version: 4)
result = ok(if ugly: $(%keystore)
else: pretty(%keystore, indent=4))
KeyStoreContent if pretty: json.pretty(%keystore, indent=4)
else: $(%keystore)
proc restoreCredentials*(mnemonic: Mnemonic,
password = KeyStorePass ""): Credentials =
let
withdrawalKeyPath = makeKeyPath(0, withdrawalKeyKind)
withdrawalKey = keyFromPath(mnemonic, password, withdrawalKeyPath)
signingKeyPath = withdrawalKeyPath.append 0
signingKey = deriveChildKey(withdrawalKey, 0)
Credentials(
mnemonic: mnemonic,
keyStore: encryptKeystore(KdfPbkdf2, signingKey, password, signingKeyPath),
signingKey: signingKey,
withdrawalKey: withdrawalKey)
proc generateCredentials*(entropy: openarray[byte] = @[],
password = KeyStorePass ""): Credentials =
let mnemonic = generateMnemonic(englishWords, entropy)
restoreCredentials(mnemonic, password)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/deposit-contract.md#withdrawal-credentials
proc makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2digest(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
bytes
proc prepareDeposit*(credentials: Credentials,
amount = MAX_EFFECTIVE_BALANCE.Gwei): Deposit =
let
withdrawalPubKey = credentials.withdrawalKey.toPubKey
signingPubKey = credentials.signingKey.toPubKey
var
ret = Deposit(
data: DepositData(
amount: amount,
pubkey: signingPubKey,
withdrawal_credentials: makeWithdrawalCredentials(withdrawalPubKey)))
ret.data.signature = get_deposit_signature(ret.data, credentials.signingKey)
ret
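An end-to-end sketch tying the new procs together (the password is arbitrary; generation draws fresh entropy):

```nim
let
  password = KeyStorePass "correct horse battery staple"
  creds    = generateCredentials(password = password)
  decoded  = decryptKeystore(creds.keyStore, password)
doAssert decoded.isOk   # the signing key round-trips through the keystore
```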

View File

@ -12,14 +12,17 @@ import
datatypes
const
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#topics-and-messages
topicBeaconBlocksSuffix* = "beacon_block/ssz"
topicMainnetAttestationsSuffix* = "_beacon_attestation/ssz"
topicVoluntaryExitsSuffix* = "voluntary_exit/ssz"
topicProposerSlashingsSuffix* = "proposer_slashing/ssz"
topicAttesterSlashingsSuffix* = "attester_slashing/ssz"
topicAggregateAndProofsSuffix* = "beacon_aggregate_and_proof/ssz"
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#configuration
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/p2p-interface.md#topics-and-messages
topicMainnetAttestationsSuffix* = "_beacon_attestation/ssz"
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#misc
ATTESTATION_SUBNET_COUNT* = 64
defaultEth2TcpPort* = 9000
@ -30,35 +33,30 @@ const
when ETH2_SPEC == "v0.11.3":
const topicInteropAttestationSuffix* = "beacon_attestation/ssz"
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getBeaconBlocksTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicBeaconBlocksSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getVoluntaryExitsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicVoluntaryExitsSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getProposerSlashingsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicProposerSlashingsSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicAttesterSlashingsSuffix}"
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#topics-and-messages
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
try:
&"/eth2/{$forkDigest}/{topicAggregateAndProofsSuffix}"
@ -72,10 +70,44 @@ when ETH2_SPEC == "v0.11.3":
except ValueError as e:
raiseAssert e.msg
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#mainnet-3
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/p2p-interface.md#mainnet-3
func getMainnetAttestationTopic*(forkDigest: ForkDigest, committeeIndex: uint64): string =
let topicIndex = committeeIndex mod ATTESTATION_SUBNET_COUNT
try:
let topicIndex = committeeIndex mod ATTESTATION_SUBNET_COUNT
&"/eth2/{$forkDigest}/committee_index{topicIndex}{topicMainnetAttestationsSuffix}"
except ValueError as e:
raiseAssert e.msg
when ETH2_SPEC == "v0.12.1":
import helpers
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-attestation
func compute_subnet_for_attestation*(
num_active_validators: uint64, attestation: Attestation): uint64 =
# Compute the correct subnet for an attestation for Phase 0.
# Note, this mimics expected Phase 1 behavior where attestations will be
# mapped to their shard subnet.
#
# The spec version has params (state: BeaconState, attestation: Attestation),
# but it's only to call get_committee_count_at_slot(), which needs only epoch
# and the number of active validators.
let
slots_since_epoch_start = attestation.data.slot mod SLOTS_PER_EPOCH
committees_since_epoch_start =
get_committee_count_at_slot(num_active_validators) * slots_since_epoch_start
(committees_since_epoch_start + attestation.data.index) mod ATTESTATION_SUBNET_COUNT
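A worked example with assumed mainnet-style numbers: at slot 37 (slot 5 of its 32-slot epoch), with 24 committees per slot and committee index 3, the attestation maps to subnet (5 * 24 + 3) mod 64 = 59:

```nim
doAssert ((37 mod 32) * 24 + 3) mod 64 == 59
```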
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-attestation
func getAttestationTopic*(forkDigest: ForkDigest, subnetIndex: uint64):
string =
# This is for subscribing or broadcasting manually to a known index.
try:
&"/eth2/{$forkDigest}/beacon_attestation_{subnetIndex}/ssz"
except ValueError as e:
raiseAssert e.msg
func getAttestationTopic*(forkDigest: ForkDigest, attestation: Attestation, num_active_validators: uint64): string =
getAttestationTopic(
forkDigest,
compute_subnet_for_attestation(num_active_validators, attestation))

View File

@ -24,7 +24,7 @@ const
MAX_COMMITTEES_PER_SLOT* {.intdefine.} = 64
TARGET_COMMITTEE_SIZE* = 2^7 ##\
TARGET_COMMITTEE_SIZE* = 128 ##\
## Number of validators in the committee attesting to one shard
## Per spec:
## For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds

View File

@ -20,7 +20,7 @@ type
const
# Misc
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L4
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L4
# Changed
MAX_COMMITTEES_PER_SLOT* = 4
@ -43,7 +43,7 @@ const
# Gwei values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L58
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L58
# Unchanged
MIN_DEPOSIT_AMOUNT* = 2'u64^0 * 10'u64^9
@ -53,14 +53,14 @@ const
# Initial values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L70
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L70
GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 1'u8]
BLS_WITHDRAWAL_PREFIX* = 0'u8
# Time parameters
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L77
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L77
# Changed: Faster to spin up testnets, but does not give validator
# reasonable warning time for genesis
GENESIS_DELAY* = 300
@ -71,8 +71,6 @@ const
# Unchanged
MIN_ATTESTATION_INCLUSION_DELAY* = 1
SHARD_COMMITTEE_PERIOD* = 64 # epochs
# Changed
SLOTS_PER_EPOCH* {.intdefine.} = 8
@ -87,6 +85,8 @@ const
# Unchanged
MIN_VALIDATOR_WITHDRAWABILITY_DELAY* = 2'u64^8
SHARD_COMMITTEE_PERIOD* = 64 # epochs
# Unchanged
MAX_EPOCHS_PER_CROSSLINK* = 4
@ -95,7 +95,7 @@ const
# State vector lengths
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L105
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L105
# Changed
EPOCHS_PER_HISTORICAL_VECTOR* = 64
@ -107,7 +107,7 @@ const
# Reward and penalty quotients
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L117
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L117
BASE_REWARD_FACTOR* = 2'u64^6
WHISTLEBLOWER_REWARD_QUOTIENT* = 2'u64^9
@ -117,7 +117,7 @@ const
# Max operations per block
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L131
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L131
MAX_PROPOSER_SLASHINGS* = 2^4
MAX_ATTESTER_SLASHINGS* = 2^1
@ -127,14 +127,14 @@ const
# Fork choice
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L32
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L32
# Changed
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 2
# Validators
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L38
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L38
# Changed
ETH1_FOLLOW_DISTANCE* = 16 # blocks
@ -147,14 +147,14 @@ const
# Phase 1: Upgrade from Phase 0
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L161
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L161
PHASE_1_FORK_VERSION* = 16777217
PHASE_1_GENESIS_SLOT* = 8
INITIAL_ACTIVE_SHARDS* = 4
# Phase 1: General
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L169
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L171
MAX_SHARDS* = 8
ONLINE_PERIOD* = 8 # epochs ~ 51 minutes
LIGHT_CLIENT_COMMITTEE_SIZE* = 128
@ -170,7 +170,7 @@ const
# Phase 1 - Custody game
# ---------------------------------------------------------------
# Time parameters
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L202
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L202
RANDAO_PENALTY_EPOCHS* = 2
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
EPOCHS_PER_CUSTODY_PERIOD* = 2048
@ -178,12 +178,12 @@ const
MAX_REVEAL_LATENESS_DECREMENT* = 128
# Max operations
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L214
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L214
MAX_CUSTODY_KEY_REVEALS* = 256
MAX_EARLY_DERIVED_SECRET_REVEALS* = 1
MAX_CUSTODY_SLASHINGS* = 1
# Reward and penalty quotients
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/configs/minimal.yaml#L220
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/configs/minimal.yaml#L220
EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE* = 2
MINOR_REWARD_QUOTIENT* = 256

View File

@ -0,0 +1,137 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
./crypto, ./digest, ./datatypes, ./helpers, ../ssz/merkleization
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#aggregation-selection
func get_slot_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
privkey: ValidatorPrivKey): ValidatorSig =
let
epoch = compute_epoch_at_slot(slot)
domain = get_domain(
fork, DOMAIN_SELECTION_PROOF, epoch, genesis_validators_root)
signing_root = compute_signing_root(slot, domain)
blsSign(privKey, signing_root.data)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#randao-reveal
func get_epoch_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
privkey: ValidatorPrivKey): ValidatorSig =
let
domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
signing_root = compute_signing_root(epoch, domain)
blsSign(privKey, signing_root.data)
func verify_epoch_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
pubkey: ValidatorPubKey, signature: ValidatorSig): bool =
let
domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
signing_root = compute_signing_root(epoch, domain)
blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#signature
func get_block_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
root: Eth2Digest, privkey: ValidatorPrivKey): ValidatorSig =
let
epoch = compute_epoch_at_slot(slot)
domain = get_domain(
fork, DOMAIN_BEACON_PROPOSER, epoch, genesis_validators_root)
signing_root = compute_signing_root(root, domain)
blsSign(privKey, signing_root.data)
func verify_block_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
blck: Eth2Digest | BeaconBlock | BeaconBlockHeader, pubkey: ValidatorPubKey,
signature: ValidatorSig): bool =
let
epoch = compute_epoch_at_slot(slot)
domain = get_domain(
fork, DOMAIN_BEACON_PROPOSER, epoch, genesis_validators_root)
signing_root = compute_signing_root(blck, domain)
blsVerify(pubKey, signing_root.data, signature)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-aggregate
func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
aggregate_and_proof: AggregateAndProof,
privKey: ValidatorPrivKey): ValidatorSig =
let
epoch = compute_epoch_at_slot(aggregate_and_proof.aggregate.data.slot)
domain = get_domain(
fork, DOMAIN_AGGREGATE_AND_PROOF, epoch, genesis_validators_root)
signing_root = compute_signing_root(aggregate_and_proof, domain)
blsSign(privKey, signing_root.data)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#aggregate-signature
func get_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
attestation_data: AttestationData,
privkey: ValidatorPrivKey): ValidatorSig =
let
epoch = attestation_data.target.epoch
domain = get_domain(
fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
signing_root = compute_signing_root(attestation_data, domain)
blsSign(privKey, signing_root.data)
func verify_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
attestation_data: AttestationData,
pubkeys: openArray[ValidatorPubKey],
signature: ValidatorSig): bool =
let
epoch = attestation_data.target.epoch
domain = get_domain(
fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
signing_root = compute_signing_root(attestation_data, domain)
blsFastAggregateVerify(pubkeys, signing_root.data, signature)
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#deposits
func get_deposit_signature*(
deposit: DepositData,
privkey: ValidatorPrivKey): ValidatorSig =
let
deposit_message = deposit.getDepositMessage()
# Fork-agnostic domain since deposits are valid across forks
domain = compute_domain(DOMAIN_DEPOSIT)
signing_root = compute_signing_root(deposit_message, domain)
blsSign(privKey, signing_root.data)
func verify_deposit_signature*(deposit: DepositData): bool =
let
deposit_message = deposit.getDepositMessage()
# Fork-agnostic domain since deposits are valid across forks
domain = compute_domain(DOMAIN_DEPOSIT)
signing_root = compute_signing_root(deposit_message, domain)
blsVerify(deposit.pubkey, signing_root.data, deposit.signature)
func verify_voluntary_exit_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
voluntary_exit: VoluntaryExit,
pubkey: ValidatorPubKey, signature: ValidatorSig): bool =
let
domain = get_domain(
fork, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch, genesis_validators_root)
signing_root = compute_signing_root(voluntary_exit, domain)
blsVerify(pubkey, signing_root.data, signature)
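Every helper in this new signatures module follows the same shape: derive a role- and fork-specific domain, bind the message to it through a signing root, then sign or verify. A minimal standalone sketch of that shape, with a toy FNV hash and mock keys standing in for SHA-256 and BLS (all assumptions, not the real primitives):

```nim
type Domain = array[4, byte]

# Toy FNV-1a hash standing in for hash_tree_root/SHA-256 (assumption).
proc fnv(data: openArray[byte]): uint64 =
  result = 0xcbf29ce484222325'u64
  for b in data:
    result = (result xor uint64(b)) * 0x100000001b3'u64

proc computeSigningRoot(message: openArray[byte], domain: Domain): uint64 =
  var buf = @message
  buf.add domain
  fnv(buf)

# Mock sign/verify standing in for blsSign/blsVerify (assumption).
proc mockSign(privkey, root: uint64): uint64 = privkey xor root
proc mockVerify(privkey, root, sig: uint64): bool =
  mockSign(privkey, root) == sig

when isMainModule:
  let randaoDomain: Domain = [2'u8, 0, 0, 0]  # DOMAIN_RANDAO stand-in
  let root = computeSigningRoot([7'u8], randaoDomain)  # "epoch 7"
  let sig = mockSign(42'u64, root)
  doAssert mockVerify(42'u64, root, sig)
  # The same message under another domain produces a different signing root:
  doAssert computeSigningRoot([7'u8], [3'u8, 0, 0, 0]) != root
```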

View File

@ -32,7 +32,8 @@
import
algorithm, collections/sets, chronicles, options, sequtils, sets,
../extras, ../ssz/merkleization, metrics,
beaconstate, crypto, datatypes, digest, helpers, validator,
./beaconstate, ./crypto, ./datatypes, ./digest, ./helpers, ./validator,
./signatures,
../../nbench/bench_lab
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
@ -41,7 +42,7 @@ declareGauge beacon_previous_live_validators, "Number of active validators that
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#block-header
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#block-header
proc process_block_header*(
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
@ -70,8 +71,7 @@ proc process_block_header*(
return false
# Verify that the parent matches
if skipBlockParentRootValidation notin flags and not (blck.parent_root ==
hash_tree_root(state.latest_block_header)):
if not (blck.parent_root == hash_tree_root(state.latest_block_header)):
notice "Block header: previous block root mismatch",
latest_block_header = state.latest_block_header,
blck = shortLog(blck),
@ -99,12 +99,11 @@ proc `xor`[T: array](a, b: T): T =
for i in 0..<result.len:
result[i] = a[i] xor b[i]
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#randao
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#randao
proc process_randao(
state: var BeaconState, body: BeaconBlockBody, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
let
epoch = state.get_current_epoch()
proposer_index = get_beacon_proposer_index(state, stateCache)
if proposer_index.isNone:
@ -112,14 +111,17 @@ proc process_randao(
return false
# Verify RANDAO reveal
let proposer = addr state.validators[proposer_index.get]
let
epoch = state.get_current_epoch()
let signing_root = compute_signing_root(
epoch, get_domain(state, DOMAIN_RANDAO, get_current_epoch(state)))
if skipBLSValidation notin flags:
if not blsVerify(proposer.pubkey, signing_root.data, body.randao_reveal):
notice "Randao mismatch", proposer_pubkey = shortLog(proposer.pubkey),
message = epoch,
let proposer_pubkey = state.validators[proposer_index.get].pubkey
if not verify_epoch_signature(
state.fork, state.genesis_validators_root, epoch, proposer_pubkey,
body.randao_reveal):
notice "Randao mismatch", proposer_pubkey = shortLog(proposer_pubkey),
epoch,
signature = shortLog(body.randao_reveal),
slot = state.slot
return false
@ -127,28 +129,28 @@ proc process_randao(
# Mix it in
let
mix = get_randao_mix(state, epoch)
rr = eth2hash(body.randao_reveal.toRaw()).data
rr = eth2digest(body.randao_reveal.toRaw()).data
state.randao_mixes[epoch mod EPOCHS_PER_HISTORICAL_VECTOR].data =
mix.data xor rr
true
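The mix update at the end is a plain xor of the epoch's current mix with the digest of the reveal, so a proposer can perturb the mix but never choose it outright. A standalone sketch, assuming the minimal-preset EPOCHS_PER_HISTORICAL_VECTOR of 64 shown earlier in this diff:

```nim
const EPOCHS_PER_HISTORICAL_VECTOR = 64  # minimal-preset value, assumed

type Digest = array[32, byte]

# Same element-wise xor the block-processing code defines for arrays.
proc `xor`(a, b: Digest): Digest =
  for i in 0 ..< result.len: result[i] = a[i] xor b[i]

var randaoMixes: array[EPOCHS_PER_HISTORICAL_VECTOR, Digest]

proc mixInReveal(epoch: uint64, hashedReveal: Digest) =
  # hashedReveal stands in for eth2digest(body.randao_reveal.toRaw()).data
  let idx = int(epoch mod uint64(EPOCHS_PER_HISTORICAL_VECTOR))
  randaoMixes[idx] = randaoMixes[idx] xor hashedReveal
```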
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#eth1-data
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#eth1-data
func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) {.nbench.}=
state.eth1_data_votes.add body.eth1_data
if state.eth1_data_votes.asSeq.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD.int:
state.eth1_data = body.eth1_data
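process_eth1_data adopts a new eth1_data only once strictly more than half of the voting period's slots have voted for it. A hedged standalone sketch of the counting rule, with an assumed SLOTS_PER_ETH1_VOTING_PERIOD of 16:

```nim
import std/sequtils

const SLOTS_PER_ETH1_VOTING_PERIOD = 16  # assumed value for illustration

var votes: seq[string]
var current = "genesis"

proc processVote(v: string) =
  votes.add v
  # Strictly more than half of the period's slots must agree.
  if votes.count(v) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD:
    current = v

when isMainModule:
  for i in 0 ..< 9: processVote("blockA")  # 9 * 2 = 18 > 16 -> adopted
  doAssert current == "blockA"
```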
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_slashable_validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_slashable_validator
func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
# Check if ``validator`` is slashable.
(not validator.slashed) and
(validator.activation_epoch <= epoch) and
(epoch < validator.withdrawable_epoch)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#proposer-slashings
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#proposer-slashings
proc process_proposer_slashing*(
state: var BeaconState, proposer_slashing: ProposerSlashing,
flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.}=
@ -187,12 +189,9 @@ proc process_proposer_slashing*(
if skipBlsValidation notin flags:
for i, signed_header in [proposer_slashing.signed_header_1,
proposer_slashing.signed_header_2]:
let domain = get_domain(
state, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(signed_header.message.slot)
)
let signing_root = compute_signing_root(signed_header.message, domain)
if not blsVerify(proposer.pubkey, signing_root.data, signed_header.signature):
if not verify_block_signature(
state.fork, state.genesis_validators_root, signed_header.message.slot,
signed_header.message, proposer.pubkey, signed_header.signature):
notice "Proposer slashing: invalid signature",
signature_index = i
return false
@ -201,7 +200,7 @@ proc process_proposer_slashing*(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#is_slashable_attestation_data
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_slashable_attestation_data
func is_slashable_attestation_data(
data_1: AttestationData, data_2: AttestationData): bool =
## Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG
@ -213,7 +212,7 @@ func is_slashable_attestation_data(
(data_1.source.epoch < data_2.source.epoch and
data_2.target.epoch < data_1.target.epoch)
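Both Casper FFG slashing conditions can be evaluated from epochs alone: a double vote (different data, same target epoch) or a surround vote. A standalone sketch with assumed epoch values:

```nim
type Vote = tuple[source, target: uint64]

proc isSlashable(d1, d2: Vote; sameData: bool): bool =
  # Double vote: different attestation data, same target epoch.
  (not sameData and d1.target == d2.target) or
  # Surround vote: d1's span strictly contains d2's span.
  (d1.source < d2.source and d2.target < d1.target)

when isMainModule:
  doAssert isSlashable((source: 2'u64, target: 4'u64),
                       (source: 3'u64, target: 4'u64), sameData = false)
  doAssert isSlashable((source: 1'u64, target: 5'u64),
                       (source: 2'u64, target: 3'u64), sameData = true)
```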
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#attester-slashings
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#attester-slashings
proc process_attester_slashing*(
state: var BeaconState,
attester_slashing: AttesterSlashing,
@ -260,7 +259,7 @@ proc process_voluntary_exit*(
let voluntary_exit = signed_voluntary_exit.message
# Not in spec. Check that validator_index is in range
if voluntary_exit.validator_index.int >= state.validators.len:
if voluntary_exit.validator_index >= state.validators.len.uint64:
notice "Exit: invalid validator index",
index = voluntary_exit.validator_index,
num_validators = state.validators.len
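The range check above was flipped to compare in uint64: converting an attacker-supplied validator_index to int first could trap before the length check ever runs. A small illustration of the safe direction (values are hypothetical):

```nim
let numValidators = 4
let badIndex = high(uint64)  # e.g. an attacker-supplied index
# Safe: widen the length to uint64 and compare in uint64 space.
doAssert badIndex >= numValidators.uint64
# The old direction, int(badIndex), can raise a RangeDefect on 64-bit
# targets before the length check is reached.
```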
@ -298,9 +297,9 @@ proc process_voluntary_exit*(
# Verify signature
if skipBlsValidation notin flags:
let domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
let signing_root = compute_signing_root(voluntary_exit, domain)
if not bls_verify(validator.pubkey, signing_root.data, signed_voluntary_exit.signature):
if not verify_voluntary_exit_signature(
state.fork, state.genesis_validators_root, voluntary_exit,
validator.pubkey, signed_voluntary_exit.signature):
notice "Exit: invalid signature"
return false
@ -320,7 +319,7 @@ proc process_voluntary_exit*(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#operations
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#operations
proc process_operations(state: var BeaconState, body: BeaconBlockBody,
flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.} =
# Verify that outstanding deposits are processed up to the maximum number of
@ -355,7 +354,7 @@ proc process_operations(state: var BeaconState, body: BeaconBlockBody,
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#block-processing
proc process_block*(
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
@ -393,61 +392,3 @@ proc process_block*(
return false
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregation-selection
func get_slot_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
privkey: ValidatorPrivKey): ValidatorSig =
let
domain = get_domain(fork, DOMAIN_SELECTION_PROOF,
compute_epoch_at_slot(slot), genesis_validators_root)
signing_root = compute_signing_root(slot, domain)
blsSign(privKey, signing_root.data)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#randao-reveal
func get_epoch_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
privkey: ValidatorPrivKey): ValidatorSig =
let
domain = get_domain(fork, DOMAIN_RANDAO, compute_epoch_at_slot(slot),
genesis_validators_root)
signing_root = compute_signing_root(compute_epoch_at_slot(slot), domain)
blsSign(privKey, signing_root.data)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#signature
func get_block_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
root: Eth2Digest, privkey: ValidatorPrivKey): ValidatorSig =
let
domain = get_domain(fork, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(slot), genesis_validators_root)
signing_root = compute_signing_root(root, domain)
blsSign(privKey, signing_root.data)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#broadcast-aggregate
func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
aggregate_and_proof: AggregateAndProof,
privKey: ValidatorPrivKey): ValidatorSig =
let
aggregate = aggregate_and_proof.aggregate
domain = get_domain(fork, DOMAIN_AGGREGATE_AND_PROOF,
compute_epoch_at_slot(aggregate.data.slot),
genesis_validators_root)
signing_root = compute_signing_root(aggregate_and_proof, domain)
return blsSign(privKey, signing_root.data)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregate-signature
func get_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, attestation: AttestationData,
privkey: ValidatorPrivKey): ValidatorSig =
let
attestationRoot = hash_tree_root(attestation)
domain = get_domain(fork, DOMAIN_BEACON_ATTESTER,
attestation.target.epoch, genesis_validators_root)
signing_root = compute_signing_root(attestationRoot, domain)
blsSign(privKey, signing_root.data)

View File

@ -53,7 +53,7 @@ declareGauge beacon_current_epoch, "Current epoch"
# Spec
# --------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_total_active_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_total_active_balance
func get_total_active_balance*(state: BeaconState, cache: var StateCache): Gwei =
# Return the combined effective balance of the active validators.
# Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei
@ -145,11 +145,11 @@ proc process_justification_and_finalization*(state: var BeaconState,
## matter -- in the next epoch, they'll be 2 epochs old, when BeaconState
## tracks current_epoch_attestations and previous_epoch_attestations only
## per
## https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#attestations
## https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#attestations
## and `get_matching_source_attestations(...)` via
## https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#helper-functions-1
## https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#helper-functions-1
## and
## https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#final-updates
## https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#final-updates
## after which the state.previous_epoch_attestations is replaced.
let total_active_balance = get_total_active_balance(state, stateCache)
trace "Non-attesting indices in previous epoch",
@ -510,7 +510,7 @@ else:
(rewards, penalties)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#rewards-and-penalties-1
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#process_rewards_and_penalties
func process_rewards_and_penalties(
state: var BeaconState, cache: var StateCache) {.nbench.}=
if get_current_epoch(state) == GENESIS_EPOCH:
@ -539,7 +539,7 @@ func process_slashings*(state: var BeaconState, cache: var StateCache) {.nbench.
let penalty = penalty_numerator div total_balance * increment
decrease_balance(state, index.ValidatorIndex, penalty)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#final-updates
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#final-updates
func process_final_updates*(state: var BeaconState) {.nbench.}=
let
current_epoch = get_current_epoch(state)
@ -566,7 +566,7 @@ func process_final_updates*(state: var BeaconState) {.nbench.}=
MAX_EFFECTIVE_BALANCE)
# Reset slashings
state.slashings[next_epoch mod EPOCHS_PER_SLASHINGS_VECTOR] = 0.Gwei
state.slashings[int(next_epoch mod EPOCHS_PER_SLASHINGS_VECTOR)] = 0.Gwei
# Set randao mix
state.randao_mixes[next_epoch mod EPOCHS_PER_HISTORICAL_VECTOR] =
@ -576,7 +576,7 @@ func process_final_updates*(state: var BeaconState) {.nbench.}=
if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH).uint64 == 0:
# Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
# significant additional stack or heap.
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#historicalbatch
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#historicalbatch
# In response to https://github.com/status-im/nim-beacon-chain/issues/921
state.historical_roots.add hash_tree_root(
[hash_tree_root(state.block_roots), hash_tree_root(state.state_roots)])
@ -585,14 +585,14 @@ func process_final_updates*(state: var BeaconState) {.nbench.}=
state.previous_epoch_attestations = state.current_epoch_attestations
state.current_epoch_attestations = default(type state.current_epoch_attestations)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#epoch-processing
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#epoch-processing
proc process_epoch*(state: var BeaconState, updateFlags: UpdateFlags,
per_epoch_cache: var StateCache) {.nbench.} =
let currentEpoch = get_current_epoch(state)
trace "process_epoch",
current_epoch = currentEpoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(state, per_epoch_cache, updateFlags)
# state.slot hasn't been incremented yet.
@ -602,16 +602,16 @@ proc process_epoch*(state: var BeaconState, updateFlags: UpdateFlags,
# the finalization rules triggered.
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#rewards-and-penalties-1
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#rewards-and-penalties-1
process_rewards_and_penalties(state, per_epoch_cache)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#registry-updates
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#registry-updates
process_registry_updates(state, per_epoch_cache)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#slashings
process_slashings(state, per_epoch_cache)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#final-updates
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#final-updates
process_final_updates(state)
# Once per epoch metrics

View File

@ -35,7 +35,7 @@ func get_attesting_indices*(
result = result.union(get_attesting_indices(
state, a.data, a.aggregation_bits, stateCache))
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#helper-functions-1
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#helper-functions-1
func get_unslashed_attesting_indices*(
state: BeaconState, attestations: openarray[PendingAttestation],
stateCache: var StateCache): HashSet[ValidatorIndex] =

View File

@ -12,8 +12,8 @@ import
algorithm, options, sequtils, math, tables,
./datatypes, ./digest, ./helpers
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_committee
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_committee
func get_shuffled_seq*(seed: Eth2Digest,
list_size: uint64,
): seq[ValidatorIndex] =
@ -52,14 +52,14 @@ func get_shuffled_seq*(seed: Eth2Digest,
source_buffer[32] = round_bytes1
# Only one pivot per round.
let pivot = bytes_to_int(eth2hash(pivot_buffer).data.toOpenArray(0, 7)) mod list_size
let pivot = bytes_to_int(eth2digest(pivot_buffer).data.toOpenArray(0, 7)) mod list_size
## Only need to run, per round, position div 256 hashes, so precalculate
## them. This consumes memory, but for low-memory devices, it's possible
## to mitigate by some light LRU caching and similar.
for reduced_position in 0 ..< sources.len:
source_buffer[33..36] = int_to_bytes4(reduced_position.uint64)
sources[reduced_position] = eth2hash(source_buffer)
sources[reduced_position] = eth2digest(source_buffer)
## Iterate over all the indices. This was in get_permuted_index, but large
## efficiency gains exist in caching and re-using data.
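The pivot derivation reads the first 8 bytes of the round digest as a little-endian integer and reduces it mod list_size. A standalone sketch of just that step (the digest function itself is out of scope here):

```nim
proc bytesToInt(data: openArray[byte]): uint64 =
  # Little-endian read of the first 8 bytes, as bytes_to_int does above.
  for i in 0 ..< 8:
    result = result or (uint64(data[i]) shl (8 * i))

when isMainModule:
  var hashPrefix: array[8, byte] = [1'u8, 0, 0, 0, 0, 0, 0, 0]
  doAssert bytesToInt(hashPrefix) mod 100'u64 == 1  # pivot for list_size 100
```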
@ -90,7 +90,7 @@ func get_shuffled_active_validator_indices*(state: BeaconState, epoch: Epoch):
active_validator_indices.len.uint64),
active_validator_indices[it])
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_previous_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(state: BeaconState): Epoch =
# Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
let current_epoch = get_current_epoch(state)
@ -99,7 +99,7 @@ func get_previous_epoch*(state: BeaconState): Epoch =
else:
current_epoch - 1
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_committee
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_committee
func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
index: uint64, count: uint64): seq[ValidatorIndex] =
## Return the committee corresponding to ``indices``, ``seed``, ``index``,
@ -123,7 +123,7 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
except KeyError:
raiseAssert("Cached entries are added before use")
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee*(
state: BeaconState, slot: Slot, index: CommitteeIndex,
cache: var StateCache): seq[ValidatorIndex] =
@ -162,7 +162,7 @@ func get_empty_per_epoch_cache*(): StateCache =
initTable[Epoch, seq[ValidatorIndex]]()
result.committee_count_cache = initTable[Epoch, uint64]()
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#compute_proposer_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_proposer_index
func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex],
seed: Eth2Digest): Option[ValidatorIndex] =
# Return from ``indices`` a random index sampled by effective balance.
@ -185,7 +185,7 @@ func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex],
buffer[32..39] = int_to_bytes8(i.uint64 div 32)
let
candidate_index = shuffled_seq[(i.uint64 mod seq_len).int]
random_byte = (eth2hash(buffer).data)[i mod 32]
random_byte = (eth2digest(buffer).data)[i mod 32]
effective_balance =
state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >=
@ -193,7 +193,7 @@ func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex],
return some(candidate_index)
i += 1
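The sampling loop accepts a candidate with probability effective_balance / MAX_EFFECTIVE_BALANCE, using a single random byte per trial. A hedged sketch of the acceptance test, with MAX_EFFECTIVE_BALANCE assumed at 32 ETH in Gwei:

```nim
const
  MAX_RANDOM_BYTE = 255'u64
  MAX_EFFECTIVE_BALANCE = 32'u64 * 1_000_000_000'u64  # 32 ETH in Gwei, assumed

# A candidate passes with probability effective_balance / MAX_EFFECTIVE_BALANCE.
proc accepted(effectiveBalance, randomByte: uint64): bool =
  effectiveBalance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * randomByte

when isMainModule:
  doAssert accepted(MAX_EFFECTIVE_BALANCE, 255)          # full balance always passes
  doAssert not accepted(MAX_EFFECTIVE_BALANCE div 2, 255)  # half balance fails worst draw
```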
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache, slot: Slot):
Option[ValidatorIndex] =
try:
@ -217,7 +217,7 @@ func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache, slot:
try:
let
seed = eth2hash(buffer)
seed = eth2digest(buffer)
indices =
sorted(cache.shuffled_active_validator_indices[epoch], system.cmp)
@ -227,12 +227,12 @@ func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache, slot:
except KeyError:
raiseAssert("Cached entries are added before use")
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache):
Option[ValidatorIndex] =
get_beacon_proposer_index(state, cache, state.slot)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/validator.md#validator-assignments
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#validator-assignments
func get_committee_assignment*(
state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex):
Option[tuple[a: seq[ValidatorIndex], b: CommitteeIndex, c: Slot]] {.used.} =
@ -257,7 +257,7 @@ func get_committee_assignment*(
return some((committee, idx, slot))
none(tuple[a: seq[ValidatorIndex], b: CommitteeIndex, c: Slot])
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/validator.md#validator-assignments
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#validator-assignments
func is_proposer(
state: BeaconState, validator_index: ValidatorIndex): bool {.used.} =
var cache = get_empty_per_epoch_cache()

View File

@ -19,3 +19,4 @@ import
export
merkleization, ssz_serialization, types

View File

@ -85,14 +85,14 @@ template checkForForbiddenBits(ResulType: type,
func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
mixin fromSszBytes, toSszType
template readOffsetUnchecked(n: int): int {.used.}=
int fromSszBytes(uint32, input.toOpenArray(n, n + offsetSize - 1))
template readOffsetUnchecked(n: int): uint32 {.used.}=
fromSszBytes(uint32, input.toOpenArray(n, n + offsetSize - 1))
template readOffset(n: int): int {.used.} =
let offset = readOffsetUnchecked(n)
if offset > input.len:
if offset > input.len.uint32:
raise newException(MalformedSszError, "SSZ list element offset points past the end of the input")
offset
int(offset)
#when result is List:
# result.setOutputSize input.len
@ -141,6 +141,7 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
elif val is List|array:
type E = type val[0]
when E is byte:
val.setOutputSize input.len
if input.len > 0:
@ -171,8 +172,8 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
raise newException(MalformedSszError, "SSZ input of insufficient size")
var offset = readOffset 0
trs "GOT OFFSET ", offset
let resultLen = offset div offsetSize
trs "LEN ", resultLen
@ -206,8 +207,10 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
copyMem(addr val.bytes[0], unsafeAddr input[0], input.len)
elif val is object|tuple:
const minimallyExpectedSize = fixedPortionSize(T)
if input.len < minimallyExpectedSize:
let inputLen = uint32 input.len
const minimallyExpectedSize = uint32 fixedPortionSize(T)
if inputLen < minimallyExpectedSize:
raise newException(MalformedSszError, "SSZ input of insufficient size")
enumInstanceSerializedFields(val, fieldName, field):
@ -231,7 +234,7 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
else:
let
startOffset = readOffsetUnchecked(boundingOffsets[0])
endOffset = if boundingOffsets[1] == -1: input.len
endOffset = if boundingOffsets[1] == -1: inputLen
else: readOffsetUnchecked(boundingOffsets[1])
when boundingOffsets.isFirstOffset:
@ -241,7 +244,7 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
trs "VAR FIELD ", startOffset, "-", endOffset
if startOffset > endOffset:
raise newException(MalformedSszError, "SSZ field offsets are not monotonically increasing")
elif endOffset > input.len:
elif endOffset > inputLen:
raise newException(MalformedSszError, "SSZ field offset points past the end of the input")
elif startOffset < minimallyExpectedSize:
raise newException(MalformedSszError, "SSZ field offset points outside bounding offsets")
@ -253,14 +256,14 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
# TODO passing in `FieldType` instead of `type(field)` triggers a
# bug in the compiler
readSszValue(
input.toOpenArray(startOffset, endOffset - 1),
input.toOpenArray(int(startOffset), int(endOffset - 1)),
field)
trs "READING COMPLETE ", fieldName
else:
trs "READING FOREIGN ", fieldName, ": ", name(SszType)
field = fromSszBytes(
type(field),
input.toOpenArray(startOffset, endOffset - 1))
input.toOpenArray(int(startOffset), int(endOffset - 1)))
else:
unsupported T
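The offset changes above move the arithmetic to uint32 and validate against the input length before any conversion to int, so a malformed offset can neither overflow nor index past the buffer. A hedged standalone sketch of the same checks:

```nim
const offsetSize = 4

type MalformedSszError = object of CatchableError

proc readOffsetUnchecked(input: openArray[byte], n: int): uint32 =
  # Little-endian uint32, as fromSszBytes(uint32, ...) does above.
  for i in 0 ..< offsetSize:
    result = result or (uint32(input[n + i]) shl (8 * i))

proc readOffset(input: openArray[byte], n: int): int =
  let offset = readOffsetUnchecked(input, n)
  # Compare in uint32 space before narrowing to int.
  if offset > input.len.uint32:
    raise newException(MalformedSszError,
      "SSZ list element offset points past the end of the input")
  int(offset)
```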

View File

@ -72,7 +72,7 @@ func computeZeroHashes: array[sizeof(Limit) * 8, Eth2Digest] =
for i in 1 .. result.high:
result[i] = mergeBranches(result[i - 1], result[i - 1])
const zeroHashes = computeZeroHashes()
const zeroHashes* = computeZeroHashes()
func addChunk(merkleizer: var SszChunksMerkleizer, data: openarray[byte]) =
doAssert data.len > 0 and data.len <= bytesPerChunk
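zeroHashes, now exported, caches the root of an empty subtree per level: level i is the hash of two copies of level i - 1. A sketch of the construction, with std/sha1 used purely as a stand-in for the real 32-byte SHA-256 digest (an assumption for brevity):

```nim
import std/sha1

proc mergeBranches(a, b: SecureHash): SecureHash =
  secureHash($a & $b)  # the real code hashes the raw 64-byte pair

var zeroHashes: array[8, SecureHash]  # real table covers sizeof(Limit)*8 levels
zeroHashes[0] = secureHash("")        # stand-in for the all-zero chunk
for i in 1 ..< zeroHashes.len:
  zeroHashes[i] = mergeBranches(zeroHashes[i - 1], zeroHashes[i - 1])
```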

View File

@ -1,24 +1,38 @@
{.push raises: [Defect].}
import
os, strformat,
os, strformat, chronicles,
ssz/ssz_serialization,
beacon_node_types,
./spec/[crypto, datatypes, digest]
# Dump errors are generally not fatal where currently used - the code calling
# these functions, like most code, is not exception-safe
template logErrors(body: untyped) =
try:
body
except CatchableError as err:
notice "Failed to write SSZ", dir, msg = err.msg
proc dump*(dir: string, v: AttestationData, validator: ValidatorPubKey) =
SSZ.saveFile(dir / &"att-{v.slot}-{v.index}-{shortLog(validator)}.ssz", v)
logErrors:
SSZ.saveFile(dir / &"att-{v.slot}-{v.index}-{shortLog(validator)}.ssz", v)
proc dump*(dir: string, v: SignedBeaconBlock, root: Eth2Digest) =
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)
proc dump*(dir: string, v: SignedBeaconBlock, blck: BlockRef) =
dump(dir, v, blck.root)
proc dump*(dir: string, v: HashedBeaconState, blck: BlockRef) =
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(blck.root)}-{shortLog(v.root)}.ssz",
v.data)
logErrors:
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(blck.root)}-{shortLog(v.root)}.ssz",
v.data)
proc dump*(dir: string, v: HashedBeaconState) =
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(v.root)}.ssz",
v.data)
logErrors:
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(v.root)}.ssz",
v.data)
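The new logErrors template converts dump failures into log lines instead of propagating exceptions, since the call sites are not exception-safe. A minimal standalone version of the pattern, with echo standing in for chronicles' notice:

```nim
template logErrors(body: untyped) =
  try:
    body
  except CatchableError as err:
    echo "Failed to write SSZ: ", err.msg  # `notice` in the real code

when isMainModule:
  logErrors:
    writeFile("/nonexistent-dir/att.ssz", "data")  # raises IOError, only logged
  echo "still running"
```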

View File

@ -32,7 +32,7 @@ import
chronicles,
stew/results,
./extras, ./ssz/merkleization, metrics,
./spec/[datatypes, crypto, digest, helpers, validator],
./spec/[datatypes, crypto, digest, helpers, signatures, validator],
./spec/[state_transition_block, state_transition_epoch],
../nbench/bench_lab
@ -62,30 +62,27 @@ func get_epoch_validator_count(state: BeaconState): int64 {.nbench.} =
validator.withdrawable_epoch > get_current_epoch(state):
result += 1
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#verify_block_signature
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
proc verify_block_signature*(
state: BeaconState, signedBlock: SignedBeaconBlock): bool {.nbench.} =
if signedBlock.message.proposer_index >= state.validators.len.uint64:
state: BeaconState, signed_block: SignedBeaconBlock): bool {.nbench.} =
let
proposer_index = signed_block.message.proposer_index
if proposer_index >= state.validators.len.uint64:
notice "Invalid proposer index in block",
blck = shortLog(signedBlock.message)
blck = shortLog(signed_block.message)
return false
let
proposer = state.validators[signedBlock.message.proposer_index]
domain = get_domain(
state, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(signedBlock.message.slot))
signing_root = compute_signing_root(signedBlock.message, domain)
if not bls_verify(proposer.pubKey, signing_root.data, signedBlock.signature):
if not verify_block_signature(
state.fork, state.genesis_validators_root, signed_block.message.slot,
signed_block.message, state.validators[proposer_index].pubkey,
signed_block.signature):
notice "Block: signature verification failed",
blck = shortLog(signedBlock),
signingRoot = shortLog(signing_root)
blck = shortLog(signedBlock)
return false
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
# This is inlined in state_transition(...) in spec.
let state_root = hash_tree_root(state)
@ -108,7 +105,7 @@ type
# Hashed-state transition functions
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
func process_slot*(state: var HashedBeaconState) {.nbench.} =
# Cache state root
let previous_slot_state_root = state.root
@ -123,7 +120,7 @@ func process_slot*(state: var HashedBeaconState) {.nbench.} =
state.data.block_roots[state.data.slot mod SLOTS_PER_HISTORICAL_ROOT] =
hash_tree_root(state.data.latest_block_header)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
proc advance_slot*(state: var HashedBeaconState,
nextStateRoot: Opt[Eth2Digest], updateFlags: UpdateFlags,
epochCache: var StateCache) {.nbench.} =
@ -145,7 +142,7 @@ proc advance_slot*(state: var HashedBeaconState,
else:
state.root = hash_tree_root(state.data)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
proc process_slots*(state: var HashedBeaconState, slot: Slot,
updateFlags: UpdateFlags = {}): bool {.nbench.} =
# TODO this function is not _really_ necessary: when replaying states, we

View File

@ -75,11 +75,14 @@ func width(cells: seq[StatusBarCell]): int =
proc renderCells(cells: seq[StatusBarCell], sep: string) =
for i, cell in cells:
if i > 0: stdout.write sep
stdout.setBackgroundColor backgroundColor
stdout.setForegroundColor foregroundColor
stdout.setStyle {styleDim}
if i > 0: stdout.write sep
stdout.write " ", cell.label, ": "
stdout.setStyle {styleBright}
stdout.write cell.content, " "
stdout.resetAttributes()
proc render*(s: var StatusBarView) =
doAssert s.consumedLines == 0
@ -89,9 +92,8 @@ proc render*(s: var StatusBarView) =
allCellsWidth = s.layout.cellsLeft.width + s.layout.cellsRight.width
if allCellsWidth > 0:
stdout.setBackgroundColor backgroundColor
stdout.setForegroundColor foregroundColor
renderCells(s.layout.cellsLeft, sepLeft)
stdout.setBackgroundColor backgroundColor
if termWidth > allCellsWidth:
stdout.write spaces(termWidth - allCellsWidth)
s.consumedLines = 1
@ -99,7 +101,6 @@ proc render*(s: var StatusBarView) =
stdout.write spaces(max(0, termWidth - s.layout.cellsLeft.width)), "\p"
s.consumedLines = 2
renderCells(s.layout.cellsRight, sepRight)
stdout.resetAttributes
stdout.flushFile
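The reshuffled render code applies the colour attributes per cell, before writing the separator, and resets them once per render pass. A hedged standalone sketch of the per-cell pattern using std/terminal (colours chosen arbitrarily):

```nim
import std/terminal

proc renderCell(label, content: string) =
  # Attributes are (re)applied for every cell, as in renderCells above.
  stdout.setForegroundColor(fgWhite)
  stdout.setStyle({styleDim})
  stdout.write " ", label, ": "
  stdout.setStyle({styleBright})
  stdout.write content, " "

when isMainModule:
  renderCell("peers", "8")
  renderCell("slot", "1234")
  stdout.resetAttributes()  # single reset at the end of the pass
  echo ""
```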
proc erase*(s: var StatusBarView) =

View File

@ -1,7 +1,7 @@
import chronicles
import options, deques, heapqueue, tables, strutils, sequtils, math, algorithm
import stew/results, chronos, chronicles
import spec/datatypes, spec/digest, peer_pool, eth2_network
import spec/[datatypes, digest], peer_pool, eth2_network
import eth/async_utils
import block_pools/block_pools_types
@ -25,7 +25,7 @@ const
## Peer's response contains incorrect blocks.
PeerScoreBadResponse* = -1000
## Peer's response is not in requested range.
PeerScoreJokeBlocks* = -200
PeerScoreMissingBlocks* = -200
## Peer response contains too many empty blocks.
type
@ -66,35 +66,32 @@ type
SyncQueue*[T] = ref object
inpSlot*: Slot
outSlot*: Slot
startSlot*: Slot
lastSlot: Slot
chunkSize*: uint64
queueSize*: int
counter*: uint64
pending*: Table[uint64, SyncRequest[T]]
waiters: seq[SyncWaiter[T]]
syncUpdate*: SyncUpdateCallback[T]
getFirstSlotAFE*: GetSlotCallback
debtsQueue: HeapQueue[SyncRequest[T]]
debtsCount: uint64
readyQueue: HeapQueue[SyncResult[T]]
zeroPoint: Option[Slot]
suspects: seq[SyncResult[T]]
SyncManager*[A, B] = ref object
pool: PeerPool[A, B]
responseTimeout: chronos.Duration
sleepTime: chronos.Duration
maxWorkersCount: int
maxStatusAge: uint64
maxHeadAge: uint64
maxRecurringFailures: int
toleranceValue: uint64
getLocalHeadSlot: GetSlotCallback
getLocalWallSlot: GetSlotCallback
getFirstSlotAFE: GetSlotCallback
syncUpdate: SyncUpdateCallback[A]
chunkSize: uint64
queue: SyncQueue[A]
@ -211,6 +208,7 @@ proc isEmpty*[T](sr: SyncRequest[T]): bool {.inline.} =
proc init*[T](t1: typedesc[SyncQueue], t2: typedesc[T],
start, last: Slot, chunkSize: uint64,
updateCb: SyncUpdateCallback[T],
fsafeCb: GetSlotCallback,
queueSize: int = -1): SyncQueue[T] =
## Create new synchronization queue with parameters
##
@ -261,34 +259,8 @@ proc init*[T](t1: typedesc[SyncQueue], t2: typedesc[T],
# missing" error. Lets call such peers "jokers", because they are joking
# with responses.
#
# To fix "joker" problem i'm going to introduce "zero-point" which will
# represent first non-empty slot in gap at the end of requested chunk.
# If SyncQueue receives chunk of blocks with gap at the end and this chunk
# will be successfully processed by `block_pool` it will set `zero_point` to
# the first uncertain (empty) slot. For example:
#
# Case 1
# X X X X X -
# 3 4 5 6 7 8
#
# Case2
# X X - - - -
# 3 4 5 6 7 8
#
# In Case 1 `zero-point` will be equal to 8, in Case 2 `zero-point` will be
# set to 5.
#
# When `zero-point` is set and the next received chunk of blocks will be
# empty, then peer produced this chunk of blocks will be added to suspect
# list.
#
# If the next chunk of blocks has at least one non-empty block and this chunk
# will be successfully processed by `block_pool`, then `zero-point` will be
# reset and suspect list will be cleared.
#
# If the `block_pool` failed to process next chunk of blocks, SyncQueue will
# perform rollback to `zero-point` and penalize all the peers in suspect list.
# To fix "joker" problem we going to perform rollback to the latest finalized
# epoch's first slot.
doAssert(chunkSize > 0'u64, "Chunk size should not be zero")
result = SyncQueue[T](
startSlot: start,
@ -296,11 +268,11 @@ proc init*[T](t1: typedesc[SyncQueue], t2: typedesc[T],
chunkSize: chunkSize,
queueSize: queueSize,
syncUpdate: updateCb,
getFirstSlotAFE: fsafeCb,
waiters: newSeq[SyncWaiter[T]](),
counter: 1'u64,
pending: initTable[uint64, SyncRequest[T]](),
debtsQueue: initHeapQueue[SyncRequest[T]](),
zeroPoint: some[Slot](start),
inpSlot: start,
outSlot: start
)
@ -373,10 +345,10 @@ proc resetWait*[T](sq: SyncQueue[T], toSlot: Option[Slot]) {.async.} =
# without missing any blocks. There 3 sources:
# 1. Debts queue.
# 2. Processing queue (`inpSlot`, `outSlot`).
# 3. Requested slot `toSlot` (which can be `zero-point` slot).
# 3. Requested slot `toSlot`.
#
# Queue's `outSlot` is the lowest slot we added to `block_pool`, but
# `zero-point` slot can be less than `outSlot`. `debtsQueue` holds only not
# `toSlot` slot can be less than `outSlot`. `debtsQueue` holds only not
# added slot requests, so it can't be bigger than `outSlot` value.
var minSlot = sq.outSlot
if toSlot.isSome():
@ -442,8 +414,7 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
if res:
continue
else:
# SyncQueue reset happens (it can't be `zero-point` reset, or continous
# failure reset). We are exiting to wake up sync-worker.
# SyncQueue reset happens. We are exiting to wake up sync-worker.
exitNow = true
break
let syncres = SyncResult[T](request: sr, data: data)
@ -461,55 +432,6 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
let item = sq.readyQueue.pop()
let res = sq.syncUpdate(item.request, item.data)
if res.isOk:
if sq.zeroPoint.isSome():
if item.isEmpty():
# If the `zeropoint` is set and response is empty, we will add this
# request to suspect list.
debug "Adding peer to suspect list", peer = item.request.item,
request_slot = item.request.slot,
request_count = item.request.count,
request_step = item.request.step,
response_count = len(item.data), topics = "syncman"
sq.suspects.add(item)
else:
# If the `zeropoint` is set and response is not empty, we will clean
# suspect list and reset `zeropoint`.
sq.suspects.setLen(0)
sq.zeroPoint = none[Slot]()
# At this point `zeropoint` is unset, but received response can have
# gap at the end.
if item.hasEndGap():
debug "Zero-point reset and new zero-point found",
peer = item.request.item, request_slot = item.request.slot,
request_count = item.request.count,
request_step = item.request.step,
response_count = len(item.data),
blocks_map = getShortMap(item.request, item.data),
topics = "syncman"
sq.suspects.add(item)
sq.zeroPoint = some(item.getLastNonEmptySlot())
else:
debug "Zero-point reset", peer = item.request.item,
request_slot = item.request.slot,
request_count = item.request.count,
request_step = item.request.step,
response_count = len(item.data),
blocks_map = getShortMap(item.request, item.data),
topics = "syncman"
else:
# If the `zeropoint` is not set and response has gap at the end, we
# will add first suspect to the suspect list and set `zeropoint`.
if item.hasEndGap():
debug "New zero-point found", peer = item.request.item,
request_slot = item.request.slot,
request_count = item.request.count,
request_step = item.request.step,
response_count = len(item.data),
blocks_map = getShortMap(item.request, item.data),
topics = "syncman"
sq.suspects.add(item)
sq.zeroPoint = some(item.getLastNonEmptySlot())
sq.outSlot = sq.outSlot + item.request.count
sq.wakeupWaiters()
else:
@ -523,50 +445,25 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
var resetSlot: Option[Slot]
if res.error == BlockError.MissingParent:
if sq.zeroPoint.isSome():
# If the `zeropoint` is set and we are unable to store response in
# `block_pool` we are going to revert suspicious responses list.
# If `zeropoint` is set, suspicious list should not be empty.
var req: SyncRequest[T]
if isEmpty(sq.suspects[0]):
# If initial suspicious response is an empty list, then previous
# chunk of blocks did not have a gap at the end. So we are going to
# request suspicious response one more time without any changes.
req = sq.suspects[0].request
else:
# If initial suspicious response is not an empty list, we are going
# to request only gap at the end of the suspicious response.
let startSlot = sq.suspects[0].getLastNonEmptySlot() + 1'u64
let lastSlot = sq.suspects[0].request.lastSlot()
req = SyncRequest.init(T, startSlot, lastSlot)
debug "Resolve joker's problem", request_slot = req.slot,
request_count = req.count,
request_step = req.step,
suspects_count = (len(sq.suspects) - 1)
sq.suspects[0].request.item.updateScore(PeerScoreJokeBlocks)
sq.toDebtsQueue(req)
# We move all left suspicious responses to the debts queue.
if len(sq.suspects) > 1:
for i in 1 ..< len(sq.suspects):
sq.toDebtsQueue(sq.suspects[i].request)
sq.suspects[i].request.item.updateScore(PeerScoreJokeBlocks)
# Reset state to the `zeropoint`.
sq.suspects.setLen(0)
resetSlot = sq.zeroPoint
sq.zeroPoint = none[Slot]()
else:
# If we got `BlockError.MissingParent` and `zero-point` is not set
# it means that peer returns chain of blocks with holes.
let req = item.request
warn "Received sequence of blocks with holes", peer = req.item,
# If we got `BlockError.MissingParent`, it means that the peer returned a
# chain of blocks with holes, or that `block_pool` is in an incomplete
# state. We are going to rewind to the first slot of the latest finalized
# epoch.
let req = item.request
let finalizedSlot = sq.getFirstSlotAFE()
if finalizedSlot < req.slot:
warn "Unexpected missing parent, rewind happens",
peer = req.item, rewind_to_slot = finalizedSlot,
request_slot = req.slot, request_count = req.count,
request_step = req.step, blocks_count = len(item.data),
blocks_map = getShortMap(req, item.data)
resetSlot = some(finalizedSlot)
req.item.updateScore(PeerScoreMissingBlocks)
else:
error "Unexpected missing parent at finalized epoch slot",
peer = req.item, to_slot = finalizedSlot,
request_slot = req.slot, request_count = req.count,
request_step = req.step, blocks_count = len(item.data),
blocks_map = getShortMap(req, item.data)
req.item.updateScore(PeerScoreBadBlocks)
elif res.error == BlockError.Invalid:
let req = item.request
@ -587,8 +484,9 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
sq.toDebtsQueue(item.request)
if resetSlot.isSome():
await sq.resetWait(resetSlot)
debug "Zero-point reset happens", queue_input_slot = sq.inpSlot,
queue_output_slot = sq.outSlot
debug "Rewind to slot was happened", reset_slot = reset_slot.get(),
queue_input_slot = sq.inpSlot,
queue_output_slot = sq.outSlot
break
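The new MissingParent handling replaces the whole zero-point machinery with a single rule: rewind to the first slot of the latest finalized epoch when that slot is behind the failed request, otherwise treat the local state as inconsistent. A hedged sketch of just that decision:

```nim
import std/options

type Slot = uint64

proc rewindTarget(finalizedSlot, requestSlot: Slot): Option[Slot] =
  if finalizedSlot < requestSlot:
    some(finalizedSlot)  # rewind; the peer gets PeerScoreMissingBlocks
  else:
    none(Slot)           # inconsistent finalized state; PeerScoreBadBlocks

when isMainModule:
  doAssert rewindTarget(64, 100) == some(Slot(64))
  doAssert rewindTarget(64, 32).isNone
```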
proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T]) =
@ -668,7 +566,9 @@ proc speed*(start, finish: SyncMoment): float {.inline.} =
proc newSyncManager*[A, B](pool: PeerPool[A, B],
getLocalHeadSlotCb: GetSlotCallback,
getLocalWallSlotCb: GetSlotCallback,
getFSAFECb: GetSlotCallback,
updateLocalBlocksCb: UpdateLocalBlocksCallback,
maxWorkers = 10,
maxStatusAge = uint64(SLOTS_PER_EPOCH * 4),
maxHeadAge = uint64(SLOTS_PER_EPOCH * 1),
sleepTime = (int(SLOTS_PER_EPOCH) *
@ -687,14 +587,16 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
return res
let queue = SyncQueue.init(A, getLocalHeadSlotCb(), getLocalWallSlotCb(),
chunkSize, syncUpdate, 2)
chunkSize, syncUpdate, getFSAFECb, 2)
result = SyncManager[A, B](
pool: pool,
maxWorkersCount: maxWorkers,
maxStatusAge: maxStatusAge,
getLocalHeadSlot: getLocalHeadSlotCb,
syncUpdate: syncUpdate,
getLocalWallSlot: getLocalWallSlotCb,
getFirstSlotAFE: getFSAFECb,
maxHeadAge: maxHeadAge,
maxRecurringFailures: maxRecurringFailures,
sleepTime: sleepTime,
@ -897,14 +799,14 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
# This procedure manages the main loop of SyncManager; in this loop it
# performs the following:
# 1. It checks the current sync status: "are we synced?".
# 2. If we are in active syncing, it tries to acquire peers from PeerPool and
# spawns new sync-workers.
# 2. If we are in active syncing, it tries to acquire new peers from PeerPool
# and spawns new sync-workers. The number of spawned sync-workers can be
# controlled by the `maxWorkersCount` value.
# 3. It stops spawning sync-workers when we are "in sync".
# 4. It calculates syncing performance.
mixin getKey, getScore
var pending = newSeq[Future[A]]()
var acquireFut: Future[A]
var wallSlot, headSlot: Slot
var syncSpeed: float = 0.0
template workersCount(): int =
@ -932,53 +834,24 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
traceAsyncErrors watchTask()
while true:
wallSlot = man.getLocalWallSlot()
headSlot = man.getLocalHeadSlot()
let wallSlot = man.getLocalWallSlot()
let headSlot = man.getLocalHeadSlot()
var progress: uint64
if headSlot <= man.queue.lastSlot:
progress = man.queue.progress()
else:
progress = 100'u64
let progress =
if headSlot <= man.queue.lastSlot:
man.queue.progress()
else:
100'u64
debug "Synchronization loop start tick", wall_head_slot = wallSlot,
debug "Synchronization loop tick", wall_head_slot = wallSlot,
local_head_slot = headSlot, queue_status = progress,
queue_start_slot = man.queue.startSlot,
queue_last_slot = man.queue.lastSlot,
workers_count = workersCount(), topics = "syncman"
if headAge <= man.maxHeadAge:
debug "Synchronization loop sleeping", wall_head_slot = wallSlot,
local_head_slot = headSlot, workers_count = workersCount(),
difference = (wallSlot - headSlot),
max_head_age = man.maxHeadAge, topics = "syncman"
if len(pending) == 0:
man.inProgress = false
await sleepAsync(man.sleepTime)
else:
var peerFut = one(pending)
# We do not care about the result here because we are going to check peerFut
# later.
discard await withTimeout(peerFut, man.sleepTime)
else:
man.inProgress = true
if isNil(acquireFut):
acquireFut = man.pool.acquire()
pending.add(acquireFut)
debug "Synchronization loop waiting for new peer",
wall_head_slot = wallSlot, local_head_slot = headSlot,
workers_count = workersCount(), topics = "syncman"
var peerFut = one(pending)
# We do not care about the result here, because we are going to check peerFut
# later.
discard await withTimeout(peerFut, man.sleepTime)
waiting_for_new_peer = $not(isNil(acquireFut)),
sync_speed = syncSpeed, workers_count = workersCount(),
topics = "syncman"
var temp = newSeqOfCap[Future[A]](len(pending))
# Update slots with more recent data
wallSlot = man.getLocalWallSlot()
headSlot = man.getLocalHeadSlot()
for fut in pending:
if fut.finished():
if fut == acquireFut:
@ -989,7 +862,7 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
workers_count = workersCount(),
errMsg = acquireFut.readError().msg, topics = "syncman"
else:
var peer = acquireFut.read()
let peer = acquireFut.read()
if headAge <= man.maxHeadAge:
# If we are already in sync, we are going to release the just-acquired
# peer and not acquire more peers
@ -999,19 +872,22 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
man.pool.release(peer)
else:
if headSlot > man.queue.lastSlot:
debug "Synchronization lost, restoring",
wall_head_slot = wallSlot, local_head_slot = headSlot,
queue_last_slot = man.queue.lastSlot, topics = "syncman"
man.queue = SyncQueue.init(A, headSlot, wallSlot,
man.chunkSize, man.syncUpdate, 2)
man.chunkSize, man.syncUpdate,
man.getFirstSlotAFE, 2)
debug "Synchronization loop starting new worker", peer = peer,
wall_head_slot = wallSlot, local_head_slot = headSlot,
peer_score = peer.getScore(), topics = "syncman"
temp.add(syncWorker(man, peer))
acquireFut = nil
if headAge > man.maxHeadAge:
acquireFut = man.pool.acquire()
temp.add(acquireFut)
# We will create a new `acquireFut` later.
acquireFut = nil
else:
# Worker finished its work
# A worker has finished its work
if fut.failed():
debug "Synchronization loop got worker finished with an error",
wall_head_slot = wallSlot, local_head_slot = headSlot,
@ -1024,6 +900,7 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
topics = "syncman"
else:
if fut == acquireFut:
# The task waiting for a new peer from PeerPool is not yet finished.
if headAge <= man.maxHeadAge:
debug "Synchronization loop reached sync barrier",
wall_head_slot = wallSlot, local_head_slot = headSlot,
@ -1037,13 +914,39 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
pending = temp
if headAge <= man.maxHeadAge:
debug "Synchronization loop sleeping", wall_head_slot = wallSlot,
local_head_slot = headSlot, workers_count = workersCount(),
difference = (wallSlot - headSlot),
max_head_age = man.maxHeadAge, topics = "syncman"
if len(pending) == 0:
man.inProgress = false
await sleepAsync(man.sleepTime)
else:
debug "Synchronization loop waiting for workers completion",
workers_count = workersCount()
discard await withTimeout(one(pending), man.sleepTime)
else:
man.inProgress = true
if isNil(acquireFut) and len(pending) < man.maxWorkersCount:
acquireFut = man.pool.acquire()
pending.add(acquireFut)
debug "Synchronization loop waiting for new peer",
wall_head_slot = wallSlot, local_head_slot = headSlot,
workers_count = workersCount(), topics = "syncman",
sleep_time = $man.sleepTime
else:
debug "Synchronization loop waiting for workers",
wall_head_slot = wallSlot, local_head_slot = headSlot,
workers_count = workersCount(), topics = "syncman",
sleep_time = $man.sleepTime
discard await withTimeout(one(pending), man.sleepTime)
if len(man.failures) > man.maxRecurringFailures and (workersCount() > 1):
debug "Number of recurring failures exceeds limit, reseting queue",
workers_count = workers_count(), rec_failures = len(man.failures)
# Cleaning up failures.
man.failures.setLen(0)
await man.queue.resetWait(none[Slot]())
debug "Synchronization loop end tick", wall_head_slot = wallSlot,
local_head_slot = headSlot, workers_count = workersCount(),
waiting_for_new_peer = $not(isNil(acquireFut)),
sync_speed = syncSpeed, queue_slot = man.queue.outSlot,
topics = "syncman"

View File

@ -89,6 +89,8 @@ p2pProtocol BeaconSync(version = 1,
peerState = BeaconSyncPeerState):
onPeerConnected do (peer: Peer) {.async.}:
debug "Peer connected",
peer, peerInfo = shortLog(peer.info), wasDialed = peer.wasDialed
if peer.wasDialed:
let
ourStatus = peer.networkState.getCurrentStatus()
@ -100,7 +102,7 @@ p2pProtocol BeaconSync(version = 1,
await peer.handleStatus(peer.networkState,
ourStatus, theirStatus.get())
else:
warn "Status response not received in time", peer = peer
warn "Status response not received in time", peer
proc status(peer: Peer,
theirStatus: StatusMsg,
@ -169,7 +171,7 @@ p2pProtocol BeaconSync(version = 1,
proc goodbye(peer: Peer,
reason: DisconnectionReason)
{.async, libp2pProtocol("goodbye", 1).} =
debug "Received Goodbye message", reason
debug "Received Goodbye message", reason, peer
proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) =
debug "Peer status", peer, statusMsg

View File

@ -249,7 +249,7 @@ proc goodbyeUserHandler(peer: Peer; reason: DisconnectionReason) {.async,
cast[ref[BeaconSyncNetworkState:ObjectType]](getNetworkState(peer.network,
BeaconSyncProtocol))
debug "Received Goodbye message", reason
debug "Received Goodbye message", reason, peer
template callUserHandler(MSG: type statusObj; peer: Peer; stream: Connection;
noSnappy: bool; msg: StatusMsg): untyped =
@ -375,6 +375,8 @@ proc BeaconSyncPeerConnected(peer: Peer; stream: Connection) {.async, gcsafe.} =
cast[ref[BeaconSyncNetworkState:ObjectType]](getNetworkState(peer.network,
BeaconSyncProtocol))
debug "Peer connected", peer, peerInfo = shortLog(peer.info),
wasDialed = peer.wasDialed
if peer.wasDialed:
let
ourStatus = peer.networkState.getCurrentStatus()
@ -382,7 +384,7 @@ proc BeaconSyncPeerConnected(peer: Peer; stream: Connection) {.async, gcsafe.} =
if theirStatus.isOk:
await peer.handleStatus(peer.networkState, ourStatus, theirStatus.get())
else:
warn "Status response not received in time", peer = peer
warn "Status response not received in time", peer
setEventHandlers(BeaconSyncProtocol, BeaconSyncPeerConnected, nil)
registerProtocol(BeaconSyncProtocol)

View File

@ -16,7 +16,7 @@ type
## which blocks are valid - in particular, blocks are not valid if they
## come from the future as seen from the local clock.
##
## https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/fork-choice.md#fork-choice
## https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#fork-choice
##
# TODO replace time in chronos with a proper unit type, then this code can
# follow:
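The clock's role here reduces to integer arithmetic: the wall slot is the time elapsed since genesis divided by the slot duration, and a block claiming a slot beyond it is treated as coming from the future. A simplified sketch; this ignores the spec's small clock-disparity tolerance and hard-codes the mainnet `SECONDS_PER_SLOT`:

```nim
import times

const SECONDS_PER_SLOT = 12'u64   # mainnet preset; minimal uses 6

func wallSlot(genesisTime, now: uint64): uint64 =
  if now < genesisTime: 0'u64
  else: (now - genesisTime) div SECONDS_PER_SLOT

func isFromFuture(blockSlot, genesisTime, now: uint64): bool =
  # a block whose slot is ahead of the local wall clock is not yet valid
  blockSlot > wallSlot(genesisTime, now)

let now = epochTime().uint64
echo isFromFuture(3, now - 120, now)   # false: slot 3 is in the past
echo isFromFuture(11, now - 120, now)  # true: only 10 slots have elapsed
```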

View File

@ -7,7 +7,7 @@
import
# Standard library
tables, strutils,
tables, strutils, parseutils,
# Nimble packages
stew/[objects],
@ -19,7 +19,7 @@ import
block_pool, ssz/merkleization,
beacon_node_common, beacon_node_types,
validator_duties, eth2_network,
spec/eth2_apis/validator_callsigs_types,
spec/eth2_apis/callsigs_types,
eth2_json_rpc_serialization
type
@ -27,64 +27,102 @@ type
logScope: topics = "valapi"
# TODO Probably the `beacon` ones should be defined elsewhere...?
proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
# TODO Probably the `beacon` ones (and not `validator`) should be defined elsewhere...
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
notice "== get_v1_beacon_states_fork", stateId = stateId
template withStateForSlot(stateId: string, body: untyped): untyped =
var res: BiggestInt
# parseBiggestInt returns the number of characters consumed; anything
# short of the whole string means stateId is not a plain slot number
if parseBiggestInt(stateId, res) != stateId.len:
raise newException(CatchableError, "Not a valid slot number")
let head = node.updateHead()
let blockSlot = head.atSlot(res.Slot)
node.blockPool.withState(node.blockPool.tmpState, blockSlot):
body
rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
debug "get_v1_beacon_genesis"
return (genesis_time: node.blockPool.headState.data.data.genesis_time,
genesis_validators_root:
node.blockPool.headState.data.data.genesis_validators_root,
genesis_fork_version: Version(GENESIS_FORK_VERSION))
rpcServer.rpc("get_v1_beacon_states_root") do (stateId: string) -> Eth2Digest:
debug "get_v1_beacon_states_root", stateId = stateId
# TODO do we need to call node.updateHead() before using headState?
result = case stateId:
of "head":
node.blockPool.headState.blck.root
of "genesis":
node.blockPool.headState.data.data.genesis_validators_root
of "finalized":
node.blockPool.headState.data.data.finalized_checkpoint.root
of "justified":
node.blockPool.headState.data.data.current_justified_checkpoint.root
else:
if stateId.startsWith("0x"):
# TODO not sure if `fromHex` is the right thing here...
# https://github.com/ethereum/eth2.0-APIs/issues/37#issuecomment-638566144
# we return whatever was passed to us (this is a nonsense request)
fromHex(Eth2Digest, stateId[2..<stateId.len]) # skip first 2 chars
else:
withStateForSlot(stateId):
hashedState.root
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
debug "get_v1_beacon_states_fork", stateId = stateId
result = case stateId:
of "head":
discard node.updateHead() # TODO do we need this?
node.blockPool.headState.data.data.fork
of "genesis":
Fork(previous_version: Version(GENESIS_FORK_VERSION),
current_version: Version(GENESIS_FORK_VERSION),
epoch: GENESIS_EPOCH)
of "finalized":
# TODO
Fork()
node.blockPool.withState(node.blockPool.tmpState, node.blockPool.finalizedHead):
state.fork
of "justified":
# TODO
Fork()
node.blockPool.justifiedState.data.data.fork
else:
# TODO parse `stateId` as either a number (slot) or a hash (stateRoot)
Fork()
if stateId.startsWith("0x"):
# TODO not sure if `fromHex` is the right thing here...
# https://github.com/ethereum/eth2.0-APIs/issues/37#issuecomment-638566144
let blckRoot = fromHex(Eth2Digest, stateId[2..<stateId.len]) # skip first 2 chars
let blckRef = node.blockPool.getRef(blckRoot)
if blckRef.isNil:
raise newException(CatchableError, "Block not found")
let blckSlot = blckRef.atSlot(blckRef.slot)
node.blockPool.withState(node.blockPool.tmpState, blckSlot):
state.fork
else:
withStateForSlot(stateId):
state.fork
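Both handlers above dispatch on the same `stateId` grammar: a named checkpoint, a `0x`-prefixed root, or a decimal slot. A self-contained sketch of that classification, using the corrected full-length check from `withStateForSlot` (the enum and proc names are illustrative):

```nim
import parseutils, strutils

type StateIdKind = enum sidNamed, sidRoot, sidSlot

proc classifyStateId(stateId: string): StateIdKind =
  case stateId
  of "head", "genesis", "finalized", "justified":
    sidNamed
  else:
    if stateId.startsWith("0x"):
      sidRoot                  # parse the remainder as a state/block root
    else:
      var slot: BiggestInt
      # a partial parse means the input is not a plain slot number
      if parseBiggestInt(stateId, slot) != stateId.len:
        raise newException(ValueError, "not a valid slot number: " & stateId)
      sidSlot

echo classifyStateId("head")    # sidNamed
echo classifyStateId("0xabcd")  # sidRoot
echo classifyStateId("12345")   # sidSlot
```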
# TODO Probably the `beacon` ones (and not `validator`) should be defined elsewhere...
rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
notice "== get_v1_beacon_genesis"
return BeaconGenesisTuple(genesis_time: node.blockPool.headState.data.data.genesis_time,
genesis_validators_root: node.blockPool.headState.data.data.genesis_validators_root,
genesis_fork_version: Version(GENESIS_FORK_VERSION))
rpcServer.rpc("post_v1_beacon_pool_attestations") do (attestation: Attestation) -> bool:
#notice "== post_v1_beacon_pool_attestations"
rpcServer.rpc("post_v1_beacon_pool_attestations") do (
attestation: Attestation) -> bool:
node.sendAttestation(attestation)
return true
rpcServer.rpc("get_v1_validator_blocks") do (
slot: Slot, graffiti: Eth2Digest, randao_reveal: ValidatorSig) -> BeaconBlock:
notice "== get_v1_validator_blocks", slot = slot
debug "get_v1_validator_blocks", slot = slot
let head = node.updateHead()
let proposer = node.blockPool.getProposer(head, slot)
# TODO how do we handle the case when we cannot return a meaningful block? 404...
doAssert(proposer.isSome())
if proposer.isNone():
raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
let valInfo = ValidatorInfoForMakeBeaconBlock(kind: viRandao_reveal,
randao_reveal: randao_reveal)
let res = makeBeaconBlockForHeadAndSlot(
node, valInfo, proposer.get()[0], graffiti, head, slot)
# TODO how do we handle the case when we cannot return a meaningful block? 404...
doAssert(res.message.isSome())
return res.message.get(BeaconBlock()) # returning a default if empty
if res.message.isNone():
raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
return res.message.get()
rpcServer.rpc("post_v1_beacon_blocks") do (body: SignedBeaconBlock) -> bool:
notice "== post_v1_beacon_blocks"
debug "post_v1_beacon_blocks",
slot = body.message.slot,
prop_idx = body.message.proposer_index
logScope: pcs = "block_proposal"
let head = node.updateHead()
if head.slot >= body.message.slot:
warn "Skipping proposal, have newer head already",
@ -92,14 +130,15 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
headBlockRoot = shortLog(head.root),
slot = shortLog(body.message.slot),
cat = "fastforward"
return false
return head != await proposeSignedBlock(node, head, AttachedValidator(),
body, hash_tree_root(body.message))
raise newException(CatchableError,
"Proposal is for a past slot: " & $body.message.slot)
if head == await proposeSignedBlock(node, head, AttachedValidator(),
body, hash_tree_root(body.message)):
raise newException(CatchableError, "Could not propose block")
return true
rpcServer.rpc("get_v1_validator_attestation_data") do (
slot: Slot, committee_index: CommitteeIndex) -> AttestationData:
#notice "== get_v1_validator_attestation_data"
# Obtain the data to form an attestation
let head = node.updateHead()
let attestationHead = head.atSlot(slot)
node.blockPool.withState(node.blockPool.tmpState, attestationHead):
@ -107,45 +146,43 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
rpcServer.rpc("get_v1_validator_aggregate_attestation") do (
attestation_data: AttestationData)-> Attestation:
notice "== get_v1_validator_aggregate_attestation"
debug "get_v1_validator_aggregate_attestation"
rpcServer.rpc("post_v1_validator_aggregate_and_proof") do (
payload: SignedAggregateAndProof) -> bool:
notice "== post_v1_validator_aggregate_and_proof"
# TODO is this enough?
node.network.broadcast(node.topicAggregateAndProofs, payload)
return true
rpcServer.rpc("post_v1_validator_duties_attester") do (
epoch: Epoch, public_keys: seq[ValidatorPubKey]) -> seq[AttesterDuties]:
notice "== post_v1_validator_duties_attester", epoch = epoch
discard node.updateHead() # TODO do we need this?
for pubkey in public_keys:
let idx = node.blockPool.headState.data.data.validators.asSeq.findIt(it.pubKey == pubkey)
if idx != -1:
# TODO this might crash if the requested epoch is further ahead than the
# BN's current epoch because of this: `doAssert epoch <= next_epoch`
let res = node.blockPool.headState.data.data.get_committee_assignment(
epoch, idx.ValidatorIndex)
if res.isSome:
result.add(AttesterDuties(public_key: pubkey,
committee_index: res.get.b,
committee_length: res.get.a.len.uint64,
validator_committee_index: res.get.a.find(idx.ValidatorIndex).uint64,
slot: res.get.c))
debug "post_v1_validator_duties_attester", epoch = epoch
let head = node.updateHead()
let attestationHead = head.atSlot(compute_start_slot_at_epoch(epoch))
node.blockPool.withState(node.blockPool.tmpState, attestationHead):
for pubkey in public_keys:
let idx = state.validators.asSeq.findIt(it.pubKey == pubkey)
if idx == -1:
continue
let ca = state.get_committee_assignment(epoch, idx.ValidatorIndex)
if ca.isSome:
result.add((public_key: pubkey,
committee_index: ca.get.b,
committee_length: ca.get.a.len.uint64,
validator_committee_index: ca.get.a.find(idx.ValidatorIndex).uint64,
slot: ca.get.c))
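The rewritten handler resolves each requested public key to a validator index inside the epoch-start state, then asks for that index's committee assignment, silently skipping unknown keys. A toy model of that lookup loop; the registry contents and the assignment stub are made up:

```nim
import options

type AttesterDuty = object
  validatorIndex: int
  committeeIndex: uint64
  slot: uint64

proc committeeAssignment(idx: int): Option[AttesterDuty] =
  # stand-in for get_committee_assignment: the real one derives this
  # from the shuffled committees of the requested epoch
  some AttesterDuty(validatorIndex: idx, committeeIndex: 1, slot: 17)

let registry = @["0xaa", "0xbb"]          # stand-in for state.validators
var duties: seq[AttesterDuty]
for key in ["0xbb", "0xcc"]:
  let idx = registry.find(key)
  if idx == -1:
    continue                              # unknown validators are skipped
  let ca = committeeAssignment(idx)
  if ca.isSome:
    duties.add ca.get()

echo duties.len   # 1: only "0xbb" is a known validator
```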
rpcServer.rpc("get_v1_validator_duties_proposer") do (
epoch: Epoch) -> seq[ValidatorPubkeySlotPair]:
notice "== get_v1_validator_duties_proposer", epoch = epoch
debug "get_v1_validator_duties_proposer", epoch = epoch
let head = node.updateHead()
for i in 0 ..< SLOTS_PER_EPOCH:
let currSlot = (compute_start_slot_at_epoch(epoch).int + i).Slot
let proposer = node.blockPool.getProposer(head, currSlot)
if proposer.isSome():
result.add(ValidatorPubkeySlotPair(public_key: proposer.get()[1], slot: currSlot))
result.add((public_key: proposer.get()[1], slot: currSlot))
rpcServer.rpc("post_v1_validator_beacon_committee_subscription") do (
rpcServer.rpc("post_v1_validator_beacon_committee_subscriptions") do (
committee_index: CommitteeIndex, slot: Slot, aggregator: bool,
validator_pubkey: ValidatorPubKey, slot_signature: ValidatorSig):
notice "== post_v1_validator_beacon_committee_subscription"
# TODO
validator_pubkey: ValidatorPubKey, slot_signature: ValidatorSig) -> bool:
debug "post_v1_validator_beacon_committee_subscriptions"
raise newException(CatchableError, "Not implemented")

View File

@ -21,8 +21,8 @@ import
eth2_network, eth2_discovery, validator_pool, beacon_node_types,
nimbus_binary_common,
version, ssz/merkleization,
sync_manager,
spec/eth2_apis/validator_callsigs_types,
sync_manager, keystore_management,
spec/eth2_apis/callsigs_types,
eth2_json_rpc_serialization
logScope: topics = "vc"
@ -31,6 +31,7 @@ template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
## Generate client convenience marshalling wrappers from forward declarations
createRpcSigs(RpcClient, sourceDir / "spec" / "eth2_apis" / "validator_callsigs.nim")
createRpcSigs(RpcClient, sourceDir / "spec" / "eth2_apis" / "beacon_callsigs.nim")
type
ValidatorClient = ref object
@ -39,31 +40,66 @@ type
beaconClock: BeaconClock
attachedValidators: ValidatorPool
fork: Fork
proposalsForEpoch: Table[Slot, ValidatorPubKey]
attestationsForEpoch: Table[Slot, seq[AttesterDuties]]
proposalsForCurrentEpoch: Table[Slot, ValidatorPubKey]
attestationsForEpoch: Table[Epoch, Table[Slot, seq[AttesterDuties]]]
beaconGenesis: BeaconGenesisTuple
proc connectToBN(vc: ValidatorClient) {.gcsafe, async.} =
while true:
try:
await vc.client.connect($vc.config.rpcAddress, Port(vc.config.rpcPort))
info "Connected to BN",
port = vc.config.rpcPort,
address = vc.config.rpcAddress
return
except CatchableError as err:
warn "Could not connect to the BN - retrying!", err = err.msg
await sleepAsync(chronos.seconds(1)) # 1 second before retrying
template attemptUntilSuccess(vc: ValidatorClient, body: untyped) =
while true:
try:
body
break
except CatchableError as err:
warn "Caught an unexpected error", err = err.msg
waitFor vc.connectToBN()
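`connectToBN` and `attemptUntilSuccess` share the same loop shape: try, log, reconnect or back off, retry until the body succeeds. A self-contained sketch of that shape using chronos; `flaky` simulates a beacon node that only comes up on the third attempt:

```nim
import chronos

var attempts = 0

proc flaky() {.async.} =
  inc attempts
  if attempts < 3:
    raise newException(CatchableError, "BN not reachable yet")

proc retryForever() {.async.} =
  while true:
    try:
      await flaky()
      echo "succeeded after ", attempts, " attempts"
      return
    except CatchableError as err:
      echo "caught an error, retrying: ", err.msg
      await sleepAsync(chronos.milliseconds(10))  # back off before retrying

waitFor retryForever()
```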
proc getValidatorDutiesForEpoch(vc: ValidatorClient, epoch: Epoch) {.gcsafe, async.} =
let proposals = await vc.client.get_v1_validator_duties_proposer(epoch)
# update the block proposal duties this VC should do during this epoch
vc.proposalsForEpoch.clear()
vc.proposalsForCurrentEpoch.clear()
for curr in proposals:
if vc.attachedValidators.validators.contains curr.public_key:
vc.proposalsForEpoch.add(curr.slot, curr.public_key)
vc.proposalsForCurrentEpoch.add(curr.slot, curr.public_key)
# couldn't use mapIt in ANY shape or form so reverting to raw loops - sorry Sean Parent :|
var validatorPubkeys: seq[ValidatorPubKey]
for key in vc.attachedValidators.validators.keys:
validatorPubkeys.add key
# update the attestation duties this VC should do during this epoch
let attestations = await vc.client.post_v1_validator_duties_attester(
epoch, validatorPubkeys)
vc.attestationsForEpoch.clear()
for a in attestations:
if vc.attestationsForEpoch.hasKeyOrPut(a.slot, @[a]):
vc.attestationsForEpoch[a.slot].add(a)
proc getAttesterDutiesForEpoch(epoch: Epoch) {.gcsafe, async.} =
let attestations = await vc.client.post_v1_validator_duties_attester(
epoch, validatorPubkeys)
# make sure there's an entry
if not vc.attestationsForEpoch.contains epoch:
vc.attestationsForEpoch.add(epoch, Table[Slot, seq[AttesterDuties]]())
for a in attestations:
if vc.attestationsForEpoch[epoch].hasKeyOrPut(a.slot, @[a]):
vc.attestationsForEpoch[epoch][a.slot].add(a)
# obtain the attestation duties this VC should do during the next epoch
await getAttesterDutiesForEpoch(epoch + 1)
# also get the attestation duties for the current epoch if missing
if not vc.attestationsForEpoch.contains epoch:
await getAttesterDutiesForEpoch(epoch)
# clean up attestation duties from older epochs
vc.attestationsForEpoch.del(epoch - 1)
# TODO handle subscriptions to beacon committees for both the next epoch and
# for the current if missing (beacon_committee_subscriptions from the REST api)
# for now we will get the fork each time we update the validator duties for each epoch
# TODO should poll occasionally `/v1/config/fork_schedule`
vc.fork = await vc.client.get_v1_beacon_states_fork("head")
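The duty bookkeeping above changed from a flat per-slot table to a nested epoch-to-slot table so that duties for the current and next epoch can coexist, with the previous epoch pruned. A minimal sketch of that update pattern, assuming a trimmed-down `AttesterDuties` (the real type also carries keys and committee positions):

```nim
import tables

type AttesterDuties = object
  slot: uint64

var duties = initTable[uint64, Table[uint64, seq[AttesterDuties]]]()

proc addDuty(epoch: uint64, d: AttesterDuties) =
  # ensure there's an entry for the epoch, then append per slot;
  # hasKeyOrPut inserts the given value only when the key was missing
  discard duties.hasKeyOrPut(epoch, initTable[uint64, seq[AttesterDuties]]())
  if duties[epoch].hasKeyOrPut(d.slot, @[d]):
    duties[epoch][d.slot].add d

addDuty(10, AttesterDuties(slot: 320))
addDuty(10, AttesterDuties(slot: 320))
addDuty(11, AttesterDuties(slot: 355))
duties.del(9)                    # prune duties from the previous epoch
echo duties[10][320].len         # 2
```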
proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, async.} =
@ -76,6 +112,7 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
let
slot = wallSlot.slot # afterGenesis == true!
nextSlot = slot + 1
epoch = slot.compute_epoch_at_slot
info "Slot start",
lastSlot = shortLog(lastSlot),
@ -91,11 +128,11 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
# could take up time for attesting... Perhaps this should be called more
# than once per epoch because of forks & other events...
if slot.isEpoch:
await getValidatorDutiesForEpoch(vc, slot.compute_epoch_at_slot)
await getValidatorDutiesForEpoch(vc, epoch)
# check if we have a validator which needs to propose on this slot
if vc.proposalsForEpoch.contains slot:
let public_key = vc.proposalsForEpoch[slot]
if vc.proposalsForCurrentEpoch.contains slot:
let public_key = vc.proposalsForCurrentEpoch[slot]
let validator = vc.attachedValidators.validators[public_key]
let randao_reveal = validator.genRandaoReveal(
@ -111,7 +148,7 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
discard await vc.client.post_v1_beacon_blocks(newBlock)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#attesting
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting
# A validator should create and broadcast the attestation to the associated
# attestation subnet when either (a) the validator has received a valid
# block from the expected block proposer for the assigned slot or
@ -121,8 +158,8 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
seconds(int64(SECONDS_PER_SLOT)) div 3, slot, "Waiting to send attestations")
# check if we have validators which need to attest on this slot
if vc.attestationsForEpoch.contains slot:
for a in vc.attestationsForEpoch[slot]:
if vc.attestationsForEpoch[epoch].contains slot:
for a in vc.attestationsForEpoch[epoch][slot]:
let validator = vc.attachedValidators.validators[a.public_key]
let ad = await vc.client.get_v1_validator_attestation_data(slot, a.committee_index)
@ -135,7 +172,8 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
discard await vc.client.post_v1_beacon_pool_attestations(attestation)
except CatchableError as err:
error "Caught an unexpected error", err = err.msg
warn "Caught an unexpected error", err = err.msg, slot = shortLog(slot)
await vc.connectToBN()
let
nextSlotStart = saturate(vc.beaconClock.fromNow(nextSlot))
@ -177,34 +215,27 @@ programMain:
var vc = ValidatorClient(
config: config,
client: newRpcHttpClient(),
attachedValidators: ValidatorPool.init()
client: newRpcHttpClient()
)
vc.proposalsForEpoch.init()
vc.attestationsForEpoch.init()
# load all the validators from the data dir into memory
for curr in vc.config.validatorKeys:
vc.attachedValidators.addLocalValidator(curr.toPubKey, curr)
# TODO perhaps we should handle the case when the BN is down by trying to
# connect until success; also, on later disconnects, we should keep trying to reconnect
waitFor vc.client.connect("localhost", Port(config.rpcPort)) # TODO: use config.rpcAddress
info "Connected to beacon node", port = config.rpcPort
waitFor vc.connectToBN()
# init the beacon clock
vc.beaconGenesis = waitFor vc.client.get_v1_beacon_genesis()
vc.beaconClock = BeaconClock.init(vc.beaconGenesis.genesis_time)
vc.attemptUntilSuccess:
# init the beacon clock
vc.beaconGenesis = waitFor vc.client.get_v1_beacon_genesis()
vc.beaconClock = BeaconClock.init(vc.beaconGenesis.genesis_time)
let
curSlot = vc.beaconClock.now().slotOrZero()
nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1
fromNow = saturate(vc.beaconClock.fromNow(nextSlot))
# onSlotStart() requests the validator duties only at the start of each epoch,
# so we should request the duties here when the VC binary boots up in order
# to handle the case of starting in the middle of an epoch. Also for the genesis slot.
waitFor vc.getValidatorDutiesForEpoch(curSlot.compute_epoch_at_slot)
vc.attemptUntilSuccess:
waitFor vc.getValidatorDutiesForEpoch(curSlot.compute_epoch_at_slot)
info "Scheduling first slot action",
beaconTime = shortLog(vc.beaconClock.now()),

View File

@ -20,7 +20,7 @@ import
# Local modules
spec/[datatypes, digest, crypto, beaconstate, helpers, validator, network],
conf, time, validator_pool, state_transition,
attestation_pool, block_pool, eth2_network,
attestation_pool, block_pool, eth2_network, keystore_management,
beacon_node_common, beacon_node_types, nimbus_binary_common,
mainchain_monitor, version, ssz/merkleization, interop,
attestation_aggregation, sync_manager, sszdump
@ -34,15 +34,15 @@ declareCounter beacon_blocks_proposed,
logScope: topics = "beacval"
proc saveValidatorKey*(keyName, key: string, conf: BeaconNodeConf) =
let validatorsDir = conf.localValidatorsDir
let validatorsDir = conf.validatorsDir
let outputFile = validatorsDir / keyName
createDir validatorsDir
writeFile(outputFile, key)
info "Imported validator key", file = outputFile
proc addLocalValidator*(node: BeaconNode,
state: BeaconState,
privKey: ValidatorPrivKey) =
state: BeaconState,
privKey: ValidatorPrivKey) =
let pubKey = privKey.toPubKey()
let idx = state.validators.asSeq.findIt(it.pubKey == pubKey)
@ -53,15 +53,22 @@ proc addLocalValidator*(node: BeaconNode,
node.attachedValidators.addLocalValidator(pubKey, privKey)
proc addLocalValidators*(node: BeaconNode, state: BeaconState) =
for validatorKey in node.config.validatorKeys:
node.addLocalValidator state, validatorKey
proc addLocalValidators*(node: BeaconNode) {.async.} =
let
head = node.blockPool.head
bs = BlockSlot(blck: head.blck, slot: head.blck.slot)
info "Local validators attached ", count = node.attachedValidators.count
node.blockPool.withState(node.blockPool.tmpState, bs):
for validatorKey in node.config.validatorKeys:
node.addLocalValidator state, validatorKey
# Allow some network events to be processed:
await sleepAsync(0.seconds)
info "Local validators attached ", count = node.attachedValidators.count
func getAttachedValidator*(node: BeaconNode,
state: BeaconState,
idx: ValidatorIndex): AttachedValidator =
state: BeaconState,
idx: ValidatorIndex): AttachedValidator =
let validatorKey = state.validators[idx].pubkey
node.attachedValidators.getValidator(validatorKey)
@ -94,7 +101,7 @@ proc isSynced(node: BeaconNode, head: BlockRef): bool =
proc sendAttestation*(node: BeaconNode, attestation: Attestation) =
logScope: pcs = "send_attestation"
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#broadcast-attestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/validator.md#broadcast-attestation
node.network.broadcast(
getMainnetAttestationTopic(node.forkDigest, attestation.data.index),
attestation)
@ -115,7 +122,7 @@ proc createAndSendAttestation(node: BeaconNode,
node.sendAttestation(attestation)
if node.config.dumpEnabled:
dump(node.config.dumpDir, attestation.data, validator.pubKey)
dump(node.config.dumpDirOutgoing, attestation.data, validator.pubKey)
info "Attestation sent",
attestation = shortLog(attestation),
@ -169,7 +176,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
# `state_transition` that takes a `StateData` instead and updates
# the block as well
doAssert v.addr == addr poolPtr.tmpState.data
poolPtr.tmpState = poolPtr.headState
assign(poolPtr.tmpState, poolPtr.headState)
var cache = get_empty_per_epoch_cache()
let message = makeBeaconBlock(
@ -214,10 +221,7 @@ proc proposeSignedBlock*(node: BeaconNode,
cat = "consensus"
if node.config.dumpEnabled:
dump(node.config.dumpDir, newBlock, newBlockRef[])
node.blockPool.withState(
node.blockPool.tmpState, newBlockRef[].atSlot(newBlockRef[].slot)):
dump(node.config.dumpDir, hashedState, newBlockRef[])
dump(node.config.dumpDirOutgoing, newBlock, newBlockRef[])
node.network.broadcast(node.topicBeaconBlocks, newBlock)
@ -450,7 +454,7 @@ proc handleValidatorDuties*(
# with any clock discrepancies once only, at the start of slot timer
# processing..
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#attesting
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting
# A validator should create and broadcast the attestation to the associated
# attestation subnet when either (a) the validator has received a valid
# block from the expected block proposer for the assigned slot or
@ -466,7 +470,7 @@ proc handleValidatorDuties*(
handleAttestations(node, head, slot)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#broadcast-aggregate
# If the validator is selected to aggregate (is_aggregator), then they
# broadcast their best aggregate as a SignedAggregateAndProof to the global
# aggregate channel (beacon_aggregate_and_proof) two-thirds of the way

View File

@ -1,107 +0,0 @@
import
os, strutils,
chronicles, chronos, blscurve, nimcrypto, json_serialization, serialization,
web3, stint, eth/keys,
spec/[datatypes, digest, crypto], conf, ssz/merkleization, interop, merkle_minimal
contract(DepositContract):
proc deposit(pubkey: Bytes48, withdrawalCredentials: Bytes32, signature: Bytes96, deposit_data_root: FixedBytes[32])
type
DelayGenerator* = proc(): chronos.Duration {.closure, gcsafe.}
proc writeTextFile(filename: string, contents: string) =
writeFile(filename, contents)
# echo "Wrote ", filename
proc writeFile(filename: string, value: auto) =
Json.saveFile(filename, value, pretty = true)
# echo "Wrote ", filename
proc ethToWei(eth: UInt256): UInt256 =
eth * 1000000000000000000.u256
proc generateDeposits*(totalValidators: int,
outputDir: string,
randomKeys: bool,
firstIdx = 0): seq[Deposit] =
info "Generating deposits", totalValidators, outputDir, randomKeys
for i in 0 ..< totalValidators:
let
v = validatorFileBaseName(firstIdx + i)
depositFn = outputDir / v & ".deposit.json"
privKeyFn = outputDir / v & ".privkey"
if existsFile(depositFn) and existsFile(privKeyFn):
try:
result.add Json.loadFile(depositFn, Deposit)
continue
except SerializationError as err:
debug "Rewriting unreadable deposit", err = err.formatMsg(depositFn)
discard
var
privkey{.noInit.}: ValidatorPrivKey
pubKey{.noInit.}: ValidatorPubKey
if randomKeys:
(pubKey, privKey) = crypto.newKeyPair().tryGet()
else:
privKey = makeInteropPrivKey(i).tryGet()
pubKey = privKey.toPubKey()
let dp = makeDeposit(pubKey, privKey)
result.add(dp)
# Does quadratic additional work, but fast enough, and otherwise more
# cleanly allows free intermixing of pre-existing and newly generated
# deposit and private key files. TODO: only generate new Merkle proof
# for the most recent deposit if this becomes bottleneck.
attachMerkleProofs(result)
writeTextFile(privKeyFn, privKey.toHex())
writeFile(depositFn, result[result.len - 1])
proc sendDeposits*(
deposits: seq[Deposit],
web3Url, depositContractAddress, privateKey: string,
delayGenerator: DelayGenerator = nil) {.async.} =
var web3 = await newWeb3(web3Url)
if privateKey.len != 0:
web3.privateKey = PrivateKey.fromHex(privateKey).tryGet()
else:
let accounts = await web3.provider.eth_accounts()
if accounts.len == 0:
error "No account offered by the web3 provider", web3Url
return
web3.defaultAccount = accounts[0]
let contractAddress = Address.fromHex(depositContractAddress)
for i, dp in deposits:
let depositContract = web3.contractSender(DepositContract, contractAddress)
discard await depositContract.deposit(
Bytes48(dp.data.pubKey.toRaw()),
Bytes32(dp.data.withdrawal_credentials.data),
Bytes96(dp.data.signature.toRaw()),
FixedBytes[32](hash_tree_root(dp.data).data)).send(value = 32.u256.ethToWei, gasPrice = 1)
if delayGenerator != nil:
await sleepAsync(delayGenerator())
when isMainModule:
import confutils
cli do (totalValidators: int = 125000,
outputDir: string = "validators",
randomKeys: bool = false,
web3Url: string = "",
depositContractAddress: string = ""):
let deposits = generateDeposits(totalValidators, outputDir, randomKeys)
if web3Url.len() > 0 and depositContractAddress.len() > 0:
echo "Sending deposits to eth1..."
waitFor sendDeposits(deposits, web3Url, depositContractAddress, "")
echo "Done"

View File

@ -1,7 +1,7 @@
import
tables,
chronos, chronicles,
spec/[datatypes, crypto, digest, state_transition_block],
spec/[datatypes, crypto, digest, signatures, helpers],
beacon_node_types
func init*(T: type ValidatorPool): T =
@ -80,10 +80,11 @@ proc signAggregateAndProof*(v: AttachedValidator,
error "Out of process signAggregateAndProof not implemented"
quit 1
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#randao-reveal
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#randao-reveal
func genRandaoReveal*(k: ValidatorPrivKey, fork: Fork,
genesis_validators_root: Eth2Digest, slot: Slot): ValidatorSig =
get_epoch_signature(fork, genesis_validators_root, slot, k)
get_epoch_signature(
fork, genesis_validators_root, slot.compute_epoch_at_slot, k)
func genRandaoReveal*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, slot: Slot): ValidatorSig =
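The fix above matters because the RANDAO reveal signs the epoch, not the slot: `get_epoch_signature` now receives `slot.compute_epoch_at_slot`. The arithmetic behind that helper is plain integer division; a sketch with the mainnet constant (the minimal preset uses 8 slots per epoch):

```nim
const SLOTS_PER_EPOCH = 32'u64    # mainnet; the minimal preset uses 8

func computeEpochAtSlot(slot: uint64): uint64 =
  slot div SLOTS_PER_EPOCH

# every slot in an epoch yields the same signing message:
assert computeEpochAtSlot(64) == 2
assert computeEpochAtSlot(95) == 2
assert computeEpochAtSlot(96) == 3
```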

View File

@ -43,18 +43,32 @@ else:
switch("import", "testutils/moduletests")
const useLibStackTrace = not defined(macosx) and
not (defined(windows) and defined(i386)) and
not defined(disable_libbacktrace)
when useLibStackTrace:
--define:nimStackTraceOverride
switch("import", "libbacktrace")
else:
--stacktrace:on
--linetrace:on
# the default open files limit is too low on macOS (512), breaking the
# "--debugger:native" build. It can be increased with `ulimit -n 1024`.
if not defined(macosx):
# add debugging symbols and original files and line numbers
--debugger:native
if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace):
# light-weight stack traces using libbacktrace and libunwind
--define:nimStackTraceOverride
switch("import", "libbacktrace")
--define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9
# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'"
switch("warning", "CaseTransition:off")
# The compiler doth protest too much, methinks, about all these cases where it can't
# do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230
switch("warning", "ObservableStores:off")
# Too many false positives for "Warning: method has lock level <unknown>, but another method has 0 [LockLevel]"
switch("warning", "LockLevel:off")

View File

@ -1,5 +1,5 @@
import
strformat, os, confutils
strformat, os, confutils, algorithm
type
Command = enum
@ -18,13 +18,14 @@ type
defaultValue: "deposits"
name: "deposits-dir" }: string
secretsDir {.
defaultValue: "secrets"
name: "secrets-dir" }: string
networkDataDir {.
defaultValue: "data"
name: "network-data-dir" }: string
totalValidators {.
name: "total-validators" }: int
totalUserValidators {.
defaultValue: 0
name: "user-validators" }: int
@ -39,6 +40,9 @@ var conf = load CliConfig
var
serverCount = 10
instancesCount = 2
validators = listDirs(conf.depositsDir)
sort(validators)
proc findOrDefault[K, V](tupleList: openarray[(K, V)], key: K, default: V): V =
for t in tupleList:
@ -60,7 +64,7 @@ iterator nodes: Node =
iterator validatorAssignments: tuple[node: Node; firstValidator, lastValidator: int] =
let
systemValidators = conf.totalValidators - conf.totalUserValidators
systemValidators = validators.len - conf.totalUserValidators
defaultValidatorAssignment = proc (nodeIdx: int): int =
(systemValidators div serverCount) div instancesCount
@ -110,26 +114,26 @@ of restart_nodes:
of reset_network:
for n, firstValidator, lastValidator in validatorAssignments():
var
keysList = ""
validatorDirs = ""
secretFiles = ""
networkDataFiles = conf.networkDataDir & "/{genesis.ssz,bootstrap_nodes.txt}"
for i in firstValidator ..< lastValidator:
let validatorKey = fmt"v{i:07}.privkey"
keysList.add " "
keysList.add conf.depositsDir / validatorKey
validatorDirs.add " "
validatorDirs.add conf.depositsDir / validators[i]
secretFiles.add " "
secretFiles.add conf.secretsDir / validators[i]
let dockerPath = &"/docker/{n.container}/data/BeaconNode"
echo &"echo Syncing {lastValidator - firstValidator} keys starting from {firstValidator} to container {n.container}@{n.server} ... && \\"
echo &" ssh {n.server} 'sudo rm -rf /tmp/nimbus && mkdir -p /tmp/nimbus/' && \\"
echo &" ssh {n.server} 'sudo rm -rf /tmp/nimbus && mkdir -p /tmp/nimbus/{{validators,secrets}}' && \\"
echo &" rsync -a -zz {networkDataFiles} {n.server}:/tmp/nimbus/net-data/ && \\"
if keysList.len > 0:
echo &" rsync -a -zz {keysList} {n.server}:/tmp/nimbus/keys/ && \\"
if validatorDirs.len > 0:
echo &" rsync -a -zz {validatorDirs} {n.server}:/tmp/nimbus/validators/ && \\"
echo &" rsync -a -zz {secretFiles} {n.server}:/tmp/nimbus/secrets/ && \\"
echo &" ssh {n.server} 'sudo docker container stop {n.container}; " &
&"sudo mkdir -p {dockerPath}/validators && " &
&"sudo rm -rf {dockerPath}/validators/* && " &
&"sudo rm -rf {dockerPath}/db && " &
(if keysList.len > 0: &"sudo mv /tmp/nimbus/keys/* {dockerPath}/validators/ && " else: "") &
&"sudo rm -rf {dockerPath}/{{db,validators,secrets}}* && " &
(if validators.len > 0: &"sudo mv /tmp/nimbus/* {dockerPath}/ && " else: "") &
&"sudo mv /tmp/nimbus/net-data/* {dockerPath}/ && " &
&"sudo chown dockremap:docker -R {dockerPath}'"

View File

@ -101,27 +101,27 @@
"steppedLine": false,
"targets": [
{
"expr": "rate(process_cpu_seconds_total{node=\"0\"}[2s]) * 100",
"expr": "rate(process_cpu_seconds_total{node=\"${node}\"}[2s]) * 100",
"legendFormat": "CPU usage %",
"refId": "A"
},
{
"expr": "process_open_fds{node=\"0\"}",
"expr": "process_open_fds{node=\"${node}\"}",
"legendFormat": "open file descriptors",
"refId": "C"
},
{
"expr": "process_resident_memory_bytes{node=\"0\"}",
"expr": "process_resident_memory_bytes{node=\"${node}\"}",
"legendFormat": "RSS",
"refId": "D"
},
{
"expr": "nim_gc_mem_bytes{node=\"0\"}",
"expr": "nim_gc_mem_bytes{node=\"${node}\"}",
"legendFormat": "Nim GC mem total",
"refId": "F"
},
{
"expr": "nim_gc_mem_occupied_bytes{node=\"0\"}",
"expr": "nim_gc_mem_occupied_bytes{node=\"${node}\"}",
"legendFormat": "Nim GC mem used",
"refId": "G"
}
@ -130,7 +130,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "resources #0",
"title": "resources #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -210,12 +210,12 @@
"steppedLine": false,
"targets": [
{
"expr": "libp2p_open_bufferstream{node=\"0\"}",
"expr": "libp2p_open_bufferstream{node=\"${node}\"}",
"legendFormat": "BufferStream",
"refId": "A"
},
{
"expr": "libp2p_open_connection{node=\"0\"}",
"expr": "libp2p_open_connection{node=\"${node}\"}",
"legendFormat": "Connection",
"refId": "B"
}
@ -224,7 +224,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "open streams #0",
"title": "open streams #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -304,13 +304,13 @@
"steppedLine": false,
"targets": [
{
"expr": "beacon_current_validators{node=\"0\"}",
"expr": "beacon_current_validators{node=\"${node}\"}",
"interval": "",
"legendFormat": "current validators",
"refId": "A"
},
{
"expr": "beacon_current_live_validators{node=\"0\"}",
"expr": "beacon_current_live_validators{node=\"${node}\"}",
"interval": "",
"legendFormat": "current live validators",
"refId": "B"
@ -320,7 +320,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "validators #0",
"title": "validators #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -405,7 +405,7 @@
"steppedLine": false,
"targets": [
{
"expr": "nim_gc_heap_instance_occupied_bytes{node=\"0\"}",
"expr": "nim_gc_heap_instance_occupied_bytes{node=\"${node}\"}",
"interval": "",
"legendFormat": "{{type_name}}",
"refId": "A"
@ -415,7 +415,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "GC heap objects #0",
"title": "GC heap objects #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -493,7 +493,7 @@
"steppedLine": false,
"targets": [
{
"expr": "beacon_state_data_cache_hits_total{node=\"0\"} * 100 / (beacon_state_data_cache_hits_total{node=\"0\"} + beacon_state_data_cache_misses_total{node=\"0\"})",
"expr": "beacon_state_data_cache_hits_total{node=\"${node}\"} * 100 / (beacon_state_data_cache_hits_total{node=\"${node}\"} + beacon_state_data_cache_misses_total{node=\"${node}\"})",
"interval": "",
"legendFormat": "cache hit rate",
"refId": "A"
@ -503,7 +503,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "pool.cachedStates #0",
"title": "pool.cachedStates #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -587,7 +587,7 @@
"steppedLine": false,
"targets": [
{
"expr": "sqlite3_memory_used_bytes{node=\"0\"}",
"expr": "sqlite3_memory_used_bytes{node=\"${node}\"}",
"interval": "",
"legendFormat": "Memory used",
"refId": "A"
@ -597,7 +597,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "SQLite3 #0",
"title": "SQLite3 #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -698,14 +698,14 @@
"tableColumn": "",
"targets": [
{
"expr": "process_resident_memory_bytes{node=\"0\"}",
"expr": "process_resident_memory_bytes{node=\"${node}\"}",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "RSS mem #0",
"title": "RSS mem #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -781,14 +781,14 @@
"tableColumn": "",
"targets": [
{
"expr": "rate(process_cpu_seconds_total{node=\"0\"}[2s]) * 100",
"expr": "rate(process_cpu_seconds_total{node=\"${node}\"}[2s]) * 100",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "CPU usage #0",
"title": "CPU usage #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -864,7 +864,7 @@
"tableColumn": "",
"targets": [
{
"expr": "beacon_slot{node=\"0\"}",
"expr": "beacon_slot{node=\"${node}\"}",
"interval": "",
"legendFormat": "",
"refId": "A"
@ -873,7 +873,7 @@
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "current slot #0",
"title": "current slot #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -1034,14 +1034,14 @@
"tableColumn": "",
"targets": [
{
"expr": "beacon_attestations_received_total{node=\"0\"}",
"expr": "beacon_attestations_received_total{node=\"${node}\"}",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "att'ns recv'd #0",
"title": "att'ns recv'd #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -1097,13 +1097,13 @@
"steppedLine": false,
"targets": [
{
"expr": "rate(beacon_blocks_received_total{node=\"0\"}[4s]) * 3",
"expr": "rate(beacon_blocks_received_total{node=\"${node}\"}[4s]) * 3",
"interval": "",
"legendFormat": "received",
"refId": "B"
},
{
"expr": "rate(beacon_blocks_proposed_total{node=\"0\"}[4s]) * 3",
"expr": "rate(beacon_blocks_proposed_total{node=\"${node}\"}[4s]) * 3",
"interval": "",
"legendFormat": "proposed",
"refId": "A"
@ -1113,7 +1113,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "blocks #0",
"title": "blocks #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -1213,7 +1213,7 @@
"tableColumn": "",
"targets": [
{
"expr": "beacon_current_epoch{node=\"0\"}",
"expr": "beacon_current_epoch{node=\"${node}\"}",
"interval": "",
"legendFormat": "",
"refId": "A"
@ -1222,7 +1222,7 @@
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "current epoch #0",
"title": "current epoch #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -1297,7 +1297,7 @@
"tableColumn": "",
"targets": [
{
"expr": "beacon_current_justified_epoch{node=\"0\"}",
"expr": "beacon_current_justified_epoch{node=\"${node}\"}",
"interval": "",
"legendFormat": "",
"refId": "A"
@ -1306,7 +1306,7 @@
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "current justified epoch #0",
"title": "current justified epoch #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -1382,7 +1382,7 @@
"tableColumn": "",
"targets": [
{
"expr": "time() - process_start_time_seconds{node=\"0\"}",
"expr": "time() - process_start_time_seconds{node=\"${node}\"}",
"interval": "",
"legendFormat": "",
"refId": "A"
@ -1391,7 +1391,7 @@
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "runtime #0",
"title": "runtime #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -1467,14 +1467,14 @@
"tableColumn": "",
"targets": [
{
"expr": "libp2p_peers{node=\"0\"}",
"expr": "libp2p_peers{node=\"${node}\"}",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "peers #0",
"title": "peers #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -1549,7 +1549,7 @@
"tableColumn": "",
"targets": [
{
"expr": "beacon_finalized_epoch{node=\"0\"}",
"expr": "beacon_finalized_epoch{node=\"${node}\"}",
"interval": "",
"legendFormat": "",
"refId": "A"
@ -1558,7 +1558,7 @@
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "last finalized epoch #0",
"title": "last finalized epoch #${node}",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
@ -1611,13 +1611,13 @@
"steppedLine": false,
"targets": [
{
"expr": "rate(beacon_attestations_received_total{node=\"0\"}[4s]) * 3",
"expr": "rate(beacon_attestations_received_total{node=\"${node}\"}[4s]) * 3",
"interval": "",
"legendFormat": "received",
"refId": "A"
},
{
"expr": "rate(beacon_attestations_sent_total{node=\"0\"}[4s]) * 3",
"expr": "rate(beacon_attestations_sent_total{node=\"${node}\"}[4s]) * 3",
"interval": "",
"legendFormat": "sent",
"refId": "B"
@ -1627,7 +1627,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "attestations #0",
"title": "attestations #${node}",
"tooltip": {
"shared": true,
"sort": 0,
@ -1697,7 +1697,7 @@
"reverseYBuckets": false,
"targets": [
{
"expr": "rate(beacon_attestation_received_seconds_from_slot_start_bucket{node=\"0\"}[4s]) * 3",
"expr": "rate(beacon_attestation_received_seconds_from_slot_start_bucket{node=\"${node}\"}[4s]) * 3",
"format": "heatmap",
"instant": false,
"interval": "",
@ -1708,7 +1708,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "received attestation delay (s) #0",
"title": "received attestation delay (s) #${node}",
"tooltip": {
"show": true,
"showHistogram": false
@ -1738,7 +1738,35 @@
"style": "dark",
"tags": [],
"templating": {
"list": []
"list": [
{
"allValue": null,
"current": {
"tags": [],
"text": "0",
"value": "0"
},
"datasource": "Prometheus",
"definition": "label_values(process_virtual_memory_bytes,node)",
"hide": 0,
"includeAll": false,
"index": -1,
"label": null,
"multi": false,
"name": "node",
"options": [],
"query": "label_values(process_virtual_memory_bytes,node)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
},
"time": {
"from": "now-15m",
@ -1759,10 +1787,10 @@
]
},
"timezone": "",
"title": "beacon chain sim (node0)",
"uid": "pgeNfj2Wz2",
"title": "NBC local testnet/sim (all nodes)",
"uid": "pgeNfj2Wz2a",
"variables": {
"list": []
},
"version": 38
}
}

BIN
media/jenkins_artifacts.png Normal file

Binary file not shown. (new image: 30 KiB)

Binary file not shown. (changed image: 99 KiB before, 197 KiB after)

View File

@ -18,6 +18,7 @@ type
DbCmd* = enum
bench
dumpState
rewindState
DbConf = object
databaseDir* {.
@ -40,6 +41,15 @@ type
argument
desc: "State roots to save".}: seq[string]
of rewindState:
blockRoot* {.
argument
desc: "Block root".}: string
slot* {.
argument
desc: "Slot".}: uint64
proc cmdBench(conf: DbConf) =
var timers: array[Timers, RunningStat]
@ -104,6 +114,28 @@ proc cmdDumpState(conf: DbConf) =
except CatchableError as e:
echo "Couldn't load ", stateRoot, ": ", e.msg
proc cmdRewindState(conf: DbConf) =
echo "Opening database..."
let
db = BeaconChainDB.init(
kvStore SqStoreRef.init(conf.databaseDir.string, "nbc").tryGet())
if not BlockPool.isInitialized(db):
echo "Database not initialized"
quit 1
echo "Initializing block pool..."
let pool = BlockPool.init(db, {})
let blckRef = pool.getRef(fromHex(Eth2Digest, conf.blockRoot))
if blckRef == nil:
echo "Block not found in database"
return
pool.withState(pool.tmpState, blckRef.atSlot(Slot(conf.slot))):
echo "Writing state..."
dump("./", hashedState, blck)
when isMainModule:
let
conf = DbConf.load()
@ -113,3 +145,5 @@ when isMainModule:
cmdBench(conf)
of dumpState:
cmdDumpState(conf)
of rewindState:
cmdRewindState(conf)

View File

@ -17,13 +17,17 @@ cli do(kind: string, file: string):
echo "Unknown file type: ", ext
quit 1
)
echo hash_tree_root(v[]).data.toHex()
when t is SignedBeaconBlock:
echo hash_tree_root(v.message).data.toHex()
else:
echo hash_tree_root(v[]).data.toHex()
let ext = splitFile(file).ext
case kind
of "attester_slashing": printit(AttesterSlashing)
of "attestation": printit(Attestation)
of "signed_block": printit(SignedBeaconBlock)
of "block": printit(BeaconBlock)
of "block_body": printit(BeaconBlockBody)
of "block_header": printit(BeaconBlockHeader)

View File

@ -22,6 +22,7 @@ cli do(kind: string, file: string):
case kind
of "attester_slashing": printit(AttesterSlashing)
of "attestation": printit(Attestation)
of "signed_block": printit(SignedBeaconBlock)
of "block": printit(BeaconBlock)
of "block_body": printit(BeaconBlockBody)
of "block_header": printit(BeaconBlockHeader)

View File

@ -20,8 +20,7 @@ import
options, random, tables,
../tests/[testblockutil],
../beacon_chain/spec/[
beaconstate, crypto, datatypes, digest, helpers, validator,
state_transition_block],
beaconstate, crypto, datatypes, digest, helpers, validator, signatures],
../beacon_chain/[
attestation_pool, block_pool, beacon_node_types, beacon_chain_db,
interop, state_transition, validator_pool],

View File

@ -41,7 +41,7 @@ func verifyConsensus*(state: BeaconState, attesterRatio: auto) =
doAssert state.finalized_checkpoint.epoch + 2 >= current_epoch
proc loadGenesis*(validators: int, validate: bool): ref HashedBeaconState =
let fn = &"genesim_{const_preset}_{validators}.ssz"
let fn = &"genesim_{const_preset}_{validators}_{SPEC_VERSION}.ssz"
let res = (ref HashedBeaconState)()
if fileExists(fn):
res.data = SSZ.loadFile(fn, BeaconState)
@ -72,6 +72,7 @@ proc loadGenesis*(validators: int, validate: bool): ref HashedBeaconState =
echo &"Saving to {fn}..."
SSZ.saveFile(fn, res.data)
res
proc printTimers*[Timers: enum](

View File

@ -106,9 +106,12 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
# some variation
let
target_slot = state[].data.slot + MIN_ATTESTATION_INCLUSION_DELAY - 1
committee_count = get_committee_count_at_slot(state[].data, target_slot)
let
scass = withTimerRet(timers[tShuffle]):
mapIt(
0'u64 ..< get_committee_count_at_slot(state[].data, target_slot),
0 ..< commitee_count.int,
get_beacon_committee(state[].data, target_slot, it.CommitteeIndex, cache))
for i, scas in scass:

View File

@ -1,24 +0,0 @@
#!/bin/bash
set -eu
NETWORK_NAME=$1
NODE_ID=$2
FIRST_VALIDATOR=$3
LAST_VALIDATOR=$4
cd $(dirname "$0")
cd ..
if [ -f .env ]; then
source .env
fi
NETWORK_DIR=$WWW_DIR/$NETWORK_NAME
for i in $(seq $FIRST_VALIDATOR $LAST_VALIDATOR); do
VALIDATOR=v$(printf '%07d' $i)
beacon_chain/beacon_node --data-dir="$DATA_DIR/node-$NODE_ID" importValidator \
--keyfile="$NETWORK_DIR/$VALIDATOR.privkey"
done

View File

@ -30,9 +30,17 @@ cli do (skipGoerliKey {.
desc: "The Ethereum 2.0 const preset of the network (optional)"
name: "const-preset" .} = "",
devBuild {.
desc: "Enables more extensive logging and debugging support"
name: "dev-build" .} = false,
nodeID {.
desc: "Node ID" .} = 0.int,
basePort {.
desc: "Base TCP/UDP port (nodeID will be added to it)" .} = 9000.int,
baseMetricsPort {.
desc: "Base metrics port (nodeID will be added to it)" .} = 8008.int,
baseRpcPort {.
desc: "Base rpc port (nodeID will be added to it)" .} = 9190.int,
testnetName {.argument .}: string):
let
@ -84,15 +92,16 @@ cli do (skipGoerliKey {.
let
dataDirName = testnetName.replace("/", "_")
.replace("(", "_")
.replace(")", "_")
.replace(")", "_") & "_" & $nodeID
dataDir = buildDir / "data" / dataDirName
validatorsDir = dataDir / "validators"
secretsDir = dataDir / "secrets"
beaconNodeBinary = buildDir / "beacon_node_" & dataDirName
var
nimFlags = "-d:chronicles_log_level=TRACE " & getEnv("NIM_PARAMS")
if devBuild:
nimFlags.add """ -d:"chronicles_sinks=textlines,json[file(nbc.log)]" """
# write the logs to a file
nimFlags.add """ -d:"chronicles_sinks=textlines,json[file(nbc""" & staticExec("date +\"%Y%m%d%H%M%S\"") & """.log)]" """
let depositContractFile = testnetDir / depositContractFileName
if system.fileExists(depositContractFile):
@ -111,9 +120,6 @@ cli do (skipGoerliKey {.
echo "Detected testnet restart. Deleting previous database..."
rmDir dataDir
cd rootDir
exec &"""nim c {nimFlags} -d:"const_preset={preset}" -o:"{beaconNodeBinary}" beacon_chain/beacon_node.nim"""
proc execIgnoringExitCode(s: string) =
# reduces the error output when interrupting an external command with Ctrl+C
try:
@ -121,6 +127,14 @@ cli do (skipGoerliKey {.
except OsError:
discard
cd rootDir
mkDir dataDir
# macOS may not have gnu-getopt installed and in the PATH
execIgnoringExitCode &"""./scripts/make_prometheus_config.sh --nodes """ & $(1 + nodeID) & &""" --base-metrics-port {baseMetricsPort} --config-file "{dataDir}/prometheus.yml""""
exec &"""nim c {nimFlags} -d:"const_preset={preset}" -o:"{beaconNodeBinary}" beacon_chain/beacon_node.nim"""
if not skipGoerliKey and depositContractOpt.len > 0 and not system.dirExists(validatorsDir):
mode = Silent
echo "\nPlease enter your Goerli Eth1 private key in hex form (e.g. 0x1a2...f3c) in order to become a validator (you'll need access to 32 GoETH)."
@ -132,8 +146,9 @@ cli do (skipGoerliKey {.
mkDir validatorsDir
mode = Verbose
exec replace(&"""{beaconNodeBinary} makeDeposits
--random-deposits=1
--deposits-dir="{validatorsDir}"
--count=1
--out-validators-dir="{validatorsDir}"
--out-secrets-dir="{secretsDir}"
--deposit-private-key={privKey}
--web3-url={web3Url}
{depositContractOpt}
@ -148,11 +163,17 @@ cli do (skipGoerliKey {.
logLevelOpt = &"""--log-level="{logLevel}" """
mode = Verbose
cd dataDir
execIgnoringExitCode replace(&"""{beaconNodeBinary}
--data-dir="{dataDir}"
--dump
--web3-url={web3Url}
--tcp-port=""" & $(basePort + nodeID) & &"""
--udp-port=""" & $(basePort + nodeID) & &"""
--metrics
--metrics-port=""" & $(baseMetricsPort + nodeID) & &"""
--rpc
--rpc-port=""" & $(baseRpcPort + nodeID) & &"""
{bootstrapFileOpt}
{logLevelOpt}
{depositContractOpt}

View File

@ -24,7 +24,7 @@ if [ ${PIPESTATUS[0]} != 4 ]; then
fi
OPTS="ht:n:d:"
LONGOPTS="help,testnet:,nodes:,data-dir:,disable-htop,log-level:,grafana,base-port:,base-metrics-port:"
LONGOPTS="help,testnet:,nodes:,data-dir:,disable-htop,log-level:,base-port:,base-metrics-port:"
# default values
TESTNET="1"
@ -32,7 +32,6 @@ NUM_NODES="10"
DATA_DIR="local_testnet_data"
USE_HTOP="1"
LOG_LEVEL="DEBUG"
ENABLE_GRAFANA="0"
BASE_PORT="9000"
BASE_METRICS_PORT="8008"
@ -51,7 +50,6 @@ CI run: $(basename $0) --disable-htop -- --verify-finalization --stop-at-epoch=5
--base-metrics-port bootstrap node's metrics server port (default: ${BASE_METRICS_PORT})
--disable-htop don't use "htop" to see the beacon_node processes
--log-level set the log level (default: ${LOG_LEVEL})
--grafana generate Grafana dashboards (and Prometheus config file)
EOF
}
@ -89,10 +87,6 @@ while true; do
LOG_LEVEL="$2"
shift 2
;;
--grafana)
ENABLE_GRAFANA="1"
shift
;;
--base-port)
BASE_PORT="$2"
shift 2
@ -120,8 +114,13 @@ fi
NETWORK="testnet${TESTNET}"
rm -rf "${DATA_DIR}"
DEPOSITS_DIR="${DATA_DIR}/deposits_dir"
mkdir -p "${DEPOSITS_DIR}"
SECRETS_DIR="${DATA_DIR}/secrets"
mkdir -p "${SECRETS_DIR}"
NETWORK_DIR="${DATA_DIR}/network_dir"
mkdir -p "${NETWORK_DIR}"
@ -137,49 +136,31 @@ else
fi
NETWORK_NIM_FLAGS=$(scripts/load-testnet-nim-flags.sh ${NETWORK})
$MAKE -j2 LOG_LEVEL="${LOG_LEVEL}" NIMFLAGS="-d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node process_dashboard
$MAKE LOG_LEVEL="${LOG_LEVEL}" NIMFLAGS="-d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node
./build/beacon_node makeDeposits \
--quickstart-deposits=${QUICKSTART_VALIDATORS} \
--random-deposits=${RANDOM_VALIDATORS} \
--deposits-dir="${DEPOSITS_DIR}"
--count=${TOTAL_VALIDATORS} \
--out-validators-dir="${DEPOSITS_DIR}" \
--out-secrets-dir="${SECRETS_DIR}"
GENESIS_OFFSET=30
TOTAL_VALIDATORS="$(( $QUICKSTART_VALIDATORS + $RANDOM_VALIDATORS ))"
BOOTSTRAP_IP="127.0.0.1"
./build/beacon_node createTestnet \
--data-dir="${DATA_DIR}/node0" \
--validators-dir="${DEPOSITS_DIR}" \
--total-validators=${TOTAL_VALIDATORS} \
--last-user-validator=${QUICKSTART_VALIDATORS} \
--last-user-validator=${USER_VALIDATORS} \
--output-genesis="${NETWORK_DIR}/genesis.ssz" \
--output-bootstrap-file="${NETWORK_DIR}/bootstrap_nodes.txt" \
--bootstrap-address=${BOOTSTRAP_IP} \
--bootstrap-port=${BASE_PORT} \
--genesis-offset=30 # Delay in seconds
--genesis-offset=${GENESIS_OFFSET} # Delay in seconds
if [[ "$ENABLE_GRAFANA" == "1" ]]; then
# Prometheus config
cat > "${DATA_DIR}/prometheus.yml" <<EOF
global:
scrape_interval: 1s
scrape_configs:
- job_name: "nimbus"
static_configs:
EOF
for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do
cat >> "${DATA_DIR}/prometheus.yml" <<EOF
- targets: ['127.0.0.1:$(( BASE_METRICS_PORT + NUM_NODE ))']
labels:
node: '$NUM_NODE'
EOF
done
# use the exported Grafana dashboard for a single node to create one for all nodes
./build/process_dashboard \
--in="tests/simulation/beacon-chain-sim-node0-Grafana-dashboard.json" \
--out="${DATA_DIR}/local-testnet-all-nodes-Grafana-dashboard.json"
fi
./scripts/make_prometheus_config.sh \
--nodes ${NUM_NODES} \
--base-metrics-port ${BASE_METRICS_PORT} \
--config-file "${DATA_DIR}/prometheus.yml"
# Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell
# instance as the parent and the target process name as a pattern to the
@ -202,10 +183,11 @@ dump_logs() {
PIDS=""
NODES_WITH_VALIDATORS=${NODES_WITH_VALIDATORS:-4}
VALIDATORS_PER_NODE=$(( $RANDOM_VALIDATORS / $NODES_WITH_VALIDATORS ))
SYSTEM_VALIDATORS=$(( TOTAL_VALIDATORS - USER_VALIDATORS ))
VALIDATORS_PER_NODE=$(( SYSTEM_VALIDATORS / NODES_WITH_VALIDATORS ))
BOOTSTRAP_TIMEOUT=10 # in seconds
for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do
for NUM_NODE in $(seq 0 $((NUM_NODES - 1))); do
if [[ ${NUM_NODE} == 0 ]]; then
BOOTSTRAP_ARG=""
else
@ -215,7 +197,7 @@ for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do
while [ ! -f "${DATA_DIR}/node0/beacon_node.address" ]; do
sleep 0.1
NOW_TIMESTAMP=$(date +%s)
if [[ "$(( NOW_TIMESTAMP - START_TIMESTAMP ))" -ge "$BOOTSTRAP_TIMEOUT" ]]; then
if [[ "$(( NOW_TIMESTAMP - START_TIMESTAMP - GENESIS_OFFSET ))" -ge "$BOOTSTRAP_TIMEOUT" ]]; then
echo "Bootstrap node failed to start in ${BOOTSTRAP_TIMEOUT} seconds. Aborting."
dump_logs
exit 1
@ -224,17 +206,21 @@ for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do
fi
# Copy validators to individual nodes.
# The first $NODES_WITH_VALIDATORS nodes split them equally between them, after skipping the first $QUICKSTART_VALIDATORS.
# The first $NODES_WITH_VALIDATORS nodes split them equally between them, after skipping the first $USER_VALIDATORS.
NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}"
mkdir -p "${NODE_DATA_DIR}/validators"
mkdir -p "${NODE_DATA_DIR}/secrets"
if [[ $NUM_NODE -lt $NODES_WITH_VALIDATORS ]]; then
for KEYFILE in $(ls ${DEPOSITS_DIR}/*.privkey | tail -n +$(( $QUICKSTART_VALIDATORS + ($VALIDATORS_PER_NODE * $NUM_NODE) + 1 )) | head -n $VALIDATORS_PER_NODE); do
cp -a "$KEYFILE" "${NODE_DATA_DIR}/validators/"
for VALIDATOR in $(ls ${DEPOSITS_DIR} | tail -n +$(( $USER_VALIDATORS + ($VALIDATORS_PER_NODE * $NUM_NODE) + 1 )) | head -n $VALIDATORS_PER_NODE); do
cp -ar "${DEPOSITS_DIR}/$VALIDATOR" "${NODE_DATA_DIR}/validators/"
cp -a "${SECRETS_DIR}/${VALIDATOR}" "${NODE_DATA_DIR}/secrets/"
done
fi
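A worked example may help here: the `tail`/`head` pipeline above selects a contiguous window of the sorted validator listing for each node, skipping the user-reserved keys at the front. The numbers below are illustrative assumptions, not values from this diff.

```bash
#!/bin/bash
# Sketch of the key-assignment window used above (illustrative numbers):
# node NUM_NODE receives VALIDATORS_PER_NODE entries, starting right after
# the USER_VALIDATORS reserved at the front of the listing.
USER_VALIDATORS=8; VALIDATORS_PER_NODE=30; NUM_NODE=1
FIRST=$(( USER_VALIDATORS + VALIDATORS_PER_NODE * NUM_NODE + 1 ))   # 1-based: 39
seq 1 128 | tail -n "+${FIRST}" | head -n "${VALIDATORS_PER_NODE}"  # prints 39..68
```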
./build/beacon_node \
--nat:extip:127.0.0.1 \
--non-interactive \
--nat:extip:127.0.0.1 \
--log-level="${LOG_LEVEL}" \
--tcp-port=$(( BASE_PORT + NUM_NODE )) \
--udp-port=$(( BASE_PORT + NUM_NODE )) \

View File

@ -0,0 +1,92 @@
#!/bin/bash
# Copyright (c) 2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
set -e
####################
# argument parsing #
####################
! getopt --test > /dev/null
if [ ${PIPESTATUS[0]} != 4 ]; then
echo '`getopt --test` failed in this environment.'
exit 1
fi
OPTS="h"
LONGOPTS="help,nodes:,base-metrics-port:,config-file:"
# default values
NUM_NODES="10"
BASE_METRICS_PORT="8008"
CONFIG_FILE="prometheus.yml"
print_help() {
cat <<EOF
Usage: $(basename $0) --nodes ${NUM_NODES} --base-metrics-port ${BASE_METRICS_PORT} --config-file "${CONFIG_FILE}"
-h, --help this help message
--nodes number of nodes to launch (default: ${NUM_NODES})
--base-metrics-port bootstrap node's metrics server port (default: ${BASE_METRICS_PORT})
--config-file write the Prometheus config to this file (default: ${CONFIG_FILE})
EOF
}
! PARSED=$(getopt --options=${OPTS} --longoptions=${LONGOPTS} --name "$0" -- "$@")
if [ ${PIPESTATUS[0]} != 0 ]; then
# getopt has complained about wrong arguments (its message goes to stderr)
exit 1
fi
# read getopt's output this way to handle the quoting right
eval set -- "$PARSED"
while true; do
case "$1" in
-h|--help)
print_help
exit
;;
-n|--nodes)
NUM_NODES="$2"
shift 2
;;
--base-metrics-port)
BASE_METRICS_PORT="$2"
shift 2
;;
--config-file)
CONFIG_FILE="$2"
shift 2
;;
--)
shift
break
;;
*)
echo "argument parsing error"
print_help
exit 1
esac
done
cat > "${CONFIG_FILE}" <<EOF
global:
scrape_interval: 1s
scrape_configs:
- job_name: "nimbus"
static_configs:
EOF
for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do
cat >> "${CONFIG_FILE}" <<EOF
- targets: ['127.0.0.1:$(( BASE_METRICS_PORT + NUM_NODE ))']
labels:
node: '$NUM_NODE'
EOF
done
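For reference, a minimal usage sketch of this helper; the output path is an arbitrary assumption, and the expected YAML is paraphrased from the heredocs above.

```bash
# Generate a scrape config for 2 nodes with metrics on ports 8008/8009.
./scripts/make_prometheus_config.sh \
  --nodes 2 \
  --base-metrics-port 8008 \
  --config-file /tmp/prometheus.yml
# /tmp/prometheus.yml should now list one target per node:
#   - targets: ['127.0.0.1:8008']   # labels: node: '0'
#   - targets: ['127.0.0.1:8009']   # labels: node: '1'
```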

View File

@ -46,6 +46,7 @@ ETH2_TESTNETS_ABS=$(cd "$ETH2_TESTNETS"; pwd)
NETWORK_DIR_ABS="$ETH2_TESTNETS_ABS/nimbus/$NETWORK"
DATA_DIR_ABS=$(mkdir -p "$DATA_DIR"; cd "$DATA_DIR"; pwd)
DEPOSITS_DIR_ABS="$DATA_DIR_ABS/deposits"
SECRETS_DIR_ABS="$DATA_DIR_ABS/secrets"
DEPOSIT_CONTRACT_ADDRESS=""
DEPOSIT_CONTRACT_ADDRESS_ARG=""
@ -54,6 +55,7 @@ if [ "$WEB3_URL" != "" ]; then
fi
mkdir -p "$DEPOSITS_DIR_ABS"
mkdir -p "$SECRETS_DIR_ABS"
if [ "$ETH1_PRIVATE_KEY" != "" ]; then
make deposit_contract
@ -69,9 +71,8 @@ make -j2 NIMFLAGS="-d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" be
echo "Generating Grafana dashboards for remote testnet servers"
for testnet in 0 1; do
./build/process_dashboard \
--in="tests/simulation/beacon-chain-sim-node0-Grafana-dashboard.json" \
--out="docker/beacon-chain-sim-remote-testnet${testnet}-Grafana-dashboard.json" \
--type="remote" \
--in="grafana/beacon_nodes_Grafana_dashboard.json" \
--out="docker/remote_testnet${testnet}_Grafana_dashboard.json" \
--testnet="${testnet}"
done
@ -83,17 +84,15 @@ echo "Building Docker image..."
make build
../build/beacon_node makeDeposits \
--quickstart-deposits=$QUICKSTART_VALIDATORS \
--random-deposits=$RANDOM_VALIDATORS \
--deposits-dir="$DEPOSITS_DIR_ABS"
TOTAL_VALIDATORS="$(( $QUICKSTART_VALIDATORS + $RANDOM_VALIDATORS ))"
--count=$TOTAL_VALIDATORS \
--out-validators-dir="$DEPOSITS_DIR_ABS" \
--out-secrets-dir="$SECRETS_DIR_ABS"
../build/beacon_node createTestnet \
--data-dir="$DATA_DIR_ABS" \
--validators-dir="$DEPOSITS_DIR_ABS" \
--total-validators=$TOTAL_VALIDATORS \
--last-user-validator=$QUICKSTART_VALIDATORS \
--last-user-validator=$USER_VALIDATORS \
--output-genesis="$NETWORK_DIR_ABS/genesis.ssz" \
--output-bootstrap-file="$NETWORK_DIR_ABS/bootstrap_nodes.txt" \
--bootstrap-address=$BOOTSTRAP_IP \
@ -116,9 +115,9 @@ if [[ $PUBLISH_TESTNET_RESETS != "0" ]]; then
../env.sh nim --verbosity:0 --hints:off manage_testnet_hosts.nims reset_network \
--network=$NETWORK \
--deposits-dir="$DEPOSITS_DIR_ABS" \
--secrets-dir="$SECRETS_DIR_ABS" \
--network-data-dir="$NETWORK_DIR_ABS" \
--user-validators=$QUICKSTART_VALIDATORS \
--total-validators=$TOTAL_VALIDATORS \
--user-validators=$USER_VALIDATORS \
> /tmp/reset-network.sh
bash /tmp/reset-network.sh

View File

@ -1,5 +1,5 @@
CONST_PRESET=minimal
QUICKSTART_VALIDATORS=8
RANDOM_VALIDATORS=120
USER_VALIDATORS=8
TOTAL_VALIDATORS=128
BOOTSTRAP_PORT=9000
WEB3_URL=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a

View File

@ -1,6 +1,6 @@
CONST_PRESET=mainnet
QUICKSTART_VALIDATORS=8
RANDOM_VALIDATORS=120
USER_VALIDATORS=8
TOTAL_VALIDATORS=128
BOOTSTRAP_PORT=9100
WEB3_URL=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a

View File

@ -19,6 +19,9 @@ template sszFuzzingTest*(T: type) =
let reEncoded = SSZ.encode(decoded)
when T isnot SignedBeaconBlock:
let hash = hash_tree_root(decoded)
if payload != reEncoded:
when hasSerializationTracing:
# Run deserialization again to produce a serialization trace
@ -30,6 +33,9 @@ template sszFuzzingTest*(T: type) =
echo "Re-encoided payload with len = ", reEncoded.len
echo reEncoded
when T isnot SignedBeaconBlock:
echo "HTR: ", hash
echo repr(decoded)
doAssert false

View File

@ -13,7 +13,7 @@ import
sets,
# Specs
../../beacon_chain/spec/[datatypes, beaconstate, helpers, validator, crypto,
state_transition_block],
signatures],
# Internals
../../beacon_chain/[ssz, extras, state_transition],
# Mocking procs

View File

@ -8,7 +8,7 @@
import
options,
# Specs
../../beacon_chain/spec/[datatypes, validator, state_transition_block],
../../beacon_chain/spec/[datatypes, helpers, signatures, validator],
# Internals
../../beacon_chain/[ssz, extras],
# Mock helpers
@ -27,7 +27,8 @@ proc signMockBlockImpl(
let privkey = MockPrivKeys[signedBlock.message.proposer_index]
signedBlock.message.body.randao_reveal = get_epoch_signature(
state.fork, state.genesis_validators_root, block_slot, privkey)
state.fork, state.genesis_validators_root, block_slot.compute_epoch_at_slot,
privkey)
signedBlock.signature = get_block_signature(
state.fork, state.genesis_validators_root, block_slot,
hash_tree_root(signedBlock.message), privkey)

View File

@ -12,88 +12,34 @@ import
# Standard library
math, random,
# Specs
../../beacon_chain/spec/[datatypes, crypto, helpers, digest],
../../beacon_chain/spec/[datatypes, crypto, digest, keystore, signatures],
# Internals
../../beacon_chain/[ssz, extras, merkle_minimal],
# Mocking procs
./mock_validator_keys
func signMockDepositData(
deposit_data: var DepositData,
privkey: ValidatorPrivKey
) =
# No state --> Genesis
let domain = compute_domain(
DOMAIN_DEPOSIT,
Version(GENESIS_FORK_VERSION)
)
let signing_root = compute_signing_root(
deposit_data.getDepositMessage(),
domain
)
deposit_data.signature = blsSign(
privkey,
signing_root.data
)
func signMockDepositData(
deposit_data: var DepositData,
privkey: ValidatorPrivKey,
state: BeaconState
) =
let domain = compute_domain(
DOMAIN_DEPOSIT,
Version(GENESIS_FORK_VERSION)
)
let signing_root = compute_signing_root(
deposit_data.getDepositMessage(),
domain
)
deposit_data.signature = blsSign(
privkey,
signing_root.data
)
func mockDepositData(
deposit_data: var DepositData,
pubkey: ValidatorPubKey,
amount: uint64,
# withdrawal_credentials: Eth2Digest
) =
deposit_data.pubkey = pubkey
deposit_data.amount = amount
): DepositData =
# Insecurely use pubkey as withdrawal key
deposit_data.withdrawal_credentials.data[0] = byte BLS_WITHDRAWAL_PREFIX
deposit_data.withdrawal_credentials.data[1..^1] = pubkey.toRaw()
.eth2hash()
.data
.toOpenArray(1, 31)
DepositData(
pubkey: pubkey,
withdrawal_credentials: makeWithdrawalCredentials(pubkey),
amount: amount,
)
func mockDepositData(
deposit_data: var DepositData,
pubkey: ValidatorPubKey,
privkey: ValidatorPrivKey,
amount: uint64,
# withdrawal_credentials: Eth2Digest,
flags: UpdateFlags = {}
) =
mockDepositData(deposit_data, pubkey, amount)
): DepositData =
var ret = mockDepositData(pubkey, amount)
if skipBlsValidation notin flags:
signMockDepositData(deposit_data, privkey)
func mockDepositData(
deposit_data: var DepositData,
pubkey: ValidatorPubKey,
privkey: ValidatorPrivKey,
amount: uint64,
# withdrawal_credentials: Eth2Digest,
state: BeaconState,
flags: UpdateFlags = {}
) =
mockDepositData(deposit_data, pubkey, amount)
if skipBlsValidation notin flags:
signMockDepositData(deposit_data, privkey, state)
ret.signature = get_deposit_signature(ret, privkey)
ret
template mockGenesisDepositsImpl(
result: seq[Deposit],
@ -104,8 +50,8 @@ template mockGenesisDepositsImpl(
) =
# Genesis deposits with varying amounts
# NOTE: this could also apply for skipMerkleValidation, but prefer to err on the
# side of caution and generate a valid Deposit (it can still be skipped later).
# NOTE: prefer to err on the side of caution and generate a valid Deposit
# (it can still be skipped later).
if skipBlsValidation in flags:
# 1st loop - build deposit data
for valIdx in 0 ..< validatorCount.int:
@ -115,11 +61,7 @@ template mockGenesisDepositsImpl(
updateAmount
# DepositData
mockDepositData(
result[valIdx].data,
MockPubKeys[valIdx],
amount
)
result[valIdx].data = mockDepositData(MockPubKeys[valIdx], amount)
else: # With signing
var depositsDataHash: seq[Eth2Digest]
var depositsData: seq[DepositData]
@ -132,13 +74,8 @@ template mockGenesisDepositsImpl(
updateAmount
# DepositData
mockDepositData(
result[valIdx].data,
MockPubKeys[valIdx],
MockPrivKeys[valIdx],
amount,
flags
)
result[valIdx].data = mockDepositData(
MockPubKeys[valIdx], MockPrivKeys[valIdx], amount, flags)
depositsData.add result[valIdx].data
depositsDataHash.add hash_tree_root(result[valIdx].data)
@ -193,8 +130,7 @@ proc mockUpdateStateForNewDeposit*(
# TODO withdrawal credentials
mockDepositData(
result.data,
result.data = mockDepositData(
MockPubKeys[validator_index],
MockPrivKeys[validator_index],
amount,

View File

@ -40,12 +40,9 @@ type
TestSizeError* = object of ValueError
const
FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / "vendor" / "nim-eth2-scenarios"
SszTestsDir* =
when ETH2_SPEC == "v0.12.1":
FixturesDir/"tests-v0.12.1"
else:
FixturesDir/"tests-v0.11.3"
FixturesDir* =
currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / "vendor" / "nim-eth2-scenarios"
SszTestsDir* = FixturesDir / "tests-v" & SPEC_VERSION
proc parseTest*(path: string, Format: typedesc[Json or SSZ], T: typedesc): T =
try:

View File

@ -1,5 +1,5 @@
data/
validators/
prometheus/
beacon-chain-sim-all-nodes-Grafana-dashboard.json
/data
/validators
/prometheus
/secrets

View File

@ -1,180 +0,0 @@
import json, parseopt, strutils
# usage: process_dashboard --in=node0_dashboard.json --out=all_nodes_dashboard.json --type=local --testnet=0
type
OutputType = enum
local
remote
var
p = initOptParser()
inputFileName, outputFilename: string
outputType = OutputType.local
testnet = 0
while true:
p.next()
case p.kind:
of cmdEnd:
break
of cmdShortOption, cmdLongOption:
if p.key == "in":
inputFileName = p.val
elif p.key == "out":
outputFileName = p.val
elif p.key == "type":
outputType = parseEnum[OutputType](p.val)
elif p.key == "testnet":
testnet = p.val.parseInt()
else:
echo "unsupported argument: ", p.key
of cmdArgument:
echo "unsupported argument: ", p.key
var
inputData = parseFile(inputFileName)
panels = inputData["panels"].copy()
outputData = inputData
#############
# variables #
#############
case outputType:
of OutputType.local:
outputData["templating"]["list"] = parseJson("""
[
{
"allValue": null,
"current": {
"tags": [],
"text": "0",
"value": "0"
},
"datasource": "Prometheus",
"definition": "label_values(process_virtual_memory_bytes,node)",
"hide": 0,
"includeAll": false,
"index": -1,
"label": null,
"multi": false,
"name": "node",
"options": [],
"query": "label_values(process_virtual_memory_bytes,node)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
""")
of OutputType.remote:
outputData["templating"]["list"] = parseJson("""
[
{
"allValue": null,
"current": {
"tags": [],
"text": "beacon-node-testnet""" & $testnet & """-1",
"value": "beacon-node-testnet""" & $testnet & """-1"
},
"datasource": "master-01.do-ams3.metrics.hq",
"definition": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},container)",
"hide": 0,
"includeAll": false,
"index": -1,
"label": null,
"multi": false,
"name": "container",
"options": [],
"query": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},container)",
"refresh": 1,
"regex": "/.*testnet""" & $testnet & """.*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {
"tags": [],
"text": "master-01.aws-eu-central-1a.nimbus.test",
"value": "master-01.aws-eu-central-1a.nimbus.test"
},
"datasource": "master-01.do-ams3.metrics.hq",
"definition": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},instance)",
"hide": 0,
"includeAll": false,
"index": -1,
"label": null,
"multi": false,
"name": "instance",
"options": [],
"query": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},instance)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
""")
##########
# panels #
##########
outputData["panels"] = %* []
for panel in panels.mitems:
case outputType:
of OutputType.local:
panel["title"] = %* replace(panel["title"].getStr(), "#0", "#${node}")
of OutputType.remote:
panel["title"] = %* replace(panel["title"].getStr(), "#0", "#${container}@${instance}")
panel["datasource"] = newJNull()
if panel.hasKey("targets"):
var targets = panel["targets"]
for target in targets.mitems:
case outputType:
of OutputType.local:
target["expr"] = %* replace(target["expr"].getStr(), "{node=\"0\"}", "{node=\"${node}\"}")
of OutputType.remote:
# The remote Prometheus instance polls once per minute, so the
# minimum rate() interval is 2 minutes.
target["expr"] = %* multiReplace(target["expr"].getStr(),
("{node=\"0\"}", "{job=\"beacon-node-metrics\",container=\"${container}\",instance=\"${instance}\"}"),
("sum(beacon_attestations_sent_total)", "sum(beacon_attestations_sent_total{job=\"beacon-node-metrics\",container=~\"beacon-node-testnet" & $testnet & "-.\"})"),
("[2s]", "[2m]"),
("[4s]) * 3", "[2m]) * 120"))
outputData["panels"].add(panel)
########
# misc #
########
case outputType:
of OutputType.local:
outputData["title"] = %* "NBC local testnet/sim (all nodes)"
outputData["uid"] = %* (outputData["uid"].getStr() & "a")
of OutputType.remote:
outputData["title"] = %* ("Nimbus testnet" & $testnet)
outputData["uid"] = %* (outputData["uid"].getStr() & $testnet)
# our annotations only work with a 1s resolution
var annotation = outputData["annotations"]["list"][0].copy()
annotation["datasource"] = %* "-- Grafana --"
outputData["annotations"]["list"] = %* [annotation]
writeFile(outputFilename, pretty(outputData))

View File

@ -31,7 +31,10 @@ source "${SIM_ROOT}/../../env.sh"
cd "$GIT_ROOT"
DATA_DIR="${SIMULATION_DIR}/node-$NODE_ID"
NODE_DATA_DIR="${SIMULATION_DIR}/node-$NODE_ID"
NODE_VALIDATORS_DIR=$NODE_DATA_DIR/validators/
NODE_SECRETS_DIR=$NODE_DATA_DIR/secrets/
PORT=$(( BASE_P2P_PORT + NODE_ID ))
NAT_ARG="--nat:extip:127.0.0.1"
@ -39,41 +42,47 @@ if [ "${NAT:-}" == "1" ]; then
NAT_ARG="--nat:any"
fi
mkdir -p "$DATA_DIR/validators"
rm -f $DATA_DIR/validators/*
rm -rf "$NODE_VALIDATORS_DIR"
mkdir -p "$NODE_VALIDATORS_DIR"
rm -rf "$NODE_SECRETS_DIR"
mkdir -p "$NODE_SECRETS_DIR"
VALIDATORS_PER_NODE=$((NUM_VALIDATORS / TOTAL_NODES))
if [[ $NODE_ID -lt $TOTAL_NODES ]]; then
VALIDATORS_PER_NODE=$((NUM_VALIDATORS / TOTAL_NODES))
VALIDATORS_PER_NODE_HALF=$((VALIDATORS_PER_NODE / 2))
FIRST_VALIDATOR_IDX=$(( VALIDATORS_PER_NODE * NODE_ID ))
# if using validator client binaries in addition to beacon nodes
# we will split the keys for this instance in half between the BN and the VC
if [ "${SPLIT_VALIDATORS_BETWEEN_BN_AND_VC:-}" == "yes" ]; then
LAST_VALIDATOR_IDX=$(( FIRST_VALIDATOR_IDX + VALIDATORS_PER_NODE_HALF - 1 ))
if [ "${BN_VC_VALIDATOR_SPLIT:-}" == "yes" ]; then
ATTACHED_VALIDATORS=$((VALIDATORS_PER_NODE / 2))
else
LAST_VALIDATOR_IDX=$(( FIRST_VALIDATOR_IDX + VALIDATORS_PER_NODE - 1 ))
ATTACHED_VALIDATORS=$VALIDATORS_PER_NODE
fi
pushd "$VALIDATORS_DIR" >/dev/null
cp $(seq -s " " -f v%07g.privkey $FIRST_VALIDATOR_IDX $LAST_VALIDATOR_IDX) "$DATA_DIR/validators"
for VALIDATOR in $(ls | tail -n +$(( ($VALIDATORS_PER_NODE * $NODE_ID) + 1 )) | head -n $ATTACHED_VALIDATORS); do
cp -a "$VALIDATOR" "$NODE_VALIDATORS_DIR"
cp -a "$SECRETS_DIR/$VALIDATOR" "$NODE_SECRETS_DIR"
done
popd >/dev/null
fi
rm -rf "$DATA_DIR/dump"
mkdir -p "$DATA_DIR/dump"
rm -rf "$NODE_DATA_DIR/dump"
mkdir -p "$NODE_DATA_DIR/dump"
SNAPSHOT_ARG=""
if [ -f "${SNAPSHOT_FILE}" ]; then
SNAPSHOT_ARG="--state-snapshot=${SNAPSHOT_FILE}"
fi
cd "$DATA_DIR"
cd "$NODE_DATA_DIR"
# if you want tracing messages, add "--log-level=TRACE" below
$BEACON_NODE_BIN \
--log-level=${LOG_LEVEL:-DEBUG} \
--bootstrap-file=$BOOTSTRAP_ADDRESS_FILE \
--data-dir=$DATA_DIR \
--data-dir=$NODE_DATA_DIR \
--secrets-dir=$NODE_SECRETS_DIR \
--node-name=$NODE_ID \
--tcp-port=$PORT \
--udp-port=$PORT \

View File

@ -15,26 +15,34 @@ source "${SIM_ROOT}/../../env.sh"
cd "$GIT_ROOT"
VC_DATA_DIR="${SIMULATION_DIR}/validator-$NODE_ID"
NODE_DATA_DIR="${SIMULATION_DIR}/validator-$NODE_ID"
NODE_VALIDATORS_DIR=$NODE_DATA_DIR/validators/
NODE_SECRETS_DIR=$NODE_DATA_DIR/secrets/
mkdir -p "$VC_DATA_DIR/validators"
rm -f $VC_DATA_DIR/validators/*
rm -rf "$NODE_VALIDATORS_DIR"
mkdir -p "$NODE_VALIDATORS_DIR"
rm -rf "$NODE_SECRETS_DIR"
mkdir -p "$NODE_SECRETS_DIR"
VALIDATORS_PER_NODE=$((NUM_VALIDATORS / TOTAL_NODES))
if [[ $NODE_ID -lt $TOTAL_NODES ]]; then
# we will split the keys for this instance in half between the BN and the VC
VALIDATORS_PER_NODE=$((NUM_VALIDATORS / TOTAL_NODES))
VALIDATORS_PER_NODE_HALF=$((VALIDATORS_PER_NODE / 2))
FIRST_VALIDATOR_IDX=$(( VALIDATORS_PER_NODE * NODE_ID + VALIDATORS_PER_NODE_HALF))
LAST_VALIDATOR_IDX=$(( FIRST_VALIDATOR_IDX + VALIDATORS_PER_NODE_HALF - 1 ))
ATTACHED_VALIDATORS=$((VALIDATORS_PER_NODE / 2))
pushd "$VALIDATORS_DIR" >/dev/null
cp $(seq -s " " -f v%07g.privkey $FIRST_VALIDATOR_IDX $LAST_VALIDATOR_IDX) "$VC_DATA_DIR/validators"
for VALIDATOR in $(ls | tail -n +$(( ($VALIDATORS_PER_NODE * $NODE_ID) + 1 + $ATTACHED_VALIDATORS )) | head -n $ATTACHED_VALIDATORS); do
cp -ar "$VALIDATOR" "$NODE_VALIDATORS_DIR"
cp -a "$SECRETS_DIR/$VALIDATOR" "$NODE_SECRETS_DIR"
done
popd >/dev/null
fi
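To make the index arithmetic above concrete: the beacon-node script takes the first half of a node's key window, and this validator-client script takes the second half. A small sketch with made-up numbers:

```bash
#!/bin/bash
# Illustrative split: node 1 owns listing entries 31..60; the BN gets
# 31..45 and the VC gets 46..60 (numbers are assumptions for the demo).
VALIDATORS_PER_NODE=30; NODE_ID=1; ATTACHED_VALIDATORS=$(( VALIDATORS_PER_NODE / 2 ))
seq 1 120 | tail -n +$(( VALIDATORS_PER_NODE * NODE_ID + 1 )) \
          | head -n "$ATTACHED_VALIDATORS"   # BN half: 31..45
seq 1 120 | tail -n +$(( VALIDATORS_PER_NODE * NODE_ID + 1 + ATTACHED_VALIDATORS )) \
          | head -n "$ATTACHED_VALIDATORS"   # VC half: 46..60
```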
cd "$VC_DATA_DIR"
cd "$NODE_DATA_DIR"
$VALIDATOR_CLIENT_BIN \
--log-level=${LOG_LEVEL:-DEBUG} \
--data-dir=$VC_DATA_DIR \
--data-dir=$NODE_DATA_DIR \
--secrets-dir=$NODE_SECRETS_DIR \
--rpc-port="$(( $BASE_RPC_PORT + $NODE_ID ))"

View File

@ -9,6 +9,7 @@ source "$(dirname "$0")/vars.sh"
cd "$SIM_ROOT"
mkdir -p "$SIMULATION_DIR"
mkdir -p "$VALIDATORS_DIR"
mkdir -p "$SECRETS_DIR"
cd "$GIT_ROOT"
@ -20,9 +21,6 @@ DEFS+="-d:MAX_COMMITTEES_PER_SLOT=${MAX_COMMITTEES_PER_SLOT:-1} " # Spec de
DEFS+="-d:SLOTS_PER_EPOCH=${SLOTS_PER_EPOCH:-6} " # Spec default: 32
DEFS+="-d:SECONDS_PER_SLOT=${SECONDS_PER_SLOT:-6} " # Spec default: 12
LAST_VALIDATOR_NUM=$(( NUM_VALIDATORS - 1 ))
LAST_VALIDATOR="$VALIDATORS_DIR/v$(printf '%07d' $LAST_VALIDATOR_NUM).deposit.json"
# Windows detection
if uname | grep -qiE "mingw|msys"; then
MAKE="mingw32-make"
@ -41,36 +39,30 @@ WAIT_GENESIS="${WAIT_GENESIS:-no}"
# Using tmux or multitail is an opt-in
USE_MULTITAIL="${USE_MULTITAIL:-no}"
type "$MULTITAIL" &>/dev/null || { echo "${MULTITAIL}" is missing; USE_MULTITAIL="no"; }
if [[ "$USE_MULTITAIL" != "no" ]]; then
type "$MULTITAIL" &>/dev/null || { echo "${MULTITAIL}" is missing; USE_MULTITAIL="no"; }
fi
USE_TMUX="${USE_TMUX:-no}"
type "$TMUX" &>/dev/null || { echo "${TMUX}" is missing; USE_TMUX="no"; }
if [[ "$USE_TMUX" != "no" ]]; then
type "$TMUX" &>/dev/null || { echo "${TMUX}" is missing; USE_TMUX="no"; }
fi
USE_GANACHE="${USE_GANACHE:-no}"
type "$GANACHE" &>/dev/null || { echo $GANACHE is missing; USE_GANACHE="no"; }
if [[ "$USE_GANACHE" != "no" ]]; then
type "$GANACHE" &>/dev/null || { echo $GANACHE is missing; USE_GANACHE="no"; }
fi
USE_PROMETHEUS="${LAUNCH_PROMETHEUS:-no}"
type "$PROMETHEUS" &>/dev/null || { echo $PROMETHEUS is missing; USE_PROMETHEUS="no"; }
USE_PROMETHEUS="${USE_PROMETHEUS:-no}"
if [[ "$USE_PROMETHEUS" != "no" ]]; then
type "$PROMETHEUS" &>/dev/null || { echo $PROMETHEUS is missing; USE_PROMETHEUS="no"; }
fi
# Prometheus config (continued inside the loop)
mkdir -p "${METRICS_DIR}"
cat > "${METRICS_DIR}/prometheus.yml" <<EOF
global:
scrape_interval: 1s
scrape_configs:
- job_name: "nimbus"
static_configs:
EOF
for i in $(seq $MASTER_NODE -1 $TOTAL_USER_NODES); do
# Prometheus config
cat >> "${METRICS_DIR}/prometheus.yml" <<EOF
- targets: ['127.0.0.1:$(( BASE_METRICS_PORT + i ))']
labels:
node: '$i'
EOF
done
./scripts/make_prometheus_config.sh \
--nodes ${TOTAL_NODES} \
--base-metrics-port ${BASE_METRICS_PORT} \
--config-file "${METRICS_DIR}/prometheus.yml"
COMMANDS=()
@ -110,10 +102,20 @@ if [[ "$USE_TMUX" != "no" ]]; then
$TMUX select-window -t "${TMUX_SESSION_NAME}:sim"
fi
$MAKE -j3 --no-print-directory NIMFLAGS="$CUSTOM_NIMFLAGS $DEFS" LOG_LEVEL="${LOG_LEVEL:-DEBUG}" beacon_node validator_client process_dashboard deposit_contract
$MAKE -j3 --no-print-directory NIMFLAGS="$CUSTOM_NIMFLAGS $DEFS" LOG_LEVEL="${LOG_LEVEL:-DEBUG}" beacon_node validator_client
count_files () {
{ ls -1q $1 2> /dev/null || true ; } | wc -l
}
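A quick usage sketch of `count_files` as defined above (the demo directory is an assumption): the glob must be passed quoted so it expands inside the function, where `$1` is deliberately left unquoted.

```bash
mkdir -p /tmp/cf_demo && touch /tmp/cf_demo/a.json /tmp/cf_demo/b.json
count_files "/tmp/cf_demo/*.json"   # prints 2
count_files "/tmp/cf_demo/*.yaml"   # prints 0 (the ls failure is swallowed)
```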
EXISTING_VALIDATORS=$(count_files "$VALIDATORS_DIR/*/deposit.json")
if [[ $EXISTING_VALIDATORS -lt $NUM_VALIDATORS ]]; then
rm -rf "$VALIDATORS_DIR"
rm -rf "$SECRETS_DIR"
if [ ! -f "${LAST_VALIDATOR}" ]; then
if [ "$WEB3_ARG" != "" ]; then
make deposit_contract
echo Deploying the validator deposit contract...
DEPOSIT_CONTRACT_ADDRESS=$($DEPLOY_DEPOSIT_CONTRACT_BIN deploy $WEB3_ARG)
echo Contract deployed at $DEPOSIT_CONTRACT_ADDRESS
@ -133,8 +135,9 @@ if [ ! -f "${LAST_VALIDATOR}" ]; then
fi
$BEACON_NODE_BIN makeDeposits \
--quickstart-deposits="${NUM_VALIDATORS}" \
--deposits-dir="$VALIDATORS_DIR" \
--count="${NUM_VALIDATORS}" \
--out-validators-dir="$VALIDATORS_DIR" \
--out-secrets-dir="$SECRETS_DIR" \
$MAKE_DEPOSITS_WEB3_ARG $DELAY_ARGS \
--deposit-contract="${DEPOSIT_CONTRACT_ADDRESS}"
@ -164,12 +167,6 @@ if [ -f "${MASTER_NODE_ADDRESS_FILE}" ]; then
rm "${MASTER_NODE_ADDRESS_FILE}"
fi
# use the exported Grafana dashboard for a single node to create one for all nodes
echo Creating grafana dashboards...
./build/process_dashboard \
--in="${SIM_ROOT}/beacon-chain-sim-node0-Grafana-dashboard.json" \
--out="${SIM_ROOT}/beacon-chain-sim-all-nodes-Grafana-dashboard.json"
# Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell
# instance as the parent and the target process name as a pattern to the
# "pkill" command.
@ -182,6 +179,7 @@ LAST_WAITING_NODE=0
function run_cmd {
i=$1
CMD=$2
bin_name=$3
if [[ "$USE_TMUX" != "no" ]]; then
echo "Starting node $i..."
echo $TMUX split-window -t "${TMUX_SESSION_NAME}" "$CMD"
@ -194,7 +192,7 @@ function run_cmd {
SLEEP="3"
fi
# "multitail" closes the corresponding panel when a command exits, so let's make sure it doesn't exit
COMMANDS+=( " -cT ansi -t 'node #$i' -l 'sleep $SLEEP; $CMD; echo [node execution completed]; while true; do sleep 100; done'" )
COMMANDS+=( " -cT ansi -t '$bin_name #$i' -l 'sleep $SLEEP; $CMD; echo [node execution completed]; while true; do sleep 100; done'" )
else
eval "${CMD}" &
fi
@ -212,11 +210,11 @@ for i in $(seq $MASTER_NODE -1 $TOTAL_USER_NODES); do
done
fi
run_cmd $i "${SIM_ROOT}/run_node.sh ${i} --verify-finalization"
run_cmd $i "${SIM_ROOT}/run_node.sh ${i} --verify-finalization" "node"
if [ "${SPLIT_VALIDATORS_BETWEEN_BN_AND_VC:-}" == "yes" ]; then
if [ "${BN_VC_VALIDATOR_SPLIT:-}" == "yes" ]; then
# start the VC with a few seconds of delay so that we can connect through RPC
run_cmd $i "sleep 3 && ${SIM_ROOT}/run_validator.sh ${i}"
run_cmd $i "sleep 3 && ${SIM_ROOT}/run_validator.sh ${i}" "validator"
fi
done

View File

@ -19,7 +19,7 @@ cd - &>/dev/null
# When changing these, also update the readme section on running simulation
# so that the run_node example is correct!
NUM_VALIDATORS=${VALIDATORS:-192}
NUM_VALIDATORS=${VALIDATORS:-128}
TOTAL_NODES=${NODES:-4}
TOTAL_USER_NODES=${USER_NODES:-0}
TOTAL_SYSTEM_NODES=$(( TOTAL_NODES - TOTAL_USER_NODES ))
@ -28,6 +28,7 @@ MASTER_NODE=$(( TOTAL_NODES - 1 ))
SIMULATION_DIR="${SIM_ROOT}/data"
METRICS_DIR="${SIM_ROOT}/prometheus"
VALIDATORS_DIR="${SIM_ROOT}/validators"
SECRETS_DIR="${SIM_ROOT}/secrets"
SNAPSHOT_FILE="${SIMULATION_DIR}/state_snapshot.ssz"
NETWORK_BOOTSTRAP_FILE="${SIMULATION_DIR}/bootstrap_nodes.txt"
BEACON_NODE_BIN="${GIT_ROOT}/build/beacon_node"
@ -45,6 +46,3 @@ else
WEB3_ARG=""
DEPOSIT_CONTRACT_ADDRESS="0x"
fi
# uncomment to enable the use of VCs in addition to BNs - will split the validators equally
#SPLIT_VALIDATORS_BETWEEN_BN_AND_VC="yes"

View File

@ -13,6 +13,9 @@ import
../beacon_chain/spec/[datatypes, digest, validator],
../beacon_chain/[beacon_node_types, block_pool, state_transition, ssz]
when isMainModule:
import chronicles # or some random compile error happens...
suiteReport "BlockRef and helpers" & preset():
timedTest "isAncestorOf sanity" & preset():
let
@ -175,7 +178,7 @@ suiteReport "Block pool processing" & preset():
check:
pool.get(b2Root).isNone() # Unresolved, shouldn't show up
FetchRecord(root: b1Root, historySlots: 1) in pool.checkMissing()
FetchRecord(root: b1Root) in pool.checkMissing()
check: pool.add(b1Root, b1).isOk
@ -367,3 +370,4 @@ when const_preset == "minimal": # These require some minutes in mainnet
hash_tree_root(pool.headState.data.data)
hash_tree_root(pool2.justifiedState.data.data) ==
hash_tree_root(pool.justifiedState.data.data)

View File

@ -19,45 +19,89 @@ suiteReport "Honest validator":
true
getAggregateAndProofsTopic(forkDigest) == "/eth2/00000000/beacon_aggregate_and_proof/ssz"
timedTest "Mainnet attestation topics":
check:
getMainnetAttestationTopic(forkDigest, 0) ==
"/eth2/00000000/committee_index0_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 9) ==
"/eth2/00000000/committee_index9_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 10) ==
"/eth2/00000000/committee_index10_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 11) ==
"/eth2/00000000/committee_index11_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 14) ==
"/eth2/00000000/committee_index14_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 22) ==
"/eth2/00000000/committee_index22_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 34) ==
"/eth2/00000000/committee_index34_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 46) ==
"/eth2/00000000/committee_index46_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 60) ==
"/eth2/00000000/committee_index60_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 63) ==
"/eth2/00000000/committee_index63_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 200) ==
"/eth2/00000000/committee_index8_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 400) ==
"/eth2/00000000/committee_index16_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 469) ==
"/eth2/00000000/committee_index21_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 550) ==
"/eth2/00000000/committee_index38_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 600) ==
"/eth2/00000000/committee_index24_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 613) ==
"/eth2/00000000/committee_index37_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 733) ==
"/eth2/00000000/committee_index29_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 775) ==
"/eth2/00000000/committee_index7_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 888) ==
"/eth2/00000000/committee_index56_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 995) ==
"/eth2/00000000/committee_index35_beacon_attestation/ssz"
when ETH2_SPEC == "v0.11.3":
timedTest "Mainnet attestation topics":
check:
getMainnetAttestationTopic(forkDigest, 0) ==
"/eth2/00000000/committee_index0_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 9) ==
"/eth2/00000000/committee_index9_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 10) ==
"/eth2/00000000/committee_index10_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 11) ==
"/eth2/00000000/committee_index11_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 14) ==
"/eth2/00000000/committee_index14_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 22) ==
"/eth2/00000000/committee_index22_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 34) ==
"/eth2/00000000/committee_index34_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 46) ==
"/eth2/00000000/committee_index46_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 60) ==
"/eth2/00000000/committee_index60_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 63) ==
"/eth2/00000000/committee_index63_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 200) ==
"/eth2/00000000/committee_index8_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 400) ==
"/eth2/00000000/committee_index16_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 469) ==
"/eth2/00000000/committee_index21_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 550) ==
"/eth2/00000000/committee_index38_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 600) ==
"/eth2/00000000/committee_index24_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 613) ==
"/eth2/00000000/committee_index37_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 733) ==
"/eth2/00000000/committee_index29_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 775) ==
"/eth2/00000000/committee_index7_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 888) ==
"/eth2/00000000/committee_index56_beacon_attestation/ssz"
getMainnetAttestationTopic(forkDigest, 995) ==
"/eth2/00000000/committee_index35_beacon_attestation/ssz"
else:
timedTest "Mainnet attestation topics":
check:
getAttestationTopic(forkDigest, 0) ==
"/eth2/00000000/beacon_attestation_0/ssz"
getAttestationTopic(forkDigest, 5) ==
"/eth2/00000000/beacon_attestation_5/ssz"
getAttestationTopic(forkDigest, 7) ==
"/eth2/00000000/beacon_attestation_7/ssz"
getAttestationTopic(forkDigest, 9) ==
"/eth2/00000000/beacon_attestation_9/ssz"
getAttestationTopic(forkDigest, 13) ==
"/eth2/00000000/beacon_attestation_13/ssz"
getAttestationTopic(forkDigest, 19) ==
"/eth2/00000000/beacon_attestation_19/ssz"
getAttestationTopic(forkDigest, 20) ==
"/eth2/00000000/beacon_attestation_20/ssz"
getAttestationTopic(forkDigest, 22) ==
"/eth2/00000000/beacon_attestation_22/ssz"
getAttestationTopic(forkDigest, 25) ==
"/eth2/00000000/beacon_attestation_25/ssz"
getAttestationTopic(forkDigest, 27) ==
"/eth2/00000000/beacon_attestation_27/ssz"
getAttestationTopic(forkDigest, 31) ==
"/eth2/00000000/beacon_attestation_31/ssz"
getAttestationTopic(forkDigest, 39) ==
"/eth2/00000000/beacon_attestation_39/ssz"
getAttestationTopic(forkDigest, 45) ==
"/eth2/00000000/beacon_attestation_45/ssz"
getAttestationTopic(forkDigest, 47) ==
"/eth2/00000000/beacon_attestation_47/ssz"
getAttestationTopic(forkDigest, 48) ==
"/eth2/00000000/beacon_attestation_48/ssz"
getAttestationTopic(forkDigest, 50) ==
"/eth2/00000000/beacon_attestation_50/ssz"
getAttestationTopic(forkDigest, 53) ==
"/eth2/00000000/beacon_attestation_53/ssz"
getAttestationTopic(forkDigest, 54) ==
"/eth2/00000000/beacon_attestation_54/ssz"
getAttestationTopic(forkDigest, 62) ==
"/eth2/00000000/beacon_attestation_62/ssz"
getAttestationTopic(forkDigest, 63) ==
"/eth2/00000000/beacon_attestation_63/ssz"

View File

@ -2,7 +2,7 @@
import
unittest, stint, ./testutil, stew/byteutils,
../beacon_chain/[extras, interop, ssz],
../beacon_chain/[interop, merkle_minimal, ssz],
../beacon_chain/spec/[beaconstate, crypto, datatypes]
# Interop test yaml, found here:
@ -119,7 +119,7 @@ suiteReport "Interop":
timedTest "Mocked start private key":
for i, k in privateKeys:
let
key = makeInteropPrivKey(i)[]
key = makeInteropPrivKey(i)
v = k.parse(UInt256, 16)
check:
@ -144,16 +144,14 @@ suiteReport "Interop":
var deposits: seq[Deposit]
for i in 0..<64:
let
privKey = makeInteropPrivKey(i)[]
deposits.add(makeDeposit(privKey.toPubKey(), privKey))
let privKey = makeInteropPrivKey(i)
deposits.add makeDeposit(privKey.toPubKey(), privKey)
attachMerkleProofs(deposits)
const genesis_time = 1570500000
var
# TODO this currently requires skipMerkleValidation to pass the test
# makeDeposit doesn't appear to produce a proof?
initialState = initialize_beacon_state_from_eth1(
eth1BlockHash, genesis_time, deposits, {skipMerkleValidation})
eth1BlockHash, genesis_time, deposits, {})
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
initialState.genesis_time = genesis_time

View File

@ -9,11 +9,14 @@
import
unittest, ./testutil, json,
stew/byteutils,
../beacon_chain/spec/keystore
stew/byteutils, blscurve,
../beacon_chain/spec/[crypto, keystore]
from strutils import replace
template `==`*(a, b: ValidatorPrivKey): bool =
blscurve.SecretKey(a) == blscurve.SecretKey(b)
const
scryptVector = """{
"crypto": {
@ -79,23 +82,27 @@ const
}""" #"
password = "testpassword"
secret = hexToSeqByte("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")
secretBytes = hexToSeqByte("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")
salt = hexToSeqByte("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
iv = hexToSeqByte("264daa3f303d7259501c93d997d84fe6")
suiteReport "Keystore":
setup:
let secret = ValidatorPrivKey.fromRaw(secretBytes).get
timedTest "Pbkdf2 decryption":
let decrypt = decryptKeystore(pbkdf2Vector, password)
let decrypt = decryptKeystore(KeyStoreContent pbkdf2Vector,
KeyStorePass password)
check decrypt.isOk
check secret == decrypt.get()
timedTest "Pbkdf2 encryption":
let encrypt = encryptKeystore[KdfPbkdf2](secret, password, salt=salt, iv=iv,
path="m/12381/60/0/0")
check encrypt.isOk
let encrypt = encryptKeystore(KdfPbkdf2, secret,
KeyStorePass password,
salt=salt, iv=iv,
path = validateKeyPath "m/12381/60/0/0")
var
encryptJson = parseJson(encrypt.get())
encryptJson = parseJson(encrypt.string)
pbkdf2Json = parseJson(pbkdf2Vector)
encryptJson{"uuid"} = %""
pbkdf2Json{"uuid"} = %""
@ -103,16 +110,27 @@ suiteReport "Keystore":
check encryptJson == pbkdf2Json
timedTest "Pbkdf2 errors":
check encryptKeystore[KdfPbkdf2](secret, "", salt = [byte 1]).isErr
check encryptKeystore[KdfPbkdf2](secret, "", iv = [byte 1]).isErr
expect Defect:
echo encryptKeystore(KdfPbkdf2, secret, salt = [byte 1]).string
check decryptKeystore(pbkdf2Vector, "wrong pass").isErr
check decryptKeystore(pbkdf2Vector, "").isErr
check decryptKeystore("{\"a\": 0}", "").isErr
check decryptKeystore("", "").isErr
expect Defect:
echo encryptKeystore(KdfPbkdf2, secret, iv = [byte 1]).string
check decryptKeystore(KeyStoreContent pbkdf2Vector,
KeyStorePass "wrong pass").isErr
check decryptKeystore(KeyStoreContent pbkdf2Vector,
KeyStorePass "").isErr
check decryptKeystore(KeyStoreContent "{\"a\": 0}",
KeyStorePass "").isErr
check decryptKeystore(KeyStoreContent "",
KeyStorePass "").isErr
template checkVariant(remove): untyped =
check decryptKeystore(pbkdf2Vector.replace(remove, ""), password).isErr
check decryptKeystore(KeyStoreContent pbkdf2Vector.replace(remove, ""),
KeyStorePass password).isErr
checkVariant "d4e5" # salt
checkVariant "18b1" # checksum
@ -122,4 +140,5 @@ suiteReport "Keystore":
var badKdf = parseJson(pbkdf2Vector)
badKdf{"crypto", "kdf", "function"} = %"invalid"
check decryptKeystore($badKdf, password).iserr
check decryptKeystore(KeyStoreContent $badKdf,
KeyStorePass password).iserr

View File

@ -36,5 +36,5 @@ asyncTest "connect two nodes":
c2.nat = "none"
var n2 = await createEth2Node(c2, ENRForkID())
await n2.connectToNetwork(@[n1PersistentAddress])
await n2.startLookingForPeers(@[n1PersistentAddress])

View File

@ -13,6 +13,9 @@ proc `$`*(peer: SomeTPeer): string =
proc updateScore(peer: SomeTPeer, score: int) =
discard
proc getFirstSlotAtFinalizedEpoch(): Slot =
Slot(0)
suite "SyncManager test suite":
proc createChain(start, finish: Slot): seq[SignedBeaconBlock] =
doAssert(start <= finish)
@ -30,7 +33,8 @@ suite "SyncManager test suite":
test "[SyncQueue] Start and finish slots equal":
let p1 = SomeTPeer()
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(0), 1'u64, syncUpdate)
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(0), 1'u64, syncUpdate,
getFirstSlotAtFinalizedEpoch)
check len(queue) == 1
var r11 = queue.pop(Slot(0), p1)
check len(queue) == 0
@ -45,7 +49,8 @@ suite "SyncManager test suite":
r11.slot == Slot(0) and r11.count == 1'u64 and r11.step == 1'u64
test "[SyncQueue] Two full requests success/fail":
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(1), 1'u64, syncUpdate)
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(1), 1'u64, syncUpdate,
getFirstSlotAtFinalizedEpoch)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
check len(queue) == 2
@ -72,7 +77,8 @@ suite "SyncManager test suite":
r22.slot == Slot(1) and r22.count == 1'u64 and r22.step == 1'u64
test "[SyncQueue] Full and incomplete success/fail start from zero":
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(4), 2'u64, syncUpdate)
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(4), 2'u64, syncUpdate,
getFirstSlotAtFinalizedEpoch)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
let p3 = SomeTPeer()
@ -110,7 +116,8 @@ suite "SyncManager test suite":
r33.slot == Slot(4) and r33.count == 1'u64 and r33.step == 1'u64
test "[SyncQueue] Full and incomplete success/fail start from non-zero":
var queue = SyncQueue.init(SomeTPeer, Slot(1), Slot(5), 3'u64, syncUpdate)
var queue = SyncQueue.init(SomeTPeer, Slot(1), Slot(5), 3'u64, syncUpdate,
getFirstSlotAtFinalizedEpoch)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
check len(queue) == 5
@ -137,7 +144,8 @@ suite "SyncManager test suite":
r42.slot == Slot(4) and r42.count == 2'u64 and r42.step == 1'u64
test "[SyncQueue] Smart and stupid success/fail":
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(4), 5'u64, syncUpdate)
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(4), 5'u64, syncUpdate,
getFirstSlotAtFinalizedEpoch)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
check len(queue) == 5
@ -164,7 +172,8 @@ suite "SyncManager test suite":
r52.slot == Slot(4) and r52.count == 1'u64 and r52.step == 1'u64
test "[SyncQueue] One smart and one stupid + debt split + empty":
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(4), 5'u64, syncUpdate)
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(4), 5'u64, syncUpdate,
getFirstSlotAtFinalizedEpoch)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
let p3 = SomeTPeer()
@ -210,7 +219,7 @@ suite "SyncManager test suite":
var chain = createChain(Slot(0), Slot(2))
var queue = SyncQueue.init(SomeTPeer, Slot(0), Slot(2), 1'u64,
syncReceiver, 1)
syncReceiver, getFirstSlotAtFinalizedEpoch, 1)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
let p3 = SomeTPeer()
@ -253,7 +262,7 @@ suite "SyncManager test suite":
var chain = createChain(Slot(5), Slot(11))
var queue = SyncQueue.init(SomeTPeer, Slot(5), Slot(11), 2'u64,
syncReceiver, 2)
syncReceiver, getFirstSlotAtFinalizedEpoch, 2)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
let p3 = SomeTPeer()
@ -303,7 +312,7 @@ suite "SyncManager test suite":
var chain = createChain(Slot(5), Slot(18))
var queue = SyncQueue.init(SomeTPeer, Slot(5), Slot(18), 2'u64,
syncReceiver, 2)
syncReceiver, getFirstSlotAtFinalizedEpoch, 2)
let p1 = SomeTPeer()
let p2 = SomeTPeer()
let p3 = SomeTPeer()

View File

@ -12,7 +12,7 @@ import
../beacon_chain/ssz/merkleization,
state_transition, validator_pool],
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
helpers, validator, state_transition_block]
helpers, validator, signatures]
func makeFakeValidatorPrivKey(i: int): ValidatorPrivKey =
# 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
@ -44,7 +44,6 @@ func makeDeposit(i: int, flags: UpdateFlags): Deposit =
privkey = makeFakeValidatorPrivKey(i)
pubkey = privkey.toPubKey()
withdrawal_credentials = makeFakeHash(i)
domain = compute_domain(DOMAIN_DEPOSIT, Version(GENESIS_FORK_VERSION))
result = Deposit(
data: DepositData(
@ -55,8 +54,7 @@ func makeDeposit(i: int, flags: UpdateFlags): Deposit =
)
if skipBLSValidation notin flags:
let signing_root = compute_signing_root(result.getDepositMessage, domain)
result.data.signature = bls_sign(privkey, signing_root.data)
result.data.signature = get_deposit_signature(result.data, privkey)
proc makeInitialDeposits*(
n = SLOTS_PER_EPOCH, flags: UpdateFlags = {}): seq[Deposit] =
@ -69,8 +67,7 @@ proc makeInitialDeposits*(
# and ideally (but not yet) efficiently only once calculating a Merkle
# tree utilizing as much of the shared substructure as feasible, means
# attaching proofs all together, as a separate step.
if skipMerkleValidation notin flags:
attachMerkleProofs(result)
attachMerkleProofs(result)
func signBlock*(
fork: Fork, genesis_validators_root: Eth2Digest, blck: BeaconBlock,

126
tools/process_dashboard.nim Normal file
View File

@ -0,0 +1,126 @@
import json, parseopt, strutils
# usage: process_dashboard --in=local_dashboard.json --out=remote_dashboard.json --testnet=0
var
p = initOptParser()
inputFileName, outputFilename: string
testnet = 0
while true:
p.next()
case p.kind:
of cmdEnd:
break
of cmdShortOption, cmdLongOption:
if p.key == "in":
inputFileName = p.val
elif p.key == "out":
outputFileName = p.val
elif p.key == "testnet":
testnet = p.val.parseInt()
else:
echo "unsupported argument: ", p.key
of cmdArgument:
echo "unsupported argument: ", p.key
var
inputData = parseFile(inputFileName)
panels = inputData["panels"].copy()
outputData = inputData
#############
# variables #
#############
outputData["templating"]["list"] = parseJson("""
[
{
"allValue": null,
"current": {
"tags": [],
"text": "beacon-node-testnet""" & $testnet & """-1",
"value": "beacon-node-testnet""" & $testnet & """-1"
},
"datasource": "master-01.do-ams3.metrics.hq",
"definition": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},container)",
"hide": 0,
"includeAll": false,
"index": -1,
"label": null,
"multi": false,
"name": "container",
"options": [],
"query": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},container)",
"refresh": 1,
"regex": "/.*testnet""" & $testnet & """.*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {
"tags": [],
"text": "master-01.aws-eu-central-1a.nimbus.test",
"value": "master-01.aws-eu-central-1a.nimbus.test"
},
"datasource": "master-01.do-ams3.metrics.hq",
"definition": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},instance)",
"hide": 0,
"includeAll": false,
"index": -1,
"label": null,
"multi": false,
"name": "instance",
"options": [],
"query": "label_values(process_virtual_memory_bytes{job=\"beacon-node-metrics\"},instance)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
]
""")
##########
# panels #
##########
outputData["panels"] = %* []
for panel in panels.mitems:
panel["title"] = %* replace(panel["title"].getStr(), "${node}", "${container}@${instance}")
panel["datasource"] = newJNull()
if panel.hasKey("targets"):
var targets = panel["targets"]
for target in targets.mitems:
# The remote Prometheus instance polls once per minute, so the
# minimum rate() interval is 2 minutes.
target["expr"] = %* multiReplace(target["expr"].getStr(),
("{node=\"${node}\"}", "{job=\"beacon-node-metrics\",container=\"${container}\",instance=\"${instance}\"}"),
("sum(beacon_attestations_sent_total)", "sum(beacon_attestations_sent_total{job=\"beacon-node-metrics\",container=~\"beacon-node-testnet" & $testnet & "-.\"})"),
("[2s]", "[2m]"),
("[4s]) * 3", "[2m]) * 120"))
outputData["panels"].add(panel)
########
# misc #
########
outputData["title"] = %* ("Nimbus testnet" & $testnet)
outputData["uid"] = %* (outputData["uid"].getStr()[0..^2] & $testnet)
# our annotations only work with a 1s resolution
var annotation = outputData["annotations"]["list"][0].copy()
annotation["datasource"] = %* "-- Grafana --"
outputData["annotations"]["list"] = %* [annotation]
writeFile(outputFilename, pretty(outputData))
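A usage sketch for the rewritten tool, mirroring the invocation earlier in this diff (the dashboard paths come from that call; the build step is an assumption based on the make targets seen above):

```bash
make process_dashboard   # assumed build target, as used earlier in this diff
./build/process_dashboard \
  --in="grafana/beacon_nodes_Grafana_dashboard.json" \
  --out="docker/remote_testnet0_Grafana_dashboard.json" \
  --testnet=0
```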

2
vendor/news vendored

@ -1 +1 @@
Subproject commit 1caa232d63f4607f90d5a9eb428fbe772e010d21
Subproject commit 55d3214c57a880d31ac7542364820e07f8c8abe5

2
vendor/nim-eth vendored

@ -1 +1 @@
Subproject commit be9a87848e068d68aa8fa1a7bfa07d7c7271eba7
Subproject commit 4d0a7a46ba38947b8daecb1b5ae817c82c8e16c5

@ -1 +1 @@
Subproject commit 81c24860e2622a15e05c81d15e3d1cc02c460870
Subproject commit 5df69fc6961e58205189cd92ae2477769fa8c4c0

Some files were not shown because too many files have changed in this diff.