Merge pull request #567 from status-im/devel

Monday release
This commit is contained in:
zah 2019-11-19 07:50:31 +00:00 committed by GitHub
commit b643ee945e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 165 additions and 151 deletions

View File

@ -60,8 +60,8 @@ task test, "Run all tests":
buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=minimal" buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=minimal"
buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=mainnet" buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=mainnet"
# State sim; getting into 3rd epoch useful # State sim; getting into 4th epoch useful to trigger consensus checks
buildBinary "state_sim", "research/", "-r -d:release", "--validators=128 --slots=24" buildBinary "state_sim", "research/", "-r -d:release", "--validators=128 --slots=40"
task sync_lfs_tests, "Sync LFS json tests": task sync_lfs_tests, "Sync LFS json tests":
# Syncs the json test files (but not the EF yaml tests) # Syncs the json test files (but not the EF yaml tests)

View File

@ -1,9 +1,9 @@
import import
# Standard library # Standard library
os, net, tables, random, strutils, times, strformat, memfiles, os, net, tables, random, strutils, times, memfiles,
# Nimble packages # Nimble packages
stew/[objects, bitseqs, byteutils], stew/ranges/ptr_arith, stew/[objects, bitseqs], stew/ranges/ptr_arith,
chronos, chronicles, confutils, metrics, chronos, chronicles, confutils, metrics,
json_serialization/std/[options, sets], serialization/errors, json_serialization/std/[options, sets], serialization/errors,
eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils, eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils,

View File

@ -174,7 +174,7 @@ type
slots*: uint64 # number of slots that are suspected missing slots*: uint64 # number of slots that are suspected missing
tries*: int tries*: int
BlockRef* = ref object {.acyclic.} BlockRef* {.acyclic.} = ref object
## Node in object graph guaranteed to lead back to tail block, and to have ## Node in object graph guaranteed to lead back to tail block, and to have
## a corresponding entry in database. ## a corresponding entry in database.
## Block graph should form a tree - in particular, there are no cycles. ## Block graph should form a tree - in particular, there are no cycles.

View File

@ -197,7 +197,7 @@ else:
proc createEth2Node*(conf: BeaconNodeConf, proc createEth2Node*(conf: BeaconNodeConf,
bootstrapNodes: seq[BootstrapAddr]): Future[Eth2Node] {.async.} = bootstrapNodes: seq[BootstrapAddr]): Future[Eth2Node] {.async.} =
var var
(extIp, extTcpPort, extUdpPort) = setupNat(conf) (extIp, extTcpPort, _) = setupNat(conf)
hostAddress = tcpEndPoint(globalListeningAddr, Port conf.tcpPort) hostAddress = tcpEndPoint(globalListeningAddr, Port conf.tcpPort)
announcedAddresses = if extIp == globalListeningAddr: @[] announcedAddresses = if extIp == globalListeningAddr: @[]
else: @[tcpEndPoint(extIp, extTcpPort)] else: @[tcpEndPoint(extIp, extTcpPort)]

View File

@ -45,4 +45,4 @@ proc fetchAncestorBlocks*(requestManager: RequestManager,
var fetchComplete = false var fetchComplete = false
for peer in requestManager.network.randomPeers(ParallelRequests, BeaconSync): for peer in requestManager.network.randomPeers(ParallelRequests, BeaconSync):
traceAsyncErrors peer.fetchAncestorBlocksFromPeer(roots.rand(), responseHandler) traceAsyncErrors peer.fetchAncestorBlocksFromPeer(roots.sample(), responseHandler)

View File

@ -12,7 +12,7 @@ import
./crypto, ./datatypes, ./digest, ./helpers, ./validator ./crypto, ./datatypes, ./digest, ./helpers, ./validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#is_valid_merkle_branch # https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#is_valid_merkle_branch
func is_valid_merkle_branch(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool = func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool =
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
## ``branch``. ## ``branch``.
var var

View File

@ -97,6 +97,7 @@ type
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#indexedattestation # https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#indexedattestation
IndexedAttestation* = object IndexedAttestation* = object
# TODO ValidatorIndex, but that doesn't serialize properly
attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE] attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
data*: AttestationData data*: AttestationData
signature*: ValidatorSig signature*: ValidatorSig
@ -205,25 +206,17 @@ type
## Needed to process attestations, older to newer ## Needed to process attestations, older to newer
state_roots*: array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] state_roots*: array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
historical_roots*: List[Eth2Digest, HISTORICAL_ROOTS_LIMIT]
historical_roots*: seq[Eth2Digest] ##\
## model List with HISTORICAL_ROOTS_LIMIT limit as seq
## TODO bound explicitly somewhere
# Eth1 # Eth1
eth1_data*: Eth1Data eth1_data*: Eth1Data
eth1_data_votes*: List[Eth1Data, SLOTS_PER_ETH1_VOTING_PERIOD]
eth1_data_votes*: seq[Eth1Data] ##\
## As with `historical_roots`, this is a `List`. TODO bound explicitly.
eth1_deposit_index*: uint64 eth1_deposit_index*: uint64
# Registry # Registry
# TODO List[] won't construct due to VALIDATOR_REGISTRY_LIMIT > high(int)
validators*: seq[Validator] validators*: seq[Validator]
balances*: seq[uint64] ##\ balances*: seq[uint64]
## Validator balances in Gwei!
## Also more `List`s which need to be bounded explicitly at
## VALIDATOR_REGISTRY_LIMIT
# Shuffling # Shuffling
randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest] randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
@ -233,8 +226,10 @@ type
## Per-epoch sums of slashed effective balances ## Per-epoch sums of slashed effective balances
# Attestations # Attestations
previous_epoch_attestations*: seq[PendingAttestation] previous_epoch_attestations*:
current_epoch_attestations*: seq[PendingAttestation] List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
current_epoch_attestations*:
List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
# Finality # Finality
justification_bits*: uint8 ##\ justification_bits*: uint8 ##\
@ -308,7 +303,7 @@ type
root*: Eth2Digest # hash_tree_root (not signing_root!) root*: Eth2Digest # hash_tree_root (not signing_root!)
StateCache* = object StateCache* = object
crosslink_committee_cache*: beacon_committee_cache*:
Table[tuple[a: int, b: Eth2Digest], seq[ValidatorIndex]] Table[tuple[a: int, b: Eth2Digest], seq[ValidatorIndex]]
active_validator_indices_cache*: active_validator_indices_cache*:
Table[Epoch, seq[ValidatorIndex]] Table[Epoch, seq[ValidatorIndex]]

View File

@ -232,10 +232,11 @@ proc process_justification_and_finalization*(
cat = "finalization" cat = "finalization"
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#rewards-and-penalties-1 # https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#rewards-and-penalties-1
func get_base_reward(state: BeaconState, index: ValidatorIndex): Gwei = func get_base_reward(state: BeaconState, index: ValidatorIndex,
let total_balance: auto): Gwei =
total_balance = get_total_active_balance(state) # Spec function recalculates total_balance every time, which creates an
effective_balance = state.validators[index].effective_balance # O(n^2) situation.
let effective_balance = state.validators[index].effective_balance
effective_balance * BASE_REWARD_FACTOR div effective_balance * BASE_REWARD_FACTOR div
integer_squareroot(total_balance) div BASE_REWARDS_PER_EPOCH integer_squareroot(total_balance) div BASE_REWARDS_PER_EPOCH
@ -274,9 +275,10 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
for index in eligible_validator_indices: for index in eligible_validator_indices:
if index in unslashed_attesting_indices: if index in unslashed_attesting_indices:
rewards[index] += rewards[index] +=
get_base_reward(state, index) * attesting_balance div total_balance get_base_reward(state, index, total_balance) * attesting_balance div
total_balance
else: else:
penalties[index] += get_base_reward(state, index) penalties[index] += get_base_reward(state, index, total_balance)
# Proposer and inclusion delay micro-rewards # Proposer and inclusion delay micro-rewards
## This depends on matching_source_attestations being an indexable seq, not a ## This depends on matching_source_attestations being an indexable seq, not a
@ -309,11 +311,12 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
if a.inclusion_delay < attestation.inclusion_delay: if a.inclusion_delay < attestation.inclusion_delay:
attestation = a attestation = a
let proposer_reward = let
(get_base_reward(state, index) div PROPOSER_REWARD_QUOTIENT).Gwei base_reward = get_base_reward(state, index, total_balance)
proposer_reward = (base_reward div PROPOSER_REWARD_QUOTIENT).Gwei
rewards[attestation.proposer_index.int] += proposer_reward rewards[attestation.proposer_index.int] += proposer_reward
let max_attester_reward = get_base_reward(state, index) - proposer_reward let max_attester_reward = base_reward - proposer_reward
rewards[index] += max_attester_reward div attestation.inclusion_delay rewards[index] += max_attester_reward div attestation.inclusion_delay
@ -325,7 +328,7 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
state, matching_target_attestations, stateCache) state, matching_target_attestations, stateCache)
for index in eligible_validator_indices: for index in eligible_validator_indices:
penalties[index] += penalties[index] +=
BASE_REWARDS_PER_EPOCH.uint64 * get_base_reward(state, index) BASE_REWARDS_PER_EPOCH.uint64 * get_base_reward(state, index, total_balance)
if index notin matching_target_attesting_indices: if index notin matching_target_attesting_indices:
penalties[index] += penalties[index] +=
state.validators[index].effective_balance * state.validators[index].effective_balance *
@ -426,9 +429,7 @@ proc process_epoch*(state: var BeaconState) =
## Caching here for get_beacon_committee(...) can break otherwise, since ## Caching here for get_beacon_committee(...) can break otherwise, since
## get_active_validator_indices(...) usually changes. ## get_active_validator_indices(...) usually changes.
# TODO is this cache still necessary/useful? presumably not, but can't remove clear(per_epoch_cache.beacon_committee_cache)
# quite yet
clear(per_epoch_cache.crosslink_committee_cache)
# @process_reveal_deadlines # @process_reveal_deadlines
# @process_challenge_deadlines # @process_challenge_deadlines

View File

@ -98,8 +98,8 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
endIdx = (len(indices).uint64 * (index + 1)) div count endIdx = (len(indices).uint64 * (index + 1)) div count
key = (indices.len, seed) key = (indices.len, seed)
if key notin stateCache.crosslink_committee_cache: if key notin stateCache.beacon_committee_cache:
stateCache.crosslink_committee_cache[key] = stateCache.beacon_committee_cache[key] =
get_shuffled_seq(seed, len(indices).uint64) get_shuffled_seq(seed, len(indices).uint64)
# These assertions from compute_shuffled_index(...) # These assertions from compute_shuffled_index(...)
@ -110,15 +110,13 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
# In spec, this calls get_shuffled_index() every time, but that's wasteful # In spec, this calls get_shuffled_index() every time, but that's wasteful
mapIt( mapIt(
start.int .. (endIdx.int-1), start.int .. (endIdx.int-1),
indices[stateCache.crosslink_committee_cache[key][it]]) indices[stateCache.beacon_committee_cache[key][it]])
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#get_beacon_committee # https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#get_beacon_committee
func get_beacon_committee*(state: BeaconState, slot: Slot, index: uint64, cache: var StateCache): seq[ValidatorIndex] = func get_beacon_committee*(state: BeaconState, slot: Slot, index: uint64, cache: var StateCache): seq[ValidatorIndex] =
# Return the beacon committee at ``slot`` for ``index``. # Return the beacon committee at ``slot`` for ``index``.
let let
epoch = compute_epoch_at_slot(slot) epoch = compute_epoch_at_slot(slot)
# TODO use state caching for this or not?
committees_per_slot = get_committee_count_at_slot(state, slot)
## This is a somewhat more fragile, but high-ROI, caching setup -- ## This is a somewhat more fragile, but high-ROI, caching setup --
## get_active_validator_indices() is slow to run in a loop and only ## get_active_validator_indices() is slow to run in a loop and only
@ -127,22 +125,22 @@ func get_beacon_committee*(state: BeaconState, slot: Slot, index: uint64, cache:
cache.active_validator_indices_cache[epoch] = cache.active_validator_indices_cache[epoch] =
get_active_validator_indices(state, epoch) get_active_validator_indices(state, epoch)
# TODO remove or replace this... # Constant throughout an epoch
#if epoch notin cache.committee_count_cache: if epoch notin cache.committee_count_cache:
# cache.committee_count_cache[epoch] = get_committee_count(state, epoch) cache.committee_count_cache[epoch] =
get_committee_count_at_slot(state, slot)
# TODO profiling & make sure caches populated
compute_committee( compute_committee(
cache.active_validator_indices_cache[epoch], cache.active_validator_indices_cache[epoch],
get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
(slot mod SLOTS_PER_EPOCH) * committees_per_slot + index, (slot mod SLOTS_PER_EPOCH) * cache.committee_count_cache[epoch] + index,
committees_per_slot * SLOTS_PER_EPOCH, cache.committee_count_cache[epoch] * SLOTS_PER_EPOCH,
cache cache
) )
# Not from spec # Not from spec
func get_empty_per_epoch_cache*(): StateCache = func get_empty_per_epoch_cache*(): StateCache =
result.crosslink_committee_cache = result.beacon_committee_cache =
initTable[tuple[a: int, b: Eth2Digest], seq[ValidatorIndex]]() initTable[tuple[a: int, b: Eth2Digest], seq[ValidatorIndex]]()
result.active_validator_indices_cache = result.active_validator_indices_cache =
initTable[Epoch, seq[ValidatorIndex]]() initTable[Epoch, seq[ValidatorIndex]]()

View File

@ -324,9 +324,8 @@ func getZeroHashWithoutSideEffect(idx: int): Eth2Digest =
# TODO this is a work-around for the somewhat broken side # TODO this is a work-around for the somewhat broken side
# effects analysis of Nim - reading from global let variables # effects analysis of Nim - reading from global let variables
# is considered a side-effect. # is considered a side-effect.
# Nim 0.19 doesn't have the `{.noSideEffect.}:` override, so {.noSideEffect.}:
# we should revisit this in Nim 0.20.2. zeroHashes[idx]
{.emit: "`result` = `zeroHashes`[`idx`];".}
func addChunk*(merkelizer: SszChunksMerkelizer, data: openarray[byte]) = func addChunk*(merkelizer: SszChunksMerkelizer, data: openarray[byte]) =
doAssert data.len > 0 and data.len <= bytesPerChunk doAssert data.len > 0 and data.len <= bytesPerChunk

View File

@ -52,7 +52,7 @@ func toSlot*(t: BeaconTime): tuple[afterGenesis: bool, slot: Slot] =
(false, Slot(uint64(-ti) div SECONDS_PER_SLOT)) (false, Slot(uint64(-ti) div SECONDS_PER_SLOT))
func toBeaconTime*(c: BeaconClock, t: Time): BeaconTime = func toBeaconTime*(c: BeaconClock, t: Time): BeaconTime =
BeaconTime(times.seconds(t - c.genesis)) BeaconTime(times.inSeconds(t - c.genesis))
func toSlot*(c: BeaconClock, t: Time): tuple[afterGenesis: bool, slot: Slot] = func toSlot*(c: BeaconClock, t: Time): tuple[afterGenesis: bool, slot: Slot] =
c.toBeaconTime(t).toSlot() c.toBeaconTime(t).toSlot()

View File

@ -63,7 +63,7 @@ proc sendDeposits*(
for i, dp in deposits: for i, dp in deposits:
web3.defaultAccount = eth1Addresses[i] web3.defaultAccount = eth1Addresses[i]
let depositContract = web3.contractSender(DepositContract, contractAddress) let depositContract = web3.contractSender(DepositContract, contractAddress)
let tx = await depositContract.deposit( discard await depositContract.deposit(
Bytes48(dp.data.pubKey.getBytes()), Bytes48(dp.data.pubKey.getBytes()),
Bytes32(dp.data.withdrawal_credentials.data), Bytes32(dp.data.withdrawal_credentials.data),
Bytes96(dp.data.signature.getBytes()), Bytes96(dp.data.signature.getBytes()),

View File

@ -1,66 +1,52 @@
FROM statusteam/nim-base AS build FROM debian:bullseye-slim AS build
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \ RUN apt-get -qq update \
&& apt-get -qq -y install build-essential make wget 2>/dev/null >/dev/null && apt-get -qq -y install build-essential make wget librocksdb-dev libpcre3-dev golang-go git &>/dev/null \
&& apt-get -qq clean
RUN export GO_TAR_GZ=go1.12.6.linux-amd64.tar.gz \ # let Docker cache this between Git revision and testnet version changes
&& cd /tmp/ \ RUN cd /root \
&& wget --progress=dot:giga https://dl.google.com/go/$GO_TAR_GZ \
&& tar -xf $GO_TAR_GZ \
&& mv go /usr/local \
&& rm $GO_TAR_GZ
ARG GIT_REVISION
RUN export GOROOT=/usr/local/go \
&& export PATH=$GOROOT/bin:$PATH \
&& git clone https://github.com/status-im/nim-beacon-chain.git \ && git clone https://github.com/status-im/nim-beacon-chain.git \
&& cd nim-beacon-chain \ && cd nim-beacon-chain \
&& git reset --hard ${GIT_REVISION} \ && { make &>/dev/null || true; } \
&& { make 2>/dev/null >/dev/null || true; } \ && make -j$(nproc) update \
&& make -j8 update \ && make deps
&& make deps \
&& cp vendor/go/bin/p2pd /usr/bin/p2pd \
&& cp docker/run_in_docker.sh /usr/bin/run_beacon_node.sh
ARG NETWORK # Please note that the commands above have the goal of caching the compilation
# of Nim and p2pd, but don't depend on the current git revision. This means
# that the cache can become outdated over time and you'll start seeing Nim
# being compiled on every run. If this happens, just prune your docker cache
# to get a fresh up-to-date version of Nim and p2pd.
ARG GIT_REVISION
ARG NETWORK_NIM_FLAGS ARG NETWORK_NIM_FLAGS
RUN cd nim-beacon-chain \ RUN cd /root/nim-beacon-chain \
&& ./env.sh nim \ && git fetch \
-o:/usr/bin/beacon_node \ && git reset --hard ${GIT_REVISION} \
-d:release \ && make -j$(nproc) update \
-d:insecure \ && make LOG_LEVEL=DEBUG NIMFLAGS="-d:release -d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node
--debugger:native \
--debugInfo \
--verbosity:0 \
--hints:off \
--warnings:off \
${NETWORK_NIM_FLAGS} \
-d:"chronicles_log_level=DEBUG" \
-d:"testnet_servers_image" \
c beacon_chain/beacon_node.nim
# --------------------------------- # # --------------------------------- #
# Starting new image to reduce size # # Starting new image to reduce size #
# --------------------------------- # # --------------------------------- #
FROM debian:9-slim FROM debian:bullseye-slim
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \ RUN apt-get -qq update \
&& apt-get -qq -y install librocksdb-dev psmisc 2>/dev/null >/dev/null \ && apt-get -qq -y install librocksdb-dev libpcre3 psmisc &>/dev/null \
&& apt-get -qq clean \ && apt-get -qq clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=build /usr/bin/beacon_node /usr/bin/beacon_node COPY run_in_docker.sh /usr/bin/run_beacon_node.sh
COPY --from=build /usr/bin/run_beacon_node.sh /usr/bin/run_beacon_node.sh COPY --from=build /root/nim-beacon-chain/build/beacon_node /usr/bin/beacon_node
COPY --from=build /usr/bin/p2pd /usr/bin/p2pd COPY --from=build /root/nim-beacon-chain/vendor/go/bin/p2pd /usr/bin/p2pd
MAINTAINER Zahary Karadjov <zahary@status.im> MAINTAINER Zahary Karadjov <zahary@status.im>
LABEL description="Nimbus installation that can act as an ETH2 network bootstrap node." LABEL description="Nimbus installation that can act as an ETH2 network bootstrap node."
ARG GIT_REVISION
RUN echo Built from Git revision: ${GIT_REVISION}
# TODO: This custom entry script is necessary only because we must clean up # TODO: This custom entry script is necessary only because we must clean up
# temporary files left by previous executions of the Go daemon. # temporary files left by previous executions of the Go daemon.
# We should be able to remove it once we have a native LibP2P impl. # We should be able to remove it once we have a native LibP2P impl.

View File

@ -1,3 +1,5 @@
SHELL := bash # the shell used internally by "make"
# These default settings can be overriden by exporting env variables # These default settings can be overriden by exporting env variables
GIT_REVISION ?= $(shell git rev-parse HEAD) GIT_REVISION ?= $(shell git rev-parse HEAD)
@ -7,16 +9,23 @@ NETWORK_NIM_FLAGS ?= $(shell ../scripts/load-testnet-nim-flags.sh $(NETWORK))
IMAGE_TAG ?= $(NETWORK) IMAGE_TAG ?= $(NETWORK)
IMAGE_NAME ?= statusteam/nimbus_beacon_node:$(IMAGE_TAG) IMAGE_NAME ?= statusteam/nimbus_beacon_node:$(IMAGE_TAG)
CURRENT_BRANCH = $(shell git rev-parse --abbrev-ref HEAD)
COMPUTER_SAYS_NO = { echo "I'm sorry, Dave. I'm afraid I can't do that."; exit 1; }
build: build:
@ DOCKER_BUILDKIT=1 \
docker build \ docker build \
--build-arg="GIT_REVISION=$(GIT_REVISION)" \ --build-arg="GIT_REVISION=$(GIT_REVISION)" \
--build-arg="NETWORK=$(NETWORK)" \
--build-arg="NETWORK_NIM_FLAGS=$(NETWORK_NIM_FLAGS)" \ --build-arg="NETWORK_NIM_FLAGS=$(NETWORK_NIM_FLAGS)" \
-t $(IMAGE_NAME) . -t $(IMAGE_NAME) \
--progress=plain \
.
push: build push: build
docker push $(IMAGE_NAME) +@ $(MAKE) push-last
push-last: push-last:
@ [[ "$(CURRENT_BRANCH)" != "master" && "$(NETWORK)" == "testnet0" ]] && $(COMPUTER_SAYS_NO) || true
@ [[ "$(CURRENT_BRANCH)" != "devel" && "$(NETWORK)" == "testnet1" ]] && $(COMPUTER_SAYS_NO) || true
docker push $(IMAGE_NAME) docker push $(IMAGE_NAME)

View File

@ -37,8 +37,24 @@ proc writeJson*(prefix, slot, v: auto) =
let fileName = fmt"{prefix:04}-{shortLog(slot):08}.json" let fileName = fmt"{prefix:04}-{shortLog(slot):08}.json"
Json.saveFile(fileName, v, pretty = true) Json.saveFile(fileName, v, pretty = true)
cli do(slots = 448'u, func verifyConsensus(state: BeaconState, attesterRatio: auto) =
validators = SLOTS_PER_EPOCH * 9, # One per shard is minimum if attesterRatio < 0.63:
doAssert state.current_justified_checkpoint.epoch == 0
doAssert state.finalized_checkpoint.epoch == 0
# Quorum is 2/3 of validators, and at low numbers, quantization effects
# can dominate, so allow for play above/below attesterRatio of 2/3.
if attesterRatio < 0.72:
return
let current_epoch = get_current_epoch(state)
if current_epoch >= 3:
doAssert state.current_justified_checkpoint.epoch + 1 >= current_epoch
if current_epoch >= 4:
doAssert state.finalized_checkpoint.epoch + 2 >= current_epoch
cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 11, # One per shard is minimum
json_interval = SLOTS_PER_EPOCH, json_interval = SLOTS_PER_EPOCH,
prefix = 0, prefix = 0,
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.75, attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.75,
@ -69,6 +85,7 @@ cli do(slots = 448'u,
for i in 0..<slots: for i in 0..<slots:
maybeWrite() maybeWrite()
verifyConsensus(state, attesterRatio)
let let
attestations_idx = state.slot attestations_idx = state.slot
@ -113,11 +130,13 @@ cli do(slots = 448'u,
for v in scas: for v in scas:
if (rand(r, high(int)).float * attesterRatio).int <= high(int): if (rand(r, high(int)).float * attesterRatio).int <= high(int):
if first: if first:
attestation = makeAttestation(state, latest_block_root, v, flags) attestation =
makeAttestation(state, latest_block_root, v, cache, flags)
first = false first = false
else: else:
attestation.combine( attestation.combine(
makeAttestation(state, latest_block_root, v, flags), flags) makeAttestation(state, latest_block_root, v, cache, flags),
flags)
if not first: if not first:
# add the attestation if any of the validators attested, as given # add the attestation if any of the validators attested, as given

View File

@ -18,7 +18,6 @@ add_var () {
add_var CONST_PRESET add_var CONST_PRESET
add_var NETWORK_TYPE add_var NETWORK_TYPE
add_var SLOTS_PER_EPOCH add_var SLOTS_PER_EPOCH
add_var SLOTS_PER_EPOCH
add_var MAX_COMMITTEES_PER_SLOT add_var MAX_COMMITTEES_PER_SLOT
echo $NIM_FLAGS echo $NIM_FLAGS

View File

@ -8,6 +8,10 @@ export NETWORK=$1
export NETWORK_NIM_FLAGS=$(./load-testnet-nim-flags.sh $NETWORK) export NETWORK_NIM_FLAGS=$(./load-testnet-nim-flags.sh $NETWORK)
export GIT_REVISION=$(git rev-parse HEAD) export GIT_REVISION=$(git rev-parse HEAD)
set -a
source $NETWORK.env
set +a
cd .. cd ..
if [ -f .env ]; then if [ -f .env ]; then
@ -20,10 +24,11 @@ echo Execution plan:
echo "Testnet name : $NETWORK" echo "Testnet name : $NETWORK"
echo "Bootstrap node hostname : ${BOOTSTRAP_HOST:="master-01.do-ams3.nimbus.test.statusim.net"}" echo "Bootstrap node hostname : ${BOOTSTRAP_HOST:="master-01.do-ams3.nimbus.test.statusim.net"}"
echo "Bootstrap node ip : ${BOOTSTRAP_IP:="$(dig +short $BOOTSTRAP_HOST)"}" echo "Bootstrap node ip : ${BOOTSTRAP_IP:="$(dig +short $BOOTSTRAP_HOST)"}"
echo "Bootstrap node port : ${BOOTSTRAP_PORT:=9000}"
echo "Reset testnet at end : ${PUBLISH_TESTNET_RESETS:="1"}" echo "Reset testnet at end : ${PUBLISH_TESTNET_RESETS:="1"}"
echo "Testnet metadata repo : ${ETH2_TESTNETS_GIT_URL:="git@github.com:${ETH2_TESTNETS_ORG:=eth2-clients}/eth2-testnets"}" echo "Testnet metadata repo : ${ETH2_TESTNETS_GIT_URL:="git@github.com:${ETH2_TESTNETS_ORG:=eth2-clients}/eth2-testnets"}"
echo "Testnet metadata dir : ${ETH2_TESTNETS:="build/eth2-testnets"}" echo "Testnet metadata dir : ${ETH2_TESTNETS:="build/eth2-testnets"}"
echo "Beacon node data dir : ${DATA_DIR:="build/testnet-reset-data"}" echo "Beacon node data dir : ${DATA_DIR:="build/testnet-reset-data/$NETWORK"}"
echo "Nim build flags : $NETWORK_NIM_FLAGS" echo "Nim build flags : $NETWORK_NIM_FLAGS"
while true; do while true; do

View File

@ -44,8 +44,7 @@ proc merkleTreeFromLeaves*(
values: openarray[Eth2Digest], values: openarray[Eth2Digest],
Depth: static[int] = DEPOSIT_CONTRACT_TREE_DEPTH Depth: static[int] = DEPOSIT_CONTRACT_TREE_DEPTH
): SparseMerkleTree[Depth] = ): SparseMerkleTree[Depth] =
## Depth should be the same as ## Depth should be the same as is_valid_merkle_branch
## verify_merkle_branch / is_valid_merkle_branch
result.nnznodes[0] = @values result.nnznodes[0] = @values
@ -150,7 +149,7 @@ when isMainModule: # Checks
let proof = getMerkleProof(tree, index) let proof = getMerkleProof(tree, index)
echo "Proof: ", proof echo "Proof: ", proof
doAssert verify_merkle_branch( doAssert is_valid_merkle_branch(
a, get_merkle_proof(tree, index = index), a, get_merkle_proof(tree, index = index),
depth = `depth`, depth = `depth`,
index = index.uint64, index = index.uint64,
@ -163,7 +162,7 @@ when isMainModule: # Checks
let proof = getMerkleProof(tree, index) let proof = getMerkleProof(tree, index)
# echo "Proof: ", proof # echo "Proof: ", proof
doAssert verify_merkle_branch( doAssert is_valid_merkle_branch(
b, get_merkle_proof(tree, index = index), b, get_merkle_proof(tree, index = index),
depth = `depth`, depth = `depth`,
index = index.uint64, index = index.uint64,
@ -176,7 +175,7 @@ when isMainModule: # Checks
let proof = getMerkleProof(tree, index) let proof = getMerkleProof(tree, index)
# echo "Proof: ", proof # echo "Proof: ", proof
doAssert verify_merkle_branch( doAssert is_valid_merkle_branch(
c, get_merkle_proof(tree, index = index), c, get_merkle_proof(tree, index = index),
depth = `depth`, depth = `depth`,
index = index.uint64, index = index.uint64,

View File

@ -100,9 +100,6 @@ proc mockAttestationImpl(
var cache = get_empty_per_epoch_cache() var cache = get_empty_per_epoch_cache()
let let
committees_per_slot = get_committee_count_at_slot(
state, slot)
beacon_committee = get_beacon_committee( beacon_committee = get_beacon_committee(
state, state,
result.data.slot, result.data.slot,

View File

@ -14,7 +14,7 @@ import
# 0.19.6 shims # 0.19.6 shims
stew/objects, # import default stew/objects, # import default
# Specs # Specs
../../beacon_chain/spec/[datatypes, crypto, helpers, digest], ../../beacon_chain/spec/[datatypes, crypto, helpers, digest, beaconstate],
# Internals # Internals
../../beacon_chain/[ssz, extras], ../../beacon_chain/[ssz, extras],
# Mocking procs # Mocking procs
@ -141,15 +141,17 @@ template mockGenesisDepositsImpl(
# 4th loop - append proof # 4th loop - append proof
for valIdx in 0 ..< validatorCount.int: for valIdx in 0 ..< validatorCount.int:
when false: # TODO # TODO ensure genesis & deposit process tests suffice to catch whether
# changes here break things; ensure that this matches the merkle proof
# sequence is_valid_merkle_branch(...) now looks for
result[valIdx].proof[0..31] = tree.getMerkleProof(valIdx) result[valIdx].proof[0..31] = tree.getMerkleProof(valIdx)
result[valIdx].proof[32] = int_to_bytes32(index + 1) result[valIdx].proof[32] =
doAssert: Eth2Digest(data: int_to_bytes32((valIdx + 1).uint64))
verify_merkle_branch( doAssert is_valid_merkle_branch(
depositsDataHash[valIdx], depositsDataHash[valIdx],
result[valIdx].proof, result[valIdx].proof,
DEPOSIT_CONTRACT_TREE_DEPTH, DEPOSIT_CONTRACT_TREE_DEPTH,
valIdx, valIdx.uint64,
root root
) )
@ -198,6 +200,7 @@ proc mockUpdateStateForNewDeposit*(
flags: UpdateFlags flags: UpdateFlags
): Deposit = ): Deposit =
# TODO withdrawal credentials # TODO withdrawal credentials
mockDepositData( mockDepositData(
@ -209,11 +212,11 @@ proc mockUpdateStateForNewDeposit*(
flags flags
) )
let tree = merkleTreeFromLeaves([hash_tree_root(result.data)])
when false: # TODO when false: # TODO
let tree = merkleTreeFromLeaves([hash_tree_root(result.data)])
result[valIdx].proof[0..31] = tree.getMerkleProof(0) result[valIdx].proof[0..31] = tree.getMerkleProof(0)
result[valIdx].proof[32] = int_to_bytes32(0 + 1) result[valIdx].proof[32] = int_to_bytes32(0 + 1)
# doAssert: verify_merkle_branch(...) # doAssert is_valid_merkle_branch(...)
# TODO: this logic from the eth2.0-specs test suite seems strange # TODO: this logic from the eth2.0-specs test suite seems strange
# but confirmed by running it # but confirmed by running it

View File

@ -62,7 +62,7 @@ suite "Official - Sanity - Blocks " & preset():
expect(AssertionError): expect(AssertionError):
# assert in process_slots. This should not be triggered # assert in process_slots. This should not be triggered
# for blocks from block_pool/network # for blocks from block_pool/network
let done = state_transition(stateRef[], blck, flags = {skipValidation}) discard state_transition(stateRef[], blck, flags = {skipValidation})
runValidTest("Same slot block transition", same_slot_block_transition, 1) runValidTest("Same slot block transition", same_slot_block_transition, 1)
runValidTest("Empty block transition", empty_block_transition, 1) runValidTest("Empty block transition", empty_block_transition, 1)
@ -77,7 +77,7 @@ suite "Official - Sanity - Blocks " & preset():
let blck = parseTest(testDir/"blocks_0.ssz", SSZ, BeaconBlock) let blck = parseTest(testDir/"blocks_0.ssz", SSZ, BeaconBlock)
expect(AssertionError): expect(AssertionError):
let done = state_transition(stateRef[], blck, flags = {skipValidation}) discard state_transition(stateRef[], blck, flags = {skipValidation})
runValidTest("Skipped Slots", skipped_slots, 1) runValidTest("Skipped Slots", skipped_slots, 1)
runValidTest("Empty epoch transition", empty_epoch_transition, 1) runValidTest("Empty epoch transition", empty_epoch_transition, 1)

View File

@ -45,7 +45,7 @@ suite "Attestation pool processing" & preset():
beacon_committee = get_beacon_committee(state.data.data, beacon_committee = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache) state.data.data.slot, 0, cache)
attestation = makeAttestation( attestation = makeAttestation(
state.data.data, state.blck.root, beacon_committee[0]) state.data.data, state.blck.root, beacon_committee[0], cache)
pool.add(state.data.data, state.blck, attestation) pool.add(state.data.data, state.blck, attestation)
@ -65,7 +65,7 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data, bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache) state.data.data.slot, 0, cache)
attestation0 = makeAttestation( attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0]) state.data.data, state.blck.root, bc0[0], cache)
process_slots(state.data, state.data.data.slot + 1) process_slots(state.data, state.data.data.slot + 1)
@ -73,7 +73,7 @@ suite "Attestation pool processing" & preset():
bc1 = get_beacon_committee(state.data.data, bc1 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache) state.data.data.slot, 0, cache)
attestation1 = makeAttestation( attestation1 = makeAttestation(
state.data.data, state.blck.root, bc1[0]) state.data.data, state.blck.root, bc1[0], cache)
# test reverse order # test reverse order
pool.add(state.data.data, state.blck, attestation1) pool.add(state.data.data, state.blck, attestation1)
@ -95,9 +95,9 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data, bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache) state.data.data.slot, 0, cache)
attestation0 = makeAttestation( attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0]) state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation( attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1]) state.data.data, state.blck.root, bc0[1], cache)
pool.add(state.data.data, state.blck, attestation0) pool.add(state.data.data, state.blck, attestation0)
pool.add(state.data.data, state.blck, attestation1) pool.add(state.data.data, state.blck, attestation1)
@ -119,9 +119,9 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data, bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache) state.data.data.slot, 0, cache)
attestation0 = makeAttestation( attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0]) state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation( attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1]) state.data.data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1, {skipValidation}) attestation0.combine(attestation1, {skipValidation})
@ -144,9 +144,9 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data, bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache) state.data.data.slot, 0, cache)
attestation0 = makeAttestation( attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0]) state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation( attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1]) state.data.data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1, {skipValidation}) attestation0.combine(attestation1, {skipValidation})

View File

@ -7,9 +7,10 @@
{.used.} {.used.}
import import unittest
unittest,
../beacon_chain/beacon_node when false:
import ../beacon_chain/beacon_node
suite "Beacon node": suite "Beacon node":
# Compile test # Compile test

View File

@ -85,10 +85,10 @@ suite "Block processing" & preset():
let let
# Create an attestation for slot 1 signed by the only attester we have! # Create an attestation for slot 1 signed by the only attester we have!
crosslink_committee = beacon_committee =
get_beacon_committee(state, state.slot, 0, cache) get_beacon_committee(state, state.slot, 0, cache)
attestation = makeAttestation( attestation = makeAttestation(
state, previous_block_root, crosslink_committee[0]) state, previous_block_root, beacon_committee[0], cache)
# Some time needs to pass before attestations are included - this is # Some time needs to pass before attestations are included - this is
# to let the attestation propagate properly to interested participants # to let the attestation propagate properly to interested participants

View File

@ -7,9 +7,10 @@
{.used.} {.used.}
import import unittest
unittest,
../beacon_chain/sync_protocol when false:
import ../beacon_chain/sync_protocol
suite "Sync protocol": suite "Sync protocol":
# Compile test # Compile test

View File

@ -158,9 +158,9 @@ proc makeBlock*(
addBlock(next_state, previous_block_root, body) addBlock(next_state, previous_block_root, body)
proc find_beacon_committee( proc find_beacon_committee(
state: BeaconState, validator_index: ValidatorIndex): auto = state: BeaconState, validator_index: ValidatorIndex,
cache: var StateCache): auto =
let epoch = compute_epoch_at_slot(state.slot) let epoch = compute_epoch_at_slot(state.slot)
var cache = get_empty_per_epoch_cache()
for epoch_committee_index in 0'u64 ..< get_committee_count_at_slot( for epoch_committee_index in 0'u64 ..< get_committee_count_at_slot(
state, epoch.compute_start_slot_at_epoch) * SLOTS_PER_EPOCH: state, epoch.compute_start_slot_at_epoch) * SLOTS_PER_EPOCH:
let let
@ -174,14 +174,16 @@ proc find_beacon_committee(
proc makeAttestation*( proc makeAttestation*(
state: BeaconState, beacon_block_root: Eth2Digest, state: BeaconState, beacon_block_root: Eth2Digest,
validator_index: ValidatorIndex, flags: UpdateFlags = {}): Attestation = validator_index: ValidatorIndex, cache: var StateCache,
flags: UpdateFlags = {}): Attestation =
let let
(committee, slot, index) = find_beacon_committee(state, validator_index) (committee, slot, index) =
find_beacon_committee(state, validator_index, cache)
validator = state.validators[validator_index] validator = state.validators[validator_index]
sac_index = committee.find(validator_index) sac_index = committee.find(validator_index)
data = makeAttestationData(state, slot, index, beacon_block_root) data = makeAttestationData(state, slot, index, beacon_block_root)
doAssert sac_index != -1, "find_shard_committee should guarantee this" doAssert sac_index != -1, "find_beacon_committee should guarantee this"
var aggregation_bits = CommitteeValidatorsBits.init(committee.len) var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
aggregation_bits.raiseBit sac_index aggregation_bits.raiseBit sac_index

@ -1 +1 @@
Subproject commit 798b1019105a80fe0f1b49e9f95a4bbab4c29e70 Subproject commit 28e88efada2ae62cecf51b29ea52c69d842db051