Merge pull request #567 from status-im/devel

Monday release
zah 2019-11-19 07:50:31 +00:00 committed by GitHub
commit b643ee945e
27 changed files with 165 additions and 151 deletions

View File

@ -60,8 +60,8 @@ task test, "Run all tests":
buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=minimal"
buildBinary "all_fixtures_require_ssz", "tests/official/", "-r -d:release -d:chronicles_log_level=DEBUG -d:const_preset=mainnet"
# State sim; getting into 3rd epoch useful
buildBinary "state_sim", "research/", "-r -d:release", "--validators=128 --slots=24"
# State sim; getting into 4th epoch useful to trigger consensus checks
buildBinary "state_sim", "research/", "-r -d:release", "--validators=128 --slots=40"
task sync_lfs_tests, "Sync LFS json tests":
# Syncs the json test files (but not the EF yaml tests)

View File

@ -1,9 +1,9 @@
import
# Standard library
os, net, tables, random, strutils, times, strformat, memfiles,
os, net, tables, random, strutils, times, memfiles,
# Nimble packages
stew/[objects, bitseqs, byteutils], stew/ranges/ptr_arith,
stew/[objects, bitseqs], stew/ranges/ptr_arith,
chronos, chronicles, confutils, metrics,
json_serialization/std/[options, sets], serialization/errors,
eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils,

View File

@ -174,7 +174,7 @@ type
slots*: uint64 # number of slots that are suspected missing
tries*: int
BlockRef* = ref object {.acyclic.}
BlockRef* {.acyclic.} = ref object
## Node in object graph guaranteed to lead back to tail block, and to have
## a corresponding entry in database.
## Block graph should form a tree - in particular, there are no cycles.
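The `{.acyclic.}` pragma moves in front of `= ref object`, the placement newer Nim expects for type pragmas. A minimal sketch of the same idea, using a hypothetical `Node` type rather than the project's `BlockRef`:

type
  Node {.acyclic.} = ref object
    # {.acyclic.} promises the GC that instances never form reference cycles,
    # so cycle detection can be skipped for them.
    parent: Node          # links only point "up", so no cycles by construction
    payload: string

let tail = Node(payload: "tail")
let head = Node(parent: tail, payload: "head")
echo head.parent.payload   # "tail"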

View File

@ -197,7 +197,7 @@ else:
proc createEth2Node*(conf: BeaconNodeConf,
bootstrapNodes: seq[BootstrapAddr]): Future[Eth2Node] {.async.} =
var
(extIp, extTcpPort, extUdpPort) = setupNat(conf)
(extIp, extTcpPort, _) = setupNat(conf)
hostAddress = tcpEndPoint(globalListeningAddr, Port conf.tcpPort)
announcedAddresses = if extIp == globalListeningAddr: @[]
else: @[tcpEndPoint(extIp, extTcpPort)]
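The unused UDP port returned by `setupNat` is now dropped with `_` in the tuple unpacking. A tiny self-contained sketch of that idiom, with a made-up `fakeSetupNat` standing in for the real NAT helper:

# Sketch: `_` in a tuple unpacking discards a component that is never read,
# avoiding an "unused variable" hint for values like the UDP port here.
proc fakeSetupNat(): (string, int, int) =
  ("192.0.2.1", 9000, 9000)   # placeholder address/ports, not real NAT logic

let (extIp, extTcpPort, _) = fakeSetupNat()
echo extIp, ":", extTcpPort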

View File

@ -45,4 +45,4 @@ proc fetchAncestorBlocks*(requestManager: RequestManager,
var fetchComplete = false
for peer in requestManager.network.randomPeers(ParallelRequests, BeaconSync):
traceAsyncErrors peer.fetchAncestorBlocksFromPeer(roots.rand(), responseHandler)
traceAsyncErrors peer.fetchAncestorBlocksFromPeer(roots.sample(), responseHandler)
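`roots.rand()` becomes `roots.sample()`, the stdlib `random` proc for picking a uniformly random element of a sequence. A small sketch, with string stand-ins for the block roots:

import random

# Sketch: `sample` returns one element of the openArray, chosen uniformly.
randomize()                                    # seed the default RNG
let roots = @["root-a", "root-b", "root-c"]    # stand-ins for Eth2Digest roots
echo roots.sample()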

View File

@ -12,7 +12,7 @@ import
./crypto, ./datatypes, ./digest, ./helpers, ./validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#is_valid_merkle_branch
func is_valid_merkle_branch(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool =
func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool =
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
## ``branch``.
var
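With the export marker added, other modules can call `is_valid_merkle_branch` directly. A toy sketch of the branch walk the docstring describes, using string concatenation in place of real hashing; `toyHash` and the small test vector are made up purely for illustration:

# Toy sketch only: a fake "hash" (string concatenation) replaces Eth2Digest
# hashing, to show how the branch is folded from leaf to root using the bits
# of `index` to decide left/right at each level.
proc toyHash(a, b: string): string = "H(" & a & "|" & b & ")"

proc toyIsValidMerkleBranch(leaf: string, branch: openArray[string],
                            depth, index: int, root: string): bool =
  var value = leaf
  for i in 0 ..< depth:
    if ((index shr i) and 1) == 1:
      value = toyHash(branch[i], value)   # our node is the right child
    else:
      value = toyHash(value, branch[i])   # our node is the left child
  value == root

let branch = @["sib0", "sib1"]
let root = toyHash("sib1", toyHash("sib0", "leaf"))
echo toyIsValidMerkleBranch("leaf", branch, 2, 3, root)   # true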

View File

@ -97,6 +97,7 @@ type
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#indexedattestation
IndexedAttestation* = object
# TODO ValidatorIndex, but that doesn't serialize properly
attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
data*: AttestationData
signature*: ValidatorSig
@ -205,25 +206,17 @@ type
## Needed to process attestations, older to newer
state_roots*: array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
historical_roots*: seq[Eth2Digest] ##\
## model List with HISTORICAL_ROOTS_LIMIT limit as seq
## TODO bound explicitly somewhere
historical_roots*: List[Eth2Digest, HISTORICAL_ROOTS_LIMIT]
# Eth1
eth1_data*: Eth1Data
eth1_data_votes*: seq[Eth1Data] ##\
## As with `historical_roots`, this is a `List`. TODO bound explicitly.

eth1_data_votes*: List[Eth1Data, SLOTS_PER_ETH1_VOTING_PERIOD]
eth1_deposit_index*: uint64
# Registry
# TODO List[] won't construct due to VALIDATOR_REGISTRY_LIMIT > high(int)
validators*: seq[Validator]
balances*: seq[uint64] ##\
## Validator balances in Gwei!
## Also more `List`s which need to be bounded explicitly at
## VALIDATOR_REGISTRY_LIMIT
balances*: seq[uint64]
# Shuffling
randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
@ -233,8 +226,10 @@ type
## Per-epoch sums of slashed effective balances
# Attestations
previous_epoch_attestations*: seq[PendingAttestation]
current_epoch_attestations*: seq[PendingAttestation]
previous_epoch_attestations*:
List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
current_epoch_attestations*:
List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
# Finality
justification_bits*: uint8 ##\
@ -308,7 +303,7 @@ type
root*: Eth2Digest # hash_tree_root (not signing_root!)
StateCache* = object
crosslink_committee_cache*:
beacon_committee_cache*:
Table[tuple[a: int, b: Eth2Digest], seq[ValidatorIndex]]
active_validator_indices_cache*:
Table[Epoch, seq[ValidatorIndex]]
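Several unbounded `seq` fields become SSZ `List[T, N]` types with explicit limits. A rough sketch of the bounded-list idea; this is not the project's SSZ implementation, which also affects hashing and serialization:

# Rough sketch: a length-bounded list modelled as a seq plus a compile-time
# limit, mirroring what List[T, N] adds over a plain seq.
type BoundedList[T; maxLen: static int] = object
  items: seq[T]

proc add[T; maxLen: static int](l: var BoundedList[T, maxLen], x: T) =
  doAssert l.items.len < maxLen, "SSZ list limit exceeded"
  l.items.add x

var votes: BoundedList[int, 4]   # e.g. a SLOTS_PER_ETH1_VOTING_PERIOD-style bound
votes.add 1
votes.add 2
echo votes.items.len             # 2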

View File

@ -232,10 +232,11 @@ proc process_justification_and_finalization*(
cat = "finalization"
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#rewards-and-penalties-1
func get_base_reward(state: BeaconState, index: ValidatorIndex): Gwei =
let
total_balance = get_total_active_balance(state)
effective_balance = state.validators[index].effective_balance
func get_base_reward(state: BeaconState, index: ValidatorIndex,
total_balance: auto): Gwei =
# Spec function recalculates total_balance every time, which creates an
# O(n^2) situation.
let effective_balance = state.validators[index].effective_balance
effective_balance * BASE_REWARD_FACTOR div
integer_squareroot(total_balance) div BASE_REWARDS_PER_EPOCH
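`get_base_reward` now takes `total_balance` from the caller so the epoch-wide total is computed once rather than per validator. A simplified sketch of that hoisting, with plain `uint64` balances, the phase0 constants, and a spec-style integer square root:

# Simplified sketch: the point is that totalBalance is computed once, outside
# the per-validator loop, instead of inside every getBaseReward call.
const
  BASE_REWARD_FACTOR = 64'u64
  BASE_REWARDS_PER_EPOCH = 4'u64

proc integerSquareroot(n: uint64): uint64 =
  # Newton's method, as in the eth2 spec helper.
  var x = n
  var y = (x + 1) div 2
  while y < x:
    x = y
    y = (x + n div x) div 2
  x

proc getBaseReward(effectiveBalance, totalBalance: uint64): uint64 =
  effectiveBalance * BASE_REWARD_FACTOR div
    integerSquareroot(totalBalance) div BASE_REWARDS_PER_EPOCH

let balances = @[32_000_000_000'u64, 32_000_000_000'u64]
var totalBalance = 0'u64
for b in balances: totalBalance += b       # once per epoch, not per validator
for b in balances:
  echo getBaseReward(b, totalBalance)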
@ -274,9 +275,10 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
for index in eligible_validator_indices:
if index in unslashed_attesting_indices:
rewards[index] +=
get_base_reward(state, index) * attesting_balance div total_balance
get_base_reward(state, index, total_balance) * attesting_balance div
total_balance
else:
penalties[index] += get_base_reward(state, index)
penalties[index] += get_base_reward(state, index, total_balance)
# Proposer and inclusion delay micro-rewards
## This depends on matching_source_attestations being an indexable seq, not a
@ -309,11 +311,12 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
if a.inclusion_delay < attestation.inclusion_delay:
attestation = a
let proposer_reward =
(get_base_reward(state, index) div PROPOSER_REWARD_QUOTIENT).Gwei
let
base_reward = get_base_reward(state, index, total_balance)
proposer_reward = (base_reward div PROPOSER_REWARD_QUOTIENT).Gwei
rewards[attestation.proposer_index.int] += proposer_reward
let max_attester_reward = get_base_reward(state, index) - proposer_reward
let max_attester_reward = base_reward - proposer_reward
rewards[index] += max_attester_reward div attestation.inclusion_delay
@ -325,7 +328,7 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
state, matching_target_attestations, stateCache)
for index in eligible_validator_indices:
penalties[index] +=
BASE_REWARDS_PER_EPOCH.uint64 * get_base_reward(state, index)
BASE_REWARDS_PER_EPOCH.uint64 * get_base_reward(state, index, total_balance)
if index notin matching_target_attesting_indices:
penalties[index] +=
state.validators[index].effective_balance *
@ -426,9 +429,7 @@ proc process_epoch*(state: var BeaconState) =
## Caching here for get_beacon_committee(...) can break otherwise, since
## get_active_validator_indices(...) usually changes.
# TODO is this cache still necessary/useful? presumably not, but can't remove
# quite yet
clear(per_epoch_cache.crosslink_committee_cache)
clear(per_epoch_cache.beacon_committee_cache)
# @process_reveal_deadlines
# @process_challenge_deadlines

View File

@ -98,8 +98,8 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
endIdx = (len(indices).uint64 * (index + 1)) div count
key = (indices.len, seed)
if key notin stateCache.crosslink_committee_cache:
stateCache.crosslink_committee_cache[key] =
if key notin stateCache.beacon_committee_cache:
stateCache.beacon_committee_cache[key] =
get_shuffled_seq(seed, len(indices).uint64)
# These assertions from compute_shuffled_index(...)
@ -110,15 +110,13 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
# In spec, this calls get_shuffled_index() every time, but that's wasteful
mapIt(
start.int .. (endIdx.int-1),
indices[stateCache.crosslink_committee_cache[key][it]])
indices[stateCache.beacon_committee_cache[key][it]])
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.1/specs/core/0_beacon-chain.md#get_beacon_committee
func get_beacon_committee*(state: BeaconState, slot: Slot, index: uint64, cache: var StateCache): seq[ValidatorIndex] =
# Return the beacon committee at ``slot`` for ``index``.
let
epoch = compute_epoch_at_slot(slot)
# TODO use state caching for this or not?
committees_per_slot = get_committee_count_at_slot(state, slot)
## This is a somewhat more fragile, but high-ROI, caching setup --
## get_active_validator_indices() is slow to run in a loop and only
@ -127,22 +125,22 @@ func get_beacon_committee*(state: BeaconState, slot: Slot, index: uint64, cache:
cache.active_validator_indices_cache[epoch] =
get_active_validator_indices(state, epoch)
# TODO remove or replace this...
#if epoch notin cache.committee_count_cache:
# cache.committee_count_cache[epoch] = get_committee_count(state, epoch)
# Constant throughout an epoch
if epoch notin cache.committee_count_cache:
cache.committee_count_cache[epoch] =
get_committee_count_at_slot(state, slot)
# TODO profiling & make sure caches populated
compute_committee(
cache.active_validator_indices_cache[epoch],
get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
(slot mod SLOTS_PER_EPOCH) * committees_per_slot + index,
committees_per_slot * SLOTS_PER_EPOCH,
(slot mod SLOTS_PER_EPOCH) * cache.committee_count_cache[epoch] + index,
cache.committee_count_cache[epoch] * SLOTS_PER_EPOCH,
cache
)
# Not from spec
func get_empty_per_epoch_cache*(): StateCache =
result.crosslink_committee_cache =
result.beacon_committee_cache =
initTable[tuple[a: int, b: Eth2Digest], seq[ValidatorIndex]]()
result.active_validator_indices_cache =
initTable[Epoch, seq[ValidatorIndex]]()
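Both `beacon_committee_cache` and the re-enabled `committee_count_cache` follow the same pattern: a `Table` keyed by a value that stays constant for the epoch, filled on first use. A stripped-down sketch, where `slowCommitteeCount` is a made-up stand-in for `get_committee_count_at_slot`:

import tables

# Memoisation sketch: compute once per epoch, then serve from the cache.
proc slowCommitteeCount(epoch: uint64): uint64 =
  epoch mod 4 + 1          # placeholder computation

var committeeCountCache = initTable[uint64, uint64]()

proc committeeCountAt(epoch: uint64): uint64 =
  if epoch notin committeeCountCache:
    committeeCountCache[epoch] = slowCommitteeCount(epoch)
  committeeCountCache[epoch]

echo committeeCountAt(3'u64)   # computed
echo committeeCountAt(3'u64)   # served from the cache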

View File

@ -324,9 +324,8 @@ func getZeroHashWithoutSideEffect(idx: int): Eth2Digest =
# TODO this is a work-around for the somewhat broken side
# effects analysis of Nim - reading from global let variables
# is considered a side-effect.
# Nim 0.19 doesn't have the `{.noSideEffect.}:` override, so
# we should revisit this in Nim 0.20.2.
{.emit: "`result` = `zeroHashes`[`idx`];".}
{.noSideEffect.}:
zeroHashes[idx]
func addChunk*(merkelizer: SszChunksMerkelizer, data: openarray[byte]) =
doAssert data.len > 0 and data.len <= bytesPerChunk
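The emit-based work-around gives way to a `{.noSideEffect.}:` override block, as supported by the compiler version in use here; newer compilers spell the same override `{.cast(noSideEffect).}:`. A tiny sketch of the pattern with a made-up global:

# Sketch of the override-block pattern used above. Reading a global inside a
# `func` can be flagged as a side effect by the analysis; the block tells the
# compiler to accept it here.
let zeroStrings = ["", "0", "00", "000"]

func zeroStringFor(idx: int): string =
  {.noSideEffect.}:
    result = zeroStrings[idx]

echo zeroStringFor(2)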

View File

@ -52,7 +52,7 @@ func toSlot*(t: BeaconTime): tuple[afterGenesis: bool, slot: Slot] =
(false, Slot(uint64(-ti) div SECONDS_PER_SLOT))
func toBeaconTime*(c: BeaconClock, t: Time): BeaconTime =
BeaconTime(times.seconds(t - c.genesis))
BeaconTime(times.inSeconds(t - c.genesis))
func toSlot*(c: BeaconClock, t: Time): tuple[afterGenesis: bool, slot: Slot] =
c.toBeaconTime(t).toSlot()
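The conversion now uses `times.inSeconds`, which turns the `Duration` obtained by subtracting two `Time` values into whole seconds. A small sketch with a made-up genesis timestamp:

import times

# Sketch: Time - Time yields a Duration; inSeconds converts it to int64 seconds.
let genesis = fromUnix(1_573_000_000)   # made-up genesis time
let now = fromUnix(1_573_000_360)
echo inSeconds(now - genesis)           # 360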

View File

@ -63,7 +63,7 @@ proc sendDeposits*(
for i, dp in deposits:
web3.defaultAccount = eth1Addresses[i]
let depositContract = web3.contractSender(DepositContract, contractAddress)
let tx = await depositContract.deposit(
discard await depositContract.deposit(
Bytes48(dp.data.pubKey.getBytes()),
Bytes32(dp.data.withdrawal_credentials.data),
Bytes96(dp.data.signature.getBytes()),
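The returned transaction hash is no longer bound to an unused variable; the call is still awaited, but its value is dropped with `discard`. A minimal sketch of the pattern using the standard `asyncdispatch` (the project itself uses chronos):

import asyncdispatch

# Minimal sketch: wait for completion (and for errors to propagate), but
# intentionally ignore the returned value.
proc submitDeposit(): Future[string] {.async.} =
  return "0xabc"            # stand-in for a transaction hash

proc run() {.async.} =
  discard await submitDeposit()

waitFor run()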

View File

@ -1,66 +1,52 @@
FROM statusteam/nim-base AS build
FROM debian:bullseye-slim AS build
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \
&& apt-get -qq -y install build-essential make wget 2>/dev/null >/dev/null
&& apt-get -qq -y install build-essential make wget librocksdb-dev libpcre3-dev golang-go git &>/dev/null \
&& apt-get -qq clean
RUN export GO_TAR_GZ=go1.12.6.linux-amd64.tar.gz \
&& cd /tmp/ \
&& wget --progress=dot:giga https://dl.google.com/go/$GO_TAR_GZ \
&& tar -xf $GO_TAR_GZ \
&& mv go /usr/local \
&& rm $GO_TAR_GZ
ARG GIT_REVISION
RUN export GOROOT=/usr/local/go \
&& export PATH=$GOROOT/bin:$PATH \
# let Docker cache this between Git revision and testnet version changes
RUN cd /root \
&& git clone https://github.com/status-im/nim-beacon-chain.git \
&& cd nim-beacon-chain \
&& git reset --hard ${GIT_REVISION} \
&& { make 2>/dev/null >/dev/null || true; } \
&& make -j8 update \
&& make deps \
&& cp vendor/go/bin/p2pd /usr/bin/p2pd \
&& cp docker/run_in_docker.sh /usr/bin/run_beacon_node.sh
&& { make &>/dev/null || true; } \
&& make -j$(nproc) update \
&& make deps
ARG NETWORK
# Please note that the commands above have the goal of caching the compilation
# of Nim and p2pd, but don't depend on the current git revision. This means
# that the cache can become outdated over time and you'll start seeing Nim
# being compiled on every run. If this happens, just prune your docker cache
# to get a fresh up-to-date version of Nim and p2pd.
ARG GIT_REVISION
ARG NETWORK_NIM_FLAGS
RUN cd nim-beacon-chain \
&& ./env.sh nim \
-o:/usr/bin/beacon_node \
-d:release \
-d:insecure \
--debugger:native \
--debugInfo \
--verbosity:0 \
--hints:off \
--warnings:off \
${NETWORK_NIM_FLAGS} \
-d:"chronicles_log_level=DEBUG" \
-d:"testnet_servers_image" \
c beacon_chain/beacon_node.nim
RUN cd /root/nim-beacon-chain \
&& git fetch \
&& git reset --hard ${GIT_REVISION} \
&& make -j$(nproc) update \
&& make LOG_LEVEL=DEBUG NIMFLAGS="-d:release -d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node
# --------------------------------- #
# Starting new image to reduce size #
# --------------------------------- #
FROM debian:9-slim
FROM debian:bullseye-slim
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \
&& apt-get -qq -y install librocksdb-dev psmisc 2>/dev/null >/dev/null \
&& apt-get -qq -y install librocksdb-dev libpcre3 psmisc &>/dev/null \
&& apt-get -qq clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=build /usr/bin/beacon_node /usr/bin/beacon_node
COPY --from=build /usr/bin/run_beacon_node.sh /usr/bin/run_beacon_node.sh
COPY --from=build /usr/bin/p2pd /usr/bin/p2pd
COPY run_in_docker.sh /usr/bin/run_beacon_node.sh
COPY --from=build /root/nim-beacon-chain/build/beacon_node /usr/bin/beacon_node
COPY --from=build /root/nim-beacon-chain/vendor/go/bin/p2pd /usr/bin/p2pd
MAINTAINER Zahary Karadjov <zahary@status.im>
LABEL description="Nimbus installation that can act as an ETH2 network bootstrap node."
ARG GIT_REVISION
RUN echo Built from Git revision: ${GIT_REVISION}
# TODO: This custom entry script is necessary only because we must clean up
# temporary files left by previous executions of the Go daemon.
# We should be able to remove it once we have a native LibP2P impl.

View File

@ -1,3 +1,5 @@
SHELL := bash # the shell used internally by "make"
# These default settings can be overridden by exporting env variables
GIT_REVISION ?= $(shell git rev-parse HEAD)
@ -7,16 +9,23 @@ NETWORK_NIM_FLAGS ?= $(shell ../scripts/load-testnet-nim-flags.sh $(NETWORK))
IMAGE_TAG ?= $(NETWORK)
IMAGE_NAME ?= statusteam/nimbus_beacon_node:$(IMAGE_TAG)
CURRENT_BRANCH = $(shell git rev-parse --abbrev-ref HEAD)
COMPUTER_SAYS_NO = { echo "I'm sorry, Dave. I'm afraid I can't do that."; exit 1; }
build:
@ DOCKER_BUILDKIT=1 \
docker build \
--build-arg="GIT_REVISION=$(GIT_REVISION)" \
--build-arg="NETWORK=$(NETWORK)" \
--build-arg="NETWORK_NIM_FLAGS=$(NETWORK_NIM_FLAGS)" \
-t $(IMAGE_NAME) .
-t $(IMAGE_NAME) \
--progress=plain \
.
push: build
docker push $(IMAGE_NAME)
+@ $(MAKE) push-last
push-last:
@ [[ "$(CURRENT_BRANCH)" != "master" && "$(NETWORK)" == "testnet0" ]] && $(COMPUTER_SAYS_NO) || true
@ [[ "$(CURRENT_BRANCH)" != "devel" && "$(NETWORK)" == "testnet1" ]] && $(COMPUTER_SAYS_NO) || true
docker push $(IMAGE_NAME)

View File

@ -37,8 +37,24 @@ proc writeJson*(prefix, slot, v: auto) =
let fileName = fmt"{prefix:04}-{shortLog(slot):08}.json"
Json.saveFile(fileName, v, pretty = true)
cli do(slots = 448'u,
validators = SLOTS_PER_EPOCH * 9, # One per shard is minimum
func verifyConsensus(state: BeaconState, attesterRatio: auto) =
if attesterRatio < 0.63:
doAssert state.current_justified_checkpoint.epoch == 0
doAssert state.finalized_checkpoint.epoch == 0
# Quorum is 2/3 of validators, and at low numbers, quantization effects
# can dominate, so allow for play above/below attesterRatio of 2/3.
if attesterRatio < 0.72:
return
let current_epoch = get_current_epoch(state)
if current_epoch >= 3:
doAssert state.current_justified_checkpoint.epoch + 1 >= current_epoch
if current_epoch >= 4:
doAssert state.finalized_checkpoint.epoch + 2 >= current_epoch
cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 11, # One per shard is minimum
json_interval = SLOTS_PER_EPOCH,
prefix = 0,
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.75,
@ -69,6 +85,7 @@ cli do(slots = 448'u,
for i in 0..<slots:
maybeWrite()
verifyConsensus(state, attesterRatio)
let
attestations_idx = state.slot
@ -113,11 +130,13 @@ cli do(slots = 448'u,
for v in scas:
if (rand(r, high(int)).float * attesterRatio).int <= high(int):
if first:
attestation = makeAttestation(state, latest_block_root, v, flags)
attestation =
makeAttestation(state, latest_block_root, v, cache, flags)
first = false
else:
attestation.combine(
makeAttestation(state, latest_block_root, v, flags), flags)
makeAttestation(state, latest_block_root, v, cache, flags),
flags)
if not first:
# add the attestation if any of the validators attested, as given

View File

@ -18,7 +18,6 @@ add_var () {
add_var CONST_PRESET
add_var NETWORK_TYPE
add_var SLOTS_PER_EPOCH
add_var SLOTS_PER_EPOCH
add_var MAX_COMMITTEES_PER_SLOT
echo $NIM_FLAGS

View File

@ -8,6 +8,10 @@ export NETWORK=$1
export NETWORK_NIM_FLAGS=$(./load-testnet-nim-flags.sh $NETWORK)
export GIT_REVISION=$(git rev-parse HEAD)
set -a
source $NETWORK.env
set +a
cd ..
if [ -f .env ]; then
@ -20,10 +24,11 @@ echo Execution plan:
echo "Testnet name : $NETWORK"
echo "Bootstrap node hostname : ${BOOTSTRAP_HOST:="master-01.do-ams3.nimbus.test.statusim.net"}"
echo "Bootstrap node ip : ${BOOTSTRAP_IP:="$(dig +short $BOOTSTRAP_HOST)"}"
echo "Bootstrap node port : ${BOOTSTRAP_PORT:=9000}"
echo "Reset testnet at end : ${PUBLISH_TESTNET_RESETS:="1"}"
echo "Testnet metadata repo : ${ETH2_TESTNETS_GIT_URL:="git@github.com:${ETH2_TESTNETS_ORG:=eth2-clients}/eth2-testnets"}"
echo "Testnet metadata dir : ${ETH2_TESTNETS:="build/eth2-testnets"}"
echo "Beacon node data dir : ${DATA_DIR:="build/testnet-reset-data"}"
echo "Beacon node data dir : ${DATA_DIR:="build/testnet-reset-data/$NETWORK"}"
echo "Nim build flags : $NETWORK_NIM_FLAGS"
while true; do

View File

@ -44,8 +44,7 @@ proc merkleTreeFromLeaves*(
values: openarray[Eth2Digest],
Depth: static[int] = DEPOSIT_CONTRACT_TREE_DEPTH
): SparseMerkleTree[Depth] =
## Depth should be the same as
## verify_merkle_branch / is_valid_merkle_branch
## Depth should be the same as is_valid_merkle_branch
result.nnznodes[0] = @values
@ -150,7 +149,7 @@ when isMainModule: # Checks
let proof = getMerkleProof(tree, index)
echo "Proof: ", proof
doAssert verify_merkle_branch(
doAssert is_valid_merkle_branch(
a, get_merkle_proof(tree, index = index),
depth = `depth`,
index = index.uint64,
@ -163,7 +162,7 @@ when isMainModule: # Checks
let proof = getMerkleProof(tree, index)
# echo "Proof: ", proof
doAssert verify_merkle_branch(
doAssert is_valid_merkle_branch(
b, get_merkle_proof(tree, index = index),
depth = `depth`,
index = index.uint64,
@ -176,7 +175,7 @@ when isMainModule: # Checks
let proof = getMerkleProof(tree, index)
# echo "Proof: ", proof
doAssert verify_merkle_branch(
doAssert is_valid_merkle_branch(
c, get_merkle_proof(tree, index = index),
depth = `depth`,
index = index.uint64,

View File

@ -100,9 +100,6 @@ proc mockAttestationImpl(
var cache = get_empty_per_epoch_cache()
let
committees_per_slot = get_committee_count_at_slot(
state, slot)
beacon_committee = get_beacon_committee(
state,
result.data.slot,

View File

@ -14,7 +14,7 @@ import
# 0.19.6 shims
stew/objects, # import default
# Specs
../../beacon_chain/spec/[datatypes, crypto, helpers, digest],
../../beacon_chain/spec/[datatypes, crypto, helpers, digest, beaconstate],
# Internals
../../beacon_chain/[ssz, extras],
# Mocking procs
@ -141,15 +141,17 @@ template mockGenesisDepositsImpl(
# 4th loop - append proof
for valIdx in 0 ..< validatorCount.int:
when false: # TODO
# TODO ensure genesis & deposit process tests suffice to catch whether
# changes here break things; ensure that this matches the merkle proof
# sequence is_valid_merkle_branch(...) now looks for
result[valIdx].proof[0..31] = tree.getMerkleProof(valIdx)
result[valIdx].proof[32] = int_to_bytes32(index + 1)
doAssert:
verify_merkle_branch(
result[valIdx].proof[32] =
Eth2Digest(data: int_to_bytes32((valIdx + 1).uint64))
doAssert is_valid_merkle_branch(
depositsDataHash[valIdx],
result[valIdx].proof,
DEPOSIT_CONTRACT_TREE_DEPTH,
valIdx,
valIdx.uint64,
root
)
@ -198,6 +200,7 @@ proc mockUpdateStateForNewDeposit*(
flags: UpdateFlags
): Deposit =
# TODO withdrawal credentials
mockDepositData(
@ -209,11 +212,11 @@ proc mockUpdateStateForNewDeposit*(
flags
)
let tree = merkleTreeFromLeaves([hash_tree_root(result.data)])
when false: # TODO
let tree = merkleTreeFromLeaves([hash_tree_root(result.data)])
result[valIdx].proof[0..31] = tree.getMerkleProof(0)
result[valIdx].proof[32] = int_to_bytes32(0 + 1)
# doAssert: verify_merkle_branch(...)
# doAssert is_valid_merkle_branch(...)
# TODO: this logic from the eth2.0-specs test suite seems strange
# but confirmed by running it

View File

@ -62,7 +62,7 @@ suite "Official - Sanity - Blocks " & preset():
expect(AssertionError):
# assert in process_slots. This should not be triggered
# for blocks from block_pool/network
let done = state_transition(stateRef[], blck, flags = {skipValidation})
discard state_transition(stateRef[], blck, flags = {skipValidation})
runValidTest("Same slot block transition", same_slot_block_transition, 1)
runValidTest("Empty block transition", empty_block_transition, 1)
@ -77,7 +77,7 @@ suite "Official - Sanity - Blocks " & preset():
let blck = parseTest(testDir/"blocks_0.ssz", SSZ, BeaconBlock)
expect(AssertionError):
let done = state_transition(stateRef[], blck, flags = {skipValidation})
discard state_transition(stateRef[], blck, flags = {skipValidation})
runValidTest("Skipped Slots", skipped_slots, 1)
runValidTest("Empty epoch transition", empty_epoch_transition, 1)

View File

@ -45,7 +45,7 @@ suite "Attestation pool processing" & preset():
beacon_committee = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache)
attestation = makeAttestation(
state.data.data, state.blck.root, beacon_committee[0])
state.data.data, state.blck.root, beacon_committee[0], cache)
pool.add(state.data.data, state.blck, attestation)
@ -65,7 +65,7 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0])
state.data.data, state.blck.root, bc0[0], cache)
process_slots(state.data, state.data.data.slot + 1)
@ -73,7 +73,7 @@ suite "Attestation pool processing" & preset():
bc1 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc1[0])
state.data.data, state.blck.root, bc1[0], cache)
# test reverse order
pool.add(state.data.data, state.blck, attestation1)
@ -95,9 +95,9 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0])
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1])
state.data.data, state.blck.root, bc0[1], cache)
pool.add(state.data.data, state.blck, attestation0)
pool.add(state.data.data, state.blck, attestation1)
@ -119,9 +119,9 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0])
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1])
state.data.data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1, {skipValidation})
@ -144,9 +144,9 @@ suite "Attestation pool processing" & preset():
bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0])
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1])
state.data.data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1, {skipValidation})

View File

@ -7,9 +7,10 @@
{.used.}
import
unittest,
../beacon_chain/beacon_node
import unittest
when false:
import ../beacon_chain/beacon_node
suite "Beacon node":
# Compile test

View File

@ -85,10 +85,10 @@ suite "Block processing" & preset():
let
# Create an attestation for slot 1 signed by the only attester we have!
crosslink_committee =
beacon_committee =
get_beacon_committee(state, state.slot, 0, cache)
attestation = makeAttestation(
state, previous_block_root, crosslink_committee[0])
state, previous_block_root, beacon_committee[0], cache)
# Some time needs to pass before attestations are included - this is
# to let the attestation propagate properly to interested participants

View File

@ -7,9 +7,10 @@
{.used.}
import
unittest,
../beacon_chain/sync_protocol
import unittest
when false:
import ../beacon_chain/sync_protocol
suite "Sync protocol":
# Compile test

View File

@ -158,9 +158,9 @@ proc makeBlock*(
addBlock(next_state, previous_block_root, body)
proc find_beacon_committee(
state: BeaconState, validator_index: ValidatorIndex): auto =
state: BeaconState, validator_index: ValidatorIndex,
cache: var StateCache): auto =
let epoch = compute_epoch_at_slot(state.slot)
var cache = get_empty_per_epoch_cache()
for epoch_committee_index in 0'u64 ..< get_committee_count_at_slot(
state, epoch.compute_start_slot_at_epoch) * SLOTS_PER_EPOCH:
let
@ -174,14 +174,16 @@ proc find_beacon_committee(
proc makeAttestation*(
state: BeaconState, beacon_block_root: Eth2Digest,
validator_index: ValidatorIndex, flags: UpdateFlags = {}): Attestation =
validator_index: ValidatorIndex, cache: var StateCache,
flags: UpdateFlags = {}): Attestation =
let
(committee, slot, index) = find_beacon_committee(state, validator_index)
(committee, slot, index) =
find_beacon_committee(state, validator_index, cache)
validator = state.validators[validator_index]
sac_index = committee.find(validator_index)
data = makeAttestationData(state, slot, index, beacon_block_root)
doAssert sac_index != -1, "find_shard_committee should guarantee this"
doAssert sac_index != -1, "find_beacon_committee should guarantee this"
var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
aggregation_bits.raiseBit sac_index

@ -1 +1 @@
Subproject commit 798b1019105a80fe0f1b49e9f95a4bbab4c29e70
Subproject commit 28e88efada2ae62cecf51b29ea52c69d842db051