Merge pull request #614 from status-im/devel

Redo the 2019-12-02 release due to a wrong merge
This commit is contained in:
zah 2019-12-03 02:32:40 +02:00 committed by GitHub
commit e56792fe55
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
39 changed files with 1492 additions and 306 deletions

View File

@ -53,7 +53,8 @@ install:
script:
- set -e # fail fast
- make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" update # to allow a newer Nim version to be detected
# Building Nim-1.0.4 takes up to 10 minutes on Travis - the time limit after which jobs are cancelled for having no output
- make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" V=1 update # to allow a newer Nim version to be detected
- make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}"
- make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" DISABLE_TEST_FIXTURES_SCRIPT=1 test

42
Jenkinsfile vendored
View File

@ -1,9 +1,43 @@
node('linux') {
stage('Clone') {
def runStages() {
stage("Clone") {
/* The Git repo seems to be cached in some Jenkins plugin, so this is not always a clean clone. */
checkout scm
sh "make build-system-checks || true"
}
stage('Build') {
sh 'echo "nproc:"; nproc'
stage("Build") {
sh "make -j${env.NPROC} update" /* to allow a newer Nim version to be detected */
sh "make -j${env.NPROC} V=1 deps" /* to allow the following parallel stages */
}
stage("Test") {
parallel(
"tools": {
stage("Tools") {
sh "make -j${env.NPROC}"
}
},
"test suite": {
stage("Test suite") {
sh "make -j${env.NPROC} test"
}
}
)
}
}
parallel(
"Linux": {
node("linux") {
withEnv(["NPROC=${sh(returnStdout: true, script: 'nproc').trim()}"]) {
runStages()
}
}
},
"macOS": {
node("macos") {
withEnv(["NPROC=${sh(returnStdout: true, script: 'sysctl -n hw.logicalcpu').trim()}"]) {
runStages()
}
}
}
)

View File

@ -13,7 +13,7 @@ BUILD_SYSTEM_DIR := vendor/nimbus-build-system
# we don't want an error here, so we can handle things later, in the build-system-checks target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
TOOLS := beacon_node bench_bls_sig_agggregation state_sim ncli_hash_tree_root ncli_pretty ncli_signing_root ncli_transition process_dashboard deposit_contract
TOOLS := beacon_node bench_bls_sig_agggregation ncli_hash_tree_root ncli_pretty ncli_signing_root ncli_transition process_dashboard deposit_contract
TOOLS_DIRS := beacon_chain benchmarks research ncli tests/simulation
TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS))
@ -70,10 +70,10 @@ clean_eth2_network_simulation_files:
eth2_network_simulation: | build deps p2pd clean_eth2_network_simulation_files process_dashboard
GIT_ROOT="$$PWD" tests/simulation/start.sh
testnet0: | build deps clean-testnet0
testnet0: | build deps
+ $(MAKE) testnet0-no-clean
testnet1: | build deps clean-testnet1
testnet1: | build deps
+ $(MAKE) testnet1-no-clean
clean-testnet0:

337
README.md
View File

@ -1,4 +1,4 @@
# Ethereum Beacon Chain
# Nimbus Eth2 (Beacon Chain)
[![Build Status (Travis)](https://img.shields.io/travis/status-im/nim-beacon-chain/master.svg?label=Linux%20/%20macOS "Linux/macOS build status (Travis)")](https://travis-ci.org/status-im/nim-beacon-chain)
[![Build Status (Azure)](https://dev.azure.com/nimbus-dev/nim-beacon-chain/_apis/build/status/status-im.nim-beacon-chain?branchName=master)](https://dev.azure.com/nimbus-dev/nim-beacon-chain/_build/latest?definitionId=3&branchName=master)
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
@ -9,82 +9,185 @@
[![Gitter: #status-im/nimbus](https://img.shields.io/badge/gitter-status--im%2Fnimbus-orange.svg)](https://gitter.im/status-im/nimbus)
[![Status: #nimbus-general](https://img.shields.io/badge/status-nimbus--general-orange.svg)](https://get.status.im/chat/public/nimbus-general)
Nimbus beacon chain is a research implementation of the beacon chain component of the upcoming Ethereum Serenity upgrade, aka Eth2. See the main [Nimbus](https://github.com/status-im/nimbus/) project for the bigger picture.
Welcome to Nimbus for Ethereum 2.0.
## Interop (for other Eth2 clients)
Nimbus is currently going through interoperability testing with several other beacon chain implementations - several tools are available to make this testing easier:
* [multinet](https://github.com/status-im/nim-beacon-chain/tree/master/multinet) - a set of scripts to build and run several Eth2 clients locally
* [ncli](https://github.com/status-im/nim-beacon-chain/tree/master/multinet) - command line tools for working with SSZ files and state transitions
Nimbus beacon chain is a research implementation of the beacon chain component of the upcoming Ethereum Serenity upgrade, aka Eth2.
## Related
* [status-im/nimbus](https://github.com/status-im/nimbus/): main Nimbus repository - start here to learn more about the Nimbus eco-system
* [status-im/nimbus](https://github.com/status-im/nimbus/): Nimbus for Ethereum 1
* [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md): Serenity specification that this project implements
* [ethereum/beacon\_chain](https://github.com/ethereum/beacon_chain): reference implementation from the Ethereum foundation
You can check where the beacon chain fits in the Ethereum research ecosystem in the [Status Athenaeum](https://github.com/status-im/athenaeum/blob/b465626cc551e361492e56d32517b2cdadd7493f/ethereum_research_records.json#L38).
You can check where the beacon chain fits in the Ethereum ecosystem in our Two-Point-Oh series: https://our.status.im/tag/two-point-oh/
## Building and Testing
## Table of Contents
### Prerequisites
- [Nimbus Eth2 (Beacon Chain)](#nimbus-eth2-beacon-chain)
- [Related](#related)
- [Table of Contents](#table-of-contents)
- [Prerequisites for everyone](#prerequisites-for-everyone)
- [Linux](#linux)
- [MacOS](#macos)
- [Windows](#windows)
- [For users](#for-users)
- [Connecting to testnets](#connecting-to-testnets)
- [Interop (for other Eth2 clients)](#interop-for-other-eth2-clients)
- [For researchers](#for-researchers)
- [State transition simulation](#state-transition-simulation)
- [Local network simulation](#local-network-simulation)
- [Visualising simulation metrics](#visualising-simulation-metrics)
- [For developers](#for-developers)
- [Windows dev environment](#windows-dev-environment)
- [Linux, MacOS](#linux-macos)
- [Raspberry Pi](#raspberry-pi)
- [Makefile tips and tricks for developers](#makefile-tips-and-tricks-for-developers)
- [License](#license)
## Prerequisites for everyone
At the moment, Nimbus has to be built from source.
Nimbus has 4 external dependencies:
* Go 1.12 (for compiling libp2p daemon - being phased out)
* Developer tools (C compiler, Make, Bash, Git)
* [RocksDB](https://github.com/facebook/rocksdb/)
* PCRE
* Go 1.12 (for compiling libp2p daemon - being phased out)
* GNU Make, Bash and the usual POSIX utilities. Git 2.9.4 or newer.
On Windows, a precompiled DLL collection download is available through the `fetch-dlls` Makefile target ([Windows instructions](#windows)).
Nim is not an external dependency; Nimbus will build its own local copy.
```bash
# MacOS with Homebrew
brew install rocksdb pcre
### Linux
On common Linux distributions the dependencies can be installed with:
```sh
# Debian and Ubuntu
sudo apt-get install build-essential golang-go librocksdb-dev libpcre3-dev
# Fedora
dnf install rocksdb-devel pcre
dnf install @development-tools go rocksdb-devel pcre
# Debian and Ubuntu
sudo apt-get install librocksdb-dev libpcre3-dev
# Arch (AUR)
pakku -S rocksdb pcre-static
# Archlinux, using an AUR manager for pcre-static
yourAURmanager -S base-devel go rocksdb pcre-static
```
`rocksdb` can also be installed by following [their instructions](https://github.com/facebook/rocksdb/blob/master/INSTALL.md).
### MacOS
### Build & Develop
Assuming you use [Homebrew](https://brew.sh/) to manage packages:
#### POSIX-compatible OS
```sh
brew install go rocksdb pcre
```
### Windows
* install [Go](https://golang.org/doc/install#windows)
You can install the developer tools by following the instructions in our [Windows dev environment section](#windows-dev-environment).
It also provides a downloading script for prebuilt PCRE and RocksDB.
If you choose to install Go from source, both Go and Nimbus require the same initial step of installing Mingw.
## For users
### Connecting to testnets
Nimbus connects to any of the testnets published in the [eth2-clients/eth2-testnets repo](https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus).
Once the [prerequisites](#prerequisites) are installed you can connect to testnet0 with the following commands:
```bash
make # The first `make` invocation will update all Git submodules and prompt you to run `make` again.
# It's only required once per Git clone. You'll run `make update` after each `git pull`, in the future,
# to keep those submodules up to date.
# Run tests
make test
# Update to latest version
git pull
make update
git clone https://github.com/status-im/nim-beacon-chain
cd nim-beacon-chain
make # This invocation will bootstrap the build system with additional Makefiles
make update deps # This will build Nim and all other dependencies
./connect-to-testnet testnet0
```
To run a command that might use binaries from the Status Nim fork:
The testnets are restarted once per week, usually on Monday evenings (UTC), and integrate the changes from the past week.
## Interop (for other Eth2 clients)
After installing the [prerequisites](#prerequisites), you can run the Nimbus state transition with the `ncli` tool:
* [ncli](ncli)
The interop scripts have been moved to a common repo; the interop work relied on the 0.8.3 specs, which have since seen significant changes. The interop branch still exists but is unmaintained.
* [multinet](https://github.com/status-im/nim-beacon-chain/tree/master/multinet) - a set of scripts to build and run several Eth2 clients locally
* [interop branch](https://github.com/status-im/nim-beacon-chain/tree/interop) (unmaintained)
## For researchers
### State transition simulation
The state transition simulator can quickly run the Beacon chain state transition function in isolation and output JSON snapshots of the state. The simulation runs without networking and blocks are processed without slot time delays.
```bash
./env.sh bash # start a new interactive shell with the right env vars set
which nim
nim --version
# or without starting a new interactive shell:
./env.sh which nim
./env.sh nim --version
# build and run the state simulator, then display its help ("-d:release" speeds it
# up substantially, allowing the simulation of longer runs in reasonable time)
make NIMFLAGS="-d:release" state_sim
build/state_sim --help
```
#### Windows
### Local network simulation
_(Experimental support!)_
The local network simulation will create a full peer-to-peer network of beacon nodes and validators on a single machine, and run the beacon chain in real time.
Parameters such as shard and validator counts, as well as data folders, are configured in [vars.sh](tests/simulation/vars.sh). They can be set as environment variables before launching the simulation.
```bash
# Clear data files from your last run and start the simulation with a new genesis block:
make VALIDATORS=192 NODES=6 USER_NODES=1 eth2_network_simulation
# In another terminal, get a shell with the right environment variables set:
./env.sh bash
# In the above example, the network is prepared for 7 beacon nodes but one of
# them is not started by default (`USER_NODES`) - this is useful to test
# catching up to the consensus. The following command will start the missing node.
./tests/simulation/run_node.sh 0 # (or the index (0-based) of the missing node)
# Running a separate node allows you to test sync as well as see what the action
# looks like from a single node's perspective.
```
You can also separate the output from each beacon node in its own panel, using [multitail](http://www.vanheusden.com/multitail/):
```bash
make USE_MULTITAIL="yes" eth2_network_simulation
```
You can find out more about it in the [development update](https://our.status.im/nimbus-development-update-2018-12-2/).
_Alternatively, fire up our [experimental Vagrant instance with Nim pre-installed](https://our.status.im/setting-up-a-local-vagrant-environment-for-nim-development/) and give us your feedback about the process!_
### Visualising simulation metrics
The [generic instructions from the Nimbus repo](https://github.com/status-im/nimbus/#metric-visualisation) apply here as well.
Specific steps:
```bash
# This will generate the Prometheus config and the Grafana dashboard on the fly,
# based on the number of nodes (which you can control by passing something like NODES=6 to `make`).
make VALIDATORS=192 NODES=6 USER_NODES=0 eth2_network_simulation
# In another terminal tab, after the sim started:
cd tests/simulation/prometheus
prometheus
```
The dashboard you need to import in Grafana is "tests/simulation/beacon-chain-sim-all-nodes-Grafana-dashboard.json".
![monitoring dashboard](./media/monitoring.png)
## For developers
The latest updates happen in the `devel` branch, which is merged into `master` every week on Tuesday, before new testnets are deployed.
The following sections explain how to set up your build environment on your platform.
### Windows dev environment
Install Mingw-w64 for your architecture using the "[MinGW-W64 Online
Installer](https://sourceforge.net/projects/mingw-w64/files/)" (first link
@ -103,16 +206,43 @@ mingw32-make # this first invocation will update the Git submodules
mingw32-make fetch-dlls # this will place the right DLLs for your architecture in the "build/" directory
```
> If you were following the Windows testnet instructions, you can jump back to [Connecting to testnets](#connecting-to-testnets) now
You can now follow those instructions in the previous section by replacing `make` with `mingw32-make` (regardless of your 32-bit or 64-bit architecture):
```bash
mingw32-make test # run the test suite
```
### Linux, MacOS
#### Raspberry PI
After cloning the repo:
Raspberry Pi support is experimental.
```bash
make # The first `make` invocation will update all Git submodules and prompt you to run `make` again.
# It's only required once per Git clone. You'll run `make update` after each `git pull`, in the future,
# to keep those submodules up to date.
# Run tests
make test
# Update to latest version
git pull
make update
```
To run a command that might use binaries from the Status Nim fork:
```bash
./env.sh bash # start a new interactive shell with the right env vars set
which nim
nim --version # Nimbus is tested and supported on 1.0.2 at the moment
# or without starting a new interactive shell:
./env.sh which nim
./env.sh nim --version
```
### Raspberry Pi
We recommend you remove any cover or use a fan; the Raspberry Pi will get hot (85°C) and throttle.
@ -163,77 +293,16 @@ source ~/.profile
git clone https://github.com/status-im/nim-beacon-chain.git
cd nim-beacon-chain
# Follow instructions above!
# Then you can follow instructions for Linux.
```
## Connecting to testnets
Nimbus should be able to connect to any of the testnets published in the [eth2-testnets repo](https://github.com/zah/eth2-testnets).
For example, connecting to our testnet0 is done with the following commands:
```bash
cd nim-beacon-chain
source env.sh
nim scripts/connect_to_testnet.nims nimbus/testnet0
```
## Beacon node simulation
The beacon node simulation will create a full peer-to-peer network of beacon nodes and validators, and run the beacon chain in real time. To change network parameters such as shard and validator counts, see [start.sh](tests/simulation/start.sh).
```bash
# Clear data files from your last run and start the simulation with a new genesis block:
make VALIDATORS=192 NODES=6 USER_NODES=1 eth2_network_simulation
# In another terminal, get a shell with the right environment variables set:
./env.sh bash
# In the above example, the network is prepared for 7 beacon nodes but one of
# them is not started by default (`USER_NODES`) - you can start it separately
# by running:
./tests/simulation/run_node.sh 0 # (or the 0-based node number of the missing node)
# Running a separate node allows you to test sync as well as see what the action
# looks like from a single node's perspective.
```
You can also separate the output from each beacon node in its own panel, using [multitail](http://www.vanheusden.com/multitail/):
```bash
make USE_MULTITAIL="yes" eth2_network_simulation
```
You can find out more about it in the [development update](https://our.status.im/nimbus-development-update-2018-12-2/).
_Alternatively, fire up our [experimental Vagrant instance with Nim pre-installed](https://our.status.im/setting-up-a-local-vagrant-environment-for-nim-development/) and give us your feedback about the process!_
### Visualising simulation metrics
Those [generic instructions from the Nimbus repo](https://github.com/status-im/nimbus/#metric-visualisation) apply here as well.
Specific steps:
```bash
# This will generate the Prometheus config and the Grafana dashboard on the fly,
# based on the number of nodes (which you can control by passing something like NODES=6 to `make`).
make VALIDATORS=192 NODES=6 USER_NODES=0 eth2_network_simulation
# In another terminal tab, after the sim started:
cd tests/simulation/prometheus
prometheus
```
The dashboard you need to import in Grafana is "tests/simulation/beacon-chain-sim-all-nodes-Grafana-dashboard.json".
[Obligatory screenshot.](https://i.imgur.com/pLvLhID.png)
### Makefile tips and tricks for developers
- build all those tools known to the Makefile:
```bash
# (assuming you have 4 CPU cores and want to take advantage of them):
make -j4
# $(nproc) corresponds to the number of cores you have
make -j$(nproc)
```
- build a specific tool:
@ -265,45 +334,9 @@ make NIMFLAGS="-d:release"
- you can freely combine those variables on the `make` command line:
```bash
make -j8 NIMFLAGS="-d:release" USE_MULTITAIL=yes eth2_network_simulation
make -j$(nproc) NIMFLAGS="-d:release" USE_MULTITAIL=yes eth2_network_simulation
```
## State transition simulation
The state transition simulator can quickly run the Beacon chain state transition function in isolation and output JSON snapshots of the state. The simulation runs without networking and blocks are processed without slot time delays.
```bash
# build and run the state simulator, then display its help ("-d:release" speeds it
# up substantially, allowing the simulation of longer runs in reasonable time)
make NIMFLAGS="-d:release" state_sim
build/state_sim --help
```
## Testnet
The beacon chain now has a public testnet available. Connect to it with:
```bash
make testnet0
scripts/testnet0.sh # this launches the testnet0-specific node you just built
```
For more information about the testnet and to find out how to launch your own, see [this announcement](https://our.status.im/the-nimbus-mvp-testnet-is-here/) and the [official docs on launching the testnets](https://nimbus.status.im/docs/t0.html).
## Convention
Ethereum Foundation uses:
- snake_case for fields and procedure names
- MACRO_CASE for constants
- PascalCase for types
Nim NEP-1 recommends:
- camelCase for fields and procedure names
- PascalCase for constants
- PascalCase for types
To facilitate collaboration and comparison, nim-beacon-chain uses the Ethereum Foundation convention.
## License
Licensed and distributed under either of

View File

@ -14,13 +14,13 @@ jobs:
- task: CacheBeta@1
displayName: 'cache Nim binaries'
inputs:
key: NimBinaries | $(Agent.OS) | $(PLATFORM) | $(Build.SourceBranchName)
key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)"
path: NimBinaries
- task: CacheBeta@1
displayName: 'cache p2pd binaries'
inputs:
key: p2pdCache | $(Agent.OS) | $(PLATFORM) | $(Build.SourceBranchName)
key: p2pdCache | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)"
path: p2pdCache
- task: CacheBeta@1

View File

@ -19,6 +19,7 @@ const
dataDirValidators = "validators"
genesisFile = "genesis.ssz"
hasPrompt = not defined(withoutPrompt)
maxEmptySlotCount = uint64(24*60*60) div SECONDS_PER_SLOT
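# i.e. one day's worth of slots (for example, 7200 slots with a 12-second slot time)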
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_slot,
@ -145,9 +146,9 @@ proc commitGenesisState(node: BeaconNode, tailState: BeaconState) =
let tailBlock = get_initial_beacon_block(tailState)
BlockPool.preInit(node.db, tailState, tailBlock)
except:
except CatchableError as e:
stderr.write "Failed to initialize database\n"
stderr.write getCurrentExceptionMsg(), "\n"
stderr.write e.msg, "\n"
quit 1
proc addBootstrapNode(node: BeaconNode, bootstrapNode: BootstrapAddr) =
@ -287,7 +288,12 @@ proc addLocalValidators(node: BeaconNode, state: BeaconState) =
for kind, file in walkDir(node.config.localValidatorsDir):
if kind in {pcFile, pcLinkToFile}:
node.addLocalValidator state, ValidatorPrivKey.init(readFile(file).string)
if cmpIgnoreCase(".privkey", splitFile(file).ext) == 0:
try:
let keyText = ValidatorPrivKey.init(readFile(file).string)
node.addLocalValidator state, keyText
except CatchableError:
warn "Failed to load a validator private key", file
info "Local validators attached ", count = node.attachedValidators.count
@ -297,6 +303,33 @@ func getAttachedValidator(node: BeaconNode,
let validatorKey = state.validators[idx].pubkey
node.attachedValidators.getValidator(validatorKey)
proc isSynced(node: BeaconNode, head: BlockRef): bool =
## TODO This function is here as a placeholder for some better heuristics to
## determine if we're in sync and should be producing blocks and
## attestations. Generally, the problem is that slot time keeps advancing
## even when there are no blocks being produced, so there's no way to
## distinguish validators genuinely going missing from the node not being
## well connected (during a network split or an internet outage for
## example). It would generally be correct to simply keep running as if
## we were the only legit node left alive, but then we run into issues:
## with enough empty slots, the validator pool is emptied, leading
## to empty committees and lots of empty slot processing that will be
## thrown away as soon as we're synced again.
let
# The slot we should be at, according to the clock
beaconTime = node.beaconClock.now()
wallSlot = beaconTime.toSlot()
# TODO if everyone follows this logic, the network will not recover from a
# halt: nobody will be producing blocks because everyone expects someone
# else to do it
if wallSlot.afterGenesis and (wallSlot.slot > head.slot) and
(wallSlot.slot - head.slot) > maxEmptySlotCount:
false
else:
true
proc updateHead(node: BeaconNode, slot: Slot): BlockRef =
# Use head state for attestation resolution below
@ -484,11 +517,16 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
# though - maybe we should use the state from the block pointed to by
# the attestation for some of the check? Consider interop with block
# production!
let
bs = BlockSlot(blck: head.blck, slot: wallSlot.slot)
if attestation.data.slot > head.blck.slot and
(attestation.data.slot - head.blck.slot) > maxEmptySlotCount:
warn "Ignoring attestation, head block too old (out of sync?)",
attestationSlot = attestation.data.slot, headSlot = head.blck.slot
else:
let
bs = BlockSlot(blck: head.blck, slot: wallSlot.slot)
node.blockPool.withState(node.stateCache, bs):
node.attestationPool.add(state, attestedBlock, attestation)
node.blockPool.withState(node.stateCache, bs):
node.attestationPool.add(state, attestedBlock, attestation)
else:
node.attestationPool.addUnresolved(attestation)
@ -697,65 +735,67 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
# disappear naturally - risky because user is not aware,
# and might lose stake on canonical chain but "just works"
# when reconnected..
# Right now, we keep going
if not node.isSynced(head):
warn "Node out of sync, skipping block and attestation production for this slot",
slot, headSlot = head.slot
else:
var curSlot = lastSlot + 1
while curSlot < slot:
# Timers may be delayed because we're busy processing, and we might have
# more work to do. We'll try to do so in an expedited way.
# TODO maybe even collect all work synchronously to avoid unnecessary
# state rewinds while waiting for async operations like validator
# signature..
notice "Catching up",
curSlot = shortLog(curSlot),
lastSlot = shortLog(lastSlot),
slot = shortLog(slot),
cat = "overload"
var curSlot = lastSlot + 1
while curSlot < slot:
# Timers may be delayed because we're busy processing, and we might have
# more work to do. We'll try to do so in an expedited way.
# TODO maybe even collect all work synchronously to avoid unnecessary
# state rewinds while waiting for async operations like validator
# signature..
notice "Catching up",
curSlot = shortLog(curSlot),
lastSlot = shortLog(lastSlot),
slot = shortLog(slot),
cat = "overload"
# For every slot we're catching up, we'll propose then send
# attestations - head should normally be advancing along the same branch
# in this case
# TODO what if we receive blocks / attestations while doing this work?
head = await handleProposal(node, head, curSlot)
# For every slot we're catching up, we'll propose then send
# attestations - head should normally be advancing along the same branch
# in this case
# TODO what if we receive blocks / attestations while doing this work?
head = await handleProposal(node, head, curSlot)
# For each slot we missed, we need to send out attestations - if we were
# proposing during this time, we'll use the newly proposed head, else just
# keep reusing the same - the attestation that goes out will actually
# rewind the state to what it looked like at the time of that slot
# TODO smells like there's an optimization opportunity here
handleAttestations(node, head, curSlot)
# For each slot we missed, we need to send out attestations - if we were
# proposing during this time, we'll use the newly proposed head, else just
# keep reusing the same - the attestation that goes out will actually
# rewind the state to what it looked like at the time of that slot
# TODO smells like there's an optimization opportunity here
handleAttestations(node, head, curSlot)
curSlot += 1
curSlot += 1
head = await handleProposal(node, head, slot)
head = await handleProposal(node, head, slot)
# We've been doing lots of work up until now which took time. Normally, we
# send out attestations at the slot mid-point, so we go back to the clock
# to see how much time we need to wait.
# TODO the beacon clock might jump here also. It's probably easier to complete
# the work for the whole slot using a monotonic clock instead, then deal
# with any clock discrepancies once only, at the start of slot timer
# processing..
let
attestationStart = node.beaconClock.fromNow(slot)
halfSlot = seconds(int64(SECONDS_PER_SLOT div 2))
# We've been doing lots of work up until now which took time. Normally, we
# send out attestations at the slot mid-point, so we go back to the clock
# to see how much time we need to wait.
# TODO the beacon clock might jump here also. It's probably easier to complete
# the work for the whole slot using a monotonic clock instead, then deal
# with any clock discrepancies once only, at the start of slot timer
# processing..
let
attestationStart = node.beaconClock.fromNow(slot)
halfSlot = seconds(int64(SECONDS_PER_SLOT div 2))
if attestationStart.inFuture or attestationStart.offset <= halfSlot:
let fromNow =
if attestationStart.inFuture: attestationStart.offset + halfSlot
else: halfSlot - attestationStart.offset
if attestationStart.inFuture or attestationStart.offset <= halfSlot:
let fromNow =
if attestationStart.inFuture: attestationStart.offset + halfSlot
else: halfSlot - attestationStart.offset
trace "Waiting to send attestations",
slot = shortLog(slot),
fromNow = shortLog(fromNow),
cat = "scheduling"
trace "Waiting to send attestations",
slot = shortLog(slot),
fromNow = shortLog(fromNow),
cat = "scheduling"
await sleepAsync(fromNow)
await sleepAsync(fromNow)
# Time passed - we might need to select a new head in that case
head = node.updateHead(slot)
# Time passed - we might need to select a new head in that case
head = node.updateHead(slot)
handleAttestations(node, head, slot)
handleAttestations(node, head, slot)
# TODO ... and beacon clock might jump here also. sigh.
let
@ -764,12 +804,28 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
addTimer(nextSlotStart) do (p: pointer):
asyncCheck node.onSlotStart(slot, nextSlot)
proc onSecond(node: BeaconNode, moment: Moment) {.async.} =
proc handleMissingBlocks(node: BeaconNode) =
let missingBlocks = node.blockPool.checkMissing()
if missingBlocks.len > 0:
var left = missingBlocks.len
info "Requesting detected missing blocks", missingBlocks
node.requestManager.fetchAncestorBlocks(missingBlocks) do (b: BeaconBlock):
onBeaconBlock(node ,b)
onBeaconBlock(node, b)
# TODO instead of waiting for a full second to try the next missing block
# fetching, we'll do it here again in case we get all blocks we asked
# for (there might be new parents to fetch). of course, this is not
# good because the onSecond fetching also kicks in regardless but
# whatever - this is just a quick fix for making the testnet easier
# work with while the sync problem is dealt with more systematically
dec left
if left == 0:
addTimer(Moment.now()) do (p: pointer):
handleMissingBlocks(node)
proc onSecond(node: BeaconNode, moment: Moment) {.async.} =
node.handleMissingBlocks()
let nextSecond = max(Moment.now(), moment + chronos.seconds(1))
addTimer(nextSecond) do (p: pointer):
@ -1087,6 +1143,10 @@ when isMainModule:
firstIdx = config.totalQuickstartDeposits)
if config.depositWeb3Url.len > 0 and config.depositContractAddress.len > 0:
info "Sending deposits",
web3 = config.depositWeb3Url,
depositContract = config.depositContractAddress
waitFor sendDeposits(
quickstartDeposits & randomDeposits,
config.depositWeb3Url,

View File

@ -2,6 +2,7 @@ import
deques, tables, options,
stew/[endians2], chronicles,
spec/[datatypes, crypto, digest],
nimcrypto/utils,
beacon_chain_db
type
@ -135,6 +136,8 @@ type
heads*: seq[Head]
inAdd*: bool
MissingBlock* = object
slots*: uint64 # number of slots that are suspected missing
tries*: int
@ -169,15 +172,14 @@ type
## blck.state_root == rdata.root
BlockSlot* = object
## Unique identifier for a particular fork in the block chain - normally,
## there's a block for every slot, but in the case a block is not produced,
## the chain progresses anyway, producing a new state for every slot.
#
# TODO: Isn't this type unnecessary?
# The `BlockRef` stored here already includes the `slot` number as well.
# We should either remove it or write a comment clarifying why it exists.
## Unique identifier for a particular fork and time in the block chain -
## normally, there's a block for every slot, but in the case a block is not
## produced, the chain progresses anyway, producing a new state for every
## slot.
blck*: BlockRef
slot*: Slot
slot*: Slot ##\
## Slot time for this BlockSlot which may differ from blck.slot when time
## has advanced without blocks
Head* = object
blck*: BlockRef
@ -213,8 +215,9 @@ type
proc shortLog*(v: AttachedValidator): string = shortLog(v.pubKey)
chronicles.formatIt BlockSlot:
($it.blck.root)[0..7] & ":" & $it.slot
mixin toHex
it.blck.root.data[0..3].toHex(true) & ":" & $it.slot
chronicles.formatIt BlockRef:
($it.root)[0..7] & ":" & $it.slot
mixin toHex
it.root.data[0..3].toHex(true) & ":" & $it.slot

View File

@ -81,7 +81,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
link(newRef, curRef)
curRef = curRef.parent
blocks[curRef.root] = curRef
trace "Populating block pool", key = curRef.root, val = curRef
debug "Populating block pool", key = curRef.root, val = curRef
if latestStateRoot.isNone() and db.containsState(blck.state_root):
latestStateRoot = some(blck.state_root)
@ -169,7 +169,7 @@ proc addResolvedBlock(
link(parent, blockRef)
pool.blocks[blockRoot] = blockRef
trace "Populating block pool", key = blockRoot, val = blockRef
debug "Populating block pool", key = blockRoot, val = blockRef
pool.addSlotMapping(blockRef)
@ -214,10 +214,21 @@ proc addResolvedBlock(
# unresolved blocks magically become resolved
# TODO there are more efficient ways of doing this that don't risk
# running out of stack etc
let retries = pool.pending
for k, v in retries:
discard pool.add(state, k, v)
# TODO This code is convoluted because when there are more than ~1.5k
# blocks being synced, there's a stack overflow as `add` gets called
# for the whole chain of blocks. Instead we use this ugly field in `pool`
# which could be avoided by refactoring the code
if not pool.inAdd:
pool.inAdd = true
defer: pool.inAdd = false
var keepGoing = true
while keepGoing:
let retries = pool.pending
for k, v in retries:
discard pool.add(state, k, v)
# Keep going for as long as the pending pool is shrinking
# TODO inefficient! so what?
keepGoing = pool.pending.len < retries.len
blockRef
proc add*(
@ -352,20 +363,26 @@ proc getBlockRange*(pool: BlockPool, headBlock: Eth2Digest,
##
result = output.len
trace "getBlockRange entered", headBlock, startSlot, skipStep
var b = pool.getRef(headBlock)
if b == nil:
trace "head block not found", headBlock
return
trace "head block found", headBlock = b
if b.slot < startSlot:
trace "head block is older than startSlot", headBlockSlot = b.slot, startSlot
return
template skip(n: int) =
for i in 0 ..< n:
let targetSlot = b.slot - Slot(n)
while b.slot > targetSlot:
if b.parent == nil:
trace "stopping at parentless block", slot = b.slot, root = b.root
return
trace "skipping block", nextBlock = b.parent
b = b.parent
# We must compute the last block that is eligible for inclusion
@ -382,10 +399,11 @@ proc getBlockRange*(pool: BlockPool, headBlock: Eth2Digest,
blocksToSkip += (alignedHeadSlot - lastWantedSlot)
# Finally, we skip the computed number of blocks
trace "aligning head", blocksToSkip
skip blocksToSkip
# From here, we can just write out the requested block range:
while b != nil and result > 0:
while b != nil and b.slot >= startSlot and result > 0:
dec result
output[result] = b
trace "getBlockRange result", position = result, blockSlot = b.slot
@ -597,12 +615,22 @@ proc loadTailState*(pool: BlockPool): StateData =
)
func isAncestorOf*(a, b: BlockRef): bool =
if a == b:
true
elif a.slot >= b.slot or b.parent.isNil:
false
else:
a.isAncestorOf(b.parent)
var b = b
var depth = 0
const maxDepth = (100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int)
while true:
if a == b: return true
# for now, use an assert for block chain length since a chain this long
# indicates a circular reference here..
doAssert depth < maxDepth
depth += 1
if a.slot >= b.slot or b.parent.isNil:
return false
doAssert b.slot > b.parent.slot
b = b.parent
proc delBlockAndState(pool: BlockPool, blockRoot: Eth2Digest) =
if (let blk = pool.db.getBlock(blockRoot); blk.isSome):

View File

@ -250,9 +250,9 @@ proc run(conf: InspectorConf) {.async.} =
info InspectorIdent & " started", peerID = getPeerId(identity.peer, conf),
bound = identity.addresses,
options = flags
except:
except CatchableError as e:
error "Could not initialize p2pd daemon",
exception = getCurrentExceptionMsg()
exception = e.msg
quit(1)
try:
@ -265,8 +265,8 @@ proc run(conf: InspectorConf) {.async.} =
let t = await api.pubsubSubscribe(filter, pubsubLogger)
info "Subscribed to custom topic", topic = filter
subs.add((ticket: t, future: t.transp.join()))
except:
error "Could not subscribe to topics", exception = getCurrentExceptionMsg()
except CatchableError as e:
error "Could not subscribe to topics", exception = e.msg
quit(1)
# Starting DHT resolver task

View File

@ -147,7 +147,7 @@ proc disconnectAndRaise(peer: Peer,
template reraiseAsPeerDisconnected(peer: Peer, errMsgExpr: static string,
reason = FaultOrError): auto =
const errMsg = errMsgExpr
debug errMsg, err = getCurrentExceptionMsg()
debug errMsg
disconnectAndRaise(peer, reason, errMsg)
proc registerProtocol(protocol: ProtocolInfo) =

View File

@ -147,7 +147,7 @@ proc disconnectAndRaise(peer: Peer,
template reraiseAsPeerDisconnected(peer: Peer, errMsgExpr: static string,
reason = FaultOrError): auto =
const errMsg = errMsgExpr
debug errMsg, err = getCurrentExceptionMsg()
debug errMsg
disconnectAndRaise(peer, reason, errMsg)
proc registerProtocol(protocol: ProtocolInfo) =

477
beacon_chain/peer_pool.nim Normal file
View File

@ -0,0 +1,477 @@
import tables, heapqueue
import chronos
type
PeerType* = enum
None, Incoming, Outgoing
PeerFlags = enum
Acquired, DeleteOnRelease
PeerItem[T] = object
data: T
peerType: PeerType
flags: set[PeerFlags]
index: int
PeerIndex = object
data: int
cmp: proc(a, b: PeerIndex): bool {.closure, gcsafe.}
PeerPool*[A, B] = ref object
incNeEvent: AsyncEvent
outNeEvent: AsyncEvent
incQueue: HeapQueue[PeerIndex]
outQueue: HeapQueue[PeerIndex]
registry: Table[B, PeerIndex]
storage: seq[PeerItem[A]]
cmp: proc(a, b: PeerIndex): bool {.closure, gcsafe.}
maxPeersCount: int
maxIncPeersCount: int
maxOutPeersCount: int
curIncPeersCount: int
curOutPeersCount: int
acqIncPeersCount: int
acqOutPeersCount: int
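# The comparison below delegates to cmp(b, a), i.e. it is reversed: HeapQueue is a
# min-heap, so this makes pop() return the peer that ranks highest under the
# user-supplied `<` for peers ("biggest values first", as the iterators below note).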
proc `<`*(a, b: PeerIndex): bool =
result = a.cmp(b, a)
proc fireEvent[A, B](pool: PeerPool[A, B], item: PeerItem[A]) {.inline.} =
if item.peerType == PeerType.Incoming:
pool.incNeEvent.fire()
elif item.peerType == PeerType.Outgoing:
pool.outNeEvent.fire()
proc waitEvent[A, B](pool: PeerPool[A, B],
filter: set[PeerType]) {.async.} =
if filter == {PeerType.Incoming, PeerType.Outgoing} or filter == {}:
var fut1 = pool.incNeEvent.wait()
var fut2 = pool.outNeEvent.wait()
try:
discard await one(fut1, fut2)
if fut1.finished:
if not(fut2.finished):
fut2.cancel()
pool.incNeEvent.clear()
else:
if not(fut1.finished):
fut1.cancel()
pool.outNeEvent.clear()
except CancelledError:
if not(fut1.finished):
fut1.cancel()
if not(fut2.finished):
fut2.cancel()
raise
elif PeerType.Incoming in filter:
await pool.incNeEvent.wait()
pool.incNeEvent.clear()
elif PeerType.Outgoing in filter:
await pool.outNeEvent.wait()
pool.outNeEvent.clear()
template getItem[A, B](pool: PeerPool[A, B],
filter: set[PeerType]): ptr PeerItem[A] =
doAssert((len(pool.outQueue) > 0) or (len(pool.incQueue) > 0))
var pindex: int
if filter == {PeerType.Incoming, PeerType.Outgoing}:
if len(pool.outQueue) > 0 and len(pool.incQueue) > 0:
# Don't think `<` is actually `<` here.
if pool.incQueue[0] < pool.outQueue[0]:
inc(pool.acqIncPeersCount)
pindex = pool.incQueue.pop().data
else:
inc(pool.acqOutPeersCount)
pindex = pool.outQueue.pop().data
else:
if len(pool.outQueue) > 0:
inc(pool.acqOutPeersCount)
pindex = pool.outQueue.pop().data
else:
inc(pool.acqIncPeersCount)
pindex = pool.incQueue.pop().data
else:
if PeerType.Outgoing in filter:
inc(pool.acqOutPeersCount)
pindex = pool.outQueue.pop().data
elif PeerType.Incoming in filter:
inc(pool.acqIncPeersCount)
pindex = pool.incQueue.pop().data
addr(pool.storage[pindex])
proc newPeerPool*[A, B](maxPeers = -1,
maxIncomingPeers = -1,
maxOutgoingPeers = -1): PeerPool[A, B] =
## Create new PeerPool.
##
## ``maxPeers`` - maximum number of peers allowed. All peers which exceed
## this number will be rejected (the ``addPeer()`` procedure will return
## ``false``). By default this number is infinite.
##
## ``maxIncomingPeers`` - maximum number of incoming peers allowed. All the
## incoming peers exceeding this number will be rejected. By default this
## number is infinite.
##
## ``maxOutgoingPeers`` - maximum number of outgoing peers allowed. All the
## outgoing peers exceeding this number will be rejected. By default this
## number is infinite.
##
## Please note that if ``maxPeers`` is a positive non-zero value, then the equation
## ``maxPeers >= maxIncomingPeers + maxOutgoingPeers`` must be ``true``.
var res = PeerPool[A, B]()
if maxPeers != -1:
doAssert(maxPeers >= maxIncomingPeers + maxOutgoingPeers)
res.maxPeersCount = if maxPeers < 0: high(int)
else: maxPeers
res.maxIncPeersCount = if maxIncomingPeers < 0: high(int)
else: maxIncomingPeers
res.maxOutPeersCount = if maxOutgoingPeers < 0: high(int)
else: maxOutgoingPeers
res.incNeEvent = newAsyncEvent()
res.outNeEvent = newAsyncEvent()
res.incQueue = initHeapQueue[PeerIndex]()
res.outQueue = initHeapQueue[PeerIndex]()
res.registry = initTable[B, PeerIndex]()
res.storage = newSeq[PeerItem[A]]()
proc peerCmp(a, b: PeerIndex): bool {.closure, gcsafe.} =
let p1 = res.storage[a.data].data
let p2 = res.storage[b.data].data
result = p1 < p2
res.cmp = peerCmp
result = res
proc len*[A, B](pool: PeerPool[A, B]): int =
## Returns number of registered peers in PeerPool ``pool``. This number
## includes all the peers (acquired and available).
result = len(pool.registry)
proc lenAvailable*[A, B](pool: PeerPool[A, B],
filter = {PeerType.Incoming,
PeerType.Outgoing}): int {.inline.} =
## Returns number of available peers in PeerPool ``pool`` which satisfy
## filter ``filter``.
if PeerType.Incoming in filter:
result = result + len(pool.incQueue)
if PeerType.Outgoing in filter:
result = result + len(pool.outQueue)
proc lenAcquired*[A, B](pool: PeerPool[A, B],
filter = {PeerType.Incoming,
PeerType.Outgoing}): int {.inline.} =
## Returns number of acquired peers in PeerPool ``pool`` which satisfy
## filter ``filter``.
if PeerType.Incoming in filter:
result = result + pool.acqIncPeersCount
if PeerType.Outgoing in filter:
result = result + pool.acqOutPeersCount
proc deletePeer*[A, B](pool: PeerPool[A, B], peer: A, force = false): bool =
## Remove ``peer`` from PeerPool ``pool``.
##
## Deletion occurs immediately only if the peer is available; otherwise it will
## be deleted when the peer is released. You can change this behavior
## with the ``force`` option.
mixin getKey
var key = getKey(peer)
if pool.registry.hasKey(key):
let pindex = pool.registry[key].data
var item = addr(pool.storage[pindex])
if (PeerFlags.Acquired in item[].flags):
if not(force):
item[].flags.incl(PeerFlags.DeleteOnRelease)
else:
if item[].peerType == PeerType.Incoming:
dec(pool.curIncPeersCount)
dec(pool.acqIncPeersCount)
elif item[].peerType == PeerType.Outgoing:
dec(pool.curOutPeersCount)
dec(pool.acqOutPeersCount)
# Cleanup storage with default item, and removing key from hashtable.
pool.storage[pindex] = PeerItem[A]()
pool.registry.del(key)
else:
if item[].peerType == PeerType.Incoming:
# If the peer is available, a copy of it is present in the heapqueue, so we
# need to remove it.
for i in 0 ..< len(pool.incQueue):
if pool.incQueue[i].data == pindex:
pool.incQueue.del(i)
break
dec(pool.curIncPeersCount)
elif item[].peerType == PeerType.Outgoing:
# If the peer is available, a copy of it is present in the heapqueue, so we
# need to remove it.
for i in 0 ..< len(pool.outQueue):
if pool.outQueue[i].data == pindex:
pool.outQueue.del(i)
break
dec(pool.curOutPeersCount)
# Cleanup storage with default item, and removing key from hashtable.
pool.storage[pindex] = PeerItem[A]()
pool.registry.del(key)
result = true
proc addPeer*[A, B](pool: PeerPool[A, B], peer: A, peerType: PeerType): bool =
## Add peer ``peer`` of type ``peerType`` to PeerPool ``pool``.
##
## Returns ``true`` on success.
mixin getKey, getFuture
if len(pool.registry) >= pool.maxPeersCount:
return false
var item = PeerItem[A](data: peer, peerType: peerType,
index: len(pool.storage))
var key = getKey(peer)
if not(pool.registry.hasKey(key)):
pool.storage.add(item)
var pitem = addr(pool.storage[^1])
let pindex = PeerIndex(data: item.index, cmp: pool.cmp)
pool.registry[key] = pindex
proc onPeerClosed(udata: pointer) {.gcsafe.} =
discard pool.deletePeer(peer)
pitem[].data.getFuture().addCallback(onPeerClosed)
if peerType == PeerType.Incoming:
if pool.curIncPeersCount >= pool.maxIncPeersCount:
return false
else:
inc(pool.curIncPeersCount)
pool.incQueue.push(pindex)
pool.incNeEvent.fire()
elif peerType == PeerType.Outgoing:
if pool.curOutPeersCount >= pool.maxOutPeersCount:
return false
else:
inc(pool.curOutPeersCount)
pool.outQueue.push(pindex)
pool.outNeEvent.fire()
result = true
proc addIncomingPeer*[A, B](pool: PeerPool[A, B], peer: A): bool {.inline.} =
## Add incoming peer ``peer`` to PeerPool ``pool``.
##
## Returns ``true`` on success.
result = pool.addPeer(peer, PeerType.Incoming)
proc addOutgoingPeer*[A, B](pool: PeerPool[A, B], peer: A): bool {.inline.} =
## Add outgoing peer ``peer`` to PeerPool ``pool``.
##
## Returns ``true`` on success.
result = pool.addPeer(peer, PeerType.Outgoing)
proc acquire*[A, B](pool: PeerPool[A, B],
filter = {PeerType.Incoming,
PeerType.Outgoing}): Future[A] {.async.} =
## Acquire peer from PeerPool ``pool``, which match the filter ``filter``.
doAssert(filter != {}, "Filter must not be empty")
while true:
var count = 0
if PeerType.Incoming in filter:
count = count + len(pool.incQueue)
if PeerType.Outgoing in filter:
count = count + len(pool.outQueue)
if count == 0:
await pool.waitEvent(filter)
else:
var item = pool.getItem(filter)
doAssert(PeerFlags.Acquired notin item[].flags)
item[].flags.incl(PeerFlags.Acquired)
result = item[].data
break
proc release*[A, B](pool: PeerPool[A, B], peer: A) =
## Release peer ``peer`` back to PeerPool ``pool``
mixin getKey
var key = getKey(peer)
var titem = pool.registry.getOrDefault(key, PeerIndex(data: -1))
if titem.data >= 0:
let pindex = titem.data
var item = addr(pool.storage[pindex])
if PeerFlags.Acquired in item[].flags:
item[].flags.excl(PeerFlags.Acquired)
if PeerFlags.DeleteOnRelease in item[].flags:
if item[].peerType == PeerType.Incoming:
dec(pool.curIncPeersCount)
dec(pool.acqIncPeersCount)
elif item[].peerType == PeerType.Outgoing:
dec(pool.curOutPeersCount)
dec(pool.acqOutPeersCount)
pool.storage[pindex] = PeerItem[A]()
pool.registry.del(key)
else:
if item[].peerType == PeerType.Incoming:
pool.incQueue.push(titem)
dec(pool.acqIncPeersCount)
elif item[].peerType == PeerType.Outgoing:
pool.outQueue.push(titem)
dec(pool.acqOutPeersCount)
pool.fireEvent(item[])
proc release*[A, B](pool: PeerPool[A, B], peers: openarray[A]) {.inline.} =
## Release array of peers ``peers`` back to PeerPool ``pool``.
for item in peers:
pool.release(item)
proc acquire*[A, B](pool: PeerPool[A, B],
number: int,
filter = {PeerType.Incoming,
PeerType.Outgoing}): Future[seq[A]] {.async.} =
## Acquire ``number`` number of peers from PeerPool ``pool``, which match the
## filter ``filter``.
doAssert(filter != {}, "Filter must not be empty")
var peers = newSeq[A]()
try:
if number > 0:
while true:
if len(peers) >= number:
break
var count = 0
if PeerType.Incoming in filter:
count = count + len(pool.incQueue)
if PeerType.Outgoing in filter:
count = count + len(pool.outQueue)
if count == 0:
await pool.waitEvent(filter)
else:
var item = pool.getItem(filter)
doAssert(PeerFlags.Acquired notin item[].flags)
item[].flags.incl(PeerFlags.Acquired)
peers.add(item[].data)
except CancelledError:
# If we got cancelled, we need to return all the acquired peers back to
# pool.
for item in peers:
pool.release(item)
peers.setLen(0)
raise
result = peers
proc acquireIncomingPeer*[A, B](pool: PeerPool[A, B]): Future[A] {.inline.} =
## Acquire single incoming peer from PeerPool ``pool``.
pool.acquire({PeerType.Incoming})
proc acquireOutgoingPeer*[A, B](pool: PeerPool[A, B]): Future[A] {.inline.} =
## Acquire single outgoing peer from PeerPool ``pool``.
pool.acquire({PeerType.Outgoing})
proc acquireIncomingPeers*[A, B](pool: PeerPool[A, B],
number: int): Future[seq[A]] {.inline.} =
## Acquire ``number`` number of incoming peers from PeerPool ``pool``.
pool.acquire(number, {PeerType.Incoming})
proc acquireOutgoingPeers*[A, B](pool: PeerPool[A, B],
number: int): Future[seq[A]] {.inline.} =
## Acquire ``number`` number of outgoing peers from PeerPool ``pool``.
pool.acquire(number, {PeerType.Outgoing})
iterator peers*[A, B](pool: PeerPool[A, B],
filter = {PeerType.Incoming,
PeerType.Outgoing}): A =
## Iterate over sorted list of peers.
##
## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values
## will be first.
var sorted = initHeapQueue[PeerIndex]()
for i in 0 ..< len(pool.storage):
if pool.storage[i].peerType in filter:
sorted.push(PeerIndex(data: i, cmp: pool.cmp))
while len(sorted) > 0:
let pindex = sorted.pop().data
yield pool.storage[pindex].data
iterator availablePeers*[A, B](pool: PeerPool[A, B],
filter = {PeerType.Incoming,
PeerType.Outgoing}): A =
## Iterate over sorted list of available peers.
##
## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values
## will be first.
var sorted = initHeapQueue[PeerIndex]()
for i in 0 ..< len(pool.storage):
if (PeerFlags.Acquired notin pool.storage[i].flags) and
(pool.storage[i].peerType in filter):
sorted.push(PeerIndex(data: i, cmp: pool.cmp))
while len(sorted) > 0:
let pindex = sorted.pop().data
yield pool.storage[pindex].data
iterator acquiredPeers*[A, B](pool: PeerPool[A, B],
filter = {PeerType.Incoming,
PeerType.Outgoing}): A =
## Iterate over sorted list of acquired (non-available) peers.
##
## All peers will be sorted by equation `>`(Peer1, Peer2), so biggest values
## will be first.
var sorted = initHeapQueue[PeerIndex]()
for i in 0 ..< len(pool.storage):
if (PeerFlags.Acquired in pool.storage[i].flags) and
(pool.storage[i].peerType in filter):
sorted.push(PeerIndex(data: i, cmp: pool.cmp))
while len(sorted) > 0:
let pindex = sorted.pop().data
yield pool.storage[pindex].data
proc `[]`*[A, B](pool: PeerPool[A, B], key: B): A {.inline.} =
## Retrieve peer with key ``key`` from PeerPool ``pool``.
let pindex = pool.registry[key]
result = pool.storage[pindex.data]
proc `[]`*[A, B](pool: var PeerPool[A, B], key: B): var A {.inline.} =
## Retrieve peer with key ``key`` from PeerPool ``pool``.
let pindex = pool.registry[key]
result = pool.storage[pindex.data].data
proc hasPeer*[A, B](pool: PeerPool[A, B], key: B): bool {.inline.} =
## Returns ``true`` if peer with ``key`` present in PeerPool ``pool``.
result = pool.registry.hasKey(key)
proc getOrDefault*[A, B](pool: PeerPool[A, B], key: B): A {.inline.} =
## Retrieves the peer from PeerPool ``pool`` using key ``key``. If peer is
## not present, default initialization value for type ``A`` is returned
## (e.g. 0 for any integer type).
let pindex = pool.registry.getOrDefault(key, PeerIndex(data: -1))
if pindex.data >= 0:
result = pool.storage[pindex.data].data
proc getOrDefault*[A, B](pool: PeerPool[A, B], key: B,
default: A): A {.inline.} =
## Retrieves the peer from PeerPool ``pool`` using key ``key``. If peer is
## not present, default value ``default`` is returned.
let pindex = pool.registry.getOrDefault(key, PeerIndex(data: -1))
if pindex.data >= 0:
result = pool.storage[pindex.data].data
else:
result = default
proc clear*[A, B](pool: PeerPool[A, B]) =
## Performs PeerPool's ``pool`` storage and counters reset.
pool.incQueue.clear()
pool.outQueue.clear()
pool.registry.clear()
for i in 0 ..< len(pool.storage):
pool.storage[i] = PeerItem[A]()
pool.storage.setLen(0)
pool.curIncPeersCount = 0
pool.curOutPeersCount = 0
pool.acqIncPeersCount = 0
pool.acqOutPeersCount = 0
proc clearSafe*[A, B](pool: PeerPool[A, B]) {.async.} =
## Performs "safe" clear. Safe means that it first acquires all the peers
## in PeerPool, and only after that it will reset storage.
var acquired = newSeq[A]()
while len(pool.registry) > len(acquired):
var peers = await pool.acquire(len(pool.registry) - len(acquired))
for item in peers:
acquired.add(item)
pool.clear()
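
For reference, here is a minimal usage sketch of the new `PeerPool`. It is illustrative only: the `TestPeer` type, its `getKey`/`getFuture`/`<` helpers and the `peer_pool` import path are assumptions made for the example, not part of this commit (the real pool is instantiated with the networking `Peer` type, which provides equivalents).

```nim
import chronos
import peer_pool  # assumed import path for beacon_chain/peer_pool.nim

type
  TestPeer = ref object
    id: int            # used as the registry key
    score: int         # used for ordering
    fut: Future[void]  # completing it triggers automatic removal from the pool

proc getKey(p: TestPeer): int = p.id                 # registry key (type B)
proc getFuture(p: TestPeer): Future[void] = p.fut    # hooked via addCallback in addPeer
proc `<`(a, b: TestPeer): bool = a.score < b.score   # ordering used by the heap

proc demo() {.async.} =
  var pool = newPeerPool[TestPeer, int](maxPeers = 10)
  discard pool.addIncomingPeer(TestPeer(id: 1, score: 5, fut: newFuture[void]()))
  discard pool.addOutgoingPeer(TestPeer(id: 2, score: 9, fut: newFuture[void]()))
  let best = await pool.acquire()   # returns the highest-ranked available peer
  echo "acquired peer ", best.id
  pool.release(best)                # hand it back so it can be acquired again

waitFor demo()
```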

View File

@ -258,8 +258,8 @@ func is_valid_genesis_state*(state: BeaconState): bool =
return false
return true
# TODO candidate for spec?
# https://github.com/ethereum/eth2.0-specs/blob/0.5.1/specs/core/0_beacon-chain.md#on-genesis
# TODO this is now a non-spec helper function, and it's not really accurate
# so only usable/used in research/ and tests/
func get_initial_beacon_block*(state: BeaconState): BeaconBlock =
BeaconBlock(
slot: GENESIS_SLOT,
@ -300,6 +300,15 @@ proc process_registry_updates*(state: var BeaconState) =
## Process activation eligibility and ejections
## Try to avoid caching here, since this could easily become undefined
# Make visible, e.g.,
# https://github.com/status-im/nim-beacon-chain/pull/608
# https://github.com/sigp/lighthouse/pull/657
let epoch = get_current_epoch(state)
trace "process_registry_updates validator balances",
balances=state.balances,
active_validator_indices=get_active_validator_indices(state, epoch),
epoch=epoch
for index, validator in state.validators:
if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
validator.effective_balance == MAX_EFFECTIVE_BALANCE:

View File

@ -33,10 +33,10 @@ type
chronicles.formatIt Eth2Digest:
mixin toHex
it.data.toHex(true)[0..7]
it.data[0..3].toHex(true)
func shortLog*(x: Eth2Digest): string =
x.data.toHex(true)[0..7]
x.data[0..3].toHex(true)
# TODO: expose an in-place digest function
# when hashing in loop or into a buffer

View File

@ -10,7 +10,7 @@
import
endians, stew/shims/macros, options, algorithm, options,
stew/[bitops2, bitseqs, objects, varints, ptrops], stint,
stew/[bitops2, bitseqs, objects, varints, ptrops, ranges/ptr_arith], stint,
faststreams/input_stream, serialization, serialization/testing/tracing,
nimcrypto/sha2, blscurve, eth/common,
./spec/[crypto, datatypes, digest],
@ -139,7 +139,7 @@ func writeFixedSized(c: var WriteCursor, x: auto) =
c.append x
else:
for elem in x:
trs "WRITING FIXED SIZE ARRAY ELEMENENT"
trs "WRITING FIXED SIZE ARRAY ELEMENT"
c.writeFixedSized toSszType(elem)
elif x is tuple|object:
enumInstanceSerializedFields(x, fieldName, field):
@ -269,9 +269,6 @@ template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
mixin fromSszBytes
fromSszBytes(T, bytes)
func fromSszBytes(T: type BlsCurveType, bytes: openarray[byte]): auto =
init(T, bytes)
proc readValue*(r: var SszReader, val: var auto) =
val = readSszValue(r.stream.readBytes(r.stream.endPos), val.type)
@ -508,7 +505,19 @@ func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest
mixInLength contentsHash, x.len
func hashTreeRootImpl[T](x: T): Eth2Digest =
when (T is BasicType) or (when T is array: ElemType(T) is BasicType else: false):
when T is uint64:
trs "UINT64; LITTLE-ENDIAN IDENTITY MAPPING"
when system.cpuEndian == bigEndian:
littleEndian64(addr result.data[0], x.unsafeAddr)
else:
let valueAddr = unsafeAddr x
result.data[0..7] = makeOpenArray(cast[ptr byte](valueAddr), 8)
elif (when T is array: ElemType(T) is byte and
sizeof(T) == sizeof(Eth2Digest) else: false):
# TODO is this sizeof comparison guaranteed? it's whole structure vs field
trs "ETH2DIGEST; IDENTITY MAPPING"
Eth2Digest(data: x)
elif (T is BasicType) or (when T is array: ElemType(T) is BasicType else: false):
trs "FIXED TYPE; USE CHUNK STREAM"
merkelizeSerializedChunks x
elif T is string or (when T is (seq|openarray): ElemType(T) is BasicType else: false):

View File

@ -233,5 +233,6 @@ proc handleInitialStatus(peer: Peer,
error "Did not get any blocks from peer. Aborting sync."
break
except CatchableError:
warn "Failed to sync with peer", peer, err = getCurrentExceptionMsg()
except CatchableError as e:
warn "Failed to sync with peer", peer, err = e.msg

View File

@ -52,16 +52,19 @@ proc generateDeposits*(totalValidators: int,
proc sendDeposits*(
deposits: seq[Deposit],
depositWeb3Url, depositContractAddress, privateKey: string) {.async.} =
let
web3 = await newWeb3(depositWeb3Url)
contractAddress = Address.fromHex(depositContractAddress)
eth1Addresses = await web3.provider.eth_accounts()
var web3 = await newWeb3(depositWeb3Url)
if privateKey.len != 0:
web3.privateKey = initPrivateKey(privateKey)
let eth1Addresses = await web3.provider.eth_accounts()
if eth1Addresses.len == 0:
error "Eth1 account rejected"
return
let contractAddress = Address.fromHex(depositContractAddress)
for i, dp in deposits:
web3.defaultAccount = eth1Addresses[i]
web3.defaultAccount = eth1Addresses[0]
let depositContract = web3.contractSender(DepositContract, contractAddress)
discard await depositContract.deposit(
Bytes48(dp.data.pubKey.getBytes()),

5
connect-to-testnet Executable file
View File

@ -0,0 +1,5 @@
#!/bin/bash
cd $(dirname "$0")
./env.sh nim scripts/connect_to_testnet.nims $1

6
connect-to-testnet.cmd Normal file
View File

@ -0,0 +1,6 @@
@echo off
cd /D "%~dp0"
vendor/nimbus-build-system/vendor/Nim/bin/nim scripts/connect_to_testnet.nims %1

View File

@ -26,7 +26,7 @@ RUN cd /root/nim-beacon-chain \
&& git fetch \
&& git reset --hard ${GIT_REVISION} \
&& make -j$(nproc) update \
&& make LOG_LEVEL=DEBUG NIMFLAGS="-d:debug -d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node
&& make LOG_LEVEL=TRACE NIMFLAGS="-d:debug -d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node
# --------------------------------- #
# Starting new image to reduce size #

BIN
media/monitoring.png Normal file

Binary image (99 KiB), not shown.

View File

@ -34,3 +34,5 @@
--debugger:native
@end
-d:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9

View File

@@ -57,7 +57,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 11, # One per shard is minimum
json_interval = SLOTS_PER_EPOCH,
prefix = 0,
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.75,
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.73,
validate = true):
let
flags = if validate: {} else: {skipValidation}
@@ -83,6 +83,10 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
else:
write(stdout, ".")
# TODO doAssert against this up-front
# indexed attestation: validator index beyond max validators per committee
# len(indices) <= MAX_VALIDATORS_PER_COMMITTEE
for i in 0..<slots:
maybeWrite()
verifyConsensus(state, attesterRatio)
@@ -111,15 +115,13 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
# work for every slot - we'll randomize it deterministically to give
# some variation
let
epoch = compute_epoch_at_slot(state.slot)
target_slot = state.slot + MIN_ATTESTATION_INCLUSION_DELAY - 1
scass = withTimerRet(timers[tShuffle]):
mapIt(
0'u64 .. (get_committee_count_at_slot(state, state.slot) *
SLOTS_PER_EPOCH - 1),
get_beacon_committee(state, epoch.compute_start_slot_at_epoch + (it mod SLOTS_PER_EPOCH),
it div SLOTS_PER_EPOCH, cache))
0'u64 ..< get_committee_count_at_slot(state, target_slot),
get_beacon_committee(state, target_slot, it, cache))
for scas in scass:
for i, scas in scass:
var
attestation: Attestation
first = true
@@ -131,11 +133,13 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
if (rand(r, high(int)).float * attesterRatio).int <= high(int):
if first:
attestation =
makeAttestation(state, latest_block_root, v, cache, flags)
makeAttestation(state, latest_block_root, scas, target_slot,
i.uint64, v, cache, flags)
first = false
else:
attestation.combine(
makeAttestation(state, latest_block_root, v, cache, flags),
makeAttestation(state, latest_block_root, scas, target_slot,
i.uint64, v, cache, flags),
flags)
if not first:
@@ -145,15 +149,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
let target_slot =
attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY - 1
## In principle, should enumerate possible shard/slot combinations by
## inverting get_attestation_data_slot(...), but this works. Could be
## filtering earlier if we know that this attestation's being created
## too late to be useful, as well.
if target_slot > attestations_idx:
var target_slot_attestations =
getOrDefault(attestations, target_slot)
target_slot_attestations.add attestation
attestations[target_slot] = target_slot_attestations
doAssert target_slot > attestations_idx
var target_slot_attestations =
getOrDefault(attestations, target_slot)
target_slot_attestations.add attestation
attestations[target_slot] = target_slot_attestations
flushFile(stdout)

View File

@@ -55,6 +55,7 @@ cli do (testnetName {.argument.}: string):
let
dataDirName = testnetName.replace("/", "_")
dataDir = buildDir / "data" / dataDirName
validatorsDir = dataDir / "validators"
beaconNodeBinary = buildDir / "beacon_node_" & dataDirName
nimFlags = "-d:chronicles_log_level=DEBUG " & getEnv("NIM_PARAMS")
@@ -73,6 +74,31 @@ cli do (testnetName {.argument.}: string):
cd rootDir
exec &"""nim c {nimFlags} -d:"const_preset={preset}" -o:"{beaconNodeBinary}" beacon_chain/beacon_node.nim"""
if depositContractOpt.len > 0 and not system.dirExists(validatorsDir):
mode = Silent
echo "Would you like to become a validator (you'll need access to 32 GoETH)? [Yn]"
while true:
let answer = readLineFromStdin()
if answer in ["y", "Y", "yes", ""]:
echo "Please enter your Eth1 private key in hex form (e.g. 0x1a2...f3c). Hit Enter to cancel."
let privKey = readLineFromStdin()
if privKey.len > 0:
mkDir validatorsDir
exec replace(&"""{beaconNodeBinary} makeDeposits
--random-deposits=1
--deposits-dir="{validatorsDir}"
--deposit-private-key={privKey}
--web3-url=wss://goerli.infura.io/ws/v3/809a18497dd74102b5f37d25aae3c85a
{depositContractOpt}
""", "\n", " ")
break
elif answer in ["n", "N", "no"]:
break
else:
echo "Please answer 'yes' or 'no'"
mode = Verbose
exec replace(&"""{beaconNodeBinary}
--data-dir="{dataDir}"
--bootstrap-file="{testnetDir/bootstrapFile}"

View File

@@ -20,7 +20,8 @@ import # Unit test
./test_state_transition,
./test_sync_protocol,
# ./test_validator # Empty!
./test_zero_signature
./test_zero_signature,
./test_peer_pool
import # Refactor state transition unit tests
./spec_block_processing/test_genesis,

View File

@@ -12,14 +12,16 @@ import
# Specs
../../beacon_chain/spec/[datatypes, crypto]
# this is being indexed inside "mock_deposits.nim" by a value up to `validatorCount`
# which is `num_validators` which is `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT`
let MockPrivKeys* = block:
var privkeys: array[SLOTS_PER_EPOCH * 16, ValidatorPrivKey]
var privkeys: array[MIN_GENESIS_ACTIVE_VALIDATOR_COUNT, ValidatorPrivKey]
for pk in privkeys.mitems():
pk = newPrivKey()
privkeys
let MockPubKeys* = block:
var pubkeys: array[SLOTS_PER_EPOCH * 16, ValidatorPubKey]
var pubkeys: array[MIN_GENESIS_ACTIVE_VALIDATOR_COUNT, ValidatorPubKey]
for idx, privkey in MockPrivKeys:
pubkeys[idx] = pubkey(privkey)
pubkeys

View File

@@ -20,7 +20,7 @@ cd - &>/dev/null
# When changing these, also update the readme section on running simulation
# so that the run_node example is correct!
NUM_VALIDATORS=${VALIDATORS:-192}
TOTAL_NODES=${NODES:-2}
TOTAL_NODES=${NODES:-4}
TOTAL_USER_NODES=${USER_NODES:-0}
TOTAL_SYSTEM_NODES=$(( TOTAL_NODES - TOTAL_USER_NODES ))
MASTER_NODE=$(( TOTAL_NODES - 1 ))

View File

@@ -17,8 +17,6 @@ import
unittest,
# Specs
../../beacon_chain/spec/datatypes,
# Internals
../../beacon_chain/ssz,
# Mock helpers
../mocking/mock_genesis,
../testutil

View File

@@ -9,7 +9,6 @@
import
options, sequtils, unittest,
chronicles,
./testutil,
../beacon_chain/spec/[beaconstate, datatypes, digest],
../beacon_chain/[beacon_node_types, block_pool, beacon_chain_db, extras, ssz]
@@ -95,3 +94,19 @@ suite "Block pool processing" & preset():
hash_tree_root(state.data.data) == state.data.root
pool2.get(b1Root).isSome()
pool2.get(b2Root).isSome()
test "isAncestorOf sanity" & preset():
let
a = BlockRef(slot: Slot(1))
b = BlockRef(slot: Slot(2), parent: a)
c = BlockRef(slot: Slot(3), parent: b)
check:
a.isAncestorOf(a)
a.isAncestorOf(b)
a.isAncestorOf(c)
b.isAncestorOf(c)
not c.isAncestorOf(a)
not c.isAncestorOf(b)
not b.isAncestorOf(a)

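Note: the new "isAncestorOf sanity" test above pins down the ancestry relation on BlockRef. As a rough, hedged mental model only (DemoBlockRef and this isAncestorOf are illustrative stand-ins, not the block_pool implementation), the relation amounts to walking parent links from the descendant back towards genesis, with every block counting as its own ancestor:

# Illustrative sketch, mirroring the semantics the test above checks.
type DemoBlockRef = ref object
  slot: int
  parent: DemoBlockRef

proc isAncestorOf(a, b: DemoBlockRef): bool =
  ## True when `a` lies on the parent chain of `b`; a block is its own ancestor.
  var cur = b
  while cur != nil:
    if cur == a:
      return true
    cur = cur.parent
  false

when isMainModule:
  let
    a = DemoBlockRef(slot: 1)
    b = DemoBlockRef(slot: 2, parent: a)
    c = DemoBlockRef(slot: 3, parent: b)
  doAssert a.isAncestorOf(c) and b.isAncestorOf(c) and not c.isAncestorOf(a)
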
461
tests/test_peer_pool.nim Normal file
View File

@@ -0,0 +1,461 @@
# beacon_chain
# Copyright (c) 2019 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
import
unittest, random, heapqueue, tables, strutils,
chronos,
../beacon_chain/peer_pool
type
PeerTestID* = string
PeerTest* = object
id: PeerTestID
weight: int
future: Future[void]
proc getKey*(peer: PeerTest): PeerTestID =
result = peer.id
proc getFuture*(peer: PeerTest): Future[void] =
result = peer.future
proc `<`*(a, b: PeerTest): bool =
result = `<`(a.weight, b.weight)
proc init*(t: typedesc[PeerTest], id: string = "",
weight: int = 0): PeerTest =
result = PeerTest(id: id, weight: weight, future: newFuture[void]())
proc close*(peer: PeerTest) =
peer.future.complete()
suite "PeerPool testing suite":
test "addPeer() test":
const peersCount = [
[10, 5, 5, 10, 5, 5],
[-1, 5, 5, 10, 5, 5],
[-1, -1, -1, 10, 5, 5]
]
for item in peersCount:
var pool = newPeerPool[PeerTest, PeerTestID](item[0], item[1], item[2])
for i in 0 ..< item[4]:
var peer = PeerTest.init("idInc" & $i)
check pool.addIncomingPeer(peer) == true
for i in 0 ..< item[5]:
var peer = PeerTest.init("idOut" & $i)
check pool.addOutgoingPeer(peer) == true
var peer = PeerTest.init("idCheck")
if item[1] != -1:
for i in 0 ..< item[3]:
check pool.addIncomingPeer(peer) == false
if item[2] != -1:
for i in 0 ..< item[3]:
check pool.addOutgoingPeer(peer) == false
check:
pool.lenAvailable == item[3]
pool.lenAvailable({PeerType.Incoming}) == item[4]
pool.lenAvailable({PeerType.Outgoing}) == item[5]
test "Acquire from empty pool":
var pool0 = newPeerPool[PeerTest, PeerTestID]()
var pool1 = newPeerPool[PeerTest, PeerTestID]()
var pool2 = newPeerPool[PeerTest, PeerTestID]()
var itemFut01 = pool0.acquire({PeerType.Incoming})
var itemFut02 = pool0.acquire({PeerType.Outgoing})
var itemFut03 = pool0.acquire({PeerType.Incoming, PeerType.Outgoing})
var itemFut04 = pool0.acquire()
var itemFut05 = pool0.acquire(5, {PeerType.Incoming})
var itemFut06 = pool0.acquire(5, {PeerType.Outgoing})
var itemFut07 = pool0.acquire(5, {PeerType.Incoming, PeerType.Outgoing})
var itemFut08 = pool0.acquire(5)
check:
itemFut01.finished == false
itemFut02.finished == false
itemFut03.finished == false
itemFut04.finished == false
itemFut05.finished == false
itemFut06.finished == false
itemFut07.finished == false
itemFut08.finished == false
var peer11 = PeerTest.init("peer11")
var peer12 = PeerTest.init("peer12")
var peer21 = PeerTest.init("peer21")
var peer22 = PeerTest.init("peer22")
check:
pool1.addPeer(peer11, PeerType.Incoming) == true
pool1.addPeer(peer12, PeerType.Incoming) == true
pool2.addPeer(peer21, PeerType.Outgoing) == true
pool2.addPeer(peer22, PeerType.Outgoing) == true
var itemFut11 = pool1.acquire({PeerType.Outgoing})
var itemFut12 = pool1.acquire(10, {PeerType.Outgoing})
var itemFut13 = pool1.acquire(3, {PeerType.Incoming})
var itemFut14 = pool1.acquire({PeerType.Incoming})
var itemFut21 = pool2.acquire({PeerType.Incoming})
var itemFut22 = pool2.acquire(10, {PeerType.Incoming})
var itemFut23 = pool2.acquire(3, {PeerType.Outgoing})
var itemFut24 = pool1.acquire({PeerType.Outgoing})
check:
itemFut11.finished == false
itemFut12.finished == false
itemFut13.finished == false
itemFut14.finished == false
itemFut21.finished == false
itemFut22.finished == false
itemFut23.finished == false
itemFut24.finished == false
test "Acquire/Sorting and consistency test":
const
TestsCount = 1000
MaxNumber = 1_000_000
var pool = newPeerPool[PeerTest, PeerTestID]()
proc testAcquireRelease(): Future[int] {.async.} =
var weight: int
var incoming, outgoing, total: seq[PeerTest]
var incWeight1, outWeight1, totWeight1: int
incoming.setLen(0)
for i in 0 ..< pool.lenAvailable({PeerType.Incoming}):
var peer = await pool.acquire({PeerType.Incoming})
incoming.add(peer)
outgoing.setLen(0)
for i in 0 ..< pool.lenAvailable({PeerType.Outgoing}):
var peer = await pool.acquire({PeerType.Outgoing})
outgoing.add(peer)
weight = MaxNumber + 1
incWeight1 = 0
for i in 0 ..< len(incoming):
incWeight1 = incWeight1 + incoming[i].weight
if incoming[i].weight > weight:
raise newException(ValueError, "Incoming items are not sorted")
weight = incoming[i].weight
pool.release(incoming[i])
weight = MaxNumber + 1
outWeight1 = 0
for i in 0..<len(outgoing):
outWeight1 = outWeight1 + outgoing[i].weight
if outgoing[i].weight > weight:
raise newException(ValueError, "Outgoing items are not sorted")
weight = outgoing[i].weight
pool.release(outgoing[i])
for i in 0 ..< pool.lenAvailable():
var peer = await pool.acquire()
total.add(peer)
weight = MaxNumber + 1
totWeight1 = 0
for i in 0 ..< len(total):
totWeight1 = totWeight1 + total[i].weight
if total[i].weight > weight:
raise newException(ValueError, "Outgoing items are not sorted")
weight = total[i].weight
pool.release(total[i])
doAssert(totWeight1 == incWeight1 + outWeight1)
doAssert(len(total) == len(incoming) + len(outgoing))
result = TestsCount
randomize()
for i in 0 ..< TestsCount:
var peer = PeerTest.init("peer" & $i, rand(MaxNumber))
# echo repr peer
if rand(100) mod 2 == 0:
check pool.addPeer(peer, PeerType.Incoming) == true
else:
check pool.addPeer(peer, PeerType.Outgoing) == true
check waitFor(testAcquireRelease()) == TestsCount
test "deletePeer() test":
proc testDeletePeer(): Future[bool] {.async.} =
var pool = newPeerPool[PeerTest, PeerTestID]()
var peer = PeerTest.init("deletePeer")
## Delete available peer
doAssert(pool.addIncomingPeer(peer) == true)
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 1)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 1)
doAssert(pool.deletePeer(peer) == true)
doAssert(pool.len == 0)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
## Delete acquired peer
peer = PeerTest.init("closingPeer")
doAssert(pool.addIncomingPeer(peer) == true)
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 1)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 1)
var apeer = await pool.acquire()
doAssert(pool.deletePeer(peer) == true)
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
pool.release(apeer)
doAssert(pool.len == 0)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
## Force delete acquired peer
peer = PeerTest.init("closingPeer")
doAssert(pool.addIncomingPeer(peer) == true)
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 1)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 1)
apeer = await pool.acquire()
doAssert(pool.deletePeer(peer, true) == true)
doAssert(pool.len == 0)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
result = true
check waitFor(testDeletePeer()) == true
test "Peer lifetime test":
proc testPeerLifetime(): Future[bool] {.async.} =
var pool = newPeerPool[PeerTest, PeerTestID]()
var peer = PeerTest.init("closingPeer")
## Close available peer
doAssert(pool.addIncomingPeer(peer) == true)
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 1)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 1)
close(peer)
# We need to wait for the next scheduler callback
await sleepAsync(1.milliseconds)
doAssert(pool.len == 0)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
## Close acquired peer
peer = PeerTest.init("closingPeer")
doAssert(pool.addIncomingPeer(peer) == true)
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 1)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 1)
var apeer = await pool.acquire()
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
close(peer)
await sleepAsync(1.milliseconds)
doAssert(pool.len == 1)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
pool.release(apeer)
doAssert(pool.len == 0)
doAssert(pool.lenAvailable == 0)
doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
doAssert(pool.lenAvailable({PeerType.Incoming}) == 0)
result = true
check waitFor(testPeerLifetime()) == true
test "Safe/Clear test":
var pool = newPeerPool[PeerTest, PeerTestID]()
var peer1 = PeerTest.init("peer1", 10)
var peer2 = PeerTest.init("peer2", 9)
var peer3 = PeerTest.init("peer3", 8)
check:
pool.addPeer(peer1, PeerType.Incoming) == true
pool.addPeer(peer2, PeerType.Incoming) == true
pool.addPeer(peer3, PeerType.Outgoing) == true
pool.lenAvailable == 3
pool.lenAvailable({PeerType.Outgoing}) == 1
pool.lenAvailable({PeerType.Incoming}) == 2
pool.lenAcquired == 0
pool.len == 3
pool.clear()
check:
pool.lenAvailable == 0
pool.lenAvailable({PeerType.Outgoing}) == 0
pool.lenAvailable({PeerType.Incoming}) == 0
pool.lenAcquired == 0
pool.len == 0
check:
pool.addPeer(peer1, PeerType.Incoming) == true
pool.addPeer(peer2, PeerType.Incoming) == true
pool.addPeer(peer3, PeerType.Outgoing) == true
pool.lenAvailable == 3
pool.lenAvailable({PeerType.Outgoing}) == 1
pool.lenAvailable({PeerType.Incoming}) == 2
pool.lenAcquired == 0
pool.len == 3
proc testConsumer() {.async.} =
var p = await pool.acquire()
await sleepAsync(100.milliseconds)
pool.release(p)
proc testClose(): Future[bool] {.async.} =
await pool.clearSafe()
result = true
asyncCheck testConsumer()
check waitFor(testClose()) == true
test "Access peers by key test":
var pool = newPeerPool[PeerTest, PeerTestID]()
var peer1 = PeerTest.init("peer1", 10)
var peer2 = PeerTest.init("peer2", 9)
var peer3 = PeerTest.init("peer3", 8)
check:
pool.addPeer(peer1, PeerType.Incoming) == true
pool.addPeer(peer2, PeerType.Incoming) == true
pool.addPeer(peer3, PeerType.Outgoing) == true
pool.hasPeer("peer4") == false
pool.hasPeer("peer1") == true
pool.hasPeer("peer2") == true
pool.hasPeer("peer3") == true
pool.getOrDefault("peer4").id == ""
pool.getOrDefault("peer4", PeerTest.init("peer5")).id == "peer5"
pool.getOrDefault("peer1").id == "peer1"
pool.getOrDefault("peer1", PeerTest.init("peer5")).id == "peer1"
pool["peer1"].id == "peer1"
pool["peer1"].weight == 10
pool["peer2"].id == "peer2"
pool["peer2"].weight == 9
pool["peer3"].id == "peer3"
pool["peer3"].weight == 8
var ppeer = addr(pool["peer1"])
ppeer[].weight = 100
check pool["peer1"].weight == 100
test "Iterators test":
var pool = newPeerPool[PeerTest, PeerTestID]()
var peer1 = PeerTest.init("peer1", 10)
var peer2 = PeerTest.init("peer2", 9)
var peer3 = PeerTest.init("peer3", 8)
var peer4 = PeerTest.init("peer4", 7)
var peer5 = PeerTest.init("peer5", 6)
var peer6 = PeerTest.init("peer6", 5)
var peer7 = PeerTest.init("peer7", 4)
var peer8 = PeerTest.init("peer8", 3)
var peer9 = PeerTest.init("peer9", 2)
check:
pool.addPeer(peer2, PeerType.Incoming) == true
pool.addPeer(peer3, PeerType.Incoming) == true
pool.addPeer(peer1, PeerType.Incoming) == true
pool.addPeer(peer4, PeerType.Incoming) == true
pool.addPeer(peer5, PeerType.Outgoing) == true
pool.addPeer(peer8, PeerType.Outgoing) == true
pool.addPeer(peer7, PeerType.Outgoing) == true
pool.addPeer(peer6, PeerType.Outgoing) == true
pool.addPeer(peer9, PeerType.Outgoing) == true
var total1, total2, total3: seq[PeerTest]
var avail1, avail2, avail3: seq[PeerTest]
var acqui1, acqui2, acqui3: seq[PeerTest]
for item in pool.peers():
total1.add(item)
for item in pool.peers({PeerType.Incoming}):
total2.add(item)
for item in pool.peers({PeerType.Outgoing}):
total3.add(item)
for item in pool.availablePeers():
avail1.add(item)
for item in pool.availablePeers({PeerType.Incoming}):
avail2.add(item)
for item in pool.availablePeers({PeerType.Outgoing}):
avail3.add(item)
for item in pool.acquiredPeers():
acqui1.add(item)
for item in pool.acquiredPeers({PeerType.Incoming}):
acqui2.add(item)
for item in pool.acquiredPeers({PeerType.Outgoing}):
acqui3.add(item)
check:
len(total1) == 9
len(total2) == 4
len(total3) == 5
len(avail1) == 9
len(avail2) == 4
len(avail3) == 5
len(acqui1) == 0
len(acqui2) == 0
len(acqui3) == 0
discard waitFor(pool.acquire({PeerType.Incoming}))
discard waitFor(pool.acquire({PeerType.Incoming}))
discard waitFor(pool.acquire({PeerType.Outgoing}))
total1.setLen(0); total2.setLen(0); total3.setLen(0)
avail1.setLen(0); avail2.setLen(0); avail3.setLen(0)
acqui1.setLen(0); acqui2.setLen(0); acqui3.setLen(0)
for item in pool.peers():
total1.add(item)
for item in pool.peers({PeerType.Incoming}):
total2.add(item)
for item in pool.peers({PeerType.Outgoing}):
total3.add(item)
for item in pool.availablePeers():
avail1.add(item)
for item in pool.availablePeers({PeerType.Incoming}):
avail2.add(item)
for item in pool.availablePeers({PeerType.Outgoing}):
avail3.add(item)
for item in pool.acquiredPeers():
acqui1.add(item)
for item in pool.acquiredPeers({PeerType.Incoming}):
acqui2.add(item)
for item in pool.acquiredPeers({PeerType.Outgoing}):
acqui3.add(item)
check:
len(total1) == 9
len(total2) == 4
len(total3) == 5
len(avail1) == 6
len(avail2) == 2
len(avail3) == 4
len(acqui1) == 3
len(acqui2) == 2
len(acqui3) == 1

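Note: the test module above also serves as a reference for the PeerPool API. Below is a minimal, self-contained usage sketch (hedged illustration only: DemoPeer, initDemoPeer and demo are made-up names, while newPeerPool, addPeer, acquire, release and PeerType are the calls exercised by the tests; the import path assumes the sketch lives next to the test module):

# Illustrative usage sketch of beacon_chain/peer_pool, not part of this change.
import chronos
import ../beacon_chain/peer_pool

type
  DemoPeer = object
    id: string
    weight: int
    future: Future[void]

proc getKey*(peer: DemoPeer): string = peer.id
proc getFuture*(peer: DemoPeer): Future[void] = peer.future
proc `<`*(a, b: DemoPeer): bool = a.weight < b.weight

proc initDemoPeer(id: string, weight: int): DemoPeer =
  DemoPeer(id: id, weight: weight, future: newFuture[void]())

proc demo() {.async.} =
  var pool = newPeerPool[DemoPeer, string]()
  discard pool.addPeer(initDemoPeer("light", 1), PeerType.Incoming)
  discard pool.addPeer(initDemoPeer("heavy", 9), PeerType.Outgoing)
  # acquire() hands out the best available peer (highest weight, per the
  # sorting test above) and keeps it marked as acquired until release().
  let peer = await pool.acquire()
  try:
    echo "syncing with ", peer.id
  finally:
    pool.release(peer)

waitFor demo()
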
View File

@@ -8,7 +8,7 @@
{.used.}
import
options, unittest, chronicles,
unittest,
./testutil,
../beacon_chain/spec/[beaconstate, datatypes, digest, validator],
../beacon_chain/[state_transition, ssz]

View File

@@ -164,11 +164,14 @@ proc find_beacon_committee(
proc makeAttestation*(
state: BeaconState, beacon_block_root: Eth2Digest,
validator_index: ValidatorIndex, cache: var StateCache,
committee: seq[ValidatorIndex], slot: Slot, index: uint64,
validator_index: auto, cache: var StateCache,
flags: UpdateFlags = {}): Attestation =
# Avoids state_sim silliness; as it's responsible for all validators,
# transforming from monotonic enumerable index -> committee index ->
# monotonic enumerable index, is wasteful and slow. Most test callers
# want ValidatorIndex, so that's supported too.
let
(committee, slot, index) =
find_beacon_committee(state, validator_index, cache)
validator = state.validators[validator_index]
sac_index = committee.find(validator_index)
data = makeAttestationData(state, slot, index, beacon_block_root)
@@ -197,6 +200,15 @@ proc makeAttestation*(
signature: sig
)
proc makeAttestation*(
state: BeaconState, beacon_block_root: Eth2Digest,
validator_index: ValidatorIndex, cache: var StateCache,
flags: UpdateFlags = {}): Attestation =
let (committee, slot, index) =
find_beacon_committee(state, validator_index, cache)
makeAttestation(state, beacon_block_root, committee, slot, index,
validator_index, cache, flags)
proc makeTestDB*(tailState: BeaconState, tailBlock: BeaconBlock): BeaconChainDB =
result = init(BeaconChainDB, newMemoryDB())
BlockPool.preInit(result, tailState, tailBlock)

2
vendor/nim-chronos vendored

@@ -1 +1 @@
Subproject commit 2518a4161f723405004c3e2a743fa08ec67404dc
Subproject commit c39c0696806a0ef09bc90e477ea6b177d2824699

@@ -1 +1 @@
Subproject commit 7a607bfd3d83be86f153517636370b76f3d7cf25
Subproject commit 0bdfb3786cad3a1c045934a1d003dedd3498074d

2
vendor/nim-eth vendored

@@ -1 +1 @@
Subproject commit a54fdc8073be75e1e7f3273fc7382b580d6a8339
Subproject commit 4976bd9fb95c36df3688867a4e2fe7bbfed8f966

2
vendor/nim-json-rpc vendored

@@ -1 +1 @@
Subproject commit b6336cb7257a02a1074aaab7891150f2ecc83fc9
Subproject commit fc0665f88a4f24e9f51fe059ad4e943f3eed927e

2
vendor/nim-libp2p vendored

@@ -1 +1 @@
Subproject commit f3fc763895986e96d53349e0696c897303104765
Subproject commit f9eed172d4a61f142e596f891f371ecf7c21b415

@@ -1 +1 @@
Subproject commit 68ed95957d614fa91d0b20bdeea3cf876c46c120
Subproject commit e3fd23e90f661f51f028f78202eb4de1e6128ea9

@@ -1 +1 @@
Subproject commit 28e88efada2ae62cecf51b29ea52c69d842db051
Subproject commit 42c5e97a5ba2cf31a573160e7d0a2efd615334c5