Merge pull request #713 from status-im/devel
Testnet release 04-02-2020
This commit is contained in: commit 7b38269be4
@@ -37,6 +37,7 @@ build_script:
 test_script:
   # the "go-checks" target fails in AppVeyor, for some reason; easier to disable than to debug
   - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 P2PD_CACHE=p2pdCache
+  - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 P2PD_CACHE=p2pdCache NIMFLAGS="-d:NETWORK_TYPE=libp2p"
   - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_TEST_FIXTURES_SCRIPT=1 DISABLE_GO_CHECKS=1 test
 
 deploy: off
@@ -41,6 +41,8 @@ jobs:
   - bash: |
       set -e
+      # https://developercommunity.visualstudio.com/content/problem/891929/windows-2019-cygheap-base-mismatch-detected-git-ba.html
+      export PATH="/mingw64/bin:/usr/bin:$PATH"
       echo "Installing MinGW-w64"
       if [[ $PLATFORM == "x86" ]]; then
         MINGW_FILE="i686-8.1.0-release-posix-dwarf-rt_v6-rev0.7z"

@@ -64,6 +66,7 @@ jobs:
       export PATH="/c/custom/${MINGW_DIR}/bin:$PATH"
       echo "Fetching submodules"
       git config --global core.longpaths true
+      git config --global core.autocrlf false
       git submodule --quiet update --init --recursive
       scripts/setup_official_tests.sh jsonTestsCache
       mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update
@@ -19,8 +19,10 @@ type
     cmp: proc(a, b: PeerIndex): bool {.closure, gcsafe.}
 
   PeerPool*[A, B] = ref object
-    incNeEvent: AsyncEvent
-    outNeEvent: AsyncEvent
+    incNotEmptyEvent: AsyncEvent
+    outNotEmptyEvent: AsyncEvent
+    incNotFullEvent: AsyncEvent
+    outNotFullEvent: AsyncEvent
     incQueue: HeapQueue[PeerIndex]
     outQueue: HeapQueue[PeerIndex]
     registry: Table[B, PeerIndex]
@@ -34,30 +36,40 @@ type
     acqIncPeersCount: int
     acqOutPeersCount: int
 
   PeerPoolError* = object of CatchableError
 
 proc `<`*(a, b: PeerIndex): bool =
   result = a.cmp(b, a)
 
-proc fireEvent[A, B](pool: PeerPool[A, B], item: PeerItem[A]) {.inline.} =
+proc fireNotEmptyEvent[A, B](pool: PeerPool[A, B],
+                             item: PeerItem[A]) {.inline.} =
   if item.peerType == PeerType.Incoming:
-    pool.incNeEvent.fire()
+    pool.incNotEmptyEvent.fire()
   elif item.peerType == PeerType.Outgoing:
-    pool.outNeEvent.fire()
+    pool.outNotEmptyEvent.fire()
 
-proc waitEvent[A, B](pool: PeerPool[A, B],
-                     filter: set[PeerType]) {.async.} =
+proc fireNotFullEvent[A, B](pool: PeerPool[A, B],
+                            item: PeerItem[A]) {.inline.} =
+  if item.peerType == PeerType.Incoming:
+    pool.incNotFullEvent.fire()
+  elif item.peerType == PeerType.Outgoing:
+    pool.outNotFullEvent.fire()
+
+proc waitNotEmptyEvent[A, B](pool: PeerPool[A, B],
+                             filter: set[PeerType]) {.async.} =
   if filter == {PeerType.Incoming, PeerType.Outgoing} or filter == {}:
-    var fut1 = pool.incNeEvent.wait()
-    var fut2 = pool.outNeEvent.wait()
+    var fut1 = pool.incNotEmptyEvent.wait()
+    var fut2 = pool.outNotEmptyEvent.wait()
     try:
       discard await one(fut1, fut2)
       if fut1.finished:
         if not(fut2.finished):
           fut2.cancel()
-        pool.incNeEvent.clear()
+        pool.incNotEmptyEvent.clear()
       else:
         if not(fut1.finished):
           fut1.cancel()
-        pool.outNeEvent.clear()
+        pool.outNotEmptyEvent.clear()
     except CancelledError:
       if not(fut1.finished):
         fut1.cancel()
@@ -65,11 +77,20 @@ proc waitEvent[A, B](pool: PeerPool[A, B],
         fut2.cancel()
       raise
   elif PeerType.Incoming in filter:
-    await pool.incNeEvent.wait()
-    pool.incNeEvent.clear()
+    await pool.incNotEmptyEvent.wait()
+    pool.incNotEmptyEvent.clear()
   elif PeerType.Outgoing in filter:
-    await pool.outNeEvent.wait()
-    pool.outNeEvent.clear()
+    await pool.outNotEmptyEvent.wait()
+    pool.outNotEmptyEvent.clear()
 
+proc waitNotFullEvent[A, B](pool: PeerPool[A, B],
+                            peerType: PeerType) {.async.} =
+  if peerType == PeerType.Incoming:
+    await pool.incNotFullEvent.wait()
+    pool.incNotFullEvent.clear()
+  elif peerType == PeerType.Outgoing:
+    await pool.outNotFullEvent.wait()
+    pool.outNotFullEvent.clear()
+
 template getItem[A, B](pool: PeerPool[A, B],
                        filter: set[PeerType]): ptr PeerItem[A] =
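Note: the renamed procs follow chronos' level-triggered AsyncEvent pattern — the producer side fires the event when a queue becomes non-empty (or non-full), and the consumer waits, then clears it before re-checking state. A minimal self-contained sketch of that handshake, assuming only the chronos package (names here are illustrative, not from peer_pool):

    import chronos

    proc handshakeDemo() {.async.} =
      let notEmpty = newAsyncEvent()

      proc producer() {.async.} =
        await sleepAsync(10.milliseconds)
        notEmpty.fire()          # signal: something became available

      asyncCheck producer()
      await notEmpty.wait()      # suspend until fired
      notEmpty.clear()           # re-arm before the next wait()

    waitFor handshakeDemo()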
@@ -124,13 +145,15 @@ proc newPeerPool*[A, B](maxPeers = -1,
     doAssert(maxPeers >= maxIncomingPeers + maxOutgoingPeers)
 
   res.maxPeersCount = if maxPeers < 0: high(int)
-                        else: maxPeers
+                      else: maxPeers
   res.maxIncPeersCount = if maxIncomingPeers < 0: high(int)
-                           else: maxIncomingPeers
+                         else: maxIncomingPeers
   res.maxOutPeersCount = if maxOutgoingPeers < 0: high(int)
-                           else: maxOutgoingPeers
-  res.incNeEvent = newAsyncEvent()
-  res.outNeEvent = newAsyncEvent()
+                         else: maxOutgoingPeers
+  res.incNotEmptyEvent = newAsyncEvent()
+  res.outNotEmptyEvent = newAsyncEvent()
+  res.incNotFullEvent = newAsyncEvent()
+  res.outNotFullEvent = newAsyncEvent()
   res.incQueue = initHeapQueue[PeerIndex]()
   res.outQueue = initHeapQueue[PeerIndex]()
   res.registry = initTable[B, PeerIndex]()
@@ -190,6 +213,8 @@ proc deletePeer*[A, B](pool: PeerPool[A, B], peer: A, force = false): bool =
       elif item[].peerType == PeerType.Outgoing:
         dec(pool.curOutPeersCount)
         dec(pool.acqOutPeersCount)
 
+      pool.fireNotFullEvent(item[])
       # Cleanup storage with default item, and removing key from hashtable.
       pool.storage[pindex] = PeerItem[A]()
       pool.registry.del(key)
@@ -210,60 +235,125 @@ proc deletePeer*[A, B](pool: PeerPool[A, B], peer: A, force = false): bool =
             pool.outQueue.del(i)
             break
       dec(pool.curOutPeersCount)
 
+      pool.fireNotFullEvent(item[])
       # Cleanup storage with default item, and removing key from hashtable.
       pool.storage[pindex] = PeerItem[A]()
       pool.registry.del(key)
 
     result = true
 
-proc addPeer*[A, B](pool: PeerPool[A, B], peer: A, peerType: PeerType): bool =
-  ## Add peer ``peer`` of type ``peerType`` to PeerPool ``pool``.
-  ##
-  ## Returns ``true`` on success.
-  mixin getKey, getFuture
-
-  if len(pool.registry) >= pool.maxPeersCount:
-    return false
-
+proc addPeerImpl[A, B](pool: PeerPool[A, B], peer: A, peerKey: B,
+                       peerType: PeerType): PeerIndex =
   proc onPeerClosed(udata: pointer) {.gcsafe.} =
     discard pool.deletePeer(peer)
 
   var item = PeerItem[A](data: peer, peerType: peerType,
                          index: len(pool.storage))
-  var key = getKey(peer)
-
-  if not(pool.registry.hasKey(key)):
-    pool.storage.add(item)
-    var pitem = addr(pool.storage[^1])
-    let pindex = PeerIndex(data: item.index, cmp: pool.cmp)
-    pool.registry[key] = pindex
-    pitem[].data.getFuture().addCallback(onPeerClosed)
+  pool.storage.add(item)
+  var pitem = addr(pool.storage[^1])
+  let pindex = PeerIndex(data: item.index, cmp: pool.cmp)
+  pool.registry[peerKey] = pindex
+  pitem[].data.getFuture().addCallback(onPeerClosed)
+  result = pindex
+
+proc addPeerNoWait*[A, B](pool: PeerPool[A, B],
+                          peer: A, peerType: PeerType): bool =
+  ## Add peer ``peer`` of type ``peerType`` to PeerPool ``pool``.
+  ##
+  ## Procedure returns ``false`` in case:
+  ## * if ``peer`` is already closed.
+  ## * if ``pool`` already has peer ``peer`` inside.
+  ## * if ``pool`` currently has a maximum of peers.
+  ## * if ``pool`` currently has a maximum of `Incoming` or `Outgoing` peers.
+  ##
+  ## Procedure returns ``true`` on success.
+  mixin getKey, getFuture
+
+  result = false
+  let peerKey = getKey(peer)
+
+  if not(pool.registry.hasKey(peerKey)) and not(peer.getFuture().finished):
+    if len(pool.registry) < pool.maxPeersCount:
+      if peerType == PeerType.Incoming:
+        if pool.curIncPeersCount < pool.maxIncPeersCount:
+          let pindex = pool.addPeerImpl(peer, peerKey, peerType)
+          inc(pool.curIncPeersCount)
+          pool.incQueue.push(pindex)
+          pool.incNotEmptyEvent.fire()
+          result = true
+      elif peerType == PeerType.Outgoing:
+        if pool.curOutPeersCount < pool.maxOutPeersCount:
+          let pindex = pool.addPeerImpl(peer, peerKey, peerType)
+          inc(pool.curOutPeersCount)
+          pool.outQueue.push(pindex)
+          pool.outNotEmptyEvent.fire()
+          result = true
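Note: as the ``mixin getKey, getFuture`` statements above imply, any peer type stored in a PeerPool must supply getKey and getFuture, and (as the tests later in this diff do) a `<` comparison for heap ordering. A minimal sketch of a conforming type and a non-blocking insert — illustrative only, mirroring the PeerTest/SimplePeer helpers used by the tests:

    import chronos
    # assumes peer_pool is importable, e.g.: import beacon_chain/peer_pool

    type DemoPeer = ref object
      id: string
      lifu: Future[void]        # completes when the peer disconnects

    proc getKey(p: DemoPeer): string = p.id
    proc getFuture(p: DemoPeer): Future[void] = p.lifu
    proc `<`(a, b: DemoPeer): bool = a.id < b.id

    var pool = newPeerPool[DemoPeer, string]()
    let peer = DemoPeer(id: "demo", lifu: newFuture[void]())
    doAssert pool.addPeerNoWait(peer, PeerType.Incoming)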
+proc addPeer*[A, B](pool: PeerPool[A, B],
+                    peer: A, peerType: PeerType): Future[bool] {.async.} =
+  ## Add peer ``peer`` of type ``peerType`` to PeerPool ``pool``.
+  ##
+  ## This procedure will wait for an empty space in PeerPool ``pool``, if
+  ## PeerPool ``pool`` is full.
+  ##
+  ## Procedure returns ``false`` in case:
+  ## * if ``peer`` is already closed.
+  ## * if ``pool`` already has peer ``peer`` inside.
+  ##
+  ## Procedure returns ``true`` on success.
+  mixin getKey, getFuture
+
+  var res = false
+  let peerKey = getKey(peer)
+
+  if not(pool.registry.hasKey(peerKey)) and not(peer.getFuture().finished):
+    if len(pool.registry) >= pool.maxPeersCount:
+      await pool.waitNotFullEvent(peerType)
     if peerType == PeerType.Incoming:
       if pool.curIncPeersCount >= pool.maxIncPeersCount:
-        return false
-      else:
-        inc(pool.curIncPeersCount)
-        pool.incQueue.push(pindex)
-        pool.incNeEvent.fire()
+        await pool.waitNotFullEvent(peerType)
+
+      let pindex = pool.addPeerImpl(peer, peerKey, peerType)
+      inc(pool.curIncPeersCount)
+      pool.incQueue.push(pindex)
+      pool.incNotEmptyEvent.fire()
+      res = true
     elif peerType == PeerType.Outgoing:
       if pool.curOutPeersCount >= pool.maxOutPeersCount:
-        return false
-      else:
-        inc(pool.curOutPeersCount)
-        pool.outQueue.push(pindex)
-        pool.outNeEvent.fire()
+        await pool.waitNotFullEvent(peerType)
 
-    result = true
+      let pindex = pool.addPeerImpl(peer, peerKey, peerType)
+      inc(pool.curOutPeersCount)
+      pool.outQueue.push(pindex)
+      pool.outNotEmptyEvent.fire()
+      res = true
 
-proc addIncomingPeer*[A, B](pool: PeerPool[A, B], peer: A): bool {.inline.} =
+  result = res
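Note: unlike addPeerNoWait, this addPeer returns a Future[bool] and suspends while the pool is full, so callers normally await it. Continuing the DemoPeer sketch above (illustrative, not from the repo):

    proc connectOne(pool: PeerPool[DemoPeer, string],
                    peer: DemoPeer) {.async.} =
      # suspends until a slot frees up; false only for duplicates or
      # already-closed peers
      if await pool.addPeer(peer, PeerType.Outgoing):
        echo "registered ", peer.getKey()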
+proc addIncomingPeerNoWait*[A, B](pool: PeerPool[A, B],
+                                  peer: A): bool {.inline.} =
   ## Add incoming peer ``peer`` to PeerPool ``pool``.
   ##
   ## Returns ``true`` on success.
+  result = pool.addPeerNoWait(peer, PeerType.Incoming)
+
+proc addOutgoingPeerNoWait*[A, B](pool: PeerPool[A, B],
+                                  peer: A): bool {.inline.} =
   ## Add outgoing peer ``peer`` to PeerPool ``pool``.
   ##
   ## Returns ``true`` on success.
+  result = pool.addPeerNoWait(peer, PeerType.Outgoing)
+
+proc addIncomingPeer*[A, B](pool: PeerPool[A, B],
+                            peer: A): Future[bool] {.inline.} =
+  ## Add incoming peer ``peer`` to PeerPool ``pool``.
+  ##
+  ## Returns ``true`` on success.
   result = pool.addPeer(peer, PeerType.Incoming)
 
-proc addOutgoingPeer*[A, B](pool: PeerPool[A, B], peer: A): bool {.inline.} =
+proc addOutgoingPeer*[A, B](pool: PeerPool[A, B],
+                            peer: A): Future[bool] {.inline.} =
   ## Add outgoing peer ``peer`` to PeerPool ``pool``.
   ##
   ## Returns ``true`` on success.
@@ -281,7 +371,7 @@ proc acquire*[A, B](pool: PeerPool[A, B],
       if PeerType.Outgoing in filter:
         count = count + len(pool.outQueue)
       if count == 0:
-        await pool.waitEvent(filter)
+        await pool.waitNotEmptyEvent(filter)
       else:
         var item = pool.getItem(filter)
         doAssert(PeerFlags.Acquired notin item[].flags)
@@ -289,6 +379,22 @@ proc acquire*[A, B](pool: PeerPool[A, B],
         result = item[].data
         break
 
+proc acquireNoWait*[A, B](pool: PeerPool[A, B],
+                          filter = {PeerType.Incoming,
+                                    PeerType.Outgoing}): A =
+  doAssert(filter != {}, "Filter must not be empty")
+  var count = 0
+  if PeerType.Incoming in filter:
+    count = count + len(pool.incQueue)
+  if PeerType.Outgoing in filter:
+    count = count + len(pool.outQueue)
+  if count < 1:
+    raise newException(PeerPoolError, "Not enough peers in pool")
+  var item = pool.getItem(filter)
+  doAssert(PeerFlags.Acquired notin item[].flags)
+  item[].flags.incl(PeerFlags.Acquired)
+  result = item[].data
+
 proc release*[A, B](pool: PeerPool[A, B], peer: A) =
   ## Release peer ``peer`` back to PeerPool ``pool``
   mixin getKey
@@ -315,7 +421,7 @@ proc release*[A, B](pool: PeerPool[A, B], peer: A) =
       elif item[].peerType == PeerType.Outgoing:
         pool.outQueue.push(titem)
         dec(pool.acqOutPeersCount)
-      pool.fireEvent(item[])
+      pool.fireNotEmptyEvent(item[])
 
 proc release*[A, B](pool: PeerPool[A, B], peers: openarray[A]) {.inline.} =
   ## Release array of peers ``peers`` back to PeerPool ``pool``.
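Note: acquire marks the peer Acquired until it is handed back, so acquire/release are naturally paired with try/finally. A usage sketch under the same DemoPeer assumptions as above:

    proc useOnePeer(pool: PeerPool[DemoPeer, string]) {.async.} =
      let peer = await pool.acquire({PeerType.Incoming, PeerType.Outgoing})
      try:
        discard peer.getKey()   # ... talk to the peer here ...
      finally:
        pool.release(peer)      # re-queues it and fires the NotEmpty event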
@@ -341,7 +447,7 @@ proc acquire*[A, B](pool: PeerPool[A, B],
       if PeerType.Outgoing in filter:
         count = count + len(pool.outQueue)
       if count == 0:
-        await pool.waitEvent(filter)
+        await pool.waitNotEmptyEvent(filter)
       else:
         var item = pool.getItem(filter)
         doAssert(PeerFlags.Acquired notin item[].flags)
@@ -356,6 +462,28 @@ proc acquire*[A, B](pool: PeerPool[A, B],
       raise
   result = peers
 
+proc acquireNoWait*[A, B](pool: PeerPool[A, B],
+                          number: int,
+                          filter = {PeerType.Incoming,
+                                    PeerType.Outgoing}): seq[A] =
+  ## Acquire ``number`` peers from PeerPool ``pool``, which match the
+  ## filter ``filter``.
+  doAssert(filter != {}, "Filter must not be empty")
+  var peers = newSeq[A]()
+  var count = 0
+  if PeerType.Incoming in filter:
+    count = count + len(pool.incQueue)
+  if PeerType.Outgoing in filter:
+    count = count + len(pool.outQueue)
+  if count < number:
+    raise newException(PeerPoolError, "Not enough peers in pool")
+  for i in 0 ..< number:
+    var item = pool.getItem(filter)
+    doAssert(PeerFlags.Acquired notin item[].flags)
+    item[].flags.incl(PeerFlags.Acquired)
+    peers.add(item[].data)
+  result = peers
+
 proc acquireIncomingPeer*[A, B](pool: PeerPool[A, B]): Future[A] {.inline.} =
   ## Acquire single incoming peer from PeerPool ``pool``.
   pool.acquire({PeerType.Incoming})
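Note: the NoWait variants raise PeerPoolError instead of suspending when the request cannot be met, suiting callers that prefer to fail fast. Sketch, same DemoPeer assumptions as above:

    proc tryAcquireOne(pool: PeerPool[DemoPeer, string]): bool =
      try:
        let peer = pool.acquireNoWait({PeerType.Outgoing})
        pool.release(peer)
        result = true
      except PeerPoolError:
        result = false          # "Not enough peers in pool"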
@@ -47,27 +47,20 @@ func decrease_balance*(
   else:
     state.balances[index] - delta
 
-# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_beacon-chain.md#deposits
-func process_deposit*(
+# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#deposits
+proc process_deposit*(
     state: var BeaconState, deposit: Deposit, flags: UpdateFlags = {}): bool {.nbench.}=
   # Process an Eth1 deposit, registering a validator or increasing its balance.
 
   # Verify the Merkle branch
-  # TODO enable this check, but don't use doAssert
-  if not is_valid_merkle_branch(
-    hash_tree_root(deposit.getDepositMessage),
-    deposit.proof,
-    DEPOSIT_CONTRACT_TREE_DEPTH,
-    state.eth1_deposit_index,
+  if skipValidation notin flags and not is_valid_merkle_branch(
+    hash_tree_root(deposit.data),
+    deposit.proof,
+    DEPOSIT_CONTRACT_TREE_DEPTH + 1,
+    state.eth1_deposit_index,
     state.eth1_data.deposit_root,
   ):
-    ## TODO: a notice-like mechanism which works in a func
-    ## here and elsewhere, one minimal approach is a check-if-true
-    ## and return false iff so.
-    ## obviously, better/more principled ones exist, but
-    ## generally require broader rearchitecting, and this is what
-    ## mostly happens now, just haphazardly.
-    discard
+    return false
 
   # Deposits must be processed in order
   state.eth1_deposit_index += 1
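Note: DEPOSIT_CONTRACT_TREE_DEPTH + 1 reflects that the verified root commits to the incremental deposit tree plus the deposit-count mix-in on top of it. For reference, a sketch of the check following the eth2 v0.10.1 is_valid_merkle_branch pseudocode — the hash combiner is passed in here as a parameter, since the repo's real two-input SHA-256 helper lives elsewhere:

    func isValidMerkleBranchSketch(leaf: Eth2Digest,
                                   branch: openArray[Eth2Digest],
                                   depth: int, index: uint64,
                                   root: Eth2Digest,
                                   h: proc(a, b: Eth2Digest): Eth2Digest): bool =
      # fold the leaf up the tree, picking the sibling side from index bits
      var value = leaf
      for i in 0 ..< depth:
        if ((index shr i) and 1'u64) == 1'u64:
          value = h(branch[i], value)
        else:
          value = h(value, branch[i])
      value == root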
@@ -380,7 +373,7 @@ proc is_valid_indexed_attestation*(
     notice "indexed attestation: validator index beyond max validators per committee"
     return false
 
-  # Verify indices are sorted
+  # Verify indices are sorted and unique
+  # TODO but why? this is a local artifact
   if indices != sorted(indices, system.cmp):
     notice "indexed attestation: indices not sorted"
@@ -13,7 +13,7 @@ import
   # Third-party
   blscurve, # defines Domain
   # Internal
-  ./datatypes, ./digest
+  ./datatypes, ./digest, ../ssz
 
 # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#integer_squareroot
 func integer_squareroot*(n: SomeInteger): SomeInteger =
@@ -144,6 +144,16 @@ func get_domain*(
 func get_domain*(state: BeaconState, domain_type: DomainType): Domain =
   get_domain(state, domain_type, get_current_epoch(state))
 
+# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#compute_signing_root
+func compute_signing_root*(ssz_object: auto, domain: Domain): Eth2Digest =
+  # Return the signing root of an object by calculating the root of the
+  # object-domain tree.
+  let domain_wrapped_object = SigningRoot(
+    object_root: hash_tree_root(ssz_object),
+    domain: domain
+  )
+  hash_tree_root(domain_wrapped_object)
+
 # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_seed
 func get_seed*(state: BeaconState, epoch: Epoch, domain_type: DomainType): Eth2Digest =
   # Return the seed at ``epoch``.
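Note: compute_signing_root is the v0.10.x replacement for the old signed-root scheme — a signature now covers hash_tree_root(object) mixed with the fork/domain. Typical call shape (illustrative; the message and domain constant are placeholders, not taken from this diff):

    let
      domain = get_domain(state, DOMAIN_BEACON_ATTESTER)
      signing_root = compute_signing_root(attestation_data, domain)
    # the BLS signature over the message is then made against signing_root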
@@ -213,7 +213,7 @@ func is_slashable_attestation_data(
     (data_1.source.epoch < data_2.source.epoch and
       data_2.target.epoch < data_1.target.epoch)
 
-# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attester-slashings
+# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#attester-slashings
 proc process_attester_slashing*(
     state: var BeaconState,
     attester_slashing: AttesterSlashing,
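Note: the clause quoted above is the "surround vote" half of is_slashable_attestation_data; the other half (not shown in this hunk) catches double votes for the same target epoch. A worked instance with illustrative epochs:

    # data_1: source 2, target 8;  data_2: source 3, target 5
    # data_1 surrounds data_2, so the pair is slashable:
    doAssert (2 < 3) and (5 < 8)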
@@ -250,7 +250,7 @@ proc process_attester_slashing*(
     return false
   return true
 
-# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attester-slashings
+# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#attester-slashings
 proc processAttesterSlashings(state: var BeaconState, blck: BeaconBlock,
                               stateCache: var StateCache): bool {.nbench.}=
   # Process ``AttesterSlashing`` operation.
(File diff suppressed because it is too large.)
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2019 Status Research & Development GmbH
+# Copyright (c) 2019-2020 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -78,7 +78,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
 
   let
     genesisState =
-      initialize_beacon_state_from_eth1(Eth2Digest(), 0, deposits, flags)
+      initialize_beacon_state_from_eth1(
+        Eth2Digest(), 0, deposits, {skipValidation})
     genesisBlock = get_initial_beacon_block(genesisState)
 
   echo "Starting simulation..."
@@ -75,7 +75,7 @@ cli do (testnetName {.argument.}: string):
     validatorsDir = dataDir / "validators"
     dumpDir = dataDir / "dump"
     beaconNodeBinary = buildDir / "beacon_node_" & dataDirName
-    nimFlags = "-d:chronicles_log_level=DEBUG " & getEnv("NIM_PARAMS")
+    nimFlags = "-d:chronicles_log_level=TRACE " & getEnv("NIM_PARAMS")
 
   let depositContractFile = testnetDir / depositContractFileName
   if system.fileExists(depositContractFile):
@@ -25,7 +25,8 @@ import # Unit test
   ./test_sync_protocol,
   # ./test_validator # Empty!
   ./test_zero_signature,
-  ./test_peer_pool
+  ./test_peer_pool,
+  ./test_sync_manager
 
 import # Refactor state transition unit tests
   # TODO re-enable when useful
@@ -66,18 +66,27 @@ template runTest(testName: string, identifier: untyped) =
   `testImpl _ operations_attestations _ identifier`()
 
 suite "Official - Operations - Attestations " & preset():
-  runTest("success", success)
-  runTest("success previous epoch", success_previous_epoch)
-  runTest("invalid attestation signature", invalid_attestation_signature)
-  runTest("before inclusion delay", before_inclusion_delay)
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/minimal/phase0/operations/attestation/pyspec_tests
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/mainnet/phase0/operations/attestation/pyspec_tests
   runTest("after_epoch_slots", after_epoch_slots)
+  runTest("bad source root", bad_source_root)
+  runTest("before inclusion delay", before_inclusion_delay)
+  runTest("empty aggregation bits", empty_aggregation_bits)
+  runTest("future target epoch", future_target_epoch)
+  runTest("invalid attestation signature", invalid_attestation_signature)
+  runTest("invalid current source root", invalid_current_source_root)
+  runTest("invalid index", invalid_index)
+  runTest("mismatched target and slot", mismatched_target_and_slot)
+  runTest("new source epoch", new_source_epoch)
   runTest("old source epoch", old_source_epoch)
   runTest("old target epoch", old_target_epoch)
-  runTest("future target epoch", future_target_epoch)
-  runTest("new source epoch", new_source_epoch)
   runTest("source root is target root", source_root_is_target_root)
-  runTest("invalid current source root", invalid_current_source_root)
-  runTest("bad source root", bad_source_root)
-  runTest("empty aggregation bits", empty_aggregation_bits)
-  runTest("too many aggregation bits", too_many_aggregation_bits)
+  runTest("success", success)
+  runTest("success multi-proposer index iterations",
+    success_multi_proposer_index_iterations)
+  runTest("success previous epoch", success_previous_epoch)
+  runTest("too few aggregation bits", too_few_aggregation_bits)
+  runTest("too many aggregation bits", too_many_aggregation_bits)
+  runTest("wrong index for committee signature",
+    wrong_index_for_committee_signature)
+  runTest("wrong index for slot", wrong_index_for_slot)
@@ -66,13 +66,15 @@ template runTest(identifier: untyped) =
   `testImpl_proposer_slashing _ identifier`()
 
 suite "Official - Operations - Proposer slashing " & preset():
-  runTest(success)
-  runTest(invalid_sig_1)
-  runTest(invalid_sig_2)
-  runTest(invalid_sig_1_and_2)
-  runTest(invalid_proposer_index)
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/minimal/phase0/operations/proposer_slashing/pyspec_tests
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/mainnet/phase0/operations/proposer_slashing/pyspec_tests
   runTest(epochs_are_different)
   runTest(headers_are_same)
+  runTest(invalid_proposer_index)
+  runTest(invalid_sig_1)
+  runTest(invalid_sig_1_and_2)
+  runTest(invalid_sig_2)
   runTest(proposer_is_not_activated)
   runTest(proposer_is_slashed)
   runTest(proposer_is_withdrawn)
+  runTest(success)
@@ -64,16 +64,18 @@ template runTest(identifier: untyped) =
   `testImpl _ voluntary_exit _ identifier`()
 
 suite "Official - Operations - Voluntary exit " & preset():
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/minimal/phase0/operations/voluntary_exit/pyspec_tests
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/mainnet/phase0/operations/voluntary_exit/pyspec_tests
   runTest(success)
 
   when false:
     # TODO not sure how this particularly could falsely succeed
     runTest(invalid_signature)
 
-  runTest(validator_invalid_validator_index)
-  runTest(validator_already_exited)
   runTest(success_exit_queue)
   runTest(validator_exit_in_future)
+  runTest(validator_invalid_validator_index)
+  runTest(validator_not_active)
+  runTest(validator_already_exited)
   runTest(default_exit_epoch_subsequent_exit)
   runTest(validator_not_active_long_enough)
-
-  runTest(validator_not_active)
@@ -47,6 +47,8 @@ template runTest(testName: string, identifier: untyped, num_slots: uint64): unty
   # ---------------------------------------------------------------
 
 suite "Official - Sanity - Slots " & preset():
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/minimal/phase0/sanity/slots/pyspec_tests
+  # https://github.com/ethereum/eth2.0-spec-tests/tree/v0.10.1/tests/mainnet/phase0/sanity/slots/pyspec_tests
   runTest("Advance 1 slot", slots_1, 1)
   runTest("Advance 2 slots", slots_2, 2)
   runTest("Advance an empty epoch", empty_epoch, SLOTS_PER_EPOCH)
@@ -16,7 +16,7 @@ mkdir -p "$VALIDATORS_DIR"
 
 cd "$GIT_ROOT"
 
-NIMFLAGS="-d:chronicles_log_level=DEBUG --hints:off --warnings:off --verbosity:0 --opt:speed --debuginfo"
+NIMFLAGS="-d:chronicles_log_level=TRACE --hints:off --warnings:off --verbosity:0 --opt:speed --debuginfo"
 
 # Run with "SLOTS_PER_EPOCH=8 ./start.sh" to change these
 DEFS=""
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2020 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -10,11 +10,12 @@
 import
   times, unittest,
   ./testutil, ./testblockutil,
-  ../beacon_chain/spec/[beaconstate, datatypes, digest]
+  ../beacon_chain/spec/[beaconstate, datatypes, digest],
+  ../beacon_chain/extras
 
 suite "Beacon state" & preset():
   timedTest "Smoke test initialize_beacon_state_from_eth1" & preset():
     let state = initialize_beacon_state_from_eth1(
       Eth2Digest(), 0,
-      makeInitialDeposits(SLOTS_PER_EPOCH, {}), {})
+      makeInitialDeposits(SLOTS_PER_EPOCH, {}), {skipValidation})
     check: state.validators.len == SLOTS_PER_EPOCH
@@ -37,7 +37,7 @@ proc close*(peer: PeerTest) =
   peer.future.complete()
 
 suite "PeerPool testing suite":
-  timedTest "addPeer() test":
+  timedTest "addPeerNoWait() test":
     const peersCount = [
       [10, 5, 5, 10, 5, 5],
       [-1, 5, 5, 10, 5, 5],
@@ -47,23 +47,100 @@ suite "PeerPool testing suite":
       var pool = newPeerPool[PeerTest, PeerTestID](item[0], item[1], item[2])
       for i in 0 ..< item[4]:
         var peer = PeerTest.init("idInc" & $i)
-        check pool.addIncomingPeer(peer) == true
+        check pool.addIncomingPeerNoWait(peer) == true
 
       for i in 0 ..< item[5]:
         var peer = PeerTest.init("idOut" & $i)
-        check pool.addOutgoingPeer(peer) == true
+        check pool.addOutgoingPeerNoWait(peer) == true
 
       var peer = PeerTest.init("idCheck")
       if item[1] != -1:
         for i in 0 ..< item[3]:
-          check pool.addIncomingPeer(peer) == false
+          check pool.addIncomingPeerNoWait(peer) == false
       if item[2] != -1:
         for i in 0 ..< item[3]:
-          check pool.addOutgoingPeer(peer) == false
+          check pool.addOutgoingPeerNoWait(peer) == false
       check:
         pool.lenAvailable == item[3]
         pool.lenAvailable({PeerType.Incoming}) == item[4]
         pool.lenAvailable({PeerType.Outgoing}) == item[5]
 
+  timedTest "addPeer() test":
+    proc testAddPeer1(): Future[bool] {.async.} =
+      var pool = newPeerPool[PeerTest, PeerTestID](maxPeers = 1,
+                                                   maxIncomingPeers = 1,
+                                                   maxOutgoingPeers = 0)
+      var peer0 = PeerTest.init("idInc0")
+      var peer1 = PeerTest.init("idOut0")
+      var peer2 = PeerTest.init("idInc1")
+      var fut0 = pool.addIncomingPeer(peer0)
+      var fut1 = pool.addOutgoingPeer(peer1)
+      var fut2 = pool.addIncomingPeer(peer2)
+      doAssert(fut0.finished == true and fut0.failed == false)
+      doAssert(fut1.finished == false)
+      doAssert(fut2.finished == false)
+      peer0.close()
+      await sleepAsync(100.milliseconds)
+      doAssert(fut1.finished == false)
+      doAssert(fut2.finished == true and fut2.failed == false)
+      result = true
+
+    proc testAddPeer2(): Future[bool] {.async.} =
+      var pool = newPeerPool[PeerTest, PeerTestID](maxPeers = 2,
+                                                   maxIncomingPeers = 1,
+                                                   maxOutgoingPeers = 1)
+      var peer0 = PeerTest.init("idInc0")
+      var peer1 = PeerTest.init("idOut0")
+      var peer2 = PeerTest.init("idInc1")
+      var peer3 = PeerTest.init("idOut1")
+      var fut0 = pool.addIncomingPeer(peer0)
+      var fut1 = pool.addOutgoingPeer(peer1)
+      var fut2 = pool.addIncomingPeer(peer2)
+      var fut3 = pool.addOutgoingPeer(peer3)
+      doAssert(fut0.finished == true and fut0.failed == false)
+      doAssert(fut1.finished == true and fut1.failed == false)
+      doAssert(fut2.finished == false)
+      doAssert(fut3.finished == false)
+      peer0.close()
+      await sleepAsync(100.milliseconds)
+      doAssert(fut2.finished == true and fut2.failed == false)
+      doAssert(fut3.finished == false)
+      peer1.close()
+      await sleepAsync(100.milliseconds)
+      doAssert(fut3.finished == true and fut3.failed == false)
+      result = true
+
+    proc testAddPeer3(): Future[bool] {.async.} =
+      var pool = newPeerPool[PeerTest, PeerTestID](maxPeers = 3,
+                                                   maxIncomingPeers = 1,
+                                                   maxOutgoingPeers = 1)
+      var peer0 = PeerTest.init("idInc0")
+      var peer1 = PeerTest.init("idInc1")
+      var peer2 = PeerTest.init("idOut0")
+      var peer3 = PeerTest.init("idOut1")
+
+      var fut0 = pool.addIncomingPeer(peer0)
+      var fut1 = pool.addIncomingPeer(peer1)
+      var fut2 = pool.addOutgoingPeer(peer2)
+      var fut3 = pool.addOutgoingPeer(peer3)
+      doAssert(fut0.finished == true and fut0.failed == false)
+      doAssert(fut1.finished == false)
+      doAssert(fut2.finished == true and fut2.failed == false)
+      doAssert(fut3.finished == false)
+      peer0.close()
+      await sleepAsync(100.milliseconds)
+      doAssert(fut1.finished == true and fut1.failed == false)
+      doAssert(fut3.finished == false)
+      peer2.close()
+      await sleepAsync(100.milliseconds)
+      doAssert(fut3.finished == true and fut3.failed == false)
+      result = true
+
+    check:
+      waitFor(testAddPeer1()) == true
+      waitFor(testAddPeer2()) == true
+      waitFor(testAddPeer3()) == true
+
   timedTest "Acquire from empty pool":
     var pool0 = newPeerPool[PeerTest, PeerTestID]()
     var pool1 = newPeerPool[PeerTest, PeerTestID]()
@@ -92,10 +169,10 @@ suite "PeerPool testing suite":
     var peer21 = PeerTest.init("peer21")
     var peer22 = PeerTest.init("peer22")
     check:
-      pool1.addPeer(peer11, PeerType.Incoming) == true
-      pool1.addPeer(peer12, PeerType.Incoming) == true
-      pool2.addPeer(peer21, PeerType.Outgoing) == true
-      pool2.addPeer(peer22, PeerType.Outgoing) == true
+      pool1.addPeerNoWait(peer11, PeerType.Incoming) == true
+      pool1.addPeerNoWait(peer12, PeerType.Incoming) == true
+      pool2.addPeerNoWait(peer21, PeerType.Outgoing) == true
+      pool2.addPeerNoWait(peer22, PeerType.Outgoing) == true
 
     var itemFut11 = pool1.acquire({PeerType.Outgoing})
     var itemFut12 = pool1.acquire(10, {PeerType.Outgoing})
@@ -179,9 +256,9 @@ suite "PeerPool testing suite":
       var peer = PeerTest.init("peer" & $i, rand(MaxNumber))
       # echo repr peer
       if rand(100) mod 2 == 0:
-        check pool.addPeer(peer, PeerType.Incoming) == true
+        check pool.addPeerNoWait(peer, PeerType.Incoming) == true
       else:
-        check pool.addPeer(peer, PeerType.Outgoing) == true
+        check pool.addPeerNoWait(peer, PeerType.Outgoing) == true
 
     check waitFor(testAcquireRelease()) == TestsCount
@@ -191,7 +268,7 @@ suite "PeerPool testing suite":
     var peer = PeerTest.init("deletePeer")
 
     ## Delete available peer
-    doAssert(pool.addIncomingPeer(peer) == true)
+    doAssert(pool.addIncomingPeerNoWait(peer) == true)
     doAssert(pool.len == 1)
     doAssert(pool.lenAvailable == 1)
     doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)

@@ -204,7 +281,7 @@ suite "PeerPool testing suite":
 
     ## Delete acquired peer
     peer = PeerTest.init("closingPeer")
-    doAssert(pool.addIncomingPeer(peer) == true)
+    doAssert(pool.addIncomingPeerNoWait(peer) == true)
     doAssert(pool.len == 1)
     doAssert(pool.lenAvailable == 1)
     doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)

@@ -223,7 +300,7 @@ suite "PeerPool testing suite":
 
     ## Force delete acquired peer
     peer = PeerTest.init("closingPeer")
-    doAssert(pool.addIncomingPeer(peer) == true)
+    doAssert(pool.addIncomingPeerNoWait(peer) == true)
     doAssert(pool.len == 1)
     doAssert(pool.lenAvailable == 1)
     doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)

@@ -244,7 +321,7 @@ suite "PeerPool testing suite":
     var peer = PeerTest.init("closingPeer")
 
     ## Close available peer
-    doAssert(pool.addIncomingPeer(peer) == true)
+    doAssert(pool.addIncomingPeerNoWait(peer) == true)
     doAssert(pool.len == 1)
     doAssert(pool.lenAvailable == 1)
     doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)

@@ -259,7 +336,7 @@ suite "PeerPool testing suite":
 
     ## Close acquired peer
     peer = PeerTest.init("closingPeer")
-    doAssert(pool.addIncomingPeer(peer) == true)
+    doAssert(pool.addIncomingPeerNoWait(peer) == true)
     doAssert(pool.len == 1)
     doAssert(pool.lenAvailable == 1)
     doAssert(pool.lenAvailable({PeerType.Outgoing}) == 0)
@@ -292,9 +369,9 @@ suite "PeerPool testing suite":
     var peer3 = PeerTest.init("peer3", 8)
 
     check:
-      pool.addPeer(peer1, PeerType.Incoming) == true
-      pool.addPeer(peer2, PeerType.Incoming) == true
-      pool.addPeer(peer3, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer1, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer2, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer3, PeerType.Outgoing) == true
       pool.lenAvailable == 3
       pool.lenAvailable({PeerType.Outgoing}) == 1
       pool.lenAvailable({PeerType.Incoming}) == 2
@@ -311,9 +388,9 @@ suite "PeerPool testing suite":
       pool.len == 0
 
     check:
-      pool.addPeer(peer1, PeerType.Incoming) == true
-      pool.addPeer(peer2, PeerType.Incoming) == true
-      pool.addPeer(peer3, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer1, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer2, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer3, PeerType.Outgoing) == true
       pool.lenAvailable == 3
       pool.lenAvailable({PeerType.Outgoing}) == 1
      pool.lenAvailable({PeerType.Incoming}) == 2
@@ -339,9 +416,9 @@ suite "PeerPool testing suite":
     var peer3 = PeerTest.init("peer3", 8)
 
     check:
-      pool.addPeer(peer1, PeerType.Incoming) == true
-      pool.addPeer(peer2, PeerType.Incoming) == true
-      pool.addPeer(peer3, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer1, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer2, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer3, PeerType.Outgoing) == true
       pool.hasPeer("peer4") == false
       pool.hasPeer("peer1") == true
       pool.hasPeer("peer2") == true
@@ -374,16 +451,16 @@ suite "PeerPool testing suite":
     var peer9 = PeerTest.init("peer9", 2)
 
     check:
-      pool.addPeer(peer2, PeerType.Incoming) == true
-      pool.addPeer(peer3, PeerType.Incoming) == true
-      pool.addPeer(peer1, PeerType.Incoming) == true
-      pool.addPeer(peer4, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer2, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer3, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer1, PeerType.Incoming) == true
+      pool.addPeerNoWait(peer4, PeerType.Incoming) == true
 
-      pool.addPeer(peer5, PeerType.Outgoing) == true
-      pool.addPeer(peer8, PeerType.Outgoing) == true
-      pool.addPeer(peer7, PeerType.Outgoing) == true
-      pool.addPeer(peer6, PeerType.Outgoing) == true
-      pool.addPeer(peer9, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer5, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer8, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer7, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer6, PeerType.Outgoing) == true
+      pool.addPeerNoWait(peer9, PeerType.Outgoing) == true
 
     var total1, total2, total3: seq[PeerTest]
     var avail1, avail2, avail3: seq[PeerTest]
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2020 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -11,7 +11,7 @@ import
   unittest,
   ./testutil, ./testblockutil,
   ../beacon_chain/spec/[beaconstate, datatypes, digest, validator],
-  ../beacon_chain/[state_transition, ssz]
+  ../beacon_chain/[extras, state_transition, ssz]
 
 suite "Block processing" & preset():
   ## For now just test that we can compile and execute block processing with

@@ -22,7 +22,7 @@ suite "Block processing" & preset():
     # TODO bls verification is a bit of a bottleneck here
     genesisState = initialize_beacon_state_from_eth1(
       Eth2Digest(), 0,
-      makeInitialDeposits(), {})
+      makeInitialDeposits(), {skipValidation})
     genesisBlock = get_initial_beacon_block(genesisState)
     genesisRoot = hash_tree_root(genesisBlock.message)
@@ -0,0 +1,906 @@
# beacon_chain
# Copyright (c) 2019 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import options, hashes, unittest
import ./testutil
import chronos
import ../beacon_chain/peer_pool, ../beacon_chain/sync_manager

type
  PeerRequest = object
    headRoot: Eth2Digest
    startSlot: Slot
    count: uint64
    step: uint64
    data: seq[Slot]

  SimplePeerKey = string

  SimplePeer = ref object
    id: SimplePeerKey
    weight: int
    lifu: Future[void]
    blockchain: seq[SignedBeaconBlock]
    latestSlot: Slot
    delay: Duration
    malicious: bool
    failure: bool
    disconnect: bool
    requests: seq[PeerRequest]

proc getKey*(peer: SimplePeer): SimplePeerKey =
  result = peer.id

proc getFuture*(peer: SimplePeer): Future[void] =
  result = peer.lifu

proc `<`*(a, b: SimplePeer): bool =
  result = `<`(a.weight, b.weight)
proc getHeadSlot*(peer: SimplePeer): Slot =
  if len(peer.blockchain) == 0:
    result = peer.latestSlot
  else:
    result = peer.blockchain[len(peer.blockchain) - 1].message.slot

proc init*(t: typedesc[SimplePeer], id: string = "", malicious = false,
           weight: int = 0, slot: int = 0,
           delay: Duration = ZeroDuration): SimplePeer =
  result = SimplePeer(id: id, weight: weight, lifu: newFuture[void](),
                      delay: delay, latestSlot: Slot(slot),
                      malicious: malicious)

proc update*(peer: SimplePeer, chain: openarray[SignedBeaconBlock],
             malicious = false, failure = false, disconnect = false,
             delay: Duration = ZeroDuration) =
  peer.malicious = malicious
  peer.delay = delay
  peer.failure = failure
  peer.disconnect = disconnect
  peer.blockchain.setLen(0)
  for item in chain:
    peer.blockchain.add(item)

proc close*(peer: SimplePeer) =
  peer.lifu.complete()

proc getHeadRoot*(peer: SimplePeer): Eth2Digest =
  discard

proc updateStatus*[A](peer: A): Future[void] =
  var res = newFuture[void]("updateStatus")
  res.complete()
  return res

proc getBeaconBlocksByRange*[A](peer: A, headRoot: Eth2Digest, startSlot: Slot,
                                count: uint64,
                                step: uint64): Future[OptionBeaconBlockSeq] {.async.} =
  var req = PeerRequest(headRoot: headRoot, startSlot: startSlot, count: count,
                        step: step)
  var res = newSeq[SignedBeaconBlock]()
  var reqres = newSeq[Slot]()
  if peer.delay != ZeroDuration:
    await sleepAsync(peer.delay)

  var counter = 0'u64

  if peer.failure:
    peer.requests.add(req)
    if peer.disconnect:
      peer.close()
    raise newException(SyncManagerError, "Error")

  if peer.malicious:
    var index = 0
    while counter < count:
      if index < len(peer.blockchain):
        res.add(peer.blockchain[index])
        reqres.add(peer.blockchain[index].message.slot)
      else:
        break
      index = index + int(step)
      counter = counter + 1'u64
    req.data = reqres
    peer.requests.add(req)
    result = some(res)
  else:
    var index = -1
    for i in 0 ..< len(peer.blockchain):
      if peer.blockchain[i].message.slot == startSlot:
        index = i
        break

    if index >= 0:
      while counter < count:
        if index < len(peer.blockchain):
          res.add(peer.blockchain[index])
          reqres.add(peer.blockchain[index].message.slot)
        else:
          break
        index = index + int(step)
        counter = counter + 1'u64
      req.data = reqres
    result = some(res)
    peer.requests.add(req)

proc newTempChain*(number: int, start: Slot): seq[SignedBeaconBlock] =
  result = newSeq[SignedBeaconBlock](number)
  for i in 0 ..< number:
    result[i].message.slot = start + uint64(i)

proc `==`*(a1, a2: SignedBeaconBlock): bool {.inline.} =
  result = (a1.message.slot == a2.message.slot) and
           (a1.message.parent_root == a2.message.parent_root) and
           (a1.message.state_root == a2.message.state_root)
proc peerSlotTests(): Future[bool] {.async.} =
  # slot0: 3 ok
  # slot1: 2 ok 1 timeout
  # slot2: 1 ok 2 timeout
  # slot3: 2 ok 1 bad
  # slot4: 1 ok 2 bad
  # slot5: 2 ok 1 failure
  # slot6: 1 ok 2 failure
  # slot7: 1 ok 1 bad 1 failure
  # slot8: 1 bad 1 timeout 1 failure
  # slot9: 3 bad
  # slot10: 3 timeout
  # slot11: 3 failure
  var pool = newPeerPool[SimplePeer, SimplePeerKey]()
  var sman = newSyncManager[SimplePeer,
                            SimplePeerKey](pool, nil, nil,
                                           peersInSlot = 3,
                                           peerSlotTimeout = 1.seconds,
                                           slotsInGroup = 6)

  var chain1 = newTempChain(10, Slot(10000))
  var chain2 = newTempChain(10, Slot(11000))

  var peers = newSeq[SimplePeer]()
  for i in 0 ..< 36:
    var peer = SimplePeer.init("id" & $i)
    peers.add(peer)

  peers[0].update(chain1)
  peers[1].update(chain1)
  peers[2].update(chain1)

  peers[3].update(chain1)
  peers[4].update(chain1, delay = 2.seconds)
  peers[5].update(chain1)

  peers[6].update(chain1)
  peers[7].update(chain1, delay = 2.seconds)
  peers[8].update(chain1, delay = 2.seconds)

  peers[9].update(chain1)
  peers[10].update(chain1)
  peers[11].update(chain2, malicious = true)

  peers[12].update(chain1)
  peers[13].update(chain2, malicious = true)
  peers[14].update(chain2, malicious = true)

  peers[15].update(chain1)
  peers[16].update(chain1)
  peers[17].update(chain1, failure = true)

  peers[18].update(chain1)
  peers[19].update(chain1, failure = true)
  peers[20].update(chain1, failure = true)

  peers[21].update(chain1)
  peers[22].update(chain2, malicious = true)
  peers[23].update(chain1, failure = true)

  peers[24].update(chain2, malicious = true)
  peers[25].update(chain1, failure = true)
  peers[26].update(chain1, delay = 2.seconds)

  peers[27].update(chain2, malicious = true)
  peers[28].update(chain2, malicious = true)
  peers[29].update(chain2, malicious = true)

  peers[30].update(chain1, delay = 2.seconds)
  peers[31].update(chain1, delay = 2.seconds)
  peers[32].update(chain1, delay = 2.seconds)

  peers[33].update(chain1, failure = true)
  peers[34].update(chain1, failure = true)
  peers[35].update(chain1, failure = true)

  var slot0 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot0.peers = @[peers[0], peers[1], peers[2]]

  var slot1 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot1.peers = @[peers[3], peers[4], peers[5]]

  var slot2 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot2.peers = @[peers[6], peers[7], peers[8]]

  var slot3 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot3.peers = @[peers[9], peers[10], peers[11]]

  var slot4 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot4.peers = @[peers[12], peers[13], peers[14]]

  var slot5 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot5.peers = @[peers[15], peers[16], peers[17]]

  var slot6 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot6.peers = @[peers[18], peers[19], peers[20]]

  var slot7 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot7.peers = @[peers[21], peers[22], peers[23]]

  var slot8 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot8.peers = @[peers[24], peers[25], peers[26]]

  var slot9 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot9.peers = @[peers[27], peers[28], peers[29]]

  var slot10 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot10.peers = @[peers[30], peers[31], peers[32]]

  var slot11 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot11.peers = @[peers[33], peers[34], peers[35]]

  var s0 = await slot0.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s1 = await slot1.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s2 = await slot2.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s3 = await slot3.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s4 = await slot4.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s5 = await slot5.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s6 = await slot6.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s7 = await slot7.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s8 = await slot8.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s9 = await slot9.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s10 = await slot10.getBlocks(Slot(10000), 10'u64, 1'u64)
  var s11 = await slot11.getBlocks(Slot(10000), 10'u64, 1'u64)

  var expected = BlockList.init(Slot(10000), 10'u64, 1'u64, chain1).get()

  doAssert(s0.isSome())
  doAssert(s1.isSome())
  doAssert(s2.isNone())
  doAssert(s3.isSome())
  doAssert(s4.isNone())
  doAssert(s5.isSome())
  doAssert(s6.isNone())
  doAssert(s7.isNone())
  doAssert(s8.isNone())
  doAssert(s9.isNone())
  doAssert(s10.isNone())
  doAssert(s11.isNone())
  doAssert($s0.get() == $expected)
  doAssert($s1.get() == $expected)
  doAssert($s3.get() == $expected)
  doAssert($s5.get() == $expected)

  result = true
proc peerGroupTests(): Future[bool] {.async.} =
  # group0: 3 ok
  # group1: 2 ok 1 bad
  # group2: 1 ok 2 bad
  # group3: 3 bad
  var pool = newPeerPool[SimplePeer, SimplePeerKey]()
  var sman = newSyncManager[SimplePeer,
                            SimplePeerKey](pool, nil, nil,
                                           peersInSlot = 3,
                                           peerSlotTimeout = 1.seconds,
                                           slotsInGroup = 6)

  var chain1 = newTempChain(10, Slot(10000))
  var chain2 = newTempChain(10, Slot(11000))

  var peers = newSeq[SimplePeer]()
  for i in 0 ..< 18:
    var peer = SimplePeer.init("id" & $i)
    peers.add(peer)

  proc cleanup() =
    for i in 0 ..< 18:
      peers[i].requests.setLen(0)

  peers[0].update(chain1)
  peers[1].update(chain1)
  peers[2].update(chain1)

  peers[3].update(chain1)
  peers[4].update(chain1)
  peers[5].update(chain1)

  peers[6].update(chain1)
  peers[7].update(chain1)
  peers[8].update(chain1)

  peers[9].update(chain1)
  peers[10].update(chain2, malicious = true)
  peers[11].update(chain2, malicious = true)

  peers[12].update(chain1, delay = 2.seconds)
  peers[13].update(chain1, delay = 2.seconds)
  peers[14].update(chain1, delay = 2.seconds)

  peers[15].update(chain1, failure = true)
  peers[16].update(chain1, failure = true)
  peers[17].update(chain1, failure = true)

  var slot0 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot0.peers = @[peers[0], peers[1], peers[2]]
  var slot1 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot1.peers = @[peers[3], peers[4], peers[5]]
  var slot2 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot2.peers = @[peers[6], peers[7], peers[8]]
  var slot3 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot3.peers = @[peers[9], peers[10], peers[11]]
  var slot4 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot4.peers = @[peers[12], peers[13], peers[14]]
  var slot5 = newPeerSlot[SimplePeer, SimplePeerKey](sman)
  slot5.peers = @[peers[15], peers[16], peers[17]]

  var group0 = newPeerGroup(sman)
  group0.slots = @[slot0, slot1, slot2]
  var group1 = newPeerGroup(sman)
  group1.slots = @[slot0, slot1, slot3]
  var group2 = newPeerGroup(sman)
  group2.slots = @[slot0, slot3, slot4]
  var group3 = newPeerGroup(sman)
  group3.slots = @[slot3, slot4, slot5]

  var s0 = await group0.getBlocks(Slot(10000), 10'u64)
  cleanup()
  var s1 = await group1.getBlocks(Slot(10000), 10'u64)
  cleanup()
  var s2 = await group2.getBlocks(Slot(10000), 10'u64)
  cleanup()
  var s3 = await group3.getBlocks(Slot(10000), 10'u64)
  cleanup()

  var expected = BlockList.init(Slot(10000), 10'u64, 1'u64, chain1).get()

  doAssert(s0.isSome())
  doAssert(s1.isSome())
  doAssert(s2.isSome())
  doAssert(s3.isNone())

  doAssert($s0.get() == $expected)
  doAssert($s1.get() == $expected)
  doAssert($s2.get() == $expected)

  result = true
proc syncQueueNonAsyncTests(): bool =
|
||||
var q1 = SyncQueue.init(Slot(0), Slot(0), 1'u64, nil)
|
||||
doAssert(len(q1) == 1)
|
||||
var r11 = q1.pop()
|
||||
doAssert(len(q1) == 0)
|
||||
q1.push(r11)
|
||||
doAssert(len(q1) == 1)
|
||||
var r11e = q1.pop()
|
||||
doAssert(len(q1) == 0)
|
||||
doAssert(r11e == r11)
|
||||
doAssert(r11.slot == Slot(0) and r11.count == 1'u64)
|
||||
|
||||
var q2 = SyncQueue.init(Slot(0), Slot(1), 1'u64, nil)
|
||||
doAssert(len(q2) == 2)
|
||||
var r21 = q2.pop()
|
||||
doAssert(len(q2) == 1)
|
||||
var r22 = q2.pop()
|
||||
doAssert(len(q2) == 0)
|
||||
q2.push(r22)
|
||||
doAssert(len(q2) == 1)
|
||||
q2.push(r21)
|
||||
doAssert(len(q2) == 2)
|
||||
var r21e = q2.pop()
|
||||
doAssert(len(q2) == 1)
|
||||
var r22e = q2.pop()
|
||||
doAssert(len(q2) == 0)
|
||||
doAssert(r21 == r21e)
|
||||
doAssert(r22 == r22e)
|
||||
doAssert(r21.slot == Slot(0) and r21.count == 1'u64)
|
||||
doAssert(r22.slot == Slot(1) and r22.count == 1'u64)
|
||||
|
||||
var q3 = SyncQueue.init(Slot(0), Slot(4), 2'u64, nil)
|
||||
doAssert(len(q3) == 5)
|
||||
var r31 = q3.pop()
|
||||
doAssert(len(q3) == 3)
|
||||
var r32 = q3.pop()
|
||||
doAssert(len(q3) == 1)
|
||||
var r33 = q3.pop()
|
||||
doAssert(len(q3) == 0)
|
||||
q3.push(r33)
|
||||
doAssert(len(q3) == 1)
|
||||
q3.push(r32)
|
||||
doAssert(len(q3) == 3)
|
||||
q3.push(r31)
|
||||
doAssert(len(q3) == 5)
|
||||
var r31e = q3.pop()
|
||||
doAssert(len(q3) == 3)
|
||||
var r32e = q3.pop()
|
||||
doAssert(len(q3) == 1)
|
||||
var r33e = q3.pop()
|
||||
doAssert(len(q3) == 0)
|
||||
doAssert(r31 == r31e)
|
||||
doAssert(r32 == r32e)
|
||||
doAssert(r33 == r33e)
|
||||
doAssert(r31.slot == Slot(0) and r31.count == 2'u64)
|
||||
doAssert(r32.slot == Slot(2) and r32.count == 2'u64)
|
||||
doAssert(r33.slot == Slot(4) and r33.count == 1'u64)
|
||||
|
||||
var q4 = SyncQueue.init(Slot(1), Slot(5), 3'u64, nil)
|
||||
doAssert(len(q4) == 5)
|
||||
var r41 = q4.pop()
|
||||
doAssert(len(q4) == 2)
|
||||
var r42 = q4.pop()
|
||||
doAssert(len(q4) == 0)
|
||||
q4.push(r42)
|
||||
doAssert(len(q4) == 2)
|
||||
q4.push(r41)
|
||||
doAssert(len(q4) == 5)
|
||||
var r41e = q4.pop()
|
||||
doAssert(len(q4) == 2)
|
||||
var r42e = q4.pop()
|
||||
doAssert(len(q4) == 0)
|
||||
doAssert(r41 == r41e)
|
||||
doAssert(r42 == r42e)
|
||||
doAssert(r41.slot == Slot(1) and r41.count == 3'u64)
|
||||
doAssert(r42.slot == Slot(4) and r42.count == 2'u64)
|
||||
|
||||
var q5 = SyncQueue.init(Slot(1), Slot(30), 2'u64, nil)
|
||||
doAssert(len(q5) == 30)
|
||||
var r51 = q5.pop(5)
|
||||
doAssert(len(q5) == 20)
|
||||
doAssert(r51.slot == Slot(1) and r51.count == 10 and r51.step == 5)
|
||||
q5.push(r51, 3'u64)
|
||||
doAssert(len(q5) == 30)
|
||||
var r511 = q5.pop()
|
||||
var r512 = q5.pop()
|
||||
doAssert(len(q5) == 20)
|
||||
doAssert(r511.slot == Slot(1) and r511.count == 6 and r511.step == 3)
|
||||
doAssert(r512.slot == Slot(7) and r512.count == 4 and r512.step == 2)
|
||||
q5.push(r511, 2'u64)
|
||||
q5.push(r512, 1'u64)
|
||||
doAssert(len(q5) == 30)
|
||||
var r5111 = q5.pop()
|
||||
var r5112 = q5.pop()
|
||||
var r5121 = q5.pop()
|
||||
var r5122 = q5.pop()
|
||||
doAssert(len(q5) == 20)
|
||||
doAssert(r5111.slot == Slot(1) and r5111.count == 4 and r5111.step == 2)
|
||||
doAssert(r5112.slot == Slot(5) and r5112.count == 2 and r5112.step == 1)
|
||||
doAssert(r5121.slot == Slot(7) and r5121.count == 2 and r5121.step == 1)
|
||||
doAssert(r5122.slot == Slot(9) and r5122.count == 2 and r5122.step == 1)
  var q6 = SyncQueue.init(Slot(1), Slot(7), 10'u64, nil)
  doAssert(len(q6) == 7)
  var r61 = q6.pop()
  doAssert(r61.slot == Slot(1) and r61.count == 7 and r61.step == 1)
  doAssert(len(q6) == 0)

  var q7 = SyncQueue.init(Slot(1), Slot(7), 10'u64, nil)
  doAssert(len(q7) == 7)
  var r71 = q7.pop(5)
  doAssert(len(q7) == 0)
  doAssert(r71.slot == Slot(1) and r71.count == 7 and r71.step == 5)
  q7.push(r71, 3'u64)
  doAssert(len(q7) == 7)
  var r72 = q7.pop()
  doAssert(r72.slot == Slot(1) and r72.count == 7 and r72.step == 3)
  q7.push(r72, 2'u64)
  doAssert(len(q7) == 7)
  var r73 = q7.pop()
  doAssert(len(q7) == 0)
  doAssert(r73.slot == Slot(1) and r73.count == 7 and r73.step == 2)
  q7.push(r73, 1'u64)
  doAssert(len(q7) == 7)
  var r74 = q7.pop()
  doAssert(len(q7) == 0)
  doAssert(r74.slot == Slot(1) and r74.count == 7 and r74.step == 1)

  result = true
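
# SyncQueue with a results callback: push() futures for out-of-order ranges
# stay pending until every earlier range has been delivered, so receiver1
# always observes blocks in strict slot order.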
proc syncQueueAsyncTests(): Future[bool] {.async.} =
  var chain1 = newSeq[SignedBeaconBlock](3)
  chain1[0].message.slot = Slot(0)
  chain1[1].message.slot = Slot(1)
  chain1[2].message.slot = Slot(2)
  var chain2 = newSeq[SignedBeaconBlock](7)
  chain2[0].message.slot = Slot(5)
  chain2[1].message.slot = Slot(6)
  chain2[2].message.slot = Slot(7)
  chain2[3].message.slot = Slot(8)
  chain2[4].message.slot = Slot(9)
  chain2[5].message.slot = Slot(10)
  chain2[6].message.slot = Slot(11)

  var counter = 0
  proc receiver1(list: openarray[SignedBeaconBlock]): bool =
    result = true
    for item in list:
      if item.message.slot == uint64(counter):
        inc(counter)
      else:
        result = false
        break

  var q1 = SyncQueue.init(Slot(0), Slot(2), 1'u64, receiver1, 1)
  var r11 = q1.pop()
  var r12 = q1.pop()
  var r13 = q1.pop()
  var f13 = q1.push(r13, @[chain1[2]])
  var f12 = q1.push(r12, @[chain1[1]])
  await sleepAsync(100.milliseconds)
  doAssert(f12.finished == false)
  doAssert(f13.finished == false)
  doAssert(counter == 0)
  var f11 = q1.push(r11, @[chain1[0]])
  doAssert(counter == 1)
  doAssert(f11.finished == true and f11.failed == false)
  await sleepAsync(100.milliseconds)
  doAssert(f12.finished == true and f12.failed == false)
  doAssert(f13.finished == true and f13.failed == false)
  doAssert(counter == 3)
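
  # The same ordering property holds with 2-slot chunks: f24, covering the
  # highest range, only completes once all lower ranges have been pushed.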
  var q2 = SyncQueue.init(Slot(5), Slot(11), 2'u64, receiver1, 2)
  var r21 = q2.pop()
  var r22 = q2.pop()
  var r23 = q2.pop()
  var r24 = q2.pop()

  counter = 5

  var f24 = q2.push(r24, @[chain2[6]])
  var f22 = q2.push(r22, @[chain2[2], chain2[3]])
  doAssert(f24.finished == false)
  doAssert(f22.finished == false)
  doAssert(counter == 5)
  var f21 = q2.push(r21, @[chain2[0], chain2[1]])
  doAssert(f21.finished == true and f21.failed == false)
  await sleepAsync(100.milliseconds)
  doAssert(f22.finished == true and f22.failed == false)
  doAssert(f24.finished == false)
  doAssert(counter == 9)
  var f23 = q2.push(r23, @[chain2[4], chain2[5]])
  doAssert(f23.finished == true and f23.failed == false)
  doAssert(counter == 11)
  await sleepAsync(100.milliseconds)
  doAssert(f24.finished == true and f24.failed == false)
  doAssert(counter == 12)

  result = true
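
# checkRequest verifies a recorded request's header (start slot, count, step)
# and, when data is supplied, the exact slots that were requested.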
proc checkRequest(req: PeerRequest, slot, count, step: int,
                  data: varargs[int]): bool =
  result = (req.startSlot == Slot(slot)) and (req.count == uint64(count)) and
           (req.step == uint64(step))
  if result:
    if len(data) != len(req.data):
      result = false
    else:
      for i in 0 ..< len(data):
        if Slot(data[i]) != req.data[i]:
          result = false
          break

proc checkRequest(peer: SimplePeer, index: int, slot, count, step: int,
                  data: varargs[int]): bool {.inline.} =
  result = checkRequest(peer.requests[index], slot, count, step, data)
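
# Every scenario below stubs the local chain in the same way: lastLocalSlot()
# reports slot 9999 while dstChain is empty, so syncing starts at slot 10000,
# the first slot of the 100-block source chain.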
proc syncManagerOnePeerTest(): Future[bool] {.async.} =
  # Syncing with one peer only.
  var pool = newPeerPool[SimplePeer, SimplePeerKey]()
  var peer = SimplePeer.init("id1")
  var srcChain = newTempChain(100, Slot(10000))
  var dstChain = newSeq[SignedBeaconBlock]()

  proc lastLocalSlot(): Slot =
    if len(dstChain) == 0:
      result = Slot(9999)
    else:
      result = dstChain[^1].message.slot

  proc updateBlocks(list: openarray[SignedBeaconBlock]): bool =
    for item in list:
      dstChain.add(item)
    result = true

  peer.update(srcChain)
  doAssert(pool.addIncomingPeerNoWait(peer) == true)

  var sman = newSyncManager[SimplePeer,
                            SimplePeerKey](pool, lastLocalSlot, updateBlocks,
                                           peersInSlot = 3,
                                           peerSlotTimeout = 1.seconds,
                                           slotsInGroup = 6)
  await sman.synchronize()
  doAssert(checkRequest(peer, 0, 10000, 20, 1,
                        10000, 10001, 10002, 10003, 10004,
                        10005, 10006, 10007, 10008, 10009,
                        10010, 10011, 10012, 10013, 10014,
                        10015, 10016, 10017, 10018, 10019) == true)
  doAssert(checkRequest(peer, 1, 10020, 20, 1,
                        10020, 10021, 10022, 10023, 10024,
                        10025, 10026, 10027, 10028, 10029,
                        10030, 10031, 10032, 10033, 10034,
                        10035, 10036, 10037, 10038, 10039) == true)
  doAssert(checkRequest(peer, 2, 10040, 20, 1,
                        10040, 10041, 10042, 10043, 10044,
                        10045, 10046, 10047, 10048, 10049,
                        10050, 10051, 10052, 10053, 10054,
                        10055, 10056, 10057, 10058, 10059) == true)
  doAssert(checkRequest(peer, 3, 10060, 20, 1,
                        10060, 10061, 10062, 10063, 10064,
                        10065, 10066, 10067, 10068, 10069,
                        10070, 10071, 10072, 10073, 10074,
                        10075, 10076, 10077, 10078, 10079) == true)
  doAssert(checkRequest(peer, 4, 10080, 20, 1,
                        10080, 10081, 10082, 10083, 10084,
                        10085, 10086, 10087, 10088, 10089,
                        10090, 10091, 10092, 10093, 10094,
                        10095, 10096, 10097, 10098, 10099) == true)
  result = true
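
# All peers in a single peer slot are sent identical requests, so each of the
# three peers below records exactly the same five request ranges.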
proc syncManagerOneSlotTest(): Future[bool] {.async.} =
  # Syncing with one peer slot (2n + 1 peers) only.
  var pool = newPeerPool[SimplePeer, SimplePeerKey]()

  var peers = newSeq[SimplePeer](3)
  for i in 0 ..< len(peers):
    peers[i] = SimplePeer.init("id" & $(i + 1))

  var srcChain = newTempChain(100, Slot(10000))
  var dstChain = newSeq[SignedBeaconBlock]()

  proc lastLocalSlot(): Slot =
    if len(dstChain) == 0:
      result = Slot(9999)
    else:
      result = dstChain[^1].message.slot

  proc updateBlocks(list: openarray[SignedBeaconBlock]): bool =
    for item in list:
      dstChain.add(item)
    result = true

  for i in 0 ..< len(peers):
    peers[i].update(srcChain)
  doAssert(pool.addIncomingPeerNoWait(peers[0]) == true)
  doAssert(pool.addOutgoingPeerNoWait(peers[1]) == true)
  doAssert(pool.addOutgoingPeerNoWait(peers[2]) == true)

  var sman = newSyncManager[SimplePeer,
                            SimplePeerKey](pool, lastLocalSlot, updateBlocks,
                                           peersInSlot = 3,
                                           peerSlotTimeout = 1.seconds,
                                           slotsInGroup = 6)
  await sman.synchronize()
  for i in 0 ..< len(peers):
    doAssert(checkRequest(peers[i], 0, 10000, 20, 1,
                          10000, 10001, 10002, 10003, 10004,
                          10005, 10006, 10007, 10008, 10009,
                          10010, 10011, 10012, 10013, 10014,
                          10015, 10016, 10017, 10018, 10019) == true)
    doAssert(checkRequest(peers[i], 1, 10020, 20, 1,
                          10020, 10021, 10022, 10023, 10024,
                          10025, 10026, 10027, 10028, 10029,
                          10030, 10031, 10032, 10033, 10034,
                          10035, 10036, 10037, 10038, 10039) == true)
    doAssert(checkRequest(peers[i], 2, 10040, 20, 1,
                          10040, 10041, 10042, 10043, 10044,
                          10045, 10046, 10047, 10048, 10049,
                          10050, 10051, 10052, 10053, 10054,
                          10055, 10056, 10057, 10058, 10059) == true)
    doAssert(checkRequest(peers[i], 3, 10060, 20, 1,
                          10060, 10061, 10062, 10063, 10064,
                          10065, 10066, 10067, 10068, 10069,
                          10070, 10071, 10072, 10073, 10074,
                          10075, 10076, 10077, 10078, 10079) == true)
    doAssert(checkRequest(peers[i], 4, 10080, 20, 1,
                          10080, 10081, 10082, 10083, 10084,
                          10085, 10086, 10087, 10088, 10089,
                          10090, 10091, 10092, 10093, 10094,
                          10095, 10096, 10097, 10098, 10099) == true)
  result = true
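
# With two slots per group the work is interleaved: the first slot requests
# the even offsets (step 2 from 10000) and the second slot the odd offsets
# (step 2 from 10001), covering the range without overlap.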
proc syncManagerOneGroupTest(): Future[bool] {.async.} =
  # Syncing with one group of peers (n peer slots).
  var pool = newPeerPool[SimplePeer, SimplePeerKey]()
  var peers = newSeq[SimplePeer](6)
  for i in 0 ..< len(peers):
    peers[i] = SimplePeer.init("id" & $(i + 1), weight = 10 - i)

  var srcChain = newTempChain(100, Slot(10000))
  var dstChain = newSeq[SignedBeaconBlock]()

  proc lastLocalSlot(): Slot =
    if len(dstChain) == 0:
      result = Slot(9999)
    else:
      result = dstChain[^1].message.slot

  proc updateBlocks(list: openarray[SignedBeaconBlock]): bool =
    for item in list:
      dstChain.add(item)
    result = true

  for i in 0 ..< len(peers):
    peers[i].update(srcChain)
    if i mod 2 == 0:
      doAssert(pool.addIncomingPeerNoWait(peers[i]) == true)
    else:
      doAssert(pool.addOutgoingPeerNoWait(peers[i]) == true)

  var sman = newSyncManager[SimplePeer,
                            SimplePeerKey](pool, lastLocalSlot, updateBlocks,
                                           peersInSlot = 3,
                                           peerSlotTimeout = 1.seconds,
                                           slotsInGroup = 2)
  await sman.synchronize()
  for i in 0 ..< len(peers):
    if i in {0, 1, 2}:
      doAssert(checkRequest(peers[i], 0, 10000, 20, 2,
                            10000, 10002, 10004, 10006, 10008,
                            10010, 10012, 10014, 10016, 10018,
                            10020, 10022, 10024, 10026, 10028,
                            10030, 10032, 10034, 10036, 10038) == true)
      doAssert(checkRequest(peers[i], 1, 10040, 20, 2,
                            10040, 10042, 10044, 10046, 10048,
                            10050, 10052, 10054, 10056, 10058,
                            10060, 10062, 10064, 10066, 10068,
                            10070, 10072, 10074, 10076, 10078) == true)
      doAssert(checkRequest(peers[i], 2, 10080, 10, 2,
                            10080, 10082, 10084, 10086, 10088,
                            10090, 10092, 10094, 10096, 10098) == true)
    elif i in {3, 4, 5}:
      doAssert(checkRequest(peers[i], 0, 10001, 20, 2,
                            10001, 10003, 10005, 10007, 10009,
                            10011, 10013, 10015, 10017, 10019,
                            10021, 10023, 10025, 10027, 10029,
                            10031, 10033, 10035, 10037, 10039) == true)
      doAssert(checkRequest(peers[i], 1, 10041, 20, 2,
                            10041, 10043, 10045, 10047, 10049,
                            10051, 10053, 10055, 10057, 10059,
                            10061, 10063, 10065, 10067, 10069,
                            10071, 10073, 10075, 10077, 10079) == true)
      doAssert(checkRequest(peers[i], 2, 10081, 10, 2,
                            10081, 10083, 10085, 10087, 10089,
                            10091, 10093, 10095, 10097, 10099) == true)

  result = true
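
# When the first group fails and disconnects, its undelivered ranges are
# re-requested from the remaining group, which ends up fetching the whole
# range with step 1, as the requests recorded for peers 6..8 show.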
proc syncManagerGroupRecoveryTest(): Future[bool] {.async.} =
  # Syncing with two groups of peers (n peer slots), when one group fails to
  # deliver its request, and that request is bigger than the other group's.
  var pool = newPeerPool[SimplePeer, SimplePeerKey]()
  var peers = newSeq[SimplePeer](6 + 3)
  for i in 0 ..< len(peers):
    peers[i] = SimplePeer.init("id" & $(i + 1), weight = 9 - i)

  var srcChain = newTempChain(100, Slot(10000))
  var dstChain = newSeq[SignedBeaconBlock]()

  for i in 0 ..< 6:
    peers[i].update(srcChain, failure = true, disconnect = true)
  for i in 6 ..< len(peers):
    peers[i].update(srcChain)

  proc lastLocalSlot(): Slot =
    if len(dstChain) == 0:
      result = Slot(9999)
    else:
      result = dstChain[^1].message.slot

  proc updateBlocks(list: openarray[SignedBeaconBlock]): bool =
    for item in list:
      dstChain.add(item)
    result = true

  for i in 0 ..< len(peers):
    if i mod 2 == 0:
      doAssert(pool.addIncomingPeerNoWait(peers[i]) == true)
    else:
      doAssert(pool.addOutgoingPeerNoWait(peers[i]) == true)

  var sman = newSyncManager[SimplePeer,
                            SimplePeerKey](pool, lastLocalSlot, updateBlocks,
                                           peersInSlot = 3,
                                           peerSlotTimeout = 1.seconds,
                                           slotsInGroup = 2)
  await sman.synchronize()
  for i in 0 ..< len(peers):
    if i in {0, 1, 2}:
      doAssert(checkRequest(peers[i], 0, 10020, 20, 2) == true)
    elif i in {3, 4, 5}:
      doAssert(checkRequest(peers[i], 0, 10021, 20, 2) == true)
    elif i in {6, 7, 8}:
      doAssert(checkRequest(peers[i], 0, 10000, 20, 1,
                            10000, 10001, 10002, 10003, 10004,
                            10005, 10006, 10007, 10008, 10009,
                            10010, 10011, 10012, 10013, 10014,
                            10015, 10016, 10017, 10018, 10019) == true)
      doAssert(checkRequest(peers[i], 1, 10020, 20, 1,
                            10020, 10021, 10022, 10023, 10024,
                            10025, 10026, 10027, 10028, 10029,
                            10030, 10031, 10032, 10033, 10034,
                            10035, 10036, 10037, 10038, 10039) == true)
      doAssert(checkRequest(peers[i], 2, 10040, 20, 1,
                            10040, 10041, 10042, 10043, 10044,
                            10045, 10046, 10047, 10048, 10049,
                            10050, 10051, 10052, 10053, 10054,
                            10055, 10056, 10057, 10058, 10059) == true)
      doAssert(checkRequest(peers[i], 3, 10060, 20, 1,
                            10060, 10061, 10062, 10063, 10064,
                            10065, 10066, 10067, 10068, 10069,
                            10070, 10071, 10072, 10073, 10074,
                            10075, 10076, 10077, 10078, 10079) == true)
      doAssert(checkRequest(peers[i], 4, 10080, 20, 1,
                            10080, 10081, 10082, 10083, 10084,
                            10085, 10086, 10087, 10088, 10089,
                            10090, 10091, 10092, 10093, 10094,
                            10095, 10096, 10097, 10098, 10099) == true)
  result = true
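
# A peer that keeps failing is retried at most FailuresCount times for the
# same range before the manager gives up; the request log below records
# exactly FailuresCount identical attempts.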
proc syncManagerFailureTest(): Future[bool] {.async.} =
  # Failure test
  const FailuresCount = 3
  var pool = newPeerPool[SimplePeer, SimplePeerKey]()
  var peer = SimplePeer.init("id1", weight = 0)

  var srcChain = newTempChain(100, Slot(10000))
  var dstChain = newSeq[SignedBeaconBlock]()

  peer.update(srcChain, failure = true)

  proc lastLocalSlot(): Slot =
    if len(dstChain) == 0:
      result = Slot(9999)
    else:
      result = dstChain[^1].message.slot

  proc updateBlocks(list: openarray[SignedBeaconBlock]): bool =
    for item in list:
      dstChain.add(item)
    result = true

  doAssert(pool.addIncomingPeerNoWait(peer) == true)

  var sman = newSyncManager[SimplePeer,
                            SimplePeerKey](pool, lastLocalSlot, updateBlocks,
                                           peersInSlot = 3,
                                           peerSlotTimeout = 1.seconds,
                                           slotsInGroup = 2,
                                           failuresCount = FailuresCount,
                                           failurePause = 100.milliseconds)
  await sman.synchronize()
  doAssert(len(peer.requests) == FailuresCount)
  for i in 0 ..< len(peer.requests):
    doAssert(checkRequest(peer, i, 10000, 20, 1) == true)
  result = true

suite "SyncManager test suite":
  timedTest "PeerSlot tests":
    check waitFor(peerSlotTests()) == true
  timedTest "PeerGroup tests":
    check waitFor(peerGroupTests()) == true
  timedTest "SyncQueue non-async tests":
    check syncQueueNonAsyncTests() == true
  timedTest "SyncQueue async tests":
    check waitFor(syncQueueAsyncTests()) == true
  timedTest "SyncManager one-peer test":
    check waitFor(syncManagerOnePeerTest()) == true
  timedTest "SyncManager one-peer-slot test":
    check waitFor(syncManagerOneSlotTest()) == true
  timedTest "SyncManager one-peer-group test":
    check waitFor(syncManagerOneGroupTest()) == true
  timedTest "SyncManager group-recovery test":
    check waitFor(syncManagerGroupRecoveryTest()) == true
  timedTest "SyncManager failure test":
    check waitFor(syncManagerFailureTest()) == true

@ -1 +1 @@
-Subproject commit 6cfabf7820834cb99bd30fc664d3ab91eb0493bf
+Subproject commit 2a70c4f152ee849db1ededa92c1d80f7102dd718