Merge remote-tracking branch 'origin/stable' into merge-stable

Jacek Sieka 2021-08-09 13:14:28 +02:00
commit 7bb76a6cd1
No known key found for this signature in database
GPG Key ID: A1B09461ABB656B8
6 changed files with 117 additions and 8 deletions

View File

@@ -1,3 +1,49 @@
2021-07-10 v1.4.2
=================
Nimbus `v1.4.2` - "Upgrade procedure: Hotfix release"
This release is marked as `low-urgency` for all Nimbus users other than those who have recently updated to `v1.4.1` from a version earlier than `v1.1.0` - for these users this is a `high-urgency` release.
## Notable changes
This release fixes an issue in the upgrade procedure when upgrading from a version earlier than `1.1.0` to `1.4.x`.
**How can I tell if I've been affected?**
If you've already upgraded to `1.4.1`, you can tell that you've been affected if you're seeing the following `WRN` log:
```
Received invalid sequence of blocks
```
To reiterate, this issue only affects users who are upgrading from `1.0.12` or earlier (released on `2021-03-10`), **and who have not run any release in between**. Everyone else can ignore this release.
2021-07-10 v1.4.1
=================
Nimbus v1.4.1 - "Every attestation counts"
This release is marked as `low-urgency`.
## Notable changes
Nimbus `v1.4.0` users might have noticed that they are missing a small number of (seemingly random) attestations since the update. Our investigation into the matter has shown that, due to `v1.4.0`'s significant performance improvements, Nimbus validators occasionally send their first attestation for a new epoch before some peers are ready. These "slow" peers end up dropping early attestations because they're busy with the epoch transition.
It's a rare occurrence, since it requires a validator to be scheduled to attest in the first slot of an epoch *and* for the beacon node to only be connected to "slow" peers for the respective libp2p topic. If both these conditions are true, a premature attestation may be lost in time, like tears in the rain.
As a fix, we are using a larger send delay: [#2705](https://github.com/status-im/nimbus-eth2/pull/2705).
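To illustrate the shape of the fix, here is a hand-written sketch; it is not the code from [#2705](https://github.com/status-im/nimbus-eth2/pull/2705), and the `sendAttestationDelayed`, `slotStart`, `sendDelay` and `broadcastAttestation` names are made up for the example. The idea is simply to wait a little longer into the slot before broadcasting, so that peers still busy with the epoch transition have finished by the time the attestation arrives:

```nim
# Illustrative sketch only: hold the attestation back until a short,
# configurable delay past the start of the slot has elapsed.
import chronos

proc sendAttestationDelayed(slotStart: Moment, sendDelay: Duration) {.async.} =
  let deadline = slotStart + sendDelay
  if Moment.now() < deadline:
    await sleepAsync(deadline - Moment.now())
  # broadcastAttestation(...)  # hypothetical broadcast call goes here
```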
We encourage those Nimbus `v1.4.0` users who are concerned about reaching optimal attestation effectiveness to upgrade as soon as possible.
Other changes include log flushing and metrics fixes.
Full list:
- increase attestation wait time ([#2705](https://github.com/status-im/nimbus-eth2/pull/2705))
- ensure logs are printed without delays ([#2669](https://github.com/status-im/nimbus-eth2/pull/2669))
- fix metrics on Windows ([#2707](https://github.com/status-im/nimbus-eth2/pull/2707))
2021-06-21 v1.4.0
=================

View File

@@ -899,3 +899,15 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
      newSummaries.add(res)
      res.root = res.summary.parent_root

# Test operations used to create broken and/or legacy database

proc putStateV0*(db: BeaconChainDB, key: Eth2Digest, value: phase0.BeaconState) =
  # Writes to KVStore, as done in 1.0.12 and earlier
  db.v0.backend.putSnappySSZ(subkey(type value, key), value)

proc putBlockV0*(db: BeaconChainDB, value: phase0.TrustedSignedBeaconBlock) =
  # Write to KVStore, as done in 1.0.12 and earlier
  # In particular, no summary is written here - it should be recreated
  # automatically
  db.v0.backend.putSnappySSZ(subkey(phase0.SignedBeaconBlock, value.root), value)
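For context, the "pre-1.1.0" test added later in this commit uses these helpers roughly as follows to recreate the legacy (v1.0.12-era) on-disk layout; the snippet below is excerpted and simplified from that test:

```nim
# Simplified excerpt from the new "Old database versions" test suite: write a
# genesis state and block using the pre-1.1.0 KVStore layout, so that the
# ChainDAG can then be initialized from (and migrate) the old database.
let db = BeaconChainDB.new("", inMemory = true)
db.putStateV0(genBlock.message.state_root, genState[])
db.putBlockV0(genBlock)
db.putTailBlock(genBlock.root)
db.putHeadBlock(genBlock.root)
```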

View File

@@ -238,11 +238,15 @@ proc addRawBlockKnownParent(
    onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded
    ): Result[BlockRef, (ValidationResult, BlockError)] =
  ## Add a block whose parent is known, after performing validity checks

  logScope:
    blck = shortLog(signedBlock.message)
    blockRoot = shortLog(signedBlock.root)
    signature = shortLog(signedBlock.signature)

  if parent.slot >= signedBlock.message.slot:
    # A block whose parent is newer than the block itself is clearly invalid -
    # discard it immediately
-   debug "Invalid block slot",
+   info "Block with invalid parent, dropping",
      parentBlock = shortLog(parent)

    return err((ValidationResult.Reject, Invalid))

@@ -258,7 +262,7 @@ proc addRawBlockKnownParent(
    # correct - from their point of view, the head block they have is the
    # latest thing that happened on the chain and they're performing their
    # duty correctly.
-   debug "Unviable block, dropping",
+   info "Unviable block, dropping",
      finalizedHead = shortLog(dag.finalizedHead),
      tail = shortLog(dag.tail)

@@ -281,11 +285,16 @@ proc addRawBlockKnownParent(
  if skipBLSValidation notin dag.updateFlags:
    # TODO: remove skipBLSValidation
    var sigs: seq[SignatureSet]
-   if sigs.collectSignatureSets(
-       signedBlock, dag.db.immutableValidators, dag.clearanceState.data, cache).isErr():
+   if (let e = sigs.collectSignatureSets(
+       signedBlock, dag.db.immutableValidators,
+       dag.clearanceState.data, cache); e.isErr()):
+     info "Unable to load signature sets",
+       err = e.error()
      # A PublicKey or Signature isn't on the BLS12-381 curve
      return err((ValidationResult.Reject, Invalid))

    if not quarantine.batchVerify(sigs):
+     info "Block signature verification failed"
      return err((ValidationResult.Reject, Invalid))

  let sigVerifyTick = Moment.now()

View File

@@ -446,6 +446,10 @@ proc init*(T: type ChainDAGRef,
  # Pruning metadata
  dag.lastPrunePoint = dag.finalizedHead

  # Fill validator key cache in case we're loading an old database that doesn't
  # have a cache
  dag.updateValidatorKeys(getStateField(dag.headState.data, validators).asSeq())

  info "Block dag initialized",
    head = shortLog(headRef),
    finalizedHead = shortLog(dag.finalizedHead),

View File

@@ -15,7 +15,7 @@ when not defined(nimscript):
const
  versionMajor* = 1
  versionMinor* = 4
- versionBuild* = 0
+ versionBuild* = 2

  versionBlob* = "stateofus" # Single word - ends up in the default graffitti

View File

@@ -15,9 +15,10 @@ import
  eth/keys,
  ../beacon_chain/spec/datatypes/base,
  ../beacon_chain/spec/[
-   digest, forkedbeaconstate_helpers, helpers, state_transition, presets],
+   beaconstate, digest, forkedbeaconstate_helpers, helpers, state_transition,
+   presets],
  ../beacon_chain/beacon_node_types,
- ../beacon_chain/[beacon_chain_db, ssz],
+ ../beacon_chain/[beacon_chain_db, ssz, extras],
  ../beacon_chain/consensus_object_pools/[
    blockchain_dag, block_quarantine, block_clearance],
  ./testutil, ./testdbutil, ./testblockutil
@@ -341,13 +342,14 @@ suite "Block pool processing" & preset():
      tmpState.blck == b1Add[].parent
      getStateField(tmpState.data, slot) == bs1.parent.slot

+const nilPhase0Callback = OnPhase0BlockAdded(nil)

suite "chain DAG finalization tests" & preset():
  setup:
    var
      db = makeTestDB(SLOTS_PER_EPOCH)
      dag = init(ChainDAGRef, defaultRuntimeConfig, db, {})
      quarantine = QuarantineRef.init(keys.newRng())
-     nilPhase0Callback: OnPhase0BlockAdded
      cache = StateCache()
      rewards = RewardInfo()
@@ -528,3 +530,39 @@ suite "chain DAG finalization tests" & preset():
      dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
      dag2.finalizedHead.slot == dag.finalizedHead.slot
      hash_tree_root(dag2.headState.data) == hash_tree_root(dag.headState.data)

suite "Old database versions" & preset():
  setup:
    let
      genState = initialize_beacon_state_from_eth1(
        defaultRuntimeConfig,
        Eth2Digest(),
        0,
        makeInitialDeposits(SLOTS_PER_EPOCH.uint64, flags = {skipBlsValidation}),
        {skipBlsValidation})
      genBlock = get_initial_beacon_block(genState[])
      quarantine = QuarantineRef.init(keys.newRng())

  test "pre-1.1.0":
    # only kvstore, no immutable validator keys
    let db = BeaconChainDB.new("", inMemory = true)

    # preInit a database to a v1.0.12 state
    db.putStateV0(genBlock.message.state_root, genState[])
    db.putBlockV0(genBlock)
    db.putTailBlock(genBlock.root)
    db.putHeadBlock(genBlock.root)
    db.putStateRoot(genBlock.root, genState.slot, genBlock.message.state_root)
    db.putGenesisBlockRoot(genBlock.root)

    var
      dag = init(ChainDAGRef, defaultRuntimeConfig, db, {})
      state = newClone(dag.headState.data)
      cache = StateCache()
      att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
      b1 = addTestBlock(state[], dag.tail.root, cache, attestations = att0)
      b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)

    check:
      b1Add.isOk()