diff --git a/Jenkinsfile b/Jenkinsfile index ba3fe5538..c90352f85 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -35,7 +35,9 @@ def runStages() { sh """#!/bin/bash set -e make -j${env.NPROC} V=1 - make -j${env.NPROC} V=1 LOG_LEVEL=TRACE NIMFLAGS='-d:testnet_servers_image' beacon_node + make -j${env.NPROC} V=1 LOG_LEVEL=TRACE NIMFLAGS='-d:UseSlashingProtection=true -d:testnet_servers_image' beacon_node + # Miracl fallback + # make -j${env.NPROC} V=1 LOG_LEVEL=TRACE NIMFLAGS='-d:BLS_FORCE_BACKEND=miracl -d:UseSlashingProtection=true -d:testnet_servers_image' beacon_node """ } }, @@ -47,18 +49,11 @@ def runStages() { // EXECUTOR_NUMBER will be 0 or 1, since we have 2 executors per Jenkins node sh """#!/bin/bash set -e + export NIMFLAGS='-d:UseSlashingProtection=true' ./scripts/launch_local_testnet.sh --testnet 0 --nodes 4 --stop-at-epoch 5 --log-level DEBUG --disable-htop --enable-logtrace --data-dir local_testnet0_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-rpc-port \$(( 7000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --discv5:no ./scripts/launch_local_testnet.sh --testnet 1 --nodes 4 --stop-at-epoch 5 --log-level DEBUG --disable-htop --enable-logtrace --data-dir local_testnet1_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-rpc-port \$(( 7000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization --discv5:no """ } - // stage("testnet finalization - Miracl/Milagro fallback") { - // // EXECUTOR_NUMBER will be 0 or 1, since we have 2 executors per Jenkins node - // sh """#!/bin/bash - // set -e - // NIMFLAGS="-d:BLS_FORCE_BACKEND=miracl" ./scripts/launch_local_testnet.sh --testnet 0 --nodes 4 --stop-at-epoch 5 --log-level INFO --disable-htop --data-dir local_testnet0_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-rpc-port \$(( 7000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 
100 )) -- --verify-finalization - // NIMFLAGS="-d:BLS_FORCE_BACKEND=miracl" ./scripts/launch_local_testnet.sh --testnet 1 --nodes 4 --stop-at-epoch 5 --log-level INFO --disable-htop --data-dir local_testnet1_data --base-port \$(( 9000 + EXECUTOR_NUMBER * 100 )) --base-rpc-port \$(( 7000 + EXECUTOR_NUMBER * 100 )) --base-metrics-port \$(( 8008 + EXECUTOR_NUMBER * 100 )) -- --verify-finalization - // """ - // } } ) } @@ -100,4 +95,3 @@ parallel( } }, ) - diff --git a/beacon_chain.nimble b/beacon_chain.nimble index 7f3ef7630..85fdccbc0 100644 --- a/beacon_chain.nimble +++ b/beacon_chain.nimble @@ -61,10 +61,17 @@ task test, "Run all tests": # Just the part of minimal config which explicitly differs from mainnet buildAndRunBinary "test_fixture_const_sanity_check", "tests/official/", """-d:const_preset=minimal -d:chronicles_sinks="json[file]"""" + # Generic SSZ test, doesn't use consensus objects minimal/mainnet presets + buildAndRunBinary "test_fixture_ssz_generic_types", "tests/official/", """-d:chronicles_log_level=TRACE -d:chronicles_sinks="json[file]"""" + # Consensus object SSZ tests + buildAndRunBinary "test_fixture_ssz_consensus_objects", "tests/official/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:chronicles_sinks="json[file]"""" + # EF tests + buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:chronicles_sinks="json[file]"""" + # Mainnet config buildAndRunBinary "proto_array", "beacon_chain/fork_choice/", """-d:const_preset=mainnet -d:chronicles_sinks="json[file]"""" buildAndRunBinary "fork_choice", "beacon_chain/fork_choice/", """-d:const_preset=mainnet -d:chronicles_sinks="json[file]"""" - buildAndRunBinary "all_tests", "tests/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:chronicles_sinks="json[file]"""" + buildAndRunBinary "all_tests", "tests/", """-d:UseSlashingProtection=true -d:chronicles_log_level=TRACE -d:const_preset=mainnet 
-d:chronicles_sinks="json[file]"""" # Check Miracl/Milagro fallback on select tests buildAndRunBinary "test_interop", "tests/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:BLS_FORCE_BACKEND=miracl -d:chronicles_sinks="json[file]"""" @@ -74,14 +81,6 @@ task test, "Run all tests": buildAndRunBinary "test_attestation_pool", "tests/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:BLS_FORCE_BACKEND=miracl -d:chronicles_sinks="json[file]"""" buildAndRunBinary "test_block_pool", "tests/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:BLS_FORCE_BACKEND=miracl -d:chronicles_sinks="json[file]"""" - # Generic SSZ test, doesn't use consensus objects minimal/mainnet presets - buildAndRunBinary "test_fixture_ssz_generic_types", "tests/official/", """-d:chronicles_log_level=TRACE -d:chronicles_sinks="json[file]"""" - - # Consensus object SSZ tests - buildAndRunBinary "test_fixture_ssz_consensus_objects", "tests/official/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:chronicles_sinks="json[file]"""" - - buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", """-d:chronicles_log_level=TRACE -d:const_preset=mainnet -d:chronicles_sinks="json[file]"""" - # State and block sims; getting to 4th epoch triggers consensus checks buildAndRunBinary "state_sim", "research/", "-d:const_preset=mainnet -d:chronicles_log_level=INFO", "--validators=3000 --slots=128" # buildAndRunBinary "state_sim", "research/", "-d:const_preset=mainnet -d:BLS_FORCE_BACKEND=miracl -d:chronicles_log_level=INFO", "--validators=3000 --slots=128" diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index e5bcbcaf3..f0b76819d 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -32,6 +32,7 @@ import mainchain_monitor, version, ssz/[merkleization], merkle_minimal, sync_protocol, request_manager, keystore_management, interop, statusbar, sync_manager, validator_duties, validator_api, + 
validator_slashing_protection, ./eth2_processor const @@ -258,7 +259,6 @@ proc init*(T: type BeaconNode, netKeys: netKeys, db: db, config: conf, - attachedValidators: ValidatorPool.init(), chainDag: chainDag, quarantine: quarantine, attestationPool: attestationPool, @@ -271,6 +271,16 @@ proc init*(T: type BeaconNode, topicAggregateAndProofs: topicAggregateAndProofs, ) + res.attachedValidators = ValidatorPool.init( + SlashingProtectionDB.init( + chainDag.headState.data.data.genesis_validators_root, + when UseSlashingProtection: + kvStore SqStoreRef.init(conf.validatorsDir(), "slashing_protection").tryGet() + else: + KvStoreRef() + ) + ) + proc getWallTime(): BeaconTime = res.beaconClock.now() res.processor = Eth2Processor.new( @@ -1312,4 +1322,3 @@ programMain: of WalletsCmd.restore: restoreWalletInteractively(rng[], config) - diff --git a/beacon_chain/beacon_node_types.nim b/beacon_chain/beacon_node_types.nim index e7ce86eb6..70a97fb4b 100644 --- a/beacon_chain/beacon_node_types.nim +++ b/beacon_chain/beacon_node_types.nim @@ -5,7 +5,8 @@ import stew/endians2, spec/[datatypes, crypto], block_pools/block_pools_types, - fork_choice/fork_choice_types + fork_choice/fork_choice_types, + validator_slashing_protection export block_pools_types @@ -105,5 +106,6 @@ type ValidatorPool* = object validators*: Table[ValidatorPubKey, AttachedValidator] + slashingProtection*: SlashingProtectionDB func shortLog*(v: AttachedValidator): string = shortLog(v.pubKey) diff --git a/beacon_chain/validator_client.nim b/beacon_chain/validator_client.nim index 5ffbe2dbd..3ede71bc9 100644 --- a/beacon_chain/validator_client.nim +++ b/beacon_chain/validator_client.nim @@ -16,14 +16,16 @@ import json_serialization/std/[options, sets, net], # Local modules - spec/[datatypes, digest, crypto, helpers, network], + spec/[datatypes, digest, crypto, helpers, network, signatures], conf, time, version, eth2_network, eth2_discovery, validator_pool, beacon_node_types, nimbus_binary_common, version, 
ssz/merkleization, sync_manager, keystore_management, spec/eth2_apis/callsigs_types, - eth2_json_rpc_serialization + eth2_json_rpc_serialization, + validator_slashing_protection, + eth/db/[kvstore, kvstore_sqlite3] logScope: topics = "vc" @@ -132,22 +134,35 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a # check if we have a validator which needs to propose on this slot if vc.proposalsForCurrentEpoch.contains slot: let public_key = vc.proposalsForCurrentEpoch[slot] - let validator = vc.attachedValidators.validators[public_key] - info "Proposing block", slot = slot, public_key = public_key + let notSlashable = vc.attachedValidators + .slashingProtection + .checkSlashableBlockProposal(public_key, slot) + if notSlashable.isOk: + let validator = vc.attachedValidators.validators[public_key] + info "Proposing block", slot = slot, public_key = public_key + let randao_reveal = await validator.genRandaoReveal( + vc.fork, vc.beaconGenesis.genesis_validators_root, slot) + var newBlock = SignedBeaconBlock( + message: await vc.client.get_v1_validator_block(slot, vc.graffitiBytes, randao_reveal) + ) + newBlock.root = hash_tree_root(newBlock.message) - let randao_reveal = await validator.genRandaoReveal( - vc.fork, vc.beaconGenesis.genesis_validators_root, slot) + # TODO: signing_root is recomputed in signBlockProposal just after + let signing_root = compute_block_root(vc.fork, vc.beaconGenesis.genesis_validators_root, slot, newBlock.root) + vc.attachedValidators + .slashingProtection + .registerBlock(public_key, slot, signing_root) - var newBlock = SignedBeaconBlock( - message: await vc.client.get_v1_validator_block(slot, vc.graffitiBytes, randao_reveal) - ) + newBlock.signature = await validator.signBlockProposal( + vc.fork, vc.beaconGenesis.genesis_validators_root, slot, newBlock.root) - newBlock.root = hash_tree_root(newBlock.message) - newBlock.signature = await validator.signBlockProposal( - vc.fork, 
vc.beaconGenesis.genesis_validators_root, slot, newBlock.root) - - discard await vc.client.post_v1_validator_block(newBlock) + discard await vc.client.post_v1_validator_block(newBlock) + else: + warn "Slashing protection activated for block proposal", + validator = public_key, + slot = slot, + existingProposal = notSlashable.error # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#attesting # A validator should create and broadcast the attestation to the associated @@ -167,12 +182,31 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a let validator = vc.attachedValidators.validators[a.public_key] let ad = await vc.client.get_v1_validator_attestation(slot, a.committee_index) - # TODO I don't like these (u)int64-to-int conversions... - let attestation = await validator.produceAndSignAttestation( - ad, a.committee_length.int, a.validator_committee_index.int, - vc.fork, vc.beaconGenesis.genesis_validators_root) + let notSlashable = vc.attachedValidators + .slashingProtection + .checkSlashableAttestation( + a.public_key, + ad.source.epoch, + ad.target.epoch) + if notSlashable.isOk(): + # TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after + let signing_root = compute_attestation_root( + vc.fork, vc.beaconGenesis.genesis_validators_root, ad) + vc.attachedValidators + .slashingProtection + .registerAttestation( + a.public_key, ad.source.epoch, ad.target.epoch, signing_root) - discard await vc.client.post_v1_beacon_pool_attestations(attestation) + # TODO I don't like these (u)int64-to-int conversions... 
+ let attestation = await validator.produceAndSignAttestation( + ad, a.committee_length.int, a.validator_committee_index.int, + vc.fork, vc.beaconGenesis.genesis_validators_root) + + discard await vc.client.post_v1_beacon_pool_attestations(attestation) + else: + warn "Slashing protection activated for attestation", + validator = a.public_key, + badVoteDetails = $notSlashable.error except CatchableError as err: warn "Caught an unexpected error", err = err.msg, slot = shortLog(slot) @@ -230,6 +264,13 @@ programMain: vc.beaconGenesis = waitFor vc.client.get_v1_beacon_genesis() vc.beaconClock = BeaconClock.init(vc.beaconGenesis.genesis_time) + when UseSlashingProtection: + vc.attachedValidators.slashingProtection = + SlashingProtectionDB.init( + vc.beaconGenesis.genesis_validators_root, + kvStore SqStoreRef.init(config.validatorsDir(), "slashing_protection").tryGet() + ) + let curSlot = vc.beaconClock.now().slotOrZero() nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1 diff --git a/beacon_chain/validator_duties.nim b/beacon_chain/validator_duties.nim index d959d2dbf..c80c71e87 100644 --- a/beacon_chain/validator_duties.nim +++ b/beacon_chain/validator_duties.nim @@ -7,7 +7,7 @@ import # Standard library - std/[os, tables, strutils, sequtils, osproc, streams], + std/[os, tables, sequtils, osproc, streams], # Nimble packages stew/[objects], stew/shims/macros, @@ -18,13 +18,14 @@ import eth/[keys, async_utils], eth/p2p/discoveryv5/[protocol, enr], # Local modules - spec/[datatypes, digest, crypto, helpers, validator, network], + spec/[datatypes, digest, crypto, helpers, validator, network, signatures], spec/state_transition, conf, time, validator_pool, attestation_pool, block_pools/[spec_cache, chain_dag, clearance], eth2_network, keystore_management, beacon_node_common, beacon_node_types, nimbus_binary_common, mainchain_monitor, version, ssz/merkleization, interop, - attestation_aggregation, sync_manager, sszdump + attestation_aggregation, sync_manager, sszdump, 
+ validator_slashing_protection # Metrics for tracking attestation and beacon block loss declareCounter beacon_attestations_sent, @@ -120,6 +121,8 @@ proc isSynced*(node: BeaconNode, head: BlockRef): bool = beaconTime = node.beaconClock.now() wallSlot = beaconTime.toSlot() + # TODO: MaxEmptySlotCount should likely involve the weak subjectivity period. + # TODO if everyone follows this logic, the network will not recover from a # halt: nobody will be producing blocks because everone expects someone # else to do it @@ -293,6 +296,16 @@ proc proposeBlock(node: BeaconNode, slot = shortLog(slot) return head + let notSlashable = node.attachedValidators + .slashingProtection + .checkSlashableBlockProposal(validator.pubkey, slot) + if notSlashable.isErr: + warn "Slashing protection activated", + validator = validator.pubkey, + slot = slot, + existingProposal = notSlashable.error + return head + let valInfo = ValidatorInfoForMakeBeaconBlock(kind: viValidator, validator: validator) let beaconBlockTuple = await makeBeaconBlockForHeadAndSlot( node, valInfo, validator_index, node.graffitiBytes, head, slot) @@ -304,6 +317,14 @@ proc proposeBlock(node: BeaconNode, ) newBlock.root = hash_tree_root(newBlock.message) + + # TODO: recomputed in block proposal + let signing_root = compute_block_root( + beaconBlockTuple.fork, beaconBlockTuple.genesis_validators_root, slot, newBlock.root) + node.attachedValidators + .slashingProtection + .registerBlock(validator.pubkey, slot, signing_root) + newBlock.signature = await validator.signBlockProposal( beaconBlockTuple.fork, beaconBlockTuple.genesis_validators_root, slot, newBlock.root) @@ -368,9 +389,21 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) = attestations.add((ad, committee.len, index_in_committee, validator)) for a in attestations: - traceAsyncErrors createAndSendAttestation( - node, fork, genesis_validators_root, a.validator, a.data, - a.committeeLen, a.indexInCommittee, num_active_validators) + let 
notSlashable = node.attachedValidators + .slashingProtection + .checkSlashableAttestation( + a.validator.pubkey, + a.data.source.epoch, + a.data.target.epoch) + + if notSlashable.isOk(): + traceAsyncErrors createAndSendAttestation( + node, fork, genesis_validators_root, a.validator, a.data, + a.committeeLen, a.indexInCommittee, num_active_validators) + else: + warn "Slashing protection activated for attestation", + validator = a.validator.pubkey, + badVoteDetails = $notSlashable.error proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot): Future[BlockRef] {.async.} = diff --git a/beacon_chain/validator_pool.nim b/beacon_chain/validator_pool.nim index b11b1b1ed..42f3839bf 100644 --- a/beacon_chain/validator_pool.nim +++ b/beacon_chain/validator_pool.nim @@ -3,10 +3,18 @@ import chronos, chronicles, spec/[datatypes, crypto, digest, signatures, helpers], beacon_node_types, - json_serialization/std/[sets, net] + json_serialization/std/[sets, net], + validator_slashing_protection, + eth/db/[kvstore, kvstore_sqlite3] -func init*(T: type ValidatorPool): T = +func init*(T: type ValidatorPool, + slashingProtectionDB: SlashingProtectionDB): T = + ## Initialize the validator pool and the slashing protection service + ## `genesis_validator_root` is used as an unique ID for the + ## blockchain + ## `backend` is the KeyValue Store backend result.validators = initTable[ValidatorPubKey, AttachedValidator]() + result.slashingProtection = slashingProtectionDB template count*(pool: ValidatorPool): int = pool.validators.len diff --git a/beacon_chain/validator_slashing_protection.nim b/beacon_chain/validator_slashing_protection.nim new file mode 100644 index 000000000..66f830e84 --- /dev/null +++ b/beacon_chain/validator_slashing_protection.nim @@ -0,0 +1,1099 @@ +# beacon_chain +# Copyright (c) 2018-2020 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at 
https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import + # Standard library + std/tables, + # Status + eth/db/kvstore, + chronicles, + nimcrypto/[hash, utils], + serialization, + json_serialization, + # Internal + ./spec/[datatypes, digest, crypto], + ./ssz + +# Requirements +# -------------------------------------------- +# +# Overview of slashing and how it ties in with the rest of Eth2.0 +# +# Phase 0 for humans - Validator responsibilities: +# - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities +# +# Phase 0 spec - Honest Validator - how to avoid slashing +# - https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#how-to-avoid-slashing +# +# In-depth reading on slashing conditions +# +# - Detecting slashing conditions https://hackmd.io/@n0ble/By897a5sH +# - Open issue on writing a slashing detector https://github.com/ethereum/eth2.0-pm/issues/63 +# - Casper the Friendly Finality Gadget, Vitalik Buterin and Virgil Griffith +# https://arxiv.org/pdf/1710.09437.pdf +# Figure 2 +# An individual validator ν MUST NOT publish two distinct votes, +# 〈ν,s1,t1,h(s1),h(t1) AND〈ν,s2,t2,h(s2),h(t2)〉, +# such that either: +# I. h(t1) = h(t2). +# Equivalently, a validator MUST NOT publish two distinct votes for the same target height. +# OR +# II. h(s1) < h(s2) < h(t2) < h(t1). +# Equivalently, a validator MUST NOT vote within the span of its other votes. +# - Vitalik's annotated spec: https://github.com/ethereum/annotated-spec/blob/d8c51af84f9f309d91c37379c1fcb0810bc5f10a/phase0/beacon-chain.md#proposerslashing +# 1. A proposer can get slashed for signing two distinct headers at the same slot. +# 2. An attester can get slashed for signing +# two attestations that together violate +# the Casper FFG slashing conditions. 
+# - https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#ffg-vote +# The "source" is the current_justified_epoch +# The "target" is the current_epoch +# +# Reading on weak subjectivity +# - https://notes.ethereum.org/@adiasg/weak-subjectvity-eth2 +# - https://www.symphonious.net/2019/11/27/exploring-ethereum-2-weak-subjectivity-period/ +# - https://ethresear.ch/t/weak-subjectivity-under-the-exit-queue-model/5187 +# +# Reading of interop serialization format +# - Import/export format: https://hackmd.io/@sproul/Bk0Y0qdGD +# - Tests: https://github.com/eth2-clients/slashing-protection-interchange-tests +# +# Relaxation for Nimbus +# +# We are not building a slashing detector but only protecting +# attached validator from slashing, hence we make the following assumptions +# +# 1. We only need to store specific validators signed blocks and attestations +# 2. We assume that our node is synced past +# the last finalized epoch +# hence we only need to keep track of blocks and attestations +# since the last finalized epoch and we don't need to care +# about the weak subjectivity period. +# i.e. if `Node.isSynced()` returns false +# a node skips its validator duties and doesn't invoke slashing protection. +# and `isSynced` syncs at least up to the blockchain last finalized epoch. +# +# Hence the database or key-value store should support +# +# Queries +# 1. db.signedBlockExistsFor(validator, slot) -> bool +# 2. db.attestationExistsFor(validator, target_epoch) -> bool +# 3. db.attestationSurrounds(validator, source_epoch, target_epoch) +# +# Update +# 1. db.registerBlock(validator, slot, block_root) +# 2. db.registerAttestation(validator, source_epoch, target_epoch, attestation_root) +# +# Maintenance +# 1. db.prune(finalized_epoch) +# +# Interop +# 1. db.import(json) +# 2. db.export(json) +# 3. db.export(json, validator) +# 4. 
db.export(json, seq[validator]) + +# Technical Discussion +# -------------------------------------------- +# +# TODO: Merge with BeaconChainDB? +# - https://stackoverflow.com/questions/21844479/multiple-databases-vs-single-database-with-logically-partitioned-data +# +# Reasons for merging +# - Single database +# +# Reasons for not merging +# - BeaconChainDB is about the beacon node itself +# while slashing protection is about validators +# - BeaconChainDB is append-only +# while slashing protection will be pruned +# at each finalization. +# Hence we might want different backend in the future +# - In a VC/BN split configuration the slashing protection +# may be better attached to the VC. (VC: Validator Client, BN: Beacon Node) +# - The slashing protection DB only held cryptographic hashes +# and epoch/slot integers which are uncompressible +# while BeaconChainDB is snappy-compressed. +# +# TODO: if we enshrine the split we likely want to use +# a relational DB instead of KV-Store, +# for efficient pruning and range queries support + +# DB primitives +# -------------------------------------------- +# Implementation +# +# As mentioned in the technical discussion +# we currently use a simple KV-store abstraction +# with no range queries or iterators. +# +# To support our requirements +# we store block proposals and attestations +# as per-validator linked lists + +type + SlashingProtectionDB* = ref object + ## Database storing the blocks attested + ## by validators attached to a beacon node + ## or validator client. + backend: KvStoreRef + + BadVoteKind* = enum + ## Attestation bad vote kind + # h: height (i.e. 
epoch for attestation, slot for blocks) + # t: target + # s: source + # 1: existing attestations + # 2: candidate attestation + + # Spec slashing condition + DoubleVote # h(t1) = h(t2) + SurroundedVote # h(s1) < h(s2) < h(t2) < h(t1) + SurroundingVote # h(s2) < h(s1) < h(t1) < h(t2) + # Non-spec, should never happen in a well functioning client + TargetPrecedesSource # h(t1) < h(s1) - current epoch precedes last justified epoch + + BadVote* = object + case kind*: BadVoteKind + of DoubleVote: + existingAttestation*: Eth2Digest + of SurroundedVote, SurroundingVote: + existingAttestationRoot*: Eth2Digest # Many roots might be in conflict + sourceExisting*, targetExisting*: Epoch + sourceSlashable*, targetSlashable*: Epoch + of TargetPrecedesSource: + discard + + SlotDesc = object + # Using tuple instead of objects, crashes the Nim compiler + # with SSZ serialization + # Making this generic as well + start, stop: Slot + isInit: bool + EpochDesc = object + start, stop: Epoch + isInit: bool + + KeysEpochs = object + ## Per-validator linked lists start/stop + blockSlots: SlotDesc + sourceEpochs: EpochDesc + targetEpochs: EpochDesc + + SlashingKeyKind = enum + # Note: source epochs are not unique + # and so cannot be used to build a key + kBlock + kTargetEpoch + kLinkedListMeta + # Interchange format + kGenesisValidatorRoot + kNumValidators + kValidator + + BlockNode = object + prev, next: Slot + # TODO distinct type for block root vs all other ETH2Digest + block_root: Eth2Digest + + TargetEpochNode = object + prev, next: Epoch + # TODO distinct type for attestation root vs all other ETH2Digest + attestation_root: Eth2Digest + source: Epoch + + ValID = array[RawPubKeySize, byte] + ## This is the serialized byte representation + ## of a Validator Public Key. 
+ ## Portable between Miracl/BLST + ## and limits serialization/deserialization call + +{.push raises: [Defect].} +logScope: + topics = "antislash" + +const UseSlashingProtection* {.booldefine.} = true + +when UseSlashingProtection: + static: echo " Built with slashing protection" +else: + static: echo " Built without slashing protection" + +func subkey( + kind: static SlashingKeyKind, + validator: ValID, + slot: Slot + ): array[RawPubKeySize+8, byte] = + static: doAssert kind == kBlock + + # Big endian to get a naturally ascending order on slots in sorted indices + result[0..<8] = toBytesBE(slot.uint64) + # .. but 7 bytes should be enough for slots - in return, we get a nicely + # rounded key length + result[0] = byte ord(kBlock) + result[8..<56] = validator + +func subkey( + kind: static SlashingKeyKind, + validator: ValID, + epoch: Epoch + ): array[RawPubKeySize+8, byte] = + static: doAssert kind == kTargetEpoch, "Got invalid kind " & $kind + + # Big endian to get a naturally ascending order on slots in sorted indices + result[0..<8] = toBytesBE(epoch.uint64) + # .. but 7 bytes should be enough for slots - in return, we get a nicely + # rounded key length + result[0] = byte ord(kind) + result[8..<56] = validator + +func subkey( + kind: static SlashingKeyKind, + validator: ValID + ): array[RawPubKeySize+1, byte] = + static: doAssert kind == kLinkedListMeta + + result[0] = byte ord(kLinkedListMeta) + result[1 .. 
^1] = validator + +func subkey(kind: static SlashingKeyKind): array[1, byte] = + static: doAssert kind in {kNumValidators, kGenesisValidatorRoot} + result[0] = byte ord(kind) + +func subkey(kind: static SlashingKeyKind, valIndex: uint32): array[5, byte] = + static: doAssert kind == kValidator + # Big endian to get a naturally ascending order on slots in sorted indices + result[1..<5] = toBytesBE(valIndex) + result[0] = byte ord(kind) + +proc put(db: SlashingProtectionDB, key: openarray[byte], v: auto) = + db.backend.put( + key, + SSZ.encode(v) + ).expect("working database") + +proc get(db: SlashingProtectionDB, + key: openarray[byte], + T: typedesc): Opt[T] = + + const ExpectedNodeSszSize = block: + when T is BlockNode: + 2*sizeof(Epoch) + sizeof(Eth2Digest) + elif T is TargetEpochNode: + 2*sizeof(Epoch) + sizeof(Eth2Digest) + sizeof(Epoch) + elif T is KeysEpochs: + 2*sizeof(Slot) + 4*sizeof(Epoch) + 3*sizeof(bool) + elif T is Eth2Digest: + sizeof(Eth2Digest) + elif T is uint32: + sizeof(uint32) + elif T is ValidatorPubKey: + RawPubKeySize + else: + {.error: "Invalid database node type: " & $T.} + ## SSZ serialization is packed + ## However in-memory, BlockNode, TargetEpochNode + ## might be bigger due to alignment/compiler padding + + var res: Opt[T] + proc decode(data: openArray[byte]) = + # We are capturing "result" and "T" from outer scope + # And allocating on the heap which are not ideal + # from a safety and performance point of view. 
+ try: + if data.len == ExpectedNodeSszSize: + when T is ValidatorPubKey: + # symbol resolution bug + # SSZ.decode doesn't see "fromSSZBytes" + res.ok ValidatorPubKey.fromSszBytes(data) + else: + res.ok SSZ.decode(data, T) # captures from `get` scope + else: + # If the data can't be deserialized, it could be because it's from a + # version of the software that uses a different SSZ encoding + warn "Unable to deserialize data, old database?", + typ = $T, + dataLen = data.len, + expectedSize = ExpectedNodeSszSize + discard + except SerializationError as e: + # If the data can't be deserialized, it could be because it's from a + # version of the software that uses a different SSZ encoding + warn "Unable to deserialize data, old database?", + typ = $T, + dataLen = data.len, + expectedSize = ExpectedNodeSszSize + discard + + discard db.backend.get(key, decode).expect("working database") + + res + +proc setGenesis(db: SlashingProtectionDB, genesis_validator_root: Eth2Digest) = + # Workaround SSZ / nim-serialization visibility issue + # "template WriterType(T: type SSZ): type" + # by having a non-generic proc + db.put( + subkey(kGenesisValidatorRoot), + genesis_validator_root + ) + +proc init*( + T: type SlashingProtectionDB, + genesis_validator_root: Eth2Digest, + backend: KVStoreRef): SlashingProtectionDB = + when UseSlashingProtection: + result = T(backend: backend) + result.setGenesis(genesis_validator_root) + +proc close*(db: SlashingProtectionDB) = + when UseSlashingProtection: + discard db.backend.close() + +# DB Queries +# -------------------------------------------- + +proc checkSlashableBlockProposalImpl( + db: SlashingProtectionDB, + validator: ValidatorPubKey, + slot: Slot + ): Result[void, Eth2Digest] = + ## Returns an error if the specified validator + ## already proposed a block for the specified slot. + ## This would lead to slashing. 
+ ## The error contains the blockroot that was already proposed + ## + ## Returns success otherwise + # TODO distinct type for the result block root + let valID = validator.toRaw() + let foundBlock = db.get( + subkey(kBlock, valID, slot), + BlockNode + ) + if foundBlock.isNone(): + return ok() + return err(foundBlock.unsafeGet().block_root) + +proc checkSlashableBlockProposal*( + db: SlashingProtectionDB, + validator: ValidatorPubKey, + slot: Slot + ): Result[void, Eth2Digest] = + ## Returns an error if the specified validator + ## already proposed a block for the specified slot. + ## This would lead to slashing. + ## The error contains the blockroot that was already proposed + ## + ## Returns success otherwise + # TODO distinct type for the result block root + when UseSlashingProtection: + checkSlashableBlockProposalImpl( + db, validator, slot + ) + else: + ok() + +proc checkSlashableAttestationImpl( + db: SlashingProtectionDB, + validator: ValidatorPubKey, + source: Epoch, + target: Epoch + ): Result[void, BadVote] = + ## Returns an error if the specified validator + ## already proposed a block for the specified slot. + ## This would lead to slashing. 
+ ## The error contains the blockroot that was already proposed + ## + ## Returns success otherwise + # TODO distinct type for the result attestation root + + let valID = validator.toRaw() + + # Sanity + # --------------------------------- + if source > target: + return err(BadVote(kind: TargetPrecedesSource)) + + # Casper FFG 1st slashing condition + # Detect h(t1) = h(t2) + # --------------------------------- + let foundAttestation = db.get( + subkey(kTargetEpoch, valID, target), + TargetEpochNode + ) + if foundAttestation.isSome(): + # Logged by caller + return err(BadVote( + kind: DoubleVote, + existingAttestation: foundAttestation.unsafeGet().attestation_root + )) + + # TODO: we hack KV-store range queries + # --------------------------------- + let maybeLL = db.get( + subkey(kLinkedListMeta, valID), + KeysEpochs + ) + + if maybeLL.isNone: + info "No slashing protection data - first attestation?", + validator = validator, + attSource = source, + attTarget = target + return ok() + let ll = maybeLL.unsafeGet() + if not ll.targetEpochs.isInit: + info "No attestation slashing protection data - first attestation?", + validator = validator, + attSource = source, + attTarget = target + return ok() + + # Chain reorg + # Detect h(s2) < h(s1) + # If the candidate attestation source precedes + # source(s) we have in the SlashingProtectionDB + # we have a chain reorg + # --------------------------------- + if source < ll.sourceEpochs.stop: + warn "Detected a chain reorg", + earliestJustifiedEpoch = ll.sourceEpochs.start, + oldestJustifiedEpoch = ll.sourceEpochs.stop, + reorgJustifiedEpoch = source, + monitoredValidator = validator + + # Casper FFG 2nd slashing condition + # -> Surrounded vote + # Detect h(s1) < h(s2) < h(t2) < h(t1) + # --------------------------------- + # Casper FFG 2nd slashing condition + # -> Surrounding vote + # Detect h(s2) < h(s1) < h(t1) < h(t2) + # --------------------------------- + + template s2: untyped = source + template t2: untyped = 
target + + # We start from the final target epoch + var t1: Epoch + var t1Node: TargetEpochNode + + t1 = ll.targetEpochs.stop + t1Node = db.get( + subkey(kTargetEpoch, valID, t1), + TargetEpochNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + template s1: untyped = t1Node.source + template ar1: untyped = t1Node.attestation_root + + # Surrounding vote + while true: + if not(t1 < t2): + # s1|s2 < t2 < t1 -> surrounded vote case + break + if s2 < s1: + # s2 < s1 < t1 < t2 + # Logged by caller + return err(BadVote( + kind: SurroundingVote, + existingAttestationRoot: ar1, + sourceExisting: s1, + targetExisting: t1, + sourceSlashable: s2, + targetSlashable: t2 + )) + + # Next iteration + if t1Node.prev == default(Epoch) or + t1Node.prev == ll.targetEpochs.stop: + return ok() + else: + t1 = t1Node.prev + t1Node = db.get( + subkey(kTargetEpoch, valID, t1Node.prev), + TargetEpochNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + + # Surrounded vote + doAssert t2 < t1, "Checking surrounded vote" + while true: + if t1 < s2: + # s1 < t1 < s2 < t2 + return ok() + if s1 < s2: + # s1 < s2 < t2 < t1 + # Logged by caller + return err(BadVote( + kind: SurroundedVote, + existingAttestationRoot: ar1, + sourceExisting: s1, + targetExisting: t1, + sourceSlashable: s2, + targetSlashable: t2 + )) + + # Next iteration + if t1Node.prev == default(Epoch) or + t1Node.prev == ll.targetEpochs.stop: + return ok() + else: + t1 = t1Node.prev + t1Node = db.get( + subkey(kTargetEpoch, valID, t1Node.prev), + TargetEpochNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + + doAssert false, "Unreachable" + +proc checkSlashableAttestation*( + db: SlashingProtectionDB, + validator: ValidatorPubKey, + source: Epoch, + target: Epoch + ): Result[void, BadVote] = + ## Returns an error if the specified validator + ## 
already voted for the specified target epoch
+  ## or cast a surrounding/surrounded vote.
+  ## This would lead to slashing.
+  ## The error contains details of the conflicting vote
+  ##
+  ## Returns success otherwise
+  # TODO distinct type for the result attestation root
+  when UseSlashingProtection:
+    checkSlashableAttestationImpl(
+      db, validator, source, target
+    )
+  else:
+    ok()
+
+# DB update
+# --------------------------------------------
+
+proc registerValidator(db: SlashingProtectionDB, validator: ValidatorPubKey) =
+  ## Add a new validator to the database
+  ## Assumes the validator does not exist
+  let maybeNumVals = db.get(
+    subkey(kNumValidators),
+    uint32
+  )
+  var valIndex = 0'u32
+  if maybeNumVals.isNone():
+    db.put(subkey(kNumValidators), 1'u32)
+  else:
+    valIndex = maybeNumVals.unsafeGet()
+    db.put(subkey(kNumValidators), valIndex + 1)
+
+  db.put(subkey(kValidator, valIndex), validator)
+
+proc registerBlockImpl(
+       db: SlashingProtectionDB,
+       validator: ValidatorPubKey,
+       slot: Slot, block_root: Eth2Digest) =
+  ## Add a block to the slashing protection DB
+  ## `checkSlashableBlockProposal` MUST be run
+  ## before to ensure no overwrite.
+
+  let valID = validator.toRaw()
+
+  # We want to keep the linked-list ordered
+  # to ease pruning.
+ # TODO: DB instead of KV-store, + # at the very least we should isolate that logic + let maybeLL = db.get( + subkey(kLinkedListMeta, valID), + KeysEpochs + ) + + if maybeLL.isNone: + info "No slashing protection data - initiating block tracking for validator", + validator = validator + + db.registerValidator(validator) + + let node = BlockNode( + block_root: block_root + ) + db.put(subkey(kBlock, valID, slot), node) + db.put( + subkey(kLinkedListMeta, valID), + KeysEpochs( + blockSlots: SlotDesc(start: slot, stop: slot, isInit: true), + # targetEpochs.isInit will be false + ) + ) + return + + var ll = maybeLL.unsafeGet() + var cur = ll.blockSlots.stop + if not ll.blockSlots.isInit: + let node = BlockNode( + block_root: block_root + ) + ll.blockSlots = SlotDesc(start: slot, stop: slot, isInit: true) + db.put(subkey(kBlock, valID, slot), node) + # TODO: what if crash here? + db.put(subkey(kLinkedListMeta, valID), ll) + return + + if cur < slot: + # Adding a block later than all known blocks + let node = BlockNode( + prev: cur, + block_root: block_root + ) + var prevNode = db.get( + subkey(kBlock, valID, cur), + BlockNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + prevNode.next = slot + ll.blockSlots.stop = slot + db.put(subkey(kBlock, valID, slot), node) + db.put(subkey(kBlock, valID, cur), prevNode) + # TODO: what if crash here? + db.put(subkey(kLinkedListMeta, valID), ll) + return + + # TODO: we likely want a proper DB or better KV-store high-level API + # in the future. 
+ while true: + var curNode = db.get( + subkey(kBlock, valID, cur), + BlockNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + + if curNode.prev == ll.blockSlots.start: + # Reached the beginning + # Change: Metadata.start <-> cur + # to: Metadata.start <-> new <-> cur + # This should happen only if registerBlock + # is called out-of-order + warn "Validator proposal in the past - out-of-order antislash registration?", + validator = validator, + slot = slot, + blockroot = blockroot, + earliestBlockProposalSlotInDB = ll.blockSlots.start, + latestBlockProposalSlotInDB = ll.blockSlots.stop + var node = BlockNode( + prev: ll.blockSlots.start, + next: cur, + block_root: block_root + ) + ll.blockSlots.start = slot + curNode.prev = slot + db.put(subkey(kBlock, valID, slot), node) + # TODO: what if crash here? + db.put(subkey(kBlock, valID, cur), curNode) + db.put(subkey(kLinkedListMeta, valID), ll) + return + elif slot < curNode.prev: + # Reached: prev < slot < cur + # Change: prev <-> cur + # to: prev <-> new <-> cur + let prev = curNode.prev + var node = BlockNode( + prev: prev, next: cur, + block_root: block_root + ) + var prevNode = db.get( + subkey(kBlock, valID, prev), + BlockNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + prevNode.next = slot + curNode.prev = slot + db.put(subkey(kBlock, valID, slot), node) + # TODO: what if crash here? 
+ db.put(subkey(kBlock, valID, cur), curNode) + db.put(subkey(kBlock, valID, prev), prevNode) + return + + # Previous + cur = curNode.prev + curNode = db.get( + subkey(kBlock, valID, cur), + BlockNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + +proc registerBlock*( + db: SlashingProtectionDB, + validator: ValidatorPubKey, + slot: Slot, block_root: Eth2Digest) = + ## Add a block to the slashing protection DB + ## `checkSlashableBlockProposal` MUST be run + ## before to ensure no overwrite. + when UseSlashingProtection: + registerBlockImpl( + db, validator, slot, block_root + ) + else: + discard + +proc registerAttestationImpl( + db: SlashingProtectionDB, + validator: ValidatorPubKey, + source, target: Epoch, + attestation_root: Eth2Digest) = + ## Add an attestation to the slashing protection DB + ## `checkSlashableAttestation` MUST be run + ## before to ensure no overwrite. + + let valID = validator.toRaw() + + # We want to keep the linked-list ordered + # to ease pruning. 
+ # TODO: DB instead of KV-store, + # at the very least we should isolate that logic + let maybeLL = db.get( + subkey(kLinkedListMeta, valID), + KeysEpochs + ) + + if maybeLL.isNone: + info "No slashing protection data - initiating attestation tracking for validator", + validator = validator + + db.registerValidator(validator) + + let node = TargetEpochNode( + source: source, + attestation_root: attestation_root + ) + db.put(subkey(kTargetEpoch, valID, target), node) + db.put( + subkey(kLinkedListMeta, valID), + KeysEpochs( + # blockSlots.isInit will be false + sourceEpochs: EpochDesc(start: source, stop: source, isInit: true), + targetEpochs: EpochDesc(start: target, stop: target, isInit: true) + ) + ) + return + + var ll = maybeLL.unsafeGet() + var cur = ll.targetEpochs.stop + if not ll.targetEpochs.isInit: + let node = TargetEpochNode( + attestation_root: attestation_root, + source: source + ) + ll.targetEpochs = EpochDesc(start: target, stop: target, isInit: true) + ll.sourceEpochs = EpochDesc(start: source, stop: source, isInit: true) + db.put(subkey(kTargetEpoch, valID, target), node) + # TODO: what if crash here? + db.put(subkey(kLinkedListMeta, valID), ll) + return + + block: # Update source epoch + if ll.sourceEpochs.stop < source: + ll.sourceEpochs.stop = source + if source < ll.sourceEpochs.start: + ll.sourceEpochs.start = source + + if cur < target: + # Adding an attestation later than all known blocks + let node = TargetEpochNode( + prev: cur, + source: source, + attestation_root: attestation_root + ) + var prevNode = db.get( + subkey(kTargetEpoch, valID, cur), + TargetEpochNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + prevNode.next = target + ll.targetEpochs.stop = target + db.put(subkey(kTargetEpoch, valID, target), node) + db.put(subkey(kTargetEpoch, valID, cur), prevNode) + # TODO: what if crash here? 
+ db.put(subkey(kLinkedListMeta, valID), ll) + return + + # TODO: we likely want a proper DB or better KV-store high-level API + # in the future. + while true: + var curNode = db.get( + subkey(kTargetEpoch, valID, cur), + TargetEpochNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + if curNode.prev == ll.targetEpochs.start: + # Reached the beginning + # Change: Metadata.start <-> cur + # to: Metadata.start <-> new <-> cur + # This should happen only if registerAttestation + # is called out-of-order or if the validator + # changes its vote for an earlier fork than its latest vote + warn "Validator vote targeting the past - out-of-order antislash registration or chain reorg?", + validator = validator, + source_epoch = source, + target_epoch = target, + attestation_root = attestation_root + var node = TargetEpochNode( + prev: ll.targetEpochs.start, + next: cur, + source: source, + attestation_root: attestation_root + ) + ll.targetEpochs.start = target + curNode.prev = target + db.put(subkey(kTargetEpoch, valID, target), node) + # TODO: what if crash here? + db.put(subkey(kTargetEpoch, valID, cur), curNode) + db.put(subkey(kLinkedListMeta, valID), ll) + return + elif target < curNode.prev: + # Reached: prev < target < cur + # Change: prev <-> cur + # to: prev <-> new <-> cur + let prev = curNode.prev + var node = TargetEpochNode( + prev: prev, next: cur, + source: source, + attestation_root: attestation_root + ) + var prevNode = db.get( + subkey(kTargetEpoch, valID, prev), + TargetEpochNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + prevNode.next = target + curNode.prev = target + db.put(subkey(kTargetEpoch, valID, target), node) + # TODO: what if crash here? 
+ db.put(subkey(kTargetEpoch, valID, cur), curNode) + db.put(subkey(kTargetEpoch, valID, prev), prevNode) + return + + # Previous + cur = curNode.prev + curNode = db.get( + subkey(kTargetEpoch, valID, cur), + TargetEpochNode + # bug in Nim results, ".e" field inaccessible + # ).expect("Consistent linked-list in DB") + ).unsafeGet() + +proc registerAttestation*( + db: SlashingProtectionDB, + validator: ValidatorPubKey, + source, target: Epoch, + attestation_root: Eth2Digest) = + ## Add an attestation to the slashing protection DB + ## `checkSlashableAttestation` MUST be run + ## before to ensure no overwrite. + when UseSlashingProtection: + registerAttestationImpl( + db, validator, source, target, attestation_root + ) + else: + discard + +# DB maintenance +# -------------------------------------------- +# TODO: pruning +# Note that the complete interchange format +# requires all proposals/attestations ever and so prevent pruning. + +# Interchange +# -------------------------------------------- + +type + SPDIF = object + ## Slashing Protection Database Interchange Format + metadata: SPDIF_Meta + data: seq[SPDIF_Validator] + + Eth2Digest0x = distinct Eth2Digest + ## The spec mandates "0x" prefix on serialization + ## So we need to set custom read/write + PubKey0x = distinct ValidatorPubKey + ## The spec mandates "0x" prefix on serialization + ## So we need to set custom read/write + + SPDIF_Meta = object + interchange_format: string + interchange_format_version: string + genesis_validator_root: Eth2Digest0x + + SPDIF_Validator = object + pubkey: PubKey0x + signed_blocks: seq[SPDIF_SignedBlock] + signed_attestations: seq[SPDIF_SignedAttestation] + + SPDIF_SignedBlock = object + slot: Slot + signing_root: Eth2Digest0x # compute_signing_root(block, domain) + + SPDIF_SignedAttestation = object + source_epoch: Epoch + target_epoch: Epoch + signing_root: Eth2Digest0x # compute_signing_root(attestation, domain) + +proc writeValue*(writer: var JsonWriter, value: PubKey0x) + 
{.inline, raises: [IOError, Defect].} = + writer.writeValue("0x" & value.ValidatorPubKey.toHex()) + +proc readValue*(reader: var JsonReader, value: var PubKey0x) + {.raises: [SerializationError, IOError, Defect].} = + let key = ValidatorPubKey.fromHex(reader.readValue(string)) + if key.isOk: + value = PubKey0x key.get + else: + # TODO: Can we provide better diagnostic? + raiseUnexpectedValue(reader, "Valid hex-encoded public key expected") + +proc writeValue*(w: var JsonWriter, a: Eth2Digest0x) + {.inline, raises: [IOError, Defect].} = + w.writeValue "0x" & a.Eth2Digest.data.toHex(lowercase = true) + +proc readValue*(r: var JsonReader, a: var Eth2Digest0x) + {.raises: [SerializationError, IOError, Defect].} = + try: + a = Eth2Digest0x fromHex(Eth2Digest, r.readValue(string)) + except ValueError: + raiseUnexpectedValue(r, "Hex string expected") + +proc toSPDIF*(db: SlashingProtectionDB, path: string) + {.raises: [IOError, Defect].} = + ## Export the full slashing protection database + ## to a json the Slashing Protection Database Interchange (Complete) Format + var extract: SPDIF + extract.metadata.interchange_format = "complete" + extract.metadata.interchange_format_version = "3" + extract.metadata.genesis_validator_root = Eth2Digest0x db.get( + subkey(kGenesisValidatorRoot), ETH2Digest + # Bug in results.nim + # ).expect("Slashing Protection requires genesis_validator_root at init") + ).unsafeGet() + + let numValidators = db.get( + subkey(kNumValidators), + uint32 + ).get(otherwise = 0'u32) + + for i in 0'u32 ..< numValidators: + var validator: SPDIF_Validator + validator.pubkey = PubKey0x db.get( + subkey(kValidator, i), + ValidatorPubKey + ).unsafeGet() + + let valID = validator.pubkey.ValidatorPubKey.toRaw() + let ll = db.get( + subkey(kLinkedListMeta, valID), + KeysEpochs + ).unsafeGet() + + if ll.blockSlots.isInit: + var curSlot = ll.blockSlots.start + while true: + let node = db.get( + subkey(kBlock, valID, curSlot), + BlockNode + ).unsafeGet() + + 
validator.signed_blocks.add SPDIF_SignedBlock(
+          slot: curSlot,
+          signing_root: Eth2Digest0x node.block_root
+        )
+
+        if curSlot == ll.blockSlots.stop:
+          break
+        else:
+          curSlot = node.next
+
+    if ll.targetEpochs.isInit:
+      var curEpoch = ll.targetEpochs.start
+      var count = 0
+      while true:
+        let node = db.get(
+          subkey(kTargetEpoch, valID, curEpoch),
+          TargetEpochNode
+        ).unsafeGet()
+
+        validator.signed_attestations.add SPDIF_SignedAttestation(
+          source_epoch: node.source, target_epoch: curEpoch,
+          signing_root: Eth2Digest0x node.attestation_root
+        )
+
+        if curEpoch == ll.targetEpochs.stop:
+          break
+        else:
+          curEpoch = node.next
+
+        inc count
+        # NOTE(review): removed leftover debug bound `doAssert count < 5` - it aborted exports of more than 4 attestations per validator
+
+    # Update extract without reallocating seqs
+    # by manually transferring ownership
+    extract.data.setLen(extract.data.len + 1)
+    shallowCopy(extract.data[^1], validator)
+
+  Json.saveFile(path, extract, pretty = true)
+  echo "Exported slashing protection DB to '", path, "'"
+
+proc fromSPDIF*(db: SlashingProtectionDB, path: string): bool
+       {.raises: [SerializationError, IOError, Defect].} =
+  ## Import a (Complete) Slashing Protection Database Interchange Format
+  ## file into the specified slashing protection DB
+  ##
+  ## The database must be initialized.
+  ## The genesis_validator_root must match or
+  ## the DB must have a zero root
+
+  let extract = Json.loadFile(path, SPDIF)
+
+  doAssert not db.isNil, "The Slashing Protection DB must be initialized."
+  doAssert not db.backend.isNil, "The Slashing Protection DB must be initialized."
+
+  let dbGenValRoot = db.get(
+    subkey(kGenesisValidatorRoot), Eth2Digest
+  ).unsafeGet()
+
+  if dbGenValRoot != default(Eth2Digest) and
+       dbGenValRoot != extract.metadata.genesis_validator_root.Eth2Digest:
+    echo "The slashing protection database and imported file refer to different blockchains."
+ return false + + if dbGenValRoot == default(Eth2Digest): + db.put( + subkey(kGenesisValidatorRoot), + extract.metadata.genesis_validator_root.Eth2Digest + ) + + for v in 0 ..< extract.data.len: + for b in 0 ..< extract.data[v].signed_blocks.len: + db.registerBlock( + extract.data[v].pubkey.ValidatorPubKey, + extract.data[v].signed_blocks[b].slot, + extract.data[v].signed_blocks[b].signing_root.Eth2Digest + ) + for a in 0 ..< extract.data[v].signed_attestations.len: + db.registerAttestation( + extract.data[v].pubkey.ValidatorPubKey, + extract.data[v].signed_attestations[a].source_epoch, + extract.data[v].signed_attestations[a].target_epoch, + extract.data[v].signed_attestations[a].signing_root.Eth2Digest + ) + + return true diff --git a/tests/all_tests.nim b/tests/all_tests.nim index b20192695..a88113d1b 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -30,7 +30,9 @@ import # Unit test ./test_sync_manager, ./test_honest_validator, ./test_interop, - ./fork_choice/tests_fork_choice + ./fork_choice/tests_fork_choice, + ./slashing_protection/test_slashing_interchange, + ./slashing_protection/test_slashing_protection_db import # Refactor state transition unit tests # In mainnet these take 2 minutes and are empty TODOs diff --git a/tests/slashing_protection/.gitignore b/tests/slashing_protection/.gitignore new file mode 100644 index 000000000..a6c57f5fb --- /dev/null +++ b/tests/slashing_protection/.gitignore @@ -0,0 +1 @@ +*.json diff --git a/tests/slashing_protection/test_slashing_interchange.nim b/tests/slashing_protection/test_slashing_interchange.nim new file mode 100644 index 000000000..2dd5c6f7a --- /dev/null +++ b/tests/slashing_protection/test_slashing_interchange.nim @@ -0,0 +1,97 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or 
https://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import + # Standard library + std/[unittest, os], + # Status lib + eth/db/kvstore, + stew/results, + nimcrypto/utils, + # Internal + ../../beacon_chain/validator_slashing_protection, + ../../beacon_chain/spec/[datatypes, digest, crypto, presets], + # Test utilies + ../testutil + +static: doAssert UseSlashingProtection, "The test was compiled without slashing protection, pass -d:UseSlashingProtection=true" + +template wrappedTimedTest(name: string, body: untyped) = + # `check` macro takes a copy of whatever it's checking, on the stack! + block: # Symbol namespacing + proc wrappedTest() = + timedTest name: + body + wrappedTest() + +func fakeRoot(index: SomeInteger): Eth2Digest = + ## Create fake roots + ## Those are just the value serialized in big-endian + ## We prevent zero hash special case via a power of 2 prefix + result.data[0 ..< 8] = (1'u64 shl 32 + index.uint64).toBytesBE() + +func fakeValidator(index: SomeInteger): ValidatorPubKey = + ## Create fake validator public key + result = ValidatorPubKey(kind: OpaqueBlob) + result.blob[0 ..< 8] = (1'u64 shl 48 + index.uint64).toBytesBE() + +func hexToDigest(hex: string): Eth2Digest = + result = Eth2Digest.fromHex(hex) + +suiteReport "Slashing Protection DB - Interchange" & preset(): + # https://hackmd.io/@sproul/Bk0Y0qdGD#Format-1-Complete + wrappedTimedTest "Smoke test - Complete format" & preset(): + let genesis_validators_root = hexToDigest"0x04700007fabc8282644aed6d1c7c9e21d38a03a0c4ba193f3afe428824b3a673" + block: # export + let db = SlashingProtectionDB.init(genesis_validators_root, kvStore MemStoreRef.init()) + + let pubkey = ValidatorPubKey + .fromHex"0xb845089a1457f811bfc000588fbb4e713669be8ce060ea6be3c6ece09afc3794106c91ca73acda5e5457122d58723bed" + .get() + db.registerBlock( + pubkey, + Slot 81952, + 
hexToDigest"0x4ff6f743a43f3b4f95350831aeaf0a122a1a392922c45d804280284a69eb850b" + ) + # db.registerBlock( + # pubkey, + # Slot 81951, + # fakeRoot(65535) + # ) + + db.registerAttestation( + pubkey, + source = Epoch 2290, + target = Epoch 3007, + hexToDigest"0x587d6a4f59a58fe24f406e0502413e77fe1babddee641fda30034ed37ecc884d" + ) + db.registerAttestation( + pubkey, + source = Epoch 2290, + target = Epoch 3008, + fakeRoot(65535) + ) + + db.toSPDIF(currentSourcePath.parentDir/"test_complete_export_slashing_protection.json") + + block: # import - zero root db + let db2 = SlashingProtectionDB.init(Eth2Digest(), kvStore MemStoreRef.init()) + + doAssert db2.fromSPDIF(currentSourcePath.parentDir/"test_complete_export_slashing_protection.json") + db2.toSPDIF(currentSourcePath.parentDir/"test_complete_export_slashing_protection_roundtrip1.json") + + block: # import - same root db + let db3 = SlashingProtectionDB.init(genesis_validators_root, kvStore MemStoreRef.init()) + + doAssert db3.fromSPDIF(currentSourcePath.parentDir/"test_complete_export_slashing_protection.json") + db3.toSPDIF(currentSourcePath.parentDir/"test_complete_export_slashing_protection_roundtrip2.json") + + block: # import - invalid root db + let invalid_genvalroot = hexToDigest"0x1234" + let db3 = SlashingProtectionDB.init(invalid_genvalroot, kvStore MemStoreRef.init()) + + doAssert not db3.fromSPDIF(currentSourcePath.parentDir/"test_complete_export_slashing_protection.json") diff --git a/tests/slashing_protection/test_slashing_protection_db.nim b/tests/slashing_protection/test_slashing_protection_db.nim new file mode 100644 index 000000000..cb29be59e --- /dev/null +++ b/tests/slashing_protection/test_slashing_protection_db.nim @@ -0,0 +1,566 @@ +# Nimbus +# Copyright (c) 2018 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or 
https://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import + # Standard library + std/unittest, + # Status lib + eth/db/kvstore, + stew/results, + # Internal + ../../beacon_chain/validator_slashing_protection, + ../../beacon_chain/spec/[datatypes, digest, crypto, presets], + # Test utilies + ../testutil + +static: doAssert UseSlashingProtection, "The test was compiled without slashing protection, pass -d:UseSlashingProtection=true" + +template wrappedTimedTest(name: string, body: untyped) = + # `check` macro takes a copy of whatever it's checking, on the stack! + block: # Symbol namespacing + proc wrappedTest() = + timedTest name: + body + wrappedTest() + +func fakeRoot(index: SomeInteger): Eth2Digest = + ## Create fake roots + ## Those are just the value serialized in big-endian + ## We prevent zero hash special case via a power of 2 prefix + result.data[0 ..< 8] = (1'u64 shl 32 + index.uint64).toBytesBE() + +func fakeValidator(index: SomeInteger): ValidatorPubKey = + ## Create fake validator public key + result = ValidatorPubKey(kind: OpaqueBlob) + result.blob[0 ..< 8] = (1'u64 shl 48 + index.uint64).toBytesBE() + +suiteReport "Slashing Protection DB" & preset(): + wrappedTimedTest "Empty database" & preset(): + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + check: + db.checkSlashableBlockProposal( + fakeValidator(1234), + slot = Slot 1 + ).isOk() + db.checkSlashableAttestation( + fakeValidator(1234), + source = Epoch 1, + target = Epoch 2 + ).isOk() + db.checkSlashableAttestation( + fakeValidator(1234), + source = Epoch 2, + target = Epoch 1 + ).error.kind == TargetPrecedesSource + + db.close() + + wrappedTimedTest "SP for block proposal - linear append": + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + db.registerBlock( + fakeValidator(100), + Slot 10, + fakeRoot(100) + ) + 
db.registerBlock( + fakeValidator(111), + Slot 15, + fakeRoot(111) + ) + check: + # Slot occupied by same validator + db.checkSlashableBlockProposal( + fakeValidator(100), + slot = Slot 10 + ).isErr() + # Slot occupied by another validator + db.checkSlashableBlockProposal( + fakeValidator(111), + slot = Slot 10 + ).isOk() + # Slot occupied by another validator + db.checkSlashableBlockProposal( + fakeValidator(100), + slot = Slot 15 + ).isOk() + # Slot occupied by same validator + db.checkSlashableBlockProposal( + fakeValidator(111), + slot = Slot 15 + ).isErr() + + # Slot inoccupied + db.checkSlashableBlockProposal( + fakeValidator(255), + slot = Slot 20 + ).isOk() + + db.registerBlock( + fakeValidator(255), + slot = Slot 20, + fakeRoot(4321) + ) + + check: + # Slot now occupied + db.checkSlashableBlockProposal( + fakeValidator(255), + slot = Slot 20 + ).isErr() + + wrappedTimedTest "SP for block proposal - backtracking append": + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + # last finalized block + db.registerBlock( + fakeValidator(0), + Slot 0, + fakeRoot(0) + ) + + db.registerBlock( + fakeValidator(100), + Slot 10, + fakeRoot(10) + ) + db.registerBlock( + fakeValidator(100), + Slot 20, + fakeRoot(20) + ) + for i in 0 ..< 30: + if i notin {10, 20}: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isOk() + else: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isErr() + db.registerBlock( + fakeValidator(100), + Slot 15, + fakeRoot(15) + ) + for i in 0 ..< 30: + if i notin {10, 15, 20}: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isOk() + else: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isErr() + db.checkSlashableBlockProposal( + fakeValidator(0xDEADBEEF), + Slot i + ).isOk() + db.registerBlock( + fakeValidator(100), + Slot 12, + fakeRoot(12) + ) + db.registerBlock( + fakeValidator(100), + Slot 17, 
+ fakeRoot(17) + ) + for i in 0 ..< 30: + if i notin {10, 12, 15, 17, 20}: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isOk() + else: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isErr() + db.checkSlashableBlockProposal( + fakeValidator(0xDEADBEEF), + Slot i + ).isOk() + db.registerBlock( + fakeValidator(100), + Slot 9, + fakeRoot(9) + ) + db.registerBlock( + fakeValidator(100), + Slot 1, + fakeRoot(1) + ) + db.registerBlock( + fakeValidator(100), + Slot 3, + fakeRoot(3) + ) + for i in 0 ..< 30: + if i notin {1, 3, 9, 10, 12, 15, 17, 20}: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isOk() + else: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isErr() + db.checkSlashableBlockProposal( + fakeValidator(0xDEADBEEF), + Slot i + ).isOk() + db.registerBlock( + fakeValidator(100), + Slot 29, + fakeRoot(29) + ) + db.registerBlock( + fakeValidator(100), + Slot 2, + fakeRoot(2) + ) + for i in 0 ..< 30: + if i notin {1, 2, 3, 9, 10, 12, 15, 17, 20, 29}: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isOk() + else: + check: + db.checkSlashableBlockProposal( + fakeValidator(100), + Slot i + ).isErr() + db.checkSlashableBlockProposal( + fakeValidator(0xDEADBEEF), + Slot i + ).isOk() + + wrappedTimedTest "SP for same epoch attestation target - linear append": + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 10, + fakeRoot(100) + ) + db.registerAttestation( + fakeValidator(111), + Epoch 0, Epoch 15, + fakeRoot(111) + ) + check: + # Epoch occupied by same validator + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch 10, + ).error.kind == DoubleVote + # Epoch occupied by another validator + db.checkSlashableAttestation( + fakeValidator(111), + Epoch 0, Epoch 10 + ).isOk() + # Epoch occupied by another 
validator + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch 15 + ).isOk() + # Epoch occupied by same validator + db.checkSlashableAttestation( + fakeValidator(111), + Epoch 0, Epoch 15 + ).error.kind == DoubleVote + + # Epoch inoccupied + db.checkSlashableAttestation( + fakeValidator(255), + Epoch 0, Epoch 20 + ).isOk() + + db.registerAttestation( + fakeValidator(255), + Epoch 0, Epoch 20, + fakeRoot(4321) + ) + + check: + # Epoch now occupied + db.checkSlashableAttestation( + fakeValidator(255), + Epoch 0, Epoch 20 + ).error.kind == DoubleVote + + wrappedTimedTest "SP for same epoch attestation target - backtracking append": + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + # last finalized block + db.registerAttestation( + fakeValidator(0), + Epoch 0, Epoch 0, + fakeRoot(0) + ) + + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 10, + fakeRoot(10) + ) + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 20, + fakeRoot(20) + ) + for i in 0 ..< 30: + if i notin {10, 20}: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).isOk() + else: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).error.kind == DoubleVote + db.checkSlashableAttestation( + fakeValidator(0xDEADBEEF), + Epoch 0, Epoch i + ).isOk() + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 15, + fakeRoot(15) + ) + for i in 0 ..< 30: + if i notin {10, 15, 20}: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).isOk() + else: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).error.kind == DoubleVote + db.checkSlashableAttestation( + fakeValidator(0xDEADBEEF), + Epoch 0, Epoch i + ).isOk() + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 12, + fakeRoot(12) + ) + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 17, + fakeRoot(17) + ) + for i 
in 0 ..< 30: + if i notin {10, 12, 15, 17, 20}: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).isOk() + else: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).error.kind == DoubleVote + db.checkSlashableAttestation( + fakeValidator(0xDEADBEEF), + Epoch 0, Epoch i + ).isOk() + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 9, + fakeRoot(9) + ) + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 1, + fakeRoot(1) + ) + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 3, + fakeRoot(3) + ) + for i in 0 ..< 30: + if i notin {1, 3, 9, 10, 12, 15, 17, 20}: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).isOk() + else: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).error.kind == DoubleVote + db.checkSlashableAttestation( + fakeValidator(0xDEADBEEF), + Epoch 0, Epoch i + ).isOk() + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 29, + fakeRoot(29) + ) + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 2, + fakeRoot(2) + ) + for i in 0 ..< 30: + if i notin {1, 2, 3, 9, 10, 12, 15, 17, 20, 29}: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).isOk() + else: + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch i + ).error.kind == DoubleVote + db.checkSlashableAttestation( + fakeValidator(0xDEADBEEF), + Epoch 0, Epoch i + ).isOk() + + wrappedTimedTest "SP for surrounded attestations": + block: + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + db.registerAttestation( + fakeValidator(100), + Epoch 10, Epoch 20, + fakeRoot(20) + ) + + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 11, Epoch 19 + ).error.kind == SurroundedVote + db.checkSlashableAttestation( + fakeValidator(200), + Epoch 11, Epoch 19 + ).isOk + 
db.checkSlashableAttestation( + fakeValidator(100), + Epoch 11, Epoch 21 + ).isOk + # TODO: is that possible? + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 9, Epoch 19 + ).isOk + + block: + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 1, + fakeRoot(0) + ) + + db.registerAttestation( + fakeValidator(100), + Epoch 10, Epoch 20, + fakeRoot(20) + ) + + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 11, Epoch 19 + ).error.kind == SurroundedVote + db.checkSlashableAttestation( + fakeValidator(200), + Epoch 11, Epoch 19 + ).isOk + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 11, Epoch 21 + ).isOk + # TODO: is that possible? + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 9, Epoch 19 + ).isOk + + + wrappedTimedTest "SP for surrounding attestations": + block: + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + db.registerAttestation( + fakeValidator(100), + Epoch 10, Epoch 20, + fakeRoot(20) + ) + + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 9, Epoch 21 + ).error.kind == SurroundingVote + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch 21 + ).error.kind == SurroundingVote + + block: + let db = SlashingProtectionDB.init(default(Eth2Digest), kvStore MemStoreRef.init()) + + db.registerAttestation( + fakeValidator(100), + Epoch 0, Epoch 1, + fakeRoot(20) + ) + + db.registerAttestation( + fakeValidator(100), + Epoch 10, Epoch 20, + fakeRoot(20) + ) + + check: + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 9, Epoch 21 + ).error.kind == SurroundingVote + db.checkSlashableAttestation( + fakeValidator(100), + Epoch 0, Epoch 21 + ).error.kind == SurroundingVote