Merge branch 'unstable' of github.com:status-im/nim-beacon-chain into unstable
Commit: b6e02136f7
@@ -74,6 +74,13 @@ OK: 3/3 Fail: 0/3 Skip: 0/3
 + parent sanity [Preset: mainnet] OK
 ```
 OK: 2/2 Fail: 0/2 Skip: 0/2
+## Eth2 specific discovery tests
+```diff
++ Invalid attnets field OK
++ Subnet query OK
++ Subnet query after ENR update OK
+```
+OK: 3/3 Fail: 0/3 Skip: 0/3
 ## Exit pool testing suite
 ```diff
 + addExitMessage/getAttesterSlashingMessage OK
@@ -282,4 +289,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 1/1 Fail: 0/1 Skip: 0/1

 ---TOTAL---
-OK: 151/160 Fail: 0/160 Skip: 9/160
+OK: 154/163 Fail: 0/163 Skip: 9/163
@@ -1,6 +1,10 @@
 TBD
 ==================

+**New features:**
+
+* Added the `setGraffiti` RPC (POST /api/nimbus/v1/graffiti in the REST API)
+
 **Breaking changes:**

 * Renamed some semi-internal debug rpc to be more explicit about their nature:
@@ -59,7 +59,7 @@ type
     immutableValidators*: ImmutableValidatorsSeq
     immutableValidatorsMem*: seq[ImmutableValidatorData]

-    checkpoint*: proc() {.gcsafe.}
+    checkpoint*: proc() {.gcsafe, raises: [Defect].}

   Keyspaces* = enum
     defaultKeyspace = "kvstore"
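The `checkpoint` change above attaches an explicit exception contract to a stored callback. A standalone sketch (plain Nim, none of this repository's types) of what such a proc-type annotation enforces: only callbacks the compiler can prove raise at most `Defect` may be assigned to the field.

```nim
type
  Store = object
    # Callbacks stored here may not raise catchable exceptions.
    checkpoint: proc() {.gcsafe, raises: [Defect].}

var checkpoints = 0

proc bumpCounter() {.gcsafe, raises: [Defect].} =
  inc checkpoints            # arithmetic can only fail with a Defect

var s = Store(checkpoint: bumpCounter)
s.checkpoint()
echo checkpoints             # -> 1
```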
@@ -277,11 +277,18 @@ proc snappyEncode(inp: openArray[byte]): seq[byte] =
   except CatchableError as err:
     raiseAssert err.msg

+proc sszEncode(v: auto): seq[byte] =
+  try:
+    SSZ.encode(v)
+  except IOError as err:
+    # In-memory encode shouldn't fail!
+    raiseAssert err.msg
+
 proc put(db: BeaconChainDB, key: openArray[byte], v: Eth2Digest) =
   db.backend.put(key, v.data).expect("working database (disk broken/full?)")

 proc put(db: BeaconChainDB, key: openArray[byte], v: auto) =
-  db.backend.put(key, snappyEncode(SSZ.encode(v))).expect("working database (disk broken/full?)")
+  db.backend.put(key, snappyEncode(sszEncode(v))).expect("working database (disk broken/full?)")

 proc get(db: BeaconChainDB, key: openArray[byte], T: type Eth2Digest): Opt[T] =
   var res: Opt[T]
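The new `sszEncode` helper converts the nominal `IOError` of an in-memory encode into a `Defect`, so callers compiled under `{.push raises: [Defect].}` stay clean. The same idiom on a stdlib call, as a minimal standalone sketch (not the SSZ code itself):

```nim
{.push raises: [Defect].}

import std/strutils

proc decimalOrDie(s: string): int =
  ## The caller guarantees `s` is a decimal literal, so a parse failure is a
  ## programming error: convert it into a Defect instead of declaring it.
  try:
    parseInt(s)                    # nominally raises ValueError
  except CatchableError as err:
    raiseAssert "expected a decimal string: " & err.msg

echo decimalOrDie("9000")          # -> 9000
```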
@@ -9,14 +9,14 @@

 import
   tables,
-  stew/[assign2, endians2, io2, objects, results],
-  serialization, chronicles,
+  stew/[assign2, io2, objects, results],
+  serialization,
   eth/db/[kvstore, kvstore_sqlite3],
   ./spec/[crypto, datatypes, digest],
   ./ssz/[ssz_serialization, merkleization],
   filepath

 type
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#beaconstate
   # Memory-representation-equivalent to a v1.0.1 BeaconState for in-place SSZ reading and writing
   BeaconStateNoImmutableValidators* = object
@@ -1,3 +1,10 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
 {.push raises: [Defect].}

 import
@@ -6,6 +13,8 @@ import

 from times import Time, getTime, fromUnix, `<`, `-`, inNanoseconds

+export chronos.Duration, Moment, now
+
 type
   BeaconClock* = object
     ## The beacon clock represents time as it passes on a beacon chain. Beacon
@@ -98,13 +107,7 @@ func saturate*(d: tuple[inFuture: bool, offset: Duration]): Duration =
   if d.inFuture: d.offset else: seconds(0)

 proc addTimer*(fromNow: Duration, cb: CallbackFunc, udata: pointer = nil) =
-  try:
-    discard setTimer(Moment.now() + fromNow, cb, udata)
-  except Exception as e:
-    # TODO https://github.com/status-im/nim-chronos/issues/94
-    # shouldn't happen because we should have initialized chronos by now
-    # https://github.com/nim-lang/Nim/issues/10288 - sigh
-    raiseAssert e.msg
+  discard setTimer(Moment.now() + fromNow, cb, udata)

 func shortLog*(d: Duration): string =
   $d
@@ -11,7 +11,7 @@ import
   std/osproc,

   # Nimble packages
-  chronos, json_rpc/rpcserver,
+  chronos, json_rpc/servers/httpserver,

   # Local modules
   ./conf, ./beacon_clock, ./beacon_chain_db,
@@ -24,20 +24,19 @@ import
   ./sync/[sync_manager, request_manager]

 export
-  osproc, chronos, rpcserver, conf, beacon_clock, beacon_chain_db,
+  osproc, chronos, httpserver, conf, beacon_clock, beacon_chain_db,
   attestation_pool, eth2_network, beacon_node_types, eth1_monitor,
   request_manager, sync_manager, eth2_processor, blockchain_dag, block_quarantine,
   datatypes

 type
   RpcServer* = RpcHttpServer
-  KeyPair* = eth2_network.KeyPair

   BeaconNode* = ref object
     nickname*: string
     graffitiBytes*: GraffitiBytes
     network*: Eth2Node
-    netKeys*: KeyPair
+    netKeys*: NetKeyPair
     db*: BeaconChainDB
     config*: BeaconNodeConf
     attachedValidators*: ref ValidatorPool
@@ -1,10 +1,12 @@
 # beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 type
   # "state" is already taken by BeaconState
   BeaconNodeStatus* = enum
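Several files in this merge gain `{.push raises: [Defect].}` at the top, as above. A small standalone sketch of what the pragma scope does: routines declared after the push must handle (or re-declare) every catchable exception, and `{.pop.}` lifts the restriction again — the same mechanism the diff uses where modules are not yet exception-clean.

```nim
{.push raises: [Defect].}

import std/os

proc fileSizeOrZero(path: string): int64 =
  # getFileSize can raise OSError; under the push it must be handled here.
  try:
    getFileSize(path)
  except CatchableError:
    0

{.pop.}  # declarations below are no longer restricted

proc fileSizeLoudly(path: string): int64 =
  getFileSize(path)                # may raise OSError again

echo fileSizeOrZero("no-such-file.bin")   # -> 0
```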
@@ -1,3 +1,10 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
 {.push raises: [Defect].}

 import
@@ -1,3 +1,10 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
 {.push raises: [Defect].}

 import
@@ -215,8 +215,7 @@ type

   OnBlockAdded* = proc(
     blckRef: BlockRef, blck: TrustedSignedBeaconBlock,
-    epochRef: EpochRef, state: HashedBeaconState) {.raises: [Defect], gcsafe.}
-    # The `{.gcsafe.}` annotation is needed to shut up the compiler.
+    epochRef: EpochRef, state: HashedBeaconState) {.gcsafe, raises: [Defect].}

 template validator_keys*(e: EpochRef): untyped = e.validator_key_store[1][]

@@ -18,7 +18,6 @@ import
   ../beacon_clock,
   "."/[block_pools_types, block_quarantine]

-from std/times import getTime, `-`
 export block_pools_types, helpers

 # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
@@ -884,7 +883,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =

   # Clean up block refs, walking block by block
   if dag.lastPrunePoint != dag.finalizedHead:
-    let start = getTime()
+    let start = Moment.now()

     # Finalization means that we choose a single chain as the canonical one -
     # it also means we're no longer interested in any branches from that chain
@@ -914,7 +913,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =

       dag.heads.del(n)

-    let stop = getTime()
+    let stop = Moment.now()
     let dur = stop - start

     debug "Pruned the blockchain DAG",
@@ -932,7 +931,7 @@ proc pruneStateCachesDAG*(dag: ChainDAGRef) =
   ## This updates the `dag.lastPrunePoint` variable
   doAssert dag.needStateCachesAndForkChoicePruning()

-  let startState = getTime()
+  let startState = Moment.now()
   block: # Remove states, walking slot by slot
     # We remove all state checkpoints that come _before_ the current finalized
     # head, as we might frequently be asked to replay states from the
@@ -950,10 +949,10 @@ proc pruneStateCachesDAG*(dag: ChainDAGRef) =
       if cur.slot.epoch mod 32 != 0 and cur.slot != dag.tail.slot:
         dag.delState(cur)
       cur = cur.parentOrSlot
-  let stopState = getTime()
+  let stopState = Moment.now()
   let durState = stopState - startState

-  let startEpochRef = getTime()
+  let startEpochRef = Moment.now()
   block: # Clean up old EpochRef instances
     # After finalization, we can clear up the epoch cache and save memory -
     # it will be recomputed if needed
@@ -961,7 +960,7 @@ proc pruneStateCachesDAG*(dag: ChainDAGRef) =
       if dag.epochRefs[i][1] != nil and
           dag.epochRefs[i][1].epoch < dag.finalizedHead.slot.epoch:
         dag.epochRefs[i] = (nil, nil)
-  let stopEpochRef = getTime()
+  let stopEpochRef = Moment.now()
   let durEpochRef = stopEpochRef - startEpochRef

   dag.lastPrunePoint = dag.finalizedHead
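The pruning code above switches its timing from `times.getTime()` to chronos' monotonic `Moment.now()`. A stdlib-only sketch of the same measurement pattern, using `std/monotimes` as a stand-in for the chronos clock:

```nim
import std/[monotimes, os, times]

let start = getMonoTime()          # monotonic: unaffected by wall-clock changes
sleep(10)                          # stand-in for the pruning work
let dur = getMonoTime() - start
echo "pruned in ", dur.inMilliseconds, " ms"
```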
@@ -8,10 +8,10 @@
 {.push raises: [Defect].}

 import
-  std/[algorithm, sequtils],
+  std/[algorithm, intsets, sequtils],
   chronicles,
   ../spec/[
-    crypto, datatypes, digest, helpers, presets, signatures,
+    crypto, datatypes, digest, helpers, network, presets, signatures,
     validator],
   ../extras,
   ./block_pools_types, ./blockchain_dag
@@ -193,3 +193,26 @@ func makeAttestationData*(
       root: epoch_boundary_block.blck.root
     )
   )
+
+# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#validator-assignments
+iterator get_committee_assignments*(
+    epochRef: EpochRef, epoch: Epoch, validator_indices: IntSet):
+    tuple[validatorIndices: IntSet,
+      committeeIndex: CommitteeIndex,
+      subnetIndex: uint8, slot: Slot] =
+  let
+    committees_per_slot = get_committee_count_per_slot(epochRef)
+    start_slot = compute_start_slot_at_epoch(epoch)
+
+  for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
+    for index in 0'u64 ..< committees_per_slot:
+      let
+        idx = index.CommitteeIndex
+        includedIndices =
+          toIntSet(get_beacon_committee(epochRef, slot, idx)) *
+            validator_indices
+      if includedIndices.len > 0:
+        yield (
+          includedIndices, idx,
+          compute_subnet_for_attestation(committees_per_slot, slot, idx).uint8,
+          slot)
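The heart of the new `get_committee_assignments` iterator is the intersection of each committee with the locally tracked validator set. A simplified standalone sketch of that operation with `std/intsets` and made-up indices (the real code works on `EpochRef`, `ValidatorIndex` and the spec helpers):

```nim
import std/intsets

let
  trackedValidators = toIntSet([3, 17, 42])      # validators this node attests for
  committee = @[1, 2, 3, 5, 8, 42]               # stand-in for get_beacon_committee(...)
  includedIndices = toIntSet(committee) * trackedValidators   # set intersection

if includedIndices.len > 0:
  echo "assignment found for validators: ", includedIndices
```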
@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
   os, sequtils, strutils, options, json, terminal,
   chronos, chronicles, confutils, stint, json_serialization,
@@ -122,7 +131,7 @@ proc sendEth(web3: Web3, to: Eth1Address, valueEth: int): Future[TxHash] =
   web3.send(tr)

 type
-  DelayGenerator* = proc(): chronos.Duration {.closure, gcsafe.}
+  DelayGenerator* = proc(): chronos.Duration {.gcsafe, raises: [Defect].}

 proc ethToWei(eth: UInt256): UInt256 =
   eth * 1000000000000000000.u256
@@ -172,6 +181,7 @@ proc sendDeposits*(deposits: seq[LaunchPadDeposit],
       await sleepAsync(60.seconds)
       web3 = await initWeb3(web3Url, privateKey)

+{.pop.} # TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
 proc main() {.async.} =
   var cfg = CliConfig.load()
   let rng = keys.newRng()
@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
   std/[deques, hashes, options, strformat, strutils, sequtils, tables,
     typetraits, uri],
@@ -31,9 +40,6 @@ contract(DepositContract):
     amount: Bytes8,
     signature: Bytes96,
     index: Bytes8) {.event.}
-# TODO
-# The raises list of this module are still not usable due to general
-# Exceptions being reported from Chronos's asyncfutures2.

 const
   web3Timeouts = 60.seconds
@@ -112,7 +118,7 @@ type
     pubkey: Bytes48,
     withdrawalCredentials: Bytes32,
     amount: Bytes8,
-    signature: Bytes96, merkleTreeIndex: Bytes8, j: JsonNode) {.raises: [Defect], gcsafe.}
+    signature: Bytes96, merkleTreeIndex: Bytes8, j: JsonNode) {.gcsafe, raises: [Defect].}

   BlockProposalEth1Data* = object
     vote*: Eth1Data
@@ -295,7 +301,9 @@ template asBlockHash(x: Eth2Digest): BlockHash =
   BlockHash(x.data)

 func shortLog*(b: Eth1Block): string =
-  &"{b.number}:{shortLog b.voteData.block_hash}(deposits = {b.voteData.deposit_count})"
+  try:
+    &"{b.number}:{shortLog b.voteData.block_hash}(deposits = {b.voteData.deposit_count})"
+  except ValueError as exc: raiseAssert exc.msg

 template findBlock*(chain: Eth1Chain, eth1Data: Eth1Data): Eth1Block =
   getOrDefault(chain.blocksByHash, asBlockHash(eth1Data.block_hash), nil)
@@ -386,14 +394,15 @@ proc getBlockByHash*(p: Web3DataProviderRef, hash: BlockHash):

 proc getBlockByNumber*(p: Web3DataProviderRef,
                        number: Eth1BlockNumber): Future[BlockObject] =
-  return p.web3.provider.eth_getBlockByNumber(&"0x{number:X}", false)
+  p.web3.provider.eth_getBlockByNumber("0x" & toHex(number), false)

 template readJsonField(j: JsonNode, fieldName: string, ValueType: type): untyped =
   var res: ValueType
   fromJson(j[fieldName], fieldName, res)
   res

-proc depositEventsToBlocks(depositsList: JsonNode): seq[Eth1Block] =
+proc depositEventsToBlocks(depositsList: JsonNode): seq[Eth1Block] {.
+    raises: [Defect, CatchableError].} =
   if depositsList.kind != JArray:
     raise newException(CatchableError,
       "Web3 provider didn't return a list of deposit events")
@@ -499,8 +508,6 @@ proc onBlockHeaders*(p: Web3DataProviderRef,
   p.blockHeadersSubscription = awaitWithRetries(
     p.web3.subscribeForBlockHeaders(blockHeaderHandler, errorHandler))

-{.push raises: [Defect].}
-
 func getDepositsRoot*(m: DepositsMerkleizer): Eth2Digest =
   mixInLength(m.getFinalHash, int m.totalChunks)

@@ -5,13 +5,13 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/tests/core/pyspec/eth2spec/utils/merkle_minimal.py

 # Merkle tree helpers
 # ---------------------------------------------------------------

-{.push raises: [Defect].}
-
 import
   sequtils,
   stew/endians2,
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import chronicles
 import stew/io2
 export io2
@@ -146,12 +146,7 @@ proc blockValidator*(
   # sync, we don't lose the gossip blocks, but also don't block the gossip
   # propagation of seemingly good blocks
   trace "Block validated"
-  try:
-    self.verifQueues[].addBlock(SyncBlock(blk: signedBlock))
-  except Exception as e:
-    # Chronos can in theory raise an untyped exception in `internalCheckComplete`
-    # but in practice that's always a Defect not a Catchable exception
-    raiseAssert e.msg
+  self.verifQueues[].addBlock(SyncBlock(blk: signedBlock))

   ValidationResult.Accept

@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   std/math,
   stew/results,
@@ -76,8 +78,6 @@ type
     consensusManager: ref ConsensusManager
       ## Blockchain DAG, AttestationPool and Quarantine

-{.push raises: [Defect].}
-
 # Initialization
 # ------------------------------------------------------------------------------

@@ -131,9 +131,7 @@ proc complete*(blk: SyncBlock, res: Result[void, BlockError]) =
 # Enqueue
 # ------------------------------------------------------------------------------

-{.pop.}
-
-proc addBlock*(self: var VerifQueueManager, syncBlock: SyncBlock) {.raises: [Exception].} =
+proc addBlock*(self: var VerifQueueManager, syncBlock: SyncBlock) =
   ## Enqueue a Gossip-validated block for consensus verification
   # Backpressure:
   # If no item can be enqueued because buffer is full,
@@ -143,24 +141,13 @@ proc addBlock*(self: var VerifQueueManager, syncBlock: SyncBlock) {.raises: [Exc
   # - SyncManager (during sync)
   # - RequestManager (missing ancestor blocks)

-  # TODO: solve the signature requiring raise: [Exception]
-  # even when push/pop is used
-
-  asyncSpawn(
-    try:
-      self.blocksQueue.addLast(BlockEntry(v: syncBlock))
-    except Exception as e:
-      # Chronos can in theory raise an untyped exception in `internalCheckComplete`
-      # which asyncSpawn doesn't like.
-      raiseAssert e.msg
-  )
-
-{.push raises: [Defect].}
+  # addLast doesn't fail
+  asyncSpawn(self.blocksQueue.addLast(BlockEntry(v: syncBlock)))

 proc addAttestation*(self: var VerifQueueManager, att: Attestation, att_indices: seq[ValidatorIndex]) =
   ## Enqueue a Gossip-validated attestation for consensus verification
   # Backpressure:
-  # no handling
+  # If buffer is full, the oldest attestation is dropped and the newest is enqueued
   # Producer:
   # - Gossip (when synced)
   while self.attestationsQueue.full():
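The updated comments describe a drop-oldest backpressure policy for the attestation and aggregate queues. A standalone sketch of that policy with `std/deques` standing in for the async queue used by the real code:

```nim
import std/deques

const maxLen = 4
var queue = initDeque[int]()

proc addWithBackpressure(q: var Deque[int], item: int) =
  # If the buffer is full, drop the oldest entry and enqueue the newest one.
  while q.len >= maxLen:
    discard q.popFirst()
  q.addLast(item)

for i in 1 .. 10:
  queue.addWithBackpressure(i)
echo queue                         # only the 4 newest items remain
```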
@@ -180,7 +167,7 @@ proc addAttestation*(self: var VerifQueueManager, att: Attestation, att_indices:
 proc addAggregate*(self: var VerifQueueManager, agg: SignedAggregateAndProof, att_indices: seq[ValidatorIndex]) =
   ## Enqueue a Gossip-validated aggregate attestation for consensus verification
   # Backpressure:
-  # no handling
+  # If buffer is full, the oldest aggregate is dropped and the newest is enqueued
   # Producer:
   # - Gossip (when synced)

@@ -332,8 +319,6 @@ proc processBlock(self: var VerifQueueManager, entry: BlockEntry) =
     if entry.v.resFut != nil:
       entry.v.resFut.complete(Result[void, BlockError].err(res.error()))

-{.pop.} # Chronos: Error: can raise an unlisted exception: ref Exception
-
 proc runQueueProcessingLoop*(self: ref VerifQueueManager) {.async.} =
   # Blocks in eth2 arrive on a schedule for every slot:
   #
@@ -1,3 +1,10 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
 {.push raises: [Defect].}

 import
@@ -1,3 +1,10 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
 {.push raises: [Defect].}

 import
@@ -78,7 +85,7 @@ proc new*(T: type Eth2DiscoveryProtocol,
           enrIp: Option[ValidIpAddress], enrTcpPort, enrUdpPort: Option[Port],
           pk: PrivateKey,
           enrFields: openArray[(string, seq[byte])], rng: ref BrHmacDrbgContext):
-          T {.raises: [Exception, Defect].} =
+          T =
   # TODO
   # Implement more configuration options:
   # * for setting up a specific key
@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
   # Std lib
   std/[typetraits, sequtils, os, algorithm, math, sets],
@@ -40,7 +49,7 @@ logScope:
   topics = "networking"

 type
-  KeyPair* = crypto.KeyPair
+  NetKeyPair* = crypto.KeyPair
   PublicKey* = crypto.PublicKey
   PrivateKey* = crypto.PrivateKey

@@ -157,13 +166,13 @@ type
     InvalidRequest
     ServerError

-  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.}
-  NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.}
+  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe, raises: [Defect].}
+  NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe, raises: [Defect].}
   OnPeerConnectedHandler* = proc(peer: Peer, incoming: bool): Future[void] {.gcsafe.}
-  OnPeerDisconnectedHandler* = proc(peer: Peer): Future[void] {.gcsafe.}
+  OnPeerDisconnectedHandler* = proc(peer: Peer): Future[void] {.gcsafe, raises: [Defect].}
   ThunkProc* = LPProtoHandler
-  MounterProc* = proc(network: Eth2Node) {.gcsafe.}
-  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
+  MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [Defect, CatchableError].}
+  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [Defect].}

   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#goodbye
   DisconnectionReason* = enum
@@ -422,7 +431,8 @@ proc isSeen*(network: ETh2Node, peerId: PeerID): bool =
   if peerId notin network.seenTable:
     false
   else:
-    let item = network.seenTable[peerId]
+    let item = try: network.seenTable[peerId]
+    except KeyError: raiseAssert "checked with notin"
     if currentTime >= item.stamp:
       # Peer is in SeenTable, but the time period has expired.
       network.seenTable.del(peerId)
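The `isSeen` change wraps a table lookup that was just guarded by `notin`, turning the impossible `KeyError` into a `Defect`. The same shape in a standalone stdlib sketch:

```nim
import std/tables

var seen = {"peer-a": 10, "peer-b": 42}.toTable

proc lastSeen(peer: string): int =
  if peer notin seen:
    return 0
  let item = try: seen[peer]
  except KeyError: raiseAssert "checked with notin just above"
  item

echo lastSeen("peer-a")            # -> 10
echo lastSeen("peer-x")            # -> 0
```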
@@ -473,10 +483,12 @@ proc getRequestProtoName(fn: NimNode): NimNode =
   let pragmas = fn.pragma
   if pragmas.kind == nnkPragma and pragmas.len > 0:
     for pragma in pragmas:
-      if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
-        let protoName = $(pragma[1])
-        let protoVer = $(pragma[2].intVal)
-        return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/")
+      try:
+        if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
+          let protoName = $(pragma[1])
+          let protoVer = $(pragma[2].intVal)
+          return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/")
+      except Exception as exc: raiseAssert exc.msg # TODO https://github.com/nim-lang/Nim/issues/17454

   return newLit("")

@@ -485,13 +497,18 @@ proc writeChunk*(conn: Connection,
                  payload: Bytes): Future[void] =
   var output = memoryOutput()

-  if responseCode.isSome:
-    output.write byte(responseCode.get)
+  try:
+    if responseCode.isSome:
+      output.write byte(responseCode.get)

     output.write toBytes(payload.lenu64, Leb128).toOpenArray()
     framingFormatCompress(output, payload)
-  conn.write(output.getOutput)
+  except IOError as exc:
+    raiseAssert exc.msg # memoryOutput shouldn't raise
+  try:
+    conn.write(output.getOutput)
+  except Exception as exc: # TODO fix libp2p
+    raiseAssert exc.msg

 template errorMsgLit(x: static string): ErrorMsg =
   const val = ErrorMsg toBytes(x)
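`writeChunk` above frames each chunk as an optional response-code byte, a `Leb128` (unsigned varint) length prefix, and then the snappy-framed payload. A from-scratch toy encoder for just the varint length prefix (not the `stew/leb128` implementation the real code uses):

```nim
proc toVarint(x: uint64): seq[byte] =
  # Unsigned LEB128: 7 payload bits per byte, high bit set on all but the last.
  var v = x
  while true:
    var b = byte(v and 0x7f)
    v = v shr 7
    if v != 0:
      b = b or 0x80
    result.add b
    if v == 0:
      break

echo toVarint(300'u64)             # -> @[172, 2]
```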
@@ -619,7 +636,7 @@ proc setEventHandlers(p: ProtocolInfo,
   p.onPeerConnected = onPeerConnected
   p.onPeerDisconnected = onPeerDisconnected

-proc implementSendProcBody(sendProc: SendProc) =
+proc implementSendProcBody(sendProc: SendProc) {.raises: [Exception].} =
   let
     msg = sendProc.msg
     UntypedResponse = bindSym "UntypedResponse"
@@ -950,7 +967,7 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
     # when no peers are in the routing table. Don't run it in continuous loop.
     await sleepAsync(1.seconds)

-proc getPersistentNetMetadata*(config: BeaconNodeConf): Eth2Metadata =
+proc getPersistentNetMetadata*(config: BeaconNodeConf): Eth2Metadata {.raises: [Defect, IOError, SerializationError].} =
   let metadataPath = config.dataDir / nodeMetadataFilename
   if not fileExists(metadataPath):
     result = Eth2Metadata()
@@ -1098,7 +1115,7 @@ proc onConnEvent(node: Eth2Node, peerId: PeerID, event: ConnEvent) {.async.} =
 proc new*(T: type Eth2Node, config: BeaconNodeConf, enrForkId: ENRForkID,
           switch: Switch, pubsub: GossipSub, ip: Option[ValidIpAddress],
           tcpPort, udpPort: Option[Port], privKey: keys.PrivateKey, discovery: bool,
-          rng: ref BrHmacDrbgContext): T =
+          rng: ref BrHmacDrbgContext): T {.raises: [Defect, CatchableError].} =
   let
     metadata = getPersistentNetMetadata(config)
   when not defined(local_testnet):
@@ -1141,12 +1158,15 @@ proc new*(T: type Eth2Node, config: BeaconNodeConf, enrForkId: ENRForkID,
     if msg.protocolMounter != nil:
       msg.protocolMounter node


   proc peerHook(peerId: PeerID, event: ConnEvent): Future[void] {.gcsafe.} =
     onConnEvent(node, peerId, event)

-  switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
-  switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
+  try:
+    switch.addConnEventHandler(peerHook, ConnEventKind.Connected)
+    switch.addConnEventHandler(peerHook, ConnEventKind.Disconnected)
+  except Exception as exc: # TODO fix libp2p, shouldn't happen
+    raiseAssert exc.msg
   node

 template publicKey*(node: Eth2Node): keys.PublicKey =
@@ -1368,7 +1388,7 @@ template tcpEndPoint(address, port): auto =
   MultiAddress.init(address, tcpProtocol, port)

 proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
-                           config: BeaconNodeConf): KeyPair =
+                           config: BeaconNodeConf): NetKeyPair =
   case config.cmd
   of noCommand, record:
     if config.netKeyFile == "random":
@@ -1376,15 +1396,16 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
       if res.isErr():
         fatal "Could not generate random network key file"
         quit QuitFailure
-      let privKey = res.get()
-      let pubKey = privKey.getKey().tryGet()
-      let pres = PeerID.init(pubKey)
+      let
+        privKey = res.get()
+        pubKey = privKey.getKey().expect("working public key from random")
+        pres = PeerID.init(pubKey)
       if pres.isErr():
         fatal "Could not obtain PeerID from network key"
         quit QuitFailure
       info "Generating new networking key", network_public_key = pubKey,
         network_peer_id = $pres.get()
-      return KeyPair(seckey: privKey, pubkey: privKey.getKey().tryGet())
+      NetKeyPair(seckey: privKey, pubkey: pubKey)
     else:
       let keyPath =
         if isAbsolute(config.netKeyFile):
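The key-loading paths above replace `tryGet()` with `expect("...")`, which fails with a descriptive `Defect` message instead of a generic one. A minimal sketch of that difference, assuming the `stew/results` package this codebase already depends on:

```nim
import stew/results

proc deriveKey(ok: bool): Result[string, string] =
  if ok:
    Result[string, string].ok("public-key-bytes")
  else:
    Result[string, string].err("rng failure")

# On success the value is returned; on error the program dies with the given
# message wrapped in a Defect, which is easier to diagnose than a bare failure.
let pubKey = deriveKey(true).expect("working public key from random")
echo pubKey
```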
@@ -1406,11 +1427,12 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
       if res.isNone():
         fatal "Could not load network key file"
         quit QuitFailure
-      let privKey = res.get()
-      let pubKey = privKey.getKey().tryGet()
+      let
+        privKey = res.get()
+        pubKey = privKey.getKey().expect("working public key from file")
       info "Network key storage was successfully unlocked",
         key_path = keyPath, network_public_key = pubKey
-      return KeyPair(seckey: privKey, pubkey: pubKey)
+      NetKeyPair(seckey: privKey, pubkey: pubKey)
     else:
       info "Network key storage is missing, creating a new one",
         key_path = keyPath
@@ -1419,8 +1441,9 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
         fatal "Could not generate random network key file"
         quit QuitFailure

-      let privKey = rres.get()
-      let pubKey = privKey.getKey().tryGet()
+      let
+        privKey = rres.get()
+        pubKey = privKey.getKey().expect("working public key from random")

       # Insecure password used only for automated testing.
       let insecurePassword =
@@ -1436,7 +1459,7 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,

       info "New network key storage was created", key_path = keyPath,
         network_public_key = pubKey
-      return KeyPair(seckey: privKey, pubkey: pubKey)
+      NetKeyPair(seckey: privKey, pubkey: pubKey)

   of createTestnet:
     if config.netKeyFile == "random":
@@ -1454,8 +1477,9 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
       fatal "Could not generate random network key file"
       quit QuitFailure

-      let privKey = rres.get()
-      let pubKey = privKey.getKey().tryGet()
+      let
+        privKey = rres.get()
+        pubKey = privKey.getKey().expect("working public key from random")

       # Insecure password used only for automated testing.
       let insecurePassword =
@@ -1472,15 +1496,17 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
       info "New network key storage was created", key_path = keyPath,
         network_public_key = pubKey

-      return KeyPair(seckey: privKey, pubkey: privkey.getKey().tryGet())
+      NetKeyPair(seckey: privKey, pubkey: pubKey)
     else:
       let res = PrivateKey.random(Secp256k1, rng)
       if res.isErr():
         fatal "Could not generate random network key file"
         quit QuitFailure

-      let privKey = res.get()
-      return KeyPair(seckey: privKey, pubkey: privkey.getKey().tryGet())
+      let
+        privKey = res.get()
+        pubKey = privKey.getKey().expect("working public key from random")
+      NetKeyPair(seckey: privKey, pubkey: pubKey)

 func gossipId(data: openArray[byte], valid: bool): seq[byte] =
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#topics-and-messages
@@ -1504,36 +1530,43 @@ func msgIdProvider(m: messages.Message): seq[byte] =

 proc newBeaconSwitch*(config: BeaconNodeConf, seckey: PrivateKey,
                       address: MultiAddress,
-                      rng: ref BrHmacDrbgContext): Switch =
-  proc createMplex(conn: Connection): Muxer =
-    Mplex.init(conn, inTimeout = 5.minutes, outTimeout = 5.minutes)
+                      rng: ref BrHmacDrbgContext): Switch {.raises: [Defect, CatchableError].} =
+  try:
+    proc createMplex(conn: Connection): Muxer =
+      Mplex.init(conn, inTimeout = 5.minutes, outTimeout = 5.minutes)

     let
       peerInfo = PeerInfo.init(seckey, [address])
       mplexProvider = newMuxerProvider(createMplex, MplexCodec)
       transports = @[Transport(TcpTransport.init({ServerFlags.ReuseAddr}))]
       muxers = {MplexCodec: mplexProvider}.toTable
       secureManagers = [Secure(newNoise(rng, seckey))]

     peerInfo.agentVersion = config.agentString

     let identify = newIdentify(peerInfo)

     newSwitch(
       peerInfo,
       transports,
       identify,
       muxers,
       secureManagers,
       maxConnections = config.maxPeers)
+  except CatchableError as exc: raise exc
+  except Exception as exc: # TODO fix libp2p
+    if exc is Defect: raise (ref Defect)exc
+    raiseAssert exc.msg

 proc createEth2Node*(rng: ref BrHmacDrbgContext,
                      config: BeaconNodeConf,
-                     netKeys: KeyPair,
-                     enrForkId: ENRForkID): Eth2Node =
+                     netKeys: NetKeyPair,
+                     enrForkId: ENRForkID): Eth2Node {.raises: [Defect, CatchableError].} =
   var
-    (extIp, extTcpPort, extUdpPort) = setupAddress(config.nat,
-      config.listenAddress, config.tcpPort, config.udpPort, clientId)
+    (extIp, extTcpPort, extUdpPort) = try: setupAddress(
+      config.nat, config.listenAddress, config.tcpPort, config.udpPort, clientId)
+    except CatchableError as exc: raise exc
+    except Exception as exc: raiseAssert exc.msg
     hostAddress = tcpEndPoint(config.listenAddress, config.tcpPort)
     announcedAddresses = if extIp.isNone() or extTcpPort.isNone(): @[]
       else: @[tcpEndPoint(extIp.get(), extTcpPort.get())]
@@ -1590,7 +1623,7 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
       info "Adding priviledged direct peer", peerId, address = maddress
       res
     )
-    pubsub = GossipSub.init(
+    pubsub = try: GossipSub.init(
       switch = switch,
       msgIdProvider = msgIdProvider,
       triggerSelf = true,
@@ -1598,7 +1631,8 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
       verifySignature = false,
       anonymize = true,
       parameters = params)
+    except CatchableError as exc: raise exc
+    except Exception as exc: raiseAssert exc.msg # TODO fix libp2p
   switch.mount(pubsub)

   Eth2Node.new(config, enrForkId, switch, pubsub,
@@ -1611,10 +1645,12 @@ proc announcedENR*(node: Eth2Node): enr.Record =
   doAssert node.discovery != nil, "The Eth2Node must be initialized"
   node.discovery.localNode.record

-proc shortForm*(id: KeyPair): string =
+proc shortForm*(id: NetKeyPair): string =
   $PeerID.init(id.pubkey)

-proc subscribe*(node: Eth2Node, topic: string, topicParams: TopicParams, enableTopicMetrics: bool = false) =
+proc subscribe*(
+    node: Eth2Node, topic: string, topicParams: TopicParams,
+    enableTopicMetrics: bool = false) {.raises: [Defect, CatchableError].} =
   proc dummyMsgHandler(topic: string, data: seq[byte]) {.async.} =
     discard

@@ -1625,7 +1661,10 @@ proc subscribe*(node: Eth2Node, topic: string, topicParams: TopicParams, enableT
   node.pubsub.knownTopics.incl(topicName)

   node.pubsub.topicParams[topicName] = topicParams
-  node.pubsub.subscribe(topicName, dummyMsgHandler)
+  try:
+    node.pubsub.subscribe(topicName, dummyMsgHandler)
+  except CatchableError as exc: raise exc # TODO fix libp2p
+  except Exception as exc: raiseAssert exc.msg

 proc setValidTopics*(node: Eth2Node, topics: openArray[string]) =
   let topicsSnappy = topics.mapIt(it & "_snappy")
|
|||||||
proc addValidator*[MsgType](node: Eth2Node,
|
proc addValidator*[MsgType](node: Eth2Node,
|
||||||
topic: string,
|
topic: string,
|
||||||
msgValidator: proc(msg: MsgType):
|
msgValidator: proc(msg: MsgType):
|
||||||
ValidationResult {.gcsafe.} ) =
|
ValidationResult {.gcsafe, raises: [Defect].} ) =
|
||||||
# Validate messages as soon as subscribed
|
# Validate messages as soon as subscribed
|
||||||
proc execValidator(
|
proc execValidator(
|
||||||
topic: string, message: GossipMsg): Future[ValidationResult] {.async.} =
|
topic: string, message: GossipMsg): Future[ValidationResult] {.async.} =
|
||||||
@ -1661,10 +1700,15 @@ proc addValidator*[MsgType](node: Eth2Node,
|
|||||||
ValidationResult.Ignore
|
ValidationResult.Ignore
|
||||||
return res
|
return res
|
||||||
|
|
||||||
node.pubsub.addValidator(topic & "_snappy", execValidator)
|
try:
|
||||||
|
node.pubsub.addValidator(topic & "_snappy", execValidator)
|
||||||
|
except Exception as exc: raiseAssert exc.msg # TODO fix libp2p
|
||||||
|
|
||||||
proc unsubscribe*(node: Eth2Node, topic: string) =
|
proc unsubscribe*(node: Eth2Node, topic: string) {.raises: [Defect, CatchableError].} =
|
||||||
node.pubsub.unsubscribeAll(topic & "_snappy")
|
try:
|
||||||
|
node.pubsub.unsubscribeAll(topic & "_snappy")
|
||||||
|
except CatchableError as exc: raise exc
|
||||||
|
except Exception as exc: raiseAssert exc.msg # TODO fix libp2p
|
||||||
|
|
||||||
proc traceMessage(fut: FutureBase, msgId: seq[byte]) =
|
proc traceMessage(fut: FutureBase, msgId: seq[byte]) =
|
||||||
fut.addCallback do (arg: pointer):
|
fut.addCallback do (arg: pointer):
|
||||||
@ -1678,14 +1722,21 @@ proc traceMessage(fut: FutureBase, msgId: seq[byte]) =
|
|||||||
msgId = byteutils.toHex(msgId), state = fut.state
|
msgId = byteutils.toHex(msgId), state = fut.state
|
||||||
|
|
||||||
proc broadcast*(node: Eth2Node, topic: string, msg: auto) =
|
proc broadcast*(node: Eth2Node, topic: string, msg: auto) =
|
||||||
let
|
try:
|
||||||
uncompressed = SSZ.encode(msg)
|
let
|
||||||
compressed = snappy.encode(uncompressed)
|
uncompressed = SSZ.encode(msg)
|
||||||
|
compressed = try: snappy.encode(uncompressed)
|
||||||
|
except InputTooLarge:
|
||||||
|
raiseAssert "More than 4gb? not likely.."
|
||||||
|
|
||||||
# This is only for messages we create. A message this large amounts to an
|
# This is only for messages we create. A message this large amounts to an
|
||||||
# internal logic error.
|
# internal logic error.
|
||||||
doAssert uncompressed.len <= GOSSIP_MAX_SIZE
|
doAssert uncompressed.len <= GOSSIP_MAX_SIZE
|
||||||
inc nbc_gossip_messages_sent
|
inc nbc_gossip_messages_sent
|
||||||
|
|
||||||
var futSnappy = node.pubsub.publish(topic & "_snappy", compressed)
|
var futSnappy = try: node.pubsub.publish(topic & "_snappy", compressed)
|
||||||
traceMessage(futSnappy, gossipId(uncompressed, true))
|
except Exception as exc:
|
||||||
|
raiseAssert exc.msg # TODO fix libp2p
|
||||||
|
traceMessage(futSnappy, gossipId(uncompressed, true))
|
||||||
|
except IOError as exc:
|
||||||
|
raiseAssert exc.msg # TODO in-memory compression shouldn't fail
|
||||||
|
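
The hunks above all follow the same recipe: calls into libp2p and the serializers are wrapped in `try`/`except`, with "impossible" exceptions converted into `raiseAssert`, so the procs can live under a `{.push raises: [Defect].}` contract. A minimal, self-contained sketch of that idea, with hypothetical `parsePort`/`parsePortOrDefault` helpers that are not part of this commit:

```nim
# Sketch (not from the commit): exception narrowing under a pushed
# `raises: [Defect]` contract - anything that can raise must either be
# declared in the signature or handled on the spot.
{.push raises: [Defect].}

import std/strutils

proc parsePort(s: string): int {.raises: [Defect, ValueError].} =
  # parseInt may raise ValueError, so the proc declares it explicitly.
  parseInt(s)

proc parsePortOrDefault(s: string, default = 9000): int =
  # The ValueError is handled locally, so the module-wide
  # `raises: [Defect]` contract still holds for this proc.
  try:
    result = parseInt(s)
  except ValueError:
    result = default

{.pop.}

when isMainModule:
  doAssert parsePortOrDefault("9100") == 9100
  doAssert parsePortOrDefault("oops") == 9000
```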
@@ -1,23 +1,35 @@
-import libp2p/daemon/daemonapi, json_serialization
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
+import libp2p/[peerid, multiaddress], json_serialization
 export json_serialization
 
-proc writeValue*(writer: var JsonWriter, value: PeerID) {.inline.} =
+proc writeValue*(writer: var JsonWriter, value: PeerID) {.
+    raises: [Defect, IOError].} =
   writer.writeValue $value
 
-proc readValue*(reader: var JsonReader, value: var PeerID) {.inline.} =
+proc readValue*(reader: var JsonReader, value: var PeerID) {.
+    raises: [Defect, IOError, SerializationError].} =
   let res = PeerID.init reader.readValue(string)
   if res.isOk:
     value = res.get()
   else:
     raiseUnexpectedValue(reader, $res.error)
 
-proc writeValue*(writer: var JsonWriter, value: MultiAddress) {.inline.} =
+proc writeValue*(writer: var JsonWriter, value: MultiAddress) {.
+    raises: [Defect, IOError].} =
   writer.writeValue $value
 
-proc readValue*(reader: var JsonReader, value: var MultiAddress) {.inline.} =
+proc readValue*(reader: var JsonReader, value: var MultiAddress) {.
+    raises: [Defect, IOError, SerializationError].} =
   let res = MultiAddress.init reader.readValue(string)
   if res.isOk:
     value = res.value
   else:
     raiseUnexpectedValue(reader, $res.error)
@@ -1,9 +1,12 @@
-# Copyright (c) 2020-2021 Status Research & Development GmbH
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [Defect].}
+
 import
   tables, strutils, os,
   stew/shims/macros, nimcrypto/hash,
@@ -13,6 +16,7 @@ import
   json_serialization,
   json_serialization/std/[options, sets, net], serialization/errors,
   ../ssz/navigator,
+  eth/common/eth_types_json_serialization,
   ../spec/[presets, datatypes, digest]
 
 # ATTENTION! This file will produce a large C file, because we are inlining
@@ -23,8 +27,6 @@ import
 # TODO(zah):
 # We can compress the embedded states with snappy before embedding them here.
 
-{.push raises: [Defect].}
-
 export
   ethtypes, conversions, RuntimePreset
 
@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import std/[tables, heapqueue]
 import chronos
 
@@ -28,13 +37,13 @@ type
 
   PeerIndex = object
     data: int
-    cmp: proc(a, b: PeerIndex): bool {.closure, gcsafe.}
+    cmp: proc(a, b: PeerIndex): bool {.gcsafe, raises: [Defect].}
 
   PeerScoreCheckCallback*[T] = proc(peer: T): bool {.gcsafe, raises: [Defect].}
 
  PeerCounterCallback* = proc() {.gcsafe, raises: [Defect].}
 
-  PeerOnDeleteCallback*[T] = proc(peer: T) {.gcsafe.}
+  PeerOnDeleteCallback*[T] = proc(peer: T) {.gcsafe, raises: [Defect].}
 
   PeerPool*[A, B] = ref object
     incNotEmptyEvent*: AsyncEvent
@@ -45,7 +54,7 @@ type
     outQueue: HeapQueue[PeerIndex]
     registry: Table[B, PeerIndex]
    storage: seq[PeerItem[A]]
-    cmp: proc(a, b: PeerIndex): bool {.closure, gcsafe.}
+    cmp: proc(a, b: PeerIndex): bool {.gcsafe, raises: [Defect].}
     scoreCheck: PeerScoreCheckCallback[A]
     onDeletePeer: PeerOnDeleteCallback[A]
     peerCounter: PeerCounterCallback
@@ -288,7 +297,8 @@ proc deletePeer*[A, B](pool: PeerPool[A, B], peer: A, force = false): bool =
   mixin getKey
   let key = getKey(peer)
   if pool.registry.hasKey(key):
-    let pindex = pool.registry[key].data
+    let pindex = try: pool.registry[key].data
+                 except KeyError: raiseAssert "checked with hasKey"
     var item = addr(pool.storage[pindex])
     if (PeerFlags.Acquired in item[].flags):
       if not(force):
@@ -339,7 +349,7 @@ proc deletePeer*[A, B](pool: PeerPool[A, B], peer: A, force = false): bool =
 
 proc addPeerImpl[A, B](pool: PeerPool[A, B], peer: A, peerKey: B,
                        peerType: PeerType) =
-  proc onPeerClosed(udata: pointer) {.gcsafe.} =
+  proc onPeerClosed(udata: pointer) {.gcsafe, raises: [Defect].} =
     discard pool.deletePeer(peer)
 
   let item = PeerItem[A](data: peer, peerType: peerType,
@@ -675,12 +685,12 @@ iterator acquiredPeers*[A, B](pool: PeerPool[A, B],
     let pindex = sorted.pop().data
     yield pool.storage[pindex].data
 
-proc `[]`*[A, B](pool: PeerPool[A, B], key: B): A {.inline.} =
+proc `[]`*[A, B](pool: PeerPool[A, B], key: B): A {.inline, raises: [Defect, KeyError].} =
   ## Retrieve peer with key ``key`` from PeerPool ``pool``.
   let pindex = pool.registry[key]
   pool.storage[pindex.data]
 
-proc `[]`*[A, B](pool: var PeerPool[A, B], key: B): var A {.inline.} =
+proc `[]`*[A, B](pool: var PeerPool[A, B], key: B): var A {.inline, raises: [Defect, KeyError].} =
   ## Retrieve peer with key ``key`` from PeerPool ``pool``.
   let pindex = pool.registry[key]
   pool.storage[pindex.data].data
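
The `try: pool.registry[key].data except KeyError: raiseAssert ...` guard above looks redundant next to `hasKey`, but it is what keeps `deletePeer` inside the `raises: [Defect]` contract: `Table`'s `[]` is declared as raising `KeyError` regardless of any preceding check. A self-contained sketch of the same pattern, using only the standard library and a hypothetical `lookup` helper that is not part of this commit:

```nim
# Sketch (not from the commit): guarded table access under strict
# exception tracking.
{.push raises: [Defect].}

import std/tables

proc lookup(registry: Table[string, int], key: string): int =
  ## Returns the registered value, or -1 when the key is unknown.
  if not registry.hasKey(key):
    return -1
  try:
    result = registry[key]  # KeyError cannot fire here, but the compiler
  except KeyError:          # cannot prove it, so it is turned into a Defect.
    raiseAssert "checked with hasKey"

{.pop.}

when isMainModule:
  let reg = {"peer-a": 1, "peer-b": 2}.toTable
  doAssert lookup(reg, "peer-a") == 1
  doAssert lookup(reg, "peer-c") == -1
```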
@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [Defect].}
+
 import
   # Standard library
   std/[math, os, osproc, random, sequtils, strformat, strutils,
@@ -14,7 +16,6 @@ import
   # Nimble packages
   stew/[objects, byteutils, endians2, io2], stew/shims/macros,
   chronos, confutils, metrics, metrics/chronos_httpserver,
-  json_rpc/[rpcclient, rpcserver, jsonmarshal],
   chronicles, bearssl, blscurve,
   json_serialization/std/[options, sets, net], serialization/errors,
 
@@ -31,7 +32,9 @@ import
          beacon_clock, version],
   ./networking/[eth2_discovery, eth2_network, network_metadata],
   ./gossip_processing/[eth2_processor, gossip_to_consensus, consensus_manager],
-  ./validators/[attestation_aggregation, validator_duties, validator_pool, slashing_protection, keystore_management],
+  ./validators/[
+    attestation_aggregation, validator_duties, validator_pool,
+    slashing_protection, keystore_management],
   ./sync/[sync_manager, sync_protocol, request_manager],
   ./rpc/[beacon_api, config_api, debug_api, event_api, nimbus_api, node_api,
          validator_api],
@@ -93,7 +96,8 @@ proc init*(T: type BeaconNode,
            depositContractDeployedAt: BlockHashOrNumber,
            eth1Network: Option[Eth1Network],
            genesisStateContents: string,
-           genesisDepositsSnapshotContents: string): BeaconNode =
+           genesisDepositsSnapshotContents: string): BeaconNode {.
+    raises: [Defect, CatchableError].} =
   let
     db = BeaconChainDB.init(runtimePreset, config.databaseDir)
 
@@ -212,8 +216,8 @@ proc init*(T: type BeaconNode,
     try:
       ChainDAGRef.preInit(db, genesisState[], tailState[], tailBlock)
       doAssert ChainDAGRef.isInitialized(db), "preInit should have initialized db"
-    except CatchableError as e:
-      error "Failed to initialize database", err = e.msg
+    except CatchableError as exc:
+      error "Failed to initialize database", err = exc.msg
      quit 1
 
   # Doesn't use std/random directly, but dependencies might
@@ -298,7 +302,8 @@ proc init*(T: type BeaconNode,
     slashingProtectionDB =
       case config.slashingDbKind
       of SlashingDbKind.v1:
-        info "Loading slashing protection database", path = config.validatorsDir()
+        info "Loading slashing protection database",
+          path = config.validatorsDir()
        SlashingProtectionDB.init(
           chainDag.headState.data.data.genesis_validators_root,
           config.validatorsDir(), "slashing_protection",
@@ -306,13 +311,15 @@ proc init*(T: type BeaconNode,
           disagreementBehavior = kChooseV1
         )
       of SlashingDbKind.v2:
-        info "Loading slashing protection database (v2)", path = config.validatorsDir()
+        info "Loading slashing protection database (v2)",
+          path = config.validatorsDir()
         SlashingProtectionDB.init(
           chainDag.headState.data.data.genesis_validators_root,
           config.validatorsDir(), "slashing_protection"
         )
       of SlashingDbKind.both:
-        info "Loading slashing protection database (dual DB mode)", path = config.validatorsDir()
+        info "Loading slashing protection database (dual DB mode)",
+          path = config.validatorsDir()
         SlashingProtectionDB.init(
           chainDag.headState.data.data.genesis_validators_root,
           config.validatorsDir(), "slashing_protection",
@@ -380,7 +387,9 @@ proc init*(T: type BeaconNode,
     let cmd = getAppDir() / "nimbus_signing_process".addFileExt(ExeExt)
     let args = [$res.config.validatorsDir, $res.config.secretsDir]
     let workdir = io2.getCurrentDir().tryGet()
-    res.vcProcess = startProcess(cmd, workdir, args)
+    res.vcProcess = try: startProcess(cmd, workdir, args)
+    except CatchableError as exc: raise exc
+    except Exception as exc: raiseAssert exc.msg
     res.addRemoteValidators()
 
   # This merely configures the BeaconSync
@@ -410,7 +419,8 @@ func verifyFinalization(node: BeaconNode, slot: Slot) =
     # finalization occurs every slot, to 4 slots vs scheduledSlot.
     doAssert finalizedEpoch + 4 >= epoch
 
-proc installAttestationSubnetHandlers(node: BeaconNode, subnets: set[uint8]) =
+proc installAttestationSubnetHandlers(node: BeaconNode, subnets: set[uint8]) {.
+    raises: [Defect, CatchableError].} =
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
   # nimbus won't score attestation subnets for now, we just rely on block and aggregate which are more stabe and reliable
   for subnet in subnets:
@@ -453,8 +463,6 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
     attachedValidators = node.getAttachedValidators()
     validatorIndices = toIntSet(toSeq(attachedValidators.keys()))
 
-  var cache = StateCache()
-
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#lookahead
   # Only subscribe when this node should aggregate; libp2p broadcasting works
   # on subnet topics regardless.
@@ -486,8 +494,7 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
   static: doAssert SLOTS_PER_EPOCH <= 32
 
   for (validatorIndices, committeeIndex, subnetIndex, slot) in
-      get_committee_assignments(
-        node.chainDag.headState.data.data, epoch, validatorIndices, cache):
+      get_committee_assignments(epochRef, epoch, validatorIndices):
 
     doAssert compute_epoch_at_slot(slot) == epoch
 
@@ -643,17 +650,16 @@ proc getInitialAttestationSubnets(node: BeaconNode): Table[uint8, Slot] =
     wallEpoch = node.beaconClock.now().slotOrZero().epoch
     validatorIndices = toIntSet(toSeq(node.getAttachedValidators().keys()))
 
-  var cache = StateCache()
-
   template mergeAttestationSubnets(epoch: Epoch) =
     # TODO when https://github.com/nim-lang/Nim/issues/15972 and
     # https://github.com/nim-lang/Nim/issues/16217 are fixed, in
    # Nimbus's Nim, use (_, _, subnetIndex, slot).
+    let epochRef = node.chainDag.getEpochRef(node.chainDag.head, epoch)
     for (_, ci, subnetIndex, slot) in get_committee_assignments(
-        node.chainDag.headState.data.data, epoch, validatorIndices, cache):
-      if subnetIndex in result:
-        result[subnetIndex] = max(result[subnetIndex], slot + 1)
-      else:
+        epochRef, epoch, validatorIndices):
+      result.withValue(subnetIndex, v) do:
+        v[] = max(v[], slot + 1)
+      do:
         result[subnetIndex] = slot + 1
 
   # Either wallEpoch is 0, in which case it might be pre-genesis, but we only
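
The switch from `if subnetIndex in result: ... else: ...` to `withValue ... do:` in the hunk above removes a second table lookup and, more relevant to this commit, avoids `[]`, which is declared as raising `KeyError`. A self-contained sketch of that access pattern, with a hypothetical `bumpDeadline` helper (std/tables only, not part of this commit):

```nim
# Sketch (not from the commit): update-or-insert without `[]` reads.
{.push raises: [Defect].}

import std/tables

proc bumpDeadline(deadlines: var Table[uint8, uint64],
                  subnet: uint8, slot: uint64) =
  # `withValue` hands out a pointer to the stored value when the key exists
  # (no KeyError possible) and runs the second branch when it does not.
  deadlines.withValue(subnet, v) do:
    v[] = max(v[], slot + 1)
  do:
    deadlines[subnet] = slot + 1

{.pop.}

when isMainModule:
  var deadlines = initTable[uint8, uint64]()
  deadlines.bumpDeadline(3'u8, 10'u64)   # inserts 11
  deadlines.bumpDeadline(3'u8, 42'u64)   # bumps to 43
  doAssert deadlines[3'u8] == 43'u64
```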
@@ -664,7 +670,8 @@ proc getInitialAttestationSubnets(node: BeaconNode): Table[uint8, Slot] =
   mergeAttestationSubnets(wallEpoch)
   mergeAttestationSubnets(wallEpoch + 1)
 
-proc getAttestationSubnetHandlers(node: BeaconNode) =
+proc getAttestationSubnetHandlers(node: BeaconNode) {.
+    raises: [Defect, CatchableError].} =
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
   # TODO:
   # We might want to reuse the previous stability subnet if not expired when:
@@ -705,7 +712,8 @@ proc getAttestationSubnetHandlers(node: BeaconNode) =
   for i in 0'u8 ..< ATTESTATION_SUBNET_COUNT:
     if i in initialSubnets:
       node.attestationSubnets.subscribedSubnets.incl i
-      node.attestationSubnets.unsubscribeSlot[i] = initialSubnets[i]
+      node.attestationSubnets.unsubscribeSlot[i] =
+        try: initialSubnets[i] except KeyError: raiseAssert "checked with in"
     else:
       node.attestationSubnets.subscribedSubnets.excl i
       node.attestationSubnets.subscribeSlot[i] = FAR_FUTURE_SLOT
@@ -719,7 +727,7 @@ proc getAttestationSubnetHandlers(node: BeaconNode) =
   node.installAttestationSubnetHandlers(
     node.attestationSubnets.subscribedSubnets + initialStabilitySubnets)
 
-proc addMessageHandlers(node: BeaconNode) =
+proc addMessageHandlers(node: BeaconNode) {.raises: [Defect, CatchableError].} =
   # inspired by lighthouse research here
   # https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c#file-generate-scoring-params-py
   const
@@ -779,7 +787,7 @@ proc addMessageHandlers(node: BeaconNode) =
 func getTopicSubscriptionEnabled(node: BeaconNode): bool =
   node.attestationSubnets.enabled
 
-proc removeMessageHandlers(node: BeaconNode) =
+proc removeMessageHandlers(node: BeaconNode) {.raises: [Defect, CatchableError].} =
   node.attestationSubnets.enabled = false
   doAssert not node.getTopicSubscriptionEnabled()
 
@@ -809,7 +817,7 @@ proc setupDoppelgangerDetection(node: BeaconNode, slot: Slot) =
     broadcastStartEpoch =
       node.processor.doppelgangerDetection.broadcastStartEpoch
 
-proc updateGossipStatus(node: BeaconNode, slot: Slot) =
+proc updateGossipStatus(node: BeaconNode, slot: Slot) {.raises: [Defect, CatchableError].} =
   # Syncing tends to be ~1 block/s, and allow for an epoch of time for libp2p
   # subscribing to spin up. The faster the sync, the more wallSlot - headSlot
   # lead time is required
@@ -1120,12 +1128,14 @@ proc startSyncManager(node: BeaconNode) =
       debug "Peer was removed from PeerPool due to low score", peer = peer,
             peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
             score_high_limit = PeerScoreHighLimit
-      asyncSpawn peer.disconnect(PeerScoreLow)
+      asyncSpawn(try: peer.disconnect(PeerScoreLow)
+      except Exception as exc: raiseAssert exc.msg) # Shouldn't actually happen!
     else:
       debug "Peer was removed from PeerPool", peer = peer,
             peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
             score_high_limit = PeerScoreHighLimit
-      asyncSpawn peer.disconnect(FaultOrError)
+      asyncSpawn(try: peer.disconnect(FaultOrError)
+      except Exception as exc: raiseAssert exc.msg) # Shouldn't actually happen!
 
   node.network.peerPool.setScoreCheck(scoreCheck)
   node.network.peerPool.setOnDeletePeer(onDeletePeer)
@@ -1140,13 +1150,15 @@ func connectedPeersCount(node: BeaconNode): int =
   len(node.network.peerPool)
 
 proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) =
-  rpcServer.installBeaconApiHandlers(node)
-  rpcServer.installConfigApiHandlers(node)
-  rpcServer.installDebugApiHandlers(node)
-  rpcServer.installEventApiHandlers(node)
-  rpcServer.installNimbusApiHandlers(node)
-  rpcServer.installNodeApiHandlers(node)
-  rpcServer.installValidatorApiHandlers(node)
+  try:
+    rpcServer.installBeaconApiHandlers(node)
+    rpcServer.installConfigApiHandlers(node)
+    rpcServer.installDebugApiHandlers(node)
+    rpcServer.installEventApiHandlers(node)
+    rpcServer.installNimbusApiHandlers(node)
+    rpcServer.installNodeApiHandlers(node)
+    rpcServer.installValidatorApiHandlers(node)
+  except Exception as exc: raiseAssert exc.msg # TODO fix json-rpc
 
 proc installMessageValidators(node: BeaconNode) =
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
@@ -1190,13 +1202,20 @@ proc stop*(node: BeaconNode) =
   bnStatus = BeaconNodeStatus.Stopping
   notice "Graceful shutdown"
   if not node.config.inProcessValidators:
-    node.vcProcess.close()
-  waitFor node.network.stop()
+    try:
+      node.vcProcess.close()
+    except Exception as exc:
+      warn "Couldn't close vc process", msg = exc.msg
+  try:
+    waitFor node.network.stop()
+  except CatchableError as exc:
+    warn "Couldn't stop network", msg = exc.msg
 
   node.attachedValidators.slashingProtection.close()
   node.db.close()
   notice "Databases closed"
 
-proc run*(node: BeaconNode) =
+proc run*(node: BeaconNode) {.raises: [Defect, CatchableError].} =
   if bnStatus == BeaconNodeStatus.Starting:
     # it might have been set to "Stopping" with Ctrl+C
     bnStatus = BeaconNodeStatus.Running
@@ -1224,10 +1243,16 @@ proc run*(node: BeaconNode) =
   proc controlCHandler() {.noconv.} =
     when defined(windows):
       # workaround for https://github.com/nim-lang/Nim/issues/4057
-      setupForeignThreadGc()
+      try:
+        setupForeignThreadGc()
+      except Exception as exc: raiseAssert exc.msg # shouldn't happen
     notice "Shutting down after having received SIGINT"
     bnStatus = BeaconNodeStatus.Stopping
-  setControlCHook(controlCHandler)
+  try:
+    setControlCHook(controlCHandler)
+  except Exception as exc: # TODO Exception
+    warn "Cannot set ctrl-c handler", msg = exc.msg
 
   # equivalent SIGTERM handler
   when defined(posix):
     proc SIGTERMHandler(signal: cint) {.noconv.} =
@@ -1237,16 +1262,13 @@ proc run*(node: BeaconNode) =
 
   # main event loop
   while bnStatus == BeaconNodeStatus.Running:
-    try:
-      poll()
-    except CatchableError as e:
-      debug "Exception in poll()", exc = e.name, err = e.msg
+    poll() # if poll fails, the network is broken
 
   # time to say goodbye
   node.stop()
 
 var gPidFile: string
-proc createPidFile(filename: string) =
+proc createPidFile(filename: string) {.raises: [Defect, IOError].} =
   writeFile filename, $os.getCurrentProcessId()
   gPidFile = filename
   addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile)
@@ -1264,7 +1286,7 @@ func shouldWeStartWeb3(node: BeaconNode): bool =
   (node.config.web3Mode == Web3Mode.enabled) or
   (node.config.web3Mode == Web3Mode.auto and node.attachedValidators[].count > 0)
 
-proc start(node: BeaconNode) =
+proc start(node: BeaconNode) {.raises: [Defect, CatchableError].} =
   let
     head = node.chainDag.head
     finalizedHead = node.chainDag.finalizedHead
@@ -1313,13 +1335,16 @@ func formatGwei(amount: uint64): string =
     while result[^1] == '0':
       result.setLen(result.len - 1)
 
-proc initStatusBar(node: BeaconNode) =
+proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
   if not isatty(stdout): return
   if not node.config.statusBarEnabled: return
 
-  enableTrueColors()
+  try:
+    enableTrueColors()
+  except Exception as exc: # TODO Exception
+    error "Couldn't enable colors", err = exc.msg
 
-  proc dataResolver(expr: string): string =
+  proc dataResolver(expr: string): string {.raises: [Defect].} =
    template justified: untyped = node.chainDag.head.atEpochStart(
      node.chainDag.headState.data.data.current_justified_checkpoint.epoch)
    # TODO:
@@ -1593,7 +1618,7 @@ proc handleValidatorExitCommand(config: BeaconNodeConf) {.async.} =
       err = err.msg
     quit 1
 
-proc loadEth2Network(config: BeaconNodeConf): Eth2NetworkMetadata =
+proc loadEth2Network(config: BeaconNodeConf): Eth2NetworkMetadata {.raises: [Defect, IOError].} =
   if config.eth2Network.isSome:
     getMetadataForNetwork(config.eth2Network.get)
   else:
@@ -1605,7 +1630,8 @@ proc loadEth2Network(config: BeaconNodeConf): Eth2NetworkMetadata =
       echo "Must specify network on non-mainnet node"
       quit 1
 
-proc loadBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext): BeaconNode =
+proc loadBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext): BeaconNode {.
+    raises: [Defect, CatchableError].} =
   let metadata = config.loadEth2Network()
 
   # Updating the config based on the metadata certainly is not beautiful but it
@@ -1623,7 +1649,7 @@ proc loadBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext): Bea
     metadata.genesisData,
     metadata.genesisDepositsSnapshot)
 
-proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) =
+proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) {.raises: [Defect, CatchableError].} =
   info "Launching beacon node",
       version = fullVersionStr,
       bls_backend = $BLS_BACKEND,
@@ -1639,7 +1665,10 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) =
     let metricsAddress = config.metricsAddress
     notice "Starting metrics HTTP server",
       url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
-    startMetricsHttpServer($metricsAddress, config.metricsPort)
+    try:
+      startMetricsHttpServer($metricsAddress, config.metricsPort)
+    except CatchableError as exc: raise exc
+    except Exception as exc: raiseAssert exc.msg # TODO fix metrics
   else:
     warn "Metrics support disabled, see https://status-im.github.io/nimbus-eth2/metrics-pretty-pictures.html#simple-metrics"
 
@@ -1661,7 +1690,7 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) =
   else:
     node.start()
 
-proc doCreateTestnet(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
+proc doCreateTestnet(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.raises: [Defect, CatchableError].} =
   let launchPadDeposits = try:
     Json.loadFile(config.testnetDepositsFile.string, seq[LaunchPadDeposit])
   except SerializationError as err:
@@ -1714,7 +1743,8 @@ proc doCreateTestnet(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
   writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
   echo "Wrote ", bootstrapFile
 
-proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
+proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
+    raises: [Defect, CatchableError].} =
   case config.depositsCmd
   #[
   of DepositsCmd.create:
@@ -1821,7 +1851,8 @@ proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
   of DepositsCmd.exit:
     waitFor handleValidatorExitCommand(config)
 
-proc doWallets(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
+proc doWallets(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
+    raises: [Defect, CatchableError].} =
   template findWalletWithoutErrors(name: WalletName): auto =
     let res = keystore_management.findWallet(config, name)
     if res.isErr:
@@ -1862,7 +1893,8 @@ proc doWallets(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
   of WalletsCmd.restore:
     restoreWalletInteractively(rng, config)
 
-proc doRecord(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
+proc doRecord(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
+    raises: [Defect, CatchableError].} =
   case config.recordCmd:
   of RecordCmd.create:
     let netKeys = getPersistentNetKeys(rng, config)
@@ -1889,13 +1921,14 @@ proc doRecord(config: BeaconNodeConf, rng: var BrHmacDrbgContext) =
   of RecordCmd.print:
     echo $config.recordPrint
 
-proc doWeb3Cmd(config: BeaconNodeConf) =
+proc doWeb3Cmd(config: BeaconNodeConf) {.raises: [Defect, CatchableError].} =
   case config.web3Cmd:
   of Web3Cmd.test:
     let metadata = config.loadEth2Network()
     waitFor testWeb3Provider(config.web3TestUrl,
                              metadata.depositContractAddress)
 
+{.pop.} # TODO moduletests exceptions
 programMain:
   var
     config = makeBannerAndConfig(clientId, BeaconNodeConf)
@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [Defect].}
+
 # Common routines for a BeaconNode and a ValidatorClient
 
 import
@@ -32,7 +34,7 @@ proc setupStdoutLogging*(logLevel: string) =
     except IOError as err:
       logLoggingFailure(cstring(msg), err)
 
-proc updateLogLevel*(logLevel: string) =
+proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
   # Updates log levels (without clearing old ones)
   let directives = logLevel.split(";")
   try:
@@ -58,15 +60,22 @@ proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
             path = logFileDir, err = ioErrorMsg(lres.error)
           break openLogFile
 
-        if not defaultChroniclesStream.outputs[1].open(logFile):
-          error "Failed to create log file", logFile
+        try:
+          if not defaultChroniclesStream.outputs[1].open(logFile):
+            error "Failed to create log file", logFile
+        except CatchableError as exc:
+          # TODO why is there both exception and bool?
+          error "Failed to create log file", logFile, msg = exc.msg
       else:
         warn "The --log-file option is not active in the current build"
 
   try:
     updateLogLevel(logLevel)
   except ValueError as err:
-    stderr.write "Invalid value for --log-level. " & err.msg
+    try:
+      stderr.write "Invalid value for --log-level. " & err.msg
+    except IOError as exc:
+      echo "Invalid value for --log-level. " & err.msg
    quit 1
 
 template makeBannerAndConfig*(clientId: string, ConfType: type): untyped =
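
The `setupLogging` change above shows how far the exception-tracking effort reaches: even the error path that reports a bad `--log-level` has to account for `stderr.write` raising `IOError`. A compact sketch of that caller-side shape, with hypothetical `updateLevel`/`setupLevel` helpers and no chronicles involved:

```nim
# Sketch (not from the commit): reporting a configuration error without
# letting the report itself escape the raises contract.
{.push raises: [Defect].}

proc updateLevel(level: string) {.raises: [Defect, ValueError].} =
  # Raise ValueError for anything that is not a known level name.
  if level notin ["TRACE", "DEBUG", "INFO", "NOTICE", "WARN", "ERROR", "FATAL"]:
    raise newException(ValueError, "unknown log level: " & level)

proc setupLevel(level: string) =
  try:
    updateLevel(level)
  except ValueError as err:
    # stderr.write may raise IOError, so the fallback uses echo,
    # which the effect system tracks as non-raising.
    try:
      stderr.write "Invalid value for --log-level. " & err.msg & "\n"
    except IOError:
      echo "Invalid value for --log-level. " & err.msg

{.pop.}

when isMainModule:
  setupLevel("DEBUG")   # accepted silently
  setupLevel("LOUD")    # reported, never raises past setupLevel
```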
@@ -1,10 +1,12 @@
 # beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [Defect].}
+
 import
   # Standard library
   os, strutils, tables,
@@ -13,6 +15,8 @@ import
   ./spec/[digest, crypto],
   ./validators/keystore_management
 
+{.pop.} # TODO moduletests exceptions
+
 programMain:
   var validators: Table[ValidatorPubKey, ValidatorPrivKey]
   # load and send all public keys so the BN knows for which ones to ping us
@@ -5,13 +5,15 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [Defect].}
+
 import
   # Standard library
-  std/[os, json, random, strutils],
+  std/[os, random, strutils],
 
   # Nimble packages
   stew/shims/[tables, macros],
-  chronos, confutils, metrics, json_rpc/[rpcclient, jsonmarshal],
+  chronos, confutils, metrics,
   chronicles,
   json_serialization/std/[options, net],
 
@@ -21,7 +23,6 @@ import
   ./sync/sync_manager,
   "."/[conf, beacon_clock, version],
   ./networking/[eth2_network, eth2_discovery],
-  ./rpc/eth2_json_rpc_serialization,
   ./beacon_node_types,
   ./nimbus_binary_common,
   ./ssz/merkleization,
@@ -275,6 +276,8 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
   addTimer(nextSlotStart) do (p: pointer):
     asyncCheck vc.onSlotStart(slot, nextSlot)
 
+{.pop.} # TODO moduletests exceptions
+
 programMain:
   let config = makeBannerAndConfig("Nimbus validator client " & fullVersionStr, ValidatorClientConf)
 
@@ -1,13 +1,16 @@
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [Defect].}
+
 import
   std/[parseutils, sequtils, strutils, deques, sets],
   stew/results,
-  json_rpc/[rpcserver, jsonmarshal],
+  json_rpc/servers/httpserver,
   chronicles,
   nimcrypto/utils as ncrutils,
   ../beacon_node_common,
@@ -35,7 +38,7 @@ type
 template unimplemented() =
   raise (ref CatchableError)(msg: "Unimplemented")
 
-proc parsePubkey(str: string): ValidatorPubKey =
+proc parsePubkey(str: string): ValidatorPubKey {.raises: [Defect, ValueError].} =
   const expectedLen = RawPubKeySize * 2 + 2
   if str.len != expectedLen: # +2 because of the `0x` prefix
     raise newException(ValueError,
@@ -43,7 +46,7 @@ proc parsePubkey(str: string): ValidatorPubKey =
                        $str.len & " provided")
   let pubkeyRes = fromHex(ValidatorPubKey, str)
   if pubkeyRes.isErr:
-    raise newException(CatchableError, "Not a valid public key")
+    raise newException(ValueError, "Not a valid public key")
   return pubkeyRes[]
 
 proc createIdQuery(ids: openArray[string]): Result[ValidatorQuery, string] =
@@ -71,8 +74,11 @@ proc createIdQuery(ids: openArray[string]): Result[ValidatorQuery, string] =
       res.keyset.incl(pubkeyRes.get())
     else:
       var tmp: uint64
-      if parseBiggestUInt(item, tmp) != len(item):
-        return err("Incorrect index value")
+      try:
+        if parseBiggestUInt(item, tmp) != len(item):
+          return err("Incorrect index value")
+      except ValueError:
+        return err("Cannot parse index value: " & item)
       res.ids.add(tmp)
   ok(res)
 
@@ -151,7 +157,7 @@ proc getStatus(validator: Validator,
   else:
     err("Invalid validator status")
 
-proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData =
+proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData {.raises: [Defect, CatchableError].} =
   result = case blockId:
     of "head":
       node.chainDag.get(node.chainDag.head)
@@ -172,7 +178,8 @@ proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData =
         raise newException(CatchableError, "Block not found")
       node.chainDag.get(blockSlot.blck)
 
-proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
+proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
+    raises: [Exception].} = # TODO fix json-rpc
   rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
     return (
       genesis_time: node.chainDag.headState.data.data.genesis_time,
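
In `createIdQuery` above, a parsing failure is turned into a `Result` error rather than being allowed to escape as `ValueError`, which keeps the surrounding `raises: [Defect]` contract intact. A stripped-down sketch of that parse-to-Result step, with a hypothetical `parseIndex` helper; it assumes the `stew/results` package already imported by this file:

```nim
# Sketch (not from the commit): convert parsing failures into Result errors.
{.push raises: [Defect].}

import std/strutils
import stew/results

proc parseIndex(item: string): Result[uint64, string] =
  ## Parse a decimal validator index, reporting failures as a Result error.
  var tmp: uint64
  try:
    if parseBiggestUInt(item, tmp) != len(item):
      return err("Incorrect index value")
  except ValueError:
    return err("Cannot parse index value: " & item)
  ok(tmp)

{.pop.}

when isMainModule:
  doAssert parseIndex("123").get() == 123'u64
  doAssert parseIndex("123abc").isErr
  doAssert parseIndex("99999999999999999999999").isErr  # overflow -> ValueError
```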
@@ -1,12 +1,15 @@
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [Defect].}
+
 import
   stew/endians2,
-  json_rpc/[rpcserver, jsonmarshal],
+  json_rpc/servers/httpserver,
   chronicles,
   nimcrypto/utils as ncrutils,
   ../beacon_node_common,
@@ -27,7 +30,8 @@ func getDepositAddress(node: BeaconNode): string =
   else:
     $node.eth1Monitor.depositContractAddress
 
-proc installConfigApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
+proc installConfigApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
+    raises: [Exception].} = # TODO fix json-rpc
   rpcServer.rpc("get_v1_config_fork_schedule") do () -> seq[Fork]:
     return @[node.chainDag.headState.data.data.fork]
 
@@ -1,6 +1,15 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
   std/sequtils,
-  json_rpc/[rpcserver, jsonmarshal],
+  json_rpc/servers/httpserver,
   chronicles,
   ../version, ../beacon_node_common,
   ../networking/[eth2_network, peer_pool],
@@ -12,7 +21,8 @@ logScope: topics = "debugapi"
 type
   RpcServer = RpcHttpServer
 
-proc installDebugApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
+proc installDebugApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
+    raises: [Exception].} = # TODO fix json-rpc
   rpcServer.rpc("get_v1_debug_beacon_states_stateId") do (
       stateId: string) -> BeaconState:
     withStateForStateId(stateId):
@@ -1,6 +1,28 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
+# The serializations in this file are approximations of
+# https://ethereum.github.io/eth2.0-APIs/#/ but where written before the standard
+# had materialized - they've now made it out to releases which means the easiest
+# thing to do is to maintain them as-is, even if there are mismatches. In
+# particular, numbers are serialized as strings in the eth2 API - here, they
+# use numbers instead.
+#
+# Using numbers creates problems - uint64 which often appears in eth2 can't
+# portably be represented since many json parsers balk at anything >2^53 and
+# start losing precision. The other issue is the json parser in nim - it can't
+# handle numbers >2^63, either crashing or giving wrong results:
+# https://github.com/status-im/nimbus-eth2/issues/2430
+
 import
   # Standard library
-  std/[tables, json, typetraits],
+  std/[tables, typetraits],
 
   # Nimble packages
   stew/byteutils,
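
The comment block added above is the rationale for the custom `%`/`fromJson` overloads that follow, including the `Epoch` overload near the end of this diff, which deliberately keeps printing the `FAR_FUTURE_EPOCH` sentinel as -1. A tiny standalone illustration of the two numeric pitfalls it describes (plain assertions, not part of the diff):

```nim
# Sketch (not from the commit): numbers past 2^53 cannot round-trip through a
# float64-based JSON number, and uint64 sentinels such as
# 0xFFFF_FFFF_FFFF_FFFF come out as -1 when squeezed into a signed 64-bit
# JSON integer.
when isMainModule:
  const farFuture = high(uint64)              # FAR_FUTURE_EPOCH-style sentinel
  doAssert cast[int64](farFuture) == -1       # what the legacy `%` overload emits

  let big = 1'u64 shl 53                      # 2^53
  doAssert float64(big) == float64(big + 1)   # precision loss starts here
```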
@@ -10,11 +32,13 @@ import
   ../ssz/types,
   ../spec/[datatypes, crypto, digest]
 
+export jsonmarshal, datatypes, crypto, digest
+
 proc toJsonHex(data: openArray[byte]): string =
   # Per the eth2 API spec, hex arrays are printed with leading 0x
   "0x" & toHex(data)
 
-proc fromJson*(n: JsonNode, argName: string, result: var ValidatorPubKey) =
+proc fromJson*(n: JsonNode, argName: string, result: var ValidatorPubKey) {.raises: [Defect, ValueError].} =
   n.kind.expect(JString, argName)
   var tmp = ValidatorPubKey.fromHex(n.getStr()).tryGet()
   if not tmp.loadWithCache().isSome():
@@ -24,33 +48,33 @@ proc fromJson*(n: JsonNode, argName: string, result: var ValidatorPubKey) =
 proc `%`*(pubkey: ValidatorPubKey): JsonNode =
   newJString(toJsonHex(toRaw(pubkey)))
 
-proc fromJson*(n: JsonNode, argName: string, result: var List) =
+proc fromJson*(n: JsonNode, argName: string, result: var List) {.raises: [Defect, ValueError].} =
   fromJson(n, argName, asSeq result)
 
 proc `%`*(list: List): JsonNode = %(asSeq(list))
 
-proc fromJson*(n: JsonNode, argName: string, result: var BitList) =
+proc fromJson*(n: JsonNode, argName: string, result: var BitList) {.raises: [Defect, ValueError].} =
   n.kind.expect(JString, argName)
   result = type(result)(hexToSeqByte(n.getStr()))
 
 proc `%`*(bitlist: BitList): JsonNode =
   newJString(toJsonHex(seq[byte](BitSeq(bitlist))))
 
-proc fromJson*(n: JsonNode, argName: string, result: var ValidatorSig) =
+proc fromJson*(n: JsonNode, argName: string, result: var ValidatorSig) {.raises: [Defect, ValueError].} =
   n.kind.expect(JString, argName)
   result = ValidatorSig.fromHex(n.getStr()).tryGet()
 
 proc `%`*(value: ValidatorSig): JsonNode =
   newJString(toJsonHex(toRaw(value)))
 
-proc fromJson*(n: JsonNode, argName: string, result: var TrustedSig) =
+proc fromJson*(n: JsonNode, argName: string, result: var TrustedSig) {.raises: [Defect, ValueError].} =
   n.kind.expect(JString, argName)
   hexToByteArray(n.getStr(), result.data)
 
 proc `%`*(value: TrustedSig): JsonNode =
   newJString(toJsonHex(toRaw(value)))
 
-proc fromJson*(n: JsonNode, argName: string, result: var Version) =
+proc fromJson*(n: JsonNode, argName: string, result: var Version) {.raises: [Defect, ValueError].} =
   n.kind.expect(JString, argName)
   hexToByteArray(n.getStr(), array[4, byte](result))
 
@@ -58,7 +82,7 @@ proc `%`*(value: Version): JsonNode =
   newJString(toJsonHex(distinctBase(value)))
 
 template genFromJsonForIntType(T: untyped) =
-  proc fromJson*(n: JsonNode, argName: string, result: var T) =
+  proc fromJson*(n: JsonNode, argName: string, result: var T) {.raises: [Defect, ValueError].} =
     n.kind.expect(JInt, argName)
     let asInt = n.getBiggestInt()
     when T is Epoch:
@@ -83,10 +107,21 @@ genFromJsonForIntType(Slot)
 genFromJsonForIntType(CommitteeIndex)
 genFromJsonForIntType(ValidatorIndex)
 
+proc `%`*(value: Epoch): JsonNode =
+  # In nim <= 1.2.6, `uint64` was silently cast to int64 resulting in
+  # FAR_FUTURE_EPOCH showing as -1 - this is a hack to maintain that behaviour
+  # in a world where a Defect or an actual correct value is used - the eth2
+  # REST api instead prints all epochs and similar large numbers as strings!
+  # See also https://github.com/status-im/nimbus-eth2/issues/2430
+  newJInt(cast[int64](value))
+
+proc `%`*(value: Slot): JsonNode =
|
||||||
|
newJInt(cast[int64](value))
|
||||||
|
|
||||||
proc `%`*(value: GraffitiBytes): JsonNode =
|
proc `%`*(value: GraffitiBytes): JsonNode =
|
||||||
newJString(toJsonHex(distinctBase(value)))
|
newJString(toJsonHex(distinctBase(value)))
|
||||||
|
|
||||||
proc fromJson*(n: JsonNode, argName: string, value: var GraffitiBytes) =
|
proc fromJson*(n: JsonNode, argName: string, value: var GraffitiBytes) {.raises: [Defect, ValueError].} =
|
||||||
n.kind.expect(JString, argName)
|
n.kind.expect(JString, argName)
|
||||||
value = GraffitiBytes.init n.getStr()
|
value = GraffitiBytes.init n.getStr()
|
||||||
|
|
||||||
@ -99,13 +134,13 @@ proc `%`*(value: ValidatorIndex): JsonNode =
|
|||||||
proc `%`*(value: Eth2Digest): JsonNode =
|
proc `%`*(value: Eth2Digest): JsonNode =
|
||||||
newJString(toJsonHex(value.data))
|
newJString(toJsonHex(value.data))
|
||||||
|
|
||||||
proc fromJson*(n: JsonNode, argName: string, result: var Eth2Digest) =
|
proc fromJson*(n: JsonNode, argName: string, result: var Eth2Digest) {.raises: [Defect, ValueError].} =
|
||||||
n.kind.expect(JString, argName)
|
n.kind.expect(JString, argName)
|
||||||
hexToByteArray(n.getStr(), result.data)
|
hexToByteArray(n.getStr(), result.data)
|
||||||
|
|
||||||
proc `%`*(value: BitSeq): JsonNode =
|
proc `%`*(value: BitSeq): JsonNode =
|
||||||
newJString(toJsonHex(value.bytes))
|
newJString(toJsonHex(value.bytes))
|
||||||
|
|
||||||
proc fromJson*(n: JsonNode, argName: string, result: var BitSeq) =
|
proc fromJson*(n: JsonNode, argName: string, result: var BitSeq) {.raises: [Defect, ValueError].} =
|
||||||
n.kind.expect(JString, argName)
|
n.kind.expect(JString, argName)
|
||||||
result = BitSeq(hexToSeqByte(n.getStr()))
|
result = BitSeq(hexToSeqByte(n.getStr()))
|
||||||
|
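For context on the `%`(Epoch) workaround above, a small standalone sketch (not part of the diff) shows why the int64 cast reproduces the historical rendering: FAR_FUTURE_EPOCH is 2^64 - 1, which the cast prints as -1.

```nim
import std/json

# Standalone illustration only; the real Epoch type lives in spec/datatypes.
const FAR_FUTURE_EPOCH = not 0'u64           # 2^64 - 1, as in the eth2 spec
echo newJInt(cast[int64](FAR_FUTURE_EPOCH))  # prints -1, matching nim <= 1.2.6 output
echo newJInt(cast[int64](12345'u64))         # small epochs are unaffected
```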
@@ -1,11 +1,14 @@
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
import
-  json_rpc/[rpcserver, jsonmarshal],
+  json_rpc/servers/httpserver,
  chronicles,
  ../beacon_node_common

@@ -17,6 +20,7 @@ type
template unimplemented() =
  raise (ref CatchableError)(msg: "Unimplemented")

-proc installEventApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
+proc installEventApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
+    raises: [Exception].} = # TODO fix json-rpc
  rpcServer.rpc("get_v1_events") do () -> JsonNode:
    unimplemented()
@@ -1,24 +1,25 @@
+# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
import
  std/[deques, sequtils, sets],
  chronos,
-  stew/shims/macros,
  stew/byteutils,
-  json_rpc/[rpcserver, jsonmarshal],
+  json_rpc/servers/httpserver,
+  libp2p/protocols/pubsub/pubsubpeer,

-  rpc_utils,
-  ../beacon_node_common, ../nimbus_binary_common,
-  ../networking/eth2_network,
-  ../eth1/eth1_monitor,
-  ../validators/validator_duties,
-  ../spec/[digest, datatypes, presets],
-
-  libp2p/protocols/pubsub/pubsubpeer
+  "."/[rpc_utils, eth2_json_rpc_serialization],
+  ".."/[
+    beacon_node_common, nimbus_binary_common, networking/eth2_network,
+    eth1/eth1_monitor, validators/validator_duties],
+  ../spec/[digest, datatypes, presets]

logScope: topics = "nimbusapi"

@@ -32,7 +33,8 @@ type
    line*: int
    state*: string

-proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
+proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
+    raises: [Exception].} = # TODO fix json-rpc
  ## Install non-standard api handlers - some of these are used by 3rd-parties
  ## such as eth2stats, pending a full REST api
  rpcServer.rpc("getBeaconHead") do () -> Slot:
@@ -85,6 +87,10 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
    updateLogLevel(level)
    return true

+  rpcServer.rpc("setGraffiti") do (graffiti: string) -> bool:
+    node.graffitiBytes = GraffitiBytes.init(graffiti)
+    return true
+
  rpcServer.rpc("getEth1Chain") do () -> seq[Eth1Block]:
    result = if node.eth1Monitor != nil:
      mapIt(node.eth1Monitor.blocks, it)
@@ -152,7 +158,6 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
      for peer in v:
        peers.add(peer.toNode(backOff.getOrDefault(peer.peerId)))

-
      gossipsub.add(topic, peers)

    res.add("gossipsub", gossipsub)
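The new `setGraffiti` handler is a plain JSON-RPC method. A hypothetical client-side sketch follows; the RPC address/port and the single string parameter layout are assumptions for illustration, not taken from the diff:

```nim
import std/[httpclient, json]

# Hypothetical usage sketch: assumes the node's JSON-RPC server is reachable
# at 127.0.0.1:9190 and that `setGraffiti` takes a single string parameter.
let client = newHttpClient()
client.headers = newHttpHeaders({"Content-Type": "application/json"})
let body = %*{"jsonrpc": "2.0", "id": 1, "method": "setGraffiti", "params": ["hello world"]}
let resp = client.request("http://127.0.0.1:9190", httpMethod = HttpPost, body = $body)
echo resp.body  # expected shape: {"jsonrpc":"2.0","id":1,"result":true}
```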
@@ -1,9 +1,19 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
import std/options,
  chronicles,
-  json_rpc/[rpcserver, jsonmarshal],
+  json_rpc/servers/httpserver,
  eth/p2p/discoveryv5/enr,
  libp2p/[multiaddress, multicodec],
  nimcrypto/utils as ncrutils,
+  ./eth2_json_rpc_serialization,
  ../beacon_node_common, ../version,
  ../networking/[eth2_network, peer_pool],
  ../sync/sync_manager,
@@ -15,9 +25,6 @@ logScope: topics = "nodeapi"
type
  RpcServer = RpcHttpServer

-template unimplemented() =
-  raise (ref CatchableError)(msg: "Unimplemented")
-
proc validateState(state: Option[seq[string]]): Option[set[ConnectionState]] =
  var res: set[ConnectionState]
  if state.isSome():
@@ -142,7 +149,8 @@ proc getP2PAddresses(node: BeaconNode): Option[seq[string]] =
      addresses.add($(resa.get()))
  return some(addresses)

-proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
+proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
+    raises: [Exception].} = # TODO fix json-rpc
  rpcServer.rpc("get_v1_node_identity") do () -> NodeIdentityTuple:
    let discoveryAddresses =
      block:
@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
import
  std/[strutils, parseutils],
  stew/byteutils,
@@ -26,16 +35,16 @@ template withStateForStateId*(stateId: string, body: untyped): untyped =
proc toBlockSlot*(blckRef: BlockRef): BlockSlot =
  blckRef.atSlot(blckRef.slot)

-proc parseRoot*(str: string): Eth2Digest =
-  return Eth2Digest(data: hexToByteArray[32](str))
+proc parseRoot*(str: string): Eth2Digest {.raises: [Defect, ValueError].} =
+  Eth2Digest(data: hexToByteArray[32](str))

-func checkEpochToSlotOverflow*(epoch: Epoch) =
+func checkEpochToSlotOverflow*(epoch: Epoch) {.raises: [Defect, ValueError].} =
  const maxEpoch = compute_epoch_at_slot(not 0'u64)
  if epoch >= maxEpoch:
    raise newException(
      ValueError, "Requesting epoch for which slot would overflow")

-proc doChecksAndGetCurrentHead*(node: BeaconNode, slot: Slot): BlockRef =
+proc doChecksAndGetCurrentHead*(node: BeaconNode, slot: Slot): BlockRef {.raises: [Defect, CatchableError].} =
  result = node.chainDag.head
  if not node.isSynced(result):
    raise newException(CatchableError, "Cannot fulfill request until node is synced")
@@ -43,36 +52,36 @@ proc doChecksAndGetCurrentHead*(node: BeaconNode, slot: Slot): BlockRef =
  if result.slot + uint64(2 * SLOTS_PER_EPOCH) < slot:
    raise newException(CatchableError, "Requesting way ahead of the current head")

-proc doChecksAndGetCurrentHead*(node: BeaconNode, epoch: Epoch): BlockRef =
+proc doChecksAndGetCurrentHead*(node: BeaconNode, epoch: Epoch): BlockRef {.raises: [Defect, CatchableError].} =
  checkEpochToSlotOverflow(epoch)
  node.doChecksAndGetCurrentHead(epoch.compute_start_slot_at_epoch)

-proc getBlockSlotFromString*(node: BeaconNode, slot: string): BlockSlot =
+proc getBlockSlotFromString*(node: BeaconNode, slot: string): BlockSlot {.raises: [Defect, CatchableError].} =
  if slot.len == 0:
    raise newException(ValueError, "Empty slot number not allowed")
  var parsed: BiggestUInt
  if parseBiggestUInt(slot, parsed) != slot.len:
    raise newException(ValueError, "Not a valid slot number")
  let head = node.doChecksAndGetCurrentHead(parsed.Slot)
-  return head.atSlot(parsed.Slot)
+  head.atSlot(parsed.Slot)

-proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot =
-  result = case stateId:
+proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot {.raises: [Defect, CatchableError].} =
+  case stateId:
  of "head":
    node.chainDag.head.toBlockSlot()
  of "genesis":
    node.chainDag.getGenesisBlockSlot()
  of "finalized":
    node.chainDag.finalizedHead
  of "justified":
    node.chainDag.head.atEpochStart(
      node.chainDag.headState.data.data.current_justified_checkpoint.epoch)
  else:
    if stateId.startsWith("0x"):
      let blckRoot = parseRoot(stateId)
      let blckRef = node.chainDag.getRef(blckRoot)
      if blckRef.isNil:
        raise newException(CatchableError, "Block not found")
      blckRef.toBlockSlot()
    else:
      node.getBlockSlotFromString(stateId)
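As a quick sanity check of the `checkEpochToSlotOverflow` bound above, the arithmetic can be reproduced standalone. SLOTS_PER_EPOCH = 32 is the mainnet value, and the helper below is a stand-in for `compute_epoch_at_slot`:

```nim
const SLOTS_PER_EPOCH = 32'u64            # mainnet preset value

func epochAtSlot(slot: uint64): uint64 =  # stand-in for compute_epoch_at_slot
  slot div SLOTS_PER_EPOCH

let maxEpoch = epochAtSlot(not 0'u64)     # same bound the proc computes
echo maxEpoch                             # 576460752303423487
echo maxEpoch * SLOTS_PER_EPOCH           # start slot of maxEpoch; (maxEpoch + 1) * 32 would wrap
```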
@@ -5,13 +5,15 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
import
  # Standard library
  std/[tables],

  # Nimble packages
  stew/[objects],
-  json_rpc/[rpcserver, jsonmarshal],
+  json_rpc/servers/httpserver,
  chronicles,

  # Local modules
@@ -29,7 +31,8 @@ logScope: topics = "valapi"
type
  RpcServer* = RpcHttpServer

-proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
+proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
+    raises: [Exception].} = # TODO fix json-rpc
  rpcServer.rpc("get_v1_validator_block") do (
      slot: Slot, graffiti: GraffitiBytes, randao_reveal: ValidatorSig) -> BeaconBlock:
    debug "get_v1_validator_block", slot = slot

beacon_chain/spec/datatypes/altair.nim (new file, 116 lines)
@@ -0,0 +1,116 @@
+# beacon_chain
+# Copyright (c) 2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+# This file contains data types that are part of the spec and thus subject to
+# serialization and spec updates.
+#
+# The spec folder in general contains code that has been hoisted from the
+# specification and that follows the spec as closely as possible, so as to make
+# it easy to keep up-to-date.
+#
+# These datatypes are used as specifications for serialization - thus should not
+# be altered outside of what the spec says. Likewise, they should not be made
+# `ref` - this can be achieved by wrapping them in higher-level
+# types / composition
+
+# TODO Careful, not nil analysis is broken / incomplete and the semantics will
+# likely change in future versions of the language:
+# https://github.com/nim-lang/RFCs/issues/250
+{.experimental: "notnil".}
+
+{.push raises: [Defect].}
+
+import
+  std/macros,
+  stew/assign2,
+  json_serialization/types as jsonTypes,
+  ../../ssz/types as sszTypes, ../crypto, ../digest, ../presets
+
+import ./base
+export base
+
+const
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/beacon-chain.md#incentivization-weights
+  TIMELY_HEAD_WEIGHT* = 12
+  TIMELY_SOURCE_WEIGHT* = 12
+  TIMELY_TARGET_WEIGHT* = 24
+  SYNC_REWARD_WEIGHT* = 8
+  WEIGHT_DENOMINATOR* = 64
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/validator.md#misc
+  TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 4
+  SYNC_COMMITTEE_SUBNET_COUNT* = 8
+
+type
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/beacon-chain.md#syncaggregate
+  SyncAggregate* = object
+    sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE]
+    sync_committee_signature*: ValidatorSig
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/beacon-chain.md#synccommittee
+  SyncCommittee* = object
+    pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
+    pubkey_aggregates*:
+      HashArray[
+        Limit SYNC_COMMITTEE_SIZE div SYNC_PUBKEYS_PER_AGGREGATE,
+        ValidatorPubKey]
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/validator.md#synccommitteesignature
+  SyncCommitteeSignature* = object
+    slot*: Slot ##\
+    ## Slot to which this contribution pertains
+
+    beacon_block_root*: Eth2Digest ##\
+    ## Block root for this signature
+
+    validator_index*: uint64 ##\
+    ## Index of the validator that produced this signature
+
+    signature*: ValidatorSig ##\
+    ## Signature by the validator over the block root of `slot`
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/validator.md#synccommitteecontribution
+  SyncCommitteeContribution* = object
+    slot*: Slot ##\
+    ## Slot to which this contribution pertains
+
+    beacon_block_root*: Eth2Digest ##\
+    ## Block root for this contribution
+
+    subcommittee_index*: uint64 ##\
+    ## The subcommittee this contribution pertains to out of the broader sync
+    ## committee
+
+    aggregation_bits*:
+      BitArray[SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT] ##\
+    ## A bit is set if a signature from the validator at the corresponding
+    ## index in the subcommittee is present in the aggregate `signature`.
+
+    signature*: ValidatorSig ##\
+    ## Signature by the validator(s) over the block root of `slot`
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/validator.md#contributionandproof
+  ContributionAndProof* = object
+    aggregator_index*: uint64
+    contribution*: SyncCommitteeContribution
+    selection_proof*: ValidatorSig
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/validator.md#signedcontributionandproof
+  SignedContributionAndProof* = object
+    message*: ContributionAndProof
+    signature*: ValidatorSig
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/validator.md#synccommitteesigningdata
+  SyncCommitteeSigningData* = object
+    slot*: Slot
+    subcommittee_index*: uint64
+
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/specs/altair/beacon-chain.md#participation-flag-indices
+  ValidatorFlag* = enum
+    TIMELY_HEAD_FLAG = 0
+    TIMELY_SOURCE_FLAG = 1
+    TIMELY_TARGET_FLAG = 2
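A quick numeric check of the `aggregation_bits` sizing in `SyncCommitteeContribution`, using the mainnet Altair preset values introduced in this commit:

```nim
const
  SYNC_COMMITTEE_SIZE = 1024          # mainnet preset (32 in the minimal preset)
  SYNC_COMMITTEE_SUBNET_COUNT = 8

# Each contribution covers one subcommittee, so its bitfield needs
# SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT entries.
echo SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT  # 128 bits on mainnet
```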
@@ -25,13 +25,13 @@
{.push raises: [Defect].}

import
-  std/[macros, hashes, intsets, json, strutils, tables, typetraits],
+  std/[macros, hashes, intsets, strutils, tables, typetraits],
  stew/[assign2, byteutils], chronicles,
-  json_serialization/types as jsonTypes,
+  json_serialization,
  ../../version, ../../ssz/types as sszTypes, ../crypto, ../digest, ../presets

export
-  sszTypes, presets
+  sszTypes, presets, json_serialization

# Presently, we're reusing the data types from the serialization (uint64) in the
# objects we pass around to the beacon chain logic, thus keeping the two
@@ -126,7 +126,7 @@ type
    DOMAIN_SYNC_COMMITTEE = 7

  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#custom-types
-  Domain* = array[32, byte]
+  Eth2Domain* = array[32, byte]

  # https://github.com/nim-lang/Nim/issues/574 and be consistent across
  # 32-bit and 64-bit word platforms.
@@ -324,7 +324,7 @@ type
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#signingdata
  SigningData* = object
    object_root*: Eth2Digest
-    domain*: Domain
+    domain*: Eth2Domain

  GraffitiBytes* = distinct array[MAX_GRAFFITI_SIZE, byte]

@@ -659,14 +659,6 @@ func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData =
    pubkey: validator.pubkey,
    withdrawal_credentials: validator.withdrawal_credentials)

-func getDepositMessage*(depositData: DepositData): DepositMessage =
-  result.pubkey = depositData.pubkey
-  result.amount = depositData.amount
-  result.withdrawal_credentials = depositData.withdrawal_credentials
-
-func getDepositMessage*(deposit: Deposit): DepositMessage =
-  deposit.data.getDepositMessage
-
# TODO when https://github.com/nim-lang/Nim/issues/14440 lands in Status's Nim,
# switch proc {.noSideEffect.} to func.
template ethTimeUnit(typ: type) {.dirty.} =
@@ -702,7 +694,6 @@ template ethTimeUnit(typ: type) {.dirty.} =
  # Nim integration
  proc `$`*(x: typ): string {.borrow, noSideEffect.}
  proc hash*(x: typ): Hash {.borrow, noSideEffect.}
-  proc `%`*(x: typ): JsonNode {.borrow, noSideEffect.}

  # Serialization
  proc writeValue*(writer: var JsonWriter, value: typ)
@@ -729,6 +720,14 @@ proc readValue*(reader: var JsonReader, value: var CommitteeIndex)
    {.raises: [IOError, SerializationError, Defect].} =
  value = CommitteeIndex reader.readValue(distinctBase CommitteeIndex)

+proc writeValue*(writer: var JsonWriter, value: HashList)
+    {.raises: [IOError, SerializationError, Defect].} =
+  writeValue(writer, value.data)
+
+proc readValue*(reader: var JsonReader, value: var HashList)
+    {.raises: [IOError, SerializationError, Defect].} =
+  readValue(reader, value.data)
+
template writeValue*(writer: var JsonWriter, value: Version | ForkDigest) =
  writeValue(writer, $value)

@@ -991,10 +990,6 @@ chronicles.formatIt AttestationData: it.shortLog
chronicles.formatIt Attestation: it.shortLog
chronicles.formatIt Checkpoint: it.shortLog

-import json_serialization
-export json_serialization
-export writeValue, readValue
-
const
  # http://facweb.cs.depaul.edu/sjost/it212/documents/ascii-pr.htm
  PrintableAsciiChars = {'!'..'~'}
@@ -24,17 +24,17 @@
import
  # Standard library
  std/hashes,
-  #Status libraries
+  # Status libraries
  chronicles,
  nimcrypto/[sha2, hash],
  stew/byteutils,
-  eth/common/eth_types_json_serialization,
+  json_serialization,
  blscurve

export
  # Exports from sha2 / hash are explicit to avoid exporting upper-case `$` and
  # constant-time `==`
-  sha2.update, hash.fromHex, readValue, writeValue
+  sha2.update, hash.fromHex, json_serialization

type
  Eth2Digest* = MDigest[32 * 8] ## `hash32` from spec
@@ -110,3 +110,12 @@ func `==`*(a, b: Eth2Digest): bool =
  # nimcrypto uses a constant-time comparison for all MDigest types which for
  # Eth2Digest is unnecessary - the type should never hold a secret!
  equalMem(unsafeAddr a.data[0], unsafeAddr b.data[0], sizeof(a.data))
+
+proc writeValue*(w: var JsonWriter, a: Eth2Digest) {.raises: [Defect, IOError, SerializationError].} =
+  w.writeValue $a
+
+proc readValue*(r: var JsonReader, a: var Eth2Digest) {.raises: [Defect, IOError, SerializationError].} =
+  try:
+    a = fromHex(type(a), r.readValue(string))
+  except ValueError:
+    raiseUnexpectedValue(r, "Hex string expected")
@@ -1,7 +1,5 @@
import
  options,
-  ../[datatypes, digest, crypto],
-  json_rpc/jsonmarshal,
  callsigs_types

proc get_v1_beacon_genesis(): BeaconGenesisTuple
@@ -1,13 +1,11 @@
import
-  std/[os, json],
-  json_rpc/[rpcclient, jsonmarshal],
+  std/os,
+  json_rpc/rpcclient,
  ../../rpc/eth2_json_rpc_serialization,
-  ../crypto, ../digest, ../datatypes,
-  callsigs_types
+  ./callsigs_types

export
  rpcclient,
-  crypto, digest, datatypes,
  callsigs_types,
  eth2_json_rpc_serialization

@@ -1,11 +1,7 @@
import
-  # Standard library
-  options,
-  # Local modules
-  # TODO for some reason "../[datatypes, digest, crypto]" results in "Error: cannot open file"
-  ../datatypes,
-  ../digest,
-  ../crypto
+  ".."/[datatypes, digest, crypto]
+
+export datatypes, digest, crypto

type
  AttesterDuties* = tuple
@@ -1,8 +1,7 @@
import
-  options,
-  ../[datatypes, digest, crypto],
-  json_rpc/jsonmarshal,
  callsigs_types

+export callsigs_types
+
proc get_v1_debug_beacon_states_stateId(stateId: string): BeaconState
proc get_v1_debug_beacon_heads(): seq[tuple[root: Eth2Digest, slot: Slot]]
@@ -1,9 +1,8 @@
import
-  options,
-  ../[datatypes, digest, crypto],
-  json_rpc/jsonmarshal,
  callsigs_types

+export callsigs_types
+
proc getBeaconHead(): Slot
proc getChainHead(): JsonNode
proc getSyncing(): bool
@@ -1,7 +1,5 @@
import
  options,
-  ../[datatypes, digest, crypto],
-  json_rpc/jsonmarshal,
  callsigs_types

proc get_v1_node_identity(): NodeIdentityTuple
@@ -1,12 +1,7 @@
import
-  # Standard library
  options,
-  # Local modules
-  ../[datatypes, digest, crypto],
-  json_rpc/jsonmarshal,
  callsigs_types

# calls that return a bool are actually without a return type in the main REST API
# spec but nim-json-rpc requires that all RPC calls have a return type.
@@ -17,11 +17,6 @@ import
  # Internal
  ./datatypes, ./digest, ./crypto, ../ssz/merkleization

-type
-  # This solves an ambiguous identifier Error in some contexts
-  # (other candidate is nativesockets.Domain)
-  Domain = datatypes.Domain
-
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#integer_squareroot
func integer_squareroot*(n: SomeInteger): SomeInteger =
  ## Return the largest integer ``x`` such that ``x**2 <= n``.
@@ -129,7 +124,7 @@ func compute_fork_digest*(current_version: Version,
func compute_domain*(
    domain_type: DomainType,
    fork_version: Version,
-    genesis_validators_root: Eth2Digest = ZERO_HASH): Domain =
+    genesis_validators_root: Eth2Digest = ZERO_HASH): Eth2Domain =
  ## Return the domain for the ``domain_type`` and ``fork_version``.
  let fork_data_root =
    compute_fork_data_root(fork_version, genesis_validators_root)
@@ -138,7 +133,10 @@ func compute_domain*(

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_domain
func get_domain*(
-    fork: Fork, domain_type: DomainType, epoch: Epoch, genesis_validators_root: Eth2Digest): Domain =
+    fork: Fork,
+    domain_type: DomainType,
+    epoch: Epoch,
+    genesis_validators_root: Eth2Digest): Eth2Domain =
  ## Return the signature domain (fork version concatenated with domain type)
  ## of a message.
  let fork_version =
@@ -149,13 +147,13 @@ func get_domain*(
  compute_domain(domain_type, fork_version, genesis_validators_root)

func get_domain*(
-    state: BeaconState, domain_type: DomainType, epoch: Epoch): Domain =
+    state: BeaconState, domain_type: DomainType, epoch: Epoch): Eth2Domain =
  ## Return the signature domain (fork version concatenated with domain type)
  ## of a message.
  get_domain(state.fork, domain_type, epoch, state.genesis_validators_root)

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#compute_signing_root
-func compute_signing_root*(ssz_object: auto, domain: Domain): Eth2Digest =
+func compute_signing_root*(ssz_object: auto, domain: Eth2Domain): Eth2Digest =
  ## Return the signing root of an object by calculating the root of the
  ## object-domain tree.
  let domain_wrapped_object = SigningData(
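The `Eth2Domain` rename does not change the layout `compute_domain` produces: 4 bytes of domain type followed by the first 28 bytes of the fork data root. A self-contained sketch of that layout (the values below are placeholders, not taken from the codebase):

```nim
var domain: array[32, byte]             # Eth2Domain is array[32, byte]
let domainType = [byte 7, 0, 0, 0]      # placeholder, e.g. DOMAIN_SYNC_COMMITTEE
var forkDataRoot: array[32, byte]       # placeholder for compute_fork_data_root(...)

for i in 0 ..< 4:
  domain[i] = domainType[i]
for i in 0 ..< 28:
  domain[4 + i] = forkDataRoot[i]
echo domain.len                         # 32
```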
@@ -8,8 +8,8 @@
{.push raises: [Defect].}

import
-  std/[intsets, strformat],
-  ./datatypes, ./helpers, ./validator
+  std/strformat,
+  ./datatypes

const
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#topics-and-messages
@@ -85,28 +85,3 @@ func getAttestationTopic*(forkDigest: ForkDigest, subnetIndex: uint64):
    &"/eth2/{$forkDigest}/beacon_attestation_{subnetIndex}/ssz"
  except ValueError as e:
    raiseAssert e.msg
-
-# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#validator-assignments
-iterator get_committee_assignments*(
-    state: BeaconState, epoch: Epoch,
-    validator_indices: IntSet,
-    cache: var StateCache):
-    tuple[validatorIndices: IntSet,
-      committeeIndex: CommitteeIndex,
-      subnetIndex: uint8, slot: Slot] =
-  let
-    committees_per_slot = get_committee_count_per_slot(state, epoch, cache)
-    start_slot = compute_start_slot_at_epoch(epoch)
-
-  for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
-    for index in 0'u64 ..< committees_per_slot:
-      let
-        idx = index.CommitteeIndex
-        includedIndices =
-          toIntSet(get_beacon_committee(state, slot, idx, cache)) *
-          validator_indices
-      if includedIndices.len > 0:
-        yield (
-          includedIndices, idx,
-          compute_subnet_for_attestation(committees_per_slot, slot, idx).uint8,
-          slot)
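For reference, the topic-string formatting that remains in `getAttestationTopic` can be exercised standalone; the fork digest below is a made-up plain string rather than the real ForkDigest type:

```nim
import std/strformat

let forkDigest = "b5303f2a"   # made-up hex digest for illustration
let subnetIndex = 3'u64
echo &"/eth2/{forkDigest}/beacon_attestation_{subnetIndex}/ssz"
# -> /eth2/b5303f2a/beacon_attestation_3/ssz
```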
@@ -152,13 +152,29 @@

when const_preset == "mainnet":
  template defaultRuntimePreset*: auto = mainnetRuntimePreset
-  import ./presets/v1_0_1/mainnet
-  export mainnet
+  import
+    ./presets/v1_0_1/mainnet as phase0Mainnet,
+    ./presets/altair/mainnet as altairMainnet
+
+  # https://github.com/nim-lang/Nim/issues/17511 workaround
+  static:
+    discard phase0Mainnet.CONFIG_NAME
+    discard altairMainnet.CONFIG_NAME
+
+  export phase0Mainnet, altairMainnet

elif const_preset == "minimal":
  template defaultRuntimePreset*: auto = minimalRuntimePreset
-  import ./presets/v1_0_1/minimal
-  export minimal
+  import
+    ./presets/v1_0_1/minimal as phase0Minimal,
+    ./presets/altair/minimal as altairMinimal
+
+  # https://github.com/nim-lang/Nim/issues/17511 workaround
+  static:
+    discard phase0Minimal.CONFIG_NAME
+    discard altairMinimal.CONFIG_NAME
+
+  export phase0Minimal, altairMinimal

else:
  macro createConstantsFromPreset*(path: static string): untyped =
@@ -11,7 +11,7 @@
const
  # Updated penalty values
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/mainnet/altair.yaml#L5
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/mainnet/altair.yaml#L5
  CONFIG_NAME* = "mainnet"

  INACTIVITY_PENALTY_QUOTIENT_ALTAIR* = 50331648 ##\
@@ -22,21 +22,21 @@ const

  # Misc
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/mainnet/altair.yaml#L15
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/mainnet/altair.yaml#L15
  SYNC_COMMITTEE_SIZE* = 1024
-  SYNC_SUBCOMMITTEE_SIZE* = 64
+  SYNC_PUBKEYS_PER_AGGREGATE* = 64
  INACTIVITY_SCORE_BIAS* = 4

  # Time parameters
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/mainnet/altair.yaml#L25
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/mainnet/altair.yaml#L25
  EPOCHS_PER_SYNC_COMMITTEE_PERIOD* = 256

  # Signature domains (DOMAIN_SYNC_COMMITTEE) in spec/datatypes/base

  # Fork
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/mainnet/altair.yaml#L36
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/mainnet/altair.yaml#L36

  # ALTAIR_FORK_VERSION is a runtime preset

@@ -44,7 +44,7 @@ const

  # Sync protocol
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/mainnet/altair.yaml#L43
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/mainnet/altair.yaml#L43
  MIN_SYNC_COMMITTEE_PARTICIPANTS* = 1
  MAX_VALID_LIGHT_CLIENT_UPDATES* = 8192
  LIGHT_CLIENT_UPDATE_TIMEOUT* = 8192
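A back-of-the-envelope check of the mainnet timing above, assuming the phase0 mainnet constants SLOTS_PER_EPOCH = 32 and SECONDS_PER_SLOT = 12 (both taken from the phase0 preset rather than this file):

```nim
const
  EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256
  SLOTS_PER_EPOCH = 32        # phase0 mainnet value (assumption)
  SECONDS_PER_SLOT = 12       # phase0 mainnet value (assumption)

let seconds = EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
echo seconds                  # 98304 seconds
echo seconds div 3600         # roughly 27 hours per sync committee period
```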
@@ -11,7 +11,7 @@
const
  # Updated penalty values
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/minimal/altair.yaml#L5
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/minimal/altair.yaml#L5
  CONFIG_NAME* = "minimal"

  INACTIVITY_PENALTY_QUOTIENT_ALTAIR* = 50331648 ##\
@@ -22,21 +22,21 @@ const

  # Misc
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/minimal/altair.yaml#L15
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/minimal/altair.yaml#L15
  SYNC_COMMITTEE_SIZE* = 32
-  SYNC_SUBCOMMITTEE_SIZE* = 16
+  SYNC_PUBKEYS_PER_AGGREGATE* = 16
  INACTIVITY_SCORE_BIAS* = 4

  # Time parameters
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/minimal/altair.yaml#L25
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/minimal/altair.yaml#L25
  EPOCHS_PER_SYNC_COMMITTEE_PERIOD* = 8

  # Signature domains (DOMAIN_SYNC_COMMITTEE) in spec/datatypes/base

  # Fork
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/minimal/altair.yaml#L36
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/minimal/altair.yaml#L36

  # ALTAIR_FORK_VERSION is a runtime preset

@@ -44,7 +44,7 @@ const

  # Sync protocol
  # ---------------------------------------------------------------
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.1/configs/minimal/altair.yaml#L43
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.2/configs/minimal/altair.yaml#L43
  MIN_SYNC_COMMITTEE_PARTICIPANTS* = 1
  MAX_VALID_LIGHT_CLIENT_UPDATES* = 32
  LIGHT_CLIENT_UPDATE_TIMEOUT* = 32
@@ -17,6 +17,12 @@ template withTrust(sig: SomeSig, body: untyped): bool =
  else:
    body

+func getDepositMessage(depositData: DepositData): DepositMessage =
+  DepositMessage(
+    pubkey: depositData.pubkey,
+    amount: depositData.amount,
+    withdrawal_credentials: depositData.withdrawal_credentials)
+
func compute_slot_root*(
    fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot
  ): Eth2Digest =
@@ -91,7 +91,7 @@ proc noRollback*(state: var BeaconState) =
  trace "Skipping rollback of broken state"

type
-  RollbackHashedProc* = proc(state: var HashedBeaconState) {.gcsafe.}
+  RollbackHashedProc* = proc(state: var HashedBeaconState) {.gcsafe, raises: [Defect].}

# Hashed-state transition functions
# ---------------------------------------------------------------
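The tightened `RollbackHashedProc` signature follows the same pattern as the other callback types in this commit: under `{.push raises: [Defect].}`, a proc type and the procs assigned to it need compatible effect lists. A minimal, self-contained illustration with toy types, not the real ones:

```nim
{.push raises: [Defect].}

type
  ToyState = object
    slot: int
  RollbackProc = proc(state: var ToyState) {.gcsafe, raises: [Defect].}

proc noRollback(state: var ToyState) {.gcsafe, raises: [Defect].} =
  discard   # deliberately does nothing, like the real noRollback

let rollback: RollbackProc = noRollback   # compiles: effect lists are compatible
var s = ToyState(slot: 1)
rollback(s)

{.pop.}
```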
@@ -51,7 +51,7 @@

proc jsonPrinterImpl[T](m: MemRange, outStream: OutputStream, pretty: bool) {.raisesssz.} =
  var typedNavigator = sszMount(m, T)
-  var jsonWriter = init(JsonWriter, outStream, pretty)
+  var jsonWriter = Json.Writer.init(outStream, pretty)
  # TODO: it should be possible to serialize the navigator object
  # without dereferencing it (to avoid the intermediate value).
  writeValue(jsonWriter, typedNavigator[])
@@ -1,10 +1,12 @@
# beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
import
  std/[typetraits],
  ../spec/[crypto, digest]
@@ -5,9 +5,7 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

-# TODO Cannot override push, even though the function is annotated
-# nimbus-eth2/beacon_chain/ssz.nim(212, 18) Error: can raise an unlisted exception: IOError
-# {.push raises: [Defect].}
+{.push raises: [Defect].}
{.pragma: raisesssz, raises: [Defect, MalformedSszError, SszSizeMismatchError].}

## SSZ serialization for core SSZ types, as specified in:
@@ -40,10 +38,10 @@ type

  FixedSizedWriterCtx = object

-serializationFormat SSZ,
-                    Reader = SszReader,
-                    Writer = SszWriter,
-                    PreferedOutput = seq[byte]
+serializationFormat SSZ
+
+SSZ.setReader SszReader
+SSZ.setWriter SszWriter, PreferredOutput = seq[byte]

template sizePrefixed*[TT](x: TT): untyped =
  type T = TT
@@ -91,7 +89,7 @@ template supports*(_: type SSZ, T: type): bool =
func init*(T: type SszWriter, stream: OutputStream): T {.raises: [Defect].} =
  result.stream = stream

-proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}
+proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe, raises: [Defect, IOError].}

proc beginRecord*(w: var SszWriter, TT: type): auto {.raises: [Defect].} =
  type T = TT
@@ -1,3 +1,10 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
{.push raises: [Defect].}

import
@@ -75,6 +75,11 @@ template layer*(vIdx: int64): int =
  ## index 0 for the mixed-in-length
  log2trunc(vIdx.uint64).int

+func hashListIndicesLen(maxChunkIdx: int64): int =
+  # TODO: This exists only to work-around a compilation issue when the complex
+  # expression is used directly in the HastList array size definition below
+  int(layer(maxChunkIdx)) + 1
+
type
  List*[T; maxLen: static Limit] = distinct seq[T]
  BitList*[maxLen: static Limit] = distinct BitSeq
@@ -86,7 +91,7 @@ type
  HashList*[T; maxLen: static Limit] = object
    data*: List[T, maxLen]
    hashes* {.dontSerialize.}: seq[Eth2Digest]
-    indices* {.dontSerialize.}: array[int(layer(maxChunkIdx(T, maxLen))) + 1, int64]
+    indices* {.dontSerialize.}: array[hashListIndicesLen(maxChunkIdx(T, maxLen)), int64]

  # Note for readers:
  # We use `array` for `Vector` and
@@ -1,5 +1,14 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
-  tables, strutils, parseutils, sequtils, terminal, colors
+  std/[strutils, parseutils, sequtils, terminal, colors]

 type
   ContentFragments = seq[tuple[kind: InterpolatedKind, value: string]]
@@ -12,7 +21,8 @@ type
     cellsLeft: seq[StatusBarCell]
     cellsRight: seq[StatusBarCell]

-  DataItemResolver* = proc (dataItem: string): string
+  DataItemResolver* = proc (dataItem: string): string {.
+    gcsafe, raises: [Defect].}

   StatusBarView* = object
     model: DataItemResolver
@@ -29,10 +39,12 @@ const
   backgroundColor = rgb(36, 36, 36)
   foregroundColor = colWhiteSmoke

-func loadFragmentsLayout(contentLayout: string): ContentFragments =
+func loadFragmentsLayout(contentLayout: string): ContentFragments {.
+    raises: [Defect, ValueError].} =
   result = toSeq(interpolatedFragments(strip contentLayout))

-func loadCellsLayout(cellsLayout: string): seq[StatusBarCell] =
+func loadCellsLayout(cellsLayout: string): seq[StatusBarCell] {.
+    raises: [Defect, ValueError].} =
   var cells = cellsLayout.split(';')
   for cell in cells:
     var columns = cell.split(':', maxSplit = 1)
@@ -49,7 +61,7 @@ func loadLayout(layout: string): Layout {.raises: [Defect, ValueError].} =
   result.cellsLeft = loadCellsLayout(sections[0])
   if sections.len == 2: result.cellsRight = loadCellsLayout(sections[1])

-func updateContent(cell: var StatusBarCell, model: DataItemResolver) =
+proc updateContent(cell: var StatusBarCell, model: DataItemResolver) =
   cell.content.setLen 0
   for fragment in cell.contentFragments:
     case fragment[0]
@@ -58,11 +70,11 @@ func updateContent(cell: var StatusBarCell, model: DataItemResolver) =
     of ikExpr, ikVar:
       cell.content.add model(fragment[1])

-func updateCells(cells: var seq[StatusBarCell], model: DataItemResolver) =
+proc updateCells(cells: var seq[StatusBarCell], model: DataItemResolver) =
   for cell in mitems(cells):
     cell.updateContent(model)

-func update*(s: var StatusBarView) =
+proc update*(s: var StatusBarView) =
   updateCells s.layout.cellsLeft, s.model
   updateCells s.layout.cellsRight, s.model

@@ -73,18 +85,29 @@ func width(cells: seq[StatusBarCell]): int =
   result = max(0, cells.len - 1) # the number of separators
   for cell in cells: result += cell.width

+var complained = false
+template ignoreException(body: untyped) =
+  try:
+    body
+  except Exception as exc:
+    if not complained:
+      # TODO terminal.nim exception leak
+      echo "Unable to update status bar: ", exc.msg
+      complained = true
+
 proc renderCells(cells: seq[StatusBarCell], sep: string) =
   for i, cell in cells:
-    stdout.setBackgroundColor backgroundColor
-    stdout.setForegroundColor foregroundColor
-    stdout.setStyle {styleDim}
-    if i > 0: stdout.write sep
-    stdout.write " ", cell.label, ": "
-    stdout.setStyle {styleBright}
-    stdout.write cell.content, " "
-    stdout.resetAttributes()
+    ignoreException:
+      stdout.setBackgroundColor backgroundColor
+      stdout.setForegroundColor foregroundColor
+      stdout.setStyle {styleDim}
+      if i > 0: stdout.write sep
+      stdout.write " ", cell.label, ": "
+      stdout.setStyle {styleBright}
+      stdout.write cell.content, " "
+      stdout.resetAttributes()

-proc render*(s: var StatusBarView) =
+proc render*(s: var StatusBarView) {.raises: [Defect, ValueError].} =
   doAssert s.consumedLines == 0

   let
@@ -92,21 +115,23 @@ proc render*(s: var StatusBarView) =
     allCellsWidth = s.layout.cellsLeft.width + s.layout.cellsRight.width

   if allCellsWidth > 0:
-    renderCells(s.layout.cellsLeft, sepLeft)
-    stdout.setBackgroundColor backgroundColor
-    if termWidth > allCellsWidth:
-      stdout.write spaces(termWidth - allCellsWidth)
-      s.consumedLines = 1
-    else:
-      stdout.write spaces(max(0, termWidth - s.layout.cellsLeft.width)), "\p"
-      s.consumedLines = 2
-    renderCells(s.layout.cellsRight, sepRight)
-    stdout.flushFile
+    ignoreException:
+      renderCells(s.layout.cellsLeft, sepLeft)
+      stdout.setBackgroundColor backgroundColor
+      if termWidth > allCellsWidth:
+        stdout.write spaces(termWidth - allCellsWidth)
+        s.consumedLines = 1
+      else:
+        stdout.write spaces(max(0, termWidth - s.layout.cellsLeft.width)), "\p"
+        s.consumedLines = 2
+      renderCells(s.layout.cellsRight, sepRight)
+      stdout.flushFile

 proc erase*(s: var StatusBarView) =
-  for i in 1 ..< s.consumedLines: cursorUp()
-  for i in 0 ..< s.consumedLines: eraseLine()
-  s.consumedLines = 0
+  ignoreException:
+    for i in 1 ..< s.consumedLines: cursorUp()
+    for i in 0 ..< s.consumedLines: eraseLine()
+    s.consumedLines = 0

 func init*(T: type StatusBarView,
            layout: string,

@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import options, sequtils, strutils
 import chronos, chronicles
 import ../spec/[datatypes, digest],
@@ -168,4 +177,6 @@ proc fetchAncestorBlocks*(rman: RequestManager, roots: seq[FetchRecord]) =
   ## Enqueue list missing blocks roots ``roots`` for download by
   ## Request Manager ``rman``.
   for item in roots:
-    rman.inpQueue.addLastNoWait(item)
+    try:
+      rman.inpQueue.addLastNoWait(item)
+    except AsyncQueueFullError: raiseAssert "unbounded queue"

@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import chronicles
 import options, deques, heapqueue, tables, strutils, sequtils, math, algorithm
 import stew/results, chronos, chronicles

@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
   options, tables, sets, macros,
   chronicles, chronos, stew/ranges/bitranges, libp2p/switch,
@@ -35,7 +44,7 @@ type
     else:
       index: uint32

-  BeaconBlockCallback* = proc(signedBlock: SignedBeaconBlock) {.gcsafe.}
+  BeaconBlockCallback* = proc(signedBlock: SignedBeaconBlock) {.gcsafe, raises: [Defect].}

   BeaconSyncNetworkState* = ref object
     chainDag*: ChainDAGRef
@@ -87,6 +96,7 @@ proc handleStatus(peer: Peer,

 proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) {.gcsafe.}

+{.pop.} # TODO fix p2p macro for raises
 p2pProtocol BeaconSync(version = 1,
                        networkState = BeaconSyncNetworkState,
                        peerState = BeaconSyncPeerState):

@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
   std/[os, strutils, terminal, wordwrap, unicode],
   chronicles, chronos, web3, stint, json_serialization, zxcvbn,
@@ -14,7 +23,6 @@ export
 when defined(windows):
   import stew/[windows/acl]

-{.push raises: [Defect].}
 {.localPassC: "-fno-lto".} # no LTO for crypto

 const
@@ -193,7 +201,7 @@ proc keyboardCreatePassword(prompt: string,
       return ok(password)

 proc keyboardGetPassword[T](prompt: string, attempts: int,
-                            pred: proc(p: string): KsResult[T] {.closure.}): KsResult[T] =
+                            pred: proc(p: string): KsResult[T] {.gcsafe, raises: [Defect].}): KsResult[T] =
   var
     remainingAttempts = attempts
     counter = 1

@@ -1,10 +1,13 @@
 # beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+# TODO doesn't work with concepts (sigh)
+# {.push raises: [Defect].}
+
 import
   # stdlib
   std/os,

@@ -1,10 +1,12 @@
 # beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   # Stdlib
   std/[typetraits, strutils, algorithm],
@@ -237,7 +239,7 @@ proc writeValue*(writer: var JsonWriter, value: PubKey0x)
 proc readValue*(reader: var JsonReader, value: var PubKey0x)
                {.raises: [SerializationError, IOError, Defect].} =
   try:
-    value = PubKey0x reader.readValue(string).hexToByteArray[:RawPubKeySize]()
+    value = PubKey0x reader.readValue(string).hexToByteArray(RawPubKeySize)
   except ValueError:
     raiseUnexpectedValue(reader, "Hex string expected")

@@ -265,7 +267,7 @@ proc readValue*(r: var JsonReader, a: var (SlotString or EpochString))

 proc exportSlashingInterchange*(
        db: SlashingProtectionDB_Concept,
-       path: string, prettify = true) =
+       path: string, prettify = true) {.raises: [Defect, IOError].} =
   ## Export a database to the Slashing Protection Database Interchange Format
   let spdir = db.toSPDIR()
   Json.saveFile(path, spdir, prettify)
@@ -273,7 +275,7 @@ proc exportSlashingInterchange*(

 proc importSlashingInterchange*(
        db: SlashingProtectionDB_Concept,
-       path: string): SlashingImportStatus =
+       path: string): SlashingImportStatus {.raises: [Defect, IOError, SerializationError].} =
   ## Import a Slashing Protection Database Interchange Format
   ## into a Nimbus DB.
   ## This adds data to already existing data.

@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   # Standard library
   std/[tables, os],

@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   # Standard library
   std/[os, options, typetraits, decls],

@@ -5,13 +5,15 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   # Standard library
   std/[os, osproc, sequtils, streams, tables],

   # Nimble packages
-  stew/[assign2, objects, shims/macros],
-  chronos, metrics, json_rpc/[rpcserver, jsonmarshal],
+  stew/[assign2, objects],
+  chronos, metrics,
   chronicles,
   json_serialization/std/[options, sets, net], serialization/errors,
   eth/db/kvstore,
@@ -75,7 +77,7 @@ proc addLocalValidators*(node: BeaconNode) =
   for validatorKey in node.config.validatorKeys:
     node.addLocalValidator node.chainDag.headState.data.data, validatorKey

-proc addRemoteValidators*(node: BeaconNode) =
+proc addRemoteValidators*(node: BeaconNode) {.raises: [Defect, OSError, IOError].} =
   # load all the validators from the child process - loop until `end`
   var line = newStringOfCap(120).TaintedString
   while line != "end" and running(node.vcProcess):

@@ -1,3 +1,12 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
 import
   std/[tables, json, streams],
   chronos, chronicles, metrics,

@@ -1,3 +1,10 @@
+# beacon_chain
+# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
 {.push raises: [Defect].}

 import strutils

@@ -333,6 +333,15 @@ Set the current logging level dynamically: TRACE, DEBUG, INFO, NOTICE, WARN, ERR
 curl -d '{"jsonrpc":"2.0","id":"id","method":"setLogLevel","params":["DEBUG; TRACE:discv5,libp2p; REQUIRED:none; DISABLED:none"] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
 ```

+### setGraffiti
+
+Set the graffiti bytes that will be included in proposed blocks. The graffiti bytes can be
+specified as a UTF-8 encoded string or as a 0x-prefixed hex string specifying raw bytes.
+
+```
+curl -d '{"jsonrpc":"2.0","id":"id","method":"setGraffiti","params":["Mr F was here"] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
+```
+
 ### getEth1Chain

 Get the list of Eth1 blocks that the beacon node is currently storing in memory.

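For illustration only (not part of the diff above): the 0x-prefixed form described in the new setGraffiti section would be called with the same JSON-RPC envelope. The value below is an arbitrary example ("Nimbus" as raw bytes) and assumes the endpoint zero-pads inputs shorter than the 32-byte graffiti field.

```
# hypothetical hex-graffiti call; the value is an arbitrary example, not taken from the commit
curl -d '{"jsonrpc":"2.0","id":"id","method":"setGraffiti","params":["0x4e696d627573"] }' -H 'Content-Type: application/json' localhost:9190 -s | jq
```
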
scripts/slowlogs.py (new executable file, 32 lines)
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+
+# Print logs that have gaps between them larger than the threshold - useful for
+# finding slowdowns in the code where the thread is busy for long periods of
+# time
+# usage:
+# tail -F logfile | python slowlogs.py 0.75
+
+import sys, re
+from datetime import datetime
+
+THRESHOLD = 0.75
+
+if len(sys.argv) > 1:
+    THRESHOLD = float(sys.argv[1])
+
+last = None
+prevline = None
+
+dt = re.compile(r"([0-9-]+ [0-9:.]+)")
+
+for line in sys.stdin:
+    match = dt.search(line)
+
+    if match:
+        current = datetime.strptime(match.group(1), "%Y-%m-%d %H:%M:%S.%f")
+        if last != None and (current - last).total_seconds() > THRESHOLD:
+            print((current - last).total_seconds())
+            print(prevline, end="")
+            print(line)
+        last = current
+    prevline = line

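The script's header comment shows the intended streaming use with `tail -F`; the same filter also works on a saved log read from stdin. A minimal sketch, assuming a hypothetical log file name and a 1.5-second threshold:

```
# flag log entries separated by more than 1.5 seconds (file name is an example)
python3 scripts/slowlogs.py 1.5 < beacon_node.log
```
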
@@ -25,7 +25,7 @@ type
   BLSSignMsgInput = object
     privkey*: ValidatorPrivKey
     message*: seq[byte]
-    domain*: Domain
+    domain*: Eth2Domain

   BLSSignMsg* = object
     input*: BLSSignMsgInput
@@ -39,8 +39,8 @@ type
     input*: seq[ValidatorPubKey]
     output*: ValidatorPubKey

-proc readValue*(r: var JsonReader, a: var Domain) =
-  ## Custom deserializer for Domain
+proc readValue*(r: var JsonReader, a: var Eth2Domain) =
+  ## Custom deserializer for Eth2Domain
   # Furthermore Nim parseHex doesn't support uint
   # until https://github.com/nim-lang/Nim/pull/11067
   # (0.20)

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   std/unittest,
   chronos, stew/shims/net, eth/keys, eth/p2p/discoveryv5/enr,

Vendored submodule updates (old → new subproject commit):

vendor/nim-chronicles: b42899070a7daa5cf6f0843faf3d6d41659e9591 → ea0368cc303b6ed59792a7c2556285adf310e455
vendor/nim-chronos: c8eefb9382a786993fc703386b0bd446ecf9c037 → e08deb47c2b1a64d771c9f13eb7cfc7d691c436d
vendor/nim-confutils: cfa95661913b0ff8b1609e3954894f8ab31bbf3e → f091a70a5bf95ec772c8b4d9978e81b8ae89af0c
vendor/nim-eth: be5e088b21e06a85cac4826454412db8459ed4f1 → 16802c0e5218cce405cd623a554ce95549dd5181
vendor/nim-eth2-scenarios: 26cd96259aa8047096589361d96b86673f8a3ecd → 26f2b9153d295584ccbe6a32aa681497e49b4da7
vendor/nim-json-rpc: 4eb39203ebd391c77d16a1c387dc8a6b7d90bc69 → 64d40d6c1a095761a03d1ba55eb45877596e8e7b
vendor/nim-json-serialization: 32f75d93b0762328d1d85ce62cef84ed919ae31e → fe8a82ca76150b60a950d5aa4e5baa382441ada4
vendor/nim-libp2p: a54e1cc699f036f3a8eeff33c3d9f893b8a284e9 → 54031c9e9bc9882a2e8c2d5937031731ed63ab5e
vendor/nim-protobuf-serialization: e8e84cfc11de78c7bce0cded800060a29220e76f → d79b5c884965b2ab395315d8e5f3f8ae134ff99a
vendor/nim-serialization: 261de741b73601821cb6e749fc9b4092f1cc5377 → f9a1121b8733eb75e624ab59f8d79e707f15f76f
vendor/nim-stew: 6bcb21184aeb96ce6c62e187a64d678b74609f1e → ee78822e057ac5f39804ecb6ac1096734be13ef8
vendor/nim-web3: 21b465fcd58460e6018dcb1048254f2514696778 → 75a1a0e5d8cc8fc7bb9cdb3bfe68a73e11b5c71a