spec: regulate exceptions (#913)

* spec: regulate exceptions
* a few more simple raises

parent bdea75e4c3
commit ed74770451
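The changes below apply one pattern across the spec modules: each file opts into strict exception tracking with a top-of-file {.push raises: [Defect].}, and calls that can formally raise something else are either handled locally or converted with raiseAssert where a failure would indicate a bug. A minimal sketch of the pattern (the proc name `label` is illustrative, not from this diff):

{.push raises: [Defect].}   # every proc below may only raise Defect

import strformat

proc label(n: int): string =
  # fmt can raise ValueError, but the format string is a compile-time
  # constant, so a failure would be a bug: turn it into a Defect
  try:
    fmt"item-{n:03}"
  except ValueError as e:
    raiseAssert e.msg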
@@ -1277,13 +1277,17 @@ when hasPrompt:
     when compiles(defaultChroniclesStream.output.writer):
       defaultChroniclesStream.output.writer =
-        proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe.} =
-          # p.hidePrompt
-          erase statusBar
-          # p.writeLine msg
-          stdout.write msg
-          render statusBar
-          # p.showPrompt
+        proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe, raises: [Defect].} =
+          try:
+            # p.hidePrompt
+            erase statusBar
+            # p.writeLine msg
+            stdout.write msg
+            render statusBar
+            # p.showPrompt
+          except Exception as e: # render raises Exception
+            if e is Defect: raise (ref Defect)(e)
+            discard # Status bar not critical

   proc statusBarUpdatesPollingLoop() {.async.} =
     while true:

@@ -1302,8 +1306,11 @@ when isMainModule:
   when compiles(defaultChroniclesStream.output.writer):
     defaultChroniclesStream.output.writer =
-      proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe.} =
-        stdout.write(msg)
+      proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe, raises: [Defect].} =
+        try:
+          stdout.write(msg)
+        except IOError:
+          discard # nothing to do..

   randomize()
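The log-writer hunks above show the workaround for callees annotated as raising bare Exception: catch everything, but re-raise Defect so assertion failures are not swallowed. A reduced sketch; renderSomething is a hypothetical stand-in for the TUI calls:

proc renderSomething(msg: string) {.raises: [Exception].} =
  echo msg # stand-in for a terminal call with an unconstrained raises list

proc writer(msg: string) {.gcsafe, raises: [Defect].} =
  try:
    renderSomething(msg)
  except Exception as e:
    if e is Defect: raise (ref Defect)(e)
    discard # output is not critical, drop the error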
@@ -1,3 +1,5 @@
+{.push raises: [Defect].}
+
 import
   deques, tables, options,
   stew/[endians2, byteutils], chronicles,

@@ -1,3 +1,5 @@
+{.push raises: [Defect].}
+
 import
   os, options, strformat, strutils,
   chronicles, confutils, json_serialization,

@@ -278,7 +280,10 @@ proc defaultDataDir*(conf: BeaconNodeConf): string =

 proc validatorFileBaseName*(validatorIdx: int): string =
   # there can apparently be tops 4M validators so we use 7 digits..
-  fmt"v{validatorIdx:07}"
+  try:
+    fmt"v{validatorIdx:07}"
+  except ValueError as e:
+    raiseAssert e.msg

 func dumpDir*(conf: BeaconNodeConf): string =
   conf.dataDir / "dump"

@@ -292,10 +297,10 @@ func databaseDir*(conf: BeaconNodeConf): string =
 func defaultListenAddress*(conf: BeaconNodeConf): IpAddress =
   # TODO: How should we select between IPv4 and IPv6
   # Maybe there should be a config option for this.
-  parseIpAddress("0.0.0.0")
+  return static: parseIpAddress("0.0.0.0")

 func defaultAdminListenAddress*(conf: BeaconNodeConf): IpAddress =
-  parseIpAddress("127.0.0.1")
+  return static: parseIpAddress("127.0.0.1")

 iterator validatorKeys*(conf: BeaconNodeConf): ValidatorPrivKey =
   for validatorKeyFile in conf.validators:

@@ -305,13 +310,17 @@ iterator validatorKeys*(conf: BeaconNodeConf): ValidatorPrivKey =
       warn "Failed to load validator private key",
         file = validatorKeyFile.string, err = err.msg

-  for kind, file in walkDir(conf.localValidatorsDir):
-    if kind in {pcFile, pcLinkToFile} and
-        cmpIgnoreCase(".privkey", splitFile(file).ext) == 0:
-      try:
-        yield ValidatorPrivKey.init(readFile(file).string)
-      except CatchableError as err:
-        warn "Failed to load a validator private key", file, err = err.msg
+  try:
+    for kind, file in walkDir(conf.localValidatorsDir):
+      if kind in {pcFile, pcLinkToFile} and
+          cmpIgnoreCase(".privkey", splitFile(file).ext) == 0:
+        try:
+          yield ValidatorPrivKey.init(readFile(file).string)
+        except CatchableError as err:
+          warn "Failed to load a validator private key", file, err = err.msg
+  except OSError as err:
+    warn "Cannot load validator keys",
+      dir = conf.localValidatorsDir, err = err.msg

 template writeValue*(writer: var JsonWriter,
                      value: TypedInputFile|InputFile|InputDir|OutPath|OutDir|OutFile) =
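The defaultListenAddress change is worth noting: `static:` moves parseIpAddress to compile time, so the ValueError it could raise cannot occur at runtime and the func keeps an empty effects list. A sketch of the idea, assuming the std net module as in the originals:

import net

func defaultAddr(): IpAddress =
  # the literal is validated during compilation; a bad literal becomes a
  # compile-time error instead of a runtime exception
  return static: parseIpAddress("0.0.0.0")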
@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 # Temporary dumping ground for extra types and helpers that could make it into
 # the spec potentially
 #

@@ -1,3 +1,5 @@
+{.push raises: [Defect].}
+
 import
   stew/endians2, stint,
   ./extras, ./ssz,

@@ -14,7 +16,7 @@ func get_eth1data_stub*(deposit_count: uint64, current_epoch: Epoch): Eth1Data =
     block_hash: hash_tree_root(hash_tree_root(voting_period).data),
   )

-func makeInteropPrivKey*(i: int): ValidatorPrivKey =
+func makeInteropPrivKey*(i: int): BlsResult[ValidatorPrivKey] =
   var bytes: array[32, byte]
   bytes[0..7] = uint64(i).toBytesLE()

@@ -26,7 +28,7 @@ func makeInteropPrivKey*(i: int): ValidatorPrivKey =
     privkeyBytes = eth2hash(bytes)
     key = (UInt256.fromBytesLE(privkeyBytes.data) mod curveOrder).toBytesBE()

-  ValidatorPrivKey.fromRaw(key).tryGet()
+  ValidatorPrivKey.fromRaw(key)

 const eth1BlockHash* = block:
   var x: Eth2Digest
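makeInteropPrivKey now returns the BlsResult instead of unwrapping with tryGet() inside the library, so callers pick the failure mode (the generateDeposits and test hunks below use tryGet() and [] respectively). A caller-side sketch, assuming BlsResult follows stew/results conventions:

let keyRes = makeInteropPrivKey(0) # 0 stands in for the validator index
if keyRes.isErr:
  echo "Could not derive interop key: ", keyRes.error
  quit 1
let privKey = keyRes.get() # or keyRes[] / keyRes.tryGet() where raising is acceptable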
@@ -10,6 +10,8 @@
 # Merkle tree helpers
 # ---------------------------------------------------------------

+{.push raises: [Defect].}
+
 import
   sequtils, strutils, macros, bitops,
   # Specs

@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   tables, algorithm, math, sequtils, options,
   json_serialization/std/sets, chronicles, stew/bitseqs,

@@ -17,6 +17,9 @@
 # `ref` - this can be achieved by wrapping them in higher-level
 # types / composition

+# TODO report compiler crash when this is uncommented
+# {.push raises: [Defect].}
+
 import
   macros, hashes, json, strutils, tables,
   stew/[byteutils, bitseqs], chronicles,

@@ -7,6 +7,8 @@

 # Uncategorized helper functions from the spec

+{.push raises: [Defect].}
+
 import
   # Standard lib
   math,

@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   strformat,
   stew/byteutils,
@@ -27,22 +29,39 @@ const
   defaultEth2RpcPort* = 9090

 func getBeaconBlocksTopic*(forkDigest: ForkDigest): string =
-  &"/eth2/{toHex forkDigest}/{topicBeaconBlocksSuffix}"
+  try:
+    &"/eth2/{toHex forkDigest}/{topicBeaconBlocksSuffix}"
+  except ValueError as e:
+    raiseAssert e.msg

 func getVoluntaryExitsTopic*(forkDigest: ForkDigest): string =
-  &"/eth2/{toHex forkDigest}/{topicVoluntaryExitsSuffix}"
+  try:
+    &"/eth2/{toHex forkDigest}/{topicVoluntaryExitsSuffix}"
+  except ValueError as e:
+    raiseAssert e.msg

 func getProposerSlashingsTopic*(forkDigest: ForkDigest): string =
-  &"/eth2/{toHex forkDigest}/{topicProposerSlashingsSuffix}"
+  try:
+    &"/eth2/{toHex forkDigest}/{topicProposerSlashingsSuffix}"
+  except ValueError as e:
+    raiseAssert e.msg

 func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
-  &"/eth2/{toHex forkDigest}/{topicAttesterSlashingsSuffix}"
+  try:
+    &"/eth2/{toHex forkDigest}/{topicAttesterSlashingsSuffix}"
+  except ValueError as e:
+    raiseAssert e.msg

 func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
-  &"/eth2/{toHex forkDigest}/{topicAggregateAndProofsSuffix}"
+  try:
+    &"/eth2/{toHex forkDigest}/{topicAggregateAndProofsSuffix}"
+  except ValueError as e:
+    raiseAssert e.msg

 func getAttestationTopic*(forkDigest: ForkDigest, committeeIndex: uint64): string =
   # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md#broadcast-attestation
-  let topicIndex = committeeIndex mod ATTESTATION_SUBNET_COUNT
-  &"/eth2/{toHex forkDigest}/committee_index{topicIndex}{topicAttestationsSuffix}"
+  try:
+    let topicIndex = committeeIndex mod ATTESTATION_SUBNET_COUNT
+    &"/eth2/{toHex forkDigest}/committee_index{topicIndex}{topicAttestationsSuffix}"
+  except ValueError as e:
+    raiseAssert e.msg
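The six topic builders repeat the same wrapper because strformat's & formally raises ValueError even though these constant format strings cannot fail. Not part of this commit, but a hypothetical helper template could factor the repetition:

template fmtOrDie(expr: string): string =
  ## evaluate a formatting expression, converting the impossible
  ## ValueError into a Defect
  try:
    expr
  except ValueError as e:
    raiseAssert e.msg

func getBeaconBlocksTopic(forkDigest: ForkDigest): string =
  fmtOrDie(&"/eth2/{toHex forkDigest}/{topicBeaconBlocksSuffix}")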
@@ -32,6 +32,8 @@
 # improvements to be made - other than that, keep things similar to spec for
 # now.

+{.push raises: [Defect].}
+
 import
   algorithm, collections/sets, chronicles, options, sequtils, sets,
   ../extras, ../ssz, metrics,

@@ -400,16 +402,19 @@ proc process_block*(
   # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
   # doesn't seem to specify at what point in block processing this metric is to be read,
   # and this avoids the early-return issue (could also use defer, etc).
-  beacon_pending_deposits.set(
-    state.eth1_data.deposit_count.int64 - state.eth1_deposit_index.int64)
-  beacon_processed_deposits_total.set(state.eth1_deposit_index.int64)
-
-  # Adds nontrivial additional computation, but only does so when metrics
-  # enabled.
-  beacon_current_live_validators.set(toHashSet(
-    mapIt(state.current_epoch_attestations, it.proposerIndex)).len.int64)
-  beacon_previous_live_validators.set(toHashSet(
-    mapIt(state.previous_epoch_attestations, it.proposerIndex)).len.int64)
+  try:
+    beacon_pending_deposits.set(
+      state.eth1_data.deposit_count.int64 - state.eth1_deposit_index.int64)
+    beacon_processed_deposits_total.set(state.eth1_deposit_index.int64)
+
+    # Adds nontrivial additional computation, but only does so when metrics
+    # enabled.
+    beacon_current_live_validators.set(toHashSet(
+      mapIt(state.current_epoch_attestations, it.proposerIndex)).len.int64)
+    beacon_previous_live_validators.set(toHashSet(
+      mapIt(state.previous_epoch_attestations, it.proposerIndex)).len.int64)
+  except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
+    trace "Couldn't update metrics", msg = e.msg

   if not process_block_header(state, blck, flags, stateCache):
     notice "Block header not valid", slot = shortLog(state.slot)

@@ -32,6 +32,8 @@
 # improvements to be made - other than that, keep things similar to spec for
 # now.

+{.push raises: [Defect].}
+
 import
   math, options, sequtils, tables,
   stew/[bitseqs, bitops2], chronicles, json_serialization/std/sets,

@@ -449,13 +451,16 @@ proc process_epoch*(state: var BeaconState) {.nbench.}=
   process_final_updates(state)

   # Once per epoch metrics
-  beacon_finalized_epoch.set(state.finalized_checkpoint.epoch.int64)
-  beacon_finalized_root.set(state.finalized_checkpoint.root.toGaugeValue)
-  beacon_current_justified_epoch.set(
-    state.current_justified_checkpoint.epoch.int64)
-  beacon_current_justified_root.set(
-    state.current_justified_checkpoint.root.toGaugeValue)
-  beacon_previous_justified_epoch.set(
-    state.previous_justified_checkpoint.epoch.int64)
-  beacon_previous_justified_root.set(
-    state.previous_justified_checkpoint.root.toGaugeValue)
+  try:
+    beacon_finalized_epoch.set(state.finalized_checkpoint.epoch.int64)
+    beacon_finalized_root.set(state.finalized_checkpoint.root.toGaugeValue)
+    beacon_current_justified_epoch.set(
+      state.current_justified_checkpoint.epoch.int64)
+    beacon_current_justified_root.set(
+      state.current_justified_checkpoint.root.toGaugeValue)
+    beacon_previous_justified_epoch.set(
+      state.previous_justified_checkpoint.epoch.int64)
+    beacon_previous_justified_root.set(
+      state.previous_justified_checkpoint.root.toGaugeValue)
+  except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
+    trace "Couldn't update metrics", msg = e.msg
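All the metrics hunks (including the process_slots ones further down) use the same guard: nim-metrics gauges are currently annotated as raising bare Exception (see the nim-metrics#22 TODO in the diff), so every .set() inside a {.push raises: [Defect].} module is fenced off. The shape is always:

try:
  some_gauge.set(computeValue()) # some_gauge/computeValue are placeholders
except Exception as e: # TODO remove once nim-metrics narrows its raises list
  trace "Couldn't update metrics", msg = e.msg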
@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [Defect].}
+
 import
   # Standard library
   sets,

@@ -6,6 +6,8 @@

 # Helpers and functions pertaining to managing the validator set

+{.push raises: [Defect].}
+
 import
   options, nimcrypto, sequtils, math, tables,
   ./datatypes, ./digest, ./helpers

@@ -91,25 +93,27 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
     index: uint64, count: uint64, stateCache: var StateCache): seq[ValidatorIndex] =
   ## Return the committee corresponding to ``indices``, ``seed``, ``index``,
   ## and committee ``count``.
-  let
-    start = (len(indices).uint64 * index) div count
-    endIdx = (len(indices).uint64 * (index + 1)) div count
-    key = (indices.len, seed)
-
-  if key notin stateCache.beacon_committee_cache:
-    stateCache.beacon_committee_cache[key] =
-      get_shuffled_seq(seed, len(indices).uint64)
-
-  # These assertions from compute_shuffled_index(...)
-  let index_count = indices.len().uint64
-  doAssert endIdx <= index_count
-  doAssert index_count <= 2'u64^40
-
-  # In spec, this calls get_shuffled_index() every time, but that's wasteful
-  mapIt(
-    start.int .. (endIdx.int-1),
-    indices[stateCache.beacon_committee_cache[key][it]])
+  try:
+    let
+      start = (len(indices).uint64 * index) div count
+      endIdx = (len(indices).uint64 * (index + 1)) div count
+      key = (indices.len, seed)
+
+    if key notin stateCache.beacon_committee_cache:
+      stateCache.beacon_committee_cache[key] =
+        get_shuffled_seq(seed, len(indices).uint64)
+
+    # These assertions from compute_shuffled_index(...)
+    let index_count = indices.len().uint64
+    doAssert endIdx <= index_count
+    doAssert index_count <= 2'u64^40
+
+    # In spec, this calls get_shuffled_index() every time, but that's wasteful
+    mapIt(
+      start.int .. (endIdx.int-1),
+      indices[stateCache.beacon_committee_cache[key][it]])
+  except KeyError:
+    raiseAssert("Cached entries are added before use")

 # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_beacon_committee
 func get_beacon_committee*(

@@ -119,26 +123,29 @@ func get_beacon_committee*(
   let
     epoch = compute_epoch_at_slot(slot)

-  ## This is a somewhat more fragile, but high-ROI, caching setup --
-  ## get_active_validator_indices() is slow to run in a loop and only
-  ## changes once per epoch.
-  if epoch notin cache.active_validator_indices_cache:
-    cache.active_validator_indices_cache[epoch] =
-      get_active_validator_indices(state, epoch)
-
-  # Constant throughout an epoch
-  if epoch notin cache.committee_count_cache:
-    cache.committee_count_cache[epoch] =
-      get_committee_count_at_slot(state, slot)
-
-  compute_committee(
-    cache.active_validator_indices_cache[epoch],
-    get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
-    (slot mod SLOTS_PER_EPOCH) * cache.committee_count_cache[epoch] +
-      index.uint64,
-    cache.committee_count_cache[epoch] * SLOTS_PER_EPOCH,
-    cache
-  )
+  try:
+    ## This is a somewhat more fragile, but high-ROI, caching setup --
+    ## get_active_validator_indices() is slow to run in a loop and only
+    ## changes once per epoch.
+    if epoch notin cache.active_validator_indices_cache:
+      cache.active_validator_indices_cache[epoch] =
+        get_active_validator_indices(state, epoch)
+
+    # Constant throughout an epoch
+    if epoch notin cache.committee_count_cache:
+      cache.committee_count_cache[epoch] =
+        get_committee_count_at_slot(state, slot)
+
+    compute_committee(
+      cache.active_validator_indices_cache[epoch],
+      get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
+      (slot mod SLOTS_PER_EPOCH) * cache.committee_count_cache[epoch] +
+        index.uint64,
+      cache.committee_count_cache[epoch] * SLOTS_PER_EPOCH,
+      cache
+    )
+  except KeyError:
+    raiseAssert "values are added to cache before using them"

 # Not from spec
 func get_empty_per_epoch_cache*(): StateCache =
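The two committee hunks above deal with Table's [] accessor, whose effect list includes KeyError even when the key was inserted a few lines earlier; the unreachable branch becomes a Defect. A self-contained sketch of the idiom:

import tables

var cache: Table[int, string]

proc cached(key: int): string {.raises: [Defect].} =
  if key notin cache:
    cache[key] = $key # populate before reading
  try:
    cache[key] # [] formally raises KeyError
  except KeyError:
    raiseAssert "populated on the previous line"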
@@ -8,6 +8,10 @@
 # SSZ Serialization (simple serialize)
 # See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md

+# TODO Cannot override push, even though the function is annotated
+# nim-beacon-chain/beacon_chain/ssz.nim(212, 18) Error: can raise an unlisted exception: IOError
+#{.push raises: [Defect].}
+
 import
   stew/shims/macros, options, algorithm, options,
   stew/[bitops2, bitseqs, endians2, objects, varints, ptrops, ranges/ptr_arith], stint,

@@ -72,15 +76,15 @@ template sizePrefixed*[TT](x: TT): untyped =

 proc init*(T: type SszReader,
            stream: InputStream,
-           maxObjectSize = defaultMaxObjectSize): T =
+           maxObjectSize = defaultMaxObjectSize): T {.raises: [Defect].} =
   T(stream: stream, maxObjectSize: maxObjectSize)

-proc mount*(F: type SSZ, stream: InputStream, T: type): T =
+proc mount*(F: type SSZ, stream: InputStream, T: type): T {.raises: [Defect].} =
   mixin readValue
   var reader = init(SszReader, stream)
   reader.readValue(T)

-method formatMsg*(err: ref SszSizeMismatchError, filename: string): string {.gcsafe.} =
+method formatMsg*(err: ref SszSizeMismatchError, filename: string): string {.gcsafe, raises: [Defect].} =
   # TODO: implement proper error string
   "Serialisation error while processing " & filename

@@ -111,7 +115,7 @@ template toSszType*(x: auto): auto =
   elif useListType and x is List: seq[x.T](x)
   else: x

-proc writeFixedSized(c: var WriteCursor, x: auto) =
+proc writeFixedSized(c: var WriteCursor, x: auto) {.raises: [Defect, IOError].} =
   mixin toSszType

   when x is byte:

@@ -146,7 +150,7 @@ template supports*(_: type SSZ, T: type): bool =
   mixin toSszType
   anonConst compiles(fixedPortionSize toSszType(default(T)))

-func init*(T: type SszWriter, stream: OutputStream): T =
+func init*(T: type SszWriter, stream: OutputStream): T {.raises: [Defect].} =
   result.stream = stream

 template enumerateSubFields(holder, fieldVar, body: untyped) =

@@ -157,7 +161,7 @@ template enumerateSubFields(holder, fieldVar, body: untyped) =

 proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}

-proc beginRecord*(w: var SszWriter, TT: type): auto =
+proc beginRecord*(w: var SszWriter, TT: type): auto {.raises: [Defect].} =
   type T = TT
   when isFixedSize(T):
     FixedSizedWriterCtx()

@@ -193,7 +197,7 @@ template endRecord*(w: var SszWriter, ctx: var auto) =
   when ctx is VarSizedWriterCtx:
     finalize ctx.fixedParts

-proc writeVarSizeType(w: var SszWriter, value: auto) =
+proc writeVarSizeType(w: var SszWriter, value: auto) {.raises: [Defect, IOError].} =
   trs "STARTING VAR SIZE TYPE"
   mixin toSszType
   type T = type toSszType(value)

@@ -230,7 +234,7 @@ proc writeVarSizeType(w: var SszWriter, value: auto) =
     writeField w, ctx, astToStr(field), field
   endRecord w, ctx

-proc writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
+proc writeValue*(w: var SszWriter, x: auto) {.gcsafe, raises: [Defect, IOError].} =
   mixin toSszType
   type T = type toSszType(x)

@@ -241,7 +245,7 @@ proc writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
   else:
     unsupported type(x)

-proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) =
+proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) {.raises: [Defect, IOError].} =
   var cursor = w.stream.delayVarSizeWrite(10)
   let initPos = w.stream.pos
   w.writeValue T(x)

@@ -260,7 +264,7 @@ template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
   mixin fromSszBytes
   fromSszBytes(T, bytes)

-proc readValue*[T](r: var SszReader, val: var T) =
+proc readValue*[T](r: var SszReader, val: var T) {.raises: [Defect, MalformedSszError, SszSizeMismatchError].} =
   when isFixedSize(T):
     const minimalSize = fixedPortionSize(T)
     if r.stream.readable(minimalSize):

@@ -272,7 +276,7 @@ proc readValue*[T](r: var SszReader, val: var T) =
       # the dynamic portion to consume the right number of bytes.
       val = readSszValue(r.stream.read(r.stream.endPos), T)

-proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) =
+proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) {.raises: [Defect].} =
   let length = r.stream.readVarint(uint64)
   if length > r.maxObjectSize:
     raise newException(SszMaxSizeExceeded,

@@ -415,19 +419,23 @@ func mixInLength(root: Eth2Digest, length: int): Eth2Digest =

 func merkleizeSerializedChunks(merkleizer: SszChunksMerkleizer,
                                obj: auto): Eth2Digest =
-  var hashingStream = newSszHashingStream merkleizer
-  {.noSideEffect.}:
-    # We assume there are no side-effects here, because the
-    # SszHashingStream is keeping all of its output in memory.
-    hashingStream.writeFixedSized obj
-    hashingStream.flush
-  merkleizer.getFinalHash
+  try:
+    var hashingStream = newSszHashingStream merkleizer
+    {.noSideEffect.}:
+      # We assume there are no side-effects here, because the
+      # SszHashingStream is keeping all of its output in memory.
+      hashingStream.writeFixedSized obj
+      hashingStream.flush
+    merkleizer.getFinalHash
+  except IOError as e:
+    # Hashing shouldn't raise in theory but because of abstraction
+    # tax in the faststreams library, we have to do this at runtime
+    raiseAssert($e.msg)

 func merkleizeSerializedChunks(obj: auto): Eth2Digest =
   merkleizeSerializedChunks(SszChunksMerkleizer(), obj)

-func hash_tree_root*(x: auto): Eth2Digest {.gcsafe.}
+func hash_tree_root*(x: auto): Eth2Digest {.gcsafe, raises: [Defect].}

 template merkleizeFields(body: untyped): Eth2Digest {.dirty.} =
   var merkleizer {.inject.} = SszChunksMerkleizer()

@@ -546,7 +554,7 @@ func maxChunksCount(T: type, maxLen: static int64): int64 {.compileTime.} =
   else:
     unsupported T # This should never happen

-func hash_tree_root*(x: auto): Eth2Digest =
+func hash_tree_root*(x: auto): Eth2Digest {.raises: [Defect].} =
   trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
   mixin toSszType
   when x is SignedBeaconBlock:
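ssz.nim is the one module where the file-level push does not work yet (the TODO above records the compiler error), so raises lists are attached per proc instead, and merkleizeSerializedChunks converts the IOError that faststreams declares, but that an in-memory stream cannot produce, into a Defect. A stand-in sketch using std streams rather than faststreams:

import streams

proc lengthViaStream(data: string): int {.raises: [Defect].} =
  # the streams API declares IOError/OSError, but a StringStream only
  # touches memory, so those branches are unreachable
  try:
    let s = newStringStream(data)
    result = s.readAll().len
  except IOError as e:
    raiseAssert e.msg
  except OSError as e:
    raiseAssert e.msg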
@@ -1,3 +1,6 @@
+{.push raises: [Defect].}
+{.pragma: raisesssz, raises: [Defect, MalformedSszError, SszSizeMismatchError].}
+
 import
   typetraits, options,
   stew/[bitseqs, endians2, objects, bitseqs], serialization/testing/tracing,

@@ -10,19 +13,19 @@ template setOutputSize[R, T](a: var array[R, T], length: int) =
   if length != a.len:
     raise newException(MalformedSszError, "SSZ input of insufficient size")

-proc setOutputSize[T](s: var seq[T], length: int) {.inline.} =
+proc setOutputSize[T](s: var seq[T], length: int) {.inline, raisesssz.} =
   if sizeof(T) * length > maxListAllocation:
     raise newException(MalformedSszError, "SSZ list size is too large to fit in memory")
   s.setLen length

-proc setOutputSize(s: var string, length: int) {.inline.} =
+proc setOutputSize(s: var string, length: int) {.inline, raisesssz.} =
   if length > maxListAllocation:
     raise newException(MalformedSszError, "SSZ string is too large to fit in memory")
   s.setLen length

 # fromSszBytes copies the wire representation to a Nim variable,
 # assuming there's enough data in the buffer
-func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T =
+func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T {.raisesssz.} =
   ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
   ## All integers are serialized as **little endian**.
   if data.len < sizeof(result):

@@ -30,14 +33,14 @@ func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T =

   T.fromBytesLE(data)

-func fromSszBytes*(T: type bool, data: openarray[byte]): T =
+func fromSszBytes*(T: type bool, data: openarray[byte]): T {.raisesssz.} =
   # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
   # definition for now, but maybe this should be a parse error instead?
   if data.len == 0 or data[0] > byte(1):
     raise newException(MalformedSszError, "invalid boolean value")
   data[0] == 1

-func fromSszBytes*(T: type Eth2Digest, data: openarray[byte]): T =
+func fromSszBytes*(T: type Eth2Digest, data: openarray[byte]): T {.raisesssz.} =
   if data.len < sizeof(result.data):
     raise newException(MalformedSszError, "SSZ input of insufficient size")
   copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))

@@ -48,16 +51,16 @@ template fromSszBytes*(T: type Slot, bytes: openarray[byte]): Slot =
 template fromSszBytes*(T: type Epoch, bytes: openarray[byte]): Epoch =
   Epoch fromSszBytes(uint64, bytes)

 template fromSszBytes*(T: type enum, bytes: openarray[byte]): auto =
   T fromSszBytes(uint64, bytes)

 template fromSszBytes*(T: type BitSeq, bytes: openarray[byte]): auto =
   BitSeq @bytes

-func fromSszBytes*[N](T: type BitList[N], bytes: openarray[byte]): auto =
+func fromSszBytes*[N](T: type BitList[N], bytes: openarray[byte]): auto {.raisesssz.} =
   BitList[N] @bytes

-func fromSszBytes*[N](T: type BitArray[N], bytes: openarray[byte]): T =
+func fromSszBytes*[N](T: type BitArray[N], bytes: openarray[byte]): T {.raisesssz.} =
   # A bit vector doesn't have a marker bit, but we'll use the helper from
   # nim-stew to determine the position of the leading (marker) bit.
   # If it's outside the BitArray size, we have an overflow:

@@ -68,7 +71,7 @@ func fromSszBytes*[N](T: type BitArray[N], bytes: openarray[byte]): T =
 proc `[]`[T, U, V](s: openArray[T], x: HSlice[U, V]) {.error:
   "Please don't use openarray's [] as it allocates a result sequence".}

-func readSszValue*(input: openarray[byte], T: type): T =
+func readSszValue*(input: openarray[byte], T: type): T {.raisesssz.} =
   mixin fromSszBytes, toSszType

   type T {.used.} = type(result)
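bytes_reader.nim introduces a named pragma so the recurring raises list is written once; the navigator modules below define their own variants (one adds IOError). A minimal sketch of the mechanism, with an illustrative pragma name:

{.pragma: raisesParse, raises: [Defect, ValueError].}

import strutils

proc parseByte(s: string): byte {.raisesParse.} = # expands to the raises list above
  byte parseInt(s)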
@@ -1,3 +1,6 @@
+{.push raises: [Defect].}
+{.pragma: raisesssz, raises: [Defect, IOError, MalformedSszError, SszSizeMismatchError].}
+
 import
   strutils, parseutils,
   faststreams/output_stream, json_serialization/writer,

@@ -18,8 +21,7 @@ type
       fieldType: TypeInfo
       navigator: proc (m: MemRange): MemRange {. gcsafe
                                                  noSideEffect
-                                                 raises: [Defect,
-                                                          MalformedSszError] }
+                                                 raisesssz }
   TypeInfo = ref object
     case kind: ObjKind
     of Record:

@@ -28,20 +30,19 @@ type
       elemType: TypeInfo
       navigator: proc (m: MemRange, idx: int): MemRange {. gcsafe
                                                            noSideEffect
-                                                           raises: [Defect,
-                                                                    MalformedSszError] }
+                                                           raisesssz }
     else:
       discard

     jsonPrinter: proc (m: MemRange,
                        outStream: OutputStream,
-                       pretty: bool) {.gcsafe.}
+                       pretty: bool) {.gcsafe, raisesssz.}

   DynamicSszNavigator* = object
     m: MemRange
     typ: TypeInfo

-proc jsonPrinterImpl[T](m: MemRange, outStream: OutputStream, pretty: bool) =
+proc jsonPrinterImpl[T](m: MemRange, outStream: OutputStream, pretty: bool) {.raisesssz.} =
   var typedNavigator = sszMount(m, T)
   var jsonWriter = init(JsonWriter, outStream, pretty)
   # TODO: it should be possible to serialize the navigator object

@@ -55,12 +56,12 @@ func findField(fields: seq[FieldInfo], name: string): FieldInfo =
     if field.name == name:
       return field

-func indexableNavigatorImpl[T](m: MemRange, idx: int): MemRange =
+func indexableNavigatorImpl[T](m: MemRange, idx: int): MemRange {.raisesssz.} =
   var typedNavigator = sszMount(m, T)
   getMemRange(typedNavigator[idx])

 func fieldNavigatorImpl[RecordType; FieldType;
-                        fieldName: static string](m: MemRange): MemRange {.raises: [MalformedSszError].} =
+                        fieldName: static string](m: MemRange): MemRange {.raisesssz.} =
   # TODO: Make sure this doesn't fail with a Defect when
   # navigating to an inactive field in a case object.
   var typedNavigator = sszMount(m, RecordType)

@@ -97,12 +98,12 @@ func genTypeInfo(T: type): TypeInfo =

   result.jsonPrinter = jsonPrinterImpl[T]

-func `[]`*(n: DynamicSszNavigator, idx: int): DynamicSszNavigator =
+func `[]`*(n: DynamicSszNavigator, idx: int): DynamicSszNavigator {.raisesssz.} =
   doAssert n.typ.kind == Indexable
   DynamicSszNavigator(m: n.typ.navigator(n.m, idx), typ: n.typ.elemType)

 func navigate*(n: DynamicSszNavigator, path: string): DynamicSszNavigator {.
-    raises: [Defect, ValueError, MalformedSszError] .} =
+    raises: [Defect, KeyError, IOError, MalformedSszError, SszSizeMismatchError, ValueError] .} =
   case n.typ.kind
   of Record:
     let fieldInfo = n.typ.fields.findField(path)

@@ -129,11 +130,11 @@ template navigatePathImpl(nav, iterabalePathFragments: untyped) =
     return

 func navigatePath*(n: DynamicSszNavigator, path: string): DynamicSszNavigator {.
-    raises: [Defect, ValueError, MalformedSszError] .} =
+    raises: [Defect, IOError, ValueError, MalformedSszError, SszSizeMismatchError] .} =
   navigatePathImpl n, split(path, '/')

 func navigatePath*(n: DynamicSszNavigator, path: openarray[string]): DynamicSszNavigator {.
-    raises: [Defect, ValueError, MalformedSszError] .} =
+    raises: [Defect, IOError, ValueError, MalformedSszError, SszSizeMismatchError] .} =
   navigatePathImpl n, path

 func init*(T: type DynamicSszNavigator,

@@ -141,10 +142,10 @@ func init*(T: type DynamicSszNavigator,
   T(m: MemRange(startAddr: unsafeAddr bytes[0], length: bytes.len),
     typ: typeInfo(Navigated))

-proc writeJson*(n: DynamicSszNavigator, outStream: OutputStream, pretty = true) =
+proc writeJson*(n: DynamicSszNavigator, outStream: OutputStream, pretty = true) {.raisesssz.} =
   n.typ.jsonPrinter(n.m, outStream, pretty)

-func toJson*(n: DynamicSszNavigator, pretty = true): string =
+func toJson*(n: DynamicSszNavigator, pretty = true): string {.raisesssz.} =
   var outStream = memoryOutput()
   {.noSideEffect.}:
     # We are assuming that there are no side-effects here

@@ -154,4 +155,3 @@ func toJson*(n: DynamicSszNavigator, pretty = true): string =
     # from a file or a network device.
     writeJson(n, outStream, pretty)
   outStream.getOutput(string)
-
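In the navigator hunks the raises annotation also appears inside proc types stored in object fields, so the effect checking follows the function pointers too, and the named pragma keeps those signatures readable. Reduced sketch, with illustrative names:

import strutils

{.pragma: raisesParse, raises: [Defect, ValueError].}

type
  Navigatorish = object
    run: proc (s: string): int {.nimcall, raisesParse.} # stored callbacks are checked too

let nav = Navigatorish(run: proc (s: string): int {.nimcall, raisesParse.} = parseInt(s))
doAssert nav.run("42") == 42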
@@ -1,3 +1,6 @@
+{.push raises: [Defect].}
+{.pragma: raisesssz, raises: [Defect, MalformedSszError, SszSizeMismatchError].}
+
 import
   stew/[ptrops, objects], stew/ranges/ptr_arith,
   ./types, ./bytes_reader

@@ -35,7 +38,7 @@ template toOpenArray(m: MemRange): auto =

 func navigateToField*[T](n: SszNavigator[T],
                          fieldName: static string,
-                         FieldType: type): SszNavigator[FieldType] =
+                         FieldType: type): SszNavigator[FieldType] {.raisesssz.} =
   mixin toSszType
   type SszFieldType = type toSszType(default FieldType)

@@ -67,7 +70,7 @@ template `.`*[T](n: SszNavigator[T], field: untyped): auto =
   type FieldType = type(default(RecType).field)
   navigateToField(n, astToStr(field), FieldType)

-func indexVarSizeList(m: MemRange, idx: int): MemRange =
+func indexVarSizeList(m: MemRange, idx: int): MemRange {.raisesssz.} =
   template readOffset(pos): int =
     int fromSszBytes(uint32, makeOpenArray(offset(m.startAddr, pos), offsetSize))

@@ -114,7 +117,7 @@ template `[]`*[T](n: SszNavigator[seq[T]], idx: int): SszNavigator[T] =
 template `[]`*[R, T](n: SszNavigator[array[R, T]], idx: int): SszNavigator[T] =
   indexList(n, idx, T)

-func `[]`*[T](n: SszNavigator[T]): T =
+func `[]`*[T](n: SszNavigator[T]): T {.raisesssz.} =
   mixin toSszType, fromSszBytes
   type SszRepr = type(toSszType default(T))
   when type(SszRepr) is type(T):

@@ -122,6 +125,6 @@ func `[]`*[T](n: SszNavigator[T]): T =
   else:
     fromSszBytes(T, toOpenArray(n.m))

-converter derefNavigator*[T](n: SszNavigator[T]): T =
+converter derefNavigator*[T](n: SszNavigator[T]): T {.raisesssz.} =
   n[]
@@ -1,3 +1,5 @@
+{.push raises: [Defect].}
+
 import
   tables, options,
   stew/shims/macros, stew/[objects, bitseqs],

@@ -185,9 +187,12 @@ proc fieldInfos*(RecordType: type): seq[tuple[name: string,
       fieldOffset = val[]
       val[] += fieldSize
     do:
-      let parentBranch = nestedUnder.getOrDefault(fieldCaseDiscriminator, "")
-      fieldOffset = offsetInBranch[parentBranch]
-      offsetInBranch[branchKey] = fieldOffset + fieldSize
+      try:
+        let parentBranch = nestedUnder.getOrDefault(fieldCaseDiscriminator, "")
+        fieldOffset = offsetInBranch[parentBranch]
+        offsetInBranch[branchKey] = fieldOffset + fieldSize
+      except KeyError as e:
+        raiseAssert e.msg

     result.add((fieldName, fieldOffset, fixedSize, branchKey))
@@ -30,6 +30,8 @@
 # improvements to be made - other than that, keep things similar to spec for
 # now.

+{.push raises: [Defect].}
+
 import
   chronicles,
   ./extras, ./ssz, metrics,

@@ -95,11 +97,17 @@ proc process_slots*(state: var BeaconState, slot: Slot) {.nbench.}=
     let is_epoch_transition = (state.slot + 1) mod SLOTS_PER_EPOCH == 0
     if is_epoch_transition:
       # Note: Genesis epoch = 0, no need to test if before Genesis
-      beacon_previous_validators.set(get_epoch_validator_count(state))
+      try:
+        beacon_previous_validators.set(get_epoch_validator_count(state))
+      except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
+        trace "Couldn't update metrics", msg = e.msg
       process_epoch(state)
     state.slot += 1
     if is_epoch_transition:
-      beacon_current_validators.set(get_epoch_validator_count(state))
+      try:
+        beacon_current_validators.set(get_epoch_validator_count(state))
+      except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
+        trace "Couldn't update metrics", msg = e.msg

 # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#verify_block_signature
 proc verify_block_signature*(

@@ -234,11 +242,17 @@ proc process_slots*(state: var HashedBeaconState, slot: Slot) =
   let is_epoch_transition = (state.data.slot + 1) mod SLOTS_PER_EPOCH == 0
   if is_epoch_transition:
     # Note: Genesis epoch = 0, no need to test if before Genesis
-    beacon_previous_validators.set(get_epoch_validator_count(state.data))
+    try:
+      beacon_previous_validators.set(get_epoch_validator_count(state.data))
+    except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
+      trace "Couldn't update metrics", msg = e.msg
     process_epoch(state.data)
   state.data.slot += 1
   if is_epoch_transition:
-    beacon_current_validators.set(get_epoch_validator_count(state.data))
+    try:
+      beacon_current_validators.set(get_epoch_validator_count(state.data))
+    except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
+      trace "Couldn't update metrics", msg = e.msg
   state.root = hash_tree_root(state.data)

 proc state_transition*(
@@ -44,7 +44,7 @@ proc generateDeposits*(totalValidators: int,
     if randomKeys:
       (pubKey, privKey) = crypto.newKeyPair().tryGet()
     else:
-      privKey = makeInteropPrivKey(i)
+      privKey = makeInteropPrivKey(i).tryGet()
       pubKey = privKey.toPubKey()

     let dp = makeDeposit(pubKey, privKey)

@@ -1,3 +1,5 @@
+{.push raises: [Defect].}
+
 when not defined(nimscript):
   import times
   let copyrights* = "Copyright (c) 2019-" & $(now().utc.year) & " Status Research & Development GmbH"

@@ -23,4 +25,3 @@ const

   fullVersionStr* =
     versionAsStr & " (" & gitRevision & ")"
-

@@ -9,12 +9,12 @@ import
   # Standard library
   std/strformat, std/tables, std/options,
   # Status libraries
-  stew/[result, endians2],
+  stew/[results, endians2],
   # Internals
   ../../beacon_chain/spec/[datatypes, digest],
   ../../beacon_chain/fork_choice/[fork_choice, fork_choice_types]

-export result, datatypes, digest, fork_choice, fork_choice_types, tables, options
+export results, datatypes, digest, fork_choice, fork_choice_types, tables, options

 func fakeHash*(index: SomeInteger): Eth2Digest =
   ## Create fake hashes

@@ -119,7 +119,7 @@ suiteReport "Interop":
   timedTest "Mocked start private key":
     for i, k in privateKeys:
       let
-        key = makeInteropPrivKey(i)
+        key = makeInteropPrivKey(i)[]
         v = k.parse(UInt256, 16)

       check:

@@ -144,7 +144,7 @@ suiteReport "Interop":

     for i in 0..<64:
       let
-        privKey = makeInteropPrivKey(i)
+        privKey = makeInteropPrivKey(i)[]
       deposits.add(makeDeposit(privKey.toPubKey(), privKey))

     const genesis_time = 1570500000

@@ -1,3 +1,5 @@
+{.used.}
+
 import unittest
 import chronos
 import ../beacon_chain/sync_manager

@@ -1 +1 @@
-Subproject commit 114cdccaa087c54abcc4dad9ed3366af247de79e
+Subproject commit 3e7f422f11754732df673ed280389024e1ed3cb1