commit 56b7ae31f9

@@ -56,5 +56,6 @@ script:
   # Building Nim-1.0.4 takes up to 10 minutes on Travis - the time limit after which jobs are cancelled for having no output
   - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" V=1 update # to allow a newer Nim version to be detected
   - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}"
+  - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" NIMFLAGS="-d:NETWORK_TYPE=libp2p"
   - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" DISABLE_TEST_FIXTURES_SCRIPT=1 test

@@ -22,6 +22,7 @@ def runStages() {
     "tools": {
       stage("Tools") {
         sh "make -j${env.NPROC}"
+        sh "make -j${env.NPROC} NIMFLAGS='-d:NETWORK_TYPE=libp2p'"
       }
     },
     "test suite": {

@@ -69,6 +69,7 @@ jobs:
       mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update
       mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls
       mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} P2PD_CACHE=p2pdCache
+      mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} P2PD_CACHE=p2pdCache NIMFLAGS="-d:NETWORK_TYPE=libp2p"
       file build/beacon_node
       mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test
     displayName: 'build and test'

@@ -1,11 +1,11 @@
 import
   algorithm, typetraits,
-  stew/varints, stew/shims/[macros, tables], chronos, chronicles,
+  stew/[varints,base58], stew/shims/[macros, tables], chronos, chronicles,
   faststreams/output_stream, serialization,
   json_serialization/std/options, eth/p2p/p2p_protocol_dsl,
   # TODO: create simpler to use libp2p modules that use re-exports
   libp2p/[switch, multistream, connection,
-          base58, multiaddress, peerinfo, peer,
+          multiaddress, peerinfo, peer,
           crypto/crypto, protocols/identify, protocols/protocol],
   libp2p/muxers/mplex/[mplex, types],
   libp2p/protocols/secure/[secure, secio],

@@ -105,8 +105,8 @@ template openStream(node: Eth2Node, peer: Peer, protocolId: string): untyped =
   dial(node.switch, peer.info, protocolId)

 proc peer(stream: P2PStream): PeerID =
-  # TODO: Can this be `none`?
-  stream.peerInfo.get.peerId
+  # TODO: Can this be `nil`?
+  stream.peerInfo.peerId
 #
 # End of compatibility layer

@@ -120,8 +120,8 @@ proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer {.gcsafe.} =
     node.peers[peerId] = result

 proc peerFromStream(network: Eth2Node, stream: P2PStream): Peer {.gcsafe.} =
-  # TODO: Can this be `none`?
-  return network.getPeer(stream.peerInfo.get)
+  # TODO: Can this be `nil`?
+  return network.getPeer(stream.peerInfo)

 proc disconnect*(peer: Peer, reason: DisconnectionReason, notifyOtherPeer = false) {.async.} =
   # TODO: How should we notify the other peer?

@@ -1,8 +1,9 @@
 import
   endians, stew/ptrops, stew/ranges/ptr_arith,
   ../beacon_chain/[ssz, state_transition],
-  ../beacon_chain/spec/[datatypes, helpers, digest, validator, beaconstate, state_transition_block],
-  # Required for deserialisation of ValidatorSig in Attestation due to
+  ../beacon_chain/spec/[datatypes, helpers, digest, validator, beaconstate,
+    state_transition_block],
+  # Required for deserialisation of ValidatorSig in Attestation due to
   # https://github.com/nim-lang/Nim/issues/11225
   ../beacon_chain/spec/crypto,
   ../beacon_chain/extras

@@ -19,29 +20,28 @@ type
     state: BeaconState
     attestation: Attestation
   # This and AssertionError are raised to indicate programming bugs
-  # Used as a wrapper to allow exception tracking to identify unexpected exceptions
+  # A wrapper to allow exception tracking to identify unexpected exceptions
   FuzzCrashError = object of Exception

 # TODO: change ptr uint to ptr csize_t when available in newer Nim version.
 proc copyState(state: BeaconState, output: ptr byte,
-    output_size: ptr uint): bool {.raises:[FuzzCrashError, Defect].} =
+    output_size: ptr uint): bool {.raises: [FuzzCrashError, Defect].} =
   var resultState: seq[byte]

   try:
     resultState = SSZ.encode(state)
   except IOError as e:
-    # TODO is an IOError indicative of a bug? e.g. any state passed to it after processing should be valid and serializable?
-    # How can this raise an IOError (as the writer isn't to a file?)?
+    # Shouldn't occur as the writer isn't a file
     raise newException(FuzzCrashError, "Unexpected failure to serialize.", e)

   if unlikely(resultState.len.uint > output_size[]):
-    let msg = (
-      "Not enough output buffer provided to nimbus harness. Provided: " &
-      $(output_size[]) &
-      "Required: " &
-      $resultState.len.uint
-    )
-    raise newException(FuzzCrashError, msg)
+    let msg = (
+      "Not enough output buffer provided to nimbus harness. Provided: " &
+      $(output_size[]) &
+      "Required: " &
+      $resultState.len.uint
+    )
+    raise newException(FuzzCrashError, msg)
   output_size[] = resultState.len.uint
   # TODO: improvement might be to write directly to buffer with OutputStream
   # and SszWriter (but then need to ensure length doesn't overflow)

@@ -50,7 +50,7 @@ proc copyState(state: BeaconState, output: ptr byte,


 proc nfuzz_attestation(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint): bool {.exportc, raises:[FuzzCrashError, Defect].} =
+    output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var
     data: AttestationInput
     cache = get_empty_per_epoch_cache()

@@ -59,23 +59,31 @@ proc nfuzz_attestation(input: openArray[byte], output: ptr byte,
     data = SSZ.decode(input, AttestationInput)
   except MalformedSszError, SszSizeMismatchError:
     let e = getCurrentException()
-    raise newException(FuzzCrashError, "SSZ deserialisation failed, likely bug in preprocessing.", e)
+    raise newException(
+      FuzzCrashError,
+      "SSZ deserialisation failed, likely bug in preprocessing.",
+      e,
+    )

   try:
     result = process_attestation(data.state, data.attestation,
       {skipValidation}, cache)
-  except ValueError:
-    # TODO is a ValueError indicative of correct or incorrect processing code?
-    # If correct (but given invalid input), we should return false
-    # If incorrect, we should allow it to crash
-    result = false
+  except ValueError as e:
+    # These exceptions are expected to be raised by chronicles logging:
+    # See status-im/nim-chronicles#60
+    # TODO remove this when resolved
+    raise newException(
+      FuzzCrashError,
+      "Unexpected (logging?) error in attestation processing",
+      e
+    )

   if result:
     result = copyState(data.state, output, output_size)


 proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint): bool {.exportc, raises:[FuzzCrashError, Defect].} =
+    output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var
     data: AttesterSlashingInput
     cache = get_empty_per_epoch_cache()

@@ -84,51 +92,61 @@ proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
     data = SSZ.decode(input, AttesterSlashingInput)
   except MalformedSszError, SszSizeMismatchError:
     let e = getCurrentException()
-    raise newException(FuzzCrashError, "SSZ deserialisation failed, likely bug in preprocessing.", e)
+    raise newException(
+      FuzzCrashError,
+      "SSZ deserialisation failed, likely bug in preprocessing.",
+      e,
+    )

   try:
     result = process_attester_slashing(data.state, data.attesterSlashing, cache)
-  except ValueError:
-    # TODO is a ValueError indicative of correct or incorrect processing code?
-    # If correct (but given invalid input), we should return false
-    # If incorrect, we should allow it to crash
-    result = false
+  except ValueError as e:
+    # TODO remove when status-im/nim-chronicles#60 is resolved
+    raise newException(
+      FuzzCrashError,
+      "Unexpected (logging?) error in attester slashing",
+      e,
+    )

   if result:
     result = copyState(data.state, output, output_size)


 proc nfuzz_block(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint): bool {.exportc, raises:[FuzzCrashError, Defect].} =
+    output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var data: BlockInput

   try:
     data = SSZ.decode(input, BlockInput)
   except MalformedSszError, SszSizeMismatchError:
     let e = getCurrentException()
-    raise newException(FuzzCrashError, "SSZ deserialisation failed, likely bug in preprocessing.", e)
+    raise newException(
+      FuzzCrashError,
+      "SSZ deserialisation failed, likely bug in preprocessing.",
+      e,
+    )

   try:
     result = state_transition(data.state, data.beaconBlock, {})
-  except IOError as e:
-    # TODO why an IOError?
-    raise newException(FuzzCrashError, "Unexpected IOError in state transition", e)
+  except IOError, ValueError:
+    # TODO remove when status-im/nim-chronicles#60 is resolved
+    let e = getCurrentException()
+    raise newException(
+      FuzzCrashError,
+      "Unexpected (logging?) error in state transition",
+      e,
+    )
   except Exception as e:
     # TODO why an Exception?
     # Lots of vendor code looks like it might raise a bare exception type
-    raise newException(FuzzCrashError, "Unexpected IOError in state transition", e)
-  except ValueError:
-    # TODO is a ValueError indicative of correct or incorrect processing code?
-    # If correct (but given invalid input), we should return false
-    # If incorrect, we should allow it to crash
-    result = false
+    raise newException(FuzzCrashError, "Unexpected Exception in state transition", e)

   if result:
     result = copyState(data.state, output, output_size)


 proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
-    output_size: ptr uint): bool {.exportc, raises:[FuzzCrashError, Defect].} =
+    output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var
     data: BlockHeaderInput
     cache = get_empty_per_epoch_cache()

@@ -137,19 +155,23 @@ proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
     data = SSZ.decode(input, BlockHeaderInput)
   except MalformedSszError, SszSizeMismatchError:
     let e = getCurrentException()
-    raise newException(FuzzCrashError, "SSZ deserialisation failed, likely bug in preprocessing.", e)
+    raise newException(
+      FuzzCrashError,
+      "SSZ deserialisation failed, likely bug in preprocessing.",
+      e,
+    )

   try:
     # TODO disable bls
     result = process_block_header(data.state, data.beaconBlock, {}, cache)
-  except IOError as e:
-    # TODO why an IOError? - is this expected/should we return false?
-    raise newException(FuzzCrashError, "Unexpected IOError in block header processing", e)
-  except ValueError:
-    # TODO is a ValueError indicative of correct or incorrect processing code?
-    # If correct (but given invalid input), we should return false
-    # If incorrect, we should allow it to crash
-    result = false
+  except IOError, ValueError:
+    let e = getCurrentException()
+    # TODO remove when status-im/nim-chronicles#60 is resolved
+    raise newException(
+      FuzzCrashError,
+      "Unexpected IOError in block header processing",
+      e,
+    )

   if result:
     result = copyState(data.state, output, output_size)

@@ -160,7 +182,7 @@ proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
 # TODO: rework to copy immediatly in an uint8 openArray, considering we have to
 # go over the list anyhow?
 proc nfuzz_shuffle(input_seed: ptr byte, output: var openArray[uint64]): bool
-    {.exportc, raises:[Defect].} =
+    {.exportc, raises: [Defect].} =
   var seed: Eth2Digest
   # Should be OK as max 2 bytes are passed by the framework.
   let list_size = output.len.uint64

@@ -170,7 +192,10 @@ proc nfuzz_shuffle(input_seed: ptr byte, output: var openArray[uint64]): bool
   var shuffled_seq: seq[ValidatorIndex]
   shuffled_seq = get_shuffled_seq(seed, list_size)

-  doAssert(list_size == shuffled_seq.len.uint64, "Shuffled list should be of requested size.")
+  doAssert(
+    list_size == shuffled_seq.len.uint64,
+    "Shuffled list should be of requested size."
+  )

   for i in 0..<list_size:
     # ValidatorIndex is currently wrongly uint32 so we copy this 1 by 1,

@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -e

 cd $(dirname "$0")

@@ -32,7 +32,7 @@ echo "Beacon node data dir : ${DATA_DIR:="build/testnet-reset-data/$NETWORK"}
 echo "Nim build flags : $NETWORK_NIM_FLAGS"

 while true; do
-  read -p "Continue?" yn
+  read -p "Continue? [yn] " yn
   case $yn in
     [Yy]* ) break;;
     [Nn]* ) exit 1;;

@@ -68,6 +68,8 @@ if [ "$ETH1_PRIVATE_KEY" != "" ]; then
 fi

 cd docker

+echo "Building Docker image..."
+make build

 $DOCKER_BEACON_NODE makeDeposits \

@@ -111,25 +113,30 @@ if [[ $PUBLISH_TESTNET_RESETS != "0" ]]; then
     > /tmp/reset-network.sh

   bash /tmp/reset-network.sh
+  rm /tmp/reset-network.sh

   echo Uploading bootstrap node network key
   BOOTSTRAP_NODE_DOCKER_PATH=/docker/beacon-node-$NETWORK-1/data/BeaconNode/
   scp "$DATA_DIR_ABS/privkey.protobuf" $BOOTSTRAP_HOST:/tmp/
   ssh $BOOTSTRAP_HOST "sudo install -o dockremap -g docker /tmp/privkey.protobuf $BOOTSTRAP_NODE_DOCKER_PATH"

-  echo Publishing docker image...
-  make push-last
-
   echo Persisting testnet data to git...
   pushd "$NETWORK_DIR_ABS"
   git add $COMMITTED_FILES
   git commit -m "Reset of Nimbus $NETWORK"
   git push
   popd

-  ../env.sh nim --verbosity:0 manage_testnet_hosts.nims restart_nodes \
-    --network=$NETWORK \
-    > /tmp/restart-nodes.sh
-
-  bash /tmp/restart-nodes.sh
 fi
+
+echo "Publishing Docker image..."
+make push-last
+
+#echo -e "\nA Watchtower systemd service will pull the new image and start new containers based on it, on each testnet host, in the next 2 minutes."
+
+../env.sh nim --verbosity:0 manage_testnet_hosts.nims restart_nodes \
+  --network=$NETWORK \
+  > /tmp/restart-nodes.sh
+
+bash /tmp/restart-nodes.sh
+rm /tmp/restart-nodes.sh

@@ -1 +1 @@
-Subproject commit 77e866d29a58ad6cfefaf9c8a8ee7159a43bcfe5
+Subproject commit d42833947a4baddf21da8ac3105e2d5956a6daac