Merge pull request #706 from status-im/devel
Testnet release 29-01-2020
Commit cf14b7ac37
@@ -1,6 +1,6 @@
 import
-  options,
-  serialization,
+  options, typetraits,
+  serialization, chronicles,
   spec/[datatypes, digest, crypto],
   kvstore, ssz

@@ -100,12 +100,17 @@ proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =

 proc get(db: BeaconChainDB, key: auto, T: typedesc): Option[T] =
   var res: Option[T]
-  discard db.backend.get(key, proc (data: openArray[byte]) =
+  discard db.backend.get(key) do (data: openArray[byte]):
     try:
       res = some(SSZ.decode(data, T))
     except SerializationError:
-      discard
-  )
+      # Please note that this is intentionally a normal assert.
+      # We consider this a hard failure in debug mode, because
+      # it suggests a corrupted database. Release builds "recover"
+      # from the situation by failing to deliver a result from the
+      # database.
+      assert false
+      error "Corrupt database entry", key, `type` = name(T)
   res

 proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Option[SignedBeaconBlock] =

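A minimal, self-contained sketch of the pattern introduced above — hard failure via assert in debug builds, log-and-deliver-nothing in release — using stdlib echo in place of chronicles' error; the helper names below are illustrative, not part of the codebase:

import options

type Corruptible = object
  payload: string

proc decodePayload(data: string): Option[Corruptible] =
  # Stand-in for SSZ.decode; an empty blob plays the role of a corrupt entry.
  if data.len == 0:
    # Hard failure in debug builds (the assert raises); compiled out in
    # release builds, which only log and return no result.
    assert false
    echo "Corrupt database entry"      # chronicles' `error` in the real code
    return none(Corruptible)
  some(Corruptible(payload: data))
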
@@ -436,7 +436,7 @@ func get_indexed_attestation(state: BeaconState, attestation: Attestation,
     signature: attestation.signature
   )

-# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#attestations
+# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#attestations
 proc check_attestation*(
     state: BeaconState, attestation: Attestation, flags: UpdateFlags,
     stateCache: var StateCache): bool =

@@ -453,11 +453,23 @@ proc check_attestation*(
   trace "process_attestation: beginning",
     attestation=attestation

+  if not (data.index < get_committee_count_at_slot(state, data.slot)):
+    warn("Data index exceeds committee count",
+      data_index = data.index,
+      committee_count = get_committee_count_at_slot(state, data.slot))
+    return
+
   if not (data.target.epoch == get_previous_epoch(state) or
       data.target.epoch == get_current_epoch(state)):
     warn("Target epoch not current or previous epoch")
     return

+  if not (data.target.epoch == compute_epoch_at_slot(data.slot)):
+    warn("Target epoch inconsistent with epoch of data slot",
+      target_epoch = data.target.epoch,
+      data_slot_epoch = compute_epoch_at_slot(data.slot))
+    return
+
   if not (data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= stateSlot):
     warn("Attestation too new",
       attestation_slot = shortLog(data.slot),

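The checks above follow a guard-clause shape: each precondition is tested with if not (...), a warning is logged, and the proc falls out early with the default false result. A toy sketch of that shape, with invented names and echo standing in for chronicles' structured warn:

proc checkSlotWindow(dataSlot, stateSlot, minDelay, slotsPerEpoch: uint64): bool =
  # Attestation must be at least `minDelay` slots old...
  if not (dataSlot + minDelay <= stateSlot):
    echo "Attestation too new"
    return
  # ...but no older than one epoch.
  if not (stateSlot <= dataSlot + slotsPerEpoch):
    echo "Attestation too old"
    return
  true
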
@@ -269,8 +269,17 @@ template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
   mixin fromSszBytes
   fromSszBytes(T, bytes)

-proc readValue*(r: var SszReader, val: var auto) =
-  val = readSszValue(r.stream.readBytes(r.stream.endPos), val.type)
+proc readValue*[T](r: var SszReader, val: var T) =
+  const minimalSize = fixedPortionSize(T)
+  when isFixedSize(T):
+    if r.stream[].ensureBytes(minimalSize):
+      val = readSszValue(r.stream.readBytes(minimalSize), T)
+    else:
+      raise newException(MalformedSszError, "SSZ input of insufficient size")
+  else:
+    # TODO Read the fixed portion first and precisely measure the size of
+    # the dynamic portion to consume the right number of bytes.
+    val = readSszValue(r.stream.readBytes(r.stream.endPos), T)

 proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) =
   let length = r.stream.readVarint(uint64)

@@ -3,10 +3,23 @@ import
   stew/[objects, bitseqs], serialization/testing/tracing,
   ../spec/[digest, datatypes], ./types

-template setLen[R, T](a: var array[R, T], length: int) =
+const
+  maxListAllocation = 1 * 1024 * 1024 * 1024 # 1 GiB
+
+template setOutputSize[R, T](a: var array[R, T], length: int) =
   if length != a.len:
     raise newException(MalformedSszError, "SSZ input of insufficient size")

+proc setOutputSize[T](s: var seq[T], length: int) {.inline.} =
+  if sizeof(T) * length > maxListAllocation:
+    raise newException(MalformedSszError, "SSZ list size is too large to fit in memory")
+  s.setLen length
+
+proc setOutputSize(s: var string, length: int) {.inline.} =
+  if length > maxListAllocation:
+    raise newException(MalformedSszError, "SSZ string is too large to fit in memory")
+  s.setLen length
+
 template assignNullValue(loc: untyped, T: type): auto =
   when T is ref|ptr:
     loc = nil

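setOutputSize caps how much memory an attacker-controlled length field can trigger before anything is allocated. A self-contained sketch of the same idea, with a made-up limit and error type rather than the library's own:

const maxAllocation = 1 shl 20          # 1 MiB cap, purely illustrative

type TooLargeError = object of CatchableError

proc allocChecked[T](length: int): seq[T] =
  # Reject the declared length before any memory is touched.
  if length < 0 or sizeof(T) * length > maxAllocation:
    raise newException(TooLargeError, "declared length exceeds allocation cap")
  newSeq[T](length)

when isMainModule:
  doAssert allocChecked[uint64](16).len == 16
  try:
    discard allocChecked[uint64](1 shl 27)
  except TooLargeError:
    echo "rejected oversized length"
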
@@ -20,8 +33,9 @@ template assignNullValue(loc: untyped, T: type): auto =
 func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T =
   ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
   ## All integers are serialized as **little endian**.
-  ## TODO: Assumes data points to a sufficiently large buffer
-  doAssert data.len == sizeof(result)
+  if data.len < sizeof(result):
+    raise newException(MalformedSszError, "SSZ input of insufficient size")

   # TODO: any better way to get a suitably aligned buffer in nim???
   # see also: https://github.com/nim-lang/Nim/issues/9206
   var tmp: uint64

@@ -37,10 +51,13 @@ func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T =
 func fromSszBytes*(T: type bool, data: openarray[byte]): T =
   # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
   # definition for now, but maybe this should be a parse error instead?
-  fromSszBytes(uint8, data) != 0
+  if data.len == 0 or data[0] > byte(1):
+    raise newException(MalformedSszError, "invalid boolean value")
+  data[0] == 1

 func fromSszBytes*(T: type Eth2Digest, data: openarray[byte]): T =
-  doAssert data.len == sizeof(result.data)
+  if data.len < sizeof(result.data):
+    raise newException(MalformedSszError, "SSZ input of insufficient size")
   copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))

 template fromSszBytes*(T: type Slot, bytes: openarray[byte]): Slot =

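The new fromSszBytes overloads replace doAsserts with MalformedSszError so short or out-of-range inputs are rejected instead of crashing the node. A rough, standalone illustration of the same checks; DecodeError and the proc names here are assumptions, not the library's API:

type DecodeError = object of CatchableError

proc decodeUintLE[T: SomeUnsignedInt](data: openArray[byte]): T =
  # Reject short input instead of asserting; little-endian byte order.
  if data.len < sizeof(T):
    raise newException(DecodeError, "input of insufficient size")
  for i in 0 ..< sizeof(T):
    result = result or (T(data[i]) shl (8 * i))

proc decodeBool(data: openArray[byte]): bool =
  # Only 0x00 and 0x01 are accepted; anything else is malformed.
  if data.len == 0 or data[0] > 1'u8:
    raise newException(DecodeError, "invalid boolean value")
  data[0] == 1'u8

when isMainModule:
  doAssert decodeUintLE[uint16]([byte 0x34, 0x12]) == 0x1234'u16
  doAssert decodeBool([byte 1])
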
@@ -58,10 +75,18 @@ template fromSszBytes*(T: type BitSeq, bytes: openarray[byte]): auto =
 func fromSszBytes*[N](T: type BitList[N], bytes: openarray[byte]): auto =
   BitList[N] @bytes

+func fromSszBytes*[N](T: type BitArray[N], bytes: openarray[byte]): T =
+  # A bit vector doesn't have a marker bit, but we'll use the helper from
+  # nim-stew to determine the position of the leading (marker) bit.
+  # If it's outside the BitArray size, we have an overflow:
+  if bitsLen(bytes) > N - 1:
+    raise newException(MalformedSszError, "SSZ bit array overflow")
+  copyMem(addr result.bytes[0], unsafeAddr bytes[0], bytes.len)
+
 func readSszValue*(input: openarray[byte], T: type): T =
   mixin fromSszBytes, toSszType

-  type T {.used.}= type(result)
+  type T {.used.} = type(result)

   template readOffset(n: int): int {.used.}=
     int fromSszBytes(uint32, input[n ..< n + offsetSize])

@@ -69,17 +94,20 @@ func readSszValue*(input: openarray[byte], T: type): T =
   when useListType and result is List:
     type ElemType = type result[0]
     result = T readSszValue(input, seq[ElemType])

   elif result is ptr|ref:
     if input.len > 0:
       new result
       result[] = readSszValue(input, type(result[]))

   elif result is Option:
     if input.len > 0:
       result = some readSszValue(input, result.T)

   elif result is string|seq|openarray|array:
     type ElemType = type result[0]
     when ElemType is byte|char:
-      result.setLen input.len
+      result.setOutputSize input.len
       if input.len > 0:
         copyMem(addr result[0], unsafeAddr input[0], input.len)

@@ -91,7 +119,7 @@ func readSszValue*(input: openarray[byte], T: type): T =
         ex.actualSszSize = input.len
         ex.elementSize = elemSize
         raise ex
-      result.setLen input.len div elemSize
+      result.setOutputSize input.len div elemSize
       trs "READING LIST WITH LEN ", result.len
       for i in 0 ..< result.len:
         trs "TRYING TO READ LIST ELEM ", i

@@ -104,23 +132,36 @@ func readSszValue*(input: openarray[byte], T: type): T =
       # This is an empty list.
      # The default initialization of the return value is fine.
       return
+    elif input.len < offsetSize:
+      raise newException(MalformedSszError, "SSZ input of insufficient size")

     var offset = readOffset 0
     trs "GOT OFFSET ", offset
     let resultLen = offset div offsetSize
     trs "LEN ", resultLen
-    result.setLen resultLen
+    result.setOutputSize resultLen
     for i in 1 ..< resultLen:
       let nextOffset = readOffset(i * offsetSize)
-      if nextOffset == offset:
-        assignNullValue result[i - 1], ElemType
+      if nextOffset <= offset:
+        raise newException(MalformedSszError, "SSZ list element offsets are not monotonically increasing")
+      elif nextOffset > input.len:
+        raise newException(MalformedSszError, "SSZ list element offset points past the end of the input")
       else:
         result[i - 1] = readSszValue(input[offset ..< nextOffset], ElemType)
       offset = nextOffset

     result[resultLen - 1] = readSszValue(input[offset ..< input.len], ElemType)

+  elif result is SomeInteger|bool|enum|BitArray:
+    trs "READING BASIC TYPE ", type(result).name, " input=", input.len
+    result = fromSszBytes(type(result), input)
+    trs "RESULT WAS ", repr(result)
+
   elif result is object|tuple:
+    const minimallyExpectedSize = fixedPortionSize(T)
+    if input.len < minimallyExpectedSize:
+      raise newException(MalformedSszError, "SSZ input of insufficient size")
+
     enumInstanceSerializedFields(result, fieldName, field):
       const boundingOffsets = T.getFieldBoundingOffsets(fieldName)
       trs "BOUNDING OFFSET FOR FIELD ", fieldName, " = ", boundingOffsets

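Variable-size SSZ lists are prefixed by a table of 4-byte offsets; the new checks reject tables whose offsets do not increase monotonically or point past the end of the input. A standalone sketch of that validation over a raw byte buffer, with illustrative names only:

type MalformedInput = object of CatchableError

proc readUint32LE(data: openArray[byte], pos: int): uint32 =
  if pos + 4 > data.len:
    raise newException(MalformedInput, "offset table truncated")
  for i in 0 ..< 4:
    result = result or (uint32(data[pos + i]) shl (8 * i))

proc elementSlices(input: openArray[byte]): seq[(int, int)] =
  ## Split a variable-size list into (start, stop) element slices,
  ## rejecting non-monotonic offsets and offsets past the end.
  if input.len == 0:
    return                              # empty list
  var offset = int(readUint32LE(input, 0))
  if offset < 4 or offset > input.len or offset mod 4 != 0:
    raise newException(MalformedInput, "invalid first offset")
  let count = offset div 4
  for i in 1 ..< count:
    let next = int(readUint32LE(input, i * 4))
    if next <= offset:
      raise newException(MalformedInput, "offsets are not monotonically increasing")
    if next > input.len:
      raise newException(MalformedInput, "offset points past the end of the input")
    result.add((offset, next))
    offset = next
  result.add((offset, input.len))
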
@@ -139,6 +180,10 @@ func readSszValue*(input: openarray[byte], T: type): T =
           endOffset = if boundingOffsets[1] == -1: input.len
                       else: readOffset(boundingOffsets[1])
         trs "VAR FIELD ", startOffset, "-", endOffset
+        if startOffset > endOffset:
+          raise newException(MalformedSszError, "SSZ field offsets are not monotonically increasing")
+        elif endOffset > input.len:
+          raise newException(MalformedSszError, "SSZ field offset points past the end of the input")

         # TODO The extra type escaping here is a work-around for a Nim issue:
         when type(FieldType) is type(SszType):

@@ -151,10 +196,5 @@ func readSszValue*(input: openarray[byte], T: type): T =
           trs "READING FOREIGN ", fieldName, ": ", name(SszType)
           field = fromSszBytes(FieldType, input[startOffset ..< endOffset])

-  elif result is SomeInteger|bool|enum:
-    trs "READING BASIC TYPE ", type(result).name, " input=", input.len
-    result = fromSszBytes(type(result), input)
-    trs "RESULT WAS ", repr(result)
-
   else:
     unsupported T

@@ -12,6 +12,8 @@ IMAGE_NAME ?= statusteam/nimbus_beacon_node:$(IMAGE_TAG)
 CURRENT_BRANCH = $(shell git rev-parse --abbrev-ref HEAD)
+COMPUTER_SAYS_NO = { echo "I'm sorry, Dave. I'm afraid I can't do that."; exit 1; }

 .PHONY: build push push-last

 build:
 	@ DOCKER_BUILDKIT=1 \
 		docker build \

@@ -51,7 +51,7 @@ iterator nodes: Node =
   for i in 0 ..< serverCount:
     let
       serverShortName = if i == 0: "master-01" else: &"node-0{i}"
-      server = &"{serverShortName}.do-ams3.nimbus.test.statusim.net"
+      server = &"{serverShortName}.aws-eu-central-1a.nimbus.test.statusim.net"

     for j in 0 ..< instancesCount:
       yield Node(id: i*instancesCount + j,

@@ -20,7 +20,13 @@ bool nfuzz_block(uint8_t* input_ptr, size_t input_size,
     uint8_t* output_ptr, size_t* output_size);
 bool nfuzz_block_header(uint8_t* input_ptr, size_t input_size,
     uint8_t* output_ptr, size_t* output_size);
+bool nfuzz_deposit(uint8_t* input_ptr, size_t input_size,
+    uint8_t* output_ptr, size_t* output_size);
+bool nfuzz_proposer_slashing(uint8_t* input_ptr, size_t input_size,
+    uint8_t* output_ptr, size_t* output_size);
 bool nfuzz_shuffle(uint8_t* seed_ptr, uint64_t* output_ptr, size_t output_size);
+bool nfuzz_voluntary_exit(uint8_t* input_ptr, size_t input_size,
+    uint8_t* output_ptr, size_t* output_size);

 #ifdef __cplusplus
 }

@@ -9,16 +9,25 @@ import
   ../beacon_chain/extras

 type
+  AttestationInput = object
+    state: BeaconState
+    attestation: Attestation
+  AttesterSlashingInput = object
+    state: BeaconState
+    attesterSlashing: AttesterSlashing
   BlockInput = object
     state: BeaconState
     beaconBlock: BeaconBlock
   BlockHeaderInput = BlockInput
-  AttesterSlashingInput = object
+  DepositInput = object
     state: BeaconState
-    attesterSlashing: AttesterSlashing
-  AttestationInput = object
+    deposit: Deposit
+  ProposerSlashingInput = object
     state: BeaconState
-    attestation: Attestation
+    proposerSlashing: ProposerSlashing
+  VoluntaryExitInput = object
+    state: BeaconState
+    exit: VoluntaryExit
+  # This and AssertionError are raised to indicate programming bugs
   # A wrapper to allow exception tracking to identify unexpected exceptions
   FuzzCrashError = object of Exception

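The harness distinguishes inputs that are merely rejected from genuine bugs by re-raising unexpected exceptions as FuzzCrashError, declared above. A stripped-down sketch of that wrapping pattern; FuzzCrash, processInput and harness are placeholder names, not the harness' real API:

type FuzzCrash = object of CatchableError

proc processInput(input: seq[byte]): bool =
  # Stand-in for SSZ decoding plus a state-transition call.
  if input.len == 0:
    raise newException(ValueError, "empty input")
  input[0] == 1

proc harness(input: seq[byte]): bool =
  try:
    result = processInput(input)
  except ValueError as e:
    # An unexpected failure mode: surface it as a distinct error type so the
    # fuzzer driver can treat it as a crash rather than a rejected input.
    raise newException(FuzzCrash, "unexpected error: " & e.msg, e)
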
@@ -48,7 +57,6 @@ proc copyState(state: BeaconState, output: ptr byte,
   copyMem(output, unsafeAddr resultState[0], output_size[])
   result = true

-
 proc nfuzz_attestation(input: openArray[byte], output: ptr byte,
     output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var

@@ -81,7 +89,6 @@ proc nfuzz_attestation(input: openArray[byte], output: ptr byte,
   if result:
     result = copyState(data.state, output, output_size)

-
 proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
     output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var

@@ -99,6 +106,7 @@ proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
   )

   try:
+    # TODO flags
     result = process_attester_slashing(data.state, data.attesterSlashing, cache)
   except ValueError as e:
     # TODO remove when status-im/nim-chronicles#60 is resolved

@@ -111,7 +119,6 @@ proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
   if result:
     result = copyState(data.state, output, output_size)

-
 proc nfuzz_block(input: openArray[byte], output: ptr byte,
     output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var data: BlockInput

@@ -144,7 +151,6 @@ proc nfuzz_block(input: openArray[byte], output: ptr byte,
   if result:
     result = copyState(data.state, output, output_size)

-
 proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
     output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   var

@@ -177,6 +183,64 @@ proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
     result = copyState(data.state, output, output_size)


+proc nfuzz_deposit(input: openArray[byte], output: ptr byte,
+    output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+  var
+    data: DepositInput
+
+  try:
+    data = SSZ.decode(input, DepositInput)
+  except MalformedSszError, SszSizeMismatchError:
+    let e = getCurrentException()
+    raise newException(
+      FuzzCrashError,
+      "SSZ deserialisation failed, likely bug in preprocessing.",
+      e,
+    )
+
+  try:
+    result = process_deposit(data.state, data.deposit, {})
+  except IOError, ValueError:
+    let e = getCurrentException()
+    # TODO remove when status-im/nim-chronicles#60 is resolved
+    raise newException(
+      FuzzCrashError,
+      "Unexpected (logging?) error in deposit processing",
+      e,
+    )
+
+  if result:
+    result = copyState(data.state, output, output_size)
+
+proc nfuzz_proposer_slashing(input: openArray[byte], output: ptr byte,
+    output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+  var
+    data: ProposerSlashingInput
+    cache = get_empty_per_epoch_cache()
+
+  try:
+    data = SSZ.decode(input, ProposerSlashingInput)
+  except MalformedSszError, SszSizeMismatchError:
+    let e = getCurrentException()
+    raise newException(
+      FuzzCrashError,
+      "SSZ deserialisation failed, likely bug in preprocessing.",
+      e,
+    )
+
+  try:
+    result = process_proposer_slashing(data.state, data.proposerSlashing, {}, cache)
+  except ValueError as e:
+    # TODO remove when status-im/nim-chronicles#60 is resolved
+    raise newException(
+      FuzzCrashError,
+      "Unexpected (logging?) error in proposer slashing",
+      e,
+    )
+
+  if result:
+    result = copyState(data.state, output, output_size)
+
 # Note: Could also accept raw input pointer and access list_size + seed here.
 # However, list_size needs to be known also outside this proc to allocate output.
 # TODO: rework to copy immediatly in an uint8 openArray, considering we have to

@@ -204,3 +268,31 @@ proc nfuzz_shuffle(input_seed: ptr byte, output: var openArray[uint64]): bool
     sizeof(ValidatorIndex))

   result = true
+
+proc nfuzz_voluntary_exit(input: openArray[byte], output: ptr byte,
+    output_size: ptr uint): bool {.exportc, raises: [FuzzCrashError, Defect].} =
+  var
+    data: VoluntaryExitInput
+
+  try:
+    data = SSZ.decode(input, VoluntaryExitInput)
+  except MalformedSszError, SszSizeMismatchError:
+    let e = getCurrentException()
+    raise newException(
+      FuzzCrashError,
+      "SSZ deserialisation failed, likely bug in preprocessing.",
+      e,
+    )
+
+  try:
+    result = process_voluntary_exit(data.state, data.exit, {})
+  except ValueError as e:
+    # TODO remove when status-im/nim-chronicles#60 is resolved
+    raise newException(
+      FuzzCrashError,
+      "Unexpected (logging?) error in voluntary exit processing",
+      e,
+    )
+
+  if result:
+    result = copyState(data.state, output, output_size)

@@ -69,6 +69,8 @@ cli do (testnetName {.argument.}: string):

   let
     dataDirName = testnetName.replace("/", "_")
+                             .replace("(", "_")
+                             .replace(")", "_")
     dataDir = buildDir / "data" / dataDirName
     validatorsDir = dataDir / "validators"
     dumpDir = dataDir / "dump"

@@ -22,7 +22,7 @@ fi
 echo Execution plan:

 echo "Testnet name : $NETWORK"
-echo "Bootstrap node hostname : ${BOOTSTRAP_HOST:="master-01.do-ams3.nimbus.test.statusim.net"}"
+echo "Bootstrap node hostname : ${BOOTSTRAP_HOST:="master-01.aws-eu-central-1a.nimbus.test.statusim.net"}"
 echo "Bootstrap node ip : ${BOOTSTRAP_IP:="$(dig +short $BOOTSTRAP_HOST)"}"
 echo "Bootstrap node port : ${BOOTSTRAP_PORT:=9000}"
 echo "Reset testnet at end : ${PUBLISH_TESTNET_RESETS:="1"}"

@@ -8,9 +8,9 @@
 import
   # Standard library
   os, unittest, strutils, streams, strformat, strscans,
-  macros,
+  macros, typetraits,
   # Status libraries
-  stint, stew/bitseqs, ../testutil,
+  faststreams, stint, stew/bitseqs, ../testutil,
   # Third-party
   yaml,
   # Beacon chain internals

@@ -33,6 +33,9 @@ type
     # Containers have a root (thankfully) and signing_root field
     signing_root: string

+  UnconsumedInput* = object of CatchableError
+  TestSizeError* = object of ValueError
+
 # Make signing root optional
 setDefaultValue(SSZHashTreeRoot, signing_root, "")

@@ -73,10 +76,26 @@ type
 # Type specific checks
 # ------------------------------------------------------------------------

-proc checkBasic(T: typedesc, dir: string, expectedHash: SSZHashTreeRoot) =
-  let deserialized = SSZ.loadFile(dir/"serialized.ssz", T)
-  check:
-    expectedHash.root == "0x" & toLowerASCII($deserialized.hashTreeRoot())
+proc checkBasic(T: typedesc,
+                dir: string,
+                expectedHash: SSZHashTreeRoot) =
+  var fileContents = readFile(dir/"serialized.ssz")
+  var stream = memoryStream(fileContents)
+  var reader = init(SszReader, stream)
+
+  # We are using heap allocation to avoid stack overflow
+  # issues caused by large objects such as `BeaconState`:
+  var deserialized = new T
+  reader.readValue(deserialized[])
+
+  if not stream[].eof:
+    raise newException(UnconsumedInput, "Remaining bytes in the input")
+
+  let
+    expectedHash = expectedHash.root
+    actualHash = "0x" & toLowerASCII($deserialized.hashTreeRoot())
+  check expectedHash == actualHash
+
   # TODO check the value

 macro testVector(typeIdent: string, size: int): untyped =

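The rewritten checkBasic deserializes into a heap-allocated ref to keep multi-hundred-kilobyte objects like BeaconState off the stack, and it now insists that the whole input is consumed. A tiny sketch of the heap-allocation part; the BigState type is invented purely for illustration:

type BigState = object
  slots: array[65536, uint64]   # ~512 KiB, uncomfortable to keep on the stack

proc loadBig(): ref BigState =
  # `new` allocates the object on the heap; only the ref lives on the stack.
  new result
  result.slots[0] = 1

let state = loadBig()
doAssert state.slots[0] == 1
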
@@ -100,19 +119,13 @@ macro testVector(typeIdent: string, size: int): untyped =
         ident"array", newLit(s), ident(t)
       )
       var testStmt = quote do:
-        # Need heap alloc
-        var deserialized: ref `T`
-        new deserialized
-        deserialized[] = SSZ.loadFile(dir/"serialized.ssz", `T`)
-        check:
-          expectedHash.root == "0x" & toLowerASCII($deserialized.hashTreeRoot())
-        # TODO check the value
+        checkBasic(`T`, dir, expectedHash)
       sizeDispatch.add nnkElifBranch.newTree(
         newCall(ident"==", size, newLit(s)),
         testStmt
       )
   sizeDispatch.add nnkElse.newTree quote do:
-    raise newException(ValueError,
+    raise newException(TestSizeError,
       "Unsupported **size** in type/size combination: array[" &
       $size & "," & typeIdent & ']')
   dispatcher.add nnkElifBranch.newTree(

@@ -136,31 +149,25 @@ proc checkVector(sszSubType, dir: string, expectedHash: SSZHashTreeRoot) =
   doAssert wasMatched
   testVector(typeIdent, size)

-type BitContainer[N: static int] = BitList[N] or BitArray[N]
-
-proc testBitContainer(T: typedesc[BitContainer], dir: string, expectedHash: SSZHashTreeRoot) =
-  let deserialized = SSZ.loadFile(dir/"serialized.ssz", T)
-  check:
-    expectedHash.root == "0x" & toLowerASCII($deserialized.hashTreeRoot())
-  # TODO check the value
-
 proc checkBitVector(sszSubType, dir: string, expectedHash: SSZHashTreeRoot) =
   var size: int
   let wasMatched = scanf(sszSubType, "bitvec_$i", size)
   doAssert wasMatched
   case size
-  of 1: testBitContainer(BitArray[1], dir, expectedHash)
-  of 2: testBitContainer(BitArray[2], dir, expectedHash)
-  of 3: testBitContainer(BitArray[3], dir, expectedHash)
-  of 4: testBitContainer(BitArray[4], dir, expectedHash)
-  of 5: testBitContainer(BitArray[5], dir, expectedHash)
-  of 8: testBitContainer(BitArray[8], dir, expectedHash)
-  of 16: testBitContainer(BitArray[16], dir, expectedHash)
-  of 31: testBitContainer(BitArray[31], dir, expectedHash)
-  of 512: testBitContainer(BitArray[512], dir, expectedHash)
-  of 513: testBitContainer(BitArray[513], dir, expectedHash)
+  of 1: checkBasic(BitArray[1], dir, expectedHash)
+  of 2: checkBasic(BitArray[2], dir, expectedHash)
+  of 3: checkBasic(BitArray[3], dir, expectedHash)
+  of 4: checkBasic(BitArray[4], dir, expectedHash)
+  of 5: checkBasic(BitArray[5], dir, expectedHash)
+  of 8: checkBasic(BitArray[8], dir, expectedHash)
+  of 9: checkBasic(BitArray[9], dir, expectedHash)
+  of 16: checkBasic(BitArray[16], dir, expectedHash)
+  of 31: checkBasic(BitArray[31], dir, expectedHash)
+  of 32: checkBasic(BitArray[32], dir, expectedHash)
+  of 512: checkBasic(BitArray[512], dir, expectedHash)
+  of 513: checkBasic(BitArray[513], dir, expectedHash)
   else:
-    raise newException(ValueError, "Unsupported BitVector of size " & $size)
+    raise newException(TestSizeError, "Unsupported BitVector of size " & $size)

 # TODO: serialization of "type BitList[maxLen] = distinct BitSeq is not supported"
 # https://github.com/status-im/nim-beacon-chain/issues/518

@@ -168,30 +175,31 @@ proc checkBitVector(sszSubType, dir: string, expectedHash: SSZHashTreeRoot) =
 #   var maxLen: int
 #   let wasMatched = scanf(sszSubType, "bitlist_$i", maxLen)
 #   case maxLen
-#   of 1: testBitContainer(BitList[1], dir, expectedHash)
-#   of 2: testBitContainer(BitList[2], dir, expectedHash)
-#   of 3: testBitContainer(BitList[3], dir, expectedHash)
-#   of 4: testBitContainer(BitList[4], dir, expectedHash)
-#   of 5: testBitContainer(BitList[5], dir, expectedHash)
-#   of 8: testBitContainer(BitList[8], dir, expectedHash)
-#   of 16: testBitContainer(BitList[16], dir, expectedHash)
-#   of 31: testBitContainer(BitList[31], dir, expectedHash)
-#   of 512: testBitContainer(BitList[512], dir, expectedHash)
-#   of 513: testBitContainer(BitList[513], dir, expectedHash)
+#   of 1: checkBasic(BitList[1], dir, expectedHash)
+#   of 2: checkBasic(BitList[2], dir, expectedHash)
+#   of 3: checkBasic(BitList[3], dir, expectedHash)
+#   of 4: checkBasic(BitList[4], dir, expectedHash)
+#   of 5: checkBasic(BitList[5], dir, expectedHash)
+#   of 8: checkBasic(BitList[8], dir, expectedHash)
+#   of 16: checkBasic(BitList[16], dir, expectedHash)
+#   of 31: checkBasic(BitList[31], dir, expectedHash)
+#   of 512: checkBasic(BitList[512], dir, expectedHash)
+#   of 513: checkBasic(BitList[513], dir, expectedHash)
 #   else:
 #     raise newException(ValueError, "Unsupported Bitlist of max length " & $maxLen)

 # Test dispatch for valid inputs
 # ------------------------------------------------------------------------

-proc sszCheck(sszType, sszSubType: string) =
-  let dir = SSZDir/sszType/"valid"/sszSubType
+proc sszCheck(baseDir, sszType, sszSubType: string) =
+  let dir = baseDir/sszSubType

   # Hash tree root
   var expectedHash: SSZHashTreeRoot
-  var s = openFileStream(dir/"meta.yaml")
-  yaml.load(s, expectedHash)
-  s.close()
+  if fileExists(dir/"meta.yaml"):
+    var s = openFileStream(dir/"meta.yaml")
+    defer: close(s)
+    yaml.load(s, expectedHash)

   # Deserialization and checks
   case sszType

@@ -269,8 +277,24 @@ proc runSSZtests() =
     timedTest &"Testing {sszType:12} inputs - valid" & skipped:
       let path = SSZDir/sszType/"valid"
       for pathKind, sszSubType in walkDir(path, relative = true):
-        doAssert pathKind == pcDir
-        sszCheck(sszType, sszSubType)
+        if pathKind != pcDir: continue
+        sszCheck(path, sszType, sszSubType)
+
+    timedTest &"Testing {sszType:12} inputs - invalid" & skipped:
+      let path = SSZDir/sszType/"invalid"
+      for pathKind, sszSubType in walkDir(path, relative = true):
+        if pathKind != pcDir: continue
+        try:
+          sszCheck(path, sszType, sszSubType)
+        except SszError, UnconsumedInput:
+          discard
+        except TestSizeError as err:
+          echo err.msg
+          skip()
+        except:
+          checkpoint getStackTrace(getCurrentException())
+          checkpoint getCurrentExceptionMsg()
+          check false

 # TODO: nim-serialization forces us to use exceptions as control flow
 # as we always have to check user supplied inputs

@@ -1 +1 @@
-Subproject commit 74700fdcab9387843975650ed326937c4d59df9e
+Subproject commit e3ced62d4ba06b7ff638f6784e782046b1fadc70

@@ -1 +1 @@
-Subproject commit f5be0ab63a64c314e74223891a7651e2eaaa8fec
+Subproject commit 50562b515a771cfc443557ee8e2dceee59207d52