Mirror of https://github.com/status-im/nimbus-eth2.git (synced 2025-02-03 02:05:03 +00:00)
Introduce BeaconNodeRef and use it in all the right places

commit 4e9fa51ae9 (parent fdcbfdff05)
@@ -180,7 +180,7 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
     pool.blockPool, pool.blockPool.tmpState,
     BlockSlot(blck: blck, slot: attestation.data.slot))

-  template state(): BeaconState = pool.blockPool.tmpState.data.data
+  template state(): BeaconState = pool.blockPool.tmpState.data.data[]

   if not validate(state, attestation):
     notice "Invalid attestation",
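The pattern repeated throughout this commit: wherever a field that used to hold a BeaconState value now holds a BeaconStateRef, the call site adds a [] dereference to get the value back. A minimal standalone sketch of that idea (the Thing/Holder types below are made up for illustration, not part of the codebase):

  type
    Thing = object
      counter: int
    ThingRef = ref Thing      # plays the role of BeaconStateRef
    Holder = object
      data: ThingRef          # previously this field held a Thing value

  proc useValue(t: Thing) = echo t.counter

  let h = Holder(data: ThingRef(counter: 1))
  useValue(h.data[])          # [] turns the ref back into a value, as in tmpState.data.data[]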
@@ -309,8 +309,8 @@ proc getAttestationsForSlot(pool: AttestationPool, newBlockSlot: Slot):
     let slotDequeIdx = int(attestationSlot - pool.startingSlot)
     some(pool.slots[slotDequeIdx])

-proc getAttestationsForBlock*(
-    pool: AttestationPool, state: BeaconState): seq[Attestation] =
+proc getAttestationsForBlock*(pool: AttestationPool,
+                              state: BeaconState): seq[Attestation] =
   ## Retrieve attestations that may be added to a new block at the slot of the
   ## given state
   logScope: pcs = "retrieve_attestation"

@@ -456,7 +456,7 @@ proc selectHead*(pool: AttestationPool): BlockRef =
     justifiedHead = pool.blockPool.latestJustifiedBlock()

   let newHead =
-    lmdGhost(pool, pool.blockPool.justifiedState.data.data, justifiedHead.blck)
+    lmdGhost(pool, pool.blockPool.justifiedState.data.data[], justifiedHead.blck)

   newHead

@@ -529,9 +529,9 @@ proc isValidAttestation*(
   # as it supports aggregated attestations (which this can't be)
   var cache = get_empty_per_epoch_cache()
   if not is_valid_indexed_attestation(
-      pool.blockPool.headState.data.data,
+      pool.blockPool.headState.data.data[],
       get_indexed_attestation(
-        pool.blockPool.headState.data.data, attestation, cache), {}):
+        pool.blockPool.headState.data.data[], attestation, cache), {}):
     debug "isValidAttestation: signature verification failed"
     return false

@@ -45,7 +45,7 @@ func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
   result[0] = byte ord(kind)
   result[1 .. ^1] = key

-func subkey(kind: type BeaconState, key: Eth2Digest): auto =
+func subkey(kind: type BeaconStateRef, key: Eth2Digest): auto =
   subkey(kHashToState, key.data)

 func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =

@@ -86,13 +86,13 @@ proc get(db: BeaconChainDB, key: openArray[byte], T: typedesc): Opt[T] =
 proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: SignedBeaconBlock) =
   db.put(subkey(type value, key), value)

-proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
+proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconStateRef) =
   # TODO prune old states - this is less easy than it seems as we never know
   # when or if a particular state will become finalized.

   db.put(subkey(type value, key), value)

-proc putState*(db: BeaconChainDB, value: BeaconState) =
+proc putState*(db: BeaconChainDB, value: BeaconStateRef) =
   db.putState(hash_tree_root(value), value)

 proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,

@@ -108,7 +108,7 @@ proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
     "working database")

 proc delState*(db: BeaconChainDB, key: Eth2Digest) =
-  db.backend.del(subkey(BeaconState, key)).expect("working database")
+  db.backend.del(subkey(BeaconStateRef, key)).expect("working database")

 proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
   db.backend.del(subkey(root, slot)).expect("working database")

@@ -122,8 +122,8 @@ proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
 proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[SignedBeaconBlock] =
   db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)

-proc getState*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconState] =
-  db.get(subkey(BeaconState, key), BeaconState)
+proc getState*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconStateRef] =
+  db.get(subkey(BeaconStateRef, key), BeaconStateRef)

 proc getStateRoot*(db: BeaconChainDB,
                    root: Eth2Digest,

@@ -136,13 +136,11 @@ proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
 proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
   db.get(subkey(kTailBlock), Eth2Digest)

-proc containsBlock*(
-    db: BeaconChainDB, key: Eth2Digest): bool =
+proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
   db.backend.contains(subkey(SignedBeaconBlock, key)).expect("working database")

-proc containsState*(
-    db: BeaconChainDB, key: Eth2Digest): bool =
-  db.backend.contains(subkey(BeaconState, key)).expect("working database")
+proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
+  db.backend.contains(subkey(BeaconStateRef, key)).expect("working database")

 iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
     tuple[root: Eth2Digest, blck: SignedBeaconBlock] =
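On the database side, states are now stored and returned behind a ref. A caller-side sketch of the round-trip, assuming a db: BeaconChainDB and a state: BeaconStateRef are already in scope (error handling elided):

  # Store the state under its hash_tree_root, then read it back as a ref.
  let root = hash_tree_root(state)   # hashing works the same through the ref
  db.putState(root, state)

  let restored = db.getState(root)   # Opt[BeaconStateRef] after this change
  if restored.isSome:
    doAssert hash_tree_root(restored.get()) == root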
@@ -86,7 +86,7 @@ proc saveValidatorKey(keyName, key: string, conf: BeaconNodeConf) =
   writeFile(outputFile, key)
   info "Imported validator key", file = outputFile

-proc getStateFromSnapshot(conf: BeaconNodeConf): NilableBeaconState =
+proc getStateFromSnapshot(conf: BeaconNodeConf): NilableBeaconStateRef =
   var
     genesisPath = conf.dataDir/genesisFile
     snapshotContents: TaintedString

@@ -122,7 +122,7 @@ proc getStateFromSnapshot(conf: BeaconNodeConf): NilableBeaconState =
       quit 1

   try:
-    result = SSZ.decode(snapshotContents, BeaconState)
+    result = SSZ.decode(snapshotContents, BeaconStateRef)
   except SerializationError:
     error "Failed to import genesis file", path = genesisPath
     quit 1

@@ -189,7 +189,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
         stateRoot = hash_tree_root(genesisState)
       quit 1

-    let tailBlock = get_initial_beacon_block(genesisState)
+    let tailBlock = get_initial_beacon_block(genesisState[])

     try:
       BlockPool.preInit(db, genesisState, tailBlock)

@@ -219,7 +219,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
       nil

   let
-    enrForkId = enrForkIdFromState(blockPool.headState.data.data)
+    enrForkId = enrForkIdFromState(blockPool.headState.data.data[])
     topicBeaconBlocks = getBeaconBlocksTopic(enrForkId.forkDigest)
     topicAggregateAndProofs = getAggregateAndProofsTopic(enrForkId.forkDigest)
     network = await createEth2Node(conf, enrForkId)

@@ -235,7 +235,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
     blockPool: blockPool,
     attestationPool: AttestationPool.init(blockPool),
     mainchainMonitor: mainchainMonitor,
-    beaconClock: BeaconClock.init(blockPool.headState.data.data),
+    beaconClock: BeaconClock.init(blockPool.headState.data.data[]),
     rpcServer: rpcServer,
     forkDigest: enrForkId.forkDigest,
     topicBeaconBlocks: topicBeaconBlocks,

@@ -281,8 +281,9 @@ template findIt(s: openarray, predicate: untyped): int =
       break
   res

-proc addLocalValidator(
-    node: BeaconNode, state: BeaconState, privKey: ValidatorPrivKey) =
+proc addLocalValidator(node: BeaconNode,
+                       state: BeaconState,
+                       privKey: ValidatorPrivKey) =
   let pubKey = privKey.toPubKey()

   let idx = state.validators.findIt(it.pubKey == pubKey)

@@ -409,15 +410,15 @@ proc proposeBlock(node: BeaconNode,
       (get_eth1data_stub(state.eth1_deposit_index, slot.compute_epoch_at_slot()),
        newSeq[Deposit]())
     else:
-      node.mainchainMonitor.getBlockProposalData(state)
+      node.mainchainMonitor.getBlockProposalData(state[])

   let message = makeBeaconBlock(
-    state,
+    state[],
    head.root,
    validator.genRandaoReveal(state.fork, state.genesis_validators_root, slot),
    eth1data,
    Eth2Digest(),
-    node.attestationPool.getAttestationsForBlock(state),
+    node.attestationPool.getAttestationsForBlock(state[]),
    deposits)

   if not message.isSome():

@@ -458,7 +459,7 @@ proc proposeBlock(node: BeaconNode,
     SSZ.saveFile(
       node.config.dumpDir / "state-" & $state.slot & "-" &
         shortLog(newBlockRef.root) & "-" & shortLog(root()) & ".ssz",
-      state())
+      state)

   node.network.broadcast(node.topicBeaconBlocks, newBlock)

@@ -575,16 +576,16 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
   # version here that calculates the committee for a single slot only
   node.blockPool.withState(node.blockPool.tmpState, attestationHead):
     var cache = get_empty_per_epoch_cache()
-    let committees_per_slot = get_committee_count_at_slot(state, slot)
+    let committees_per_slot = get_committee_count_at_slot(state[], slot)

     for committee_index in 0'u64..<committees_per_slot:
       let committee = get_beacon_committee(
-        state, slot, committee_index.CommitteeIndex, cache)
+        state[], slot, committee_index.CommitteeIndex, cache)

       for index_in_committee, validatorIdx in committee:
-        let validator = node.getAttachedValidator(state, validatorIdx)
+        let validator = node.getAttachedValidator(state[], validatorIdx)
         if validator != nil:
-          let ad = makeAttestationData(state, slot, committee_index, blck.root)
+          let ad = makeAttestationData(state[], slot, committee_index, blck.root)
           attestations.add((ad, committee.len, index_in_committee, validator))

   for a in attestations:

@@ -648,21 +649,21 @@ proc broadcastAggregatedAttestations(
   let bs = BlockSlot(blck: aggregationHead, slot: aggregationSlot)
   node.blockPool.withState(node.blockPool.tmpState, bs):
     let
-      committees_per_slot = get_committee_count_at_slot(state, aggregationSlot)
+      committees_per_slot = get_committee_count_at_slot(state[], aggregationSlot)
     var cache = get_empty_per_epoch_cache()
     for committee_index in 0'u64..<committees_per_slot:
       let committee = get_beacon_committee(
-        state, aggregationSlot, committee_index.CommitteeIndex, cache)
+        state[], aggregationSlot, committee_index.CommitteeIndex, cache)

       for index_in_committee, validatorIdx in committee:
-        let validator = node.getAttachedValidator(state, validatorIdx)
+        let validator = node.getAttachedValidator(state[], validatorIdx)
         if validator != nil:
           # This is slightly strange/inverted control flow, since really it's
           # going to happen once per slot, but this is the best way to get at
           # the validator index and private key pair. TODO verify it only has
           # one isSome() with test.
           let aggregateAndProof =
-            aggregate_attestations(node.attestationPool, state,
+            aggregate_attestations(node.attestationPool, state[],
              committee_index.CommitteeIndex,
              # TODO https://github.com/status-im/nim-beacon-chain/issues/545
              # this assumes in-process private keys

@@ -1192,7 +1193,7 @@ proc start(node: BeaconNode) =
     bs = BlockSlot(blck: head.blck, slot: head.blck.slot)

   node.blockPool.withState(node.blockPool.tmpState, bs):
-    node.addLocalValidators(state)
+    node.addLocalValidators(state[])

   node.run()

@@ -1281,7 +1282,7 @@ when hasPrompt:
         # TODO slow linear scan!
         for idx, b in node.blockPool.headState.data.data.balances:
           if node.getAttachedValidator(
-              node.blockPool.headState.data.data, ValidatorIndex(idx)) != nil:
+              node.blockPool.headState.data.data[], ValidatorIndex(idx)) != nil:
             balance += b
         formatGwei(balance)

@@ -1393,7 +1394,7 @@ programMain:
       some(config.bootstrapAddress),
       config.bootstrapPort,
       config.bootstrapPort,
-      [toFieldPair("eth2", SSZ.encode(enrForkIdFromState initialState)),
+      [toFieldPair("eth2", SSZ.encode(enrForkIdFromState initialState[])),
       toFieldPair("attnets", SSZ.encode(metadata.attnets))])

     writeFile(bootstrapFile, bootstrapEnr.toURI)

@@ -230,7 +230,7 @@ func emptyStateData*: StateData =
     data: HashedBeaconState(
       # Please note that this initialization is needed in order
      # to allocate memory for the BeaconState:
-      data: BeaconState(),
+      data: BeaconStateRef(),
      root: default(Eth2Digest)
    ),
    blck: default(BlockRef))

@@ -33,7 +33,7 @@ template withState*(
   updateStateData(pool, cache, blockSlot)

   template hashedState(): HashedBeaconState {.inject, used.} = cache.data
-  template state(): BeaconState {.inject, used.} = cache.data.data
+  template state(): BeaconStateRef {.inject, used.} = cache.data.data
   template blck(): BlockRef {.inject, used.} = cache.blck
   template root(): Eth2Digest {.inject, used.} = cache.data.root
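The withState template now injects state as a BeaconStateRef, which is why the call sites above pass state[] into spec procs that still take a BeaconState value. A self-contained sketch of that shape, with hypothetical names standing in for the real ones:

  type
    State = object
      slot: int
    StateRef = ref State

  template withStateRef(s: StateRef, body: untyped) =
    template state(): StateRef {.inject.} = s   # mirrors the injected `state` template
    body

  proc committeeCountAt(st: State): int = st.slot  # stands in for a value-taking spec proc

  let s = StateRef(slot: 3)
  withStateRef(s):
    echo committeeCountAt(state[])   # dereference at the call site, as in get_committee_count_at_slot(state[], slot)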
@@ -409,7 +409,7 @@ proc add*(
   # Careful, tmpState.data has been updated but not blck - we need to create
   # the BlockRef first!
   pool.tmpState.blck = pool.addResolvedBlock(
-    pool.tmpState.data.data, blockRoot, signedBlock, parent)
+    pool.tmpState.data.data[], blockRoot, signedBlock, parent)

   return pool.tmpState.blck

@@ -980,7 +980,7 @@ proc isInitialized*(T: type BlockPool, db: BeaconChainDB): bool =
   return true

 proc preInit*(
-    T: type BlockPool, db: BeaconChainDB, state: BeaconState,
+    T: type BlockPool, db: BeaconChainDB, state: BeaconStateRef,
     signedBlock: SignedBeaconBlock) =
   # write a genesis state, the way the BlockPool expects it to be stored in
   # database

@@ -1009,14 +1009,14 @@ proc getProposer*(pool: BlockPool, head: BlockRef, slot: Slot): Option[Validator
     var cache = get_empty_per_epoch_cache()

     # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#validator-assignments
-    let proposerIdx = get_beacon_proposer_index(state, cache)
+    let proposerIdx = get_beacon_proposer_index(state[], cache)
     if proposerIdx.isNone:
       warn "Missing proposer index",
         slot=slot,
         epoch=slot.compute_epoch_at_slot,
         num_validators=state.validators.len,
         active_validators=
-          get_active_validator_indices(state, slot.compute_epoch_at_slot),
+          get_active_validator_indices(state[], slot.compute_epoch_at_slot),
         balances=state.balances
       return

@@ -1118,7 +1118,7 @@ proc isValidBeaconBlock*(pool: var BlockPool,
   pool.withState(pool.tmpState, bs):
     let
       blockRoot = hash_tree_root(signed_beacon_block.message)
-      domain = get_domain(pool.headState.data.data, DOMAIN_BEACON_PROPOSER,
+      domain = get_domain(pool.headState.data.data[], DOMAIN_BEACON_PROPOSER,
         compute_epoch_at_slot(signed_beacon_block.message.slot))
       signing_root = compute_signing_root(blockRoot, domain)
       proposer_index = signed_beacon_block.message.proposer_index

@@ -41,7 +41,7 @@ type
     depositContractAddress: Address
     dataProviderFactory*: DataProviderFactory

-    genesisState: NilableBeaconState
+    genesisState: NilableBeaconStateRef
     genesisStateFut: Future[void]

     eth1Chain: Eth1Chain

@@ -346,11 +346,11 @@ proc checkForGenesisEvent(m: MainchainMonitor) =
       let startTime = lastBlock.timestamp.uint64
       var s = initialize_beacon_state_from_eth1(lastBlock.voteData.block_hash,
         startTime, m.eth1Chain.allDeposits, {})
-      if is_valid_genesis_state(s):
+      if is_valid_genesis_state(s[]):
         # https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
         s.genesis_time = startTime

-        m.genesisState = clone(s)
+        m.genesisState = s
         if not m.genesisStateFut.isNil:
           m.genesisStateFut.complete()
           m.genesisStateFut = nil

@@ -432,18 +432,17 @@ proc processDeposits(m: MainchainMonitor, dataProvider: DataProviderRef) {.
 proc isRunning*(m: MainchainMonitor): bool =
   not m.runFut.isNil

-proc getGenesis*(m: MainchainMonitor): Future[BeaconState] {.async.} =
+proc getGenesis*(m: MainchainMonitor): Future[BeaconStateRef] {.async.} =
   if m.genesisState.isNil:
     if m.genesisStateFut.isNil:
       m.genesisStateFut = newFuture[void]("getGenesis")
     await m.genesisStateFut
     m.genesisStateFut = nil

-  if m.genesisState == nil:
-    doAssert(false)
-    return BeaconState()
-  else:
+  if m.genesisState != nil:
     return m.genesisState
+  else:
+    raiseAssert "Unreachable code"

 method getBlockByHash*(p: Web3DataProviderRef, hash: BlockHash): Future[BlockObject] =
   discard

@@ -200,7 +200,7 @@ proc initialize_beacon_state_from_eth1*(
     eth1_block_hash: Eth2Digest,
     eth1_timestamp: uint64,
     deposits: openArray[Deposit],
-    flags: UpdateFlags = {}): BeaconState {.nbench.}=
+    flags: UpdateFlags = {}): BeaconStateRef {.nbench.}=
   ## Get the genesis ``BeaconState``.
   ##
   ## Before the beacon chain starts, validators will register in the Eth1 chain

@@ -218,7 +218,7 @@ proc initialize_beacon_state_from_eth1*(
   doAssert deposits.len >= SLOTS_PER_EPOCH

   const SECONDS_PER_DAY = uint64(60*60*24)
-  var state = BeaconState(
+  var state = BeaconStateRef(
     fork: Fork(
       previous_version: GENESIS_FORK_VERSION,
       current_version: GENESIS_FORK_VERSION,

@@ -246,7 +246,7 @@ proc initialize_beacon_state_from_eth1*(
     for prefix_root in hash_tree_roots_prefix(
         leaves, 2'i64^DEPOSIT_CONTRACT_TREE_DEPTH):
       state.eth1_data.deposit_root = prefix_root
-      discard process_deposit(state, deposits[i], flags)
+      discard process_deposit(state[], deposits[i], flags)
       i += 1

   # Process activations

@@ -296,8 +296,9 @@ type
     current_justified_checkpoint*: Checkpoint
     finalized_checkpoint*: Checkpoint

-  BeaconState* = ref BeaconStateObj not nil
-  NilableBeaconState* = ref BeaconStateObj
+  BeaconState* = BeaconStateObj
+  BeaconStateRef* = ref BeaconStateObj not nil
+  NilableBeaconStateRef* = ref BeaconStateObj

   # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#validator
   Validator* = object
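BeaconState is now the plain object, BeaconStateRef the always-allocated heap handle, and NilableBeaconStateRef the variant that may legitimately be nil (for example while genesis is still unknown). A toy mirror of the three aliases, for illustration only:

  type
    BigObj = object
      x: int
    Big = BigObj                # value alias, the role of BeaconState
    BigRef = ref BigObj         # heap handle, the role of BeaconStateRef
                                # (the real alias is additionally declared `not nil`)
    NilableBigRef = ref BigObj  # same ref, but nil is a meaningful "absent" value

  proc takesValue(b: Big) = discard   # spec-style procs keep taking the value

  let r = BigRef(x: 1)             # allocate once, share the handle cheaply
  takesValue(r[])                  # dereference where a value is still expected
  var maybe: NilableBigRef = nil   # "not available yet", as with genesisState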
@@ -390,7 +391,7 @@ type

   # TODO to be replaced with some magic hash caching
   HashedBeaconState* = object
-    data*: BeaconState
+    data*: BeaconStateRef
     root*: Eth2Digest # hash_tree_root(data)

   StateCache* = object

@@ -570,13 +571,17 @@ template readValue*(reader: var JsonReader, value: var BitList) =
 template writeValue*(writer: var JsonWriter, value: BitList) =
   writeValue(writer, BitSeq value)

-func clone*[T](x: ref T): ref T not nil =
+func newClone*[T](x: T): ref T not nil =
+  new result
+  result[] = x
+
+func newClone*[T](x: ref T): ref T not nil =
   new result
   result[] = x[]

 func clone*(other: HashedBeaconState): HashedBeaconState =
   HashedBeaconState(
-    data: clone(other.data),
+    data: newClone(other.data),
     root: other.root)

 template init*(T: type BitList, len: int): auto = T init(BitSeq, len)
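newClone replaces clone: it allocates a fresh ref and copies either a plain value or another ref's payload into it, so the result never aliases its argument. A standalone usage sketch (the Counter type is made up, and the `not nil` annotation of the real definition is dropped for brevity):

  type Counter = object
    n: int

  func newClone[T](x: T): ref T =
    new result
    result[] = x

  var a = Counter(n: 1)
  let b = newClone(a)   # independent heap copy of the value
  b.n = 2
  doAssert a.n == 1     # mutating the clone leaves the original untouched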
@@ -485,8 +485,8 @@ proc makeBeaconBlock*(
       deposits: deposits)
   )

-  var tmpState = clone(state)
-  let ok = process_block(tmpState, blck, {skipBlsValidation}, cache)
+  var tmpState = newClone(state)
+  let ok = process_block(tmpState[], blck, {skipBlsValidation}, cache)

   if not ok:
     warn "Unable to apply new block to state", blck = shortLog(blck)

@@ -170,7 +170,7 @@ proc state_transition*(

   ## TODO, of cacheState/processEpoch/processSlot/processBloc, only the last
   ## might fail, so should this bother capturing here, or?
-  var old_state = clone(state)
+  var old_state = newClone(state)

   # These should never fail.
   process_slots(state, signedBlock.message.slot)

@@ -194,7 +194,7 @@ proc state_transition*(
     return true

   # Block processing failed, roll back changes
-  state[] = old_state[]
+  state = old_state[]
   false

 # Hashed-state transition functions
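With BeaconState a value type, rolling back a failed block is a plain value assignment from the snapshot, where previously (BeaconState itself being a ref) both sides had to be dereferenced. A small sketch of the snapshot-and-rollback idea with a stand-in State type:

  type State = object
    slot: int

  func newClone[T](x: T): ref T =
    new result
    result[] = x

  proc riskyUpdate(s: var State): bool =
    s.slot.inc
    false                       # pretend the block failed to apply

  var state = State(slot: 10)
  let old_state = newClone(state)   # full snapshot behind a cheap handle
  if not riskyUpdate(state):
    state = old_state[]             # roll back by copying the value, as in the diff
  doAssert state.slot == 10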
@@ -238,14 +238,14 @@ proc process_slots*(state: var HashedBeaconState, slot: Slot) =
     if is_epoch_transition:
       # Note: Genesis epoch = 0, no need to test if before Genesis
       try:
-        beacon_previous_validators.set(get_epoch_validator_count(state.data))
+        beacon_previous_validators.set(get_epoch_validator_count(state.data[]))
       except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
         trace "Couldn't update metrics", msg = e.msg
-      process_epoch(state.data)
+      process_epoch(state.data[])
     state.data.slot += 1
     if is_epoch_transition:
       try:
-        beacon_current_validators.set(get_epoch_validator_count(state.data))
+        beacon_current_validators.set(get_epoch_validator_count(state.data[]))
       except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
         trace "Couldn't update metrics", msg = e.msg
     state.root = hash_tree_root(state.data)

@@ -258,18 +258,18 @@ proc state_transition*(
   process_slots(state, signedBlock.message.slot)

   if skipBLSValidation in flags or
-      verify_block_signature(state.data, signedBlock):
+      verify_block_signature(state.data[], signedBlock):

     var per_epoch_cache = get_empty_per_epoch_cache()
-    if processBlock(state.data, signedBlock.message, flags, per_epoch_cache):
-      if skipStateRootValidation in flags or verifyStateRoot(state.data, signedBlock.message):
+    if processBlock(state.data[], signedBlock.message, flags, per_epoch_cache):
+      if skipStateRootValidation in flags or verifyStateRoot(state.data[], signedBlock.message):
        # State root is what it should be - we're done!

        # TODO when creating a new block, state_root is not yet set.. comparing
        # with zero hash here is a bit fragile however, but this whole thing
        # should go away with proper hash caching
        state.root =
-          if signedBlock.message.state_root == Eth2Digest(): hash_tree_root(state.data)
+          if signedBlock.message.state_root == Eth2Digest(): hash_tree_root(state.data[])
          else: signedBlock.message.state_root

        return true
@@ -2,7 +2,7 @@ import
   os, chronos, json_serialization,
   spec/[datatypes], beacon_chain_db

-proc obtainTrustedStateSnapshot*(db: BeaconChainDB): Future[BeaconState] {.async.} =
+proc obtainTrustedStateSnapshot*(db: BeaconChainDB): Future[BeaconStateRef] {.async.} =
   # In case our latest state is too old, we must obtain a recent snapshot
   # of the state from a trusted location. This is explained in detail here:
   # https://notes.ethereum.org/oaQV3IF5R2qlJuW-V1r1ew#Beacon-chain-sync

@@ -140,7 +140,7 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
   let prePath = dir / preState & ".ssz"

   echo "Running: ", prePath
-  var state = parseSSZ(prePath, BeaconState)
+  var state = parseSSZ(prePath, BeaconStateRef)

   for i in 0 ..< blocksQty:
     let blockPath = dir / blocksPrefix & $i & ".ssz"

@@ -149,16 +149,16 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
     let signedBlock = parseSSZ(blockPath, SignedBeaconBlock)
     let flags = if skipBLS: {skipBlsValidation}
                 else: {}
-    let success = state_transition(state, signedBlock.message, flags)
+    let success = state_transition(state[], signedBlock.message, flags)
     echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

 proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
   let prePath = dir / preState & ".ssz"

   echo "Running: ", prePath
-  var state = parseSSZ(prePath, BeaconState)
+  var state = parseSSZ(prePath, BeaconStateRef)

-  process_slots(state, state.slot + numSlots)
+  process_slots(state[], state.slot + numSlots)

 template processEpochScenarioImpl(
     dir, preState: string,

@@ -167,16 +167,16 @@ template processEpochScenarioImpl(
   let prePath = dir/preState & ".ssz"

   echo "Running: ", prePath
-  var state = parseSSZ(prePath, BeaconState)
+  var state = parseSSZ(prePath, BeaconStateRef)

   when needCache:
     var cache = get_empty_per_epoch_cache()

   # Epoch transitions can't fail (TODO is this true?)
   when needCache:
-    transitionFn(state, cache)
+    transitionFn(state[], cache)
   else:
-    transitionFn(state)
+    transitionFn(state[])

   echo astToStr(transitionFn) & " status: ", "Done" # if success: "SUCCESS ✓" else: "FAILURE ⚠️"

@@ -187,15 +187,13 @@ template genProcessEpochScenario(name, transitionFn: untyped, needCache: static
 template processBlockScenarioImpl(
     dir, preState: string, skipBLS: bool,
     transitionFn, paramName: untyped,
-    ConsensusObject: typedesc,
+    ConsensusObjectRefType: typedesc,
     needFlags, needCache: static bool): untyped =
   let prePath = dir/preState & ".ssz"

   echo "Running: ", prePath
-  var state = parseSSZ(prePath, BeaconState)
+  var state = parseSSZ(prePath, BeaconStateRef)

-  var consObj: ref `ConsensusObject`
-  new consObj
   when needCache:
     var cache = get_empty_per_epoch_cache()
   when needFlags:

@@ -204,12 +202,12 @@ template processBlockScenarioImpl(

   let consObjPath = dir/paramName & ".ssz"
   echo "Processing: ", consObjPath
-  consObj[] = parseSSZ(consObjPath, ConsensusObject)
+  var consObj = parseSSZ(consObjPath, ConsensusObjectRefType)

   when needFlags and needCache:
-    let success = transitionFn(state, consObj[], flags, cache)
+    let success = transitionFn(state[], consObj[], flags, cache)
   elif needFlags:
-    let success = transitionFn(state, consObj[], flags)
+    let success = transitionFn(state[], consObj[], flags)
   elif needCache:
     let success = transitionFn(state, consObj[], flags, cache)
   else:

@@ -217,23 +215,74 @@ template processBlockScenarioImpl(

   echo astToStr(transitionFn) & " status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

-template genProcessBlockScenario(name, transitionFn, paramName: untyped, ConsensusObject: typedesc, needFlags, needCache: static bool): untyped =
+template genProcessBlockScenario(name, transitionFn,
+                                 paramName: untyped,
+                                 ConsensusObjectType: typedesc,
+                                 needFlags,
+                                 needCache: static bool): untyped =
   when needFlags:
     proc `name`*(dir, preState, `paramName`: string, skipBLS: bool) =
-      processBlockScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ConsensusObject, needFlags, needCache)
+      processBlockScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ref ConsensusObjectType, needFlags, needCache)
   else:
     proc `name`*(dir, preState, `paramName`: string) =
       # skipBLS is a dummy to avoid undeclared identifier
-      processBlockScenarioImpl(dir, preState, skipBLS = false, transitionFn, paramName, ConsensusObject, needFlags, needCache)
+      processBlockScenarioImpl(dir, preState, skipBLS = false, transitionFn, paramName, ref ConsensusObjectType, needFlags, needCache)

-genProcessEpochScenario(runProcessJustificationFinalization, process_justification_and_finalization, needCache = true)
-genProcessEpochScenario(runProcessRegistryUpdates, process_registry_updates, needCache = false)
-genProcessEpochScenario(runProcessSlashings, process_slashings, needCache = false)
-genProcessEpochScenario(runProcessFinalUpdates, process_final_updates, needCache = false)
+genProcessEpochScenario(runProcessJustificationFinalization,
+                        process_justification_and_finalization,
+                        needCache = true)
+
+genProcessEpochScenario(runProcessRegistryUpdates,
+                        process_registry_updates,
+                        needCache = false)
+
+genProcessEpochScenario(runProcessSlashings,
+                        process_slashings,
+                        needCache = false)
+
+genProcessEpochScenario(runProcessFinalUpdates,
+                        process_final_updates,
+                        needCache = false)
+
+genProcessBlockScenario(runProcessBlockHeader,
+                        process_block_header,
+                        block_header,
+                        BeaconBlock,
+                        needFlags = true,
+                        needCache = true)
+
+genProcessBlockScenario(runProcessProposerSlashing,
+                        process_proposer_slashing,
+                        proposer_slashing,
+                        ProposerSlashing,
+                        needFlags = true,
+                        needCache = true)
+
+genProcessBlockScenario(runProcessAttestation,
+                        process_attestation,
+                        attestation,
+                        Attestation,
+                        needFlags = true,
+                        needCache = true)
+
+genProcessBlockScenario(runProcessAttesterSlashing,
+                        process_attester_slashing,
+                        att_slash,
+                        AttesterSlashing,
+                        needFlags = true,
+                        needCache = true)
+
+genProcessBlockScenario(runProcessDeposit,
+                        process_deposit,
+                        deposit,
+                        Deposit,
+                        needFlags = true,
+                        needCache = false)
+
+genProcessBlockScenario(runProcessVoluntaryExits,
+                        process_voluntary_exit,
+                        deposit,
+                        SignedVoluntaryExit,
+                        needFlags = true,
+                        needCache = false)

-genProcessBlockScenario(runProcessBlockHeader, process_block_header, block_header, BeaconBlock, needFlags = true, needCache = true)
-genProcessBlockScenario(runProcessProposerSlashing, process_proposer_slashing, proposer_slashing, ProposerSlashing, needFlags = true, needCache = true)
-genProcessBlockScenario(runProcessAttestation, process_attestation, attestation, Attestation, needFlags = true, needCache = true)
-genProcessBlockScenario(runProcessAttesterSlashing, process_attester_slashing, att_slash, AttesterSlashing, needFlags = true, needCache = true)
-genProcessBlockScenario(runProcessDeposit, process_deposit, deposit, Deposit, needFlags = true, needCache = false)
-genProcessBlockScenario(runProcessVoluntaryExits, process_voluntary_exit, deposit, SignedVoluntaryExit, needFlags = true, needCache = false)
@@ -2,7 +2,7 @@ import
   confutils, os, strutils, chronicles, json_serialization,
   nimcrypto/utils,
   ../beacon_chain/spec/[crypto, datatypes, digest],
-  ../beacon_chain/[ssz]
+  ../beacon_chain/ssz

 # TODO turn into arguments
 cli do(kind: string, file: string):

@@ -30,6 +30,6 @@ cli do(kind: string, file: string):
   of "deposit": printit(Deposit)
   of "deposit_data": printit(DepositData)
   of "eth1_data": printit(Eth1Data)
-  of "state": printit(BeaconState)
+  of "state": printit(BeaconStateRef)
   of "proposer_slashing": printit(ProposerSlashing)
   of "voluntary_exit": printit(VoluntaryExit)

@@ -29,7 +29,7 @@ cli do(kind: string, file: string):
   of "deposit": printit(Deposit)
   of "deposit_data": printit(DepositData)
   of "eth1_data": printit(Eth1Data)
-  of "state": printit(BeaconState)
+  of "state": printit(BeaconStateRef)
   of "proposer_slashing": printit(ProposerSlashing)
   of "voluntary_exit": printit(VoluntaryExit)
   else: echo "Unknown kind"

@@ -5,7 +5,7 @@ import

 cli do(pre: string, blck: string, post: string, verifyStateRoot = false):
   let
-    stateX = SSZ.loadFile(pre, BeaconState)
+    stateX = SSZ.loadFile(pre, BeaconStateRef)
     blckX = SSZ.loadFile(blck, SignedBeaconBlock)
     flags = if verifyStateRoot: {skipStateRootValidation} else: {}

@@ -10,23 +10,23 @@ import

 type
   AttestationInput = object
-    state: BeaconState
+    state: BeaconStateRef
     attestation: Attestation
   AttesterSlashingInput = object
-    state: BeaconState
+    state: BeaconStateRef
     attesterSlashing: AttesterSlashing
   BlockInput = object
-    state: BeaconState
+    state: BeaconStateRef
     beaconBlock: SignedBeaconBlock
   BlockHeaderInput = BlockInput
   DepositInput = object
-    state: BeaconState
+    state: BeaconStateRef
     deposit: Deposit
   ProposerSlashingInput = object
-    state: BeaconState
+    state: BeaconStateRef
     proposerSlashing: ProposerSlashing
   VoluntaryExitInput = object
-    state: BeaconState
+    state: BeaconStateRef
     exit: SignedVoluntaryExit
   # This and AssertionError are raised to indicate programming bugs
   # A wrapper to allow exception tracking to identify unexpected exceptions

@@ -89,44 +89,44 @@ template decodeAndProcess(typ, process: untyped): bool =
     raise newException(FuzzCrashError, "Unexpected Exception in state transition", e)

   if processOk:
-    copyState(data.state, output, output_size)
+    copyState(data.state[], output, output_size)
   else:
     false

 proc nfuzz_attestation(input: openArray[byte], output: ptr byte,
     output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(AttestationInput):
-    process_attestation(data.state, data.attestation, flags, cache)
+    process_attestation(data.state[], data.attestation, flags, cache)

 proc nfuzz_attester_slashing(input: openArray[byte], output: ptr byte,
     output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(AttesterSlashingInput):
-    process_attester_slashing(data.state, data.attesterSlashing, flags, cache)
+    process_attester_slashing(data.state[], data.attesterSlashing, flags, cache)

 proc nfuzz_block(input: openArray[byte], output: ptr byte,
     output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(BlockInput):
-    state_transition(data.state, data.beaconBlock, flags)
+    state_transition(data.state[], data.beaconBlock, flags)

 proc nfuzz_block_header(input: openArray[byte], output: ptr byte,
     output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(BlockHeaderInput):
-    process_block_header(data.state, data.beaconBlock.message, flags, cache)
+    process_block_header(data.state[], data.beaconBlock.message, flags, cache)

 proc nfuzz_deposit(input: openArray[byte], output: ptr byte,
     output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(DepositInput):
-    process_deposit(data.state, data.deposit, flags)
+    process_deposit(data.state[], data.deposit, flags)

 proc nfuzz_proposer_slashing(input: openArray[byte], output: ptr byte,
     output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(ProposerSlashingInput):
-    process_proposer_slashing(data.state, data.proposerSlashing, flags, cache)
+    process_proposer_slashing(data.state[], data.proposerSlashing, flags, cache)

 proc nfuzz_voluntary_exit(input: openArray[byte], output: ptr byte,
     output_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError, Defect].} =
   decodeAndProcess(VoluntaryExitInput):
-    process_voluntary_exit(data.state, data.exit, flags)
+    process_voluntary_exit(data.state[], data.exit, flags)

 # Note: Could also accept raw input pointer and access list_size + seed here.
 # However, list_size needs to be known also outside this proc to allocate output.
@@ -80,7 +80,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
   let
     genesisState =
       initialize_beacon_state_from_eth1(Eth2Digest(), 0, deposits, flags)
-    genesisBlock = get_initial_beacon_block(genesisState)
+    genesisBlock = get_initial_beacon_block(genesisState[])

   echo "Starting simulation..."

@@ -116,7 +116,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

   for i in 0..<slots:
     maybeWrite(false)
-    verifyConsensus(state, attesterRatio)
+    verifyConsensus(state[], attesterRatio)

     let
       attestations_idx = state.slot

@@ -133,7 +133,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

     withTimer(timers[t]):
       signedBlock = addTestBlock(
-        state, latest_block_root, attestations = blockAttestations, flags = flags)
+        state[], latest_block_root, attestations = blockAttestations, flags = flags)
     latest_block_root = withTimerRet(timers[tHashBlock]):
       hash_tree_root(signedBlock.message)

@@ -145,8 +145,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
         target_slot = state.slot + MIN_ATTESTATION_INCLUSION_DELAY - 1
         scass = withTimerRet(timers[tShuffle]):
           mapIt(
-            0'u64 ..< get_committee_count_at_slot(state, target_slot),
-            get_beacon_committee(state, target_slot, it.CommitteeIndex, cache))
+            0'u64 ..< get_committee_count_at_slot(state[], target_slot),
+            get_beacon_committee(state[], target_slot, it.CommitteeIndex, cache))

       for i, scas in scass:
         var

@@ -160,12 +160,12 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
           if (rand(r, high(int)).float * attesterRatio).int <= high(int):
             if first:
               attestation =
-                makeAttestation(state, latest_block_root, scas, target_slot,
+                makeAttestation(state[], latest_block_root, scas, target_slot,
                   i.uint64, v, cache, flags)
               first = false
             else:
               attestation.combine(
-                makeAttestation(state, latest_block_root, scas, target_slot,
+                makeAttestation(state[], latest_block_root, scas, target_slot,
                   i.uint64, v, cache, flags),
                 flags)

@@ -17,7 +17,7 @@ import
   ./mock_deposits


-proc initGenesisState*(num_validators: uint64, genesis_time: uint64 = 0): BeaconState =
+proc initGenesisState*(num_validators: uint64, genesis_time: uint64 = 0): BeaconStateRef =
   let deposits = mockGenesisBalancedDeposits(
     validatorCount = num_validators,
     amountInEth = 32, # We create canonical validators with 32 Eth
@@ -40,16 +40,16 @@ proc runTest(identifier: string) =
       var cache = get_empty_per_epoch_cache()

       let attestation = parseTest(testDir/"attestation.ssz", SSZ, Attestation)
-      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
+      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)

       if existsFile(testDir/"post.ssz"):
-        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
-        let done = process_attestation(preState, attestation, {}, cache)
+        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)
+        let done = process_attestation(preState[], attestation, {}, cache)
         doAssert done, "Valid attestation not processed"
         check: preState.hash_tree_root() == postState.hash_tree_root()
         reportDiff(preState, postState)
       else:
-        let done = process_attestation(preState, attestation, {}, cache)
+        let done = process_attestation(preState[], attestation, {}, cache)
         doAssert done == false, "We didn't expect this invalid attestation to be processed."

 `testImpl _ operations_attestations _ identifier`()

@@ -40,17 +40,17 @@ proc runTest(identifier: string) =
       var cache = get_empty_per_epoch_cache()

       let attesterSlashing = parseTest(testDir/"attester_slashing.ssz", SSZ, AttesterSlashing)
-      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
+      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)

       if existsFile(testDir/"post.ssz"):
-        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
-        let done = process_attester_slashing(preState, attesterSlashing,
+        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)
+        let done = process_attester_slashing(preState[], attesterSlashing,
                                              {}, cache)
         doAssert done, "Valid attestater slashing not processed"
         check: preState.hash_tree_root() == postState.hash_tree_root()
         reportDiff(preState, postState)
       else:
-        let done = process_attester_slashing(preState, attesterSlashing,
+        let done = process_attester_slashing(preState[], attesterSlashing,
                                              {}, cache)
         doAssert done == false, "We didn't expect this invalid attester slashing to be processed."

@@ -40,16 +40,16 @@ proc runTest(identifier: string) =
      var cache = get_empty_per_epoch_cache()

      let blck = parseTest(testDir/"block.ssz", SSZ, BeaconBlock)
-      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
+      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)

      if existsFile(testDir/"post.ssz"):
-        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
-        let done = process_block_header(preState, blck, {}, cache)
+        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)
+        let done = process_block_header(preState[], blck, {}, cache)
        doAssert done, "Valid block header not processed"
        check: preState.hash_tree_root() == postState.hash_tree_root()
        reportDiff(preState, postState)
      else:
-        let done = process_block_header(preState, blck, {}, cache)
+        let done = process_block_header(preState[], blck, {}, cache)
        doAssert done == false, "We didn't expect this invalid block header to be processed."

 `testImpl _ blockheader _ identifier`()

@@ -41,14 +41,14 @@ proc runTest(identifier: string) =

     timedTest prefix & " " & identifier:
       let deposit = parseTest(testDir/"deposit.ssz", SSZ, Deposit)
-      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
+      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)

       if existsFile(testDir/"post.ssz"):
-        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
-        discard process_deposit(preState, deposit, flags)
+        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)
+        discard process_deposit(preState[], deposit, flags)
         reportDiff(preState, postState)
       else:
-        check not process_deposit(preState, deposit, flags)
+        check not process_deposit(preState[], deposit, flags)

 `testImpl _ operations_deposits _ identifier`()

@@ -38,18 +38,18 @@ proc runTest(identifier: string) =

     timedTest prefix & astToStr(identifier):
       let proposerSlashing = parseTest(testDir/"proposer_slashing.ssz", SSZ, ProposerSlashing)
-      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
+      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)

       var cache = get_empty_per_epoch_cache()

       if existsFile(testDir/"post.ssz"):
-        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
-        let done = process_proposer_slashing(preState, proposerSlashing, {}, cache)
+        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)
+        let done = process_proposer_slashing(preState[], proposerSlashing, {}, cache)
         doAssert done, "Valid proposer slashing not processed"
         check: preState.hash_tree_root() == postState.hash_tree_root()
         reportDiff(preState, postState)
       else:
-        let done = process_proposer_slashing(preState, proposerSlashing, {}, cache)
+        let done = process_proposer_slashing(preState[], proposerSlashing, {}, cache)
         doAssert done == false, "We didn't expect this invalid proposer slashing to be processed."

 `testImpl_proposer_slashing _ identifier`()

@@ -38,16 +38,16 @@ proc runTest(identifier: string) =

     timedTest prefix & identifier:
       let voluntaryExit = parseTest(testDir/"voluntary_exit.ssz", SSZ, SignedVoluntaryExit)
-      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
+      var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)

       if existsFile(testDir/"post.ssz"):
-        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
-        let done = process_voluntary_exit(preState, voluntaryExit, {})
+        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)
+        let done = process_voluntary_exit(preState[], voluntaryExit, {})
         doAssert done, "Valid voluntary exit not processed"
         check: preState.hash_tree_root() == postState.hash_tree_root()
         reportDiff(preState, postState)
       else:
-        let done = process_voluntary_exit(preState, voluntaryExit, {})
+        let done = process_voluntary_exit(preState[], voluntaryExit, {})
         doAssert done == false, "We didn't expect this invalid voluntary exit to be processed."

 `testImpl _ voluntary_exit _ identifier`()
@@ -34,7 +34,7 @@ proc runTest(identifier: string) =
       "[Invalid] "

   timedTest prefix & identifier:
-    var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
+    var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)
     var hasPostState = existsFile(testDir/"post.ssz")

     # In test cases with more than 10 blocks the first 10 aren't 0-prefixed,

@@ -44,15 +44,15 @@ proc runTest(identifier: string) =

       if hasPostState:
         # TODO: The EF is using invalid BLS keys so we can't verify them
-        let success = state_transition(preState, blck, flags = {skipBlsValidation})
+        let success = state_transition(preState[], blck, flags = {skipBlsValidation})
         doAssert success, "Failure when applying block " & $i
       else:
-        let success = state_transition(preState, blck, flags = {})
+        let success = state_transition(preState[], blck, flags = {})
         doAssert not success, "We didn't expect this invalid block to be processed"

     # check: preState.hash_tree_root() == postState.hash_tree_root()
     if hasPostState:
-      let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
+      let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)
       when false:
         reportDiff(preState, postState)
       doAssert preState.hash_tree_root() == postState.hash_tree_root()

@@ -31,10 +31,10 @@ proc runTest(identifier: string) =

 proc `testImpl _ slots _ identifier`() =
   timedTest "Slots - " & identifier:
-    var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
-    let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
+    var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)
+    let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)

-    process_slots(preState, preState.slot + num_slots)
+    process_slots(preState[], preState.slot + num_slots)

     # check: preState.hash_tree_root() == postState.hash_tree_root()
     reportDiff(preState, postState)

@@ -43,9 +43,7 @@ setDefaultValue(SSZHashTreeRoot, signing_root, "")

 proc checkSSZ(T: typedesc, dir: string, expectedHash: SSZHashTreeRoot) =
   # Deserialize into a ref object to not fill Nim stack
-  var deserialized: ref T
-  new deserialized
-  deserialized[] = SSZ.loadFile(dir/"serialized.ssz", T)
+  var deserialized = SSZ.loadFile(dir/"serialized.ssz", ref T)

   check: expectedHash.root == "0x" & toLowerASCII($deserialized.hashTreeRoot())

@@ -38,14 +38,14 @@ template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, us

       let unitTestName = testDir.rsplit(DirSep, 1)[1]
       timedTest testName & " - " & unitTestName & preset():
-        var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconState)
-        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconState)
+        var preState = parseTest(testDir/"pre.ssz", SSZ, BeaconStateRef)
+        let postState = parseTest(testDir/"post.ssz", SSZ, BeaconStateRef)

         when useCache:
           var cache = get_empty_per_epoch_cache()
-          transitionProc(preState, cache)
+          transitionProc(preState[], cache)
         else:
-          transitionProc(preState)
+          transitionProc(preState[])

         reportDiff(preState, postState)
@ -33,7 +33,7 @@ suiteReport "[Unit - Spec - Block processing] Attestations " & preset():
|
||||
# The attestation to process must be named "attestation" in the calling context
|
||||
|
||||
timedTest name:
|
||||
var state {.inject.} = clone(genesisState)
|
||||
var state {.inject.} = newClone(genesisState)
|
||||
|
||||
# Attestation setup body
|
||||
# ----------------------------------------
|
||||
@ -49,24 +49,24 @@ suiteReport "[Unit - Spec - Block processing] Attestations " & preset():
|
||||
# ----------------------------------------
|
||||
var cache = get_empty_per_epoch_cache()
|
||||
check process_attestation(
|
||||
state, attestation, flags = {}, cache
|
||||
state[], attestation, flags = {}, cache
|
||||
)
|
||||
|
||||
# Check that the attestation was processed
|
||||
if attestation.data.target.epoch == state.get_current_epoch():
|
||||
if attestation.data.target.epoch == get_current_epoch(state[]):
|
||||
check(state.current_epoch_attestations.len == current_epoch_count + 1)
|
||||
else:
|
||||
check(state.previous_epoch_attestations.len == previous_epoch_count + 1)
|
||||
|
||||
valid_attestation("Valid attestation"):
|
||||
let attestation = mockAttestation(state)
|
||||
let attestation = mockAttestation(state[])
|
||||
state.slot += MIN_ATTESTATION_INCLUSION_DELAY
|
||||
|
||||
valid_attestation("Valid attestation from previous epoch"):
|
||||
let attestation = mockAttestation(state)
|
||||
let attestation = mockAttestation(state[])
|
||||
state.slot = Slot(SLOTS_PER_EPOCH - 1)
|
||||
nextEpoch(state)
|
||||
applyEmptyBlock(state)
|
||||
nextEpoch(state[])
|
||||
applyEmptyBlock(state[])
|
||||
|
||||
# TODO check if this should be replaced
|
||||
when false:
|
||||
@ -76,14 +76,14 @@ suiteReport "[Unit - Spec - Block processing] Attestations " & preset():
|
||||
else:
|
||||
valid_attestation("Valid attestation since max epochs per crosslinks"):
|
||||
for _ in 0 ..< MAX_EPOCHS_PER_CROSSLINK + 2:
|
||||
nextEpoch(state)
|
||||
applyEmptyBlock(state)
|
||||
nextEpoch(state[])
|
||||
applyEmptyBlock(state[])
|
||||
|
||||
let attestation = mockAttestation(state)
|
||||
let attestation = mockAttestation(state[])
|
||||
check: attestation.data.crosslink.end_epoch - attestation.data.crosslink.start_epoch == MAX_EPOCHS_PER_CROSSLINK
|
||||
|
||||
for _ in 0 ..< MIN_ATTESTATION_INCLUSION_DELAY:
|
||||
nextSlot(state)
|
||||
nextSlot(state[])
|
||||
|
||||
# TODO: regression BLS V0.10.1
|
||||
echo "[Skipping] \"Empty aggregation bit\""
|
||||
|
@ -33,13 +33,13 @@ suiteReport "[Unit - Spec - Block processing] Deposits " & preset():
|
||||
# TODO: BLS signature
|
||||
timedTest "Deposit " & name & " MAX_EFFECTIVE_BALANCE balance (" &
|
||||
$(MAX_EFFECTIVE_BALANCE div 10'u64^9) & " ETH)":
|
||||
var state = clone(genesisState)
|
||||
var state = newClone(genesisState)
|
||||
|
||||
# Test configuration
|
||||
# ----------------------------------------
|
||||
let validator_index = state.validators.len
|
||||
let deposit = mockUpdateStateForNewDeposit(
|
||||
state,
|
||||
state[],
|
||||
uint64 validator_index,
|
||||
deposit_amount,
|
||||
flags = {skipBlsValidation}
|
||||
@ -55,7 +55,7 @@ suiteReport "[Unit - Spec - Block processing] Deposits " & preset():
|
||||
|
||||
# State transition
|
||||
# ----------------------------------------
|
||||
check: state.process_deposit(deposit, {skipBlsValidation})
|
||||
check: process_deposit(state[], deposit, {skipBlsValidation})
|
||||
|
||||
# Check invariants
|
||||
# ----------------------------------------
|
||||
@ -74,14 +74,14 @@ suiteReport "[Unit - Spec - Block processing] Deposits " & preset():
|
||||
valid_deposit(MAX_EFFECTIVE_BALANCE + 1, "over")
|
||||
|
||||
timedTest "Validator top-up":
|
||||
var state = clone(genesisState)
|
||||
var state = newClone(genesisState)
|
||||
|
||||
# Test configuration
|
||||
# ----------------------------------------
|
||||
let validator_index = 0
|
||||
let deposit_amount = MAX_EFFECTIVE_BALANCE div 4
|
||||
let deposit = mockUpdateStateForNewDeposit(
|
||||
state,
|
||||
state[],
|
||||
uint64 validator_index,
|
||||
deposit_amount,
|
||||
flags = {skipBlsValidation}
|
||||
@ -97,7 +97,7 @@ suiteReport "[Unit - Spec - Block processing] Deposits " & preset():
|
||||
|
||||
# State transition
|
||||
# ----------------------------------------
|
||||
check: state.process_deposit(deposit, {skipBlsValidation})
|
||||
check: process_deposit(state[], deposit, {skipBlsValidation})
|
||||
|
||||
# Check invariants
|
||||
# ----------------------------------------
|
||||
|
@ -221,30 +221,30 @@ proc payload =
|
||||
doAssert genesisState.validators.len == int NumValidators
|
||||
|
||||
setup:
|
||||
var state = clone(genesisState)
|
||||
var state = newClone(genesisState)
|
||||
|
||||
timedTest " Rule I - 234 finalization with enough support":
|
||||
finalizeOn234(state, Epoch 5, sufficient_support = true)
|
||||
finalizeOn234(state[], Epoch 5, sufficient_support = true)
|
||||
|
||||
timedTest " Rule I - 234 finalization without support":
|
||||
finalizeOn234(state, Epoch 5, sufficient_support = false)
|
||||
finalizeOn234(state[], Epoch 5, sufficient_support = false)
|
||||
|
||||
timedTest " Rule II - 23 finalization with enough support":
|
||||
finalizeOn23(state, Epoch 4, sufficient_support = true)
|
||||
finalizeOn23(state[], Epoch 4, sufficient_support = true)
|
||||
|
||||
timedTest " Rule II - 23 finalization without support":
|
||||
finalizeOn23(state, Epoch 4, sufficient_support = false)
|
||||
finalizeOn23(state[], Epoch 4, sufficient_support = false)
|
||||
|
||||
timedTest " Rule III - 123 finalization with enough support":
|
||||
finalizeOn123(state, Epoch 6, sufficient_support = true)
|
||||
finalizeOn123(state[], Epoch 6, sufficient_support = true)
|
||||
|
||||
timedTest " Rule III - 123 finalization without support":
|
||||
finalizeOn123(state, Epoch 6, sufficient_support = false)
|
||||
finalizeOn123(state[], Epoch 6, sufficient_support = false)
|
||||
|
||||
timedTest " Rule IV - 12 finalization with enough support":
|
||||
finalizeOn12(state, Epoch 3, sufficient_support = true)
|
||||
finalizeOn12(state[], Epoch 3, sufficient_support = true)
|
||||
|
||||
timedTest " Rule IV - 12 finalization without support":
|
||||
finalizeOn12(state, Epoch 3, sufficient_support = false)
|
||||
finalizeOn12(state[], Epoch 3, sufficient_support = false)
|
||||
|
||||
payload()
|
||||
|
@ -38,15 +38,15 @@ when const_preset == "minimal": # Too much stack space used on mainnet
let
# Create an attestation for slot 1!
beacon_committee = get_beacon_committee(
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
attestation = makeAttestation(
state.data.data, state.blck.root, beacon_committee[0], cache)
state.data.data[], state.blck.root, beacon_committee[0], cache)

pool.add(attestation)

process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool.getAttestationsForBlock(state.data.data[])

check:
attestations.len == 1

@ -56,17 +56,17 @@ when const_preset == "minimal": # Too much stack space used on mainnet
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0], cache)
state.data.data[], state.blck.root, bc0[0], cache)

process_slots(state.data, state.data.data.slot + 1)

let
bc1 = get_beacon_committee(state.data.data,
bc1 = get_beacon_committee(state.data.data[],
state.data.data.slot, 0.CommitteeIndex, cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc1[0], cache)
state.data.data[], state.blck.root, bc1[0], cache)

# test reverse order
pool.add(attestation1)
@ -74,7 +74,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet

process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool.getAttestationsForBlock(state.data.data[])

check:
attestations.len == 1

@ -84,18 +84,18 @@ when const_preset == "minimal": # Too much stack space used on mainnet
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0], cache)
state.data.data[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
state.data.data[], state.blck.root, bc0[1], cache)

pool.add(attestation0)
pool.add(attestation1)

process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool.getAttestationsForBlock(state.data.data[])

check:
attestations.len == 1

@ -106,11 +106,11 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0], cache)
state.data.data[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
state.data.data[], state.blck.root, bc0[1], cache)

attestation0.combine(attestation1, {})
@ -119,7 +119,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet

process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool.getAttestationsForBlock(state.data.data[])

check:
attestations.len == 1

@ -128,12 +128,12 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var cache = get_empty_per_epoch_cache()
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(state.data.data,
bc0 = get_beacon_committee(state.data.data[],
state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0], cache)
state.data.data[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
state.data.data[], state.blck.root, bc0[1], cache)

attestation0.combine(attestation1, {})

@ -142,14 +142,14 @@ when const_preset == "minimal": # Too much stack space used on mainnet

process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool.getAttestationsForBlock(state.data.data[])

check:
attestations.len == 1

timedTest "Fork choice returns latest block with no attestations":
let
b1 = addTestBlock(state.data.data, blockPool.tail.root)
b1 = addTestBlock(state.data.data[], blockPool.tail.root)
b1Root = hash_tree_root(b1.message)
b1Add = blockPool.add(b1Root, b1)
head = pool.selectHead()
@ -158,7 +158,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
head == b1Add

let
b2 = addTestBlock(state.data.data, b1Root)
b2 = addTestBlock(state.data.data[], b1Root)
b2Root = hash_tree_root(b2.message)
b2Add = blockPool.add(b2Root, b2)
head2 = pool.selectHead()

@ -169,7 +169,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
timedTest "Fork choice returns block with attestation":
var cache = get_empty_per_epoch_cache()
let
b10 = makeTestBlock(state.data.data, blockPool.tail.root)
b10 = makeTestBlock(state.data.data[], blockPool.tail.root)
b10Root = hash_tree_root(b10.message)
b10Add = blockPool.add(b10Root, b10)
head = pool.selectHead()

@ -178,15 +178,15 @@ when const_preset == "minimal": # Too much stack space used on mainnet
head == b10Add

let
b11 = makeTestBlock(state.data.data, blockPool.tail.root,
b11 = makeTestBlock(state.data.data[], blockPool.tail.root,
graffiti = Eth2Digest(data: [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
)
b11Root = hash_tree_root(b11.message)
b11Add = blockPool.add(b11Root, b11)

bc1 = get_beacon_committee(
state.data.data, state.data.data.slot, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state.data.data, b10Root, bc1[0], cache)
state.data.data[], state.data.data.slot, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state.data.data[], b10Root, bc1[0], cache)

pool.add(attestation0)

@ -197,8 +197,8 @@ when const_preset == "minimal": # Too much stack space used on mainnet
head2 == b10Add

let
attestation1 = makeAttestation(state.data.data, b11Root, bc1[1], cache)
attestation2 = makeAttestation(state.data.data, b11Root, bc1[2], cache)
attestation1 = makeAttestation(state.data.data[], b11Root, bc1[1], cache)
attestation2 = makeAttestation(state.data.data[], b11Root, bc1[2], cache)
pool.add(attestation1)

let head3 = pool.selectHead()
@ -48,7 +48,7 @@ suiteReport "Beacon chain DB" & preset():
db = init(BeaconChainDB, kvStore MemStoreRef.init())

let
state = BeaconState()
state = BeaconStateRef()
root = hash_tree_root(state)

db.putState(state)

@ -95,9 +95,9 @@ when const_preset == "minimal": # Too much stack space used on mainnet
db = makeTestDB(SLOTS_PER_EPOCH)
pool = BlockPool.init(db)
state = pool.loadTailState().data.data
b1 = addTestBlock(state, pool.tail.root)
b1 = addTestBlock(state[], pool.tail.root)
b1Root = hash_tree_root(b1.message)
b2 = addTestBlock(state, b1Root)
b2 = addTestBlock(state[], b1Root)
b2Root {.used.} = hash_tree_root(b2.message)

timedTest "getRef returns nil for missing blocks":

@ -135,10 +135,10 @@ when const_preset == "minimal": # Too much stack space used on mainnet
pool.heads[0].blck == b2Add

# Skip one slot to get a gap
process_slots(state, state.slot + 1)
process_slots(state[], state.slot + 1)

let
b4 = addTestBlock(state, b2Root)
b4 = addTestBlock(state[], b2Root)
b4Root = hash_tree_root(b4.message)
b4Add = pool.add(b4Root, b4)
@ -292,7 +292,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
block:
# Create a fork that will not be taken
var
blck = makeTestBlock(pool.headState.data.data, pool.head.blck.root)
blck = makeTestBlock(pool.headState.data.data[], pool.head.blck.root)
discard pool.add(hash_tree_root(blck.message), blck)

for i in 0 ..< (SLOTS_PER_EPOCH * 6):

@ -304,9 +304,9 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var
cache = get_empty_per_epoch_cache()
blck = makeTestBlock(
pool.headState.data.data, pool.head.blck.root,
pool.headState.data.data[], pool.head.blck.root,
attestations = makeFullAttestations(
pool.headState.data.data, pool.head.blck.root,
pool.headState.data.data[], pool.head.blck.root,
pool.headState.data.data.slot, cache, {}))
let added = pool.add(hash_tree_root(blck.message), blck)
pool.updateHead(added)
@ -22,23 +22,23 @@ suiteReport "Block processing" & preset():
# TODO bls verification is a bit of a bottleneck here
genesisState = initialize_beacon_state_from_eth1(
Eth2Digest(), 0, makeInitialDeposits(), {})
genesisBlock = get_initial_beacon_block(genesisState)
genesisBlock = get_initial_beacon_block(genesisState[])
genesisRoot = hash_tree_root(genesisBlock.message)

timedTest "Passes from genesis state, no block" & preset():
var state = clone(genesisState)
setup:
var state = newClone(genesisState)

process_slots(state, state.slot + 1)
timedTest "Passes from genesis state, no block" & preset():
process_slots(state[], state.slot + 1)
check:
state.slot == genesisState.slot + 1

timedTest "Passes from genesis state, empty block" & preset():
var
state = clone(genesisState)
previous_block_root = hash_tree_root(genesisBlock.message)
new_block = makeTestBlock(state, previous_block_root)
new_block = makeTestBlock(state[], previous_block_root)

let block_ok = state_transition(state, new_block, {})
let block_ok = state_transition(state[], new_block, {})

check:
block_ok

@ -46,22 +46,19 @@ suiteReport "Block processing" & preset():
state.slot == genesisState.slot + 1

timedTest "Passes through epoch update, no block" & preset():
var state = clone(genesisState)

process_slots(state, Slot(SLOTS_PER_EPOCH))
process_slots(state[], Slot(SLOTS_PER_EPOCH))

check:
state.slot == genesisState.slot + SLOTS_PER_EPOCH

timedTest "Passes through epoch update, empty block" & preset():
var
state = clone(genesisState)
previous_block_root = genesisRoot

for i in 1..SLOTS_PER_EPOCH.int:
let new_block = makeTestBlock(state, previous_block_root)
let new_block = makeTestBlock(state[], previous_block_root)

let block_ok = state_transition(state, new_block, {})
let block_ok = state_transition(state[], new_block, {})

check:
block_ok
@ -73,29 +73,28 @@ suiteReport "Block processing" & preset():

timedTest "Attestation gets processed at epoch" & preset():
var
state = clone(genesisState)
previous_block_root = genesisRoot
cache = get_empty_per_epoch_cache()

# Slot 0 is a finalized slot - won't be making attestations for it..
process_slots(state, state.slot + 1)
process_slots(state[], state.slot + 1)

let
# Create an attestation for slot 1 signed by the only attester we have!
beacon_committee =
get_beacon_committee(state, state.slot, 0.CommitteeIndex, cache)
get_beacon_committee(state[], state.slot, 0.CommitteeIndex, cache)
attestation = makeAttestation(
state, previous_block_root, beacon_committee[0], cache)
state[], previous_block_root, beacon_committee[0], cache)

# Some time needs to pass before attestations are included - this is
# to let the attestation propagate properly to interested participants
process_slots(state, GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY + 1)
process_slots(state[], GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY + 1)

let
new_block = makeTestBlock(state, previous_block_root,
new_block = makeTestBlock(state[], previous_block_root,
attestations = @[attestation]
)
discard state_transition(state, new_block, {})
discard state_transition(state[], new_block, {})

check:
# TODO epoch attestations can get multiplied now; clean up paths to

@ -104,7 +100,7 @@ suiteReport "Block processing" & preset():

when const_preset=="minimal":
# Can take several minutes with mainnet settings
process_slots(state, Slot(191))
process_slots(state[], Slot(191))

# Would need to process more epochs for the attestation to be removed from
# the state! (per above bug)
@ -148,9 +148,9 @@ proc makeTestBlock*(
# It's a bit awkward - in order to produce a block for N+1, we need to
# calculate what the state will look like after that block has been applied,
# because the block includes the state root.
var tmpState = clone(state)
var tmpState = newClone(state)
addTestBlock(
tmpState, parent_root, eth1_data, attestations, deposits, graffiti, flags)
tmpState[], parent_root, eth1_data, attestations, deposits, graffiti, flags)

proc makeAttestation*(
state: BeaconState, beacon_block_root: Eth2Digest,

@ -92,7 +92,7 @@ template timedTest*(name, body) =
# TODO noto thread-safe as-is
testTimes.add (f, name)

proc makeTestDB*(tailState: BeaconState, tailBlock: SignedBeaconBlock): BeaconChainDB =
proc makeTestDB*(tailState: BeaconStateRef, tailBlock: SignedBeaconBlock): BeaconChainDB =
result = init(BeaconChainDB, kvStore MemStoreRef.init())
BlockPool.preInit(result, tailState, tailBlock)

@ -102,7 +102,7 @@ proc makeTestDB*(validators: int): BeaconChainDB =
Eth2Digest(), 0,
makeInitialDeposits(validators, flags = {skipBlsValidation}),
{skipBlsValidation})
genBlock = get_initial_beacon_block(genState)
genBlock = get_initial_beacon_block(genState[])
makeTestDB(genState, genBlock)

export inMicroseconds
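The test changes above all follow one pattern: states move from stack-allocated BeaconState values built with clone to heap-allocated BeaconStateRef values built with newClone, and call sites that still expect the value type dereference with []. The sketch below is only an illustration of that pattern, not the project's code; BigState, BigStateRef, processSlots, and the local newClone helper are simplified stand-ins for the actual definitions.

type
  BigState = object
    slot: uint64
  BigStateRef = ref BigState

proc newClone(x: BigState): BigStateRef =
  # Simplified stand-in: allocate once on the heap and copy the value in,
  # so the large object is no longer copied around on the stack.
  new result
  result[] = x

proc processSlots(s: var BigState, target: uint64) =
  # Mutating procs keep taking the value type; callers pass `stateRef[]`.
  s.slot = target

when isMainModule:
  let genesis = BigState(slot: 0)
  var state = newClone(genesis)  # analogous to `var state = newClone(genesisState)`
  processSlots(state[], 42)      # explicit dereference where the value type is expected
  doAssert state.slot == 42      # field access auto-dereferences the ref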