Get the code to compiling state
commit e4d1bebf4d (parent 1a849f82e1)
@@ -1,7 +1,7 @@
 import
   os, json,
   chronicles, json_serialization, eth_common/eth_types_json_serialization,
-  datatypes
+  spec/datatypes

 type
   BeaconChainDB* = ref object

@@ -1,7 +1,7 @@
 import
   os, net,
-  asyncdispatch2, confutils, eth_p2p, eth_keys,
-  private/helpers, conf, datatypes, time, fork_choise,
+  asyncdispatch2, chronicles, confutils, eth_p2p, eth_keys,
+  spec/[beaconstate, datatypes], conf, time, fork_choice,
   beacon_chain_db, validator_pool, mainchain_monitor,
   sync_protocol, gossipsub_protocol, trusted_state_snapshots

@@ -10,7 +10,7 @@ type
     beaconState*: BeaconState
     network*: EthereumNode
     db*: BeaconChainDB
-    config*: Configuration
+    config*: BeaconNodeConf
     keys*: KeyPair
     attachedValidators: ValidatorPool
     attestations: AttestationPool
@@ -18,7 +18,7 @@ type
     mainchainMonitor: MainchainMonitor

 const
-  version = "v0.1" # read this from the nimble file
+  version = "v0.1" # TODO: read this from the nimble file
   clientId = "nimbus beacon node " & version

   topicBeaconBlocks = "ethereum/2.1/beacon_chain/blocks"
@@ -31,11 +31,11 @@ proc ensureNetworkKeys*(dataDir: string): KeyPair =
   # if necessary
   return newKeyPair()

-proc init*(T: type BeaconNode, conf: Configuration): T =
+proc init*(T: type BeaconNode, conf: BeaconNodeConf): T =
   new result
   result.config = conf
-  result.db = BeaconChainDB.init(conf.dataDir)
-  result.keys = ensureNetworkKeys(conf.dataDir)
+  result.db = BeaconChainDB.init(string conf.dataDir)
+  result.keys = ensureNetworkKeys(string conf.dataDir)

   var address: Address
   address.ip = parseIpAddress("0.0.0.0")
@@ -46,14 +46,14 @@ proc init*(T: type BeaconNode, conf: Configuration): T =
 proc sync*(node: BeaconNode): Future[bool] {.async.} =
   let persistedState = node.db.lastFinalizedState()
   if persistedState.isNil or
-     persistedState[].slotDistanceFromNow() > WITHDRAWAL_PERIOD:
+     persistedState[].slotDistanceFromNow() > WEAK_SUBJECTVITY_PERIOD:
     node.beaconState = await obtainTrustedStateSnapshot(node.db)
   else:
     node.beaconState = persistedState[]
     var targetSlot = toSlot timeSinceGenesis(node.beaconState)

-    while node.beaconState.last_finalized_slot < targetSlot:
-      var (peer, changeLog) = node.network.getValidatorChangeLog(
+    while node.beaconState.last_finalized_slot.int < targetSlot:
+      var (peer, changeLog) = await node.network.getValidatorChangeLog(
         node.beaconState.validator_set_delta_hash_chain)

       if peer == nil:
@@ -61,7 +61,7 @@ proc sync*(node: BeaconNode): Future[bool] {.async.} =
         return false

       if applyValidatorChangeLog(changeLog, node.beaconState):
-        node.db.persistBlock(changeLog.signedBlock, node.beaconState)
+        node.db.persistBlock(node.beaconState, changeLog.signedBlock)
       else:
         warn "Ignoring invalid validator change log", sentFrom = peer

@@ -82,58 +82,54 @@ proc getAttachedValidator(node: BeaconNode, idx: int): AttachedValidator =
   let validatorKey = node.beaconState.validators[idx].pubkey
   return node.attachedValidators.getValidator(validatorKey)

-proc makeAttestationCallback(node: BeaconNode,
-                             validator: AttachedValidator): auto =
-  proc makeAttestation {.async.} =
-    var attestation: Attestation
-    attestation.validator = validator.idx
-
-    # TODO: Populate attestation.data
-
-    attestation.signature = await validator.signAttestation(attestation.data)
-    await node.network.broadcast(topicAttestations, attestation)
-
-  return proc =
-    asyncCheck makeAttestation
-
-proc proposeBlockCallback(node: BeaconNode,
-                          validator: AttachedValidator): auto =
-  proc proposeBlock {.async.} =
-    var proposal: BeaconBlock
-
-    # TODO:
-    # 1. Produce a RANDAO reveal from attachedVadalidator.randaoSecret
-    # and its matching ValidatorRecord.
-
-    # 2. Get ancestors from the beacon_db
-
-    # 3. Calculate the correct state hash
-
-    proposal.candidate_pow_receipt_root =
-      node.mainchainMonitor.getBeaconBlockRef()
-
-    for a in node.attestations.each(firstSlot = node.headBlock.slot + 1,
-                                    lastSlot = slot - MIN_ATTESTATION_INCLUSION_DELAY):
-      proposal.attestations.add a
-      # TODO: this is not quite right,
-      # the attestations from individual validators have to be merged.
-
-    for r in node.mainchainMonitor.getValidatorActions():
-      proposal.specials.add r
-
-    var signedData: ProposalSignedData
-    # TODO: populate the signed data
-
-    proposal.proposer_signature = await validator.signBlockProposal(signedData)
-    await node.network.broadcast(topicProposals, proposal)
-
-  return proc =
-    asyncCheck proposeBlock
-
-proc scheduleCycleActions(node: BeaconNode)
+proc makeAttestation(node: BeaconNode,
+                     validator: AttachedValidator) {.async.} =
+  var attestation: Attestation
+  attestation.validator = validator.idx
+
+  # TODO: Populate attestation.data
+
+  attestation.signature = await validator.signAttestation(attestation.data)
+  await node.network.broadcast(topicAttestations, attestation)
+
+proc proposeBlock(node: BeaconNode,
+                  validator: AttachedValidator,
+                  slot: int) {.async.} =
+  var proposal: BeaconBlock
+
+  # TODO:
+  # 1. Produce a RANDAO reveal from attachedVadalidator.randaoSecret
+  # and its matching ValidatorRecord.
+
+  # 2. Get ancestors from the beacon_db
+
+  # 3. Calculate the correct state hash
+
+  proposal.candidate_pow_receipt_root =
+    node.mainchainMonitor.getBeaconBlockRef()
+
+  for a in node.attestations.each(firstSlot = node.headBlock.slot.int + 1,
+                                  lastSlot = slot - MIN_ATTESTATION_INCLUSION_DELAY):
+    # TODO: this is not quite right,
+    # the attestations from individual validators have to be merged.
+    # proposal.attestations.add a
+    discard
+
+  for r in node.mainchainMonitor.getValidatorActions(
+      node.headBlock.candidate_pow_receipt_root,
+      proposal.candidate_pow_receipt_root):
+    proposal.specials.add r
+
+  var signedData: ProposalSignedData
+  # TODO: populate the signed data
+
+  proposal.proposer_signature = await validator.signBlockProposal(signedData)
+  await node.network.broadcast(topicBeaconBlocks, proposal)
+
+proc scheduleCycleActions(node: BeaconNode) =
   ## This schedules the required block proposals and
   ## attestations from our attached validators.
-  let cycleStart = node.last_state_recalculation_slot
+  let cycleStart = node.beaconState.last_state_recalculation_slot.int

   for i in 0 ..< CYCLE_LENGTH:
     # Schedule block proposals
@@ -147,19 +143,19 @@ proc scheduleCycleActions(node: BeaconNode)
       # Warm-up the proposer earlier to try to obtain previous
      # missing blocks if necessary

-      addTimer slotStart(slot),
-        proposeBlockCallback(node, attachedValidator)
+      addTimer(node.beaconState.slotStart(slot)) do (p: pointer):
+        asyncCheck proposeBlock(node, attachedValidator, slot)

     # Schedule attestations
     let
       committeesIdx = get_shard_and_committees_idx(node.beaconState, slot)

     for shard in node.beaconState.shard_and_committee_for_slots[committees_idx]:
       for validatorIdx in shard.committee:
         let attachedValidator = node.getAttachedValidator(validatorIdx)
         if attachedValidator != nil:
-          addTimer slotMiddle(slot),
-            makeAttestationCallback(node, attachedValidator)
+          addTimer(node.beaconState.slotMiddle(slot)) do (p: pointer):
+            asyncCheck makeAttestation(node, attachedValidator)

 proc processBlocks*(node: BeaconNode) {.async.} =
   node.scheduleCycleActions()
@@ -186,9 +182,9 @@ proc processBlocks*(node: BeaconNode) {.async.} =
     node.attestations.add(a, node.beaconState)

 when isMainModule:
-  let conf = Configuration.load()
+  let config = BeaconNodeConf.load()
   waitFor syncrhronizeClock()
-  var node = BeaconNode.init conf
+  var node = BeaconNode.init config

   if not waitFor node.sync():
     quit 1

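Note on the scheduling change above: the new `addTimer(...) do (p: pointer): ...` form hands an anonymous callback to asyncdispatch2's timer. A rough equivalent with an explicit proc literal, as a sketch (the exact addTimer signature, firing time plus a `proc (p: pointer)` callback, is assumed here):

  addTimer(node.beaconState.slotStart(slot),
           proc (p: pointer) =
             asyncCheck proposeBlock(node, attachedValidator, slot))
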
@@ -4,7 +4,7 @@ import
 type
   ValidatorKeyPath* = distinct string

-  Configuration* = object
+  BeaconNodeConf* = object
     dataDir* {.
       desc: "The directory where nimbus will store all blockchain data.",
       shorthand: "d",
@@ -25,3 +25,9 @@ type
            "Nimbus will automatically add the extensions .privkey and .pubkey.",
       shorthand: "v".}: seq[ValidatorKeyPath]

+proc parse*(T: type ValidatorKeyPath, input: TaintedString): T =
+  # TODO:
+  # Check that the entered string is a valid base file name and
+  # that it has matching .privkey, .pubkey and .randaosecret files
+  T(input)

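The `parse` overload above is what lets confutils turn each command-line value into a `ValidatorKeyPath` when `BeaconNodeConf.load()` runs in beacon_chain.nim. A hypothetical invocation using the `d` and `v` shorthands declared in this file (the binary name, the paths and the `validators` field name are illustrative assumptions, not from the diff):

  # ./beacon_node -d:/var/lib/nimbus -v:validators/alice -v:validators/bob
  let conf = BeaconNodeConf.load()
  for path in conf.validators:        # assumed seq[ValidatorKeyPath] field
    echo "validator key base path: ", string(path)
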
@@ -1,31 +1,42 @@
 import
-  deque,
-  datatypes
+  deques,
+  spec/[datatypes, crypto]

 type
   Attestation* = object
     validator*: int
     data*: AttestationSignedData
-    signature*: BLSsig
+    signature*: ValidatorSig

   AttestationPool* = object
-    attestations: deque[seq[Attestation]]
+    attestations: Deque[seq[Attestation]]
     startingSlot: int

 proc init*(T: type AttestationPool, startingSlot: int): T =
   result.attestationsPerSlot = initDeque[seq[Attestation]]()
   result.startingSlot = startingSlot

+proc setLen*[T](d: var Deque[T], len: int) =
+  # TODO: The upstream `Deque` type should gain a proper resize API
+  let delta = len - d.len
+  if delta > 0:
+    for i in 0 ..< delta:
+      var defaultVal: T
+      d.addLast(defaultVal)
+  else:
+    d.shrink(fromLast = delta)
+
 proc add*(pool: var AttestationPool,
           attestation: Attestation,
           beaconState: BeaconState) =
-  let slotIdxInPool = attestation.slot - pool.startingSlot
   # The caller of this function is responsible for ensuring that
   # the attestations will be given in a strictly slot increasing order:
-  doAssert slotIdxInPool < 0
+  doAssert attestation.data.slot.int >= pool.startingSlot
+
+  let slotIdxInPool = attestation.data.slot.int - pool.startingSlot
   if slotIdxInPool >= pool.attestations.len:
     pool.attestations.setLen(slotIdxInPool + 1)

   pool.attestations[slotIdxInPool].add attestation

 iterator each*(pool: AttestationPool,
@@ -34,7 +45,7 @@ iterator each*(pool: AttestationPool,
   ## TODO: this should return a lent value
   doAssert firstSlot <= lastSlot
   for idx in countup(max(0, firstSlot - pool.startingSlot),
-                     min(pool.attestation.len - 1, lastSlot - pool.startingSlot)):
+                     min(pool.attestations.len - 1, lastSlot - pool.startingSlot)):
     for attestation in pool.attestations[idx]:
       yield attestation

@@ -42,7 +53,7 @@ proc discardHistoryToSlot*(pool: var AttestationPool, slot: int) =
   ## The index is treated inclusively
   let slotIdx = slot - pool.startingSlot
   if slotIdx < 0: return
-  pool.attestation.shrink(fromFirst = slotIdx + 1)
+  pool.attestations.shrink(fromFirst = slotIdx + 1)

 proc getLatestAttestation*(pool: AttestationPool, validator: ValidatorRecord) =
   discard

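The new `setLen` helper above resizes a `Deque` by appending default-initialized elements (or shrinking from the back), which `add` then relies on to grow one bucket of attestations per slot. A tiny isolated sketch of the grow path, assuming the helper is in scope and using an illustrative element type:

  import deques

  var buckets = initDeque[seq[int]]()
  buckets.setLen(3)       # three empty seq[int] buckets
  buckets[2].add 42
  echo buckets.len        # 3
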
@@ -1,20 +1,26 @@
 import
   tables, sets,
-  eth_p2p, eth_p2p/rlpx
+  asyncdispatch2, chronicles, rlp, eth_p2p, eth_p2p/rlpx

 type
   TopicMsgHandler = proc(data: seq[byte]): Future[void]

-  GossipSubNetwork = type
+  GossibSubPeer = ref object
+    sentMessages: HashSet[string]
+
+  GossipSubNetwork = ref object
     deliveredMessages: Table[Peer, HashSet[string]]
     topicSubscribers: Table[string, seq[TopicMsgHandler]]

-protocol GossipSub(version = 1):
+p2pProtocol GossipSub(version = 1,
+                      shortName = "gss",
+                      peerState = GossibSubPeer,
+                      networkState = GossipSubNetwork):
   # This is a very barebones emulation of the GossipSub protocol
   # available in LibP2P:

-  proc interestedIn(topic: string)
-  proc emit(topic: string, msgId: string, data: openarray[byte])
+  proc interestedIn(peer: Peer, topic: string)
+  proc emit(peer: Peer, topic: string, msgId: string, data: openarray[byte])

 proc subscribeImpl(node: EthereumNode,
                    topic: string,
@@ -24,9 +30,9 @@ proc subscribeImpl(node: EthereumNode,
 proc broadcastImpl(node: EthereumNode, topic: string, data: seq[byte]) =
   discard

-macro subscribe*(node: EthereumNode, topic: string, handler: body): untyped =
+macro subscribe*(node: EthereumNode, topic: string, handler: untyped): untyped =
   discard

-proc broadcast*(node: EthereumNode, topic: string, data: auto) =
+proc broadcast*(node: EthereumNode, topic: string, data: auto) {.async.} =
   discard

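For context on the new per-peer and per-network state: `sentMessages` and `deliveredMessages` presumably exist so that a given message id is forwarded at most once per peer, mirroring GossipSub's dedup behaviour. The delivery path is still a stub; a rough sketch of the check it could perform (assumed, not part of this commit):

  #   if msgId notin network.deliveredMessages.mgetOrPut(peer, initSet[string]()):
  #     network.deliveredMessages[peer].incl msgId
  #     for handler in network.topicSubscribers.getOrDefault(topic):
  #       asyncCheck handler(data)
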
@@ -1,14 +1,12 @@
 import
-  asyncdispatch2, json_rpc,
-  datatypes
+  asyncdispatch2, json_rpc/rpcclient,
+  spec/[datatypes, digest]

 type
   MainchainMonitor* = object
     gethAddress: string
     gethPort: Port

-
-
 proc init*(T: type MainchainMonitor, gethAddress: string, gethPort: Port): T =
   result.gethAddress = gethAddress
   result.gethPort = gethPort
@@ -19,12 +17,12 @@ proc start*(m: var MainchainMonitor) =
   # interface and keep an always-up-to-date receipt reference here
   discard

-proc getBeaconBlockRef*(m: MainchainMonitor): Blake2_256_Digest =
+proc getBeaconBlockRef*(m: MainchainMonitor): Eth2Digest =
   # This should be a simple accessor for the reference kept above
   discard

-iterator getValidatorActions*(fromBlock,
-                              toBlock: Blake2_256_Digest): SpecialRecord =
+iterator getValidatorActions*(m: MainchainMonitor,
+                              fromBlock, toBlock: Eth2Digest): SpecialRecord =
   # It's probably better if this doesn't return a SpecialRecord, but
   # rather a more readable description of the change that can be packed
   # in a SpecialRecord by the client of the API.

@@ -1,134 +0,0 @@
-# beacon_chain
-# Copyright (c) 2018 Status Research & Development GmbH
-# Licensed and distributed under either of
-#   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
-#   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
-
-# Helper functions
-import ../datatypes, sequtils, nimcrypto, math
-
-func get_active_validator_indices(validators: seq[ValidatorRecord]): seq[Uint24] =
-  ## Select the active validators
-  result = @[]
-  for idx, val in validators:
-    if val.status == ACTIVE:
-      result.add idx.Uint24
-
-func shuffle(values: seq[Uint24], seed: Blake2_256_Digest): seq[Uint24] {.noInit.}=
-  ## Returns the shuffled ``values`` with seed as entropy.
-  ## TODO: this calls out for tests, but I odn't particularly trust spec
-  ## right now.
-
-  let values_count = values.len
-
-  # Entropy is consumed from the seed in 3-byte (24 bit) chunks
-  const rand_bytes = 3
-  let rand_max = 2^(rand_bytes * 8) - 1
-
-  # The range of the RNG places an upper-bound on the size of the list that
-  # may be shuffled. It is a logic error to supply an oversized list.
-  assert values_count < rand_max
-
-  deepCopy(result, values)
-  var source = seed
-
-  var i = 0
-  while i < values.len - 1:
-    # Re-hash the `source` to obtain a new pattern of bytes
-    source = blake2_256.digest source.data
-    # Iterate through the `source` bytes in 3-byte chunks
-    for pos in countup(0, 29, 3):
-      let remaining = values_count - i
-      if remaining == 1:
-        break
-
-      # Read 3-bytes of `source` as a 24-bit big-endian integer.
-      let sample_from_source = source.data[pos].Uint24 shl 16 or source.data[pos+1].Uint24 shl 8 or source.data[pos+2].Uint24
-
-      # Sample values greater than or equal to `sample_max` will cause
-      # modulo bias when mapped into the `remaining` range.
-      let sample_max = rand_max - rand_max mod remaining
-
-      # Perform a swap if the consumed entropy will not cause modulo bias.
-      if sample_from_source < sample_max:
-        let replacement_position = sample_from_source mod remaining + i
-        swap result[i], result[replacement_position]
-        inc i
-
-func split[T](lst: seq[T], N: Positive): seq[seq[T]] =
-  # TODO: implement as an iterator
-  result = newSeq[seq[T]](N)
-  for i in 0 ..< N:
-    result[i] = lst[lst.len * i div N ..< lst.len * (i+1) div N] # TODO: avoid alloc via toOpenArray
-
-func get_new_shuffling*(seed: Blake2_256_Digest, validators: seq[ValidatorRecord],
-                        dynasty: int64, crosslinking_start_shard: int16): seq[seq[ShardAndCommittee]] {.noInit.} =
-  ## Split up validators into groups at the start of every epoch,
-  ## determining at what height they can make attestations and what shard they are making crosslinks for
-  ## Implementation should do the following: http://vitalik.ca/files/ShuffleAndAssign.png
-
-  let avs = get_active_validator_indices(validators)
-  var committees_per_slot, slots_per_committee: uint16
-
-  if avs.len >= CYCLE_LENGTH * MIN_COMMITTEE_SIZE:
-    committees_per_slot = uint16 avs.len div CYCLE_LENGTH div (MIN_COMMITTEE_SIZE * 2) + 1
-    slots_per_committee = 1
-  else:
-    committees_per_slot = 1
-    slots_per_committee = 1
-    while avs.len.uint16 * slots_per_committee < CYCLE_LENGTH * MIN_COMMITTEE_SIZE and
-          slots_per_committee < CYCLE_LENGTH:
-      slots_per_committee *= 2
-
-  result = @[]
-  for slot, slot_indices in shuffle(avs, seed).split(CYCLE_LENGTH):
-    let shard_indices = slot_indices.split(committees_per_slot)
-    let shard_id_start = crosslinking_start_shard.uint16 +
-                         slot.uint16 * committees_per_slot div slots_per_committee
-
-    var committees = newSeq[ShardAndCommittee](shard_indices.len)
-    for j, indices in shard_indices:
-      committees[j].shard_id = (shard_id_start + j.uint16) mod SHARD_COUNT
-      committees[j].committee = indices
-
-    result.add committees
-
-func mod_get[T](arr: openarray[T], pos: Natural): T =
-  arr[pos mod arr.len]
-
-func get_shard_and_committees_idx*(state: BeaconState, slot: uint64): int =
-  # This replaces `get_shards_and_committees_for_slot` from the spec
-  # since in Nim, it's not currently efficient to create read-only
-  # accessors to expensive-to-copy members (such as sequences).
-  let earliest_slot_in_array = state.last_state_recalculation_slot - CYCLE_LENGTH
-  doAssert earliest_slot_in_array <= slot and
-           slot < earliest_slot_in_array + CYCLE_LENGTH * 2
-  return int(slot - earliest_slot_in_array)
-
-func get_beacon_proposer_idx*(state: BeaconState, slot: int): int =
-  # This replaces `get_beacon_proposer` from the spec since in Nim,
-  # it's not currently efficient to create read-only accessors to
-  # expensive-to-copy members (such as ValidatorRecord).
-  let idx = get_shard_and_committees_idx(state, slot)
-  return state.shard_and_committee_for_slots[idx][0].committee.mod_get(slot)
-
-func get_block_hash*(state: BeaconState, current_block: BeaconBlock, slot: int): Blake2_256_Digest =
-  let earliest_slot_in_array = current_block.slot.int - state.recent_block_hashes.len
-  assert earliest_slot_in_array <= slot
-  assert slot < current_block.slot.int
-
-  return state.recent_block_hashes[slot - earliest_slot_in_array]
-
-func get_new_recent_block_hashes*(
-  old_block_hashes: seq[Blake2_256_Digest],
-  parent_slot, current_slot: int64,
-  parent_hash: Blake2_256_Digest
-  ): seq[Blake2_256_Digest] =
-
-  # Should throw for `current_slot - CYCLE_LENGTH * 2 - 1` according to spec comment
-  let d = current_slot - parent_slot
-  result = old_block_hashes[d .. ^1]
-  for _ in 0 ..< min(d, old_block_hashes.len):
-    result.add parent_hash

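The deleted `shuffle` above (the private helpers file appears to be superseded by the spec/ modules imported elsewhere in this commit) documents how it avoids modulo bias when mapping 24-bit samples onto the remaining range. A quick numeric check of that guard, standalone and purely illustrative:

  import math

  let randMax = 2^(3 * 8) - 1               # 16_777_215, as in the deleted code
  let sampleMax = randMax - randMax mod 10  # 16_777_210 when remaining == 10
  assert sampleMax mod 10 == 0              # accepted samples map uniformly onto 0..9
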
@@ -8,25 +8,22 @@
 import
   ./datatypes, ./digest, ./helpers, ./validator

-func get_shards_and_committees_for_slot*(state: BeaconState,
-                                         slot: uint64
-                                         ): seq[ShardAndCommittee] =
-  let earliest_slot_in_array = state.last_state_recalculation_slot - CYCLE_LENGTH
-  assert earliest_slot_in_array <= slot
-  assert slot < earliest_slot_in_array + CYCLE_LENGTH * 2
-
-  return state.shard_and_committee_for_slots[int slot - earliest_slot_in_array]
-  # TODO, slot is a uint64; will be an issue on int32 arch.
-  # Clarify with EF if light clients will need the beacon chain
-
-func get_block_hash*(state: BeaconState, current_block: BeaconBlock, slot: int): Eth2Digest =
-  let earliest_slot_in_array = current_block.slot.int - state.recent_block_hashes.len
-  assert earliest_slot_in_array <= slot
-  assert slot < current_block.slot.int
-
-  return state.recent_block_hashes[slot - earliest_slot_in_array]
-
-func get_beacon_proposer*(state: BeaconState, slot: uint64): ValidatorRecord =
+func mod_get[T](arr: openarray[T], pos: Natural): T =
+  arr[pos mod arr.len]
+
+func get_shard_and_committees_idx*(state: BeaconState, slot: int): int =
+  # This replaces `get_shards_and_committees_for_slot` from the spec
+  # since in Nim, it's not currently efficient to create read-only
+  # accessors to expensive-to-copy members (such as sequences).
+  let earliest_slot_in_array = state.last_state_recalculation_slot.int - CYCLE_LENGTH
+  doAssert earliest_slot_in_array <= slot and
+           slot < earliest_slot_in_array + CYCLE_LENGTH * 2
+  return int(slot - earliest_slot_in_array)
+
+proc get_shards_and_committees_for_slot*(state: BeaconState, slot: int): seq[ShardAndCommittee] =
+  return state.shard_and_committee_for_slots[state.get_shard_and_committees_idx(slot)]
+
+func get_beacon_proposer_idx*(state: BeaconState, slot: int): int =
   ## From Casper RPJ mini-spec:
   ## When slot i begins, validator Vidx is expected
   ## to create ("propose") a block, which contains a pointer to some parent block
@@ -35,7 +32,20 @@ func get_beacon_proposer*(state: BeaconState, slot: uint64): ValidatorRecord =
   ## that have not yet been included into that chain.
   ##
   ## idx in Vidx == p(i mod N), pi being a random permutation of validators indices (i.e. a committee)
-  let
-    first_committee = get_shards_and_committees_for_slot(state, slot)[0].committee
-    index = first_committee[(slot mod len(first_committee).uint64).int]
-  state.validators[index]
+  # This replaces `get_beacon_proposer` from the spec since in Nim,
+  # it's not currently efficient to create read-only accessors to
+  # expensive-to-copy members (such as ValidatorRecord).
+
+  let idx = get_shard_and_committees_idx(state, slot)
+  return state.shard_and_committee_for_slots[idx][0].committee.mod_get(slot)
+
+func get_block_hash*(state: BeaconState,
+                     current_block: BeaconBlock,
+                     slot: int): Eth2Digest =
+  let earliest_slot_in_array = current_block.slot.int - state.recent_block_hashes.len
+  assert earliest_slot_in_array <= slot
+  assert slot < current_block.slot.int
+
+  return state.recent_block_hashes[slot - earliest_slot_in_array]

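`get_beacon_proposer_idx` above picks the proposer by wrapping the slot around the first committee with `mod_get`. The helper in isolation, re-declared here purely for illustration (the real one is not exported):

  func mod_get[T](arr: openarray[T], pos: Natural): T =
    arr[pos mod arr.len]

  let committee = @[100, 101, 102, 103, 104]
  assert committee.mod_get(12) == 102   # 12 mod 5 == 2
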
@@ -10,8 +10,13 @@
 # hashed out. This layer helps isolate those chagnes.

 import
-  milagro_crypto
+  milagro_crypto, hashes

 type
-  Eth2PublicKey* = milagro_crypto.VerKey
-  Eth2Signature* = milagro_crypto.Signature
+  ValidatorPubKey* = milagro_crypto.VerKey
+  ValidatorPrivKey* = milagro_crypto.SigKey
+  ValidatorSig* = milagro_crypto.Signature
+
+template hash*(k: ValidatorPubKey|ValidatorPrivKey): Hash =
+  hash(k.getRaw)

@@ -69,13 +69,13 @@ type
     state_root*: Eth2Digest # State root
     attestations*: seq[AttestationRecord] # Attestations
     specials*: seq[SpecialRecord] # Specials (e.g. logouts, penalties)
-    proposer_signature*: Eth2Signature # Proposer signature
+    proposer_signature*: ValidatorSig # Proposer signature

   AttestationRecord* = object
     data*: AttestationSignedData #
     attester_bitfield*: seq[byte] # Attester participation bitfield
     poc_bitfield*: seq[byte] # Proof of custody bitfield
-    aggregate_sig*: Eth2Signature # BLS aggregate signature
+    aggregate_sig*: ValidatorSig # BLS aggregate signature

   AttestationSignedData* = object
     slot*: uint64 # Slot number
@@ -93,22 +93,8 @@ type
     block_hash*: Eth2Digest # Block hash

   SpecialRecord* = object
-    kind*: SpecialRecordTypes # Kind
+    kind*: SpecialRecordType # Kind
     data*: seq[byte] # Data

-  AttestationRecord* = object
-    slot*: uint64 # Slot number
-    shard*: uint16 # Shard number
-    oblique_parent_hashes*: seq[Blake2_256_Digest]
-      # Beacon block hashes not part of the current chain, oldest to newest
-    shard_block_hash*: Blake2_256_Digest # Shard block hash being attested to
-    last_crosslink_hash*: Blake2_256_Digest # Last crosslink hash
-    shard_block_combined_data_root*: Blake2_256_Digest
-      # Root of data between last hash and this one
-    attester_bitfield*: seq[byte] # Attester participation bitfield (1 bit per attester)
-    justified_slot*: uint64 # Slot of last justified beacon block
-    justified_block_hash*: Blake2_256_Digest # Hash of last justified beacon block
-    aggregate_sig*: BLSSig # BLS aggregate signature
-
   BeaconState* = object
     validator_set_change_slot*: uint64 # Slot of last validator set change
@@ -123,10 +109,6 @@ type
       ## Committee members and their assigned shard, per slot, covers 2 cycles
       ## worth of assignments
     persistent_committees*: seq[seq[Uint24]] # Persistent shard committees
-    last_justified_slot*: uint64 # Last justified slot
-    justified_streak*: uint64 # Number of consecutive justified slots
-    shard_and_committee_for_slots*: seq[seq[ShardAndCommittee]] # Committee members and their assigned shard, per slot
-    persistent_committees*: seq[seq[Uint24]] # Persistent shard committees
     persistent_committee_reassignments*: seq[ShardReassignmentRecord]
     next_shuffling_seed*: Eth2Digest # Randao seed used for next shuffling
     deposits_penalized_in_period*: uint32 # Total deposits penalized in the given withdrawal period
@@ -142,7 +124,7 @@ type
     randao_mix*: Eth2Digest # RANDAO state

   ValidatorRecord* = object
-    pubkey*: Eth2PublicKey # Public key
+    pubkey*: ValidatorPubKey # Public key
     withdrawal_credentials*: Eth2Digest # Withdrawal credentials
     randao_commitment*: Eth2Digest # RANDAO commitment
     randao_skips*: uint64 # Slot the proposer has skipped (ie. layers of RANDAO expected)
@@ -187,7 +169,7 @@ type
     Withdrawn = 4
     Penalized = 127

-  SpecialRecordTypes* {.pure.} = enum
+  SpecialRecordType* {.pure.} = enum
     Logout = 0
     CasperSlashing = 1
     RandaoChange = 2
@@ -210,3 +192,26 @@ type
 #
 # Also, IntSets uses machine int size while we require int64 even on 32-bit platform.

+when true:
+  # TODO: Remove these once RLP serialization is no longer used
+  import nimcrypto, rlp
+  export append, read
+
+  proc append*(rlpWriter: var RlpWriter, value: ValidatorPubKey) =
+    discard
+
+  proc read*(rlp: var Rlp, T: type ValidatorPubKey): T {.inline.} =
+    discard
+
+  proc append*(rlpWriter: var RlpWriter, value: Uint24) =
+    discard
+
+  proc read*(rlp: var Rlp, T: type Uint24): T {.inline.} =
+    discard
+
+  proc append*(rlpWriter: var RlpWriter, value: ValidatorSig) =
+    discard
+
+  proc read*(rlp: var Rlp, T: type ValidatorSig): T {.inline.} =
+    discard

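The `when true:` block above registers `append`/`read` overloads so the RLP-based wire protocols can carry the new crypto aliases and `Uint24` even while the bodies are still stubs. A sketch of how they are expected to be picked up (assumed usage of eth's rlp module, not shown in this diff):

  #   let payload = rlp.encode(somePubKey)            # dispatches to append(rlpWriter, ValidatorPubKey)
  #   let key = rlp.decode(payload, ValidatorPubKey)  # dispatches to read(rlp, ValidatorPubKey)
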
@@ -17,7 +17,7 @@ func min_empty_validator(validators: seq[ValidatorRecord], current_slot: uint64)
       return some(i)

 func add_validator*(validators: var seq[ValidatorRecord],
-                    pubkey: Eth2PublicKey,
+                    pubkey: ValidatorPubKey,
                     proof_of_possession: seq[byte],
                     withdrawal_credentials: Eth2Digest,
                     randao_commitment: Eth2Digest,

@@ -226,11 +226,11 @@ func hashSSZ*(x: enum): array[32, byte] =
   withHash:
     h.update [uint8 x]

-func hashSSZ*(x: Eth2Signature): array[32, byte] =
+func hashSSZ*(x: ValidatorSig): array[32, byte] =
   ## TODO - Warning ⚠️: not part of the spec
   ## as of https://github.com/ethereum/beacon_chain/pull/133/files
   ## This is a "stub" needed for BeaconBlock hashing
-  x.getraw().hash()
+  x.getRaw().hash()

 func hashSSZ*(x: AttestationRecord): array[32, byte] =
   ## TODO - Warning ⚠️: not part of the spec

@@ -25,8 +25,6 @@ import
   intsets, endians, nimcrypto,
   milagro_crypto # nimble install https://github.com/status-im/nim-milagro-crypto@#master

-
-
 func process_block*(active_state: BeaconState, crystallized_state: BeaconState, blck: BeaconBlock, slot: uint64) =
   # TODO: non-attestation verification parts of per-block processing

@@ -44,7 +42,7 @@ func process_block*(active_state: BeaconState, crystallized_state: BeaconState,

   # Let attestation_indices be get_shards_and_committees_for_slot(crystallized_state, slot)[x], choosing x so that attestation_indices.shard_id equals the shard_id value provided to find the set of validators that is creating this attestation record.
   let attestation_indices = block:
-    let shard_and_committees = get_shards_and_committees_for_slot(crystallized_state, slot)
+    let shard_and_committees = get_shards_and_committees_for_slot(crystallized_state, slot.int)
     var
       x = 1
       record_creator = shard_and_committees[0]
@@ -53,11 +51,10 @@ func process_block*(active_state: BeaconState, crystallized_state: BeaconState,
       inc x
     record_creator

-  # Verify that len(attester_bitfield) == ceil_div8(len(attestation_indices)), where ceil_div8 = (x + 7) // 8. Verify that bits len(attestation_indices).... and higher, if present (i.e. len(attestation_indices) is not a multiple of 8), are all zero
-  # doAssert attestation.attester_bitfield.len == attestation_indices.committee.len
+  # TODO: Verify that len(attester_bitfield) == ceil_div8(len(attestation_indices)), where ceil_div8 = (x + 7) // 8. Verify that bits len(attestation_indices).... and higher, if present (i.e. len(attestation_indices) is not a multiple of 8), are all zero

   # Derive a group public key by adding the public keys of all of the attesters in attestation_indices for whom the corresponding bit in attester_bitfield (the ith bit is (attester_bitfield[i // 8] >> (7 - (i %8))) % 2) equals 1
-  var agg_pubkey: Eth2PublicKey
+  var agg_pubkey: ValidatorPubKey
   var empty = true
   for attester_idx in attestation_indices.committee:
     # TODO re-enable, but currently this whole function's a nonfunctional stub

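The long comment above quotes the spec's per-attester bit lookup `(attester_bitfield[i // 8] >> (7 - (i % 8))) % 2` in Python notation. The same check in Nim, as a self-contained illustration (not part of this file):

  func bitIsSet(bitfield: openarray[byte], i: int): bool =
    ((bitfield[i div 8].int shr (7 - (i mod 8))) and 1) == 1

  assert bitIsSet([0b1000_0001'u8], 0)   # first attester participates
  assert bitIsSet([0b1000_0001'u8], 7)   # eighth attester participates
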
@@ -1,25 +1,27 @@
 import
-  rlp, asyncdispatch2, ranges/bitranges, eth_p2p, eth_p2p/rlpx,
-  datatypes
+  options,
+  chronicles, rlp, asyncdispatch2, ranges/bitranges, eth_p2p, eth_p2p/rlpx,
+  spec/[datatypes, crypto, digest]

 type
   ValidatorChangeLogEntry* = object
     case kind*: ValidatorSetDeltaFlags
     of Entry:
-      pubkey: BLSPublicKey
+      pubkey: ValidatorPubKey
     else:
       index: uint32

   ValidatorSet = seq[ValidatorRecord]

-protocol BeaconSync(version = 1):
+p2pProtocol BeaconSync(version = 1,
+                       shortName = "bcs"):
   requestResponse:
-    proc getValidatorChangeLog(peer: Peer, changeLogHead: Blake2_256_Digest)
+    proc getValidatorChangeLog(peer: Peer, changeLogHead: Eth2Digest)

     proc validatorChangeLog(peer: Peer,
                             signedBlock: BeaconBlock,
                             beaconState: BeaconState,
-                            added: openarray[BLSPublicKey],
+                            added: openarray[ValidatorPubKey],
                             removed: openarray[uint32],
                             order: seq[byte])
@@ -33,29 +35,41 @@ type
   ChangeLog = BeaconSync.validatorChangeLog
   ChangeLogEntry = ValidatorChangeLogEntry

-iterator changes*(cl: ChangeLog): ChangeLogEntry =
+func validate*(log: ChangeLog): bool =
+  # TODO:
+  # Assert that the number of raised bits in log.order (a.k.a population count)
+  # matches the number of elements in log.added
+  # https://en.wikichip.org/wiki/population_count
+  return true
+
+iterator changes*(log: ChangeLog): ChangeLogEntry =
   var
-    bits = cl.added.len + cl.removed.len
+    bits = log.added.len + log.removed.len
     addedIdx = 0
     removedIdx = 0

-  for i in 0 ..< bits:
-    yield if order.getBit(i):
-      ChangeLogEntry(kind: Entry, pubkey: added[addedIdx++])
-    else:
-      ChangeLogEntry(kind: Exit, index: removed[removedIdx++])
-
-proc getValidatorChangeLog*(node: EthereumNode):
+  template nextItem(collection): auto =
+    let idx = `collection Idx`
+    inc `collection Idx`
+    log.collection[idx]
+
+  for i in 0 ..< bits:
+    yield if log.order.getBit(i):
+      ChangeLogEntry(kind: Entry, pubkey: nextItem(added))
+    else:
+      ChangeLogEntry(kind: Exit, index: nextItem(removed))
+
+proc getValidatorChangeLog*(node: EthereumNode, changeLogHead: Eth2Digest):
     Future[(Peer, ChangeLog)] {.async.} =
   while true:
-    let peer = node.randomPeerWith(BeaconSync):
+    let peer = node.randomPeerWith(BeaconSync)
     if peer == nil: return

-    let res = await peer.getValidatorChangeLog(timeout = 1)
+    let res = await peer.getValidatorChangeLog(changeLogHead, timeout = 1)
     if res.isSome:
       return (peer, res.get)

-proc applyValidatorChangeLog*(changeLog: ChangeLog,
+proc applyValidatorChangeLog*(log: ChangeLog,
                               outBeaconState: var BeaconState): bool =
   # TODO:
   #
@@ -72,8 +86,8 @@ proc applyValidatorChangeLog*(changeLog: ChangeLog,
   #

   outBeaconState.last_finalized_slot =
-    changeLog.signedBlock.slot div CYCLE_LENGTH
+    log.signedBlock.slot div CYCLE_LENGTH

   outBeaconState.validator_set_delta_hash_chain =
-    changeLog.beaconState.validator_set_delta_hash_chain
+    log.beaconState.validator_set_delta_hash_chain

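The `nextItem` template in the reworked `changes` iterator builds identifiers with backticks: the backticked `collection Idx` splices the argument name together with `Idx`. So `nextItem(added)` expands roughly to:

  #   let idx = addedIdx
  #   inc addedIdx
  #   log.added[idx]
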
@@ -1,5 +1,7 @@
 import
-  asyncdispatch2, datatypes, random
+  random,
+  asyncdispatch2,
+  spec/datatypes

 type
   Timestamp = uint64 # Unix epoch timestamp in millisecond resolution
@@ -11,16 +13,16 @@ proc timeSinceGenesis*(s: BeaconState): Timestamp =
   Timestamp(int64(fastEpochTime() - s.genesis_time * 1000) -
             detectedClockDrift)

-template toSlot*(t: Timestamp): uint64 =
-  t div uint64(SLOT_DURATION * 1000)
+template toSlot*(t: Timestamp): int =
+  int(t div uint64(SLOT_DURATION * 1000))

-template slotStart*(s: BeaconState, slot: Natural): Timestamp =
+template slotStart*(s: BeaconState, slot: int): Timestamp =
   (s.genesis_time + uint64(slot * SLOT_DURATION)) * 1000

-template slotMiddle*(s: BeaconState, slot: Natural): Timestamp =
-  s.slotStart + SLOT_DURATION * 500
+template slotMiddle*(s: BeaconState, slot: int): Timestamp =
+  s.slotStart(slot) + SLOT_DURATION * 500

-template slotEnd*(s: BeaconState, slot: Natural): Timestamp =
+template slotEnd*(s: BeaconState, slot: int): Timestamp =
   s.slotStart(slot + 1)

 proc randomTimeInSlot*(s: BeaconState,

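Unit note for the templates above: `Timestamp` is in milliseconds while `genesis_time` and `SLOT_DURATION` are in seconds, hence the `* 1000` factors. With illustrative values (SLOT_DURATION = 6 s, genesis_time = 1_544_000_000):

  #   slotStart(s, 10)  = (1_544_000_000 + 10 * 6) * 1000 = 1_544_000_060_000 ms
  #   slotMiddle(s, 10) = slotStart(s, 10) + 6 * 500      = 1_544_000_063_000 ms
  #   slotEnd(s, 10)    = slotStart(s, 11)                = 1_544_000_066_000 ms
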
@@ -1,6 +1,11 @@
 import
   asyncdispatch2,
-  datatypes, beacon_chain_db
+  spec/datatypes, beacon_chain_db
+
+const
+  WEAK_SUBJECTVITY_PERIOD* = 4 * 30 * 24 * 60 * 60 div SLOT_DURATION
+    # TODO: This needs revisiting.
+    # Why was the validator WITHDRAWAL_PERIOD altered in the spec?

 proc obtainTrustedStateSnapshot*(db: BeaconChainDB): Future[BeaconState] {.async.} =
   # In case our latest state is too old, we must obtain a recent snapshot

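The new constant is roughly four months expressed in slots: 4 * 30 * 24 * 60 * 60 = 10_368_000 seconds, divided by the slot length. A quick check with an illustrative SLOT_DURATION of 6 seconds (the real value comes from spec/datatypes):

  const SLOT_DURATION = 6   # illustrative only
  const WEAK_SUBJECTVITY_PERIOD = 4 * 30 * 24 * 60 * 60 div SLOT_DURATION
  doAssert WEAK_SUBJECTVITY_PERIOD == 1_728_000
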
@@ -1,7 +1,7 @@
 import
   tables, random,
   asyncdispatch2,
-  datatypes
+  spec/[datatypes, crypto]

 type
   ValidatorKind = enum
@@ -10,36 +10,40 @@ type

   ValidatorConnection = object

-  RandaoValue = seq[bytes]
+  RandaoSecret = seq[byte]

   AttachedValidator* = ref object
     idx*: int
     case kind: ValidatorKind
     of inProcess:
-      randaoValue: RandaoValue
-      privKey: BLSPrivateKey
-      randaoSecret: seq[bytes]
+      privKey: ValidatorPrivKey
+      randaoSecret: RandaoSecret
     else:
       connection: ValidatorConnection

   ValidatorPool* = object
-    validators: Table[BLSPublicKey, AttachedValidator]
+    validators: Table[ValidatorPubKey, AttachedValidator]

 proc init*(T: type ValidatorPool): T =
-  result.validators = initTable[BLSPublicKey, AttachedValidator]()
+  result.validators = initTable[ValidatorPubKey, AttachedValidator]()

 proc addLocalValidator*(pool: var ValidatorPool,
-                        pubKey: BLSPublicKey,
-                        privKey: BLSPrivateKey) =
-  discard
+                        idx: int,
+                        pubKey: ValidatorPubKey,
+                        privKey: ValidatorPrivKey,
+                        randaoSecret: RandaoSecret) =
+  pool.validators[pubKey] = AttachedValidator(idx: idx,
+                                              kind: inProcess,
+                                              privKey: privKey,
+                                              randaoSecret: randaoSecret)

 proc getValidator*(pool: ValidatorPool,
-                   validatorKey: BLSPublicKey): AttachedValidator =
-  pool.validatators.getOrDefault(validatorKey)
+                   validatorKey: ValidatorPubKey): AttachedValidator =
+  pool.validators.getOrDefault(validatorKey)

 proc signBlockProposal*(v: AttachedValidator,
-                        proposal: ProposalSignedData): Future[Signature] {.async.} =
-  if v.inProcess:
+                        proposal: ProposalSignedData): Future[ValidatorSig] {.async.} =
+  if v.kind == inProcess:
     await sleepAsync(1)
     # TODO:
     # return sign(proposal, v.privKey)
@@ -49,9 +53,9 @@ proc signBlockProposal*(v: AttachedValidator,
     discard

 proc signAttestation*(v: AttachedValidator,
-                      attestation: AttestationSignedData): Future[Signature] {.async.} =
+                      attestation: AttestationSignedData): Future[ValidatorSig] {.async.} =
   # TODO: implement this
-  if v.inProcess:
+  if v.kind == inProcess:
     await sleepAsync(1)
     # TODO:
     # return sign(proposal, v.privKey)

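Putting the reworked pool API together, as a sketch (`somePubKey`, `somePrivKey` and the empty randao secret are placeholders, not values from this commit):

  var pool = ValidatorPool.init()
  pool.addLocalValidator(idx = 0, pubKey = somePubKey,
                         privKey = somePrivKey, randaoSecret = newSeq[byte]())
  let v = pool.getValidator(somePubKey)
  doAssert v != nil and v.idx == 0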