mirror of https://github.com/status-im/nimbus-eth2.git
synced 2025-02-17 00:47:03 +00:00

work-in-progress beacon node skeleton (not compiling at the moment)

This commit is contained in:
parent 53f819689e
commit 4920c0a357
@@ -21,7 +21,7 @@ Ethereum Foundation uses:

Nim NEP-1 recommends:
- camelCase for fields and procedure names
- PascalCase for constants
-- PsacalCase for types
+- PascalCase for types

To facilitate collaboration and comparison, Nim-beacon-chain uses the Ethereum Foundation convention.
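For illustration, a minimal sketch of what that choice means in practice, with names invented for the example: spec-defined helpers and constants keep the spec's snake_case and UPPER_CASE spelling (as in the new private/helpers.nim below), while ordinary node-side identifiers may still follow NEP-1 camelCase.

const ACTIVE = 0   # spec-style constant name (illustrative value)

func get_active_validator_count(statuses: seq[int]): int =
  # spec-style snake_case helper name
  for s in statuses:
    if s == ACTIVE:
      inc result

let activeCount = get_active_validator_count(@[0, 1, 0])   # NEP-1 camelCase local
doAssert activeCount == 2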
@@ -3,13 +3,21 @@ version = "0.0.1"
author = "Status Research & Development GmbH"
description = "Eth2.0 research implementation of the beacon chain"
license = "MIT or Apache License 2.0"
-srcDir = "src"
+installDirs = @["beacon_chain"]
+bin = @["beacon_chain/beacon_node"]

### Dependencies
requires "nim >= 0.18.0",
         "eth_common",
+        "eth_keys",
         "nimcrypto",
-        "https://github.com/status-im/nim-milagro-crypto#master"
+        "https://github.com/status-im/nim-milagro-crypto#master",
+        "eth_p2p",
+        "chronicles",
+        "confutils",
+        "serialization",
+        "json_serialization",
+        "ranges"

### Helper functions
proc test(name: string, defaultLang = "c") =
beacon_chain/beacon_chain_db.nim (new file, 28 lines)
@@ -0,0 +1,28 @@
import
  os, json,
  chronicles, json_serialization, eth_common/eth_types_json_serialization,
  datatypes

type
  BeaconChainDB* = ref object
    dataRoot: string

  BeaconStateRef* = ref BeaconState

proc init*(T: type BeaconChainDB, dataDir: string): BeaconChainDB =
  new result
  result.dataRoot = dataDir / "beacon_db"
  createDir(result.dataRoot)

proc lastFinalizedState*(db: BeaconChainDB): BeaconStateRef =
  try:
    var stateJson = parseJson readFile(db.dataRoot / "BeaconState.json")
    # TODO implement this
  except:
    return nil

proc persistBlock*(db: BeaconChainDB, s: BeaconState, b: BeaconBlock) =
  let stateJson = StringJsonWriter.encode(s, pretty = true)
  writeFile(db.dataRoot / "BeaconState.json", stateJson)
  debug "State persisted"
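The module above persists the latest beacon state as pretty-printed JSON under <dataDir>/beacon_db and reads it back on startup. A self-contained sketch of that round-trip, using only the standard library and a simplified stand-in state type instead of BeaconState and the json_serialization writer:

import std/[os, json, options]

type ToyState = object
  lastFinalizedSlot: int

proc persistState(dataRoot: string, s: ToyState) =
  # Mirrors persistBlock: create the data dir and overwrite the JSON snapshot.
  createDir(dataRoot)
  let stateJson = pretty(%*{"lastFinalizedSlot": s.lastFinalizedSlot})
  writeFile(dataRoot / "BeaconState.json", stateJson)

proc loadState(dataRoot: string): Option[ToyState] =
  # Mirrors lastFinalizedState: any read/parse failure means "no usable state".
  try:
    let j = parseJson readFile(dataRoot / "BeaconState.json")
    result = some(ToyState(lastFinalizedSlot: j["lastFinalizedSlot"].getInt))
  except IOError, ValueError, KeyError:
    result = none(ToyState)

when isMainModule:
  let dataRoot = getTempDir() / "beacon_db_sketch"
  persistState(dataRoot, ToyState(lastFinalizedSlot: 42))
  doAssert loadState(dataRoot).get.lastFinalizedSlot == 42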
beacon_chain/beacon_node.nim (new file, 125 lines)
@@ -0,0 +1,125 @@
import
  os, net, tables,
  asyncdispatch2, confutils, eth_p2p, eth_keys,
  conf, datatypes, time, beacon_chain_db, validator_pool,
  sync_protocol, gossipsub_protocol, trusted_state_snapshots,
  private/helpers

type
  BeaconNode* = ref object
    beaconState*: BeaconState
    network*: EthereumNode
    db*: BeaconChainDB
    config*: Configuration
    keys*: KeyPair
    attachedValidators: Table[BLSPublicKey, AttachedValidator]

const
  version = "v0.1" # read this from the nimble file
  clientId = "nimbus beacon node " & version
  topicBeaconBlocks = "ethereum/2.1/beacon_blocks"

proc ensureNetworkKeys*(dataDir: string): KeyPair =
  # TODO:
  # 1. Check if keys already exist in the data dir
  # 2. Generate new ones and save them in the directory
  #    if necessary
  return newKeyPair()

proc init*(T: type BeaconNode, conf: Configuration): T =
  new result
  result.config = conf
  result.db = BeaconChainDB.init(conf.dataDir)
  result.keys = ensureNetworkKeys(conf.dataDir)

  var address: Address
  address.ip = parseIpAddress("0.0.0.0")
  address.tcpPort = Port(conf.tcpPort)
  address.udpPort = Port(conf.udpPort)
  result.network = newEthereumNode(result.keys, address, 0, nil, clientId)

proc sync*(node: BeaconNode): Future[bool] {.async.} =
  let persistedState = node.db.lastFinalizedState()
  if persistedState.isNil or
     persistedState[].slotDistanceFromNow() > WITHDRAWAL_PERIOD:
    node.beaconState = await obtainTrustedStateSnapshot(node.db)
  else:
    node.beaconState = persistedState[]

  var targetSlot = toSlot timeSinceGenesis(node.beaconState)

  while node.beaconState.last_finalized_slot < targetSlot:
    var (peer, changeLog) = await node.network.getValidatorChangeLog(
      node.beaconState.validator_set_delta_hash_chain)

    if peer == nil:
      error "Failed to sync with any peer"
      return false

    if applyValidatorChangeLog(changeLog, node.beaconState):
      node.db.persistBlock(node.beaconState, changeLog.signedBlock)
    else:
      warn "Ignoring invalid validator change log", sentFrom = peer

  return true

proc addLocalValidators*(node: BeaconNode) =
  for validator in node.config.validatorKeys:
    # TODO:
    # 1. Parse the validator keys
    #
    # 2. Check whether the validators exist in the beacon state
    #    (report a warning otherwise)
    #
    # 3. Add the validators to node.attachedValidators
    discard

proc scheduleCycleActions(node: BeaconNode) =
  ## This schedules the required block proposals and
  ## attestations from our attached validators.
  let cycle_start = node.beaconState.last_state_recalculation_slot

  # Schedule block proposals
  for i in 0 ..< CYCLE_LENGTH:
    let
      proposer_idx = get_beacon_proposer_idx(node.beaconState, cycle_start + i)
      proposer_key = node.beaconState.validators[proposer_idx].pubkey
      attached_validator = node.attachedValidators.getAttachedValidator(proposer_key)

    if attached_validator != nil:
      proc proposeBlock =
        # TODO
        discard

      # TODO:
      # Warm-up the proposer earlier to try to obtain previous
      # missing blocks if necessary
      addTimer node.beaconState.slotMiddle(cycle_start + i), proposeBlock

  # Schedule attestations
  # TODO:
  # Similar to the above, but using `get_shard_and_committees_idx`

proc processBlocks*(node: BeaconNode) {.async.} =
  node.scheduleCycleActions()

  node.network.subscribe(topicBeaconBlocks) do (b: BeaconBlock):
    # TODO:
    # 1. Check for missing blocks and obtain them
    if b.slot mod CYCLE_LENGTH == 0:
      node.scheduleCycleActions()

when isMainModule:
  let conf = Configuration.load()
  waitFor synchronizeClock()
  var node = BeaconNode.init conf

  if not waitFor node.sync():
    quit 1

  node.addLocalValidators()
  waitFor node.processBlocks()
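A self-contained sketch of the per-cycle scheduling idea in scheduleCycleActions above: walk the slots of the upcoming cycle, look up the proposer for each slot, and remember only the slots whose proposer key we hold. The proposer lookup, key table, constant value and the slotsToPropose name are simplified stand-ins invented for the example; the real node registers an addTimer callback for the middle of each such slot.

import std/tables

const CYCLE_LENGTH = 8   # illustrative; the real value comes from the spec constants

proc slotsToPropose(cycleStart: int,
                    proposerForSlot: proc (slot: int): string,
                    attached: Table[string, string]): seq[int] =
  # Slots in [cycleStart, cycleStart + CYCLE_LENGTH) where we hold the proposer's key.
  for i in 0 ..< CYCLE_LENGTH:
    let slot = cycleStart + i
    if proposerForSlot(slot) in attached:
      result.add slot

when isMainModule:
  let attached = {"val2": "privkey2"}.toTable
  # Toy proposer rotation: slot s is proposed by "val" & $(s mod 4).
  let slots = slotsToPropose(0, proc (s: int): string = "val" & $(s mod 4), attached)
  doAssert slots == @[2, 6]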
beacon_chain/conf.nim (new file, 27 lines)
@@ -0,0 +1,27 @@
import
  confutils/defs

type
  ValidatorKeyPath* = distinct string

  Configuration* = object
    dataDir* {.
      desc: "The directory where nimbus will store all blockchain data.",
      shorthand: "d",
      defaultValue: getConfigDir() / "nimbus".}: DirPath

    bootstrapNodes* {.
      desc: "Specifies one or more bootstrap nodes to use when connecting to the network.",
      shorthand: "b".}: seq[string]

    tcpPort* {.
      desc: "TCP listening port".}: int

    udpPort* {.
      desc: "UDP listening port".}: int

    validatorKeys* {.
      desc: "A path to a pair of public and private keys for a validator. " &
            "Nimbus will automatically add the extensions .privkey and .pubkey.",
      shorthand: "v".}: seq[ValidatorKeyPath]
beacon_chain/gossipsub_protocol.nim (new file, 7 lines)
@@ -0,0 +1,7 @@
import
  eth_p2p, eth_p2p/rlpx

type

protocol GossipSub(version = 1):
beacon_chain/private/helpers.nim (new file, 134 lines)
@@ -0,0 +1,134 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# Helper functions
import ../datatypes, sequtils, nimcrypto, math

func get_active_validator_indices(validators: seq[ValidatorRecord]): seq[Uint24] =
  ## Select the active validators
  result = @[]
  for idx, val in validators:
    if val.status == ACTIVE:
      result.add idx.Uint24

func shuffle(values: seq[Uint24], seed: Blake2_256_Digest): seq[Uint24] {.noInit.} =
  ## Returns the shuffled ``values`` with ``seed`` as entropy.
  ## TODO: this calls out for tests, but I don't particularly trust the spec
  ## right now.

  let values_count = values.len

  # Entropy is consumed from the seed in 3-byte (24 bit) chunks
  const rand_bytes = 3
  let rand_max = 2^(rand_bytes * 8) - 1

  # The range of the RNG places an upper-bound on the size of the list that
  # may be shuffled. It is a logic error to supply an oversized list.
  assert values_count < rand_max

  deepCopy(result, values)
  var source = seed

  var i = 0
  while i < values.len - 1:
    # Re-hash the `source` to obtain a new pattern of bytes
    source = blake2_256.digest source.data
    # Iterate through the `source` bytes in 3-byte chunks
    for pos in countup(0, 29, 3):
      let remaining = values_count - i
      if remaining == 1:
        break

      # Read 3 bytes of `source` as a 24-bit big-endian integer.
      let sample_from_source = source.data[pos].Uint24 shl 16 or source.data[pos+1].Uint24 shl 8 or source.data[pos+2].Uint24

      # Sample values greater than or equal to `sample_max` will cause
      # modulo bias when mapped into the `remaining` range.
      let sample_max = rand_max - rand_max mod remaining

      # Perform a swap if the consumed entropy will not cause modulo bias.
      if sample_from_source < sample_max:
        let replacement_position = sample_from_source mod remaining + i
        swap result[i], result[replacement_position]
        inc i

func split[T](lst: seq[T], N: Positive): seq[seq[T]] =
  # TODO: implement as an iterator
  result = newSeq[seq[T]](N)
  for i in 0 ..< N:
    result[i] = lst[lst.len * i div N ..< lst.len * (i+1) div N] # TODO: avoid alloc via toOpenArray

func get_new_shuffling*(seed: Blake2_256_Digest, validators: seq[ValidatorRecord],
                        dynasty: int64, crosslinking_start_shard: int16): seq[seq[ShardAndCommittee]] {.noInit.} =
  ## Split up validators into groups at the start of every epoch,
  ## determining at what height they can make attestations and what shard they are making crosslinks for
  ## Implementation should do the following: http://vitalik.ca/files/ShuffleAndAssign.png

  let avs = get_active_validator_indices(validators)
  var committees_per_slot, slots_per_committee: uint16

  if avs.len >= CYCLE_LENGTH * MIN_COMMITTEE_SIZE:
    committees_per_slot = uint16 avs.len div CYCLE_LENGTH div (MIN_COMMITTEE_SIZE * 2) + 1
    slots_per_committee = 1
  else:
    committees_per_slot = 1
    slots_per_committee = 1
    while avs.len.uint16 * slots_per_committee < CYCLE_LENGTH * MIN_COMMITTEE_SIZE and
          slots_per_committee < CYCLE_LENGTH:
      slots_per_committee *= 2

  result = @[]
  for slot, slot_indices in shuffle(avs, seed).split(CYCLE_LENGTH):
    let shard_indices = slot_indices.split(committees_per_slot)
    let shard_id_start = crosslinking_start_shard.uint16 +
                         slot.uint16 * committees_per_slot div slots_per_committee

    var committees = newSeq[ShardAndCommittee](shard_indices.len)
    for j, indices in shard_indices:
      committees[j].shard_id = (shard_id_start + j.uint16) mod SHARD_COUNT
      committees[j].committee = indices

    result.add committees

func mod_get[T](arr: openarray[T], pos: Natural): T =
  arr[pos mod arr.len]

func get_shard_and_committees_idx*(state: BeaconState, slot: uint64): int =
  # This replaces `get_shards_and_committees_for_slot` from the spec,
  # since in Nim, it's not currently efficient to create read-only
  # accessors to expensive-to-copy members (such as sequences).
  let earliest_slot_in_array = state.last_state_recalculation_slot - CYCLE_LENGTH
  doAssert earliest_slot_in_array <= slot and
           slot < earliest_slot_in_array + CYCLE_LENGTH * 2
  return int(slot - earliest_slot_in_array)

func get_beacon_proposer_idx*(state: BeaconState, slot: int): int =
  # This replaces `get_beacon_proposer` from the spec,
  # since in Nim, it's not currently efficient to create read-only
  # accessors to expensive-to-copy members (such as ValidatorRecord).
  let idx = get_shard_and_committees_idx(state, slot)
  return state.shard_and_committee_for_slots[idx][0].committee.mod_get(slot)

func get_block_hash*(state: BeaconState, current_block: BeaconBlock, slot: int): Blake2_256_Digest =
  let earliest_slot_in_array = current_block.slot.int - state.recent_block_hashes.len
  assert earliest_slot_in_array <= slot
  assert slot < current_block.slot.int

  return state.recent_block_hashes[slot - earliest_slot_in_array]

func get_new_recent_block_hashes*(
    old_block_hashes: seq[Blake2_256_Digest],
    parent_slot, current_slot: int64,
    parent_hash: Blake2_256_Digest
    ): seq[Blake2_256_Digest] =

  # Should throw for `current_slot - CYCLE_LENGTH * 2 - 1` according to spec comment
  let d = current_slot - parent_slot
  result = old_block_hashes[d .. ^1]
  for _ in 0 ..< min(d, old_block_hashes.len):
    result.add parent_hash
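The swap-or-skip loop in shuffle above rejects any 24-bit sample that would introduce modulo bias before mapping it into the remaining range. A self-contained sketch of that loop, with a toy deterministic byte source standing in for the blake2_256 re-hashing and plain ints instead of Uint24 (all names here are illustrative):

import std/sequtils

proc toyEntropy(round: int): array[32, byte] =
  # Stand-in for `source = blake2_256.digest source.data`: any deterministic
  # byte stream works for illustrating the algorithm.
  for i in 0 ..< 32:
    result[i] = byte((round * 131 + i * 197) mod 256)

proc shuffleSketch(values: seq[int]): seq[int] =
  const randBytes = 3
  let randMax = (1 shl (randBytes * 8)) - 1   # 2^24 - 1
  doAssert values.len < randMax
  result = values
  var i = 0
  var round = 0
  while i < result.len - 1:
    let entropy = toyEntropy(round)
    inc round
    # Consume the entropy in 3-byte (24-bit) chunks, as above.
    for pos in countup(0, 29, 3):
      let remaining = result.len - i
      if remaining == 1: break
      let sample = (entropy[pos].int shl 16) or
                   (entropy[pos+1].int shl 8) or
                    entropy[pos+2].int
      # Reject samples that would cause modulo bias in the `remaining` range.
      let sampleMax = randMax - randMax mod remaining
      if sample < sampleMax:
        swap(result[i], result[i + sample mod remaining])
        inc i

when isMainModule:
  echo shuffleSketch(toSeq(1 .. 10))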
@@ -95,6 +95,20 @@ type
  SpecialRecord* = object
    kind*: SpecialRecordTypes  # Kind
    data*: seq[byte]           # Data

+  AttestationRecord* = object
+    slot*: uint64              # Slot number
+    shard*: uint16             # Shard number
+    oblique_parent_hashes*: seq[Blake2_256_Digest]
+      # Beacon block hashes not part of the current chain, oldest to newest
+    shard_block_hash*: Blake2_256_Digest      # Shard block hash being attested to
+    last_crosslink_hash*: Blake2_256_Digest   # Last crosslink hash
+    shard_block_combined_data_root*: Blake2_256_Digest
+      # Root of data between last hash and this one
+    attester_bitfield*: seq[byte]             # Attester participation bitfield (1 bit per attester)
+    justified_slot*: uint64                   # Slot of last justified beacon block
+    justified_block_hash*: Blake2_256_Digest  # Hash of last justified beacon block
+    aggregate_sig*: BLSSig                    # BLS aggregate signature

  BeaconState* = object
    validator_set_change_slot*: uint64        # Slot of last validator set change

@@ -109,6 +123,10 @@ type
      ## Committee members and their assigned shard, per slot, covers 2 cycles
      ## worth of assignments
    persistent_committees*: seq[seq[Uint24]]  # Persistent shard committees
+    last_justified_slot*: uint64              # Last justified slot
+    justified_streak*: uint64                 # Number of consecutive justified slots
+    shard_and_committee_for_slots*: seq[seq[ShardAndCommittee]] # Committee members and their assigned shard, per slot
+    persistent_committees*: seq[seq[Uint24]]  # Persistent shard committees
    persistent_committee_reassignments*: seq[ShardReassignmentRecord]
    next_shuffling_seed*: Eth2Digest          # Randao seed used for next shuffling
    deposits_penalized_in_period*: uint32     # Total deposits penalized in the given withdrawal period

@@ -191,3 +209,4 @@ type
  # with room to spare.
  #
  # Also, IntSets uses machine int size while we require int64 even on 32-bit platform.
@@ -54,7 +54,7 @@ func process_block*(active_state: BeaconState, crystallized_state: BeaconState,
    record_creator

  # Verify that len(attester_bitfield) == ceil_div8(len(attestation_indices)), where ceil_div8 = (x + 7) // 8. Verify that bits len(attestation_indices).... and higher, if present (i.e. len(attestation_indices) is not a multiple of 8), are all zero
-  doAssert attestation.attester_bitfield.len == attestation_indices.committee.len
+  # doAssert attestation.attester_bitfield.len == attestation_indices.committee.len

  # Derive a group public key by adding the public keys of all of the attesters in attestation_indices for whom the corresponding bit in attester_bitfield (the ith bit is (attester_bitfield[i // 8] >> (7 - (i %8))) % 2) equals 1
  var agg_pubkey: Eth2PublicKey
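A small self-contained illustration of the bitfield layout described in the comments above: the ith attester's bit is (attester_bitfield[i div 8] shr (7 - (i mod 8))) and 1, the bitfield is ceil_div8(len(attestation_indices)) bytes long, and any trailing bits past the attester count must be zero. The helper names are invented for the example:

func ceilDiv8(x: int): int = (x + 7) div 8

func getAttesterBit(bitfield: openarray[byte], i: int): int =
  # Bit i, MSB-first within each byte, as in the spec comment above.
  (int(bitfield[i div 8]) shr (7 - (i mod 8))) and 1

func trailingBitsAreZero(bitfield: openarray[byte], nAttesters: int): bool =
  result = true
  for i in nAttesters ..< bitfield.len * 8:
    if getAttesterBit(bitfield, i) != 0:
      return false

when isMainModule:
  let bitfield = @[byte 0b1010_0000]   # attesters 0 and 2 participated
  doAssert bitfield.len == ceilDiv8(3)
  doAssert getAttesterBit(bitfield, 0) == 1
  doAssert getAttesterBit(bitfield, 1) == 0
  doAssert getAttesterBit(bitfield, 2) == 1
  doAssert trailingBitsAreZero(bitfield, 3)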
beacon_chain/sync_protocol.nim (new file, 79 lines)
@@ -0,0 +1,79 @@
import
  rlp, asyncdispatch2, ranges/bitranges, eth_p2p, eth_p2p/rlpx,
  datatypes

type
  ValidatorChangeLogEntry* = object
    case kind*: ValidatorSetDeltaFlags
    of Entry:
      pubkey: BLSPublicKey
    else:
      index: uint32

  ValidatorSet = seq[ValidatorRecord]

protocol BeaconSync(version = 1):
  requestResponse:
    proc getValidatorChangeLog(peer: Peer, changeLogHead: Blake2_256_Digest)

    proc validatorChangeLog(peer: Peer,
                            signedBlock: BeaconBlock,
                            beaconState: BeaconState,
                            added: openarray[BLSPublicKey],
                            removed: openarray[uint32],
                            order: seq[byte])

template `++`(x: var int): int =
  let y = x
  inc x
  y

type
  # A bit shorter names for convenience
  ChangeLog = BeaconSync.validatorChangeLog
  ChangeLogEntry = ValidatorChangeLogEntry

iterator changes*(cl: ChangeLog): ChangeLogEntry =
  var
    bits = cl.added.len + cl.removed.len
    addedIdx = 0
    removedIdx = 0

  for i in 0 ..< bits:
    yield if cl.order.getBit(i):
      ChangeLogEntry(kind: Entry, pubkey: cl.added[addedIdx++])
    else:
      ChangeLogEntry(kind: Exit, index: cl.removed[removedIdx++])

proc getValidatorChangeLog*(node: EthereumNode):
                           Future[(Peer, ChangeLog)] {.async.} =
  while true:
    let peer = node.randomPeerWith(BeaconSync)
    if peer == nil: return

    let res = await peer.getValidatorChangeLog(timeout = 1)
    if res.isSome:
      return (peer, res.get)

proc applyValidatorChangeLog*(changeLog: ChangeLog,
                              outBeaconState: var BeaconState): bool =
  # TODO:
  #
  # 1. Validate that the signedBlock state root hash matches the
  #    provided beaconState
  #
  # 2. Validate that the applied changelog produces the correct
  #    new change log head
  #
  # 3. Check that enough signatures from the known validator set
  #    are present
  #
  # 4. Apply all changes to the validator set

  outBeaconState.last_finalized_slot =
    changeLog.signedBlock.slot div CYCLE_LENGTH

  outBeaconState.validator_set_delta_hash_chain =
    changeLog.beaconState.validator_set_delta_hash_chain
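A self-contained sketch of how the changes iterator above interleaves the added and removed lists: bit i of the order bitfield selects whether the next change-log entry is an addition (1) or an exit (0). The types and the bit accessor below are illustrative stand-ins for the RLP-derived message type and ranges/bitranges:

type
  ToyChangeKind = enum
    Exit, Entry

  ToyChange = object
    case kind: ToyChangeKind
    of Entry:
      pubkey: string
    of Exit:
      index: uint32

func getBit(order: openarray[byte], i: int): bool =
  # Illustrative MSB-first bit accessor standing in for the bitranges helper.
  ((order[i div 8] shr (7 - (i mod 8))) and 1) != 0

iterator toyChanges(added: seq[string], removed: seq[uint32],
                    order: seq[byte]): ToyChange =
  var addedIdx = 0
  var removedIdx = 0
  for i in 0 ..< added.len + removed.len:
    if order.getBit(i):
      yield ToyChange(kind: Entry, pubkey: added[addedIdx])
      inc addedIdx
    else:
      yield ToyChange(kind: Exit, index: removed[removedIdx])
      inc removedIdx

when isMainModule:
  # order byte 0b1010_0000 -> bits 1,0,1 -> Entry, Exit, Entry
  for c in toyChanges(@["key1", "key2"], @[7'u32], @[byte 0b1010_0000]):
    case c.kind
    of Entry: echo "added ", c.pubkey
    of Exit: echo "removed ", c.index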
beacon_chain/time.nim (new file, 44 lines)
@@ -0,0 +1,44 @@
import
  asyncdispatch2, datatypes, random

type
  Timestamp = uint64 # Unix epoch timestamp in millisecond resolution

var
  detectedClockDrift: int64

proc timeSinceGenesis*(s: BeaconState): Timestamp =
  Timestamp(int64(fastEpochTime() - s.genesis_time * 1000) -
            detectedClockDrift)

template toSlot*(t: Timestamp): uint64 =
  t div uint64(SLOT_DURATION * 1000)

template slotStart*(s: BeaconState, slot: Natural): Timestamp =
  (s.genesis_time + uint64(slot * SLOT_DURATION)) * 1000

template slotMiddle*(s: BeaconState, slot: Natural): Timestamp =
  s.slotStart(slot) + SLOT_DURATION * 500

template slotEnd*(s: BeaconState, slot: Natural): Timestamp =
  s.slotStart(slot + 1)

proc randomTimeInSlot*(s: BeaconState,
                       slot: Natural,
                       interval: HSlice[float, float]): Timestamp =
  ## Returns a random moment within the slot.
  ## The interval must be a sub-interval of [0..1].
  ## Zero marks the beginning of the slot and one marks the end.
  s.slotStart(slot) + Timestamp(rand(interval) * float(SLOT_DURATION * 1000))

proc slotDistanceFromNow*(s: BeaconState): int64 =
  ## Returns how many slots have passed since a particular BeaconState was finalized
  int64(s.timeSinceGenesis() div (SLOT_DURATION * 1000)) - int64(s.last_finalized_slot)

proc synchronizeClock*() {.async.} =
  ## This should determine the offset of the local clock against a global
  ## trusted time (e.g. it can be obtained from multiple time servers).

  # TODO: implement this properly
  detectedClockDrift = 0
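A minimal worked example of the slot arithmetic above, with invented values for SLOT_DURATION (seconds per slot) and the genesis time; the constants and helper names are illustrative stand-ins, not the spec values:

const
  SLOT_DURATION = 6                 # seconds per slot (illustrative)
  genesisTime = 1_600_000_000'u64   # unix seconds (illustrative)

type Timestamp = uint64             # milliseconds, as in time.nim

func slotStart(slot: Natural): Timestamp =
  (genesisTime + uint64(slot * SLOT_DURATION)) * 1000

func toSlot(t: Timestamp): uint64 =
  # `t` is milliseconds since genesis, as returned by timeSinceGenesis
  t div uint64(SLOT_DURATION * 1000)

when isMainModule:
  let sinceGenesis: Timestamp = 25_000     # 25 s after genesis
  doAssert toSlot(sinceGenesis) == 4       # 25 div 6 = 4
  doAssert slotStart(4) == (genesisTime + 24) * 1000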
beacon_chain/trusted_state_snapshots.nim (new file, 26 lines)
@@ -0,0 +1,26 @@
import
  asyncdispatch2,
  datatypes, beacon_chain_db

proc obtainTrustedStateSnapshot*(db: BeaconChainDB): Future[BeaconState] {.async.} =
  # In case our latest state is too old, we must obtain a recent snapshot
  # of the state from a trusted location. This is explained in detail here:
  # https://notes.ethereum.org/oaQV3IF5R2qlJuW-V1r1ew#Beacon-chain-sync

  # TODO: implement this:
  #
  # 1. Specify a large set of trusted state signees
  #    (perhaps stored in a config file)
  #
  # 2. Download a signed state hash from a known location
  #    (The known location can be either a HTTPS host or a DHT record)
  #
  # 3. Check that enough of the specified required signatures are present
  #
  # 4. Download a snapshot file from a known location
  #    (or just obtain it from the network using the ETH protocols)
  #
  # 5. Check that the state snapshot hash is correct and save it in the DB.

  discard
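Step 3 of the TODO list above amounts to a threshold check over the configured trusted signers. A self-contained sketch of that check, with invented types standing in for real BLS signatures over the state root:

import std/sets

type
  SignerId = string
  SignedHash = object
    signer: SignerId

func enoughTrustedSignatures(trusted: HashSet[SignerId],
                             sigs: seq[SignedHash],
                             threshold: int): bool =
  # Count each trusted signer at most once, then compare against the threshold.
  var seen = initHashSet[SignerId]()
  for s in sigs:
    if s.signer in trusted:
      seen.incl s.signer
  result = seen.len >= threshold

when isMainModule:
  let trusted = toHashSet(["signer_a", "signer_b", "signer_c", "signer_d"])
  let sigs = @[SignedHash(signer: "signer_a"), SignedHash(signer: "signer_c"),
               SignedHash(signer: "signer_a"), SignedHash(signer: "unknown")]
  doAssert enoughTrustedSignatures(trusted, sigs, 2)
  doAssert not enoughTrustedSignatures(trusted, sigs, 3)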
beacon_chain/validator_pool.nim (new file, 61 lines)
@@ -0,0 +1,61 @@
import
  tables, random,
  asyncdispatch2,
  datatypes

type
  ValidatorKind = enum
    inProcess
    remote

  ValidatorConnection = object

  RandaoValue = seq[byte]

  AttachedValidator* = ref object
    validatorSlot: int
    case kind: ValidatorKind
    of inProcess:
      randaoValue: RandaoValue
      privKey: BLSPrivateKey
    else:
      connection: ValidatorConnection

  ValidatorPool* = object
    validators: Table[BLSPublicKey, AttachedValidator]

proc init*(T: type ValidatorPool): T =
  result.validators = initTable[BLSPublicKey, AttachedValidator]()

proc addLocalValidator*(pool: var ValidatorPool,
                        pubKey: BLSPublicKey,
                        privKey: BLSPrivateKey) =
  discard

proc getAttachedValidator*(pool: ValidatorPool,
                           validatorKey: BLSPublicKey): AttachedValidator =
  pool.validators.getOrDefault(validatorKey)

proc signBlockProposal*(v: AttachedValidator,
                        proposal: ProposalSignedData): Future[Signature] {.async.} =
  if v.kind == inProcess:
    await sleepAsync(1)
    # TODO:
    # return sign(proposal, v.privKey)
  else:
    # TODO:
    # send RPC
    discard

proc signAttestation*(v: AttachedValidator,
                      attestation: AttestationSignedData): Future[Signature] {.async.} =
  # TODO: implement this
  if v.kind == inProcess:
    await sleepAsync(1)
    # TODO:
    # return sign(proposal, v.privKey)
  else:
    # TODO:
    # send RPC
    discard