Mirror of https://github.com/status-im/nimbus-eth2.git
Merge pull request #14 from status-im/HelperSpecUpdates
update some helper functions to current spec, along with a couple of data types that needed tweaking/fixing
Commit eff23ba182
@@ -65,7 +65,7 @@ type
     randao_commitment*: Blake2_256_Digest # RANDAO commitment
     randao_last_change*: uint64           # Slot the RANDAO commitment was last changed
     balance*: uint64                      # Balance in Gwei
-    status*: uint8                        # Status code [used to be more enum-like]
+    status*: ValidatorStatusCodes         # Status code
     exit_slot*: uint64                    # Slot when validator exited (or 0)

   CrosslinkRecord* = object
@@ -112,7 +112,7 @@ type
     fork_slot_number*: uint64
     pending_attestations*: seq[AttestationRecord] # Attestations not yet processed
     pending_specials*: seq[SpecialRecord]         # Specials not yet been processed
-    recent_block_hashes*: Blake2_256_Digest       # recent beacon block hashes needed to process attestations, older to newer
+    recent_block_hashes*: seq[Blake2_256_Digest]  # recent beacon block hashes needed to process attestations, older to newer
     randao_mix*: Blake2_256_Digest                # RANDAO state

   ValidatorStatusCodes* {.pure.} = enum
@@ -8,37 +8,52 @@
 # Helper functions

 import ../datatypes, sequtils, nimcrypto, math

-func get_active_validator_indices(validators: seq[ValidatorRecord], dynasty: int64): seq[Uint24] =
+func get_active_validator_indices(validators: seq[ValidatorRecord]): seq[Uint24] =
   ## Select the active validators
   result = @[]
   for idx, val in validators:
-    if val.start_dynasty <= dynasty and
-       dynasty < val.end_dynasty:
+    if val.status == ACTIVE:
      result.add idx.Uint24

-func shuffle(validators: seq[Uint24], seed: Blake2_256_Digest): seq[Uint24] {.noInit.}=
-  ## Pseudorandomly shuffles the validator set based on some seed
+func shuffle(values: seq[Uint24], seed: Blake2_256_Digest): seq[Uint24] {.noInit.}=
+  ## Returns the shuffled ``values`` with seed as entropy.
+  ## TODO: this calls out for tests, but I odn't particularly trust spec
+  ## right now.

-  const UpperBound = 2^24 # 16777216
-  assert validators.len <= UpperBound
-
-  deepCopy(result, validators)
+  let values_count = values.len
+
+  # Entropy is consumed from the seed in 3-byte (24 bit) chunks
+  const rand_bytes = 3
+  let rand_max = 2^(rand_bytes * 8) - 1
+
+  # The range of the RNG places an upper-bound on the size of the list that
+  # may be shuffled. It is a logic error to supply an oversized list.
+  assert values_count < rand_max
+
+  deepCopy(result, values)
   var source = seed

   var i = 0
-  while i < validators.len:
+  while i < values.len - 1:
+    # Re-hash the `source` to obtain a new pattern of bytes
     source = blake2_256.digest source.data
+    # Iterate through the `source` bytes in 3-byte chunks
     for pos in countup(0, 29, 3):
-      let remaining = validators.len - i
-      if remaining == 0:
+      let remaining = values_count - i
+      if remaining == 1:
        break

-      let m = source.data[pos].Uint24 shl 16 or source.data[pos+1].Uint24 shl 8 or source.data[pos+2].Uint24
-      let rand_max = Uint24 UpperBound - UpperBound mod remaining
-
-      if m < randMax:
-        let replacementPos = m mod remaining + i
-        swap result[i], result[replacementPos]
+      # Read 3-bytes of `source` as a 24-bit big-endian integer.
+      let sample_from_source = source.data[pos].Uint24 shl 16 or source.data[pos+1].Uint24 shl 8 or source.data[pos+2].Uint24
+
+      # Sample values greater than or equal to `sample_max` will cause
+      # modulo bias when mapped into the `remaining` range.
+      let sample_max = rand_max - rand_max mod remaining
+
+      # Perform a swap if the consumed entropy will not cause modulo bias.
+      if sample_from_source < sample_max:
+        let replacement_position = sample_from_source mod remaining + i
+        swap result[i], result[replacement_position]
      inc i

 func split[T](lst: seq[T], N: Positive): seq[seq[T]] =
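The rewritten shuffle draws 24-bit samples from repeated hashing and only uses a sample when it cannot introduce modulo bias, exactly as the inline comments above describe. The following stand-alone Nim sketch is not part of the diff; sampleWithoutBias and the toy entropy values are invented here to show the rejection rule in isolation.

import std/math

proc sampleWithoutBias(entropy: openArray[int], remaining: int): int =
  ## Returns the first 24-bit sample that maps into `remaining` without
  ## modulo bias, reduced modulo `remaining`; -1 if every sample is rejected.
  let randMax = 2^24 - 1                          # range of a 3-byte sample
  let sampleMax = randMax - randMax mod remaining # larger samples are biased
  result = -1                                     # caller should draw more entropy
  for sample in entropy:
    if sample < sampleMax:                        # accept: no modulo bias
      return sample mod remaining

when isMainModule:
  # 16_777_214 falls in the biased tail for remaining == 10, so it is skipped.
  echo sampleWithoutBias(@[16_777_214, 42], 10)   # prints 2

Rejecting the biased tail keeps every index in 0 ..< remaining equally likely, which is what the swap in the diff relies on.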
@@ -53,52 +68,51 @@ func get_new_shuffling*(seed: Blake2_256_Digest, validators: seq[ValidatorRecord
   ## determining at what height they can make attestations and what shard they are making crosslinks for
   ## Implementation should do the following: http://vitalik.ca/files/ShuffleAndAssign.png

-  let avs = get_active_validator_indices(validators, dynasty)
-  var committees_per_slot, slots_per_committee: int16
+  let avs = get_active_validator_indices(validators)
+  var committees_per_slot, slots_per_committee: uint16

   if avs.len >= CYCLE_LENGTH * MIN_COMMITTEE_SIZE:
-    committees_per_slot = int16 avs.len div CYCLE_LENGTH div (MIN_COMMITTEE_SIZE * 2) + 1
+    committees_per_slot = uint16 avs.len div CYCLE_LENGTH div (MIN_COMMITTEE_SIZE * 2) + 1
     slots_per_committee = 1
   else:
     committees_per_slot = 1
     slots_per_committee = 1
-    while avs.len * slots_per_committee < CYCLE_LENGTH * MIN_COMMITTEE_SIZE and
+    while avs.len.uint16 * slots_per_committee < CYCLE_LENGTH * MIN_COMMITTEE_SIZE and
          slots_per_committee < CYCLE_LENGTH:
      slots_per_committee *= 2

   result = @[]
   for slot, slot_indices in shuffle(avs, seed).split(CYCLE_LENGTH):
     let shard_indices = slot_indices.split(committees_per_slot)
-    let shard_id_start = crosslinking_start_shard +
-      slot.int16 * committees_per_slot div slots_per_committee
+    let shard_id_start = crosslinking_start_shard.uint16 +
+      slot.uint16 * committees_per_slot div slots_per_committee

     var committees = newSeq[ShardAndCommittee](shard_indices.len)
     for j, indices in shard_indices:
-      committees[j].shard_id = (shard_id_start + j.int16) mod SHARD_COUNT
+      committees[j].shard_id = (shard_id_start + j.uint16) mod SHARD_COUNT
       committees[j].committee = indices

     result.add committees

-func get_shards_and_committees_for_slot*(crystallized_state: CrystallizedState,
-                                         slot: uint64): seq[ShardAndCommittee] =
+func get_shards_and_committees_for_slot*(state: BeaconState,
+                                         slot: uint64): ShardAndCommittee =
   # TODO: Spec why is active_state an argument?
+  # TODO: this returns a scalar, not vector, but its return type in spec is a seq/list?

-  let start = crystallized_state.last_state_recalc - CYCLE_LENGTH
-  assert start <= slot
-  assert slot < start + CYCLE_LENGTH * 2
+  let earliest_slot_in_array = state.last_state_recalculation_slot - CYCLE_LENGTH
+  assert earliest_slot_in_array <= slot
+  assert slot < earliest_slot_in_array + CYCLE_LENGTH * 2

-  result = crystallized_state.shard_and_committee_for_slots[int slot - start]
+  return state.shard_and_committee_for_slots[int slot - earliest_slot_in_array]
   # TODO, slot is a uint64; will be an issue on int32 arch.
   # Clarify with EF if light clients will need the beacon chain

-func get_block_hash*(active_state: ActiveState,
-                     beacon_block: BeaconBlock, slot: uint64): Blake2_256_Digest =
-
-  let sback = beacon_block.slot - CYCLE_LENGTH * 2
-  assert sback <= slot
-  assert slot < sback + CYCLE_LENGTH * 2
-
-  result = active_state.recent_block_hashes[int slot - sback]
+func get_block_hash*(state: BeaconState, current_block: BeaconBlock, slot: int): Blake2_256_Digest =
+  let earliest_slot_in_array = current_block.slot.int - state.recent_block_hashes.len
+  assert earliest_slot_in_array <= slot
+  assert slot < current_block.slot.int
+
+  return state.recent_block_hashes[slot - earliest_slot_in_array]

 func get_new_recent_block_hashes*(
     old_block_hashes: seq[Blake2_256_Digest],
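For the sizing branch in the get_new_shuffling hunk above, a small worked example helps. The constants below (CYCLE_LENGTH = 64, MIN_COMMITTEE_SIZE = 128) and the helper name are assumptions for illustration, not values read from this diff.

const
  CYCLE_LENGTH = 64        # assumed for illustration
  MIN_COMMITTEE_SIZE = 128 # assumed for illustration

proc committeesPerSlot(activeValidators: int): int =
  ## Mirrors the sizing branch of get_new_shuffling with plain ints.
  if activeValidators >= CYCLE_LENGTH * MIN_COMMITTEE_SIZE:
    result = activeValidators div CYCLE_LENGTH div (MIN_COMMITTEE_SIZE * 2) + 1
  else:
    result = 1             # too few validators: one committee per slot

echo committeesPerSlot(100_000) # 100_000 div 64 div 256 + 1 == 7
echo committeesPerSlot(1_000)   # below 64 * 128 == 8192, so 1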
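The new get_block_hash treats current_block.slot as the slot just past the end of the recent-hash window, so the hash for slot s sits at index s - (current_block.slot - window length). A minimal sketch of that mapping, with strings standing in for Blake2_256_Digest and names invented for illustration:

proc blockHashAt(window: seq[string], currentSlot, slot: int): string =
  ## Looks up the hash for `slot` in a window that ends just before `currentSlot`.
  let earliestSlotInWindow = currentSlot - window.len
  assert earliestSlotInWindow <= slot   # slot must not fall before the window
  assert slot < currentSlot             # ... nor at/after the current block
  result = window[slot - earliestSlotInWindow]

when isMainModule:
  let window = @["hash_of_slot_7", "hash_of_slot_8", "hash_of_slot_9"]
  echo blockHashAt(window, 10, 8)       # prints hash_of_slot_8

The same window arithmetic appears in get_shards_and_committees_for_slot, with last_state_recalculation_slot - CYCLE_LENGTH as the earliest slot in the array.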