Merge branch 'master' into vbuterin-patch-1

Hsiao-Wei Wang 2018-11-23 20:06:43 +08:00
commit e7852de0c2
No known key found for this signature in database
GPG Key ID: 95B070122902DEA4
2 changed files with 36 additions and 36 deletions


@@ -50,6 +50,7 @@ The primary source of load on the beacon chain are "attestations". Attestations
| `MIN_WITHDRAWAL_PERIOD` | 2**13 (= 8192) | slots | ~14 hours |
| `DELETION_PERIOD` | 2**22 (= 4,194,304) | slots | ~290 days |
| `COLLECTIVE_PENALTY_CALCULATION_PERIOD` | 2**20 (= 1,048,576) | slots | ~2.4 months |
+| `SLASHING_WHISTLEBLOWER_REWARD_DENOMINATOR` | 2**9 (= 512) | — |
| `BASE_REWARD_QUOTIENT` | 2**11 (= 2,048) | — |
| `MAX_VALIDATOR_CHURN_QUOTIENT` | 2**5 (= 32) | — |
| `POW_HASH_VOTING_PERIOD` | 2**10 (= 1,024) | — |
@@ -59,8 +60,8 @@ The primary source of load on the beacon chain are "attestations". Attestations
**Notes**
-* See a recommended min committee size of 111 here https://vitalik.ca/files/Ithaca201807_Sharding.pdf); our algorithm will generally ensure the committee size is at least half the target.
+* See a recommended min committee size of 111 [here](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); our algorithm will generally ensure the committee size is at least half the target.
* The `SQRT_E_DROP_TIME` constant is the amount of time it takes for the quadratic leak to cut deposits of non-participating validators by ~39.4%.
* The `BASE_REWARD_QUOTIENT` constant dictates the per-cycle interest rate assuming all validators are participating and total deposits of 1 ETH. It corresponds to ~2.57% annual interest assuming 10 million participating ETH.
* At most `1/MAX_VALIDATOR_CHURN_QUOTIENT` of the validators can change during each validator set change.
@@ -576,8 +577,8 @@ total_deposit_count: int128
@public
def deposit(deposit_params: bytes[2048]):
    index: int128 = self.total_deposit_count + 2**POW_CONTRACT_MERKLE_TREE_DEPTH
-    msg_gwei_bytes8: bytes[8] = slice(as_bytes32(msg.value / 10**9), 24, 8)
-    timestamp_bytes8: bytes[8] = slice(as_bytes32(block.timestamp), 24, 8)
+    msg_gwei_bytes8: bytes[8] = slice(convert(msg.value / 10**9, 'bytes32'), 24, 8)
+    timestamp_bytes8: bytes[8] = slice(convert(block.timestamp, 'bytes32'), 24, 8)
    deposit_data: bytes[2064] = concat(deposit_params, msg_gwei_bytes8, timestamp_bytes8)
    log.HashChainValue(self.receipt_tree[1], deposit_data, self.total_deposit_count)
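For intuition, the two `slice(convert(..., 'bytes32'), 24, 8)` expressions above just keep the low 8 bytes of a 32-byte big-endian value, i.e. they encode the deposit amount (in Gwei) and the timestamp as big-endian `uint64`s. A minimal Python sketch of the same encoding, assuming Vyper's `bytes32` conversion is big-endian; the variable names and input values here are illustrative, not part of the contract:

```python
# Illustrative Python equivalent of the two bytes8 encodings above.
# The input values are placeholders chosen for the example.
deposit_value_wei = 32 * 10**18            # a 32 ETH deposit, in wei
block_timestamp = 1543000000               # an example Unix timestamp

msg_gwei_bytes8 = (deposit_value_wei // 10**9).to_bytes(8, 'big')   # deposit amount in gwei, big-endian uint64
timestamp_bytes8 = block_timestamp.to_bytes(8, 'big')               # timestamp, big-endian uint64

assert msg_gwei_bytes8 == (32 * 10**9).to_bytes(8, 'big')
assert len(msg_gwei_bytes8) == len(timestamp_bytes8) == 8
```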
@@ -718,7 +719,7 @@ def add_validator(validators: List[ValidatorRecord],
                  current_slot: int) -> int:
    # if following assert fails, validator induction failed
    # move on to next validator registration log
-    signed_message = as_bytes32(pubkey) + as_bytes2(withdrawal_shard) + withdrawal_address + randao_commitment
+    signed_message = bytes32(pubkey) + bytes2(withdrawal_shard) + withdrawal_address + randao_commitment
    assert BLSVerify(pub=pubkey,
                     msg=hash(signed_message),
                     sig=proof_of_possession)
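The `signed_message` above is a fixed-layout concatenation of the registration fields, which is then hashed and checked against the proof of possession. A rough Python sketch of that layout, assuming big-endian fixed-width integer encoding and placeholder values (the BLS verification itself is elided):

```python
# Hypothetical example values; only the byte layout is being illustrated.
pubkey = 0x1234                              # validator pubkey, treated as an integer
withdrawal_shard = 5                         # shard number for withdrawal
withdrawal_address = b'\x11' * 20            # 20-byte withdrawal address
randao_commitment = b'\x22' * 32             # 32-byte RANDAO commitment

signed_message = (
    pubkey.to_bytes(32, 'big')               # bytes32(pubkey)
    + withdrawal_shard.to_bytes(2, 'big')    # bytes2(withdrawal_shard)
    + withdrawal_address
    + randao_commitment
)
assert len(signed_message) == 32 + 2 + 20 + 32   # 86 bytes, hashed and verified with BLSVerify above
```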
@@ -748,7 +749,7 @@ def add_validator(validators: List[ValidatorRecord],
### Routine for removing a validator
```python
-def exit_validator(index, state, penalize, current_slot):
+def exit_validator(index, state, block, penalize, current_slot):
    validator = state.validators[index]
    validator.exit_slot = current_slot
    validator.exit_seq = state.current_exit_seq
@@ -760,6 +761,9 @@ def exit_validator(index, state, penalize, current_slot):
                break
    if penalize:
        validator.status = PENALIZED
+        whistleblower_xfer_amount = validator.deposit // SLASHING_WHISTLEBLOWER_REWARD_DENOMINATOR
+        validator.deposit -= whistleblower_xfer_amount
+        get_beacon_proposer(state, block.slot).deposit += whistleblower_xfer_amount
        state.deposits_penalized_in_period[current_slot // COLLECTIVE_PENALTY_CALCULATION_PERIOD] += validator.balance
    else:
        validator.status = PENDING_EXIT
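# Sanity check on the three added lines above (illustrative numbers, not spec code):
# with SLASHING_WHISTLEBLOWER_REWARD_DENOMINATOR = 512, a penalized validator forfeits
# 1/512 of its deposit to the proposer who includes the slashing evidence.
SLASHING_WHISTLEBLOWER_REWARD_DENOMINATOR = 2**9   # = 512, from the constants table
deposit = 32 * 10**9                               # a 32 ETH deposit, in gwei
whistleblower_xfer_amount = deposit // SLASHING_WHISTLEBLOWER_REWARD_DENOMINATOR
assert whistleblower_xfer_amount == 62_500_000     # 0.0625 ETH credited to the proposer
assert deposit - whistleblower_xfer_amount == 31_937_500_000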
@@ -868,7 +872,7 @@ Extend the list of `AttestationRecord` objects in the `state` with those included
### Verify proposer signature
Let `proposal_hash = hash(ProposalSignedData(fork_version, block.slot, 2**64 - 1, block_hash_without_sig))` where `block_hash_without_sig` is the hash of the block except setting `proposer_signature` to `[0, 0]`.
Verify that `BLSVerify(pubkey=get_beacon_proposer(state, block.slot).pubkey, data=proposal_hash, sig=block.proposer_signature)` passes.
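A compact sketch of this check; `ProposalSignedData`, `hash`, `BLSVerify`, and `get_beacon_proposer` are used as named in the spec text above, while the block-copying details and function name are assumptions made for illustration:

```python
import copy

def verify_proposer_signature(state, block, fork_version):
    # Hash the block with its proposer signature zeroed out
    # (`hash` here is the spec's hash over the serialized block, not Python's builtin)
    block_without_sig = copy.deepcopy(block)
    block_without_sig.proposer_signature = [0, 0]
    block_hash_without_sig = hash(block_without_sig)

    # The third field is fixed to 2**64 - 1, per the rule above
    proposal_hash = hash(ProposalSignedData(fork_version, block.slot, 2**64 - 1, block_hash_without_sig))

    # The signature must verify against the pubkey of the slot's beacon proposer
    assert BLSVerify(pubkey=get_beacon_proposer(state, block.slot).pubkey,
                     data=proposal_hash,
                     sig=block.proposer_signature)
```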
@@ -900,7 +904,7 @@ Perform the following checks:
* Let `fork_version = pre_fork_version if block.slot < fork_slot_number else post_fork_version`. Verify that `BLSVerify(pubkey=validators[data.validator_index].pubkey, msg=hash(LOGOUT_MESSAGE + bytes8(fork_version)), sig=data.signature)`
* Verify that `validators[validator_index].status == ACTIVE`.
-Run `exit_validator(data.validator_index, state, penalize=False, current_slot=block.slot)`.
+Run `exit_validator(data.validator_index, state, block, penalize=False, current_slot=block.slot)`.
#### CASPER_SLASHING
@@ -922,7 +926,7 @@ Perform the following checks:
* Let `intersection = [x for x in vote1_aggregate_sig_indices if x in vote2_aggregate_sig_indices]`. Verify that `len(intersection) >= 1`.
* Verify that `vote1_data.justified_slot < vote2_data.justified_slot < vote2_data.slot <= vote1_data.slot`.
-For each validator index `v` in `intersection`, if `state.validators[v].status` does not equal `PENALIZED`, then run `exit_validator(v, state, penalize=True, current_slot=block.slot)`
+For each validator index `v` in `intersection`, if `state.validators[v].status` does not equal `PENALIZED`, then run `exit_validator(v, state, block, penalize=True, current_slot=block.slot)`
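A compact sketch of these checks and the penalty loop; the vote objects and index lists are assumed to have already been unpacked from the slashing record, and the function name is illustrative:

```python
def process_casper_slashing(state, block, vote1_data, vote1_aggregate_sig_indices,
                            vote2_data, vote2_aggregate_sig_indices):
    # At least one validator must have signed both conflicting votes
    intersection = [x for x in vote1_aggregate_sig_indices if x in vote2_aggregate_sig_indices]
    assert len(intersection) >= 1

    # The slots must nest: vote2 lies strictly inside the span of vote1
    assert vote1_data.justified_slot < vote2_data.justified_slot < vote2_data.slot <= vote1_data.slot

    # Penalize every doubly-signing validator that is not already penalized
    for v in intersection:
        if state.validators[v].status != PENALIZED:
            exit_validator(v, state, block, penalize=True, current_slot=block.slot)
```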
#### PROPOSER_SLASHING
@@ -1030,7 +1034,7 @@ For every shard number `shard` for which a crosslink committee exists in the cycle
* Adjust balances as follows (a worked example follows this list):
    * Participating validators gain `B // reward_quotient * (2 * total_balance_of_v_participating - total_balance_of_v) // total_balance_of_v`.
    * Non-participating validators lose `B // reward_quotient`.
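Worked example of the reward rule above, with made-up numbers: when 70% of the committee's balance participates, each participating validator gains 40% of its maximum per-cycle reward (up to integer rounding), and at exactly 50% participation the reward is zero:

```python
# Illustrative values only; B is a validator's balance and reward_quotient
# comes from the reward logic elsewhere in the spec.
B = 32 * 10**9                               # 32 ETH balance, in gwei
reward_quotient = 2**20                      # placeholder value

total_balance_of_v = 1000
total_balance_of_v_participating = 700       # 70% of the committee balance attested

participating_gain = B // reward_quotient * (2 * total_balance_of_v_participating - total_balance_of_v) // total_balance_of_v
non_participating_loss = B // reward_quotient

assert participating_gain == (B // reward_quotient) * 400 // 1000   # 40% of the maximum reward
assert B // reward_quotient * (2 * 500 - 1000) // 1000 == 0         # zero at 50% participation
```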
#### PoW chain related rules
If `last_state_recalculation_slot % POW_HASH_VOTING_PERIOD == 0`, then:
@@ -1154,7 +1158,7 @@ def change_validators(validators: List[ValidatorRecord], current_slot: int) -> None:
#### Finally...
* Remove all attestation records older than slot `s`
-* For any validator with index `v` with balance less than `MIN_ONLINE_DEPOSIT_SIZE` and status `ACTIVE`, run `exit_validator(v, state, penalize=False, current_slot=block.slot)`
+* For any validator with index `v` with balance less than `MIN_ONLINE_DEPOSIT_SIZE` and status `ACTIVE`, run `exit_validator(v, state, block, penalize=False, current_slot=block.slot)`
* Set `state.recent_block_hashes = state.recent_block_hashes[CYCLE_LENGTH:]`
* Set `state.last_state_recalculation_slot += CYCLE_LENGTH`
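A minimal sketch of this end-of-cycle bookkeeping, under the assumption that attestation records live in a `state.pending_attestations` list and that `s` is the cutoff slot referred to above; those two names and the function name are illustrative, the rest follows the bullets:

```python
def end_of_cycle_bookkeeping(state, block, s):
    # Drop attestation records older than slot s
    # (field name and record layout assumed for illustration)
    state.pending_attestations = [a for a in state.pending_attestations if a.data.slot >= s]

    # Exit any active validator whose balance fell below the minimum
    for v, validator in enumerate(state.validators):
        if validator.balance < MIN_ONLINE_DEPOSIT_SIZE and validator.status == ACTIVE:
            exit_validator(v, state, block, penalize=False, current_slot=block.slot)

    # Rotate the recent block hash window and advance the recalculation slot
    state.recent_block_hashes = state.recent_block_hashes[CYCLE_LENGTH:]
    state.last_state_recalculation_slot += CYCLE_LENGTH
```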


@@ -402,40 +402,32 @@ Return the hash of the serialization of the value.
First, we define some helpers and then the Merkle tree function. The constant `CHUNKSIZE` is set to 128.
```python
-# Returns the smallest power of 2 equal to or higher than x
-def next_power_of_2(x):
-    return x if x == 1 else next_power_of_2((x+1) // 2) * 2
-
-# Extends data length to a power of 2 by minimally right-zero-padding
-def extend_to_power_of_2(data):
-    return data + b'\x00' * (next_power_of_2(len(data)) - len(data))
-
-# Concatenate a list of homogeneous objects into data and pad it
-def list_to_glob(lst):
-    if len(lst) == 0:
-        return b''
-    if len(lst[0]) != next_power_of_2(len(lst[0])):
-        lst = [extend_to_power_of_2(x) for x in lst]
-    data = b''.join(lst)
-    # Pad to chunksize
-    data += b'\x00' * (CHUNKSIZE - (len(data) % CHUNKSIZE or CHUNKSIZE))
-    return data
-
-# Merkle tree hash of a list of items
+# Merkle tree hash of a list of homogenous, non-empty items
def merkle_hash(lst):
-    # Turn list into padded data
-    data = list_to_glob(lst)
    # Store length of list (to compensate for non-bijectiveness of padding)
    datalen = len(lst).to_bytes(32, 'big')
-    # Convert to chunks
-    chunkz = [data[i:i+CHUNKSIZE] for i in range(0, len(data), CHUNKSIZE)]
+
+    if len(lst) == 0:
+        # Handle empty list case
+        chunkz = [b'\x00' * CHUNKSIZE]
+    elif len(lst[0]) < CHUNKSIZE:
+        # See how many items fit in a chunk
+        items_per_chunk = CHUNKSIZE // len(lst[0])
+        # Build a list of chunks based on the number of items in the chunk
+        chunkz = [b''.join(lst[i:i+items_per_chunk]) for i in range(0, len(lst), items_per_chunk)]
+    else:
+        # Leave large items alone
+        chunkz = lst
+
    # Tree-hash
    while len(chunkz) > 1:
        if len(chunkz) % 2 == 1:
            chunkz.append(b'\x00' * CHUNKSIZE)
        chunkz = [hash(chunkz[i] + chunkz[i+1]) for i in range(0, len(chunkz), 2)]

    # Return hash of root and length data
-    return hash((chunkz[0] if len(chunks) > 0 else b'\x00' * 32) + datalen)
+    return hash(chunkz[0] + datalen)
```
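For intuition about the new packing branch, here is a small usage-style sketch showing how fixed-size items shorter than a chunk are grouped before tree-hashing; the item size and list contents are arbitrary examples:

```python
# How 32-byte items get packed into 128-byte chunks before tree-hashing
CHUNKSIZE = 128
item_size = 32                                   # e.g. a list of hash32 values
lst = [bytes([i]) * item_size for i in range(10)]

items_per_chunk = CHUNKSIZE // item_size         # -> 4 items per chunk
chunkz = [b''.join(lst[i:i + items_per_chunk]) for i in range(0, len(lst), items_per_chunk)]

assert items_per_chunk == 4
assert len(chunkz) == 3                          # 10 items -> chunks of 4, 4 and 2 items
assert len(chunkz[-1]) == 64                     # short final chunk; the tree-hash loop only pads
                                                 # the *number* of chunks to an even count
```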
To `tree_hash` a list, we simply do:
@@ -465,3 +457,7 @@ return hash(b''.join([tree_hash(getattr(x, field)) for field in sorted(value.fields)]))
| Nim | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) | Nim implementation of SSZ maintained by Status. |
| Rust | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) | Shasper implementation of SSZ maintained by ParityTech. |
| Javascript | [https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js](https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js) | JavaScript implementation of SSZ maintained by ChainSafe Systems. |
+
+## Copyright
+
+Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).