Updated total stake inference

David Rusu 2026-01-21 12:20:58 +04:00
parent c763d47339
commit 2b5ce42e58
7 changed files with 160 additions and 161 deletions

View File

@ -73,36 +73,36 @@ class Config:
def cryptarchia_v0_0_1(initial_total_active_stake) -> "Config":
return Config(
k=2160,
active_slot_coeff=0.05,
active_slot_coeff=1 / 30,
epoch_stake_distribution_stabilization=3,
epoch_period_nonce_buffer=3,
epoch_period_nonce_stabilization=4,
initial_total_active_stake=initial_total_active_stake,
total_active_stake_learning_rate=0.8,
total_active_stake_learning_rate=1.0,
time=TimeConfig(
slot_duration=1,
chain_start_time=0,
),
)
@property
@functools.cached_property
def base_period_length(self) -> int:
return int(floor(self.k / self.active_slot_coeff))
@property
@functools.cached_property
def epoch_relative_nonce_slot(self) -> int:
return (
self.epoch_stake_distribution_stabilization + self.epoch_period_nonce_buffer
) * self.base_period_length
@property
@functools.cached_property
def epoch_length(self) -> int:
return (
self.epoch_relative_nonce_slot
+ self.epoch_period_nonce_stabilization * self.base_period_length
)
@property
@functools.cached_property
def s(self):
"""
The Security Parameter. This parameter controls how many slots one must
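For orientation, the epoch geometry implied by the new constants, worked out in exact arithmetic (the code's `floor(self.k / self.active_slot_coeff)` goes through floating point, so treat this as a back-of-the-envelope check rather than spec output):

```latex
\[
\begin{aligned}
\text{base\_period\_length} &= \lfloor k / f \rfloor = 2160 \cdot 30 = 64\,800 \text{ slots}\\
\text{epoch\_relative\_nonce\_slot} &= (3 + 3) \cdot 64\,800 = 388\,800 \text{ slots}\\
\text{epoch\_length} &= 388\,800 + 4 \cdot 64\,800 = 648\,000 \text{ slots}
\end{aligned}
\]
```

The switch from `@property` to `@functools.cached_property` means these derived values are computed once per `Config` instance instead of on every access.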
@ -162,6 +162,7 @@ class Note:
def encode_pk(self) -> bytes:
return int.to_bytes(self.pk, length=32, byteorder="big")
@functools.cached_property
def commitment(self) -> Hash:
value_bytes = int.to_bytes(self.value, length=32, byteorder="big")
return Hash(
@ -174,8 +175,9 @@ class Note:
self.zone_id,
)
@functools.cached_property
def nullifier(self) -> Hash:
return Hash(b"NOMOS_NOTE_NF", self.commitment(), self.encode_sk())
return Hash(b"NOMOS_NOTE_NF", self.commitment, self.encode_sk())
@dataclass
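`commitment` and `nullifier` are now `functools.cached_property` attributes, so each hash is computed once per instance and the call sites drop their parentheses; the same change is applied to `BlockHeader.id` below, and `__hash__` reuses the cached value. A minimal sketch of the caching semantics this relies on (a hypothetical `Cached` class, not part of the spec), which also shows why instances must not be mutated after the first access:

```python
import functools
from dataclasses import dataclass


@dataclass
class Cached:
    value: int

    @functools.cached_property
    def doubled(self) -> int:
        # computed on first access, then stored in the instance __dict__
        return 2 * self.value


c = Cached(3)
assert c.doubled == 6
c.value = 5
assert c.doubled == 6  # still the cached result; the mutation is not reflected
```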
@ -188,7 +190,7 @@ class MockLeaderProof:
return Hash(
b"NOMOS_NONCE_CONTRIB",
self.slot.encode(),
self.note.commitment(),
self.note.commitment,
self.note.encode_sk(),
)
@ -199,8 +201,8 @@ class MockLeaderProof:
return (
slot == self.slot
and parent == self.parent
and self.note.commitment() in commitments
and self.note.nullifier() not in nullifiers
and self.note.commitment in commitments
and self.note.nullifier not in nullifiers
)
@ -217,6 +219,7 @@ class BlockHeader:
# as serialized in the format specified by the 'HEADER' rule in 'messages.abnf'.
#
# The following code is to be considered a reference implementation, mostly to be used for testing.
@functools.cached_property
def id(self) -> Hash:
return Hash(
b"BLOCK_ID",
@ -231,7 +234,7 @@ class BlockHeader:
)
def __hash__(self):
return hash(self.id())
return hash(self.id)
@dataclass
@ -271,7 +274,7 @@ class LedgerState:
return replace(self, **kwarg)
def apply(self, block: BlockHeader):
assert block.parent == self.block.id()
assert block.parent == self.block.id
self.nonce = Hash(
b"EPOCH_NONCE",
@ -317,12 +320,12 @@ class Follower:
def __init__(self, genesis_state: LedgerState, config: Config):
self.config = config
self.forks: list[Hash] = []
self.local_chain = genesis_state.block.id()
self.local_chain = genesis_state.block.id
self.genesis_state = genesis_state
self.ledger_state = {genesis_state.block.id(): genesis_state.copy()}
self.ledger_state = {genesis_state.block.id: genesis_state.copy()}
self.epoch_state = {}
self.state = State.BOOTSTRAPPING
self.lib = genesis_state.block.id() # Last immutable block, initially the genesis block
self.lib = genesis_state.block.id # Last immutable block, initially the genesis block
def to_online(self):
"""
@ -359,7 +362,7 @@ class Follower:
raise InvalidLeaderProof
def on_block(self, block: BlockHeader):
if block.id() in self.ledger_state:
if block.id in self.ledger_state:
logger.warning("dropping already processed block")
return
@ -367,14 +370,14 @@ class Follower:
new_state = self.ledger_state[block.parent].copy()
new_state.apply(block)
self.ledger_state[block.id()] = new_state
self.ledger_state[block.id] = new_state
if block.parent == self.local_chain:
# simply extending the local chain
self.local_chain = block.id()
self.local_chain = block.id
else:
# otherwise, this block creates a fork
self.forks.append(block.id())
self.forks.append(block.id)
# remove any existing fork that is superseded by this block
if block.parent in self.forks:
@ -389,7 +392,6 @@ class Follower:
if self.state == State.ONLINE:
self.update_lib()
# Update the lib, and prune forks that do not descend from it.
def update_lib(self):
"""
@ -401,7 +403,7 @@ class Follower:
return
# prune forks that do not descend from the last immutable block; this is needed to prevent the Genesis
# rule from rolling back past the LIB
self.lib = next(islice(iter_chain(self.local_chain, self.ledger_state), self.config.k, None), self.genesis_state).block.id()
self.lib = next(islice(iter_chain(self.local_chain, self.ledger_state), self.config.k, None), self.genesis_state).block.id
self.forks = [
f for f in self.forks if is_ancestor(self.lib, f, self.ledger_state)
]
@ -411,7 +413,6 @@ class Follower:
if is_ancestor(self.lib, k, self.ledger_state) or is_ancestor(k, self.lib, self.ledger_state)
}
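The LIB selection one-liner above reads: walk the chain backwards from the local tip and take the block `k` steps back, falling back to genesis when the chain is shorter than `k`. A toy illustration of the `next(islice(...), default)` idiom (hypothetical data, not part of the spec):

```python
from itertools import islice

chain = ["tip", "b3", "b2", "b1", "genesis"]  # newest -> oldest

k = 2
lib = next(islice(iter(chain), k, None), "genesis")
assert lib == "b2"  # the block k steps behind the tip

k = 10
lib = next(islice(iter(chain), k, None), "genesis")
assert lib == "genesis"  # chain shorter than k: fall back to the default
```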
# Evaluate the fork choice rule and return the chain we should be following
def fork_choice(self) -> Hash:
if self.state == State.BOOTSTRAPPING:
@ -450,20 +451,18 @@ class Follower:
def epoch_start_slot(self, epoch) -> Slot:
return Slot(epoch.epoch * self.config.epoch_length)
def stake_distribution_snapshot(self, epoch, tip: Hash):
def stake_distribution_snapshot_slot(self, epoch):
# stake distribution snapshot happens at the beginning of the previous epoch,
# i.e. for epoch e, the snapshot is taken at the last block of epoch e-2
slot = Slot(epoch.prev().epoch * self.config.epoch_length)
return self.state_at_slot_beginning(tip, slot)
return self.epoch_start_slot(epoch.prev())
def nonce_snapshot(self, epoch, tip):
def nonce_snapshot_slot(self, epoch):
# nonce snapshot happens partway through the previous epoch after the
# stake distribution has stabilized
slot = Slot(
return Slot(
self.config.epoch_relative_nonce_slot
+ self.epoch_start_slot(epoch.prev()).absolute_slot
)
return self.state_at_slot_beginning(tip, slot)
def compute_epoch_state(self, epoch: Epoch, tip: Hash) -> EpochState:
if epoch.epoch == 0:
@ -473,18 +472,21 @@ class Follower:
inferred_total_active_stake=self.config.initial_total_active_stake,
)
stake_distribution_snapshot = self.stake_distribution_snapshot(epoch, tip)
nonce_snapshot = self.nonce_snapshot(epoch, tip)
# we memoize epoch states to avoid recursion killing our performance
memo_block_id = nonce_snapshot.block.id()
if state := self.epoch_state.get((epoch, memo_block_id)):
if state := self.epoch_state.get((epoch, tip)):
return state
nonce_slot = self.nonce_snapshot_slot(epoch)
stake_distribution_slot = self.stake_distribution_snapshot_slot(epoch)
stake_distribution_snapshot = self.state_at_slot_beginning(
tip, stake_distribution_slot
)
nonce_snapshot = self.state_at_slot_beginning(tip, nonce_slot)
# To update our inference of total stake, we need the prior estimate which
was calculated last epoch. Thus we recurse here to retrieve the previous
# estimate of total stake.
prev_epoch = self.compute_epoch_state(epoch.prev(), tip)
prev_epoch = self.compute_epoch_state(epoch.prev(), nonce_snapshot.block.id)
inferred_total_active_stake = self._infer_total_active_stake(
prev_epoch, nonce_snapshot, stake_distribution_snapshot
)
@ -495,7 +497,7 @@ class Follower:
inferred_total_active_stake=inferred_total_active_stake,
)
self.epoch_state[(epoch, memo_block_id)] = state
self.epoch_state[(epoch, tip)] = state
return state
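The snapshot helpers now return slots rather than resolved states, and `compute_epoch_state` resolves both states against the same `tip`. Writing $L$ for `epoch_length` and $R$ for `epoch_relative_nonce_slot`, the helpers above place the snapshots for epoch $e$ at:

```latex
\[
\begin{aligned}
\text{stake\_distribution\_snapshot\_slot}(e) &= (e - 1)\,L\\
\text{nonce\_snapshot\_slot}(e) &= (e - 1)\,L + R
\end{aligned}
\]
```

Memoization is now keyed on `(epoch, tip)`, and the recursive call for the previous epoch is evaluated relative to `nonce_snapshot.block.id` rather than the current tip.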
def _infer_total_active_stake(
@ -509,19 +511,19 @@ class Follower:
# Since we need a stable inference of total stake for the start of this epoch,
# we limit our look-back period to the window from the start of the last epoch
# until the nonce snapshot was taken.
block_proposals_last_epoch = (
period_block_density = (
nonce_snapshot.leader_count - stake_distribution_snapshot.leader_count
)
T = self.config.epoch_relative_nonce_slot
mean_blocks_per_slot = block_proposals_last_epoch / T
expected_blocks_per_slot = np.log(1 / (1 - self.config.active_slot_coeff))
blocks_per_slot_err = expected_blocks_per_slot - mean_blocks_per_slot
h = (
self.config.total_active_stake_learning_rate
* prev_epoch.inferred_total_active_stake
/ expected_blocks_per_slot
)
return int(prev_epoch.inferred_total_active_stake - h * blocks_per_slot_err)
# Use epoch_relative_nonce_slot as this is the actual observation window
# (the slot range from stake_distribution_snapshot to nonce_snapshot)
period = self.config.epoch_relative_nonce_slot
f = self.config.active_slot_coeff
beta = self.config.total_active_stake_learning_rate
total_stake_estimate = prev_epoch.inferred_total_active_stake
slot_activation_error = 1 - period_block_density / (period * f)
coefficient = total_stake_estimate * beta
return max(1, int(total_stake_estimate - coefficient * slot_activation_error))
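In symbols, with $B$ the leader-count difference between the two snapshots (blocks observed in the window), $T$ = `epoch_relative_nonce_slot`, $f$ = `active_slot_coeff`, $\beta$ the learning rate and $D_{e-1}$ the previous estimate, the new update (before truncation to an integer) is:

```latex
\[
D_e = \max\!\left(1,\; D_{e-1} - \beta\, D_{e-1}\left(1 - \frac{B}{T f}\right)\right)
\quad\overset{\beta = 1}{=}\quad
\max\!\left(1,\; D_{e-1}\,\frac{B}{T f}\right)
\]
```

So with the new default $\beta = 1.0$, the estimate is simply rescaled by the ratio of observed to target block density. The previous version measured the error against an expected per-slot rate of $\ln\!\bigl(1/(1-f)\bigr)$ and stepped towards it with $\beta = 0.8$; the new version targets $f$ directly and clamps the estimate at 1.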
def blocks_by_slot(self, from_slot: Slot) -> Generator[BlockHeader, None, None]:
# Returns blocks in the given range of slots in order of slot
@ -564,7 +566,7 @@ class Leader:
b"LEAD",
epoch.nonce(),
slot.encode(),
self.note.commitment(),
self.note.commitment,
self.note.encode_sk(),
)
ticket = int.from_bytes(ticket)
@ -605,7 +607,7 @@ def is_ancestor(a: Hash, b: Hash, states: Dict[Hash, LedgerState]) -> bool:
Returns True if `a` is an ancestor of `b` in the chain.
"""
for state in iter_chain(b, states):
if state.block.id() == a:
if state.block.id == a:
return True
return False
@ -623,7 +625,7 @@ def common_prefix_depth(
try:
a_block = next(a_blocks)
a_suffix.append(a_block)
a_block_id = a_block.id()
a_block_id = a_block.id
if a_block_id in seen:
# we had seen this block from the fork chain
return (
@ -640,7 +642,7 @@ def common_prefix_depth(
try:
b_block = next(b_blocks)
b_suffix.append(b_block)
b_block_id = b_block.id()
b_block_id = b_block.id
if b_block_id in seen:
# we had seen the fork in the local chain
return (

View File

@ -39,8 +39,8 @@ def sync(local: Follower, peers: list[Follower], checkpoint: LedgerState | None
num_blocks += 1
# Reject blocks that have been rejected in the past
# or whose parent has been rejected.
if {block.id(), block.parent} & rejected_blocks:
rejected_blocks.add(block.id())
if {block.id, block.parent} & rejected_blocks:
rejected_blocks.add(block.id)
continue
try:
@ -49,7 +49,7 @@ def sync(local: Follower, peers: list[Follower], checkpoint: LedgerState | None
except ParentNotFound:
orphans.add(block)
except Exception:
rejected_blocks.add(block.id())
rejected_blocks.add(block.id)
# Finish the sync process if no block has been fetched,
# which means that no peer has a tip ahead of the local tip.
@ -63,14 +63,11 @@ def sync(local: Follower, peers: list[Follower], checkpoint: LedgerState | None
# Skip the orphan block if it was processed during a previous backfill
# (i.e. if it has already been added to the local block tree),
# or if it was rejected during a previous backfill.
if (
orphan.id() not in local.ledger_state
and orphan.id() not in rejected_blocks
):
if orphan.id not in local.ledger_state and orphan.id not in rejected_blocks:
try:
backfill_fork(local, orphan, block_fetcher)
except InvalidBlockFromBackfillFork as e:
rejected_blocks.update(block.id() for block in e.invalid_suffix)
rejected_blocks.update(block.id for block in e.invalid_suffix)
def backfill_fork(
@ -83,7 +80,7 @@ def backfill_fork(
suffix = find_missing_part(
local,
block_fetcher.fetch_chain_backward(fork_tip.id(), local),
block_fetcher.fetch_chain_backward(fork_tip.id, local),
)
# Add blocks in the fork suffix with applying fork choice rule.
@ -105,7 +102,7 @@ def find_missing_part(
suffix: list[BlockHeader] = []
for block in fork:
if block.id() in local.ledger_state:
if block.id in local.ledger_state:
break
suffix.append(block)
suffix.reverse()
@ -167,7 +164,7 @@ class BlockFetcher:
# First, try to iterate the chain from the local block tree.
for block in iter_chain_blocks(id, local.ledger_state):
yield block
if block.id() == local.genesis_state.block.id():
if block.id == local.genesis_state.block.id:
return
id = block.parent
@ -175,7 +172,7 @@ class BlockFetcher:
for peer in self.peers:
for block in iter_chain_blocks(id, peer.ledger_state):
yield block
if block.id() == local.genesis_state.block.id():
if block.id == local.genesis_state.block.id:
return
id = block.parent

View File

@ -59,7 +59,7 @@ def mk_genesis_state(initial_stake_distribution: list[Note]) -> LedgerState:
),
),
nonce=bytes(32),
commitments={n.commitment() for n in initial_stake_distribution},
commitments={n.commitment for n in initial_stake_distribution},
nullifiers=set(),
)
@ -72,10 +72,10 @@ def mk_block(
return BlockHeader(
slot=Slot(slot),
parent=parent.id(),
parent=parent.id,
content_size=len(content),
content_id=Hash(b"CONTENT_ID", content),
leader_proof=MockLeaderProof(note, Slot(slot), parent=parent.id()),
leader_proof=MockLeaderProof(note, Slot(slot), parent=parent.id),
)

View File

@ -31,137 +31,135 @@ class TestForkChoice(TestCase):
b4, b5 = mk_chain(b0, Note(sk=2, value=1), slots=[1, 2])
b6, b7 = mk_chain(b2, Note(sk=3, value=1), slots=[3, 4])
states = {
b.id(): LedgerState(block=b) for b in [b0, b1, b2, b3, b4, b5, b6, b7]
}
states = {b.id: LedgerState(block=b) for b in [b0, b1, b2, b3, b4, b5, b6, b7]}
assert (d := common_prefix_depth(b0.id(), b0.id(), states)) == (
assert (d := common_prefix_depth(b0.id, b0.id, states)) == (
0,
[b0],
0,
[b0],
), d
assert (d := common_prefix_depth(b1.id(), b0.id(), states)) == (
assert (d := common_prefix_depth(b1.id, b0.id, states)) == (
1,
[b0, b1],
0,
[b0],
), d
assert (d := common_prefix_depth(b0.id(), b1.id(), states)) == (
assert (d := common_prefix_depth(b0.id, b1.id, states)) == (
0,
[b0],
1,
[b0, b1],
), d
assert (d := common_prefix_depth(b1.id(), b1.id(), states)) == (
assert (d := common_prefix_depth(b1.id, b1.id, states)) == (
0,
[b1],
0,
[b1],
), d
assert (d := common_prefix_depth(b2.id(), b0.id(), states)) == (
assert (d := common_prefix_depth(b2.id, b0.id, states)) == (
2,
[b0, b1, b2],
0,
[b0],
), d
assert (d := common_prefix_depth(b0.id(), b2.id(), states)) == (
assert (d := common_prefix_depth(b0.id, b2.id, states)) == (
0,
[b0],
2,
[b0, b1, b2],
), d
assert (d := common_prefix_depth(b3.id(), b0.id(), states)) == (
assert (d := common_prefix_depth(b3.id, b0.id, states)) == (
3,
[b0, b1, b2, b3],
0,
[b0],
), d
assert (d := common_prefix_depth(b0.id(), b3.id(), states)) == (
assert (d := common_prefix_depth(b0.id, b3.id, states)) == (
0,
[b0],
3,
[b0, b1, b2, b3],
), d
assert (d := common_prefix_depth(b1.id(), b4.id(), states)) == (
assert (d := common_prefix_depth(b1.id, b4.id, states)) == (
1,
[b0, b1],
1,
[b0, b4],
), d
assert (d := common_prefix_depth(b4.id(), b1.id(), states)) == (
assert (d := common_prefix_depth(b4.id, b1.id, states)) == (
1,
[b0, b4],
1,
[b0, b1],
), d
assert (d := common_prefix_depth(b1.id(), b5.id(), states)) == (
assert (d := common_prefix_depth(b1.id, b5.id, states)) == (
1,
[b0, b1],
2,
[b0, b4, b5],
), d
assert (d := common_prefix_depth(b5.id(), b1.id(), states)) == (
assert (d := common_prefix_depth(b5.id, b1.id, states)) == (
2,
[b0, b4, b5],
1,
[b0, b1],
), d
assert (d := common_prefix_depth(b2.id(), b5.id(), states)) == (
assert (d := common_prefix_depth(b2.id, b5.id, states)) == (
2,
[b0, b1, b2],
2,
[b0, b4, b5],
), d
assert (d := common_prefix_depth(b5.id(), b2.id(), states)) == (
assert (d := common_prefix_depth(b5.id, b2.id, states)) == (
2,
[b0, b4, b5],
2,
[b0, b1, b2],
), d
assert (d := common_prefix_depth(b3.id(), b5.id(), states)) == (
assert (d := common_prefix_depth(b3.id, b5.id, states)) == (
3,
[b0, b1, b2, b3],
2,
[b0, b4, b5],
), d
assert (d := common_prefix_depth(b5.id(), b3.id(), states)) == (
assert (d := common_prefix_depth(b5.id, b3.id, states)) == (
2,
[b0, b4, b5],
3,
[b0, b1, b2, b3],
), d
assert (d := common_prefix_depth(b3.id(), b6.id(), states)) == (
assert (d := common_prefix_depth(b3.id, b6.id, states)) == (
1,
[b2, b3],
1,
[b2, b6],
), d
assert (d := common_prefix_depth(b6.id(), b3.id(), states)) == (
assert (d := common_prefix_depth(b6.id, b3.id, states)) == (
1,
[b2, b6],
1,
[b2, b3],
), d
assert (d := common_prefix_depth(b3.id(), b7.id(), states)) == (
assert (d := common_prefix_depth(b3.id, b7.id, states)) == (
1,
[b2, b3],
2,
[b2, b6, b7],
), d
assert (d := common_prefix_depth(b7.id(), b3.id(), states)) == (
assert (d := common_prefix_depth(b7.id, b3.id, states)) == (
2,
[b2, b6, b7],
1,
[b2, b3],
), d
assert (d := common_prefix_depth(b5.id(), b7.id(), states)) == (
assert (d := common_prefix_depth(b5.id, b7.id, states)) == (
2,
[b0, b4, b5],
4,
[b0, b1, b2, b6, b7],
), d
assert (d := common_prefix_depth(b7.id(), b5.id(), states)) == (
assert (d := common_prefix_depth(b7.id, b5.id, states)) == (
4,
[b0, b1, b2, b6, b7],
2,
@ -196,28 +194,28 @@ class TestForkChoice(TestCase):
k = 1
s = 50
states = {b.id(): LedgerState(block=b) for b in short_chain + long_chain}
states = {b.id: LedgerState(block=b) for b in short_chain + long_chain}
assert (
maxvalid_bg(short_chain[-1].id(), [long_chain[-1].id()], k, s, states)
== short_chain[-1].id()
maxvalid_bg(short_chain[-1].id, [long_chain[-1].id], k, s, states)
== short_chain[-1].id
)
assert (
maxvalid_mc(short_chain[-1].id(), [long_chain[-1].id()], k,states)
== short_chain[-1].id()
maxvalid_mc(short_chain[-1].id, [long_chain[-1].id], k, states)
== short_chain[-1].id
)
# However, if we set k to the fork length, it will be accepted
k = len(long_chain)
assert (
maxvalid_bg(short_chain[-1].id(), [long_chain[-1].id()], k, s, states)
== long_chain[-1].id()
maxvalid_bg(short_chain[-1].id, [long_chain[-1].id], k, s, states)
== long_chain[-1].id
)
assert (
maxvalid_mc(short_chain[-1].id(), [long_chain[-1].id()], k, states)
== long_chain[-1].id()
maxvalid_mc(short_chain[-1].id, [long_chain[-1].id], k, states)
== long_chain[-1].id
)
def test_fork_choice_long_dense_chain(self):
@ -241,18 +239,18 @@ class TestForkChoice(TestCase):
k = 1
s = 50
states = {b.id(): LedgerState(block=b) for b in short_chain + long_chain}
states = {b.id: LedgerState(block=b) for b in short_chain + long_chain}
assert (
maxvalid_bg(short_chain[-1].id(), [long_chain[-1].id()], k, s, states)
== long_chain[-1].id()
maxvalid_bg(short_chain[-1].id, [long_chain[-1].id], k, s, states)
== long_chain[-1].id
)
# the Praos fork choice rule should not accept a chain that diverged by more than k blocks,
# even if it is longer
assert (
maxvalid_mc(short_chain[-1].id(), [long_chain[-1].id()], k, states)
== short_chain[-1].id()
maxvalid_mc(short_chain[-1].id, [long_chain[-1].id], k, states)
== short_chain[-1].id
)
def test_fork_choice_integration(self):
@ -266,7 +264,7 @@ class TestForkChoice(TestCase):
follower.on_block(b1)
assert follower.tip_id() == b1.id()
assert follower.tip_id() == b1.id
assert follower.forks == [], follower.forks
# -- then we fork --
@ -284,8 +282,8 @@ class TestForkChoice(TestCase):
follower.on_block(b2)
follower.on_block(b3)
assert follower.tip_id() == b2.id()
assert len(follower.forks) == 1 and follower.forks[0] == b3.id()
assert follower.tip_id() == b2.id
assert len(follower.forks) == 1 and follower.forks[0] == b3.id
# -- extend the fork causing a re-org --
#
@ -299,8 +297,8 @@ class TestForkChoice(TestCase):
b4 = mk_block(b3, 3, n_b)
follower.on_block(b4)
assert follower.tip_id() == b4.id()
assert len(follower.forks) == 1 and follower.forks[0] == b2.id(), follower.forks
assert follower.tip_id() == b4.id
assert len(follower.forks) == 1 and follower.forks[0] == b2.id, follower.forks
# -- switch to online mode --
#
@ -311,9 +309,9 @@ class TestForkChoice(TestCase):
# b3 (LIB) - b4 == tip
#
follower.to_online()
assert follower.lib == b3.id(), follower.lib
assert follower.lib == b3.id, follower.lib
assert len(follower.forks) == 0, follower.forks
assert b2.id() not in follower.forks
assert b2.id not in follower.forks
# -- extend a fork deeper than the LIB --
#
@ -338,10 +336,10 @@ class TestForkChoice(TestCase):
b8 = mk_block(b7, 5, n_b)
follower.on_block(b7)
assert len(follower.forks) == 1 and b7.id() in follower.forks
assert len(follower.forks) == 1 and b7.id in follower.forks
follower.on_block(b8)
assert follower.tip_id() == b8.id()
assert follower.tip_id() == b8.id
# b4 was pruned as it forks deeper than the LIB
assert len(follower.forks) == 0, follower.forks
@ -366,14 +364,14 @@ class TestForkChoice(TestCase):
follower.on_block(b8)
follower.on_block(b9)
assert follower.tip_id() == b9.id()
assert follower.tip_id() == b9.id
follower.on_block(b10)
follower.on_block(b11)
follower.on_block(b12)
assert follower.tip_id() == b12.id()
assert follower.lib == b7.id(), follower.lib
assert follower.tip_id() == b12.id
assert follower.lib == b7.id, follower.lib
def test_lib_calc_short_chain(self):
# Test that the LIB is correctly calculated for a short chain
@ -385,15 +383,15 @@ class TestForkChoice(TestCase):
follower = Follower(genesis, config)
follower.to_online()
assert follower.lib == genesis.block.id(), follower.lib
assert follower.lib == genesis.block.id, follower.lib
blocks = [genesis.block]
for i in range(1, 11):
b = mk_block(blocks[-1], i, n_a)
blocks.append(b)
follower.on_block(b)
assert follower.lib == genesis.block.id(), follower.lib
assert follower.lib == genesis.block.id, follower.lib
b11 = mk_block(blocks[-1], 11, n_a)
follower.on_block(b11)
assert follower.lib == blocks[1].id(), follower.lib
assert follower.lib == blocks[1].id, follower.lib

View File

@ -96,7 +96,7 @@ class TestLedgerStateUpdate(TestCase):
follower.on_block(block_2)
assert follower.tip() == block_1
assert len(follower.forks) == 1, f"{len(follower.forks)}"
assert follower.forks[0] == block_2.id()
assert follower.forks[0] == block_2.id
# note_2 wins slot 1 and chooses to extend from block_1
# note_3 also wins slot 1 but chooses to extend from block_2
@ -107,7 +107,7 @@ class TestLedgerStateUpdate(TestCase):
follower.on_block(block_4)
assert follower.tip() == block_3
assert len(follower.forks) == 1, f"{len(follower.forks)}"
assert follower.forks[0] == block_4.id()
assert follower.forks[0] == block_4.id
# note_4 also wins slot 1 but chooses to extend from block_2
# The block is accepted. A new fork is created "from the block_2".
@ -115,8 +115,8 @@ class TestLedgerStateUpdate(TestCase):
follower.on_block(block_5)
assert follower.tip() == block_3
assert len(follower.forks) == 2, f"{len(follower.forks)}"
assert follower.forks[0] == block_4.id()
assert follower.forks[1] == block_5.id()
assert follower.forks[0] == block_4.id
assert follower.forks[1] == block_5.id
# A block based on an unknown parent is not accepted.
# Nothing changes from the local chain and forks.
@ -126,8 +126,8 @@ class TestLedgerStateUpdate(TestCase):
follower.on_block(block_6)
assert follower.tip() == block_3
assert len(follower.forks) == 2, f"{len(follower.forks)}"
assert follower.forks[0] == block_4.id()
assert follower.forks[1] == block_5.id()
assert follower.forks[0] == block_4.id
assert follower.forks[1] == block_5.id
def test_epoch_transition(self):
leader_notes = [Note(sk=i, value=100) for i in range(4)]
@ -170,8 +170,8 @@ class TestLedgerStateUpdate(TestCase):
follower.on_block(block_4)
assert follower.tip() == block_3
# then we add the note to "commitments" associated with slot 9
follower.ledger_state[block_2.id()].commitments.add(
Note(sk=4, value=100).commitment()
follower.ledger_state[block_2.id].commitments.add(
Note(sk=4, value=100).commitment
)
follower.on_block(block_4)
assert follower.tip() == block_4
@ -196,7 +196,7 @@ class TestLedgerStateUpdate(TestCase):
# but a new note is ineligible
note_new = Note(sk=1, value=10)
follower.tip_state().commitments.add(note_new.commitment())
follower.tip_state().commitments.add(note_new.commitment)
block_3_new = mk_block(slot=2, parent=block_2, note=note_new)
with self.assertRaises(InvalidLeaderProof):
follower.on_block(block_3_new)
@ -220,7 +220,7 @@ class TestLedgerStateUpdate(TestCase):
# mint a new note to be used for leader elections in upcoming epochs
note_new = Note(sk=1, value=10)
follower.ledger_state[block_0_0.id()].commitments.add(note_new.commitment())
follower.ledger_state[block_0_0.id].commitments.add(note_new.commitment)
# the new note is not yet eligible for elections
block_0_1_attempt = mk_block(slot=1, parent=block_0_0, note=note_new)

View File

@ -71,7 +71,7 @@ class TestStakeRelativization(TestCase):
PRINT_DEBUG = False
seed = 0
N = 3
N = 10
EPOCHS = 2
np.random.seed(seed)
@ -79,7 +79,7 @@ class TestStakeRelativization(TestCase):
stake = np.array((np.random.pareto(10, N) + 1) * 1000, dtype=np.int64)
notes = [Note(sk=i, value=int(s)) for i, s in enumerate(stake)]
config = Config.cryptarchia_v0_0_1(stake.sum() * 2).replace(k=40)
config = Config.cryptarchia_v0_0_1(stake.sum() * 2).replace(k=150)
genesis = mk_genesis_state(notes)
nodes = [TestNode(config, genesis, n) for n in notes]
@ -124,11 +124,13 @@ class TestStakeRelativization(TestCase):
f"var={slot_leaders.var():.3f}",
)
print("true total stake\t", stake.sum())
print("D_0\t", config.initial_total_stake)
print("D_0\t", config.initial_total_active_stake)
inferred_stake_by_epoch_by_rep = [
[
r.epoch_state(Slot(e * config.epoch_length)).total_stake()
r.epoch_state(
Slot(e * config.epoch_length)
).inferred_total_active_stake
for e in range(EPOCHS + 1)
]
for r in reps
@ -138,7 +140,7 @@ class TestStakeRelativization(TestCase):
"\n\t".join(
[
f"Rep {i}: {stakes}"
for i, stakes in inferred_stake_by_epoch_by_rep
for i, stakes in enumerate(inferred_stake_by_epoch_by_rep)
]
),
)
@ -154,10 +156,10 @@ class TestStakeRelativization(TestCase):
for node in reps:
inferred_stake = node.epoch_state(Slot(T)).total_active_stake()
pct_err = (
abs(stake.sum() - inferred_stake) / config.initial_total_active_stake
)
eps = (1 - config.total_active_stake_learning_rate) ** EPOCHS
pct_err = abs(stake.sum() - inferred_stake) / stake.sum()
# With beta=1.0, convergence is fast. Allow small error due to stochastic
# block production not matching the expected slot occupancy rate perfectly.
eps = 0.01
assert pct_err < eps, f"pct_err={pct_err} < eps={eps}"
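For intuition on the 1% tolerance, the update rule can be simulated in isolation. The sketch below is illustrative only (not part of the test suite) and uses a simplified, linearized lottery in which the expected number of blocks per slot is $f \cdot S / D$, with $S$ the true total stake and $D$ the current estimate; under this model $\beta = 1.0$ recovers $S$ in a single epoch in expectation:

```python
import numpy as np

f = 1 / 30        # active_slot_coeff
beta = 1.0        # total_active_stake_learning_rate
T = 27_000        # observation window: (3 + 3) * floor(k / f) slots for the test's k = 150
S = 1_000_000     # assumed true total active stake (arbitrary, for illustration only)
D = 2 * S         # initial estimate D_0, deliberately off by 2x as in the test

rng = np.random.default_rng(0)
for epoch in range(3):
    # linearized lottery model (assumption): expected blocks per slot ~ f * S / D
    blocks = rng.binomial(T, min(1.0, f * S / D))
    error = 1 - blocks / (T * f)
    D = max(1, int(D - beta * D * error))
    print(epoch, D, abs(S - D) / S)
```

Under this simplified model the residual error comes only from binomial noise over the $T$-slot window, which is what the small fixed `eps` above accounts for.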

View File

@ -67,7 +67,7 @@ class TestSync(TestCase):
peer.on_block(b)
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b4.id()])
self.assertEqual(peer.forks, [b4.id])
# Start a sync from genesis.
# Result: The same block tree as the peer's.
@ -92,7 +92,7 @@ class TestSync(TestCase):
for b in [b0, b1, b2, b3, b4, b5]:
peer.on_block(b)
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b4.id()])
self.assertEqual(peer.forks, [b4.id])
# Start a sync from a tree:
# b0 - b1
@ -123,7 +123,7 @@ class TestSync(TestCase):
for b in [b0, b1, b2, b3, b4, b5]:
peer.on_block(b)
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b4.id()])
self.assertEqual(peer.forks, [b4.id])
self.assertEqual(len(peer.ledger_state), 7)
# Start a sync from a tree without the fork:
@ -179,7 +179,7 @@ class TestSync(TestCase):
local = Follower(genesis, config)
sync(local, [peer0, peer1, peer2])
self.assertEqual(local.tip(), b5)
self.assertEqual(local.forks, [b4.id()])
self.assertEqual(local.forks, [b4.id])
self.assertEqual(len(local.ledger_state), 7)
def test_reject_invalid_blocks(self):
@ -236,7 +236,7 @@ class TestSync(TestCase):
for b in [b0, b1, b2, b3, b4, b5]:
peer.on_block(b)
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b2.id()])
self.assertEqual(peer.forks, [b2.id])
# And deliberately add invalid blocks (b6 ~ b7):
fake_note = Note(sk=2, value=10)
@ -246,7 +246,7 @@ class TestSync(TestCase):
apply_invalid_block_to_ledger_state(peer, b7)
# the tip shouldn't be changed.
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b2.id()])
self.assertEqual(peer.forks, [b2.id])
# Start a sync from a tree:
# b0 - b1 - b3 - b4
@ -261,8 +261,8 @@ class TestSync(TestCase):
sync(local, [peer])
self.assertEqual(local.tip(), peer.tip())
self.assertEqual(local.forks, peer.forks)
self.assertNotIn(b6.id(), local.ledger_state)
self.assertNotIn(b7.id(), local.ledger_state)
self.assertNotIn(b6.id, local.ledger_state)
self.assertNotIn(b7.id, local.ledger_state)
class TestSyncFromCheckpoint(TestCase):
@ -290,7 +290,7 @@ class TestSyncFromCheckpoint(TestCase):
#
# Result:
# b0 - b1 - b2 - b3
checkpoint = peer.ledger_state[b2.id()]
checkpoint = peer.ledger_state[b2.id]
local = Follower(genesis, config)
sync(local, [peer], checkpoint)
self.assertEqual(local.tip(), peer.tip())
@ -315,7 +315,7 @@ class TestSyncFromCheckpoint(TestCase):
for b in [b0, b1, b2, b3, b4, b5]:
peer.on_block(b)
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b4.id()])
self.assertEqual(peer.forks, [b4.id])
# Start a sync from the checkpoint:
# checkpoint
@ -326,7 +326,7 @@ class TestSyncFromCheckpoint(TestCase):
# b0 - b1 - b2 - b5 == tip
# \
# b3 - b4
checkpoint = peer.ledger_state[b2.id()]
checkpoint = peer.ledger_state[b2.id]
local = Follower(genesis, config)
sync(local, [peer], checkpoint)
self.assertEqual(local.tip(), peer.tip())
@ -367,11 +367,11 @@ class TestSyncFromCheckpoint(TestCase):
# b0 - b1 - b2 - b5 == tip
# \
# b3 - b4
checkpoint = peer1.ledger_state[b4.id()]
checkpoint = peer1.ledger_state[b4.id]
local = Follower(genesis, config)
sync(local, [peer0, peer1], checkpoint)
self.assertEqual(local.tip(), b5)
self.assertEqual(local.forks, [b4.id()])
self.assertEqual(local.forks, [b4.id])
self.assertEqual(len(local.ledger_state.keys()), 7)
def test_reject_invalid_blocks_from_backfilling_fork(self):
@ -392,7 +392,7 @@ class TestSyncFromCheckpoint(TestCase):
for b in [b0, b1, b2, b3, b4, b5]:
peer.on_block(b)
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b2.id()])
self.assertEqual(peer.forks, [b2.id])
# And deliberately add invalid blocks (b6 ~ b7):
fake_note = Note(sk=2, value=10)
@ -401,7 +401,7 @@ class TestSyncFromCheckpoint(TestCase):
apply_invalid_block_to_ledger_state(peer, b7)
# the tip shouldn't be changed.
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b2.id()])
self.assertEqual(peer.forks, [b2.id])
# Start a sync from a checkpoint where all ancestors are valid:
# checkpoint
@ -412,13 +412,13 @@ class TestSyncFromCheckpoint(TestCase):
# b0 - b1 - b3 - b4 - b5 == tip
# \
# b2
checkpoint = peer.ledger_state[b4.id()]
checkpoint = peer.ledger_state[b4.id]
local = Follower(genesis, config)
sync(local, [peer], checkpoint)
self.assertEqual(local.tip(), peer.tip())
self.assertEqual(local.forks, peer.forks)
self.assertNotIn(b6.id(), local.ledger_state)
self.assertNotIn(b7.id(), local.ledger_state)
self.assertNotIn(b6.id, local.ledger_state)
self.assertNotIn(b7.id, local.ledger_state)
def test_reject_invalid_blocks_from_backfilling_checkpoint_chain(self):
# Prepare a peer with invalid blocks in a fork:
@ -438,7 +438,7 @@ class TestSyncFromCheckpoint(TestCase):
for b in [b0, b1, b2, b3, b4, b5]:
peer.on_block(b)
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b2.id()])
self.assertEqual(peer.forks, [b2.id])
# And deliberately add invalid blocks (b6 ~ b7):
fake_note = Note(sk=2, value=10)
@ -447,7 +447,7 @@ class TestSyncFromCheckpoint(TestCase):
apply_invalid_block_to_ledger_state(peer, b7)
# the tip shouldn't be changed.
self.assertEqual(peer.tip(), b5)
self.assertEqual(peer.forks, [b2.id()])
self.assertEqual(peer.forks, [b2.id])
# Start a sync from a checkpoint where some ancestors are invalid:
# () checkpoint
@ -455,7 +455,7 @@ class TestSyncFromCheckpoint(TestCase):
# () - () - (invalid_b7)
#
# Result: `InvalidBlockFromBackfillFork` exception
checkpoint = peer.ledger_state[b7.id()]
checkpoint = peer.ledger_state[b7.id]
local = Follower(genesis, config)
with self.assertRaises(InvalidBlockFromBackfillFork):
sync(local, [peer], checkpoint)
@ -464,4 +464,4 @@ class TestSyncFromCheckpoint(TestCase):
def apply_invalid_block_to_ledger_state(follower: Follower, block: BlockHeader):
state = follower.ledger_state[block.parent].copy()
state.apply(block)
follower.ledger_state[block.id()] = state
follower.ledger_state[block.id] = state