diff --git a/cryptarchia/cryptarchia.py b/cryptarchia/cryptarchia.py index b510962..6f2f310 100644 --- a/cryptarchia/cryptarchia.py +++ b/cryptarchia/cryptarchia.py @@ -1,4 +1,4 @@ -from typing import TypeAlias, List, Dict +from typing import TypeAlias, List, Dict, Generator from hashlib import sha256, blake2b from math import floor from copy import deepcopy @@ -10,7 +10,6 @@ from collections import defaultdict import numpy as np - logger = logging.getLogger(__name__) @@ -124,6 +123,9 @@ class Slot: def __lt__(self, other): return self.absolute_slot < other.absolute_slot + def __hash__(self): + return hash(self.absolute_slot) + @dataclass class Coin: @@ -248,6 +250,9 @@ class BlockHeader: self.update_header_hash(h) return h.digest() + def __hash__(self): + return hash(self.id()) + @dataclass class LedgerState: @@ -362,17 +367,16 @@ class EpochState: class Follower: def __init__(self, genesis_state: LedgerState, config: Config): self.config = config - self.forks = [] + self.forks: list[Id] = [] self.local_chain = genesis_state.block.id() self.genesis_state = genesis_state self.ledger_state = {genesis_state.block.id(): genesis_state.copy()} self.epoch_state = {} - def validate_header(self, block: BlockHeader) -> bool: + def validate_header(self, block: BlockHeader): # TODO: verify blocks are not in the 'future' if block.parent not in self.ledger_state: - logger.warning("We have not seen block parent") - return False + raise ParentNotFound current_state = self.ledger_state[block.parent].copy() @@ -391,8 +395,7 @@ class Follower: # We take a shortcut for (1.) by restricting orphans to proofs we've # already processed in other branches. if orphan.id() not in self.ledger_state: - logger.warning("missing orphan proof") - return False + raise MissingOrphanProof # (2.) 
is satisfied by verifying the proof against current state ensuring: # - it is a valid proof @@ -404,21 +407,21 @@ class Follower: epoch_state, current_state, ): - logger.warning("invalid orphan proof") - return False + raise InvalidOrphanProof # if an adopted leadership proof is valid we need to apply its # effects to the ledger state current_state.apply_leader_proof(orphan.leader_proof) # TODO: this is not the full block validation spec, only slot leader is verified - return self.verify_slot_leader( + if not self.verify_slot_leader( block.slot, block.parent, block.leader_proof, epoch_state, current_state, - ) + ): + raise InvalidLeaderProof def verify_slot_leader( self, @@ -441,19 +444,23 @@ class Follower: and current_state.verify_unspent(proof.nullifier) ) - def on_block(self, block: BlockHeader): + def apply_block_to_ledger_state(self, block: BlockHeader) -> bool: if block.id() in self.ledger_state: logger.warning("dropping already processed block") - return + return False - if not self.validate_header(block): - logger.warning("invalid header") - return + self.validate_header(block) new_state = self.ledger_state[block.parent].copy() new_state.apply(block) self.ledger_state[block.id()] = new_state + return True + + def on_block(self, block: BlockHeader): + if not self.apply_block_to_ledger_state(block): + return + if block.parent == self.local_chain: # simply extending the local chain self.local_chain = block.id() @@ -471,6 +478,15 @@ class Follower: self.forks.remove(new_tip) self.local_chain = new_tip + def apply_checkpoint(self, checkpoint: LedgerState): + checkpoint_block_id = checkpoint.block.id() + self.ledger_state[checkpoint_block_id] = checkpoint + if self.local_chain != self.genesis_state.block.id(): + self.forks.append(self.local_chain) + if checkpoint_block_id in self.forks: + self.forks.remove(checkpoint_block_id) + self.local_chain = checkpoint_block_id + def unimported_orphans(self) -> list[BlockHeader]: """ Returns all unimported orphans w.r.t. the given tip's state. @@ -482,9 +498,10 @@ class Follower: orphans = [] for fork in self.forks: - _, fork_depth = common_prefix_depth(tip, fork, self.ledger_state) - for block_state in chain_suffix(fork, fork_depth, self.ledger_state): - b = block_state.block + _, _, fork_depth, fork_suffix = common_prefix_depth( + tip, fork, self.ledger_state + ) + for b in fork_suffix: if b.leader_proof.nullifier not in tip_state.nullifiers: tip_state.nullifiers.add(b.leader_proof.nullifier) orphans += [b] @@ -592,6 +609,17 @@ class Follower: ) return int(prev_epoch.inferred_total_active_stake - h * blocks_per_slot_err) + def blocks_by_slot(self, from_slot: Slot) -> Generator[BlockHeader, None, None]: + # Returns blocks in the given range of slots in order of slot + # NOTE: In real implementation, this should be done by optimized data structures. 
+ blocks_by_slot: dict[Slot, list[BlockHeader]] = defaultdict(list) + for state in self.ledger_state.values(): + if from_slot <= state.block.slot: + blocks_by_slot[state.block.slot].append(state.block) + for slot in sorted(blocks_by_slot.keys()): + for block in blocks_by_slot[slot]: + yield block + def phi(f: float, alpha: float) -> float: """ @@ -646,39 +674,68 @@ class Leader: ) -def iter_chain(tip: Id, states: Dict[Id, LedgerState]): +def iter_chain( + tip: Id, states: Dict[Id, LedgerState] +) -> Generator[LedgerState, None, None]: while tip in states: yield states[tip] tip = states[tip].block.parent -def chain_suffix(tip: Id, n: int, states: Dict[Id, LedgerState]) -> list[LedgerState]: - return list(reversed(list(itertools.islice(iter_chain(tip, states), n)))) +def iter_chain_blocks( + tip: Id, states: Dict[Id, LedgerState] +) -> Generator[BlockHeader, None, None]: + for state in iter_chain(tip, states): + yield state.block -def common_prefix_depth(a: Id, b: Id, states: Dict[Id, LedgerState]) -> (int, int): - a_blocks = iter_chain(a, states) - b_blocks = iter_chain(b, states) +def common_prefix_depth( + a: Id, b: Id, states: Dict[Id, LedgerState] +) -> tuple[int, list[BlockHeader], int, list[BlockHeader]]: + return common_prefix_depth_from_chains( + iter_chain_blocks(a, states), iter_chain_blocks(b, states) + ) + +def common_prefix_depth_from_chains( + a_blocks: Generator[BlockHeader, None, None], + b_blocks: Generator[BlockHeader, None, None], +) -> tuple[int, list[BlockHeader], int, list[BlockHeader]]: seen = {} + a_suffix: list[BlockHeader] = [] + b_suffix: list[BlockHeader] = [] depth = 0 while True: try: - a_block = next(a_blocks).block.id() - if a_block in seen: + a_block = next(a_blocks) + a_suffix.append(a_block) + a_block_id = a_block.id() + if a_block_id in seen: # we had seen this block from the fork chain - return depth, seen[a_block] + return ( + depth, + list(reversed(a_suffix[: depth + 1])), + seen[a_block_id], + list(reversed(b_suffix[: seen[a_block_id] + 1])), + ) - seen[a_block] = depth + seen[a_block_id] = depth except StopIteration: pass try: - b_block = next(b_blocks).block.id() - if b_block in seen: + b_block = next(b_blocks) + b_suffix.append(b_block) + b_block_id = b_block.id() + if b_block_id in seen: # we had seen the fork in the local chain - return seen[b_block], depth - seen[b_block] = depth + return ( + seen[b_block_id], + list(reversed(a_suffix[: seen[b_block_id] + 1])), + depth, + list(reversed(b_suffix[: depth + 1])), + ) + seen[b_block_id] = depth except StopIteration: pass @@ -687,13 +744,8 @@ def common_prefix_depth(a: Id, b: Id, states: Dict[Id, LedgerState]) -> (int, in assert False -def chain_density( - head: Id, slot: Slot, reorg_depth: int, states: Dict[Id, LedgerState] -) -> int: - assert type(head) == Id - chain = iter_chain(head, states) - segment = itertools.islice(chain, reorg_depth) - return sum(1 for b in segment if b.block.slot < slot) +def chain_density(chain: list[BlockHeader], slot: Slot) -> int: + return sum(1 for b in chain if b.slot < slot) def block_children(states: Dict[Id, LedgerState]) -> Dict[Id, set[Id]]: @@ -723,7 +775,9 @@ def maxvalid_bg( cmax = local_chain for fork in forks: - cmax_depth, fork_depth = common_prefix_depth(cmax, fork, states) + cmax_depth, cmax_suffix, fork_depth, fork_suffix = common_prefix_depth( + cmax, fork, states + ) if cmax_depth <= k: # Longest chain fork choice rule if cmax_depth < fork_depth: @@ -731,11 +785,11 @@ def maxvalid_bg( else: # The chain is forking too much, we need to pay a bit more 
attention # In particular, select the chain that is the densest after the fork - cmax_divergent_block = chain_suffix(cmax, cmax_depth, states)[0].block + cmax_divergent_block = cmax_suffix[0] forking_slot = Slot(cmax_divergent_block.slot.absolute_slot + s) - cmax_density = chain_density(cmax, forking_slot, cmax_depth, states) - fork_density = chain_density(fork, forking_slot, fork_depth, states) + cmax_density = chain_density(cmax_suffix, forking_slot) + fork_density = chain_density(fork_suffix, forking_slot) if cmax_density < fork_density: cmax = fork @@ -743,5 +797,25 @@ def maxvalid_bg( return cmax +class ParentNotFound(Exception): + def __str__(self): + return "Parent not found" + + +class MissingOrphanProof(Exception): + def __str__(self): + return "Missing orphan proof" + + +class InvalidOrphanProof(Exception): + def __str__(self): + return "Invalid orphan proof" + + +class InvalidLeaderProof(Exception): + def __str__(self): + return "Invalid leader proof" + + if __name__ == "__main__": pass diff --git a/cryptarchia/sync.py b/cryptarchia/sync.py new file mode 100644 index 0000000..6921c3b --- /dev/null +++ b/cryptarchia/sync.py @@ -0,0 +1,181 @@ +from collections import defaultdict +from typing import Generator + +from cryptarchia.cryptarchia import ( + BlockHeader, + Follower, + Id, + ParentNotFound, + Slot, + common_prefix_depth_from_chains, + iter_chain_blocks, +) + + +def sync(local: Follower, peers: list[Follower]): + # Syncs the local block tree with the peers, starting from the local tip. + # This covers the case where the local tip is not on the latest honest chain anymore. + + # Repeat the sync process until no peer has a tip ahead of the local tip, + # because peers' tips may advance during the sync process. + block_fetcher = BlockFetcher(peers) + rejected_blocks: set[Id] = set() + while True: + # Fetch blocks from the peers in the range of slots from the local tip to the latest tip. + # Gather orphaned blocks, which are blocks from forks that are absent in the local block tree. + + start_slot = local.tip().slot + orphans: set[BlockHeader] = set() + num_blocks = 0 + for block in block_fetcher.fetch_blocks_from(start_slot): + num_blocks += 1 + # Reject blocks that have been rejected in the past + # or whose parent has been rejected. + if {block.id(), block.parent} & rejected_blocks: + rejected_blocks.add(block.id()) + continue + + try: + local.on_block(block) + orphans.discard(block) + except ParentNotFound: + orphans.add(block) + except Exception: + rejected_blocks.add(block.id()) + + # Finish the sync process if no block has been fetched, + # which means that no peer has a tip ahead of the local tip. + if num_blocks == 0: + return + + # Backfill the orphan forks starting from the orphan blocks with applying fork choice rule. + # + # Sort the orphan blocks by slot in descending order to minimize the number of backfillings. + for orphan in sorted(orphans, key=lambda b: b.slot, reverse=True): + # Skip the orphan block if it has been processed during the previous backfillings + # (i.e. if it has been already added to the local block tree). + # Or, skip if it has been rejected during the previous backfillings. 
+            if (
+                orphan.id() not in local.ledger_state
+                and orphan.id() not in rejected_blocks
+            ):
+                try:
+                    backfill_fork(local, orphan, block_fetcher)
+                except InvalidBlockFromBackfillFork as e:
+                    rejected_blocks.update(block.id() for block in e.invalid_suffix)
+
+
+def backfill_fork(
+    local: Follower,
+    fork_tip: BlockHeader,
+    block_fetcher: "BlockFetcher",
+):
+    # Backfills a fork that is absent from the local block tree by fetching blocks from the peers.
+    # During backfilling, the fork choice rule is continuously applied.
+    #
+    # If necessary, the local honest chain is also backfilled for the fork choice rule.
+    # This can happen if the honest chain was not built from the genesis (i.e. checkpoint sync).
+
+    _, tip_suffix, _, fork_suffix = common_prefix_depth_from_chains(
+        block_fetcher.fetch_chain_backward(local.tip_id(), local),
+        block_fetcher.fetch_chain_backward(fork_tip.id(), local),
+    )
+
+    # First, backfill the local honest chain if some blocks are missing.
+    # In other words, backfill the local block tree, which contains the honest chain.
+    for block in tip_suffix:
+        try:
+            # Just applying the block to the ledger state is enough,
+            # instead of calling `on_block`, which also runs the fork choice rule.
+            local.apply_block_to_ledger_state(block)
+        except Exception as e:
+            raise InvalidBlockTree(e)
+
+    # Then, add the blocks in the fork suffix while applying the fork choice rule.
+    # The last block added is the tip of the fork suffix, which triggers the final fork choice.
+    for i, block in enumerate(fork_suffix):
+        try:
+            local.on_block(block)
+        except Exception as e:
+            raise InvalidBlockFromBackfillFork(e, fork_suffix[i:])
+
+
+class BlockFetcher:
+    # NOTE: This class is a mock that uses a naive approach to fetch blocks from multiple peers.
+    # In a real implementation, any optimized strategy can be used, such as parallel fetching.
+
+    def __init__(self, peers: list[Follower]):
+        self.peers = peers
+
+    def fetch_blocks_from(self, start_slot: Slot) -> Generator[BlockHeader, None, None]:
+        # Filter peers that have a tip ahead of the local tip
+        # and group peers by their tip to minimize the number of fetches.
+        groups = self.filter_and_group_peers_by_tip(start_slot)
+        for group in groups.values():
+            for block in BlockFetcher.fetch_blocks_by_slot(group, start_slot):
+                yield block
+
+    def filter_and_group_peers_by_tip(
+        self, start_slot: Slot
+    ) -> dict[BlockHeader, list[Follower]]:
+        # Group peers by their tip.
+        # Keep only the peers whose tip is ahead of the start_slot.
+        groups: dict[BlockHeader, list[Follower]] = defaultdict(list)
+        for peer in self.peers:
+            if peer.tip().slot.absolute_slot > start_slot.absolute_slot:
+                groups[peer.tip()].append(peer)
+        return groups
+
+    @staticmethod
+    def fetch_blocks_by_slot(
+        peers: list[Follower], start_slot: Slot
+    ) -> Generator[BlockHeader, None, None]:
+        # Fetch blocks in the given range of slots from one of the peers.
+        # Blocks should be returned in order of slot.
+        # If a peer fails, try the next peer.
+        for peer in peers:
+            try:
+                for block in peer.blocks_by_slot(start_slot):
+                    yield block
+                    # Update start_slot in case the next peer has to be tried.
+                    start_slot = block.slot
+                # The peer successfully returned all blocks. No need to try the next peer.
+                break
+            except Exception:
+                continue
+
+    def fetch_chain_backward(
+        self, tip: Id, local: Follower
+    ) -> Generator[BlockHeader, None, None]:
+        # Fetches a chain of blocks from the peers, starting from the given tip to the genesis.
+ # Attempts to extend the chain as much as possible by querying multiple peers, + # considering that not all peers may have the full chain (from the genesis). + + id = tip + # First, try to iterate the chain from the local block tree. + for block in iter_chain_blocks(id, local.ledger_state): + yield block + if block.id() == local.genesis_state.block.id(): + return + id = block.parent + + # Try to continue by fetching the remaining blocks from the peers + for peer in self.peers: + for block in iter_chain_blocks(id, peer.ledger_state): + yield block + if block.id() == local.genesis_state.block.id(): + return + id = block.parent + + +class InvalidBlockTree(Exception): + def __init__(self, cause: Exception): + super().__init__() + self.cause = cause + + +class InvalidBlockFromBackfillFork(Exception): + def __init__(self, cause: Exception, invalid_suffix: list[BlockHeader]): + super().__init__() + self.cause = cause + self.invalid_suffix = invalid_suffix diff --git a/cryptarchia/test_fork_choice.py b/cryptarchia/test_fork_choice.py index 1a283c5..24b295d 100644 --- a/cryptarchia/test_fork_choice.py +++ b/cryptarchia/test_fork_choice.py @@ -36,28 +36,138 @@ class TestForkChoice(TestCase): b.id(): LedgerState(block=b) for b in [b0, b1, b2, b3, b4, b5, b6, b7] } - assert (d := common_prefix_depth(b0.id(), b0.id(), states)) == (0, 0), d - assert (d := common_prefix_depth(b1.id(), b0.id(), states)) == (1, 0), d - assert (d := common_prefix_depth(b0.id(), b1.id(), states)) == (0, 1), d - assert (d := common_prefix_depth(b1.id(), b1.id(), states)) == (0, 0), d - assert (d := common_prefix_depth(b2.id(), b0.id(), states)) == (2, 0), d - assert (d := common_prefix_depth(b0.id(), b2.id(), states)) == (0, 2), d - assert (d := common_prefix_depth(b3.id(), b0.id(), states)) == (3, 0), d - assert (d := common_prefix_depth(b0.id(), b3.id(), states)) == (0, 3), d - assert (d := common_prefix_depth(b1.id(), b4.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b4.id(), b1.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b1.id(), b5.id(), states)) == (1, 2), d - assert (d := common_prefix_depth(b5.id(), b1.id(), states)) == (2, 1), d - assert (d := common_prefix_depth(b2.id(), b5.id(), states)) == (2, 2), d - assert (d := common_prefix_depth(b5.id(), b2.id(), states)) == (2, 2), d - assert (d := common_prefix_depth(b3.id(), b5.id(), states)) == (3, 2), d - assert (d := common_prefix_depth(b5.id(), b3.id(), states)) == (2, 3), d - assert (d := common_prefix_depth(b3.id(), b6.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b6.id(), b3.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b3.id(), b7.id(), states)) == (1, 2), d - assert (d := common_prefix_depth(b7.id(), b3.id(), states)) == (2, 1), d - assert (d := common_prefix_depth(b5.id(), b7.id(), states)) == (2, 4), d - assert (d := common_prefix_depth(b7.id(), b5.id(), states)) == (4, 2), d + assert (d := common_prefix_depth(b0.id(), b0.id(), states)) == ( + 0, + [b0], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b1.id(), b0.id(), states)) == ( + 1, + [b0, b1], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b0.id(), b1.id(), states)) == ( + 0, + [b0], + 1, + [b0, b1], + ), d + assert (d := common_prefix_depth(b1.id(), b1.id(), states)) == ( + 0, + [b1], + 0, + [b1], + ), d + assert (d := common_prefix_depth(b2.id(), b0.id(), states)) == ( + 2, + [b0, b1, b2], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b0.id(), b2.id(), states)) == ( + 0, + [b0], + 2, + [b0, b1, b2], + ), d + 
assert (d := common_prefix_depth(b3.id(), b0.id(), states)) == ( + 3, + [b0, b1, b2, b3], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b0.id(), b3.id(), states)) == ( + 0, + [b0], + 3, + [b0, b1, b2, b3], + ), d + assert (d := common_prefix_depth(b1.id(), b4.id(), states)) == ( + 1, + [b0, b1], + 1, + [b0, b4], + ), d + assert (d := common_prefix_depth(b4.id(), b1.id(), states)) == ( + 1, + [b0, b4], + 1, + [b0, b1], + ), d + assert (d := common_prefix_depth(b1.id(), b5.id(), states)) == ( + 1, + [b0, b1], + 2, + [b0, b4, b5], + ), d + assert (d := common_prefix_depth(b5.id(), b1.id(), states)) == ( + 2, + [b0, b4, b5], + 1, + [b0, b1], + ), d + assert (d := common_prefix_depth(b2.id(), b5.id(), states)) == ( + 2, + [b0, b1, b2], + 2, + [b0, b4, b5], + ), d + assert (d := common_prefix_depth(b5.id(), b2.id(), states)) == ( + 2, + [b0, b4, b5], + 2, + [b0, b1, b2], + ), d + assert (d := common_prefix_depth(b3.id(), b5.id(), states)) == ( + 3, + [b0, b1, b2, b3], + 2, + [b0, b4, b5], + ), d + assert (d := common_prefix_depth(b5.id(), b3.id(), states)) == ( + 2, + [b0, b4, b5], + 3, + [b0, b1, b2, b3], + ), d + assert (d := common_prefix_depth(b3.id(), b6.id(), states)) == ( + 1, + [b2, b3], + 1, + [b2, b6], + ), d + assert (d := common_prefix_depth(b6.id(), b3.id(), states)) == ( + 1, + [b2, b6], + 1, + [b2, b3], + ), d + assert (d := common_prefix_depth(b3.id(), b7.id(), states)) == ( + 1, + [b2, b3], + 2, + [b2, b6, b7], + ), d + assert (d := common_prefix_depth(b7.id(), b3.id(), states)) == ( + 2, + [b2, b6, b7], + 1, + [b2, b3], + ), d + assert (d := common_prefix_depth(b5.id(), b7.id(), states)) == ( + 2, + [b0, b4, b5], + 4, + [b0, b1, b2, b6, b7], + ), d + assert (d := common_prefix_depth(b7.id(), b5.id(), states)) == ( + 4, + [b0, b1, b2, b6, b7], + 2, + [b0, b4, b5], + ), d def test_fork_choice_long_sparse_chain(self): # The longest chain is not dense after the fork diff --git a/cryptarchia/test_ledger_state_update.py b/cryptarchia/test_ledger_state_update.py index d67abd4..d66abf0 100644 --- a/cryptarchia/test_ledger_state_update.py +++ b/cryptarchia/test_ledger_state_update.py @@ -1,10 +1,14 @@ from unittest import TestCase -import numpy as np - -from .cryptarchia import Follower, Coin, iter_chain - -from .test_common import mk_config, mk_block, mk_genesis_state +from .cryptarchia import ( + Coin, + Follower, + InvalidLeaderProof, + MissingOrphanProof, + ParentNotFound, + iter_chain, +) +from .test_common import mk_block, mk_config, mk_genesis_state class TestLedgerStateUpdate(TestCase): @@ -46,7 +50,8 @@ class TestLedgerStateUpdate(TestCase): assert follower.tip_state().verify_unspent(leader_coin.nullifier()) == False reuse_coin_block = mk_block(slot=1, parent=block, coin=leader_coin) - follower.on_block(reuse_coin_block) + with self.assertRaises(InvalidLeaderProof): + follower.on_block(reuse_coin_block) # Follower should *not* have accepted the block assert len(list(iter_chain(follower.tip_id(), follower.ledger_state))) == 2 @@ -126,7 +131,8 @@ class TestLedgerStateUpdate(TestCase): # Nothing changes from the local chain and forks. 
unknown_block = mk_block(parent=block_5, slot=2, coin=coins[5]) block_6 = mk_block(parent=unknown_block, slot=2, coin=coins[6]) - follower.on_block(block_6) + with self.assertRaises(ParentNotFound): + follower.on_block(block_6) assert follower.tip() == block_3 assert len(follower.forks) == 2, f"{len(follower.forks)}" assert follower.forks[0] == block_4.id() @@ -169,7 +175,8 @@ class TestLedgerStateUpdate(TestCase): # so that the new block can be accepted only if that is the snapshot used # first, verify that if we don't change the state, the block is not accepted block_4 = mk_block(slot=40, parent=block_3, coin=Coin(sk=4, value=100)) - follower.on_block(block_4) + with self.assertRaises(InvalidLeaderProof): + follower.on_block(block_4) assert follower.tip() == block_3 # then we add the coin to "spendable commitments" associated with slot 9 follower.ledger_state[block_2.id()].commitments_spend.add( @@ -193,7 +200,8 @@ class TestLedgerStateUpdate(TestCase): # coin can't be reused to win following slots: block_2_reuse = mk_block(slot=1, parent=block_1, coin=coin) - follower.on_block(block_2_reuse) + with self.assertRaises(InvalidLeaderProof): + follower.on_block(block_2_reuse) assert follower.tip() == block_1 # but the evolved coin is eligible @@ -224,7 +232,8 @@ class TestLedgerStateUpdate(TestCase): # the new coin is not yet eligible for elections block_0_1_attempt = mk_block(slot=1, parent=block_0_0, coin=coin_new) - follower.on_block(block_0_1_attempt) + with self.assertRaises(InvalidLeaderProof): + follower.on_block(block_0_1_attempt) assert follower.tip() == block_0_0 # whereas the evolved coin from genesis can be spent immediately @@ -238,7 +247,8 @@ class TestLedgerStateUpdate(TestCase): # stake distribution snapshot is taken at the beginning of the previous epoch block_1_0 = mk_block(slot=20, parent=block_0_1, coin=coin_new) - follower.on_block(block_1_0) + with self.assertRaises(InvalidLeaderProof): + follower.on_block(block_1_0) assert follower.tip() == block_0_1 # ---- EPOCH 2 ---- @@ -267,7 +277,8 @@ class TestLedgerStateUpdate(TestCase): coin_new = coin.evolve() coin_new_new = coin_new.evolve() block_0_1 = mk_block(slot=1, parent=block_0_0, coin=coin_new_new) - follower.on_block(block_0_1) + with self.assertRaises(InvalidLeaderProof): + follower.on_block(block_0_1) # the coin evolved twice should not be accepted as it is not in the lead commitments assert follower.tip() == block_0_0 @@ -283,7 +294,8 @@ class TestLedgerStateUpdate(TestCase): coin=coin_orphan.evolve(), orphaned_proofs=[orphan], ) - follower.on_block(block_0_1) + with self.assertRaises(MissingOrphanProof): + follower.on_block(block_0_1) # since follower had not seen this orphan prior to being included as # an orphan proof, it will be rejected diff --git a/cryptarchia/test_sync.py b/cryptarchia/test_sync.py new file mode 100644 index 0000000..101e8e0 --- /dev/null +++ b/cryptarchia/test_sync.py @@ -0,0 +1,501 @@ +from unittest import TestCase + +from cryptarchia.cryptarchia import BlockHeader, Coin, Follower +from cryptarchia.sync import InvalidBlockTree, sync +from cryptarchia.test_common import mk_block, mk_config, mk_genesis_state + + +class TestSync(TestCase): + def test_sync_single_chain_from_genesis(self): + # Prepare a peer with a single chain: + # b0 - b1 - b2 - b3 + coin = Coin(sk=0, value=10) + config = mk_config([coin]) + genesis = mk_genesis_state([coin]) + peer = Follower(genesis, config) + b0, coin = mk_block(genesis.block, 1, coin), coin.evolve() + b1, coin = mk_block(b0, 2, coin), coin.evolve() + b2, 
coin = mk_block(b1, 3, coin), coin.evolve()
+        b3, coin = mk_block(b2, 4, coin), coin.evolve()
+        for b in [b0, b1, b2, b3]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b3)
+        self.assertEqual(peer.forks, [])
+
+        # Start a sync from genesis.
+        # Result: The same block tree as the peer's.
+        local = Follower(genesis, config)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_single_chain_from_middle(self):
+        # Prepare a peer with a single chain:
+        # b0 - b1 - b2 - b3
+        coin = Coin(sk=0, value=10)
+        config = mk_config([coin])
+        genesis = mk_genesis_state([coin])
+        peer = Follower(genesis, config)
+        b0, coin = mk_block(genesis.block, 1, coin), coin.evolve()
+        b1, coin = mk_block(b0, 2, coin), coin.evolve()
+        b2, coin = mk_block(b1, 3, coin), coin.evolve()
+        b3, coin = mk_block(b2, 4, coin), coin.evolve()
+        for b in [b0, b1, b2, b3]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b3)
+        self.assertEqual(peer.forks, [])
+
+        # Start a sync from a tree:
+        # b0 - b1
+        #
+        # Result: The same block tree as the peer's.
+        local = Follower(genesis, config)
+        for b in [b0, b1]:
+            local.on_block(b)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_forks_from_genesis(self):
+        # Prepare a peer with forks:
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b4.id()])
+
+        # Start a sync from genesis.
+        # Result: The same block tree as the peer's.
+        local = Follower(genesis, config)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_forks_from_middle(self):
+        # Prepare a peer with forks:
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b4.id()])
+
+        # Start a sync from a tree:
+        # b0 - b1
+        #   \
+        #    b3
+        #
+        # Result: The same block tree as the peer's.
+        local = Follower(genesis, config)
+        for b in [b0, b1, b3]:
+            local.on_block(b)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_forks_by_backfilling(self):
+        # Prepare a peer with forks:
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b4.id()])
+        self.assertEqual(len(peer.ledger_state), 7)
+
+        # Start a sync from a tree without the fork:
+        # b0 - b1
+        #
+        # Result: The same block tree as the peer's.
+        local = Follower(genesis, config)
+        for b in [b0, b1]:
+            local.on_block(b)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+        self.assertEqual(len(local.ledger_state), len(peer.ledger_state))
+
+    def test_sync_multiple_peers_from_genesis(self):
+        # Prepare multiple peers:
+        # Peer-0:           b5
+        #                  /
+        # Peer-1: b0 - b1 - b2
+        #           \
+        # Peer-2:    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        peer0 = Follower(genesis, config)
+        for b in [b0, b1, b2, b5]:
+            peer0.on_block(b)
+        self.assertEqual(peer0.tip(), b5)
+        self.assertEqual(peer0.forks, [])
+        peer1 = Follower(genesis, config)
+        for b in [b0, b1, b2]:
+            peer1.on_block(b)
+        self.assertEqual(peer1.tip(), b2)
+        self.assertEqual(peer1.forks, [])
+        peer2 = Follower(genesis, config)
+        for b in [b0, b3, b4]:
+            peer2.on_block(b)
+        self.assertEqual(peer2.tip(), b4)
+        self.assertEqual(peer2.forks, [])
+
+        # Start a sync from genesis.
+ # + # Result: A merged block tree + # b5 + # / + # b0 - b1 - b2 + # \ + # b3 - b4 + local = Follower(genesis, config) + sync(local, [peer0, peer1, peer2]) + self.assertEqual(local.tip(), b5) + self.assertEqual(local.forks, [b4.id()]) + self.assertEqual(len(local.ledger_state), 7) + + def test_reject_invalid_blocks(self): + # Prepare a peer with invalid blocks: + # b0 - b1 - b2 - b3 - (invalid_b4) - (invalid_b5) + # + # First, build a valid chain (b0 ~ b3): + coin = Coin(sk=0, value=10) + config = mk_config([coin]) + genesis = mk_genesis_state([coin]) + peer = Follower(genesis, config) + b0, coin = mk_block(genesis.block, 1, coin), coin.evolve() + b1, coin = mk_block(b0, 2, coin), coin.evolve() + b2, coin = mk_block(b1, 3, coin), coin.evolve() + b3, coin = mk_block(b2, 4, coin), coin.evolve() + for b in [b0, b1, b2, b3]: + peer.on_block(b) + self.assertEqual(peer.tip(), b3) + self.assertEqual(peer.forks, []) + + # And deliberately, add invalid blocks (b4 ~ b5): + fake_coin = Coin(sk=1, value=10) + b4, fake_coin = mk_block(b3, 5, fake_coin), fake_coin.evolve() + b5, fake_coin = mk_block(b4, 6, fake_coin), fake_coin.evolve() + apply_invalid_block_to_ledger_state(peer, b4) + apply_invalid_block_to_ledger_state(peer, b5) + # the tip shouldn't be changed. + self.assertEqual(peer.tip(), b3) + self.assertEqual(peer.forks, []) + + # Start a sync from genesis. + # + # Result: The same honest chain, but without invalid blocks. + # b0 - b1 - b2 - b3 == tip + local = Follower(genesis, config) + sync(local, [peer]) + self.assertEqual(local.tip(), peer.tip()) + self.assertEqual(local.forks, peer.forks) + + def test_reject_invalid_blocks_from_backfilling(self): + # Prepare a peer with invalid blocks in a fork: + # b0 - b1 - b3 - b4 - b5 == tip + # \ + # b2 - (invalid_b6) - (invalid_b7) + # + # First, build a valid chain (b0 ~ b5): + c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10) + config = mk_config([c_a, c_b]) + genesis = mk_genesis_state([c_a, c_b]) + peer = Follower(genesis, config) + b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve() + b1, c_a = mk_block(b0, 2, c_a), c_a.evolve() + b2, c_b = mk_block(b0, 2, c_b), c_b.evolve() + b3, c_a = mk_block(b1, 3, c_a), c_a.evolve() + b4, c_a = mk_block(b3, 4, c_a), c_a.evolve() + b5, c_a = mk_block(b4, 5, c_a), c_a.evolve() + for b in [b0, b1, b2, b3, b4, b5]: + peer.on_block(b) + self.assertEqual(peer.tip(), b5) + self.assertEqual(peer.forks, [b2.id()]) + + # And deliberately, add invalid blocks (b6 ~ b7): + fake_coin = Coin(sk=2, value=10) + b6, fake_coin = mk_block(b2, 3, fake_coin), fake_coin.evolve() + b7, fake_coin = mk_block(b6, 4, fake_coin), fake_coin.evolve() + apply_invalid_block_to_ledger_state(peer, b6) + apply_invalid_block_to_ledger_state(peer, b7) + # the tip shouldn't be changed. 
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b2.id()])
+
+        # Start a sync from a tree:
+        # b0 - b1 - b3 - b4
+        #
+        # Result: The same forks, but without invalid blocks
+        # b0 - b1 - b3 - b4 - b5 == tip
+        #   \
+        #    b2
+        local = Follower(genesis, config)
+        for b in [b0, b1, b3, b4]:
+            local.on_block(b)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+        self.assertNotIn(b6.id(), local.ledger_state)
+        self.assertNotIn(b7.id(), local.ledger_state)
+
+
+class TestSyncFromCheckpoint(TestCase):
+    def test_sync_single_chain(self):
+        # Prepare a peer with a single chain:
+        # b0 - b1 - b2 - b3
+        #           ||
+        #       checkpoint
+        coin = Coin(sk=0, value=10)
+        config = mk_config([coin])
+        genesis = mk_genesis_state([coin])
+        peer = Follower(genesis, config)
+        b0, coin = mk_block(genesis.block, 1, coin), coin.evolve()
+        b1, coin = mk_block(b0, 2, coin), coin.evolve()
+        b2, coin = mk_block(b1, 3, coin), coin.evolve()
+        b3, coin = mk_block(b2, 4, coin), coin.evolve()
+        for b in [b0, b1, b2, b3]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b3)
+        self.assertEqual(peer.forks, [])
+
+        # Start a sync from the checkpoint:
+        # () - () - b2
+        #           ||
+        #       checkpoint
+        #
+        # Result: An honest chain without historical blocks
+        # () - () - b2 - b3
+        checkpoint = peer.ledger_state[b2.id()]
+        local = Follower(genesis, config)
+        local.apply_checkpoint(checkpoint)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+        self.assertEqual(
+            set(local.ledger_state.keys()), set([genesis.block.id(), b2.id(), b3.id()])
+        )
+
+    def test_sync_forks(self):
+        # Prepare a peer with forks:
+        #       checkpoint
+        #           ||
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b4.id()])
+
+        # Start a sync from the checkpoint:
+        #       checkpoint
+        #           ||
+        # () - () - b2
+        #
+        # Result: Backfilled forks.
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        checkpoint = peer.ledger_state[b2.id()]
+        local = Follower(genesis, config)
+        local.apply_checkpoint(checkpoint)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+        self.assertEqual(set(local.ledger_state.keys()), set(peer.ledger_state.keys()))
+
+    def test_sync_from_dishonest_checkpoint(self):
+        # Prepare multiple peers and a dishonest checkpoint:
+        # Peer0: b0 - b1 - b2 - b5 == tip
+        #          \
+        # Peer1:    b3 - b4
+        #                ||
+        #            checkpoint
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        peer0 = Follower(genesis, config)
+        for b in [b0, b1, b2, b5]:
+            peer0.on_block(b)
+        self.assertEqual(peer0.tip(), b5)
+        self.assertEqual(peer0.forks, [])
+        peer1 = Follower(genesis, config)
+        for b in [b0, b3, b4]:
+            peer1.on_block(b)
+        self.assertEqual(peer1.tip(), b4)
+        self.assertEqual(peer1.forks, [])
+
+        # Start a sync from the dishonest checkpoint:
+        #          checkpoint
+        #              ||
+        # () - () - b4
+        #
+        # Result: The honest chain is found eventually by backfilling.
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        checkpoint = peer1.ledger_state[b4.id()]
+        local = Follower(genesis, config)
+        local.apply_checkpoint(checkpoint)
+        sync(local, [peer0, peer1])
+        self.assertEqual(local.tip(), b5)
+        self.assertEqual(local.forks, [b4.id()])
+        self.assertEqual(len(local.ledger_state.keys()), 7)
+
+    def test_reject_invalid_blocks_from_backfilling_fork(self):
+        # Prepare a peer with invalid blocks in a fork:
+        # b0 - b1 - b3 - b4 - b5 == tip
+        #   \
+        #    b2 - (invalid_b6) - (invalid_b7)
+        #
+        # First, build a valid chain (b0 ~ b5):
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b3, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b4, c_a = mk_block(b3, 4, c_a), c_a.evolve()
+        b5, c_a = mk_block(b4, 5, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b2.id()])
+
+        # And deliberately, add invalid blocks (b6 ~ b7):
+        fake_coin = Coin(sk=2, value=10)
+        b6, fake_coin = mk_block(b2, 3, fake_coin), fake_coin.evolve()
+        b7, fake_coin = mk_block(b6, 4, fake_coin), fake_coin.evolve()
+        apply_invalid_block_to_ledger_state(peer, b6)
+        apply_invalid_block_to_ledger_state(peer, b7)
+        # the tip shouldn't be changed.
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b2.id()])
+
+        # Start a sync from a checkpoint where all ancestors are valid:
+        #            checkpoint
+        #                ||
+        # () - () - () - b4
+        #
+        # Result: A fork is backfilled, but without invalid blocks.
+        # b0 - b1 - b3 - b4 - b5 == tip
+        #   \
+        #    b2
+        checkpoint = peer.ledger_state[b4.id()]
+        local = Follower(genesis, config)
+        local.apply_checkpoint(checkpoint)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+        self.assertNotIn(b6.id(), local.ledger_state)
+        self.assertNotIn(b7.id(), local.ledger_state)
+
+    def test_reject_invalid_blocks_from_backfilling_block_tree(self):
+        # Prepare a peer with invalid blocks in a fork:
+        # b0 - b1 - b3 - b4 - b5 == tip
+        #   \
+        #    b2 - (invalid_b6) - (invalid_b7)
+        #
+        # First, build a valid chain (b0 ~ b5):
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b3, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b4, c_a = mk_block(b3, 4, c_a), c_a.evolve()
+        b5, c_a = mk_block(b4, 5, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b2.id()])
+
+        # And deliberately, add invalid blocks (b6 ~ b7):
+        fake_coin = Coin(sk=2, value=10)
+        b6, fake_coin = mk_block(b2, 3, fake_coin), fake_coin.evolve()
+        b7, fake_coin = mk_block(b6, 4, fake_coin), fake_coin.evolve()
+        apply_invalid_block_to_ledger_state(peer, b6)
+        apply_invalid_block_to_ledger_state(peer, b7)
+        # the tip shouldn't be changed.
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b2.id()])
+
+        # Start a sync from a checkpoint where some ancestors are invalid:
+        # ()           checkpoint
+        #   \              ||
+        #    () - () - (invalid_b7)
+        #
+        # Result: `InvalidBlockTree` exception
+        checkpoint = peer.ledger_state[b7.id()]
+        local = Follower(genesis, config)
+        local.apply_checkpoint(checkpoint)
+        with self.assertRaises(InvalidBlockTree):
+            sync(local, [peer])
+
+
+def apply_invalid_block_to_ledger_state(follower: Follower, block: BlockHeader):
+    state = follower.ledger_state[block.parent].copy()
+    state.apply(block)
+    follower.ledger_state[block.id()] = state
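Usage sketch (not part of the patch): how a node is expected to drive the new sync API, mirroring the scenarios in cryptarchia/test_sync.py. It assumes the mk_config and mk_genesis_state helpers from cryptarchia/test_common.py; everything else comes from the modules changed above.

from cryptarchia.cryptarchia import Coin, Follower
from cryptarchia.sync import sync
from cryptarchia.test_common import mk_config, mk_genesis_state

coin = Coin(sk=0, value=10)
config = mk_config([coin])
genesis = mk_genesis_state([coin])

# A peer that already follows the chain, and a fresh local node.
peer = Follower(genesis, config)
local = Follower(genesis, config)

# ... the peer keeps processing blocks via peer.on_block(...) ...

# Bring the local block tree up to date with the peer's view.
sync(local, [peer])
assert local.tip() == peer.tip()

# Checkpoint sync: start from a trusted ledger state instead of genesis.
# sync() then backfills whatever blocks the fork choice rule still needs.
checkpoint = peer.ledger_state[peer.tip_id()]
local_from_checkpoint = Follower(genesis, config)
local_from_checkpoint.apply_checkpoint(checkpoint)
sync(local_from_checkpoint, [peer])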
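Follower.blocks_by_slot notes that a real implementation should use an optimized data structure instead of scanning ledger_state on every call. One possible shape, sketched purely as an illustration (the SlotIndex class below is hypothetical and not part of the patch), is a slot-keyed index maintained as blocks are accepted:

from bisect import insort
from collections import defaultdict
from typing import Generator

from cryptarchia.cryptarchia import BlockHeader, Slot


class SlotIndex:
    # Hypothetical index: absolute slot -> blocks accepted at that slot,
    # plus a sorted list of slots for range queries.
    def __init__(self):
        self.by_slot: dict[int, list[BlockHeader]] = defaultdict(list)
        self.slots: list[int] = []

    def add(self, block: BlockHeader):
        s = block.slot.absolute_slot
        if s not in self.by_slot:
            insort(self.slots, s)
        self.by_slot[s].append(block)

    def blocks_from(self, from_slot: Slot) -> Generator[BlockHeader, None, None]:
        # Same contract as Follower.blocks_by_slot: yield blocks whose slot is
        # >= from_slot, in slot order.
        for s in self.slots:
            if s >= from_slot.absolute_slot:
                yield from self.by_slot[s]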