diff --git a/cryptarchia/cryptarchia.py b/cryptarchia/cryptarchia.py index b510962..a8e3ac0 100644 --- a/cryptarchia/cryptarchia.py +++ b/cryptarchia/cryptarchia.py @@ -1,4 +1,4 @@ -from typing import TypeAlias, List, Dict +from typing import TypeAlias, List, Dict, Generator from hashlib import sha256, blake2b from math import floor from copy import deepcopy @@ -124,6 +124,9 @@ class Slot: def __lt__(self, other): return self.absolute_slot < other.absolute_slot + def __hash__(self): + return hash(self.absolute_slot) + @dataclass class Coin: @@ -248,6 +251,9 @@ class BlockHeader: self.update_header_hash(h) return h.digest() + def __hash__(self): + return hash(self.id()) + @dataclass class LedgerState: @@ -372,7 +378,7 @@ class Follower: # TODO: verify blocks are not in the 'future' if block.parent not in self.ledger_state: logger.warning("We have not seen block parent") - return False + raise ParentNotFound current_state = self.ledger_state[block.parent].copy() @@ -441,19 +447,25 @@ class Follower: and current_state.verify_unspent(proof.nullifier) ) - def on_block(self, block: BlockHeader): + def apply_block_to_ledger_state(self, block: BlockHeader) -> bool: if block.id() in self.ledger_state: logger.warning("dropping already processed block") - return + return False if not self.validate_header(block): logger.warning("invalid header") - return + return False new_state = self.ledger_state[block.parent].copy() new_state.apply(block) self.ledger_state[block.id()] = new_state + return True + + def on_block(self, block: BlockHeader): + if not self.apply_block_to_ledger_state(block): + return + if block.parent == self.local_chain: # simply extending the local chain self.local_chain = block.id() @@ -471,6 +483,15 @@ class Follower: self.forks.remove(new_tip) self.local_chain = new_tip + def apply_checkpoint(self, checkpoint: LedgerState): + checkpoint_block_id = checkpoint.block.id() + self.ledger_state[checkpoint_block_id] = checkpoint + if self.local_chain != self.genesis_state.block.id(): + self.forks.append(self.local_chain) + if checkpoint_block_id in self.forks: + self.forks.remove(checkpoint_block_id) + self.local_chain = checkpoint_block_id + def unimported_orphans(self) -> list[BlockHeader]: """ Returns all unimported orphans w.r.t. the given tip's state. @@ -482,9 +503,10 @@ class Follower: orphans = [] for fork in self.forks: - _, fork_depth = common_prefix_depth(tip, fork, self.ledger_state) - for block_state in chain_suffix(fork, fork_depth, self.ledger_state): - b = block_state.block + _, _, fork_depth, fork_suffix = common_prefix_depth( + tip, fork, self.ledger_state + ) + for b in fork_suffix: if b.leader_proof.nullifier not in tip_state.nullifiers: tip_state.nullifiers.add(b.leader_proof.nullifier) orphans += [b] @@ -592,6 +614,17 @@ class Follower: ) return int(prev_epoch.inferred_total_active_stake - h * blocks_per_slot_err) + def blocks_by_slot(self, from_slot: Slot) -> Generator[BlockHeader, None, None]: + # Returns blocks in the given range of slots in order of slot + # NOTE: In real implementation, this should be done by optimized data structures. 
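+        # Reference approach: bucket every known block header by slot, then yield
+        # the buckets in ascending slot order.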
+ blocks_by_slot: dict[Slot, list[BlockHeader]] = defaultdict(list) + for state in self.ledger_state.values(): + if from_slot <= state.block.slot: + blocks_by_slot[state.block.slot].append(state.block) + for slot in sorted(blocks_by_slot.keys()): + for block in blocks_by_slot[slot]: + yield block + def phi(f: float, alpha: float) -> float: """ @@ -646,39 +679,68 @@ class Leader: ) -def iter_chain(tip: Id, states: Dict[Id, LedgerState]): +def iter_chain( + tip: Id, states: Dict[Id, LedgerState] +) -> Generator[LedgerState, None, None]: while tip in states: yield states[tip] tip = states[tip].block.parent -def chain_suffix(tip: Id, n: int, states: Dict[Id, LedgerState]) -> list[LedgerState]: - return list(reversed(list(itertools.islice(iter_chain(tip, states), n)))) +def iter_chain_blocks( + tip: Id, states: Dict[Id, LedgerState] +) -> Generator[BlockHeader, None, None]: + for state in iter_chain(tip, states): + yield state.block -def common_prefix_depth(a: Id, b: Id, states: Dict[Id, LedgerState]) -> (int, int): - a_blocks = iter_chain(a, states) - b_blocks = iter_chain(b, states) +def common_prefix_depth( + a: Id, b: Id, states: Dict[Id, LedgerState] +) -> tuple[int, list[BlockHeader], int, list[BlockHeader]]: + return common_prefix_depth_from_chains( + iter_chain_blocks(a, states), iter_chain_blocks(b, states) + ) + +def common_prefix_depth_from_chains( + a_blocks: Generator[BlockHeader, None, None], + b_blocks: Generator[BlockHeader, None, None], +) -> tuple[int, list[BlockHeader], int, list[BlockHeader]]: seen = {} + a_suffix: list[BlockHeader] = [] + b_suffix: list[BlockHeader] = [] depth = 0 while True: try: - a_block = next(a_blocks).block.id() - if a_block in seen: + a_block = next(a_blocks) + a_suffix.append(a_block) + a_block_id = a_block.id() + if a_block_id in seen: # we had seen this block from the fork chain - return depth, seen[a_block] + return ( + depth, + list(reversed(a_suffix[: depth + 1])), + seen[a_block_id], + list(reversed(b_suffix[: seen[a_block_id] + 1])), + ) - seen[a_block] = depth + seen[a_block_id] = depth except StopIteration: pass try: - b_block = next(b_blocks).block.id() - if b_block in seen: + b_block = next(b_blocks) + b_suffix.append(b_block) + b_block_id = b_block.id() + if b_block_id in seen: # we had seen the fork in the local chain - return seen[b_block], depth - seen[b_block] = depth + return ( + seen[b_block_id], + list(reversed(a_suffix[: seen[b_block_id] + 1])), + depth, + list(reversed(b_suffix[: depth + 1])), + ) + seen[b_block_id] = depth except StopIteration: pass @@ -687,13 +749,8 @@ def common_prefix_depth(a: Id, b: Id, states: Dict[Id, LedgerState]) -> (int, in assert False -def chain_density( - head: Id, slot: Slot, reorg_depth: int, states: Dict[Id, LedgerState] -) -> int: - assert type(head) == Id - chain = iter_chain(head, states) - segment = itertools.islice(chain, reorg_depth) - return sum(1 for b in segment if b.block.slot < slot) +def chain_density(chain: list[BlockHeader], slot: Slot) -> int: + return sum(1 for b in chain if b.slot < slot) def block_children(states: Dict[Id, LedgerState]) -> Dict[Id, set[Id]]: @@ -723,7 +780,9 @@ def maxvalid_bg( cmax = local_chain for fork in forks: - cmax_depth, fork_depth = common_prefix_depth(cmax, fork, states) + cmax_depth, cmax_suffix, fork_depth, fork_suffix = common_prefix_depth( + cmax, fork, states + ) if cmax_depth <= k: # Longest chain fork choice rule if cmax_depth < fork_depth: @@ -731,11 +790,11 @@ def maxvalid_bg( else: # The chain is forking too much, we need to pay a bit more 
attention # In particular, select the chain that is the densest after the fork - cmax_divergent_block = chain_suffix(cmax, cmax_depth, states)[0].block + cmax_divergent_block = cmax_suffix[0] forking_slot = Slot(cmax_divergent_block.slot.absolute_slot + s) - cmax_density = chain_density(cmax, forking_slot, cmax_depth, states) - fork_density = chain_density(fork, forking_slot, fork_depth, states) + cmax_density = chain_density(cmax_suffix, forking_slot) + fork_density = chain_density(fork_suffix, forking_slot) if cmax_density < fork_density: cmax = fork @@ -743,5 +802,9 @@ def maxvalid_bg( return cmax +class ParentNotFound(Exception): + pass + + if __name__ == "__main__": pass diff --git a/cryptarchia/sync.py b/cryptarchia/sync.py new file mode 100644 index 0000000..c87549a --- /dev/null +++ b/cryptarchia/sync.py @@ -0,0 +1,117 @@ +from collections import defaultdict +from typing import Generator + +from cryptarchia.cryptarchia import ( + BlockHeader, + Follower, + Id, + ParentNotFound, + Slot, + common_prefix_depth_from_chains, + iter_chain_blocks, +) + + +def sync(local: Follower, peers: list[Follower]): + # Syncs the local block tree with the peers, starting from the local tip. + # This covers the case where the local tip is not on the latest honest chain anymore. + + # Fetch blocks from the peers in the range of slots from the local tip to the latest tip. + # Gather orphaned blocks, which are blocks from forks that are absent in the local block tree. + start_slot = local.tip().slot + orphans: set[BlockHeader] = set() + # Filter and group peers by their tip to minimize the number of fetches. + for group in filter_and_group_peers_by_tip(peers, start_slot).values(): + for block in fetch_blocks_by_slot(group, start_slot): + try: + local.on_block(block) + orphans.discard(block) + except ParentNotFound: + orphans.add(block) + + # Backfill the orphan forks starting from the orphan blocks with applying fork choice rule. + # + # Sort the orphan blocks by slot in descending order to minimize the number of backfillings. + for orphan in sorted(orphans, key=lambda b: b.slot, reverse=True): + # Skip the orphan block processed during the previous backfillings. + if orphan not in local.ledger_state: + backfill_fork(local, peers, orphan) + + +def filter_and_group_peers_by_tip( + peers: list[Follower], start_slot: Slot +) -> dict[BlockHeader, list[Follower]]: + # Group peers by their tip. + # Filter only the peers whose tip is ahead of the start_slot. + groups: dict[BlockHeader, list[Follower]] = defaultdict(list) + for peer in peers: + if peer.tip().slot.absolute_slot > start_slot.absolute_slot: + groups[peer.tip()].append(peer) + return groups + + +def fetch_blocks_by_slot( + peers: list[Follower], start_slot: Slot +) -> Generator[BlockHeader, None, None]: + # Fetch blocks in the given range of slots from one of the peers. + # Blocks should be returned in order of slot. + # If a peer fails, try the next peer. + for peer in peers: + try: + for block in peer.blocks_by_slot(start_slot): + yield block + # Update start_slot for the potential try with the next peer. + start_slot = block.slot + # The peer successfully returned all blocks. No need to try the next peer. + break + except Exception: + continue + + +def backfill_fork(local: Follower, peers: list[Follower], fork_tip: BlockHeader): + # Backfills a fork, which is absent in the local block tree, by fetching blocks from the peers. + # During backfilling, the fork choice rule is continuously applied. 
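+    # (Each block of the backfilled fork suffix goes through `on_block`, so the
+    # local tip can switch to the fork as soon as the fork choice rule prefers it.)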
+ # + # If necessary, the local honest chain is also backfilled for the fork choice rule. + # This can happen if the honest chain has been built not from the genesis + # (e.g. checkpoint sync, or a partially backfilled chain). + + _, tip_suffix, _, fork_suffix = common_prefix_depth_from_chains( + fetch_chain_blocks(local.tip_id(), local, peers), + fetch_chain_blocks(fork_tip.id(), local, peers), + ) + + # First, backfill the local honest chain if some blocks are missing. + # Just applying the blocks to the ledger state is enough, + # instead of calling `on_block` which updates the tip (by fork choice). + # because we're just backfilling the old part of the current tip. + for block in tip_suffix: + local.apply_block_to_ledger_state(block) + + # Then, process blocks in the fork suffix by applying fork choice rule. + for block in fork_suffix: + local.on_block(block) + + +def fetch_chain_blocks( + tip: Id, local: Follower, peers: list[Follower] +) -> Generator[BlockHeader, None, None]: + # Fetches a chain of blocks from the peers, starting from the given tip to the genesis. + # Attempts to extend the chain as much as possible by querying multiple peers, + # considering that not all peers may have the full chain (from the genesis). + + id = tip + # First, try to iterate the chain from the local block tree. + for block in iter_chain_blocks(id, local.ledger_state): + yield block + if block.id() == local.genesis_state.block.id(): + return + id = block.parent + + # Try to continue by fetching the remaining blocks from the peers + for peer in peers: + for block in iter_chain_blocks(id, peer.ledger_state): + yield block + if block.id() == local.genesis_state.block.id(): + return + id = block.parent diff --git a/cryptarchia/test_fork_choice.py b/cryptarchia/test_fork_choice.py index 1a283c5..24b295d 100644 --- a/cryptarchia/test_fork_choice.py +++ b/cryptarchia/test_fork_choice.py @@ -36,28 +36,138 @@ class TestForkChoice(TestCase): b.id(): LedgerState(block=b) for b in [b0, b1, b2, b3, b4, b5, b6, b7] } - assert (d := common_prefix_depth(b0.id(), b0.id(), states)) == (0, 0), d - assert (d := common_prefix_depth(b1.id(), b0.id(), states)) == (1, 0), d - assert (d := common_prefix_depth(b0.id(), b1.id(), states)) == (0, 1), d - assert (d := common_prefix_depth(b1.id(), b1.id(), states)) == (0, 0), d - assert (d := common_prefix_depth(b2.id(), b0.id(), states)) == (2, 0), d - assert (d := common_prefix_depth(b0.id(), b2.id(), states)) == (0, 2), d - assert (d := common_prefix_depth(b3.id(), b0.id(), states)) == (3, 0), d - assert (d := common_prefix_depth(b0.id(), b3.id(), states)) == (0, 3), d - assert (d := common_prefix_depth(b1.id(), b4.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b4.id(), b1.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b1.id(), b5.id(), states)) == (1, 2), d - assert (d := common_prefix_depth(b5.id(), b1.id(), states)) == (2, 1), d - assert (d := common_prefix_depth(b2.id(), b5.id(), states)) == (2, 2), d - assert (d := common_prefix_depth(b5.id(), b2.id(), states)) == (2, 2), d - assert (d := common_prefix_depth(b3.id(), b5.id(), states)) == (3, 2), d - assert (d := common_prefix_depth(b5.id(), b3.id(), states)) == (2, 3), d - assert (d := common_prefix_depth(b3.id(), b6.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b6.id(), b3.id(), states)) == (1, 1), d - assert (d := common_prefix_depth(b3.id(), b7.id(), states)) == (1, 2), d - assert (d := common_prefix_depth(b7.id(), b3.id(), states)) == (2, 1), d - assert (d := 
common_prefix_depth(b5.id(), b7.id(), states)) == (2, 4), d - assert (d := common_prefix_depth(b7.id(), b5.id(), states)) == (4, 2), d + assert (d := common_prefix_depth(b0.id(), b0.id(), states)) == ( + 0, + [b0], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b1.id(), b0.id(), states)) == ( + 1, + [b0, b1], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b0.id(), b1.id(), states)) == ( + 0, + [b0], + 1, + [b0, b1], + ), d + assert (d := common_prefix_depth(b1.id(), b1.id(), states)) == ( + 0, + [b1], + 0, + [b1], + ), d + assert (d := common_prefix_depth(b2.id(), b0.id(), states)) == ( + 2, + [b0, b1, b2], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b0.id(), b2.id(), states)) == ( + 0, + [b0], + 2, + [b0, b1, b2], + ), d + assert (d := common_prefix_depth(b3.id(), b0.id(), states)) == ( + 3, + [b0, b1, b2, b3], + 0, + [b0], + ), d + assert (d := common_prefix_depth(b0.id(), b3.id(), states)) == ( + 0, + [b0], + 3, + [b0, b1, b2, b3], + ), d + assert (d := common_prefix_depth(b1.id(), b4.id(), states)) == ( + 1, + [b0, b1], + 1, + [b0, b4], + ), d + assert (d := common_prefix_depth(b4.id(), b1.id(), states)) == ( + 1, + [b0, b4], + 1, + [b0, b1], + ), d + assert (d := common_prefix_depth(b1.id(), b5.id(), states)) == ( + 1, + [b0, b1], + 2, + [b0, b4, b5], + ), d + assert (d := common_prefix_depth(b5.id(), b1.id(), states)) == ( + 2, + [b0, b4, b5], + 1, + [b0, b1], + ), d + assert (d := common_prefix_depth(b2.id(), b5.id(), states)) == ( + 2, + [b0, b1, b2], + 2, + [b0, b4, b5], + ), d + assert (d := common_prefix_depth(b5.id(), b2.id(), states)) == ( + 2, + [b0, b4, b5], + 2, + [b0, b1, b2], + ), d + assert (d := common_prefix_depth(b3.id(), b5.id(), states)) == ( + 3, + [b0, b1, b2, b3], + 2, + [b0, b4, b5], + ), d + assert (d := common_prefix_depth(b5.id(), b3.id(), states)) == ( + 2, + [b0, b4, b5], + 3, + [b0, b1, b2, b3], + ), d + assert (d := common_prefix_depth(b3.id(), b6.id(), states)) == ( + 1, + [b2, b3], + 1, + [b2, b6], + ), d + assert (d := common_prefix_depth(b6.id(), b3.id(), states)) == ( + 1, + [b2, b6], + 1, + [b2, b3], + ), d + assert (d := common_prefix_depth(b3.id(), b7.id(), states)) == ( + 1, + [b2, b3], + 2, + [b2, b6, b7], + ), d + assert (d := common_prefix_depth(b7.id(), b3.id(), states)) == ( + 2, + [b2, b6, b7], + 1, + [b2, b3], + ), d + assert (d := common_prefix_depth(b5.id(), b7.id(), states)) == ( + 2, + [b0, b4, b5], + 4, + [b0, b1, b2, b6, b7], + ), d + assert (d := common_prefix_depth(b7.id(), b5.id(), states)) == ( + 4, + [b0, b1, b2, b6, b7], + 2, + [b0, b4, b5], + ), d def test_fork_choice_long_sparse_chain(self): # The longest chain is not dense after the fork diff --git a/cryptarchia/test_ledger_state_update.py b/cryptarchia/test_ledger_state_update.py index d67abd4..32425e5 100644 --- a/cryptarchia/test_ledger_state_update.py +++ b/cryptarchia/test_ledger_state_update.py @@ -2,7 +2,7 @@ from unittest import TestCase import numpy as np -from .cryptarchia import Follower, Coin, iter_chain +from .cryptarchia import Follower, Coin, ParentNotFound, iter_chain from .test_common import mk_config, mk_block, mk_genesis_state @@ -126,7 +126,8 @@ class TestLedgerStateUpdate(TestCase): # Nothing changes from the local chain and forks. 
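+        # With the stricter header validation, an unknown parent now raises
+        # ParentNotFound instead of being silently ignored.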
         unknown_block = mk_block(parent=block_5, slot=2, coin=coins[5])
         block_6 = mk_block(parent=unknown_block, slot=2, coin=coins[6])
-        follower.on_block(block_6)
+        with self.assertRaises(ParentNotFound):
+            follower.on_block(block_6)
         assert follower.tip() == block_3
         assert len(follower.forks) == 2, f"{len(follower.forks)}"
         assert follower.forks[0] == block_4.id()
diff --git a/cryptarchia/test_sync.py b/cryptarchia/test_sync.py
new file mode 100644
index 0000000..b0ce4ad
--- /dev/null
+++ b/cryptarchia/test_sync.py
@@ -0,0 +1,285 @@
+from unittest import TestCase
+
+from cryptarchia.cryptarchia import Coin, Follower
+from cryptarchia.sync import sync
+from cryptarchia.test_common import mk_block, mk_config, mk_genesis_state
+
+
+class TestSync(TestCase):
+    def test_sync_single_chain_from_genesis(self):
+        # b0 - b1 - b2 - b3
+        coin = Coin(sk=0, value=10)
+        config = mk_config([coin])
+        genesis = mk_genesis_state([coin])
+        peer = Follower(genesis, config)
+        b0, coin = mk_block(genesis.block, 1, coin), coin.evolve()
+        b1, coin = mk_block(b0, 2, coin), coin.evolve()
+        b2, coin = mk_block(b1, 3, coin), coin.evolve()
+        b3, coin = mk_block(b2, 4, coin), coin.evolve()
+        for b in [b0, b1, b2, b3]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b3)
+        self.assertEqual(peer.forks, [])
+
+        local = Follower(genesis, config)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_single_chain_from_middle(self):
+        # b0 - b1 - b2 - b3
+        coin = Coin(sk=0, value=10)
+        config = mk_config([coin])
+        genesis = mk_genesis_state([coin])
+        peer = Follower(genesis, config)
+        b0, coin = mk_block(genesis.block, 1, coin), coin.evolve()
+        b1, coin = mk_block(b0, 2, coin), coin.evolve()
+        b2, coin = mk_block(b1, 3, coin), coin.evolve()
+        b3, coin = mk_block(b2, 4, coin), coin.evolve()
+        for b in [b0, b1, b2, b3]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b3)
+        self.assertEqual(peer.forks, [])
+
+        local = Follower(genesis, config)
+        # add until b1
+        for b in [b0, b1]:
+            local.on_block(b)
+        # start syncing from b1
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_forks_from_genesis(self):
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b4.id()])
+
+        local = Follower(genesis, config)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_forks_from_middle(self):
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b4.id()])
+
+        # b0 - b1
+        #   \
+        #    b3
+        local = Follower(genesis, config)
+        for b in [b0, b1, b3]:
+            local.on_block(b)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+
+    def test_sync_forks_by_backfilling(self):
+        # b0 - b1 - b2 - b5 == tip
+        #   \
+        #    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        peer = Follower(genesis, config)
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        for b in [b0, b1, b2, b3, b4, b5]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b5)
+        self.assertEqual(peer.forks, [b4.id()])
+        self.assertEqual(len(peer.ledger_state), 7)
+
+        # b0 - b1
+        local = Follower(genesis, config)
+        for b in [b0, b1]:
+            local.on_block(b)
+        sync(local, [peer])
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+        self.assertEqual(len(local.ledger_state), len(peer.ledger_state))
+
+    def test_sync_multiple_peers_from_genesis(self):
+        # Peer-0:             b5
+        #                    /
+        # Peer-1: b0 - b1 - b2
+        #           \
+        # Peer-2:    b3 - b4
+        c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10)
+        config = mk_config([c_a, c_b])
+        genesis = mk_genesis_state([c_a, c_b])
+        b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve()
+        b1, c_a = mk_block(b0, 2, c_a), c_a.evolve()
+        b2, c_a = mk_block(b1, 3, c_a), c_a.evolve()
+        b3, c_b = mk_block(b0, 2, c_b), c_b.evolve()
+        b4, c_b = mk_block(b3, 3, c_b), c_b.evolve()
+        b5, c_a = mk_block(b2, 4, c_a), c_a.evolve()
+        peer0 = Follower(genesis, config)
+        for b in [b0, b1, b2, b5]:
+            peer0.on_block(b)
+        self.assertEqual(peer0.tip(), b5)
+        self.assertEqual(peer0.forks, [])
+        peer1 = Follower(genesis, config)
+        for b in [b0, b1, b2]:
+            peer1.on_block(b)
+        self.assertEqual(peer1.tip(), b2)
+        self.assertEqual(peer1.forks, [])
+        peer2 = Follower(genesis, config)
+        for b in [b0, b3, b4]:
+            peer2.on_block(b)
+        self.assertEqual(peer2.tip(), b4)
+        self.assertEqual(peer2.forks, [])
+
+        local = Follower(genesis, config)
+        sync(local, [peer0, peer1, peer2])
+        self.assertEqual(local.tip(), b5)
+        self.assertEqual(local.forks, [b4.id()])
+        self.assertEqual(len(local.ledger_state), 7)
+
+
+class TestSyncFromCheckpoint(TestCase):
+    def test_sync_single_chain(self):
+        # b0 - b1 - b2 - b3
+        #           ||
+        #       checkpoint
+        coin = Coin(sk=0, value=10)
+        config = mk_config([coin])
+        genesis = mk_genesis_state([coin])
+        peer = Follower(genesis, config)
+        b0, coin = mk_block(genesis.block, 1, coin), coin.evolve()
+        b1, coin = mk_block(b0, 2, coin), coin.evolve()
+        b2, coin = mk_block(b1, 3, coin), coin.evolve()
+        b3, coin = mk_block(b2, 4, coin), coin.evolve()
+        for b in [b0, b1, b2, b3]:
+            peer.on_block(b)
+        self.assertEqual(peer.tip(), b3)
+        self.assertEqual(peer.forks, [])
+
+        # Start from the checkpoint:
+        # () - () - b2
+        #           ||
+        #       checkpoint
+        checkpoint = peer.ledger_state[b2.id()]
+        local = Follower(genesis, config)
+        local.apply_checkpoint(checkpoint)
+        sync(local, [peer])
+        # Result:
+        # () - () - b2 - b3
+        #           ||
+        #       checkpoint
+        self.assertEqual(local.tip(), peer.tip())
+        self.assertEqual(local.forks, peer.forks)
+        self.assertEqual(
set(local.ledger_state.keys()), set([genesis.block.id(), b2.id(), b3.id()]) + ) + + def test_sync_forks(self): + # checkpoint + # || + # b0 - b1 - b2 - b5 == tip + # \ + # b3 - b4 + c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10) + config = mk_config([c_a, c_b]) + genesis = mk_genesis_state([c_a, c_b]) + peer = Follower(genesis, config) + b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve() + b1, c_a = mk_block(b0, 2, c_a), c_a.evolve() + b2, c_a = mk_block(b1, 3, c_a), c_a.evolve() + b3, c_b = mk_block(b0, 2, c_b), c_b.evolve() + b4, c_b = mk_block(b3, 3, c_b), c_b.evolve() + b5, c_a = mk_block(b2, 4, c_a), c_a.evolve() + for b in [b0, b1, b2, b3, b4, b5]: + peer.on_block(b) + self.assertEqual(peer.tip(), b5) + self.assertEqual(peer.forks, [b4.id()]) + + # Start from the checkpoint: + # checkpoint + # || + # () - () - b2 + checkpoint = peer.ledger_state[b2.id()] + local = Follower(genesis, config) + local.apply_checkpoint(checkpoint) + sync(local, [peer]) + # Result: + # b0 - b1 - b2 - b5 == tip + # \ + # b3 - b4 + self.assertEqual(local.tip(), peer.tip()) + self.assertEqual(local.forks, peer.forks) + self.assertEqual(set(local.ledger_state.keys()), set(peer.ledger_state.keys())) + + def test_sync_from_dishonest_checkpoint(self): + # Peer0: b0 - b1 - b2 - b5 == tip + # \ + # Peer1: b3 - b4 + # || + # checkpoint + c_a, c_b = Coin(sk=0, value=10), Coin(sk=1, value=10) + config = mk_config([c_a, c_b]) + genesis = mk_genesis_state([c_a, c_b]) + b0, c_a = mk_block(genesis.block, 1, c_a), c_a.evolve() + b1, c_a = mk_block(b0, 2, c_a), c_a.evolve() + b2, c_a = mk_block(b1, 3, c_a), c_a.evolve() + b3, c_b = mk_block(b0, 2, c_b), c_b.evolve() + b4, c_b = mk_block(b3, 3, c_b), c_b.evolve() + b5, c_a = mk_block(b2, 4, c_a), c_a.evolve() + peer0 = Follower(genesis, config) + for b in [b0, b1, b2, b5]: + peer0.on_block(b) + self.assertEqual(peer0.tip(), b5) + self.assertEqual(peer0.forks, []) + peer1 = Follower(genesis, config) + for b in [b0, b3, b4]: + peer1.on_block(b) + self.assertEqual(peer1.tip(), b4) + self.assertEqual(peer1.forks, []) + + # Start from the checkpoint: + # checkpoint + # || + # () - () - b4 + checkpoint = peer1.ledger_state[b4.id()] + local = Follower(genesis, config) + local.apply_checkpoint(checkpoint) + sync(local, [peer0, peer1]) + # b0 - b1 - b2 - b5 == tip + # \ + # b3 - b4 + self.assertEqual(local.tip(), b5) + self.assertEqual(local.forks, [b4.id()]) + self.assertEqual(len(local.ledger_state.keys()), 7)
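+
+
+if __name__ == "__main__":
+    # Minimal usage sketch of `sync` (illustrative only; reuses the helpers already
+    # imported in this file): a fresh local follower catches up to a single peer
+    # that is one block ahead of genesis.
+    coin = Coin(sk=0, value=10)
+    config = mk_config([coin])
+    genesis = mk_genesis_state([coin])
+    peer = Follower(genesis, config)
+    b0, coin = mk_block(genesis.block, 1, coin), coin.evolve()
+    peer.on_block(b0)
+    local = Follower(genesis, config)
+    sync(local, [peer])
+    assert local.tip() == peer.tip() == b0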