from typing import TypeAlias, List, Optional
from hashlib import sha256, blake2b
from math import floor
from copy import deepcopy
from itertools import chain
import functools
from dataclasses import dataclass, field, replace
import logging

import numpy as np

logger = logging.getLogger(__name__)


Id: TypeAlias = bytes


@dataclass(frozen=True)
class Epoch:
    # identifier of the epoch, counting incrementally from 0
    epoch: int

    def prev(self) -> "Epoch":
        return Epoch(self.epoch - 1)


@dataclass
class TimeConfig:
    # How long a slot lasts, in seconds
    slot_duration: int
    # Start of the first epoch, as a unix timestamp (second precision)
    chain_start_time: int


@dataclass
class Config:
    k: int  # The depth of a block before it is considered immutable.
    active_slot_coeff: float  # 'f', the rate of occupied slots

    # The stake distribution snapshot is taken at the beginning of the previous epoch.
    # This parameter controls how many base periods we wait for it to stabilize;
    # the stabilization window in slots is computed as
    # epoch_stake_distribution_stabilization * int(floor(k / f))
    epoch_stake_distribution_stabilization: int
    # This parameter controls how many `base periods` we wait after the stake
    # distribution snapshot has stabilized to take the nonce snapshot.
    epoch_period_nonce_buffer: int
    # This parameter controls how many `base periods` we wait for the nonce
    # snapshot to be considered stabilized.
    epoch_period_nonce_stabilization: int

    # -- Stake Relativization Params
    initial_total_active_stake: int  # D_0
    total_active_stake_learning_rate: float  # beta

    time: TimeConfig

    @staticmethod
    def cryptarchia_v0_0_1(initial_total_active_stake) -> "Config":
        return Config(
            k=2160,
            active_slot_coeff=0.05,
            epoch_stake_distribution_stabilization=3,
            epoch_period_nonce_buffer=3,
            epoch_period_nonce_stabilization=4,
            initial_total_active_stake=initial_total_active_stake,
            total_active_stake_learning_rate=0.8,
            time=TimeConfig(
                slot_duration=1,
                chain_start_time=0,
            ),
        )

    @property
    def base_period_length(self) -> int:
        return int(floor(self.k / self.active_slot_coeff))

    @property
    def epoch_relative_nonce_slot(self) -> int:
        return (
            self.epoch_stake_distribution_stabilization + self.epoch_period_nonce_buffer
        ) * self.base_period_length

    @property
    def epoch_length(self) -> int:
        return (
            self.epoch_relative_nonce_slot
            + self.epoch_period_nonce_stabilization * self.base_period_length
        )

    @property
    def s(self):
        """
        The Security Parameter. This parameter controls how many slots one must
        wait before we have high confidence that k blocks have been produced.
        """
        return self.base_period_length * 3

    def replace(self, **kwarg) -> "Config":
        return replace(self, **kwarg)


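# A minimal sketch (not part of the spec) of how the epoch schedule follows from
# the v0.0.1 parameters above. The helper name `_example_epoch_schedule` and the
# placeholder stake value are assumptions added purely for illustration.
def _example_epoch_schedule():
    config = Config.cryptarchia_v0_0_1(initial_total_active_stake=1000)
    # base period = floor(k / f) = floor(2160 / 0.05) = 43200 slots for these parameters
    base = config.base_period_length
    # the nonce snapshot is taken (3 + 3) base periods into the epoch
    assert config.epoch_relative_nonce_slot == 6 * base
    # the epoch spans (3 + 3 + 4) base periods in total
    assert config.epoch_length == 10 * base
    # the security parameter s is 3 base periods
    assert config.s == 3 * base

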
# An absolute unique identifier of a slot, counting incrementally from 0
@dataclass
@functools.total_ordering
class Slot:
    absolute_slot: int

    @staticmethod
    def from_unix_timestamp_s(config: TimeConfig, timestamp_s: int) -> "Slot":
        absolute_slot = (timestamp_s - config.chain_start_time) // config.slot_duration
        return Slot(absolute_slot)

    def epoch(self, config: Config) -> Epoch:
        return Epoch(self.absolute_slot // config.epoch_length)

    def encode(self) -> bytes:
        return int.to_bytes(self.absolute_slot, length=8, byteorder="big")

    def __eq__(self, other):
        return self.absolute_slot == other.absolute_slot

    def __lt__(self, other):
        return self.absolute_slot < other.absolute_slot


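# A minimal sketch (not part of the spec) of the timestamp -> slot -> epoch
# mapping. `_example_slot_timing` and the timestamp used are illustrative
# assumptions only.
def _example_slot_timing():
    config = Config.cryptarchia_v0_0_1(initial_total_active_stake=1000)
    # with slot_duration=1 and chain_start_time=0, the timestamp equals the slot number
    slot = Slot.from_unix_timestamp_s(config.time, 86400)
    assert slot == Slot(86400)
    # the v0.0.1 epoch length is 432000 slots, so slot 86400 still falls in epoch 0
    assert slot.epoch(config) == Epoch(0)

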
@dataclass
class Coin:
    sk: int
    value: int
    nonce: bytes = bytes(32)

    @property
    def pk(self) -> int:
        return self.sk

    def encode_sk(self) -> bytes:
        return int.to_bytes(self.sk, length=32, byteorder="big")

    def encode_pk(self) -> bytes:
        return int.to_bytes(self.pk, length=32, byteorder="big")

    def evolve(self) -> "Coin":
        h = blake2b(digest_size=32)
        h.update(b"coin-evolve")
        h.update(self.encode_sk())
        h.update(self.nonce)
        evolved_nonce = h.digest()

        return Coin(nonce=evolved_nonce, sk=self.sk, value=self.value)

    def commitment(self) -> Id:
        # TODO: mocked until CL is understood
        value_bytes = int.to_bytes(self.value, length=32, byteorder="big")

        h = sha256()
        h.update(b"coin-commitment")
        h.update(self.nonce)
        h.update(self.encode_pk())
        h.update(value_bytes)
        return h.digest()

    def nullifier(self) -> Id:
        # TODO: mocked until CL is understood
        value_bytes = int.to_bytes(self.value, length=32, byteorder="big")

        h = sha256()
        h.update(b"coin-nullifier")
        h.update(self.nonce)
        h.update(self.encode_pk())
        h.update(value_bytes)
        return h.digest()


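# A minimal sketch (not part of the spec) of the mocked coin lifecycle:
# commitment, nullifier and evolution. `_example_coin_lifecycle` and the
# sk/value used are illustrative assumptions only.
def _example_coin_lifecycle():
    coin = Coin(sk=1, value=100)
    # commitments and nullifiers are 32-byte identifiers derived by hashing
    assert len(coin.commitment()) == 32
    assert len(coin.nullifier()) == 32
    # evolving a coin derives a fresh nonce: the secret key and value are
    # preserved, but the commitment changes
    evolved = coin.evolve()
    assert evolved.sk == coin.sk and evolved.value == coin.value
    assert evolved.commitment() != coin.commitment()

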
@dataclass
class MockLeaderProof:
    commitment: Id
    nullifier: Id
    evolved_commitment: Id
    slot: Slot
    parent: Id

    @staticmethod
    def new(coin: Coin, slot: Slot, parent: Id):
        evolved_coin = coin.evolve()

        return MockLeaderProof(
            commitment=coin.commitment(),
            nullifier=coin.nullifier(),
            evolved_commitment=evolved_coin.commitment(),
            slot=slot,
            parent=parent,
        )

    def verify(self, slot: Slot, parent: Id):
        # TODO: verification not implemented
        return slot == self.slot and parent == self.parent


@dataclass
class BlockHeader:
    slot: Slot
    parent: Id
    content_size: int
    content_id: Id
    leader_proof: MockLeaderProof
    orphaned_proofs: List["BlockHeader"] = field(default_factory=list)

    def update_header_hash(self, h):
        # version byte
        h.update(b"\x01")

        # content size
        h.update(int.to_bytes(self.content_size, length=4, byteorder="big"))

        # content id
        assert len(self.content_id) == 32
        h.update(self.content_id)

        # slot
        h.update(self.slot.encode())

        # parent
        assert len(self.parent) == 32
        h.update(self.parent)

        # leader proof
        assert len(self.leader_proof.commitment) == 32
        h.update(self.leader_proof.commitment)
        assert len(self.leader_proof.nullifier) == 32
        h.update(self.leader_proof.nullifier)
        assert len(self.leader_proof.evolved_commitment) == 32
        h.update(self.leader_proof.evolved_commitment)

        # orphaned proofs
        h.update(int.to_bytes(len(self.orphaned_proofs), length=4, byteorder="big"))
        for proof in self.orphaned_proofs:
            proof.update_header_hash(h)

    # **Attention**:
    # The ID of a block header is defined as the 32-byte blake2b hash of its fields
    # as serialized in the format specified by the 'HEADER' rule in 'messages.abnf'.
    #
    # The following code is to be considered a reference implementation, mostly to be used for testing.
    def id(self) -> Id:
        h = blake2b(digest_size=32)
        self.update_header_hash(h)
        return h.digest()


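# A minimal sketch (not part of the spec) of building a header and deriving its
# 32-byte id. The parent/content ids, coin and slot below are arbitrary
# placeholder values for illustration.
def _example_header_id():
    parent = bytes(32)
    coin = Coin(sk=1, value=100)
    proof = MockLeaderProof.new(coin, Slot(1), parent)
    header = BlockHeader(
        slot=Slot(1),
        parent=parent,
        content_size=0,
        content_id=bytes(32),
        leader_proof=proof,
    )
    block_id = header.id()
    assert len(block_id) == 32
    # the id commits to every field, so changing e.g. the slot changes the id
    assert replace(header, slot=Slot(2)).id() != block_id

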
@dataclass
class Chain:
    blocks: List[BlockHeader]
    genesis: Id

    def tip_id(self) -> Id:
        if len(self.blocks) == 0:
            return self.genesis
        return self.tip().id()

    def tip(self) -> BlockHeader:
        return self.blocks[-1]

    def length(self) -> int:
        return len(self.blocks)

    def block_position(self, block: Id) -> Optional[int]:
        for i, b in enumerate(self.blocks):
            if b.id() == block:
                return i
        return None


@dataclass
class LedgerState:
    """
    A snapshot of the ledger state up to some block
    """

    block: Id = None

    # This nonce is used to derive the seed for the slot leader lottery.
    # It's updated at every block by hashing the previous nonce with the
    # leader proof's nullifier and the block's slot.
    #
    # NOTE that this does not prevent nonce grinding at the last slot
    # when the nonce snapshot is taken
    nonce: Id = None

    # set of spendable commitments
    commitments_spend: set[Id] = field(default_factory=set)

    # set of commitments eligible to lead
    commitments_lead: set[Id] = field(default_factory=set)

    # set of nullified coins
    nullifiers: set[Id] = field(default_factory=set)

    # -- Stake Relativization State
    # The number of observed leaders (blocks + orphans); this measurement is
    # used in inferring the total active stake in the network.
    leader_count: int = 0

    def copy(self):
        return LedgerState(
            block=self.block,
            nonce=self.nonce,
            commitments_spend=deepcopy(self.commitments_spend),
            commitments_lead=deepcopy(self.commitments_lead),
            nullifiers=deepcopy(self.nullifiers),
            leader_count=self.leader_count,
        )

    def replace(self, **kwarg) -> "LedgerState":
        return replace(self, **kwarg)

    def verify_eligible_to_spend(self, commitment: Id) -> bool:
        return commitment in self.commitments_spend

    def verify_eligible_to_lead(self, commitment: Id) -> bool:
        return commitment in self.commitments_lead

    def verify_unspent(self, nullifier: Id) -> bool:
        return nullifier not in self.nullifiers

    def apply(self, block: BlockHeader):
        assert block.parent == self.block

        h = blake2b(digest_size=32)
        h.update("epoch-nonce".encode(encoding="utf-8"))
        h.update(self.nonce)
        h.update(block.leader_proof.nullifier)
        h.update(block.slot.encode())

        self.nonce = h.digest()
        self.block = block.id()
        for proof in chain(block.orphaned_proofs, [block]):
            self.apply_leader_proof(proof.leader_proof)

    def apply_leader_proof(self, proof: MockLeaderProof):
        self.nullifiers.add(proof.nullifier)
        self.commitments_spend.add(proof.evolved_commitment)
        self.commitments_lead.add(proof.evolved_commitment)
        self.leader_count += 1


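# A minimal sketch (not part of the spec) of a single ledger state transition.
# The genesis block id/nonce and the coin are arbitrary placeholders for
# illustration.
def _example_ledger_transition():
    genesis = LedgerState(block=bytes(32), nonce=bytes(32))
    coin = Coin(sk=1, value=100)
    # a genesis state would normally already contain the coin's commitment
    genesis.commitments_spend.add(coin.commitment())
    genesis.commitments_lead.add(coin.commitment())

    proof = MockLeaderProof.new(coin, Slot(1), genesis.block)
    block = BlockHeader(
        slot=Slot(1),
        parent=genesis.block,
        content_size=0,
        content_id=bytes(32),
        leader_proof=proof,
    )

    state = genesis.copy()
    state.apply(block)
    # the nullifier is now spent, the evolved commitment becomes spendable and
    # leadable, and the state points at the new block
    assert not state.verify_unspent(proof.nullifier)
    assert state.verify_eligible_to_lead(proof.evolved_commitment)
    assert state.block == block.id()
    assert state.leader_count == 1

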
@dataclass
class EpochState:
    # for details of the snapshot schedule please see:
    # https://github.com/IntersectMBO/ouroboros-consensus/blob/fe245ac1d8dbfb563ede2fdb6585055e12ce9738/docs/website/contents/for-developers/Glossary.md#epoch-structure

    # Stake distribution snapshot is taken at the start of the previous epoch
    stake_distribution_snapshot: LedgerState

    # Nonce snapshot is taken 6k/f slots into the previous epoch
    nonce_snapshot: LedgerState

    # Total stake is inferred from watching the block production rate over the
    # past epoch. This inferred total stake is used to relativize stake values
    # in the leadership lottery.
    inferred_total_active_stake: int

    def verify_eligible_to_lead_due_to_age(self, commitment: Id) -> bool:
        # A coin is eligible to lead if it was committed to before the stake
        # distribution snapshot was taken, or if it was produced by a leader
        # proof since the snapshot was taken.
        #
        # This verification checks the first condition.
        #
        # NOTE: `ledger_state.commitments_spend` is a superset of `ledger_state.commitments_lead`
        return self.stake_distribution_snapshot.verify_eligible_to_spend(commitment)

    def total_active_stake(self) -> int:
        """
        Returns the inferred total stake participating in consensus.
        Total active stake is used to relativize a coin's value in leadership proofs.
        """
        return self.inferred_total_active_stake

    def nonce(self) -> bytes:
        return self.nonce_snapshot.nonce


class Follower:
    def __init__(self, genesis_state: LedgerState, config: Config):
        self.config = config
        self.forks = []
        self.local_chain = Chain([], genesis=genesis_state.block)
        self.genesis_state = genesis_state
        self.ledger_state = {genesis_state.block: genesis_state.copy()}
        self.epoch_state = {}

    def validate_header(self, block: BlockHeader, chain: Chain) -> bool:
        # TODO: verify blocks are not in the 'future'
        if block.parent != chain.tip_id():
            logger.warning("block parent is not chain tip")
            return False

        current_state = self.ledger_state[block.parent].copy()

        # first, we verify adopted leadership transactions
        for orphan in block.orphaned_proofs:
            # orphan proofs are checked in two ways:
            # 1. ensure they are valid locally in their original branch
            # 2. ensure they do not conflict with the current state

            # We take a shortcut for (1.) by restricting orphans to proofs we've
            # already processed in other branches.
            if orphan.id() not in self.ledger_state:
                logger.warning("missing orphan proof")
                return False

            # we use the proposed block's epoch state here instead of the orphan's
            # epoch state. For very old orphans, these states may be different.
            epoch_state = self.compute_epoch_state(block.slot.epoch(self.config), chain)

            # (2.) is satisfied by verifying the proof against the current state, ensuring:
            # - it is a valid proof
            # - and the nullifier has not already been spent
            if not self.verify_slot_leader(
                orphan.slot,
                orphan.parent,
                orphan.leader_proof,
                epoch_state,
                current_state,
            ):
                logger.warning("invalid orphan proof")
                return False

            # if an adopted leadership proof is valid, we need to apply its
            # effects to the ledger state
            current_state.apply_leader_proof(orphan.leader_proof)

        epoch_state = self.compute_epoch_state(block.slot.epoch(self.config), chain)
        # TODO: this is not the full block validation spec, only the slot leader is verified
        return self.verify_slot_leader(
            block.slot,
            block.parent,
            block.leader_proof,
            epoch_state,
            current_state,
        )

    def verify_slot_leader(
        self,
        slot: Slot,
        parent: Id,
        proof: MockLeaderProof,
        # coins are old enough if their commitment is in the stake distribution snapshot
        epoch_state: EpochState,
        # nullifiers (and commitments) are checked against the current state.
        # For now, we assume the proof's parent state and the current state are identical.
        # This will change once we start putting merkle roots in headers.
        current_state: LedgerState,
    ) -> bool:
        return (
            proof.verify(slot, parent)  # verify slot leader proof
            and (
                current_state.verify_eligible_to_lead(proof.commitment)
                or epoch_state.verify_eligible_to_lead_due_to_age(proof.commitment)
            )
            and current_state.verify_unspent(proof.nullifier)
        )

    # Try appending this block to an existing chain and return whether
    # the operation was successful
    def try_extend_chains(self, block: BlockHeader) -> Optional[Chain]:
        if self.tip_id() == block.parent:
            return self.local_chain

        for chain in self.forks:
            if chain.tip_id() == block.parent:
                return chain

        return None

    def try_create_fork(self, block: BlockHeader) -> Optional[Chain]:
        if self.genesis_state.block == block.parent:
            # this block is forking off the genesis state
            return Chain(blocks=[], genesis=self.genesis_state.block)

        chains = self.forks + [self.local_chain]
        for chain in chains:
            block_position = chain.block_position(block.parent)
            if block_position is not None:
                return Chain(
                    blocks=chain.blocks[: block_position + 1],
                    genesis=self.genesis_state.block,
                )

        return None

    def on_block(self, block: BlockHeader):
        # check if the new block extends an existing chain
        new_chain = self.try_extend_chains(block)
        if new_chain is None:
            # we failed to extend one of the existing chains,
            # therefore we might need to create a new fork
            new_chain = self.try_create_fork(block)
            if new_chain is not None:
                self.forks.append(new_chain)
            else:
                logger.warning("missing parent block")
                # otherwise, we're missing the parent block
                # in that case, just ignore the block
                return

        if not self.validate_header(block, new_chain):
            logger.warning("invalid header")
            return

        new_chain.blocks.append(block)

        # We may need to switch forks, so let's run the fork choice rule to check.
        new_chain = self.fork_choice()
        if new_chain != self.local_chain:
            self.forks.remove(new_chain)
            self.forks.append(self.local_chain)
            self.local_chain = new_chain

        new_state = self.ledger_state[block.parent].copy()
        new_state.apply(block)
        self.ledger_state[block.id()] = new_state

    def unimported_orphans(self, tip: Id) -> list[BlockHeader]:
        """
        Returns all unimported orphans w.r.t. the given tip's state.
        Orphans are returned in the order that they should be imported.
        """
        tip_state = self.ledger_state[tip].copy()

        orphans = []
        for fork in [self.local_chain, *self.forks]:
            if fork.block_position(tip) is not None:
                # the tip is a member of this fork; it doesn't make sense
                # to take orphans from this fork as they are all already "imported"
                continue

            for block in fork.blocks:
                for b in [*block.orphaned_proofs, block]:
                    if b.leader_proof.nullifier not in tip_state.nullifiers:
                        tip_state.nullifiers.add(b.leader_proof.nullifier)
                        orphans += [b]

        return orphans

    # Evaluate the fork choice rule and return the chain we should be following
    def fork_choice(self) -> Chain:
        return maxvalid_bg(
            self.local_chain, self.forks, k=self.config.k, s=self.config.s
        )

    def tip(self) -> BlockHeader:
        return self.local_chain.tip()

    def tip_id(self) -> Id:
        return self.local_chain.tip_id()

    def tip_state(self) -> LedgerState:
        return self.ledger_state[self.tip_id()]

    def state_at_slot_beginning(self, chain: Chain, slot: Slot) -> LedgerState:
        for block in reversed(chain.blocks):
            if block.slot < slot:
                return self.ledger_state[block.id()]

        return self.genesis_state

    def epoch_start_slot(self, epoch) -> Slot:
        return Slot(epoch.epoch * self.config.epoch_length)

    def stake_distribution_snapshot(self, epoch, chain):
        # the stake distribution snapshot happens at the beginning of the previous epoch,
        # i.e. for epoch e, the snapshot is taken at the last block of epoch e-2
        slot = Slot(epoch.prev().epoch * self.config.epoch_length)
        return self.state_at_slot_beginning(chain, slot)

    def nonce_snapshot(self, epoch, chain):
        # the nonce snapshot happens partway through the previous epoch, after the
        # stake distribution has stabilized
        slot = Slot(
            self.config.epoch_relative_nonce_slot
            + self.epoch_start_slot(epoch.prev()).absolute_slot
        )
        return self.state_at_slot_beginning(chain, slot)

    def compute_epoch_state(self, epoch: Epoch, chain: Chain) -> EpochState:
        if epoch.epoch == 0:
            return EpochState(
                stake_distribution_snapshot=self.genesis_state,
                nonce_snapshot=self.genesis_state,
                inferred_total_active_stake=self.config.initial_total_active_stake,
            )

        stake_distribution_snapshot = self.stake_distribution_snapshot(epoch, chain)
        nonce_snapshot = self.nonce_snapshot(epoch, chain)

        # we memoize epoch states to avoid recursion killing our performance
        memo_block_id = nonce_snapshot.block
        if state := self.epoch_state.get((epoch, memo_block_id)):
            return state

        # To update our inference of total stake, we need the prior estimate which
        # was calculated last epoch. Thus we recurse here to retrieve the previous
        # estimate of total stake.
        prev_epoch = self.compute_epoch_state(epoch.prev(), chain)
        inferred_total_active_stake = self._infer_total_active_stake(
            prev_epoch, nonce_snapshot, stake_distribution_snapshot
        )

        state = EpochState(
            stake_distribution_snapshot=stake_distribution_snapshot,
            nonce_snapshot=nonce_snapshot,
            inferred_total_active_stake=inferred_total_active_stake,
        )

        self.epoch_state[(epoch, memo_block_id)] = state
        return state

    def _infer_total_active_stake(
        self,
        prev_epoch: EpochState,
        nonce_snapshot: LedgerState,
        stake_distribution_snapshot: LedgerState,
    ):
        # Infer total stake from the empirical block production rate in the last epoch.

        # Since we need a stable inference of total stake for the start of this epoch,
        # we limit our look-back period to the start of the last epoch until when the
        # nonce snapshot was taken.
        block_proposals_last_epoch = (
            nonce_snapshot.leader_count - stake_distribution_snapshot.leader_count
        )
        T = self.config.epoch_relative_nonce_slot
        mean_blocks_per_slot = block_proposals_last_epoch / T
        expected_blocks_per_slot = np.log(1 / (1 - self.config.active_slot_coeff))
        blocks_per_slot_err = expected_blocks_per_slot - mean_blocks_per_slot
        h = (
            self.config.total_active_stake_learning_rate
            * prev_epoch.inferred_total_active_stake
            / expected_blocks_per_slot
        )
        return int(prev_epoch.inferred_total_active_stake - h * blocks_per_slot_err)


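# A minimal sketch (not part of the spec) of one step of the total-active-stake
# inference above. The leader counts are synthetic: when the observation window
# produced roughly the expected number of blocks, the estimate barely moves.
def _example_stake_inference():
    config = Config.cryptarchia_v0_0_1(initial_total_active_stake=1_000_000)
    genesis = LedgerState(block=bytes(32), nonce=bytes(32))
    follower = Follower(genesis, config)

    prev_epoch = follower.compute_epoch_state(Epoch(0), follower.local_chain)
    T = config.epoch_relative_nonce_slot
    expected_blocks = int(T * np.log(1 / (1 - config.active_slot_coeff)))
    start = LedgerState(block=bytes(32), nonce=bytes(32), leader_count=0)
    end = LedgerState(block=bytes(32), nonce=bytes(32), leader_count=expected_blocks)

    estimate = follower._infer_total_active_stake(prev_epoch, end, start)
    # within 1% of the previous estimate when production matched expectations
    assert abs(estimate - 1_000_000) < 10_000

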
def phi(f: float, alpha: float) -> float:
    """
    params:
      f: 'active slot coefficient' - the rate of occupied slots
      alpha: relative stake held by the validator

    returns: the probability that this validator should win the slot lottery
    """
    return 1 - (1 - f) ** alpha


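# A worked example (not part of the spec): with f = 0.05 and 1% of the active
# stake, the per-slot win probability is 1 - (1 - 0.05) ** 0.01 ≈ 0.000513.
def _example_phi():
    assert abs(phi(0.05, 0.01) - 0.000513) < 1e-5

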
class MOCK_LEADER_VRF:
    """NOT SECURE: A mock VRF function"""

    ORDER = 2**256

    @classmethod
    def vrf(cls, coin: Coin, epoch_nonce: bytes, slot: Slot) -> int:
        h = sha256()
        h.update(b"lead")
        h.update(epoch_nonce)
        h.update(slot.encode())
        h.update(coin.encode_sk())
        h.update(coin.nonce)

        return int.from_bytes(h.digest(), byteorder="big")

    @classmethod
    def verify(cls, r, pk, nonce, slot):
        raise NotImplementedError()


@dataclass
class Leader:
    config: Config
    coin: Coin

    def try_prove_slot_leader(
        self, epoch: EpochState, slot: Slot, parent: Id
    ) -> MockLeaderProof | None:
        if self._is_slot_leader(epoch, slot):
            return MockLeaderProof.new(self.coin, slot, parent)

    def _is_slot_leader(self, epoch: EpochState, slot: Slot):
        relative_stake = self.coin.value / epoch.total_active_stake()

        r = MOCK_LEADER_VRF.vrf(self.coin, epoch.nonce(), slot)

        return r < MOCK_LEADER_VRF.ORDER * phi(
            self.config.active_slot_coeff, relative_stake
        )


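# A minimal sketch (not part of the spec) of the leadership lottery: a coin
# holding all of the inferred active stake wins roughly f of the slots. The
# stake/coin values and the scanned slot range are illustrative assumptions.
def _example_leadership_check():
    config = Config.cryptarchia_v0_0_1(initial_total_active_stake=100)
    genesis = LedgerState(block=bytes(32), nonce=bytes(32))
    epoch = EpochState(
        stake_distribution_snapshot=genesis,
        nonce_snapshot=genesis,
        inferred_total_active_stake=100,
    )
    leader = Leader(config=config, coin=Coin(sk=1, value=100))
    # relative stake is 1.0, so each slot is won with probability f = 0.05;
    # scanning 1000 slots should all but certainly yield at least one proof
    proofs = [
        proof
        for slot in range(1000)
        if (proof := leader.try_prove_slot_leader(epoch, Slot(slot), bytes(32)))
        is not None
    ]
    assert len(proofs) >= 1

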
def common_prefix_len(a: Chain, b: Chain) -> int:
    for i, (x, y) in enumerate(zip(a.blocks, b.blocks)):
        if x.id() != y.id():
            return i
    return min(len(a.blocks), len(b.blocks))


def chain_density(chain: Chain, slot: Slot) -> int:
    return len(
        [
            block
            for block in chain.blocks
            if block.slot.absolute_slot < slot.absolute_slot
        ]
    )


# Implementation of the fork choice rule as defined in the Ouroboros Genesis paper.
# k defines the forking depth of a chain that we accept without further analysis.
# s defines the length of time (in slots) after the fork that we inspect for chain density.
def maxvalid_bg(local_chain: Chain, forks: List[Chain], k: int, s: int) -> Chain:
    cmax = local_chain
    for chain in forks:
        lowest_common_ancestor = common_prefix_len(cmax, chain)
        m = cmax.length() - lowest_common_ancestor
        if m <= k:
            # Classic longest chain rule with parameter k
            if cmax.length() < chain.length():
                cmax = chain
        else:
            # The chain is forking too much; we need to pay a bit more attention.
            # In particular, select the chain that is the densest after the fork.
            forking_slot = Slot(
                cmax.blocks[lowest_common_ancestor].slot.absolute_slot + s
            )
            cmax_density = chain_density(cmax, forking_slot)
            candidate_density = chain_density(chain, forking_slot)
            if cmax_density < candidate_density:
                cmax = chain

    return cmax


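# A minimal sketch (not part of the spec) of the longest-chain branch of
# maxvalid_bg. The headers are placeholder values built only so the two chains
# diverge at genesis by fewer than k blocks.
def _example_fork_choice():
    def _header(slot: int, parent: Id) -> BlockHeader:
        coin = Coin(sk=slot, value=1)
        return BlockHeader(
            slot=Slot(slot),
            parent=parent,
            content_size=0,
            content_id=bytes(32),
            leader_proof=MockLeaderProof.new(coin, Slot(slot), parent),
        )

    genesis = bytes(32)
    short_chain = Chain(blocks=[_header(1, genesis)], genesis=genesis)
    first = _header(2, genesis)
    long_chain = Chain(blocks=[first, _header(3, first.id())], genesis=genesis)

    # both chains fork right at genesis and diverge by fewer than k blocks,
    # so the longer chain wins
    assert maxvalid_bg(short_chain, [long_chain], k=2160, s=3 * 43200) == long_chain

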
if __name__ == "__main__":
    pass