diff --git a/cryptarchia-v2/.gitignore b/cryptarchia-v2/.gitignore new file mode 100644 index 0000000..e9b37e2 --- /dev/null +++ b/cryptarchia-v2/.gitignore @@ -0,0 +1,4 @@ +.venv +.ipynb_checkpoints +*png +chain.html diff --git a/cryptarchia-v2/cryptarchia-v2.ipynb b/cryptarchia-v2/cryptarchia-v2.ipynb new file mode 100644 index 0000000..37c5272 --- /dev/null +++ b/cryptarchia-v2/cryptarchia-v2.ipynb @@ -0,0 +1,1366 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "ad657d5a-bd36-4329-b134-6745daff7ae9", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "from dataclasses import dataclass, replace\n", + "import collections\n", + "import os\n", + "import platform\n", + "import pulp\n", + "import random\n", + "import numpy as np\n", + "from collections import deque #, defaultdict\n", + "\n", + "import matplotlib.pyplot as plt\n", + "from pyvis.network import Network\n", + "from pyvis.options import Layout\n", + "from joblib import Parallel, delayed" + ] + }, + { + "cell_type": "markdown", + "id": "71b7ae0d-bb4d-4ec8-b498-2482e254cf5b", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "# Network model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a538cf45-d551-4603-b484-dbbc3f3d0a73", + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class NetworkParams:\n", + " broadcast_delay_mean: int # second\n", + " pol_proof_time: int # seconds\n", + " # ---- blend network -- \n", + " blending_delay: int\n", + " dissemination_delay_mean: float\n", + " # desimenation_delay_var: float\n", + " blend_hops: int\n", + " no_network_delay: bool = False\n", + "\n", + " def sample_blending_delay(self):\n", + " return np.random.uniform(0, self.blending_delay)\n", + "\n", + " def sample_dissemination_delay(self):\n", + " return np.random.exponential(self.dissemination_delay_mean)\n", + "\n", + " def sample_blend_network_delay(self):\n", + " return sum(self.sample_blending_delay() + self.sample_dissemination_delay() for _ in range(self.blend_hops))\n", + " \n", + " def sample_broadcast_delay(self, blocks):\n", + " return np.random.exponential(self.broadcast_delay_mean, size=blocks.shape)\n", + "\n", + " def block_arrival_slot(self, block_slot):\n", + " if self.no_network_delay:\n", + " return block_slot\n", + " return self.pol_proof_time + self.sample_blend_network_delay() + self.sample_broadcast_delay(block_slot) + block_slot\n", + "\n", + " def empirical_network_delay(self, N=10000, M=1000):\n", + " return np.array([self.block_arrival_slot(np.zeros(M)) for _ in range(N)]).reshape(N*M)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17ef82f8-968c-48b0-bee7-f2642c8b3f3e", + "metadata": {}, + "outputs": [], + "source": [ + "blend_net = NetworkParams(\n", + " broadcast_delay_mean=0.5,\n", + " pol_proof_time=1,\n", + " blending_delay=3,\n", + " dissemination_delay_mean=0.5,\n", + " blend_hops=3,\n", + ")\n", + "no_blend_net = replace(blend_net, blend_hops=0)\n", + "\n", + "N = 100\n", + "M = 10000\n", + "no_blend_samples = no_blend_net.empirical_network_delay()\n", + "no_blend_mean = no_blend_samples.mean()\n", + "blend_samples = blend_net.empirical_network_delay()\n", + "blend_mean = blend_samples.mean()\n", + "\n", + "_ = plt.hist(no_blend_samples, bins=100, density=True, label=\"no-blend\")\n", + "_ = plt.hist(blend_samples, bins=100, density=True, label=\"blend\")\n", + "\n", + "for p in [50, 99, 99.9]:\n", + " no_blend_pct = 
np.percentile(no_blend_samples, p)\n", + " _ = plt.vlines(no_blend_pct, ymin=0, ymax=0.25, color='darkblue', label=f\"no-blend {p}p={no_blend_pct:.1f}s\")\n", + "\n", + "for p in [50, 99, 99.9]:\n", + " blend_pct = np.percentile(blend_samples, p)\n", + " _ = plt.vlines(blend_pct, ymin=0, ymax=0.25, color='brown', label=f\"blend {p}p={blend_pct:.1f}s\")\n", + "# _ = plt.vlines(blend_mean, ymin=0, ymax=1, color='brown', label=f\"blend 50p={blend_mean:.1f}s\")\n", + "# _ = plt.hist(blend_net.block_arrival_slot(np.zeros(1000)), bins=100, density=True, label=\"blend\")\n", + "_ = plt.legend()\n", + "_ = plt.xlabel(\"block delay\")" + ] + }, + { + "cell_type": "markdown", + "id": "51db3605-c164-44fe-aefa-c7bf2aad587b", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "# Transaction dependencies (probabilistic models)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38b1e549-4f83-4f37-a563-8ba724d6d845", + "metadata": {}, + "outputs": [], + "source": [ + "def weights_from_ages(\n", + " ages: np.ndarray,\n", + " peak_age: float = 360.0,\n", + " shape: float = 5.0\n", + ") -> np.ndarray:\n", + " # Compute scale parameter so mode = peak_age\n", + " theta = peak_age / (shape - 1)\n", + "\n", + " # Ensure ages >= 0\n", + " a = np.maximum(ages, 0.0)\n", + "\n", + " # Compute unnormalized weights: a^(shape-1) * exp(-a/theta)\n", + " w = (a ** (shape - 1)) * np.exp(-a / theta)\n", + "\n", + " # Zero out negative ages (already clamped, but enforce)\n", + " w[ages < 0] = 0.0\n", + "\n", + " # Normalize to sum = 1\n", + " total = w.sum()\n", + " if total > 0:\n", + " w /= total\n", + " return w\n", + "\n", + "ages = np.arange(0, 1001, 10)\n", + "\n", + "weights_360 = weights_from_ages(ages, peak_age=360.0, shape=5.0)\n", + "weights_500 = weights_from_ages(ages, peak_age=500.0, shape=5.0)\n", + "\n", + "plt.figure(figsize=(8, 4))\n", + "plt.plot(ages, weights_360, marker='o', linestyle='-', label='Peak at 360')\n", + "plt.plot(ages, weights_500, marker='s', linestyle='--', label='Peak at 500')\n", + "plt.xlabel('Age (slots)')\n", + "plt.ylabel('Normalized Weight')\n", + "plt.title('Dependency probability model (pessimistic)')\n", + "plt.grid(True)\n", + "plt.legend()\n", + "plt.tight_layout()\n", + "plt.show()\n", + "\n", + "def weights_shifted_fast_decay(\n", + " ages: np.ndarray,\n", + " shift: float = 20.0,\n", + " tau: float = 20.0\n", + ") -> np.ndarray:\n", + " w = np.zeros_like(ages, dtype=float)\n", + " # clamp shift so it’s never above ages.max()\n", + " shift_eff = min(shift, ages.max())\n", + " valid = ages >= shift_eff\n", + " a = ages[valid]\n", + " w[valid] = np.exp(-(a - shift_eff) / tau)\n", + " total = w.sum()\n", + " if total > 0:\n", + " w /= total\n", + " return w\n", + "\n", + "# Define ages from 0 to 1000 in steps of 10\n", + "ages = np.arange(0, 1001, 10)\n", + "weights = weights_shifted_fast_decay(ages, shift=20.0, tau=20.0)\n", + "\n", + "# Plotting\n", + "plt.figure(figsize=(8, 4))\n", + "plt.plot(ages, weights, marker='o', linestyle='-')\n", + "plt.axvline(20, color='red', linestyle='--', label='Shift = 20')\n", + "plt.xlabel('Age (slots)')\n", + "plt.ylabel('Normalized Weight')\n", + "plt.title('Shifted Fast-Decay Model (Shift = 20, τ = 20)')\n", + "plt.grid(True)\n", + "plt.legend()\n", + "plt.tight_layout()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "id": "cb9828c4-7511-4bef-a125-a300fc2885b0", + "metadata": {}, + "source": [ + "# Cryptarchia v2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"id": "24779de7-284f-4200-9e4a-d2aa6e1b823b", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "def phi(f, alpha):\n", + " return 1 - (1-f)**alpha\n", + "\n", + "@dataclass\n", + "class Params:\n", + " SLOTS: int\n", + " f: float\n", + " honest_stake: np.array\n", + " adversary_control: float\n", + " window_size: int\n", + " use_deps: bool\n", + "\n", + " @property\n", + " def N(self):\n", + " return len(self.honest_stake) + 1\n", + "\n", + " @property\n", + " def stake(self):\n", + " return np.append(self.honest_stake, self.honest_stake.sum() / (1/self.adversary_control - 1))\n", + " \n", + " @property\n", + " def relative_stake(self):\n", + " return self.stake / self.stake.sum()\n", + "\n", + " def slot_prob(self):\n", + " return phi(self.f, self.relative_stake)\n", + "\n", + "@dataclass\n", + "class Block:\n", + " id: int\n", + " slot: int\n", + " refs: list[int]\n", + " deps: list[int]\n", + " leader: int\n", + " adversarial: bool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "055ed35f-b142-4d80-ae4a-b951381cdcd3", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "def visualize_chain(sim):\n", + " layout = Layout()\n", + " layout.hierachical = True\n", + "\n", + " tooltip_css = \"\"\"\n", + " \n", + " \"\"\"\n", + " G = Network(width=1000, height=800, notebook=True, directed=True, layout=layout, cdn_resources='in_line', heading=tooltip_css)\n", + " options_str = \"\"\"\n", + " {\n", + " \"layout\": {\n", + " \"hierarchical\": {\n", + " \"enabled\": true,\n", + " \"levelSeparation\": 200,\n", + " \"nodeSpacing\": 150,\n", + " \"treeSpacing\": 250,\n", + " \"direction\": \"UD\",\n", + " \"sortMethod\": \"directed\"\n", + " }\n", + " },\n", + " \"physics\": {\n", + " \"enabled\": true,\n", + " \"solver\": \"hierarchicalRepulsion\",\n", + " \"hierarchicalRepulsion\": {\n", + " \"centralGravity\": 0.0,\n", + " \"springLength\": 150,\n", + " \"springConstant\": 0.05,\n", + " \"nodeDistance\": 150,\n", + " \"damping\": 0.15\n", + " },\n", + " \"stabilization\": {\n", + " \"enabled\": true,\n", + " \"iterations\": 200000,\n", + " \"updateInterval\": 2500,\n", + " \"onlyDynamicEdges\": false,\n", + " \"fit\": true\n", + " }\n", + " },\n", + " \"interaction\": {\n", + " \"tooltipDelay\": 200,\n", + " \"hover\": true,\n", + " \"dragNodes\": true,\n", + " \"dragView\": true,\n", + " \"zoomView\": true\n", + " },\n", + " \"nodes\": {\n", + " \"font\": {\n", + " \"size\": 14\n", + " },\n", + " \"shape\": \"box\",\n", + " \"margin\": 10\n", + " },\n", + " \"edges\": {\n", + " \"smooth\": {\n", + " \"enabled\": true,\n", + " \"type\": \"cubicBezier\",\n", + " \"roundness\": 0.5\n", + " },\n", + " \"arrows\": {\n", + " \"to\": { \"enabled\": true, \"scaleFactor\": 0.7 }\n", + " }\n", + " }\n", + " }\n", + " \"\"\"\n", + " G.set_options(options_str)\n", + " for block in sim.blocks:\n", + " level = block.slot/20 # This puts all the blocks that happen within 20s in the same level (just for visual clarity)\n", + " color = \"darkgrey\"\n", + " #if block.id in honest_chain_set:\n", + " # color = \"orange\"\n", + "\n", + " if block.adversarial: color = \"red\"\n", + " G.add_node(int(block.id), level=level, color=color, label=f\"(id:{block.id})\\ns: {block.slot}\\nrefs: {len(block.refs)}\")\n", + " # if block.parent >= 0:\n", + " # G.add_edge(int(block.id), int(block.parent), width=2, color=color)\n", + " # Draw deps first so they are in the background\n", + " # for dep in block.deps:\n", + 
" # G.add_edge(int(block.id), int(dep), width=1, color=\"#dddddd\")\n", + " for ref in block.refs:\n", + " G.add_edge(int(block.id), int(ref), width=1, color=\"blue\")\n", + "\n", + " \n", + " return G.show(\"chain.html\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d4896b2-b1b6-4b6c-8519-be1f65747246", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "def normalize_from_slot(arr: np.ndarray, slot: int) -> np.ndarray:\n", + " \"\"\"\n", + " Subtract arr[slot] from every element of arr, then clip negatives to zero.\n", + " \"\"\"\n", + " base = arr[slot]\n", + " adjusted = arr - base\n", + " adjusted[adjusted < 0] = 0\n", + " return adjusted\n", + "\n", + "def longest_advantage_run(a: np.ndarray, b: np.ndarray) -> int:\n", + " \"\"\"\n", + " Return the length of the longest consecutive run where b >= a,\n", + " using purely NumPy operations.\n", + " \"\"\"\n", + " mask = b >= a\n", + " # Pad with False at both ends to catch runs at boundaries\n", + " padded = np.concatenate(([False], mask, [False]))\n", + " diff = np.diff(padded.astype(np.int8))\n", + " starts = np.where(diff == 1)[0]\n", + " ends = np.where(diff == -1)[0]\n", + " if starts.size == 0:\n", + " return 0\n", + " lengths = ends - starts\n", + " return int(lengths.max())\n", + "\n", + "def highest_advantage_index(a: np.ndarray, b: np.ndarray) -> int:\n", + " \"\"\"\n", + " Return the largest index i where b[i] >= a[i], or -1 if none exist.\n", + " \"\"\"\n", + " mask = b >= a\n", + " idxs = np.nonzero(mask)[0]\n", + " return int(idxs[-1]) if idxs.size > 0 else -1\n", + "\n", + "def highest_advantage_index_nonzero(a: np.ndarray, b: np.ndarray) -> int:\n", + " \"\"\"\n", + " Return the largest index i where b[i] >= a[i] and not (a[i] == 0 and b[i] == 0), \n", + " or -1 if none exist.\n", + " \"\"\"\n", + " mask = (b >= a) & ~((a == 0) & (b == 0))\n", + " idxs = np.nonzero(mask)[0]\n", + " return int(idxs[-1]) if idxs.size > 0 else -1\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5a615d1e-e6df-48b3-bc5d-7e3e444820d8", + "metadata": { + "jupyter": { + "source_hidden": true + } + }, + "outputs": [], + "source": [ + "import pulp\n", + "\n", + "def dag_with_max_refs(blocks: list, window_size: int) -> dict[int, list[int]]:\n", + " \"\"\"\n", + " Returns a dict: block_id -> list of referenced block_ids\n", + " that maximizes the total number of references under these constraints:\n", + " - References go backward in time (from later to earlier blocks)\n", + " - Reference only within `window_size`\n", + " - No transitive references (triangle closure constraint)\n", + " \"\"\"\n", + " prob = pulp.LpProblem(\"MaxRefsDAG\", pulp.LpMaximize)\n", + "\n", + " block_ids = [b.id for b in blocks]\n", + " slots = {b.id: b.slot for b in blocks}\n", + " id_set = set(block_ids)\n", + "\n", + " # Generate all valid candidate pairs (i -> j)\n", + " pairs = [\n", + " (i, j)\n", + " for i in block_ids\n", + " for j in block_ids\n", + " if slots[i] > slots[j] and (slots[i] - slots[j]) <= window_size\n", + " ]\n", + "\n", + " # Decision variables: x[i][j] = 1 if block i references block j\n", + " x = pulp.LpVariable.dicts(\"x\", pairs, cat=\"Binary\")\n", + "\n", + " # Objective: maximize total number of references\n", + " prob += pulp.lpSum(x[i, j] for (i, j) in pairs)\n", + "\n", + " # Triangle constraints: for all i, j, k:\n", + " # if i -> j and j -> k then i must NOT reference k to avoid closure\n", + " # Note that this is a good approximation, but 
it would allow the adversary simulation to\n", + " # sneak in some extra refs. Since that only could improve our results, this is good enough.\n", + " for i in block_ids:\n", + " for j in block_ids:\n", + " for k in block_ids:\n", + " if (i, j) in x and (j, k) in x and (i, k) in x:\n", + " prob += x[i, j] + x[j, k] + x[i, k] <= 2\n", + "\n", + " # Solve the ILP\n", + " solver = pulp.PULP_CBC_CMD(msg=False)\n", + " result = prob.solve(solver)\n", + "\n", + " if pulp.LpStatus[result] != \"Optimal\":\n", + " raise RuntimeError(\"ILP did not find an optimal solution.\")\n", + "\n", + " # Extract result\n", + " ref_graph = {i: [] for i in block_ids}\n", + " for (i, j) in pairs:\n", + " if pulp.value(x[i, j]) > 0.5:\n", + " ref_graph[i].append(j)\n", + "\n", + " return ref_graph" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a90495a8-fcda-4e47-92b4-cc5ceaa9ff9c", + "metadata": {}, + "outputs": [], + "source": [ + "class Sim:\n", + " def __init__(self, params: Params, network: NetworkParams):\n", + " self.params = params\n", + " self.network = network\n", + "\n", + " # leaders: fixed-size (N × SLOTS)\n", + " self.leaders = np.zeros((params.N, params.SLOTS), dtype=np.int32)\n", + "\n", + " # Preallocate capacity for blocks: at most N * SLOTS blocks\n", + " max_blocks = params.N * params.SLOTS\n", + " self.block_slots = np.empty(max_blocks, dtype=np.int32)\n", + " self.block_arrivals = np.empty((params.N, max_blocks), dtype=np.int32)\n", + " self.num_blocks = 0\n", + "\n", + " self.blocks: list[Block] = []\n", + "\n", + " # Emit genesis block (id = 0)\n", + " self.emit_block(leader=0, slot=0, refs=[], deps=[])\n", + " # Set arrival times of genesis to 0\n", + " self.block_arrivals[:, 0] = 0\n", + "\n", + " def clone_for_attack(self):\n", + " new = object.__new__(Sim)\n", + " new.params = self.params\n", + " new.network = self.network\n", + "\n", + " # Copy leaders array\n", + " new.leaders = self.leaders.copy()\n", + "\n", + " # Preallocate same sizes\n", + " max_blocks = self.params.N * self.params.SLOTS\n", + " new.block_slots = np.empty_like(self.block_slots)\n", + " new.block_arrivals = np.empty_like(self.block_arrivals)\n", + " new.num_blocks = self.num_blocks\n", + "\n", + " # Copy blocks list (shallow copy of each Block)\n", + " new.blocks = [\n", + " Block(\n", + " id=b.id,\n", + " leader=b.leader,\n", + " slot=b.slot,\n", + " refs=b.refs.copy(),\n", + " deps=b.deps.copy(),\n", + " adversarial=b.adversarial\n", + " ) for b in self.blocks\n", + " ]\n", + "\n", + " # Copy underlying arrays\n", + " new.block_slots[: self.num_blocks] = self.block_slots[: self.num_blocks]\n", + " new.block_arrivals[:, : self.num_blocks] = self.block_arrivals[:, : self.num_blocks]\n", + "\n", + " return new\n", + "\n", + " def get_seen_blocks_in_window_for_node(self, node_id: int, current_slot: int, window_size: int) -> list[int]:\n", + " if not (0 <= node_id < self.params.N):\n", + " raise ValueError(f\"Invalid node_id: {node_id}. Must be between 0 and {self.params.N - 1}.\")\n", + " if window_size <= 0:\n", + " raise ValueError(f\"window_size must be positive. 
Got {window_size}.\")\n", + " if self.num_blocks == 0:\n", + " return []\n", + "\n", + " min_slot = current_slot - window_size + 1\n", + " max_slot = current_slot\n", + "\n", + " arrivals = self.block_arrivals[node_id, : self.num_blocks]\n", + " slots = self.block_slots[: self.num_blocks]\n", + "\n", + " mask = (\n", + " (arrivals >= min_slot) & (arrivals <= max_slot) &\n", + " (slots >= min_slot) & (slots <= max_slot)\n", + " )\n", + " return np.nonzero(mask)[0].tolist()\n", + "\n", + " def get_all_blocks_in_window_for_node(self, node_id: int, current_slot: int, window_size: int) -> list[int]:\n", + " if not (0 <= node_id < self.params.N):\n", + " raise ValueError(f\"Invalid node_id: {node_id}. Must be between 0 and {self.params.N - 1}.\")\n", + " if window_size <= 0:\n", + " raise ValueError(f\"window_size must be positive. Got {window_size}.\")\n", + " if self.num_blocks == 0:\n", + " return []\n", + "\n", + " min_slot = current_slot - window_size + 1\n", + " max_slot = current_slot\n", + "\n", + " slots = self.block_slots[: self.num_blocks]\n", + " mask = (slots >= min_slot) & (slots <= max_slot)\n", + " return np.nonzero(mask)[0].tolist()\n", + "\n", + " def get_unreachable_blocks(self, node_id: int, current_slot: int) -> list[int]:\n", + " if not (0 <= node_id < self.params.N):\n", + " raise ValueError(f\"Invalid node_id: {node_id}. Must be between 0 and {self.params.N - 1}.\")\n", + "\n", + " arrivals = self.block_arrivals[node_id, : self.num_blocks]\n", + " seen_ids = set(np.nonzero(arrivals <= current_slot)[0].tolist())\n", + "\n", + " has_incoming = set()\n", + " for b in seen_ids:\n", + " for parent in self.blocks[b].refs:\n", + " if parent in seen_ids:\n", + " has_incoming.add(parent)\n", + "\n", + " return [b for b in seen_ids if b not in has_incoming]\n", + "\n", + " def get_max_cardinality_antichain(self, node_id: int, current_slot: int, window: int = None, forbidden: set[int] = None) -> list[int]:\n", + " arrivals = self.block_arrivals[node_id, : self.num_blocks]\n", + " slots_arr = self.block_slots[: self.num_blocks]\n", + "\n", + " if window is not None:\n", + " min_slot = current_slot - window + 1\n", + " mask = (\n", + " (arrivals <= current_slot) &\n", + " (slots_arr >= min_slot) &\n", + " (slots_arr <= current_slot)\n", + " )\n", + " seen_ids = np.nonzero(mask)[0].tolist()\n", + " else:\n", + " seen_ids = np.nonzero(arrivals <= current_slot)[0].tolist()\n", + "\n", + " # Filter out any forbidden blocks right away\n", + " if forbidden is not None:\n", + " seen_ids = [i for i in seen_ids if i not in forbidden]\n", + " \n", + " if not seen_ids:\n", + " return []\n", + "\n", + " idx = {blk_id: i for i, blk_id in enumerate(seen_ids)}\n", + " n = len(seen_ids)\n", + "\n", + " adj_children = {b: [] for b in seen_ids}\n", + " for b in seen_ids:\n", + " for parent in self.blocks[b].refs:\n", + " if parent in idx:\n", + " adj_children[parent].append(b)\n", + "\n", + " graph: list[list[int]] = [[] for _ in range(n)]\n", + " for u in seen_ids:\n", + " u_idx = idx[u]\n", + " visited = set()\n", + " stack = adj_children[u].copy()\n", + " while stack:\n", + " x = stack.pop()\n", + " if x not in visited:\n", + " visited.add(x)\n", + " stack.extend(adj_children.get(x, []))\n", + " graph[u_idx] = [idx[v] for v in visited]\n", + "\n", + " pair_u = [-1] * n\n", + " pair_v = [-1] * n\n", + " dist = [0] * n\n", + "\n", + " def bfs():\n", + " queue = deque()\n", + " found_augment = False\n", + " for u in range(n):\n", + " if pair_u[u] == -1:\n", + " dist[u] = 0\n", + " queue.append(u)\n", + 
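"                # Hopcroft-Karp-style BFS layering (hypothetical clarifying comment): unmatched vertices seed the queue at distance 0, matched ones start at infinity\n", +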
" else:\n", + " dist[u] = float(\"inf\")\n", + " while queue:\n", + " u = queue.popleft()\n", + " for v_idx in graph[u]:\n", + " pu = pair_v[v_idx]\n", + " if pu != -1 and dist[pu] == float(\"inf\"):\n", + " dist[pu] = dist[u] + 1\n", + " queue.append(pu)\n", + " if pu == -1:\n", + " found_augment = True\n", + " return found_augment\n", + "\n", + " def dfs(u):\n", + " for v_idx in graph[u]:\n", + " pu = pair_v[v_idx]\n", + " if pu == -1 or (dist[pu] == dist[u] + 1 and dfs(pu)):\n", + " pair_u[u] = v_idx\n", + " pair_v[v_idx] = u\n", + " return True\n", + " dist[u] = float(\"inf\")\n", + " return False\n", + "\n", + " while bfs():\n", + " for u in range(n):\n", + " if pair_u[u] == -1 and dfs(u):\n", + " pass\n", + "\n", + " visited_u = [False] * n\n", + " visited_v = [False] * n\n", + " queue = deque(u for u in range(n) if pair_u[u] == -1)\n", + " while queue:\n", + " u = queue.popleft()\n", + " if visited_u[u]:\n", + " continue\n", + " visited_u[u] = True\n", + " for v_idx in graph[u]:\n", + " if not visited_v[v_idx]:\n", + " visited_v[v_idx] = True\n", + " pu = pair_v[v_idx]\n", + " if pu != -1 and not visited_u[pu]:\n", + " queue.append(pu)\n", + "\n", + " return [\n", + " blk_id\n", + " for blk_id, u_idx in idx.items()\n", + " if visited_u[u_idx] and not visited_v[u_idx]\n", + " ]\n", + "\n", + " def emit_block(self, leader, slot, refs, deps, adversarial=False):\n", + " assert isinstance(leader, (int, np.int64))\n", + " assert isinstance(slot, (int, np.int64))\n", + " assert all(isinstance(r, (int, np.int64)) for r in refs)\n", + "\n", + " block = Block(\n", + " id=self.num_blocks,\n", + " leader=leader,\n", + " slot=slot,\n", + " refs=refs.copy(),\n", + " deps=deps.copy(),\n", + " adversarial=adversarial\n", + " )\n", + " self.blocks.append(block)\n", + "\n", + " self.block_slots[self.num_blocks] = slot\n", + "\n", + " if not adversarial:\n", + " base = np.repeat(slot, self.params.N)\n", + " arrival = self.network.block_arrival_slot(base)\n", + " else:\n", + " arrival = np.full((self.params.N,), self.params.SLOTS - 1, dtype=np.int64)\n", + " arrival[self.params.N - 1] = slot\n", + "\n", + " self.block_arrivals[:, self.num_blocks] = arrival\n", + "\n", + " bid = self.num_blocks\n", + " self.num_blocks += 1\n", + " return bid\n", + "\n", + " def emit_leader_block(self, leader, slot):\n", + " assert isinstance(leader, (int, np.int64))\n", + " assert isinstance(slot, int)\n", + "\n", + " arrivals = self.block_arrivals[leader, : self.num_blocks]\n", + " seen_ids = np.nonzero(arrivals <= slot)[0].tolist()\n", + "\n", + " deps = []\n", + " if seen_ids:\n", + " seen_slots = [self.block_slots[i] for i in seen_ids]\n", + " ages = [slot - s for s in seen_slots]\n", + " weights = weights_from_ages(np.array(ages), peak_age=60.0, shape=5.0)\n", + " dep = random.choices(seen_ids, weights=weights, k=1)[0]\n", + " deps = [int(dep)]\n", + "\n", + " refs = self.get_max_cardinality_antichain(leader, slot, window=self.params.window_size)\n", + " # print(f\"slot: {slot}, refs: {refs}\")\n", + "\n", + " emitted = self.emit_block(leader, slot, refs=refs, deps=deps)\n", + "\n", + " unreachable = self.get_unreachable_blocks(leader, slot)\n", + " unreachable = [\n", + " b for b in unreachable\n", + " if b != emitted and (slot - self.blocks[b].slot) >= self.params.window_size\n", + " ]\n", + "\n", + " if unreachable:\n", + " self.blocks[emitted].refs.append(random.choice(unreachable))\n", + "\n", + " return emitted\n", + "\n", + " def run(self):\n", + " for s in range(1, self.params.SLOTS):\n", + " 
self.leaders[:, s] = np.random.random(size=self.params.N) < self.params.slot_prob()\n", + " for leader in np.nonzero(self.leaders[:, s])[0]:\n", + " if self.params.adversary_control is not None and leader == self.params.N - 1:\n", + " continue\n", + " self.emit_leader_block(leader, s)\n", + "\n", + " def compute_descendants(self, start_block):\n", + " start_id = start_block.id if hasattr(start_block, \"id\") else start_block\n", + "\n", + " children = {i: [] for i in range(len(self.blocks))}\n", + " for i, blk in enumerate(self.blocks):\n", + " parents = blk.refs if self.params.use_deps else blk.refs\n", + " for r in parents:\n", + " children[r].append(i)\n", + "\n", + " desc = {start_id}\n", + " queue = collections.deque([start_id])\n", + " while queue:\n", + " cur = queue.popleft()\n", + " for child in children[cur]:\n", + " if child not in desc:\n", + " desc.add(child)\n", + " queue.append(child)\n", + " desc.remove(start_id)\n", + " return desc\n", + "\n", + " def block_ref_weights_by_slot(self, start_id: int) -> np.ndarray:\n", + " descendants = self.compute_descendants(start_id)\n", + " weights_by_slot = np.zeros(self.params.SLOTS, dtype=np.int64)\n", + " window = self.params.window_size\n", + " for did in descendants:\n", + " did_slot = self.blocks[did].slot\n", + " refs = self.blocks[did].refs\n", + " count = sum(\n", + " 1\n", + " for r in refs\n", + " if did_slot - self.blocks[r].slot < window\n", + " )\n", + " weights_by_slot[did_slot] += count\n", + " return np.cumsum(weights_by_slot)\n", + "\n", + " def adversarial_ref_weights_by_slot(self, start_id: int) -> np.ndarray:\n", + " descendants = self.compute_descendants(start_id)\n", + " adv_desc = [d for d in descendants if self.blocks[d].adversarial]\n", + " weights_by_slot = np.zeros(self.params.SLOTS, dtype=np.int64)\n", + " window = self.params.window_size\n", + " for did in adv_desc:\n", + " did_slot = self.blocks[did].slot\n", + " refs = self.blocks[did].refs\n", + " count = sum(\n", + " 1\n", + " for r in refs\n", + " if did_slot - self.blocks[r].slot < window\n", + " )\n", + " weights_by_slot[did_slot] += count\n", + " return np.cumsum(weights_by_slot)\n", + " \n", + " def attack_on_block(self, target_block: Block):\n", + " \"\"\"\n", + " Attack on a specific block. The provided block is the closest ancestor of a honest block on which\n", + " the attacker wants to introduce a conflict and win. Since the conflict resolution rules operate on\n", + " the closest common ancestor of those two conflicting blocks, we perform the attack by exhaustively\n", + " exploring every possible starting block.\n", + "\n", + " We are returning the reorg measured as from the attacked block (common ancestor) up to the last\n", + " adversarial block. This makes it imprecise, but in a DAG there is no perfect measure of \"reorg \n", + " length\". 
In any case, this is only relevant for \"full reorg\" counting, which is fine to compute\n",
+ "        approximately, allowing some tolerance in the definition of \"full reorg\".\n",
+ "        \"\"\"\n",
+ "        adv = self.params.N - 1\n",
+ "        fid = target_block.id\n",
+ "        fslot = target_block.slot\n",
+ "\n",
+ "        # Static forbidden = all descendants of fid (and fid itself)\n",
+ "        forbidden = set(self.compute_descendants(fid)) | {fid}\n",
+ "\n",
+ "        # Precompute adversarial slots after fslot\n",
+ "        adv_slots = list(np.flatnonzero(self.leaders[adv, fslot + 1 :]) + fslot + 1)\n",
+ "        if not adv_slots:\n",
+ "            return -1\n",
+ "\n",
+ "        # Pre-emit every adversarial block at its slot, chaining deps (optimal), empty refs for now\n",
+ "        adversarial_block_ids = []\n",
+ "        prev_bid = None\n",
+ "        for slot in adv_slots:\n",
+ "            deps = [] if prev_bid is None else [prev_bid]\n",
+ "            bid = self.emit_block(adv, slot, refs=[], deps=deps, adversarial=True)\n",
+ "            adversarial_block_ids.append(bid)\n",
+ "            prev_bid = bid\n",
+ "\n",
+ "        first_adv_block = self.blocks[adversarial_block_ids[0]]\n",
+ "        first_adv_block.refs = self.get_max_cardinality_antichain(adv, first_adv_block.slot, window=self.params.window_size)\n",
+ "        \n",
+ "        #for fixed_i in range(len(adversarial_block_ids) - 1, -1, -1):\n",
+ "        for fixed_i in range(1, len(adversarial_block_ids)-1):\n",
+ "            fixed_adv = self.blocks[adversarial_block_ids[fixed_i]]\n",
+ "            candidate_blocks = [\n",
+ "                self.blocks[i] \n",
+ "                for i in self.get_all_blocks_in_window_for_node(\n",
+ "                    adv, \n",
+ "                    fixed_adv.slot, \n",
+ "                    self.params.window_size * 2\n",
+ "                ) \n",
+ "                if self.blocks[i].id not in forbidden\n",
+ "            ]\n",
+ "            best_dag = dag_with_max_refs(candidate_blocks, self.params.window_size)\n",
+ "            fixed_adv.refs = best_dag[fixed_adv.id]\n",
+ "\n",
+ "        # Rule 2: for every adversarial block, try to add one extra reference to a disconnected block\n",
+ "        for i, a in reversed(list(enumerate(adversarial_block_ids))):\n",
+ "            a_slot = self.blocks[a].slot\n",
+ "            desc_set = self.compute_descendants(a)\n",
+ "            for b in adversarial_block_ids[i + 1:]:\n",
+ "                b_slot = self.blocks[b].slot\n",
+ "                if (\n",
+ "                    b not in desc_set and\n",
+ "                    not self.blocks[b].refs and\n",
+ "                    (b_slot - a_slot) > self.params.window_size # must violate the window\n",
+ "                ):\n",
+ "                    self.blocks[b].refs.append(a)\n",
+ "                    break\n",
+ "\n",
+ "        # Find the slot of the closest direct honest descendant of fid\n",
+ "        # This is the block with the conflict that will be replaced by the adversary\n",
+ "        ref_slot = None\n",
+ "        for b in self.blocks[target_block.id+1:]:\n",
+ "            if b.adversarial: # all adversarial blocks are at the end\n",
+ "                break\n",
+ "            if target_block.id in b.refs:\n",
+ "                ref_slot = b.slot\n",
+ "                break\n",
+ "        if ref_slot is None:\n",
+ "            return -1\n",
+ "\n",
+ "        # Compute reorg length, capping at last adversarial slot\n",
+ "        last_adv_slot = self.blocks[adversarial_block_ids[-1]].slot\n",
+ "        first_adv_bid = adversarial_block_ids[0]\n",
+ "        honest_weights = self.block_ref_weights_by_slot(fid)\n",
+ "        adv_weights = self.adversarial_ref_weights_by_slot(first_adv_bid)\n",
+ "\n",
+ "        # We return the reorg measured from the attacked block (common ancestor)\n",
+ "        # up to the last adversarial block. 
This makes it imprecise, but in a DAG there is no\n", + " # perfect measure of \"reorg length\".\n", + " hi_uncapped = highest_advantage_index_nonzero(honest_weights, adv_weights)\n", + " hi = min(hi_uncapped, last_adv_slot)\n", + " return int(hi - ref_slot) if hi - ref_slot >= 0 else -1" + ] + }, + { + "cell_type": "markdown", + "id": "911b38c8-8f8b-4ca5-b875-ea84e8161a79", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "## Single Run and Visualization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a0123dab-cf0d-4721-81c7-bb881a27c13c", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# %%time\n", + "# random.seed(0)\n", + "# np.random.seed(0)\n", + "\n", + "# sim = Sim(\n", + "# params=Params(\n", + "# SLOTS=200,\n", + "# f=0.25,\n", + "# window_size=30,\n", + "# use_deps=True,\n", + "# adversary_control = 0.4,\n", + "# honest_stake = np.random.pareto(10, 1000)\n", + "# ),\n", + "# network=blend_net\n", + "# )\n", + "# sim.run()\n", + "\n", + "# n_blocks_per_slot = len(sim.blocks) / sim.params.SLOTS\n", + "# print(\"avg blocks per slot\", n_blocks_per_slot)\n", + "# print(\"Number of blocks\", len(sim.blocks))\n", + "\n", + "# total_refs = sum([len(b.refs) for b in sim.blocks])\n", + "# print(\"Total number of refs created\", total_refs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a252486b-8c25-4d79-9dae-085a879a3112", + "metadata": {}, + "outputs": [], + "source": [ + "# max_reorg = sim.attack_on_block(sim.blocks[3])\n", + "# print(\"reorg:\", max_reorg)\n", + "\n", + "# visualize_chain(sim)" + ] + }, + { + "cell_type": "markdown", + "id": "81d29c1d-98cb-4ab3-8f66-ea032be30eb1", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "## Attack all blocks (single-threaded)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f9d62ff7-e03a-4ef0-9521-00466019feb5", + "metadata": {}, + "outputs": [], + "source": [ + "# def attack_all_blocks(sim) -> dict[int, any]:\n", + "# sim.run()\n", + "# baseline = sim.clone_for_attack()\n", + "\n", + "# results: dict[int, any] = {}\n", + "# adv_id = baseline.params.N - 1\n", + "# for blk in baseline.blocks:\n", + "# if blk.leader == adv_id:\n", + "# continue\n", + "# sim_copy = baseline.clone_for_attack()\n", + "# results[blk.id] = sim_copy.attack_on_block(sim_copy.blocks[blk.id])\n", + "# return results\n", + "\n", + "# random.seed(0)\n", + "# np.random.seed(0)\n", + "\n", + "# sim = Sim(\n", + "# params=Params(\n", + "# SLOTS=1000,\n", + "# f=0.25,\n", + "# window_size=30,\n", + "# use_deps=True,\n", + "# adversary_control = 0.4,\n", + "# honest_stake = np.random.pareto(10, 1000)\n", + "# ),\n", + "# network=blend_net\n", + "# )\n", + "\n", + "# results = attack_all_blocks(sim)\n", + "# print(\"(ID, reorg length) -> \", max(results.items(), key=lambda x: x[1]))" + ] + }, + { + "cell_type": "markdown", + "id": "ed750bd2-c083-4768-a317-c4f8aa487cd7", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "## Attack all blocks (parallelized)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d5f0a0b9-732e-4120-a0ec-da4e867994d9", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import platform\n", + "\n", + "home_dir = os.path.expanduser(\"~\")\n", + "joblib_temp = os.path.join(home_dir, \"joblib_tmp\")\n", + "\n", + "def parallel_attack_all_blocks(sim, skip_last: int = 0):\n", + " sim.run()\n", + "\n", + " def _attack_block_copy(orig_sim, 
block_id):\n",
+ "        sim_copy = orig_sim.clone_for_attack()\n",
+ "        result = sim_copy.attack_on_block(sim_copy.blocks[block_id])\n",
+ "        return block_id, result\n",
+ "\n",
+ "    # Configuration for the Nomos experiments server\n",
+ "    n_jobs = 26 if platform.system() == \"Linux\" else 8\n",
+ "    \n",
+ "    blocks = sim.blocks if skip_last == 0 else sim.blocks[:-skip_last]\n",
+ "    block_ids = [b.id for b in blocks]\n",
+ "    attacked_results = Parallel(\n",
+ "        n_jobs=n_jobs,\n",
+ "        backend=\"loky\",\n",
+ "        temp_folder=joblib_temp\n",
+ "    )(\n",
+ "        delayed(_attack_block_copy)(sim, bid)\n",
+ "        for bid in block_ids\n",
+ "    )\n",
+ "    return (attacked_results, sim.blocks)\n",
+ "\n",
+ "def plot_attack_histogram_binned(attacked_results, bin_size=30, figsize=(12, 6), label_fontsize=8):\n",
+ "    indices = [result for _, result in attacked_results]\n",
+ "    max_idx = max(indices)\n",
+ "    \n",
+ "    bin_start = 0\n",
+ "    bin_end = ((max_idx // bin_size) + 1) * bin_size\n",
+ "    bins = list(range(bin_start, bin_end + bin_size, bin_size))\n",
+ "    \n",
+ "    labels = [f\"{b // bin_size} ({b})\" for b in bins]\n",
+ "    \n",
+ "    plt.figure(figsize=figsize)\n",
+ "    plt.hist(indices, bins=bins, edgecolor='black')\n",
+ "    plt.xlabel('Reorg in virtual blocks and slots')\n",
+ "    plt.ylabel('Frequency')\n",
+ "    plt.title(f'Histogram of Attack Results (bins of {bin_size})')\n",
+ "    plt.xticks(bins, labels, rotation='vertical', fontsize=label_fontsize)\n",
+ "    plt.tight_layout()\n",
+ "    plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3050fdf6-c22a-44de-b1f0-77af5b42d96d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# %%time\n",
+ "# random.seed(0)\n",
+ "# np.random.seed(0)\n",
+ "\n",
+ "# sim = Sim(\n",
+ "#     params=Params(\n",
+ "#         SLOTS=15000,\n",
+ "#         f=0.25,\n",
+ "#         window_size=30,\n",
+ "#         use_deps=True,\n",
+ "#         adversary_control = 0.3,\n",
+ "#         honest_stake = np.random.pareto(10, 1000)\n",
+ "#     ),\n",
+ "#     network=blend_net\n",
+ "# )\n",
+ "# attack_result = parallel_attack_all_blocks(sim)\n",
+ "# plot_attack_histogram_binned(attack_result, bin_size=30)\n",
+ "\n",
+ "# print(max(attack_result, key=lambda x: x[1]))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1b5f62a7-d59d-4162-9a5a-5220c061a6b5",
+ "metadata": {},
+ "source": [
+ "## Multiple Experiments and frequency analysis"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0953f236-7176-4787-8a21-77450835f728",
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
+ "outputs": [],
+ "source": [
+ "def plot_attack_histogram_frequency(\n",
+ "    attacked_results: list[tuple[int, int]],\n",
+ "    total_honest_blocks: int,\n",
+ "    adversary_stake: float,\n",
+ "    bin_size: int = 30,\n",
+ "    figsize: tuple[int, int] = (12, 6),\n",
+ "    label_fontsize: int = 8\n",
+ "):\n",
+ "    # Extract reorg lengths from (block_id, length) tuples\n",
+ "    all_reorgs = [length for _, length in attacked_results]\n",
+ "    if not all_reorgs:\n",
+ "        print(\"No reorgs to plot\")\n",
+ "        return\n",
+ "    \n",
+ "    # Compute bins\n",
+ "    max_idx = max(all_reorgs)\n",
+ "    bin_end = ((max_idx // bin_size) + 1) * bin_size\n",
+ "    bins = np.arange(0, bin_end + bin_size, bin_size)\n",
+ "\n",
+ "    # Compute weights (relative frequencies) for histogram\n",
+ "    weights = np.ones_like(all_reorgs, dtype=float) / total_honest_blocks\n",
+ "    counts, edges = np.histogram(all_reorgs, bins=bins, weights=weights)\n",
+ "\n",
+ "    plt.figure(figsize=figsize)\n",
+ "\n",
+ "    # Bar plot with a muted blue and slight transparency\n",
+ "    bars = plt.bar(\n",
+ "        edges[:-1],\n",
+ "        counts,\n",
+ "        width=bin_size,\n",
+ "        align='edge',\n",
+ "        edgecolor='#555555',\n",
+ "        color='#4C72B0',\n",
+ "        alpha=0.8,\n",
+ "        label='Grouped relative frequency (virtual blocks)'\n",
+ "    )\n",
+ "\n",
+ "    # Compute exact relative frequency (no bins)\n",
+ "    unique_vals, raw_counts = np.unique(all_reorgs, return_counts=True)\n",
+ "    exact_rel_freq = raw_counts / total_honest_blocks\n",
+ "\n",
+ "    # Line plot of exact relative frequencies with a contrasting orange and thinner line\n",
+ "    plt.plot(\n",
+ "        unique_vals,\n",
+ "        exact_rel_freq,\n",
+ "        marker='o',\n",
+ "        markersize=4,\n",
+ "        linestyle='-',\n",
+ "        color='#DD8452',\n",
+ "        linewidth=1.0,\n",
+ "        label='Relative frequency per slot'\n",
+ "    )\n",
+ "\n",
+ "    plt.yscale('log')\n",
+ "    plt.xlabel('Reorg in virtual blocks and slots')\n",
+ "    plt.ylabel('Relative frequency (log scale)')\n",
+ "    plt.title(f'Log-Scaled Histogram over {total_honest_blocks} Honest Blocks ({adversary_stake * 100}% adversarial stake)')\n",
+ "\n",
+ "    # Annotate bars\n",
+ "    for bar, height in zip(bars, counts):\n",
+ "        if height > 0:\n",
+ "            x = bar.get_x() + bar.get_width() / 2\n",
+ "            y = height\n",
+ "            plt.text(\n",
+ "                x,\n",
+ "                y,\n",
+ "                f'{height:.2e}',\n",
+ "                ha='center',\n",
+ "                va='bottom',\n",
+ "                fontsize=label_fontsize\n",
+ "            )\n",
+ "\n",
+ "    plt.grid(True, which='both', axis='y', linestyle='--', linewidth=0.3)\n",
+ "    plt.xticks(\n",
+ "        edges,\n",
+ "        [f\"{int(edge // bin_size)} ({int(edge)})\" for edge in edges],\n",
+ "        rotation='vertical',\n",
+ "        fontsize=label_fontsize\n",
+ "    )\n",
+ "    plt.legend(fontsize=label_fontsize)\n",
+ "    plt.tight_layout()\n",
+ "    plt.savefig('simulation_results_histogram.png', dpi=300, bbox_inches='tight')\n",
+ "    plt.show()\n",
+ "\n",
+ "def fully_successful_attacks(\n",
+ "    attacked_results: list[tuple[int, int]],\n",
+ "    sim: Sim,\n",
+ "    tolerance: int = 0\n",
+ ") -> tuple[int, list]:\n",
+ "    \"\"\"\n",
+ "    Count how many attacks were “fully successful,” meaning\n",
+ "    reorg_length ≥ ( (S−1) − fslot ) − tolerance,\n",
+ "    where fslot is the honest block’s slot.\n",
+ "\n",
+ "    Read the attack_on_block function for more details\n",
+ "    \"\"\"\n",
+ "    S = sim.params.SLOTS\n",
+ "    count = 0\n",
+ "\n",
+ "    successful = []\n",
+ "    for honest_id, length in attacked_results:\n",
+ "        if length < 0:\n",
+ "            continue\n",
+ "\n",
+ "        fslot = sim.blocks[honest_id].slot\n",
+ "        max_len = (S - 1) - fslot\n",
+ "        if length >= max_len - tolerance:\n",
+ "            successful.append([honest_id, fslot, length])\n",
+ "            count += 1\n",
+ "\n",
+ "    return count, successful"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "35f3346b-0ffa-4aa4-8be4-237a25a0f4a8",
+ "metadata": {
+ "jupyter": {
+ "source_hidden": true
+ }
+ },
+ "outputs": [],
+ "source": [
+ "def run_multiple_attacks(\n",
+ "    n_runs: int,\n",
+ "    slots: int,\n",
+ "    f: float,\n",
+ "    window_size: int,\n",
+ "    use_deps: bool,\n",
+ "    adversary_control: float,\n",
+ "    network,\n",
+ "    base_seed: int = 0,\n",
+ "    skip_last: int = 100,\n",
+ ") -> tuple[list[tuple[int, int]], int, Sim]:\n",
+ "    \"\"\"\n",
+ "    Runs `parallel_attack_all_blocks` over `n_runs` independent seeds.\n",
+ "    Returns:\n",
+ "      - all_attacks: flattened list of (block_id, reorg_length) tuples\n",
+ "      - total_honest_blocks: count of honest blocks across all runs\n",
+ "      - last_sim: the Sim instance from the final run\n",
+ "    \"\"\"\n",
+ "    assert slots > 500, \"Must simulate more than 500 slots\"\n",
+ "    all_attacks: list[tuple[int, int]] = []\n",
+ "    
total_honest_blocks = 0\n", + " last_sim = None\n", + "\n", + " for i in range(n_runs):\n", + " print(f\"Executing run {i + 1}/{n_runs}\")\n", + " seed = base_seed + i\n", + " random.seed(seed)\n", + " np.random.seed(seed)\n", + "\n", + " params = Params(\n", + " SLOTS=slots,\n", + " f=f,\n", + " window_size=window_size,\n", + " use_deps=use_deps,\n", + " adversary_control=adversary_control,\n", + " honest_stake=np.random.pareto(10, 1000)\n", + " )\n", + " sim = Sim(params=params, network=network)\n", + " last_sim = sim # save for return\n", + "\n", + " attacked, blocks = parallel_attack_all_blocks(sim, skip_last=skip_last)\n", + " all_attacks.extend(attacked)\n", + "\n", + " total_honest_blocks += len(blocks)-skip_last\n", + "\n", + " return all_attacks, total_honest_blocks, last_sim" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86554739-64cd-4a8b-9187-de5991fc9fce", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "ADVERSARY_STAKE = 0.40\n", + "# Run multiple experiments\n", + "attacked_results, total_blocks, sim = run_multiple_attacks(\n", + " n_runs=1,\n", + " slots=5000,\n", + " f=0.25,\n", + " window_size=30,\n", + " use_deps=True,\n", + " adversary_control=ADVERSARY_STAKE,\n", + " network=blend_net,\n", + " base_seed=0,\n", + " # these are honest blocks that won't be attacked and will not count in the stats, but give room for the attack.\n", + " skip_last=500\n", + ")\n", + "\n", + "# Plot normalized histogram:\n", + "plot_attack_histogram_frequency(attacked_results, total_blocks, ADVERSARY_STAKE, bin_size=30)\n", + "\n", + "# Print the most extreme reorg seen:\n", + "reorg_lengths = [length for _, length in attacked_results]\n", + "max_length = max(reorg_lengths) if reorg_lengths else 0\n", + "print(f\"Largest reorg length: {max_length} (~{(max_length // 30)} virtual blocks of 30-seconds)\")\n", + "\n", + "fully = fully_successful_attacks(attacked_results, sim, tolerance=500) # Very generous tolerance\n", + "print(f\"Successful attacks: {fully[0]} ({fully[0]/total_blocks*100:.2f}%)\")\n", + "print(f\"Successful attacks list [target block, target slot, reorg length]): {fully[1]}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "myenv", + "language": "python", + "name": "myenv" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/cryptarchia-v2/requirements.txt b/cryptarchia-v2/requirements.txt new file mode 100644 index 0000000..b9d44a2 --- /dev/null +++ b/cryptarchia-v2/requirements.txt @@ -0,0 +1,7 @@ +jupyterlab +ipykernel +numpy +matplotlib +pyvis +joblib +pulp