Merge 254c6d67b2c764fc71916f35d1e67eb941a9013c into f36c3c85ba31ed0fd27c3650794a511ad994a661

This commit is contained in:
Arunima Chaudhuri 2025-02-19 22:28:36 +05:30 committed by GitHub
commit 89f361732c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 1082 additions and 209 deletions

View File

@ -3,6 +3,8 @@
import random
import collections
import logging
from collections import defaultdict
import threading
from DAS.block import *
from DAS.tools import shuffled, shuffledDict, unionOfSamples
from bitarray.util import zeros
@ -32,11 +34,13 @@ class Neighbor:
class Validator:
i = 0
def __init__(self, rowIDs, columnIDs):
self.rowIDs = rowIDs
self.columnIDs = columnIDs
def initValidator(nbRows, custodyRows, nbCols, custodyCols):
random.seed(10 + Validator.i); Validator.i += 1
rowIDs = set(random.sample(range(nbRows), custodyRows))
columnIDs = set(random.sample(range(nbCols), custodyCols))
return Validator(rowIDs, columnIDs)
@ -48,7 +52,7 @@ class Node:
"""It returns the node ID."""
return str(self.ID)
def __init__(self, ID, amIproposer, amImalicious, logger, shape, config,
def __init__(self, ID, amIproposer, nodeClass, amImalicious, logger, shape, config,
validators, rows = set(), columns = set()):
"""It initializes the node, and eventual validators, following the simulation configuration in shape and config.
@ -76,13 +80,36 @@ class Node:
self.repairedSampleCount = 0
self.logger = logger
self.validators = validators
self.received_gossip = defaultdict(list)
self.peer_connections = set()
# query methods
self.exponential_growth = False
self.linear_growth = False
self.linear_constant_growth = False
self.hybrid_growth = False
self.exponential_constant_growth = True
self.linear_growth_constant = 10
# query results
self.query_times = []
self.query_total_time = None
self.all_original_retries = []
self.query_results = None
self.original_retries_sum = None
# Cache latency values based on horizon level
self.latency_cache = {
"level_1": [random.uniform(0.1, 0.2) for _ in range(1000)],
"level_2": [random.uniform(0.2, 0.3) for _ in range(1000)],
}
if amIproposer:
self.nodeClass = 0
self.rowIDs = range(shape.nbRows)
self.columnIDs = range(shape.nbCols)
else:
self.nodeClass = 1 if (self.ID <= shape.numberNodes * shape.class1ratio) else 2
self.nodeClass = nodeClass
self.vpn = len(validators) #TODO: needed by old code, change to fn
self.rowIDs = set(rows)
@ -96,13 +123,13 @@ class Node:
self.logger.warning("Row custody (*vpn) larger than number of rows!", extra=self.format)
self.rowIDs = range(self.shape.nbRows)
else:
self.rowIDs = set(random.sample(range(self.shape.nbRows), self.vpn*self.shape.custodyRows))
self.rowIDs = set(random.sample(range(self.shape.nbRows), max(self.vpn*self.shape.custodyRows, self.shape.minCustodyRows)))
if (self.vpn * self.shape.custodyCols) > self.shape.nbCols:
self.logger.warning("Column custody (*vpn) larger than number of columns!", extra=self.format)
self.columnIDs = range(self.shape.nbCols)
else:
self.columnIDs = set(random.sample(range(self.shape.nbCols), self.vpn*self.shape.custodyCols))
self.columnIDs = set(random.sample(range(self.shape.nbCols), max(self.vpn*self.shape.custodyCols, self.shape.minCustodyCols)))
self.rowNeighbors = collections.defaultdict(dict)
self.columnNeighbors = collections.defaultdict(dict)
@ -120,10 +147,8 @@ class Node:
# 1 Mbps ~= 1e6 mbps * 0.050 s / (560*8) bits ~= 11 segments/timestep
if self.amIproposer:
self.bwUplink = shape.bwUplinkProd
elif self.nodeClass == 1:
self.bwUplink = shape.bwUplink1
else:
self.bwUplink = shape.bwUplink2
self.bwUplink = shape.nodeTypes["classes"][self.nodeClass]["def"]['bwUplinks']
self.bwUplink *= 1e3 / 8 * config.stepDuration / config.segmentSize
self.repairOnTheFly = config.evalConf(self, config.repairOnTheFly, shape)
@ -504,6 +529,249 @@ class Node:
if self.statsTxInSlot >= self.bwUplink:
return
def sendGossip(self, peer, segments_to_send):
    """Advertise a list of (row, column) segment IDs to a single peer.

    Appends a 'have' message (this node's ID plus the segment list) to the
    peer's received_gossip inbox, keyed by the sender's ID, and bumps the
    peer's received-message counter.
    """
    advert = {'source': self.ID, 'segments': segments_to_send}
    inbox = peer.received_gossip[self.ID]
    inbox.append(advert)
    peer.msgRecvCount += 1
    self.logger.debug(f"Gossip sent to {peer.ID}: {peer.received_gossip}", extra=self.format)
def processReceivedGossip(self, simulator):
    """
    Processes received gossip messages to request and receive data segments.
    For each segment not already received, it simulates requesting the segment,
    logs the request and receipt, and updates the segment status and relevant counters.
    """
    for sender, have_infos in self.received_gossip.items():
        for have_info in have_infos:
            for rowID, columnID in have_info['segments']:
                # Fetch only segments we are missing AND that fall inside one
                # of our custody rows/columns.
                if not self.receivedBlock.getSegment(rowID, columnID) and (rowID in self.rowIDs or columnID in self.columnIDs):
                    # request for the segment
                    self.logger.debug(f"Requesting segment ({rowID}, {columnID}) from {have_info['source']}", extra=self.format)
                    self.msgSentCount += 1
                    # source sends the segment: the advertiser's TX counters are
                    # charged, not this node's.
                    self.logger.debug(f"Sending segment ({rowID}, {columnID}) to {self.ID} from {have_info['source']}", extra=self.format)
                    simulator.validators[have_info['source']].sampleSentCount += 1
                    simulator.validators[have_info['source']].statsTxInSlot += 1
                    # receive the segment
                    self.receivedBlock.setSegment(rowID, columnID)
                    self.sampleRecvCount += 1
                    self.logger.debug(f"Received segment ({rowID}, {columnID}) via gossip from {have_info['source']}", extra=self.format)
    # NOTE(review): the request/transfer is modeled as instantaneous within
    # this call and the sender's bwUplink budget is not checked — confirm
    # that bypassing the bandwidth model here is intended.
    self.received_gossip.clear()
def gossip(self, simulator):
    """
    Periodically sends gossip messages to a random subset of nodes to share information
    about data segments. The process involves:
    1. Selecting a random subset of nodes.
    2. Sending the node's current state (row and column IDs) to these nodes.
    3. Process the received gossip and update their state accordingly.
    This ensures data dissemination across the network,
    occurring at intervals defined by the HEARTBEAT timer.
    """
    total_nodes = simulator.shape.numberNodes
    # NOTE(review): the peer count is uniform in [1, total_nodes-1], so on
    # average half the network is gossiped to every heartbeat — confirm this
    # fan-out is intended rather than a small fixed mesh size.
    num_peers = random.randint(1, total_nodes - 1)
    # Node 0 (the proposer) is excluded from the candidate peers.
    peers = random.sample(range(1, total_nodes), num_peers)
    # Advertise every segment of the block this node currently holds.
    segments_to_send = []
    for rID in range(0, self.shape.nbRows):
        for cID in range(0, self.shape.nbCols):
            if self.block.getSegment(rID, cID):
                segments_to_send.append((rID, cID))
    if segments_to_send:
        for peer in peers:
            self.sendGossip(simulator.validators[peer], segments_to_send)
            self.msgSentCount += 1
            # The peer pulls the advertised segments immediately.
            simulator.validators[peer].processReceivedGossip(simulator)
    # NOTE(review): this bandwidth check is the function's last statement, so
    # the early return has no effect; it likely belongs inside the send loop
    # to cap per-step uplink usage — confirm.
    if self.statsTxInSlot >= self.bwUplink:
        return
def get_latency(self, peer_to_query, original_peers_with_custody, original_peers_with_custody_level_2):
    """Return a simulated query latency for a peer based on its horizon level.

    Level-1 peers (direct custody candidates) draw from the faster cached
    latency pool, level-2 peers from the slower one; peers in neither set
    yield None.
    """
    if peer_to_query in original_peers_with_custody:
        pool = self.latency_cache["level_1"]
    elif peer_to_query in original_peers_with_custody_level_2:
        pool = self.latency_cache["level_2"]
    else:
        return None
    return random.choice(pool)
def generate_random_samples(self, num_queries):
    """Return num_queries random (row, column) coordinates within the block grid."""
    max_row = self.shape.nbRows - 1
    max_col = self.shape.nbCols - 1
    samples = []
    for _ in range(num_queries):
        samples.append((random.randint(0, max_row), random.randint(0, max_col)))
    return samples
def query_peer(self, peer_to_query, original_peers_with_custody, original_peers_with_custody_level_2, simulator, sample_row, sample_col):
    """Query peer with custody, simulate latency, and return the time taken.

    Returns a (status, time) tuple where status is 'success', 'timeout', or
    'invalid'. Timeouts and invalid responses both cost a flat 0.5 time units.
    """
    if simulator.validators[peer_to_query].amImalicious:
        # Malicious peers never answer; modeled as a timeout.
        return 'timeout', 0.5
    elif sample_row in simulator.validators[peer_to_query].rowIDs or sample_col in simulator.validators[peer_to_query].columnIDs:
        # Peer has custody of the row or column but is missing the segment.
        if not simulator.validators[peer_to_query].block.getSegment(sample_row, sample_col):
            return 'timeout', 0.5
    # NOTE(review): a peer with custody of neither the row nor the column
    # skips the segment check entirely and can still return 'success' below
    # — confirm this fall-through is intended.
    latency = self.get_latency(peer_to_query, original_peers_with_custody, original_peers_with_custody_level_2)
    if latency:
        return 'success', latency
    # get_latency returned None: peer lies outside both custody horizons.
    return 'invalid', 0.5
def generate_growth_series(self):
    """Build the batch-size schedule used when querying peers in rounds.

    Exactly one growth-strategy flag on the node is expected to be set; the
    flags are checked in a fixed priority order (exponential, linear,
    linear-constant, hybrid, exponential-constant).

    Raises:
        ValueError: if no growth-strategy flag is set.
    """
    if self.exponential_growth:
        return [2 ** exp for exp in range(1000)]
    if self.linear_growth:
        return [1] + list(range(10, 201, self.linear_growth_constant))
    if self.linear_constant_growth:
        # Ramp up to 40, then stay constant.
        return [1, 10, 20, 30, 40] + [40] * 1000
    if self.hybrid_growth:
        doubling = [2 ** exp for exp in range(6)]  # [1, 2, 4, 8, 16, 32]
        ramp = list(range(64, 105, 10))            # [64, 74, 84, 94, 104]
        return doubling + ramp + [104] * 1000
    if self.exponential_constant_growth:
        doubling = [2 ** exp for exp in range(6)]  # [1, 2, 4, 8, 16, 32]
        return doubling + [32] * 1000
    raise ValueError("No growth method selected!")
def query_peer_with_retries(self, peers_with_custody, peers_with_custody_level_2, simulator, sample_row, sample_col, max_retries=10150):
    """Query peers in growing batches until a sample is found or peers run out.

    Level-1 peers are tried first; once exhausted, level-2 peers are tried
    with the same batch size. Each completed round ('retry') costs 0.5 time
    units, added to the successful peer's latency.

    Returns:
        (status, time, queried_peers, original_retries) where status is
        'success' or 'failure', queried_peers lists every peer contacted,
        and original_retries is the cumulative batch-size budget consumed.
    """
    queried_peers = []
    retries = 0
    original_retries = 0
    # De-duplicate, keeping full copies for the horizon-level latency lookup.
    peers_with_custody = list(set(peers_with_custody))
    peers_with_custody_level_2 = list(set(peers_with_custody_level_2))
    original_peers_with_custody = peers_with_custody[:]
    original_peers_with_custody_level_2 = peers_with_custody_level_2[:]
    random.shuffle(peers_with_custody)
    random.shuffle(peers_with_custody_level_2)
    growth_series = self.generate_growth_series()
    for num_peers_to_query in growth_series:
        if not peers_with_custody and not peers_with_custody_level_2:
            break
        # NOTE(review): the budget grows by the nominal batch size even when
        # fewer peers remain to query — confirm this accounting is intended.
        original_retries += num_peers_to_query
        # Query Level 1 peers
        level_1_batch = peers_with_custody[:num_peers_to_query]
        for peer_to_query in level_1_batch:
            queried_peers.append(peer_to_query)
            result, time_taken = self.query_peer(peer_to_query, original_peers_with_custody, original_peers_with_custody_level_2, simulator, sample_row, sample_col)
            if result == 'success':
                # A success after more than 24 rounds still counts as failure.
                if retries <= 24:
                    return 'success', time_taken + 0.5 * retries, queried_peers, original_retries
                else:
                    return 'failure', time_taken + 0.5 * retries, queried_peers, original_retries
            elif result == 'timeout':
                if retries >= max_retries:
                    return 'failure', 0.5 * max_retries, queried_peers, original_retries
        # Remove queried Level 1 peers
        peers_with_custody = peers_with_custody[num_peers_to_query:]
        # If all Level 1 peers are queried, move to Level 2 peers
        if not peers_with_custody:
            level_2_batch = peers_with_custody_level_2[:num_peers_to_query]
            for peer_to_query in level_2_batch:
                queried_peers.append(peer_to_query)
                result, time_taken = self.query_peer(peer_to_query, original_peers_with_custody, original_peers_with_custody_level_2, simulator, sample_row, sample_col)
                if result == 'success':
                    if retries <= 24:
                        return 'success', time_taken + 0.5 * retries, queried_peers, original_retries
                    else:
                        return 'failure', time_taken + 0.5 * retries, queried_peers, original_retries
                elif result == 'timeout':
                    if retries >= max_retries:
                        return 'failure', 0.5 * max_retries, queried_peers, original_retries
            # Remove queried Level 2 peers
            peers_with_custody_level_2 = peers_with_custody_level_2[num_peers_to_query:]
        retries += 1
    # All candidate peers exhausted without a timely success.
    return 'failure', 0.5 * retries, queried_peers, original_retries
def query_peer_for_samples(self, simulator):
    """Run the per-node sampling phase: try to fetch 75 random samples.

    For each sampled (row, column) the node either already covers it via its
    own custody (cost 0), or queries Level-1 peers (direct connections) and
    then Level-2 peers (peers of peers) with batched retries. Aggregated
    results are stored on the node (query_times, query_total_time,
    all_original_retries, query_results, original_retries_sum). Malicious
    nodes skip sampling entirely.
    """
    if self.amImalicious:
        return
    num_queries = 75
    samples = self.generate_random_samples(num_queries)
    query_times = []
    all_original_retries = []
    results = 'success'
    original_retries_sum = 0
    for sample_row, sample_col in samples:
        # Samples inside our own custody — or reconstructible because we
        # hold at least K rows/columns — cost nothing.
        if (sample_row in self.rowIDs or sample_col in self.columnIDs or
            len(self.columnIDs) >= self.shape.nbColsK or
            len(self.rowIDs) >= self.shape.nbRowsK):
            query_times.append(0)
            all_original_retries.append(0)
        else:
            # Level-1 candidates: direct peers that can serve the sample.
            peers_with_custody = set()
            for peer_id in self.peer_connections:
                if (sample_row in simulator.validators[peer_id].rowIDs or
                    sample_col in simulator.validators[peer_id].columnIDs or
                    len(simulator.validators[peer_id].rowIDs) >= self.shape.nbRowsK or
                    len(simulator.validators[peer_id].columnIDs) >= self.shape.nbColsK):
                    peers_with_custody.add(peer_id)
            peers_with_custody = list(peers_with_custody)
            # Level-2 candidates: peers of peers that can serve the sample.
            peers_with_custody_level_2 = set()
            for p in self.peer_connections:
                for peer_l2 in simulator.validators[p].peer_connections:
                    # BUGFIX: the column test previously checked rowIDs
                    # (sample_col in ...rowIDs), so level-2 column custody
                    # was never detected; use columnIDs as in the level-1 loop.
                    if (sample_row in simulator.validators[peer_l2].rowIDs or
                        sample_col in simulator.validators[peer_l2].columnIDs or
                        len(simulator.validators[peer_l2].rowIDs) >= self.shape.nbRowsK or
                        len(simulator.validators[peer_l2].columnIDs) >= self.shape.nbColsK):
                        peers_with_custody_level_2.add(peer_l2)
            peers_with_custody_level_2 = list(peers_with_custody_level_2)
            # Never query ourselves.
            if self.ID in peers_with_custody:
                peers_with_custody.remove(self.ID)
            if self.ID in peers_with_custody_level_2:
                peers_with_custody_level_2.remove(self.ID)
            result, time_taken, queried_peers_list, original_retries = self.query_peer_with_retries(
                peers_with_custody, peers_with_custody_level_2, simulator, sample_row, sample_col
            )
            query_times.append(time_taken)
            if result == 'failure':
                results = 'failure'
            original_retries_sum += original_retries
            all_original_retries.append(original_retries)
    # Queries are modeled as parallel: the slowest sample dominates.
    total_time = max(query_times)
    self.query_times = query_times[:]
    self.query_total_time = total_time
    self.all_original_retries = all_original_retries[:]
    self.query_results = results
    self.original_retries_sum = original_retries_sum
def send(self):
""" Send as much as we can in the timestep, limited by bwUplink."""

View File

@ -83,8 +83,8 @@ class Observer:
sampleProgress = arrived / expected
nodeProgress = ready / (len(validators)-1)
validatorCnt = sum([v.vpn for v in validators[1:]])
validatorAllProgress = validatedall / validatorCnt
validatorProgress = validated / validatorCnt
validatorAllProgress = (validatedall / validatorCnt) if validatorCnt != 0 else 1
validatorProgress = (validated / validatorCnt) if validatorCnt != 0 else 1
return missingSamples, sampleProgress, nodeProgress, validatorAllProgress, validatorProgress
@ -96,7 +96,7 @@ class Observer:
return np.mean(l) if l else np.NaN
trafficStats = {}
for cl in range(0,3):
for cl in self.config.nodeClasses:
Tx = [v.statsTxInSlot for v in validators if v.nodeClass == cl]
Rx = [v.statsRxInSlot for v in validators if v.nodeClass == cl]
RxDup = [v.statsRxDupInSlot for v in validators if v.nodeClass == cl]

View File

@ -23,8 +23,14 @@ class Result:
self.restoreRowCount = [0] * shape.numberNodes
self.restoreColumnCount = [0] * shape.numberNodes
self.repairedSampleCount = [0] * shape.numberNodes
self.query_times = [[] for _ in range(shape.numberNodes)]
self.query_total_time = [None] * shape.numberNodes
self.all_original_retries = [[] for _ in range(shape.numberNodes)]
self.query_results = [''] * shape.numberNodes
self.original_retries_sum = [None] * shape.numberNodes
self.numberNodes = shape.numberNodes
self.class1ratio = shape.class1ratio
def copyValidators(self, validators):
"""Copy information from simulator.validators to result."""
@ -36,6 +42,18 @@ class Result:
self.restoreRowCount[i] = validators[i].restoreRowCount
self.restoreColumnCount[i] = validators[i].restoreColumnCount
self.repairedSampleCount[i] = validators[i].repairedSampleCount
if not validators[i].amImalicious or not validators[i].amIproposer:
self.query_times[i] = validators[i].query_times[:]
self.query_total_time[i] = validators[i].query_total_time
self.all_original_retries[i] = validators[i].all_original_retries[:]
self.query_results[i] = validators[i].query_results
self.original_retries_sum[i] = validators[i].original_retries_sum
else:
self.query_times[i] = None
self.query_total_time[i] = None
self.all_original_retries[i] = None
self.query_results[i] = None
self.original_retries_sum[i] = None
def populate(self, shape, config, missingVector):
"""It populates part of the result data inside a vector."""

View File

@ -3,7 +3,7 @@
class Shape:
"""This class represents a set of parameters for a specific simulation."""
def __init__(self, nbCols, nbColsK, nbRows, nbRowsK,
numberNodes, failureModel, failureRate, maliciousNodes, class1ratio, custodyRows, custodyCols, vpn1, vpn2, netDegree, bwUplinkProd, bwUplink1, bwUplink2, run):
numberNodes, failureModel, failureRate, maliciousNodes, custodyRows, custodyCols, minCustodyRows, minCustodyCols, netDegree, numPeersMin, numPeersMax, bwUplinkProd, run, nodeTypes):
"""Initializes the shape with the parameters passed in argument."""
self.run = run
self.numberNodes = numberNodes
@ -15,14 +15,14 @@ class Shape:
self.failureRate = failureRate
self.maliciousNodes = maliciousNodes
self.netDegree = netDegree
self.class1ratio = class1ratio
self.numPeers = [numPeersMin, numPeersMax]
self.custodyRows = custodyRows
self.custodyCols = custodyCols
self.vpn1 = vpn1
self.vpn2 = vpn2
self.minCustodyRows = minCustodyRows
self.minCustodyCols = minCustodyCols
self.bwUplinkProd = bwUplinkProd
self.bwUplink1 = bwUplink1
self.bwUplink2 = bwUplink2
self.nodeTypes = nodeTypes
self.nodeClasses = [0] + [_k for _k in nodeTypes["classes"].keys()]
self.randomSeed = ""
def __repr__(self):
@ -35,17 +35,16 @@ class Shape:
shastr += "-nn-"+str(self.numberNodes)
shastr += "-fm-"+str(self.failureModel)
shastr += "-fr-"+str(self.failureRate)
shastr += "-c1r-"+str(self.class1ratio)
shastr += "-cusr-"+str(self.custodyRows)
shastr += "-cusc-"+str(self.custodyCols)
shastr += "-vpn1-"+str(self.vpn1)
shastr += "-vpn2-"+str(self.vpn2)
shastr += "-mcusr-"+str(self.minCustodyRows)
shastr += "-mcusc-"+str(self.minCustodyCols)
shastr += "-bwupprod-"+str(self.bwUplinkProd)
shastr += "-bwup1-"+str(self.bwUplink1)
shastr += "-bwup2-"+str(self.bwUplink2)
shastr += "-nd-"+str(self.netDegree)
shastr += "-r-"+str(self.run)
shastr += "-mn-"+str(self.maliciousNodes)
shastr += "-ntypes-"+str(self.nodeTypes['group'])
shastr += "-np-"+str(self.numPeers)
return shastr
def setSeed(self, seed):

View File

@ -44,6 +44,15 @@ class Simulator:
self.proposerPublishToR = config.evalConf(self, config.proposerPublishToR, shape)
self.proposerPublishToC = config.evalConf(self, config.proposerPublishToR, shape)
def getNodeClass(self, nodeIdx):
    """Map a node index to its node class using the configured class weights.

    Node counts per class are proportional to the class weights; the last
    class absorbs any integer-division remainder so the counts sum exactly
    to numberNodes. Returns the class whose cumulative range contains
    nodeIdx (None if nodeIdx exceeds numberNodes).
    """
    weights = [cls["weight"] for cls in self.shape.nodeTypes["classes"].values()]
    totalWeight = sum(weights)
    counts = [int(self.shape.numberNodes * w / totalWeight) for w in weights]
    cumulative = []
    running = 0
    for count in counts:
        running += count
        cumulative.append(running)
    # Absorb the rounding remainder into the last class.
    cumulative[-1] = self.shape.numberNodes
    for classPos, upperBound in enumerate(cumulative):
        if nodeIdx <= upperBound:
            # nodeClasses[0] is the proposer class, so real classes start at 1.
            return self.shape.nodeClasses[classPos + 1]
def initValidators(self):
"""It initializes all the validators in the network."""
self.glob = Observer(self.logger, self.shape)
@ -77,6 +86,7 @@ class Simulator:
assignedCols = []
maliciousNodesCount = int((self.shape.maliciousNodes / 100) * self.shape.numberNodes)
remainingMaliciousNodes = maliciousNodesCount
expectedSamples = []
for i in range(self.shape.numberNodes):
if i == 0:
@ -125,17 +135,21 @@ class Simulator:
self.logger.error("custodyRows has to be smaller than %d" % self.shape.nbRows)
vs = []
nodeClass = 1 if (i <= self.shape.numberNodes * self.shape.class1ratio) else 2
vpn = self.shape.vpn1 if (nodeClass == 1) else self.shape.vpn2
nodeClass = self.getNodeClass(i)
vpn = self.shape.nodeTypes["classes"][nodeClass]["def"]['validatorsPerNode']
for v in range(vpn):
vs.append(initValidator(self.shape.nbRows, self.shape.custodyRows, self.shape.nbCols, self.shape.custodyCols))
val = Node(i, int(not i!=0), amImalicious_value, self.logger, self.shape, self.config, vs)
val = Node(i, int(not i!=0), nodeClass, amImalicious_value, self.logger, self.shape, self.config, vs)
if i != 0:
i_expectedSamples = len(val.columnIDs) * self.shape.nbRows + len(val.rowIDs) * self.shape.nbCols - len(val.columnIDs) * len(val.rowIDs)
expectedSamples.append(i_expectedSamples)
if i == self.proposerID:
val.initBlock()
else:
val.logIDs()
self.validators.append(val)
self.result.addMetric("expectedSamples", expectedSamples)
assignedRows.sort()
assignedCols.sort()
self.logger.debug("Rows assigned: %s" % str(assignedRows), extra=self.format)
@ -144,6 +158,8 @@ class Simulator:
def initNetwork(self):
"""It initializes the simulated network."""
# rowChannels and columnChannels stores the nodes that have the custody of each row/col.
# rowChannel[rowID]->node ids that have the custody of that row
rowChannels = [[] for i in range(self.shape.nbRows)]
columnChannels = [[] for i in range(self.shape.nbCols)]
for v in self.validators:
@ -154,6 +170,8 @@ class Simulator:
columnChannels[id].append(v)
# Check rows/columns distribution
# distR and distC has how many nodes have the custody of every row
# len(r) gives how many nodes have the custody of that row
for r in rowChannels:
self.distR.append(len(r))
for c in columnChannels:
@ -218,6 +236,27 @@ class Simulator:
self.logger.debug("Val %d : rowN %s", i, self.validators[i].rowNeighbors, extra=self.format)
self.logger.debug("Val %d : colN %s", i, self.validators[i].columnNeighbors, extra=self.format)
def connect_peers(self):
    """Build the sampling overlay: give every node a random symmetric peer set.

    Each node's existing row/column mesh neighbors are counted as peers
    first; additional random peers are then added symmetrically until the
    node reaches its randomly drawn target count or candidates run out.
    """
    connections_range = self.shape.numPeers  # [min, max] peers per node
    for peer in self.validators:
        num_connections = random.randint(connections_range[0], connections_range[1])
        available_peers = [i for i in range(self.shape.numberNodes)]
        # Seed the peer set with the node's current row/column neighbors.
        for neighbor_dict in [peer.rowNeighbors, peer.columnNeighbors]:
            for inner_dict in neighbor_dict.values():
                for peers in inner_dict.values():
                    peer.peer_connections.add(peers.node.ID)
        # Candidates are all nodes not already connected.
        available_peers = list(set(available_peers) - peer.peer_connections)
        random.shuffle(available_peers)
        # NOTE(review): the other node's budget is checked against THIS
        # node's num_connections draw, not its own — confirm intended.
        while len(peer.peer_connections) < num_connections and available_peers:
            other_peer = available_peers.pop()
            if other_peer != peer.ID and len(self.validators[other_peer].peer_connections) < num_connections:
                peer.peer_connections.add(other_peer)
                self.validators[other_peer].peer_connections.add(peer.ID)
def initLogger(self):
"""It initializes the logger."""
logging.TRACE = 5
@ -273,19 +312,30 @@ class Simulator:
trafficStatsVector = []
malicious_nodes_not_added_count = 0
steps = 0
samplesReceived = []
while(True):
missingVector.append(missingSamples)
self.logger.debug("Expected Samples: %d" % expected, extra=self.format)
self.logger.debug("Missing Samples: %d" % missingSamples, extra=self.format)
oldMissingSamples = missingSamples
i_sampleReceived = []
self.logger.debug("PHASE SEND %d" % steps, extra=self.format)
for i in range(0,self.shape.numberNodes):
if not self.validators[i].amImalicious:
self.validators[i].send()
if steps % self.config.heartbeat == 0 and self.config.gossip:
self.logger.debug("PHASE GOSSIP %d" % steps, extra=self.format)
for i in range(1,self.shape.numberNodes):
if not self.validators[i].amImalicious:
self.validators[i].gossip(self)
self.logger.debug("PHASE RECEIVE %d" % steps, extra=self.format)
for i in range(1,self.shape.numberNodes):
self.validators[i].receiveRowsColumns()
self.logger.debug("PHASE SAMPLE COUNT %d" % steps, extra=self.format)
for i in range(1,self.shape.numberNodes):
i_sampleReceived.append(self.validators[i].sampleRecvCount)
self.logger.debug("PHASE RESTORE %d" % steps, extra=self.format)
for i in range(1,self.shape.numberNodes):
self.validators[i].restoreRows()
@ -294,7 +344,10 @@ class Simulator:
for i in range(0,self.shape.numberNodes):
self.validators[i].logRows()
self.validators[i].logColumns()
# Store sample received count by each node in current step
samplesReceived.append(i_sampleReceived)
# log TX and RX statistics
trafficStats = self.glob.getTrafficStats(self.validators)
self.logger.debug("step %d: %s" %
@ -311,12 +364,9 @@ class Simulator:
cnN = "nodes ready"
cnV = "validators ready"
cnT0 = "TX builder mean"
cnT1 = "TX class1 mean"
cnT2 = "TX class2 mean"
cnR1 = "RX class1 mean"
cnR2 = "RX class2 mean"
cnD1 = "Dup class1 mean"
cnD2 = "Dup class2 mean"
cnT = lambda i: f"TX class{i} mean"
cnR = lambda i: f"RX class{i} mean"
cnD = lambda i: f"Dup class{i} mean"
# if custody is based on the requirements of underlying individual
# validators, we can get detailed data on how many validated.
@ -325,19 +375,20 @@ class Simulator:
cnVv = validatorProgress
else:
cnVv = validatorAllProgress
progressVector.append({
cnS:sampleProgress,
cnN:nodeProgress,
cnV:cnVv,
cnT0: trafficStats[0]["Tx"]["mean"],
cnT1: trafficStats[1]["Tx"]["mean"],
cnT2: trafficStats[2]["Tx"]["mean"],
cnR1: trafficStats[1]["Rx"]["mean"],
cnR2: trafficStats[2]["Rx"]["mean"],
cnD1: trafficStats[1]["RxDup"]["mean"],
cnD2: trafficStats[2]["RxDup"]["mean"],
})
progressDict = {
cnS: sampleProgress,
cnN: nodeProgress,
cnV: cnVv,
cnT0: trafficStats[0]["Tx"]["mean"]
}
for nc in self.shape.nodeClasses:
if nc != 0:
progressDict[cnT(nc)] = trafficStats[nc]["Tx"]["mean"]
progressDict[cnR(nc)] = trafficStats[nc]["Rx"]["mean"]
progressDict[cnD(nc)] = trafficStats[nc]["RxDup"]["mean"]
progressVector.append(progressDict)
if missingSamples == oldMissingSamples:
if len(missingVector) > self.config.steps4StopCondition:
@ -352,23 +403,41 @@ class Simulator:
missingVector.append(missingSamples)
break
steps += 1
self.logger.debug("PHASE QUERY SAMPLE %d" % steps, extra=self.format)
for i in range(1,self.shape.numberNodes):
if not self.validators[i].amImalicious:
self.validators[i].query_peer_for_samples(self)
# Store sample received count by each node in each step
self.result.addMetric("samplesReceived", samplesReceived)
for i in range(0,self.shape.numberNodes):
if not self.validators[i].amIaddedToQueue :
malicious_nodes_not_added_count += 1
valid_rows = set()
valid_columns = set()
for i in range(0,self.shape.numberNodes):
column_ids = []
row_ids = []
for rID in self.validators[i].rowIDs:
row_ids.append(rID)
if not self.validators[i].amImalicious and not self.validators[i].amIproposer:
valid_rows.add(rID)
for cID in self.validators[i].columnIDs:
column_ids.append(cID)
if not self.validators[i].amImalicious and not self.validators[i].amIproposer:
valid_columns.add(cID)
self.logger.debug("List of columnIDs for %d node: %s", i, column_ids, extra=self.format)
self.logger.debug("List of rowIDs for %d node: %s", i, row_ids, extra=self.format)
if len(valid_rows) >= self.shape.nbRowsK or len(valid_columns) >= self.shape.nbColsK:
self.logger.debug("Block available within the non-malicious nodes.", extra=self.format)
else:
self.logger.debug("Block not available within the non-malicious nodes.", extra=self.format)
self.logger.debug("Number of malicious nodes not added to the send queue: %d" % malicious_nodes_not_added_count, extra=self.format)
malicious_nodes_not_added_percentage = (malicious_nodes_not_added_count * 100)/(self.shape.numberNodes)
self.logger.debug("Percentage of malicious nodes not added to the send queue: %d" % malicious_nodes_not_added_percentage, extra=self.format)
@ -381,4 +450,5 @@ class Simulator:
self.result.addMetric("progress", progress.to_dict(orient='list'))
self.result.populate(self.shape, self.config, missingVector)
self.result.copyValidators(self.validators)
print(self.validators[1].statsTxPerSlot)
return self.result

File diff suppressed because it is too large Load Diff

View File

@ -59,9 +59,18 @@ maliciousNodes = range(40,41,20)
# If True, the malicious nodes will be assigned randomly; if False, a predefined pattern may be used
randomizeMaliciousNodes = True
# When set to True, nodes will use the Gossip for communication
gossip = True
# Heartbeat interval for gossip messages in simulation steps
heartbeat = 20
# Per-topic mesh neighborhood size
netDegrees = range(8, 9, 2)
# Number of peers for sampling
numPeers = [[50, 150]]
# How many copies are sent out by the block producer
# Note, previously this was set to match netDegree
proposerPublishToR = "shape.netDegree"
@ -76,18 +85,31 @@ proposerPublishToC = "shape.netDegree"
validatorBasedCustody = False
custodyRows = range(2, 3, 2)
custodyCols = range(2, 3, 2)
# ratio of class1 nodes (see below for parameters per class)
class1ratios = [0.8]
# Number of validators per beacon node
validatorsPerNode1 = [1]
validatorsPerNode2 = [5]
minCustodyRows = range(2, 3, 2)
minCustodyCols = range(2, 3, 2)
# Set uplink bandwidth in megabits/second
bwUplinksProd = [200]
bwUplinks1 = [10]
bwUplinks2 = [200]
nodeTypesGroup = [
{
"group": "g1",
"classes": {
1: {
"weight": 70,
"def": {'validatorsPerNode': 1, 'bwUplinks': 10}
},
2: {
"weight": 20,
"def": {'validatorsPerNode': 5, 'bwUplinks': 200}
},
3: {
"weight": 10,
"def": {'validatorsPerNode': 10, 'bwUplinks': 500}
}
}
}
]
# Step duration in miliseconds (Classic RTT is about 100ms)
stepDuration = 50
@ -135,11 +157,47 @@ colsK = range(32, 65, 128)
rowsK = range(32, 65, 128)
def nextShape():
for nbCols, nbColsK, nbRows, nbRowsK, run, fm, fr, mn, class1ratio, chR, chC, vpn1, vpn2, nn, netDegree, bwUplinkProd, bwUplink1, bwUplink2 in itertools.product(
cols, colsK, rows, rowsK, runs, failureModels, failureRates, maliciousNodes, class1ratios, custodyRows, custodyCols, validatorsPerNode1, validatorsPerNode2, numberNodes, netDegrees, bwUplinksProd, bwUplinks1, bwUplinks2):
# Network Degree has to be an even number
params = {
"cols": cols,
"colsK": colsK,
"rows": rows,
"rowsK": rowsK,
"runs": runs,
"failureModels": failureModels,
"failureRates": failureRates,
"maliciousNodes": maliciousNodes,
"custodyRows": custodyRows,
"custodyCols": custodyCols,
"minCustodyRows": minCustodyRows,
"minCustodyCols": minCustodyCols,
"numberNodes": numberNodes,
"netDegrees": netDegrees,
"numPeers": numPeers,
"bwUplinksProd": bwUplinksProd,
"nodeTypesGroup": nodeTypesGroup,
}
for key, value in params.items():
if not value:
logging.warning(f"The parameter '{key}' is empty. Please assign a value and start the simulation.")
exit(1)
for (
nbCols, nbColsK, nbRows, nbRowsK, run, fm, fr, mn, chR, chC, minChR, minChC,
nn, netDegree, numPeersList, bwUplinkProd, nodeTypes
) in itertools.product(
cols, colsK, rows, rowsK, runs, failureModels, failureRates, maliciousNodes,
custodyRows, custodyCols, minCustodyRows, minCustodyCols, numberNodes,
netDegrees, numPeers, bwUplinksProd, nodeTypesGroup
):
numPeersMin, numPeersMax = numPeersList # Unpack here
# Ensure netDegree is even
if netDegree % 2 == 0:
shape = Shape(nbCols, nbColsK, nbRows, nbRowsK, nn, fm, fr, mn, class1ratio, chR, chC, vpn1, vpn2, netDegree, bwUplinkProd, bwUplink1, bwUplink2, run)
shape = Shape(
nbCols, nbColsK, nbRows, nbRowsK, nn, fm, fr, mn, chR, chC, minChR,
minChC, netDegree, numPeersMin, numPeersMax, bwUplinkProd, run, nodeTypes
)
yield shape
def evalConf(self, param, shape = None):

View File

@ -43,6 +43,7 @@ def runOnce(config, shape, execID):
sim.initLogger()
sim.initValidators()
sim.initNetwork()
sim.connect_peers()
result = sim.run()
sim.logger.info("Shape: %s ... Block Available: %d in %d steps" % (str(sim.shape.__dict__), result.blockAvailable, len(result.missingVector)), extra=sim.format)
@ -213,11 +214,11 @@ def study():
logger.info("A total of %d simulations ran in %d seconds" % (len(results), end-start), extra=format)
if config.visualization:
vis = Visualizer(execID, config)
vis.plotHeatmaps()
# vis = Visualizer(execID, config)
# vis.plotHeatmaps()
visual = Visualizor(execID, config, results)
visual.plotHeatmaps("nn", "fr")
# visual.plotHeatmaps("nn", "fr")
visual.plotAllHeatMaps()
if __name__ == "__main__":