diff --git a/casper4/casper_paper.md b/casper4/casper_paper.md index db75372..6f2d3ae 100644 --- a/casper4/casper_paper.md +++ b/casper4/casper_paper.md @@ -106,7 +106,11 @@ The commit-following part of this rule can be viewed in some ways as mirroring t ### Adding Dynamic Validator Sets -The above assumes that there is a single set of validators that never changes. In reality, however, we want validators to be able to join and leave. +The above assumes that there is a single set of validators that never changes. In reality, however, we want validators to be able to join and leave. This introduces two classes of considerations. First, all of the above math assumes that the validator set that prepares and commits any two given checkpoints is identical. If this is not the case, then there may be situations where two conflicting checkpoints get finalized, but no one can be slashed because they were finalized by two completely different validator sets. Hence, we need to think carefully about how validator set transitions happen and how one validator set "passes the baton" to the next. + + + +Second, we need to establish the specific mechanism by which validators can join and leave. We start off with a simple one: validators apply to join the validator set by sending a transaction containing (i) the ETH they want to deposit, (ii) the "validation code" (a kind of generalized public key), and (iii) the return address that their deposit will be sent to when they withdraw. If this transaction gets included during dynasty N, then they become part of dynasty N + 2, as well as all future dynasties until they decide to log off. The two-dynasty delay ensures that the joining transaction will be confirmed by the time dynasty N + 1 begins, and so any candidate block that initiates dynasty N + 2 is guaranteed to have the same validator set for dynasty N + 2. 
Leaving the validator set is symmetrical: validators can send a transaction to log off in dynasty N, which takes effect in dynasty N + 2. ### Economic Fundamentals diff --git a/elasticity/analyzer.py b/elasticity/analyzer.py new file mode 100644 index 0000000..c10e942 --- /dev/null +++ b/elasticity/analyzer.py @@ -0,0 +1,36 @@ +lines = open('data.csv').read().split('\n') +data = [(int(x[:x.find(',')]), float(x[x.find(',')+1:])) for x in lines if x] + +REPORT_THRESHOLD = 0.23 + +def get_error(scale, elasticity, growth): + err = 0 + bs_fac, fee_fac = 1 / (1 + elasticity), elasticity / (1 + elasticity) + for i, (block_size, avg_fee) in enumerate(data): + expected = scale * (1 + growth) ** i + actual = block_size ** bs_fac * avg_fee ** fee_fac + # if i >= len(data) - 6: + # err += ((expected / actual - 1) ** 2) * 2 + err += (expected / actual - 1) ** 2 + return err + +best = (0, 0, 0, 9999999999999999999999999.0) + +for scale in [1 * 1.05 ** x for x in range(300)]: + for elasticity in [x*0.025 for x in range(120)]: + for growth in [x*0.001 for x in range(120)]: + err = get_error(scale, elasticity, growth) + if err <= REPORT_THRESHOLD: + print('%d %.3f %.3f: %.3f' % (scale, elasticity, growth, err)) + if err < best[-1]: + best = scale, elasticity, growth, err + +print('Best params: %d %.3f %.3f (err %.3f)' % best) + +scale, elasticity, growth, err = best +bs_fac, fee_fac = 1 / (1 + elasticity), elasticity / (1 + elasticity) + +for i, (block_size, avg_fee) in enumerate(data): + expected = scale * (1 + growth) ** i + actual = block_size ** bs_fac * avg_fee ** fee_fac + print(i, actual, expected) diff --git a/elasticity/data.csv b/elasticity/data.csv new file mode 100644 index 0000000..1b2eb35 --- /dev/null +++ b/elasticity/data.csv @@ -0,0 +1,28 @@ +358373,0.0001556 +373527,0.00014486 +387199,0.00014816 +404860,0.00017626 +392748,0.00015425 +454844,0.00016187 +594453,0.0002212 +463351,0.00021871 +555749,0.00019946 +543490,0.00019368 +566810,0.00017566 +623697,0.00017407 
+662227,0.00019356 +732706,0.00020016 +715370,0.00022996 +743867,0.00025096 +769594,0.00022846 +820848,0.00027225 +785600,0.00027909 +777843,0.00030502 +749121,0.00027106 +827073,0.00031194 +863884,0.00032692 +873975,0.00035117 +864976,0.00040644 +949126,0.00054382 +955451,0.00080043 +957912,0.00076301 diff --git a/elasticity/data.png b/elasticity/data.png new file mode 100644 index 0000000..3e7b92a Binary files /dev/null and b/elasticity/data.png differ diff --git a/iceage.py b/iceage.py index d17474f..4ae1a8e 100644 --- a/iceage.py +++ b/iceage.py @@ -1,12 +1,12 @@ import random import datetime -diffs = [453.71 * 10**12] -hashpower = diffs[0] / 15.73 -times = [1495425834] +diffs = [512.60 * 10**12] +hashpower = diffs[0] / 15.50 +times = [1496227377] -for i in range(3746966, 6010000): +for i in range(3797763, 6010000): blocktime = random.expovariate(hashpower / diffs[-1]) adjfac = max(1 - int(blocktime / 10), -99) / 2048. newdiff = diffs[-1] * (1 + adjfac) diff --git a/uncle_regressions/base_regression.py b/uncle_regressions/base_regression.py index 7ea9bd5..04d3604 100644 --- a/uncle_regressions/base_regression.py +++ b/uncle_regressions/base_regression.py @@ -1,25 +1,30 @@ data = [[float(y) for y in x.strip().split(', ')] for x in open('block_datadump.csv').readlines()] for i in range(0, 2283416, 200000): - print 'Checking 200k blocks from %d' % i + print('Checking 200k blocks from %d' % i) dataset = [] totuncles, totuncreward = 0, 0 + totbs = [0 for j in range(40)] + totus = [0 for j in range(40)] for num, uncs, uncrew, uncgas, txs, gas, length, zeroes in data[i:i+200000]: dataset.append([gas, 0]) for i in range(int(uncs)): dataset.append([uncgas / uncs * 1.0, 1]) totuncles += uncs totuncreward += uncrew - print 'Average uncle reward:', totuncreward * 1.0 / totuncles - print 'Average nephew reward:', totuncles * 5 / 32. 
/ len(dataset) + totus[int(gas / 100000)] += uncs + totbs[int(gas / 100000)] += 1 + print([totus[j] * 100.0 / (totbs[j] + 0.000000001) for j in range(40)]) + print('Average uncle reward:', totuncreward * 1.0 / totuncles) + print('Average nephew reward:', totuncles * 5 / 32. / len(dataset)) mean_x = sum([x[0] for x in dataset]) * 1.0 / len(dataset) mean_y = sum([x[1] for x in dataset]) * 1.0 / len(dataset) - print 'Average gas used:', mean_x - print 'Average uncle rate:', mean_y + print('Average gas used:', mean_x) + print('Average uncle rate:', mean_y) covar = sum([(x[0] - mean_x) * (x[1] - mean_y) for x in dataset]) var = sum([(x[0] - mean_x) ** 2 for x in dataset]) - print 'm = ', covar / var - print 'b = ', mean_y - mean_x * (covar / var) + print('m = ', covar / var) + print('b = ', mean_y - mean_x * (covar / var)) diff --git a/uncle_regressions/tx_and_bytes_regression.py b/uncle_regressions/tx_and_bytes_regression.py index 69c077b..84a7ae7 100644 --- a/uncle_regressions/tx_and_bytes_regression.py +++ b/uncle_regressions/tx_and_bytes_regression.py @@ -1,7 +1,7 @@ data = [[float(y) for y in x.strip().split(', ')] for x in open('block_datadump.csv').readlines()] for i in range(0, 2283416, 200000): - print 'Checking 200k blocks from %d' % i + print('Checking 200k blocks from %d' % i) dataset = [] for j in range(i, min(i + 200000, 2283400), 100): gas = 0 @@ -35,8 +35,8 @@ for i in range(0, 2283416, 200000): var2 = sum([(x[2] - mean_x2) ** 2 for x in dataset]) covar3 = sum([(x[3] - mean_x2) * (x[-1] - mean_y2) for x in dataset]) var3 = sum([(x[3] - mean_x2) ** 2 for x in dataset]) - print 'Base m =', covar / var - print 'Base b =', mean_y - mean_x * (covar / var) - print 'Excess m for txs=', covar1 / var1 - print 'Excess m for nonzero bytes=', covar2 / var2 - print 'Excess m for zero bytes=', covar3 / var3 + print('Base m =', covar / var) + print('Base b =', mean_y - mean_x * (covar / var)) + print('Excess m for txs=', covar1 / var1) + print('Excess m for nonzero 
bytes=', covar2 / var2) + print('Excess m for zero bytes=', covar3 / var3)