Reorganized research repo

Vitalik Buterin 2017-06-20 02:36:22 -04:00
parent 984767ef3c
commit 91aaea20f0
30 changed files with 28 additions and 405 deletions


@@ -1,55 +0,0 @@
import random

# Prefer the 'sha3' module; fall back to 'python_sha3' if it is unavailable
try:
    shathree = __import__('sha3').sha3_256
except ImportError:
    shathree = __import__('python_sha3').sha3_256

params = {
    "size": 256,
    "pecks": 32
}


def decode_int(x):
    o = 0
    for a in x:
        o = o * 256 + ord(a)
    return o


def sha3(x):
    return shathree(x).digest()


def bloom_insert(params, bloom, val):
    k = decode_int(sha3(val)) * (3**160 + 112)
    for i in range(params["pecks"]):
        bloom |= 1 << (k % params["size"])
        k //= params["size"]
    return bloom


def bloom_query(params, bloom, val):
    o = bloom_insert(params, 0, val)
    return (bloom & o) == o


def test_params(size, pecks, objcount):
    params = {"size": size, "pecks": pecks}
    count = 0
    for i in range(100):
        objs = [str(random.randrange(2**40)) for i in range(objcount)]
        bloom = 0
        for o in objs:
            bloom = bloom_insert(params, bloom, o)
        for o in objs:
            assert bloom_query(params, bloom, o)
        for i in range(100):
            if bloom_query(params, bloom, str(random.randrange(2**40))):
                count += 1
    print 'False positive rate: %f' % (count / 10000.)
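
A quick way to exercise the filter is a small driver along these lines; the parameter values are illustrative, not taken from the original file:

# Hypothetical usage sketch: a sparsely filled filter should report a very
# low false positive rate, and the rate climbs as more objects are inserted
# into the same 256-bit filter.
if __name__ == '__main__':
    test_params(256, 32, 5)
    test_params(256, 32, 20)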

casper.py

@@ -1,245 +0,0 @@
import copy, random, hashlib

# GhostTable: { block number: { block: validators } }
# The ghost table represents the entire current "view" of a user, and
# every block produced contains the producer's ghost table at the time.

# Signature slashing rules (not implemented)
# 1. Sign two blocks at the same height
# 2. Sign an invalid block
# 3. Sign a block which fully confirms A at height H, and sign B at height H

h = [3**50]
ids = [0]

# Number of validators
NUM_VALIDATORS = 50
# Block time in ticks (e.g. 1 tick = 0.1 seconds)
BLKTIME = 30
# Disparity in the clocks of nodes
CLOCK_DISPARITY = 10
# An exponential distribution for latency offset by a minimum
LATENCY_MIN = 4
LATENCY_BASE = 2
LATENCY_PROB = 0.25


def assign_hash():
    h[0] = (h[0] * 3) % 2**48
    return h[0]


def assign_id():
    ids[0] += 1
    return ids[0] - 1


def latency_distribution_sample():
    v = LATENCY_BASE
    while random.random() > LATENCY_PROB:
        v *= 2
    return v + LATENCY_MIN


def clock_offset_distribution_sample():
    return random.randrange(-CLOCK_DISPARITY, CLOCK_DISPARITY)


# A signature represents the entire "view" of a signer,
# where the view is the set of blocks that the signer
# considered most likely to be valid at the time that
# they were produced
class Signature():
    def __init__(self, signer, view):
        self.signer = signer
        self.view = copy.deepcopy(view)


# A ghost table represents the view that a user had of the signatures
# available at the time that the block was produced.
class GhostTable():
    def __init__(self):
        self.confirmed = []
        self.unconfirmed = []

    def process_signature(self, sig):
        # Process every block height in the signature
        for i in range(len(self.confirmed), len(sig.view)):
            # A ghost table entry at a height is a mapping of
            # block hash -> signers
            if i >= len(self.unconfirmed):
                self.unconfirmed.append({})
            cur_entry = self.unconfirmed[i]
            # If the block hash is not yet in the ghost table, add it, and
            # initialize it with an empty signer set
            if sig.view[i] not in cur_entry:
                cur_entry[sig.view[i]] = {}
            # Add the signer
            cur_entry[sig.view[i]][sig.signer] = True
            # If it has 67% signatures, finalize
            if len(cur_entry[sig.view[i]]) > NUM_VALIDATORS * 2 / 3:
                # prevgt = block_map[sig.view[i]].gt
                prevgt = self
                print 'confirmed', block_map[sig.view[i]].height, sig.view[i]
                # Update blocks between the previous confirmation and the
                # current confirmation based on the newly confirmed block's
                # ghost table
                for j in range(len(self.confirmed), i):
                    # At each intermediate height, add the block for which we
                    # have the most signatures
                    maxkey, maxval = 0, 0
                    for k in prevgt.unconfirmed[j]:
                        if len(prevgt.unconfirmed[j][k]) > maxval:
                            maxkey, maxval = k, len(prevgt.unconfirmed[j][k])
                    self.confirmed.append(maxkey)
                    print j, {k: len(prevgt.unconfirmed[j][k]) for k in prevgt.unconfirmed[j]}
                # Then add the new block that got 67% signatures
                print i, sig.view[i]
                self.confirmed.append(sig.view[i])

    # Hash of the ghost table's contents (to make sure that it's not
    # being modified when it's already supposed to be set in stone)
    def hash(self):
        print hashlib.sha256(repr(self.unconfirmed) +
                             repr(self.confirmed)).hexdigest()[:15]

    # Create a new ghost table that appends to an existing ghost table, adding
    # some set of signatures
    def append(self, sigs):
        x = GhostTable()
        x.confirmed = copy.deepcopy(self.confirmed)
        x.unconfirmed = copy.deepcopy(self.unconfirmed)
        for sig in sigs:
            x.process_signature(sig)
        return x


class Block():
    def __init__(self, h, gt, maker):
        self.gt = gt
        self.height = h
        self.maker = maker
        self.hash = assign_hash()


class Validator():
    def __init__(self):
        self.gt = GhostTable()
        self.view = []
        self.id = assign_id()
        self.new_sigs = []
        self.clock_offset = clock_offset_distribution_sample()
        self.last_block_produced = -99999
        self.last_unseen = 0

    # Is this block compatible with our view?
    def is_compatible_with_view(self, block):
        return block.height >= len(self.view) or \
            self.view[block.height] is None

    # Add a block to this validator's view of probably valid
    # blocks
    def add_to_view(self, block):
        while len(self.view) <= block.height:
            self.view.append(None)
        self.view[block.height] = block.hash
        while self.last_unseen < len(self.view) and \
                self.view[self.last_unseen] is not None:
            self.last_unseen += 1

    # Make a block
    def produce_block(self):
        self.gt = self.gt.append(self.new_sigs)
        newblk = Block(self.last_unseen, self.gt, self.id)
        print 'newblk', newblk.height
        self.add_to_view(newblk)
        publish(newblk)
        newsig = Signature(self.id, self.view[:self.last_unseen])
        self.new_sigs = [newsig]
        publish(newsig)

    # Callback function upon receiving a block
    def on_receive(self, obj):
        if isinstance(obj, Block):
            desired_maker = (self.time() // BLKTIME) % NUM_VALIDATORS
            if 0 <= (desired_maker - obj.maker) % 100 <= 0:
                if self.is_compatible_with_view(obj):
                    self.add_to_view(obj)
                    publish(Signature(self.id, self.view[:self.last_unseen]))
        if isinstance(obj, Signature):
            self.new_sigs.append(obj)

    # Do everything that you need to do in this particular round
    def tick(self):
        if (self.time() // BLKTIME) % NUM_VALIDATORS == self.id:
            if self.time() - self.last_block_produced > \
                    BLKTIME * NUM_VALIDATORS:
                self.produce_block()
                self.last_block_produced = self.time()

    # Calculate the validator's own clock based on the actual time
    # plus a time offset that this validator happens to be wrong by
    # (e.g. +1 second)
    def time(self):
        return real_time[0] + self.clock_offset


block_map = {}
listening_queue = {}
real_time = [0]
validators = {}


# Publish a block or a signature
def publish(obj):
    if isinstance(obj, Block):
        block_map[obj.hash] = obj
    # For every validator, add it to the validator's listening queue
    # at a time randomly sampled from the latency distribution
    for v in validators:
        arrival_time = real_time[0] + latency_distribution_sample()
        if arrival_time not in listening_queue:
            listening_queue[arrival_time] = []
        listening_queue[arrival_time].append((v, obj))


# One round of the clock ticking
def tick():
    for _, v in validators.items():
        v.tick()
    if real_time[0] in listening_queue:
        for validator_id, obj in listening_queue[real_time[0]]:
            validators[validator_id].on_receive(obj)
    real_time[0] += 1
    print real_time[0]


# Main function: run(7000) = simulate casper for 7000 ticks
def run(steps):
    for k in block_map.keys():
        del block_map[k]
    for k in listening_queue.keys():
        del listening_queue[k]
    for k in validators.keys():
        del validators[k]
    real_time[0] = 0
    ids[0] = 0
    for i in range(NUM_VALIDATORS):
        v = Validator()
        validators[v.id] = v
    for i in range(steps):
        tick()
    c = []
    for _, v in validators.items():
        for i, b in enumerate(v.gt.confirmed):
            assert block_map[b].height == i
        if v.gt.confirmed[:len(c)] != c[:len(v.gt.confirmed)]:
            for i in range(min(len(c), len(v.gt.confirmed))):
                if c[i] != v.gt.confirmed[i]:
                    print i, c[i], v.gt.confirmed[i]
            raise Exception("Confirmed block list mismatch")
        c.extend(v.gt.confirmed[len(c):])
    print c
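
The slashing rules listed at the top of casper.py are noted as unimplemented. A minimal sketch of a detector for rule 1 (signing two different blocks at the same height), assuming only the Signature objects defined above, might look like the following; the function name and return format are illustrative, not part of the original file:

# Hypothetical sketch: scan a list of Signature objects and report
# (signer, height, hash1, hash2) tuples where a signer endorsed two
# different blocks at the same height.
def find_equivocations(sigs):
    seen = {}
    evidence = []
    for sig in sigs:
        for height, blockhash in enumerate(sig.view):
            key = (sig.signer, height)
            if key in seen and seen[key] != blockhash:
                evidence.append((sig.signer, height, seen[key], blockhash))
            seen[key] = blockhash
    return evidence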


@@ -0,0 +1,28 @@
# Expects input 224+x bytes: v, r, s, nonce, gasprice, to, value, data
with zero = ~mload(0):
    # Anti re-entrancy
    ~jumpi(~pc(), msg.sender != ~sub(zero, 1))
    # Copy calldata
    ~calldatacopy(32, zero, ~calldatasize())
    # Compute sighash
    ~mstore(zero, ~sha3(32, 32 + ~calldatasize()))
    # Do elliptic curve verification
    ~call(3000, 1, zero, zero, 128, zero, 32)
    # Memory: hash, v, r, s, nonce, gasprice, to, value, data
    # Check sig is correct
    ~jumpi(~pc(), ~mload(zero) != 0xfe2ec957647679d210034b65e9c7db2452910b0c)
    with s = ~sload(zero):
        # Check nonce is correct
        ~jumpi(~pc(), s != ~mload(128))
        # Increment nonce
        ~sstore(zero, s + 1)
    with gasprice = ~mload(160):
        # Check balance
        ~jumpi(~pc(), self.balance < gasprice * msg.gas)
        with g1 = msg.gas:
            # Make the main call
            ~call(msg.gas - 25000, ~mload(192), ~mload(224), 256, ~calldatasize() - 224, zero, 10000)
            # Pay the miner
            ~call(zero, block.coinbase, (g1 - msg.gas + 5000) * gasprice, zero, zero, zero, zero)
            # Log to establish that the tx passed through successfully
            ~log0(zero, zero)
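
For context, the account code above reads its input as seven 32-byte words (v, r, s, nonce, gasprice, to, value) followed by raw call data. A hedged Python sketch of how a client might pack that calldata; the helper names come from pyethereum's utils module as used elsewhere in this repo, and the packing function itself is illustrative:

# Hypothetical packing helper: seven 32-byte big-endian words occupying
# calldata offsets 0..224, then the raw data for the inner call.
from ethereum import utils

def pack_account_calldata(v, r, s, nonce, gasprice, to, value, data=b''):
    words = [v, r, s, nonce, gasprice,
             utils.big_endian_to_int(utils.normalize_address(to)), value]
    return b''.join([utils.zpad(utils.encode_int(w), 32) for w in words]) + data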


@@ -1,39 +0,0 @@
from ethereum import utils


def mk_forwarder(address):
    code = b'\x36\x60\x00\x60\x00\x37'  # CALLDATACOPY 0 0 (CALLDATASIZE)
    code += b'\x61\x10\x00\x60\x00\x36\x60\x00'  # 4096 0 CALLDATASIZE 0
    code += b'\x73' + utils.normalize_address(address) + b'\x5a'  # address gas
    code += b'\xf4'  # delegatecall
    code += b'\x61\x10\x00\x60\x00\xf3'  # 4096 0 RETURN
    return code


def mk_wrapper(code):
    lencodepush = b'\x60' + utils.encode_int(len(code))  # length of code
    returner = lencodepush + b'\x60\x0c\x60\x00'  # start from 12 in code, 0 in memory
    returner += b'\x39'  # CODECOPY
    returner += lencodepush + b'\x60\x00' + b'\xf3'  # return code
    assert len(returner) == 12
    return returner + code


kode = """
moose: num
def increment_moose(i: num) -> num:
    self.moose += i
    return self.moose
"""


def test():
    from ethereum.tools import tester2
    c = tester2.Chain()
    x = c.contract(kode, language='viper', sender=tester2.k3)
    fwdcode = mk_forwarder(x.address)
    initcode = mk_wrapper(fwdcode)
    y = c.contract(initcode, language='evm')
    assert c.head_state.get_code(y) == fwdcode
    z = tester2.ABIContract(c, x.translator, y)
    assert z.increment_moose(3) == 3
    assert z.increment_moose(5) == 8


if __name__ == '__main__':
    test()
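
A chain-free sanity check of the wrapper layout, as a hedged sketch; the dummy 20-byte address is arbitrary and the check is not part of the original test:

# Hypothetical check: mk_wrapper prepends a 12-byte init stub, so the code
# returned at deployment is exactly the forwarder bytecode.
def check_wrapper_layout():
    fwd = mk_forwarder(b'\x35' * 20)
    wrapped = mk_wrapper(fwd)
    assert len(wrapped) == 12 + len(fwd)
    assert wrapped[12:] == fwd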


@@ -1,23 +0,0 @@
NUMSIGNERS = 15
ATTACKER_SHARE = 0.495
CHANCE_OF_SUCCESS = 0.049
SCORE_DIFFERENTIAL = 10
ATTACKER_VOTE = 0.95

import random


def sim():
    d = -SCORE_DIFFERENTIAL * 15
    while d < 0 and d > -(SCORE_DIFFERENTIAL * 15) - 1000:
        if random.random() < ATTACKER_SHARE:
            for i in range(NUMSIGNERS):
                if random.random() < ATTACKER_SHARE:
                    d += ATTACKER_VOTE
                else:
                    d += min(CHANCE_OF_SUCCESS, 0.95)
        else:
            for i in range(NUMSIGNERS):
                if random.random() < ATTACKER_SHARE:
                    pass
                else:
                    d -= min(1 - CHANCE_OF_SUCCESS, 0.95)
    return 1 if d >= 0 else 0
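
A small Monte Carlo driver (not part of the original file) to estimate the attacker's success probability from repeated trials:

# Hypothetical driver: average many independent runs of sim() to estimate
# the probability that the attacker recovers from the initial score deficit.
def estimate_success_rate(trials=10000):
    return sum([sim() for i in range(trials)]) / float(trials)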


@@ -1,43 +0,0 @@
def fac(n): return 1 if n == 0 else n * fac(n-1)
def choose(n, k): return fac(n) / fac(k) / fac(n-k)
def prob(n, k, p): return choose(n, k) * p ** k * (1-p) ** (n-k)
def prob_lt(n, k, p): return sum([prob(n, i, p) for i in range(k)])

SIGS = 30
ACTUALSIGS = 10
POWRETURN = 0.03
POSRETURN = 0.01


# Expected number of signatures on a block
def ev(pos):
    return SIGS * pos


# Chance you have at least k sigs
def at_least_k(pos, k):
    return sum([prob(SIGS, i, pos) for i in range(k, SIGS + 1)])


# Expected number of signatures on a block, filtering out all blocks with
# fewer than k signatures
def ev_atleast_k(pos, k):
    total, subprob = 0, 0
    for i in range(k, SIGS + 1):
        p = prob(SIGS, i, pos)
        subprob += p
        total += i * p
    return total / subprob


def normal_mining_return(pow, pos):
    return pow * POWRETURN + ev(pos) * POSRETURN / ACTUALSIGS


def attack_mining_return(pow, pos, k):
    powtotal, postotal, subprob = 0, 0, 0
    # Case 1: mined PoW block, PoS at least k instances
    case_1_prob = pow * at_least_k(pos, k)
    subprob += case_1_prob
    postotal += case_1_prob * ev_atleast_k(pos, k) * POSRETURN / ACTUALSIGS
    powtotal += case_1_prob * POWRETURN
    # Case 2: mined PoW block, PoS less than k: discard
    # Case 3: others mined PoW block
    subprob += (1 - pow)
    postotal += (1 - pow) * ev(pos) * POSRETURN / ACTUALSIGS
    return powtotal / subprob + postotal / subprob
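
For illustration, the two return functions can be compared directly; the hashpower share, stake level and signature threshold below are arbitrary, not taken from the original file:

# Hypothetical comparison: 30% of hashpower, blocks averaging 50% of the
# SIGS possible signatures, and a strategy that discards own blocks with
# fewer than 20 signatures.
print normal_mining_return(0.3, 0.5)
print attack_mining_return(0.3, 0.5, 20)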