Some updates

This commit is contained in:
Vitalik Buterin 2014-12-09 09:30:40 -05:00
parent d4a36a28c4
commit e41f3f15dd
1 changed file with 99 additions and 121 deletions

View File

@ -36,15 +36,10 @@ than multiple memory lookups would - even for GPUs/FPGAs/ASICs.
""" """
# Prefer the C-accelerated 'sha3' package; fall back to the pure-python
# implementation. Only a failed import should trigger the fallback --
# a bare except would also swallow KeyboardInterrupt/SystemExit.
try:
    shathree = __import__('sha3')
except ImportError:
    shathree = __import__('python_sha3')
import time
from pyethereum import utils
def sha3(x):
    """SHA3-256 of *x*, decoded into a (large) integer."""
    digest = shathree.sha3_256(x).digest()
    return decode_int(digest)
def decode_int(s): def decode_int(s):
o = 0 o = 0
@ -61,107 +56,76 @@ def encode_int(x):
return o return o
def sha3(x):
return decode_int(utils.sha3(x))
def cantor_pair(x, y, p):
return ((x+y) * (x+y+1) / 2 + y) % p
def get_daggerset(params, seedset):
    """Build one DAG for every seed in *seedset*."""
    dags = []
    for s in seedset:
        dags.append(produce_dag(params, s))
    return dags
def update_daggerset(params, daggerset, seedset, seed):
    """Replace one DAG in place; the slot is derived from *seed* itself."""
    slot = decode_int(seed) % len(daggerset)
    seedset[slot] = seed
    daggerset[slot] = produce_dag(params, seed)
P = (2**256 - 4294968273)**2
def produce_dag(params, seed):
    """Generate the full DAG for *seed* as a list of n integers mod p.

    Nodes below the h_threshold cutoff t each depend on k earlier nodes;
    "final" nodes (i >= t) depend on hk of the first t nodes and use the
    heavier work factor hw, so the whole prefix must stay in memory.
    """
    k, hk, w, hw, n, p, t = params.k, params.hk, params.w, \
        params.hw, params.dag_size, params.p, params.h_threshold
    print 'Producing dag of size %d (%d memory)' % (n, n * params.wordsz)
    o = [sha3(seed)]
    init = o[0]
    picker = 1
    for i in range(1, n):
        x = 0
        # incremental form of picker == pow(init, i, p): a cheap,
        # deterministic pseudo-random index stream
        picker = (picker * init) % p
        curpicker = picker
        if i < t:
            for j in range(k):  # can be flattened if params are known
                x ^= o[curpicker % i]
                curpicker >>= 10  # consume 10 bits of the stream per pick
        else:
            # final nodes: pick hk dependencies from the first t nodes only
            for j in range(hk):
                x ^= o[curpicker % t]
                curpicker >>= 10
        o.append(pow(x, w if i < t else hw, p))  # use any "hash function" here
    return o
def quick_calc(params, seed, pos, known=None):
    """Compute the single DAG node at *pos* without storing the whole DAG.

    Recursively evaluates only the ancestors actually required, memoising
    results in *known* (a fresh dict per call unless one is supplied --
    note the original mutable-default pitfall was fixed upstream).
    """
    k, hk, w, hw, p, t = params.k, params.hk, params.w, \
        params.hw, params.p, params.h_threshold
    init = sha3(seed) % p
    if known is None:
        known = {}
    known[0] = init
    def calc(i):
        if i not in known:
            # same index stream produce_dag builds incrementally,
            # computed directly here for a single node
            curpicker = pow(init, i, p)
            x = 0
            if i < t:
                for j in range(k):
                    x ^= calc(curpicker % i)
                    curpicker >>= 10
                known[i] = pow(x, w, p)
            else:
                # final node: hk dependencies from the first t nodes
                for j in range(hk):
                    x ^= calc(curpicker % t)
                    curpicker >>= 10
                known[i] = pow(x, hw, p)
        return known[i]
    o = calc(pos)
    print 'Calculated index %d in %d lookups' % (pos, len(known))
    return o
def hashimoto(params, daggerset, header, nonce):
    """
    Requirements:
    - I/O bound: cycles spent on I/O >> cycles spent in cpu
    lookups depend on previous lookup results
    impossible to route computation/lookups based on the initial sha3
    """
    rand = sha3(header + encode_int(nonce)) % params.p
    mix = rand
    # loop, that can not be unrolled
    # dag and dag[pos] depended on previous lookup
    for i in range(params.lookups):
        # serial mode chains each lookup through the evolving mix;
        # otherwise lookups are independent bit-slices of the initial hash
        v = mix if params.is_serial else rand >> i
        dag = daggerset[v % params.num_dags]  # modulo
        pos = v % params.dag_size  # modulo
        mix ^= dag[pos]  # xor
        # print v % params.num_dags, pos, dag[pos]
    print header, nonce, mix
    return mix
def light_hashimoto(params, seedset, header, nonce):
    """Hashimoto for light clients: DAG nodes are recomputed on demand
    with quick_calc instead of being looked up in stored DAGs; must
    produce exactly the same mix as hashimoto() on the full daggerset."""
    rand = sha3(header + encode_int(nonce)) % params.p
    mix = rand
    for i in range(params.lookups):
        # same lookup-index derivation as hashimoto()
        v = mix if params.is_serial else rand >> i
        seed = seedset[v % len(seedset)]
        pos = v % params.dag_size
        qc = quick_calc(params, seed, pos)
        # print v % params.num_dags, pos, qc
        mix ^= qc
    print 'Calculated %d lookups' % \
        (params.lookups)
    print header, nonce, mix
    return mix
def light_verify(params, seedset, header, nonce):
    """True iff the light-client hash meets the difficulty target."""
    target = 256**params.wordsz / params.diff
    return light_hashimoto(params, seedset, header, nonce) <= target
def mine(daggerset, params, header, nonce=0): def mine(daggerset, params, header, nonce=0):
orignonce = nonce orignonce = nonce
origtime = time.time() origtime = time.time()
while 1: while 1:
h = hashimoto(daggerset, params.lookups, header, nonce) h = hashimoto(params, daggerset, header, nonce)
if h <= 2**512 / params.diff: if h <= 256**params.wordsz / params.diff:
noncediff = nonce - orignonce noncediff = nonce - orignonce
timediff = time.time() - origtime timediff = time.time() - origtime
print 'Found nonce: %d, tested %d nonces in %.2f seconds (%d per sec)' % \ print 'Found nonce: %d, tested %d nonces in %.2f seconds (%d per sec)' % \
@ -228,44 +199,55 @@ class params(object):
lookups: hashes_per_sec(lookups=0) >> hashes_per_sec(lookups_mem_hard) lookups: hashes_per_sec(lookups=0) >> hashes_per_sec(lookups_mem_hard)
k: ? k: ?
d: higher values enforce memory availability but require more quick_calcs d: higher values enforce memory availability but require more quick_calcs
numdags: so that a dag can be updated in reasonable time num_dags: so that a dag can be updated in reasonable time
""" """
memory = 512 * 1024**2 # memory usage p = (2 ** 256 - 4294968273)**2 # prime modulus
numdags = 128 # number of dags wordsz = 64 # word size
dag_size = memory /numdags / 64 # num 64byte values per dag memory = 10 * 1024**2 # memory usage
lookups = 512 # memory lookups per hash num_dags = 2 # number of dags
dag_size = memory/num_dags/wordsz # num 64byte values per dag
lookups = 40 # memory lookups per hash
diff = 2**14 # higher is harder diff = 2**14 # higher is harder
k = 2 # num dependencies of each dag value k = 2 # num dependencies of each dag value
hk = 8 # dependencies for final nodes
d = 8 # max distance of first dependency (1/d=fraction of size) d = 8 # max distance of first dependency (1/d=fraction of size)
w = 2 w = 2 # work factor on node generation
hw = 8 # work factor on final node generation
h_threshold = dag_size*2/5 # cutoff between final and nonfinal nodes
is_serial = False # hashimoto is serial
if __name__ == '__main__': if __name__ == '__main__':
print dict((k,v) for k,v in params.__dict__.items() if isinstance(v,int)) print dict((k, v) for k, v in params.__dict__.items()
if isinstance(v, int))
# odds of a partitial storage attack # odds of a partitial storage attack
missing_mem = 0.01 missing_mem = 0.01
P_partitial_mem_success = (1-missing_mem) ** params.lookups P_partitial_mem_success = (1-missing_mem) ** params.lookups
print 'P success per hash with %d%% mem missing: %d%%' %(missing_mem*100, P_partitial_mem_success*100) print 'P success per hash with %d%% mem missing: %d%%' % \
(missing_mem*100, P_partitial_mem_success*100)
# which actually only results in a slower mining, as more hashes must be tried # which actually only results in a slower mining,
slowdown = 1/ P_partitial_mem_success # as more hashes must be tried
print 'x%.1f speedup required to offset %d%% missing mem' % (slowdown, missing_mem*100) slowdown = 1 / P_partitial_mem_success
print 'x%.1f speedup required to offset %d%% missing mem' % \
(slowdown, missing_mem*100)
# create set of DAGs # create set of DAGs
st = time.time() st = time.time()
seedset = [str(i) for i in range(params.numdags)] seedset = [str(i) for i in range(params.num_dags)]
daggerset = get_daggerset(params, seedset) daggerset = get_daggerset(params, seedset)
print 'daggerset with %d dags' % len(daggerset), 'size:', 64*params.dag_size*params.numdags / 1024**2 , 'MB' print 'daggerset with %d dags' % len(daggerset), 'size:', \
64*params.dag_size*params.num_dags / 1024**2, 'MB'
print 'creation took %.2fs' % (time.time() - st) print 'creation took %.2fs' % (time.time() - st)
# update DAG # update DAG
st = time.time() st = time.time()
update_daggerset(params, daggerset, seedset, seed='new') update_daggerset(params, daggerset, seedset, seed='qwe')
print 'updating 1 dag took %.2fs' % (time.time() - st) print 'updating 1 dag took %.2fs' % (time.time() - st)
# Mine # Mine
for i in range(10): for i in range(1):
header = 'test%d' % i header = 'test%d' % i
print '\nmining', header print '\nmining', header
nonce = mine(daggerset, params, header) nonce = mine(daggerset, params, header)
@ -273,7 +255,3 @@ if __name__ == '__main__':
st = time.time() st = time.time()
assert light_verify(params, seedset, header, nonce) assert light_verify(params, seedset, header, nonce)
print 'verification took %.2fs' % (time.time() - st) print 'verification took %.2fs' % (time.time() - st)