Remove the hard-to-read concat procs
This commit is contained in:
parent 3dd362990e
commit f11a6a2cde
@@ -4,7 +4,7 @@
 import math, sequtils, algorithm,
        keccak_tiny
 
-import ./private/[primes, casting, functional, intmath, concat]
+import ./private/[primes, casting, functional, intmath]
 export toHex, hexToByteArrayBE, hexToSeqBytesBE, toByteArrayBE
 export keccak_tiny
 
@@ -147,7 +147,7 @@ proc calc_dataset*(full_size: Natural, cache: seq[Hash[512]]): seq[Hash[512]] {.
 # ###############################################################################
 # Main loop
 
-type HashimotoHash = tuple[mix_digest: array[8, uint32], value: Hash[256]]
+type HashimotoHash = tuple[mix_digest: Hash[256], value: Hash[256]]
 # TODO use Hash as a result type
 type DatasetLookup = proc(i: Natural): Hash[512] {.noSideEffect.}
 
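The digest field changes from eight raw uint32 words to a Hash[256]. Both representations are exactly 32 bytes, which is what makes the casts later in this commit lossless; a minimal sanity-check sketch (standard library only):

    # Sketch: the old array[8, uint32] digest and the new Hash[256]
    # occupy the same space, so casting between them cannot truncate.
    static:
      assert sizeof(array[8, uint32]) == 32  # 8 words * 4 bytes
      assert 32 * 8 == 256                   # 32 bytes = 256 bits
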
@@ -157,20 +157,32 @@ proc hashimoto(header: Hash[256],
                dataset_lookup: DatasetLookup
                ): HashimotoHash {.noInit, noSideEffect.}=
   let
-    n = uint32 full_size div HASH_BYTES # check div operator, in spec it's Python true division
-    w = uint32 MIX_BYTES div WORD_BYTES # TODO: review word bytes: uint32 vs uint64
+    n = uint32 full_size div HASH_BYTES
+    w = uint32 MIX_BYTES div WORD_BYTES
     mixhashes = uint32 MIX_BYTES div HASH_BYTES
-    # combine header+nonce into a 64 byte seed
-    s = concat_hash(header, nonce).toU512
 
   assert full_size mod HASH_BYTES == 0
   assert MIX_BYTES mod HASH_BYTES == 0
 
+  # combine header+nonce into a 64 byte seed
+  var s{.noInit.}: Hash[512]
+  let s_bytes = cast[ptr array[64, byte]](addr s)   # Alias to interpret s as a byte array
+  let s_words = cast[ptr array[16, uint32]](addr s) # Alias to interpret s as an uint32 array
+
+  s_bytes[0..<32] = header.toByteArrayBE # We first populate the first 40 bytes of s with the concatenation
+  s_bytes[32..<40] = nonce.toByteArrayBE
+
+  s = keccak_512 s_bytes[0..<40]
+
   # start the mix with replicated s
-  var mix{.noInit.}: array[32, uint32] # MIX_BYTES / HASH_BYTES * sizeof(s) => 1024
-  mix[0..<16] = s
-  mix[16..<32] = s
+  assert MIX_BYTES div HASH_BYTES == 2
+  var mix{.noInit.}: array[32, uint32]
+  mix[0..<16] = s_words[]
+  mix[16..<32] = s_words[]
 
   # mix in random dataset nodes
   for i in 0'u32 ..< ACCESSES:
-    let p = fnv(i.uint32 xor s[0].uint32, mix[i mod w]) mod (n div mixhashes) * mixhashes
+    let p = fnv(i xor s_words[0], mix[i mod w]) mod (n div mixhashes) * mixhashes
 
     # Unrolled: for j in range(MIX_BYTES / HASH_BYTES): => for j in 0 ..< 2
     var newdata{.noInit.}: type mix
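The two pointer casts are the heart of this rewrite: the same 64-byte seed s is viewed both as raw bytes (for the keccak input and the final concatenation) and as uint32 words (for seeding the mix), with no copying. A standalone sketch of the aliasing technique, independent of ethash; note that the word values seen through the uint32 view depend on host endianness:

    # One 64-byte buffer, aliased as bytes and as 16 uint32 words.
    var buf: array[64, byte]
    let bytesView = cast[ptr array[64, byte]](addr buf)
    let wordsView = cast[ptr array[16, uint32]](addr buf)

    bytesView[][0] = 0xAB
    echo wordsView[][0]  # 171 on a little-endian host (low byte of word 0)

One note on the reworked p: Nim's mod, div and * all share multiplicative precedence and associate left, so the expression parses as (fnv(...) mod (n div mixhashes)) * mixhashes, matching the spec's Python "% ... * mixhashes".
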
@@ -179,13 +191,19 @@ proc hashimoto(header: Hash[256],
 
     mix = zipMap(mix, newdata, fnv(x, y))
 
-  # compress mix (aka result.mix_digest)
-  # TODO: what is the representation of mix during FNV? big-endian, native host endianess?
+  # compress mix
+  var cmix: array[8, uint32]
   for i in countup(0, mix.len - 1, 4):
-    result.mix_digest[i div 4] = mix[i].fnv(mix[i+1]).fnv(mix[i+2]).fnv(mix[i+3])
+    cmix[i div 4] = mix[i].fnv(mix[i+1]).fnv(mix[i+2]).fnv(mix[i+3])
 
-  result.value = keccak256 concat_hash(s, result.mix_digest)
+  result.mix_digest = cast[Hash[256]](
+    mapArray(cmix, x.toByteArrayBE) # Each uint32 must be changed to Big endian
+  )
+
+  var concat{.noInit.}: array[64 + 32, byte]
+  concat[0..<64] = s_bytes[]
+  concat[64..<96] = cast[array[32, byte]](cmix)
+  result.value = keccak_256(concat)
 
 proc hashimoto_light*(full_size:Natural, cache: seq[Hash[512]],
                       header: Hash[256], nonce: uint64): HashimotoHash {.noSideEffect, inline.} =
 
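The loop folds each group of four mix words into one cmix word with fnv. For reference, a sketch of fnv as the Ethash spec defines it; the project's actual definition lives in the private modules and is assumed, not shown, here:

    # Ethash fnv: multiply by the 32-bit FNV prime, then xor.
    # uint32 arithmetic wraps in Nim, giving the spec's mod 2^32 for free.
    const FNV_PRIME = 0x01000193'u32

    proc fnv(v1, v2: uint32): uint32 {.noSideEffect, inline.} =
      (v1 * FNV_PRIME) xor v2
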
@@ -1,35 +0,0 @@
-# Copyright (c) 2018 Status Research & Development GmbH
-# Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0).
-
-import keccak_tiny,
-       ./casting
-
-proc concat_hash*(header: Hash[256], nonce: uint64): Hash[512] {.noSideEffect, inline, noInit.} =
-
-  # Can't take compile-time sizeof of arrays in objects: https://github.com/nim-lang/Nim/issues/5802
-  var cat{.noInit.}: array[256 div 8 + nonce.sizeof, byte]
-  let nonceBE = nonce.toByteArrayBE # Big endian representation of the number
-
-  # Concatenate header and the big-endian nonce
-  for i, b in header.data:
-    cat[i] = b
-
-  for i, b in nonceBE:
-    cat[i + header.sizeof] = b
-
-  result = keccak512 cat
-
-
-proc concat_hash*(s: U512, cmix: array[8, uint32]): array[(512 + 8 * 32) div 8, byte] {.noSideEffect, inline, noInit.} =
-
-  # Concatenate s and the compressed mix
-  let sb = s.toByteArrayBE
-  for i, b in sb:
-    result[i] = b
-
-  # TODO: Do we need to convert cmix to Big Endian??
-  let cmixb = cast[ByteArrayBE[32]](cmix)
-  for i, b in cmixb:
-    let offset = sb.len + i
-    result[offset] = b
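Both removed procs allocated a scratch buffer just to concatenate inputs before hashing; the new hashimoto writes header and nonce directly into the seed and the s/cmix bytes into a single stack array. For comparison, a hedged sketch of what the first concat_hash computed (seedOf and its parameter names are hypothetical; keccak_512 and Hash are the keccak_tiny symbols already used above):

    import keccak_tiny

    # Layout the deleted concat_hash(header, nonce) hashed:
    #   bytes  0..31 : the 32-byte header hash
    #   bytes 32..39 : the nonce as 8 big-endian bytes
    proc seedOf(headerBytes: array[32, byte],
                nonceBE: array[8, byte]): Hash[512] =
      var cat: array[40, byte]
      cat[0..31] = headerBytes
      cat[32..39] = nonceBE
      keccak_512 cat
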
@@ -31,7 +31,26 @@ template zipMap*[N: static[int], T, U](
   var result: array[N, outType]
 
   for i, x {.inject.}, y {.inject.} in enumerateZip(a, b):
-    {.unroll: 8.}
+    {.unroll: 4.}
     result[i] = op
 
   result
 
+
+template mapArray*[N: static[int], T](
+  a: array[N, T],
+  op: untyped): untyped =
+  ## inline map operation
+
+  type outType = type((
+    block:
+      var x{.inject.}: T;
+      op
+  ))
+
+  var result: array[N, outType]
+
+  for i, x {.inject.} in a:
+    {.unroll: 4.}
+    result[i] = op
+  result
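mapArray is what the main module now uses to byte-swap cmix, via mapArray(cmix, x.toByteArrayBE). A small usage sketch with a simpler op; x is the variable the template injects into the untyped expression:

    # Square every element of a fixed-size array.
    let squares = mapArray([1, 2, 3, 4], x * x)
    doAssert squares == [1, 4, 9, 16]
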
@@ -185,47 +185,46 @@ suite "Dagger hashimoto computation":
     let full_result = hashimoto_full(full_size, dataset, header, 0)
 
     # Check not null
-    var zero_array: array[8, uint32]
     var zero_hash : Hash[256]
-    check: light_result.mix_digest != zero_array
+    check: light_result.mix_digest != zero_hash
     check: light_result.value != zero_hash
     check: light_result == full_result
 
 
-  # test "Light compute":
-  #   # https://github.com/paritytech/parity/blob/05f47b635951f942b493747ca3bc71de90a95d5d/ethash/src/compute.rs#L372-L394
+  test "Light compute":
+    # https://github.com/paritytech/parity/blob/05f47b635951f942b493747ca3bc71de90a95d5d/ethash/src/compute.rs#L372-L394
 
-  #   let hash = cast[Hash[256]]([
-  #     byte 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
-  #     0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
-  #     0x05, 0x52, 0x7d, 0x72
-  #   ])
+    let hash = cast[Hash[256]]([
+      byte 0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
+      0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
+      0x05, 0x52, 0x7d, 0x72
+    ])
 
-  #   let expected_mix_hash = cast[array[8, uint32]]([
-  #     byte 0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
-  #     0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
-  #     0x64, 0x31, 0xab, 0x6d
-  #   ])
+    let expected_mix_hash = cast[Hash[256]]([
+      byte 0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
+      0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
+      0x64, 0x31, 0xab, 0x6d
+    ])
 
-  #   let expected_boundary = cast[Hash[256]]([
-  #     byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
-  #     0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
-  #     0xe9, 0x7e, 0x53, 0x84
-  #   ])
+    let expected_boundary = cast[Hash[256]]([
+      byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
+      0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
+      0xe9, 0x7e, 0x53, 0x84
+    ])
 
-  #   let nonce = 0xd7b3ac70a301a249'u64
-  #   ## difficulty = 0x085657254bd9u64
-  #   let blk = 486382'u # block number
-  #   let light_cache = mkcache(blk.get_cache_size, blk.get_seedhash)
+    let nonce = 0xd7b3ac70a301a249'u64
+    ## difficulty = 0x085657254bd9u64
+    let blk = 486382'u # block number
+    let light_cache = mkcache(blk.get_cache_size, blk.get_seedhash)
 
-  #   let r = hashimoto_light(blk.get_data_size,
-  #                           light_cache,
-  #                           blk.get_seedhash,
-  #                           nonce
-  #                           )
+    let r = hashimoto_light(blk.get_data_size,
+                            light_cache,
+                            blk.get_seedhash,
+                            nonce
+                            )
 
-  #   check: r.mix_digest == expected_mix_hash
-  #   check: r.value == expected_boundary
+    check: r.mix_digest == expected_mix_hash
+    check: r.value == expected_boundary
 
 
 suite "Real blocks test":
 
@@ -245,9 +244,10 @@ suite "Real blocks test":
       0x495732e0ed7a801c'u
     )
 
+    ## Todo: blockhash is not actually Hex
     check: light.value == cast[Hash[256]](
      hexToByteArrayBE[32]("00000b184f1fdd88bfd94c86c39e65db0c36144d5e43f745f722196e730cb614")
     )
-    check: light.mixDigest == cast[array[8, uint32]](
+    check: light.mixDigest == cast[Hash[256]](
      hexToByteArrayBE[32]("2f74cdeb198af0b9abe65d22d372e22fb2d474371774a9583c1cc427a07939f5")
     )