From 6aedd321119b789e615dca1e37a5ebdb2138c3c7 Mon Sep 17 00:00:00 2001
From: mratsim
Date: Sun, 25 Feb 2018 12:34:33 +0100
Subject: [PATCH] =?UTF-8?q?Pass=20the=20tests=20:fire:=20=E2=9A=A0?=
 =?UTF-8?q?=E2=9A=A0=20Endianness=20in=20PoW=20spec=20seems=20wrong?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/ethash.nim          |  7 ++++++-
 src/private/casting.nim |  2 +-
 tests/all_tests.nim     | 12 ++++++------
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/src/ethash.nim b/src/ethash.nim
index 14c14fd..da9c0a1 100644
--- a/src/ethash.nim
+++ b/src/ethash.nim
@@ -170,7 +170,11 @@ proc hashimoto(header: Hash[256],
   let s_words = cast[ptr array[16, uint32]](addr s) # Alias for to interpret s as an uint32 array
 
   s_bytes[0..<32] = header.toByteArrayBE # We first populate the first 40 bytes of s with the concatenation
-  s_bytes[32..<40] = nonce.toByteArrayBE
+
+  when system.cpuEndian == littleEndian: # ⚠⚠ Warning ⚠⚠, the spec is WRONG compared to tests here
+    s_bytes[32..<40] = cast[array[8,byte]](nonce) # the nonce should be concatenated with its LITTLE ENDIAN representation
+  else:
+    raise newException(ValueError, "Big endian system not supported yet")
 
   s = keccak_512 s_bytes[0..<40] # TODO: Does this allocate a seq?
 
@@ -196,6 +200,7 @@ proc hashimoto(header: Hash[256],
   for i in countup(0, mix.len - 1, 4):
     cmix[i div 4] = mix[i].fnv(mix[i+1]).fnv(mix[i+2]).fnv(mix[i+3])
 
+  # ⚠⚠ Warning ⚠⚠: Another big endian little endian issue?
   # result.mix_digest = cast[Hash[256]](
   #   mapArray(cmix, x.toByteArrayBE) # Each uint32 must be changed to Big endian
   # )
diff --git a/src/private/casting.nim b/src/private/casting.nim
index 0c1fe34..320eafc 100644
--- a/src/private/casting.nim
+++ b/src/private/casting.nim
@@ -94,7 +94,7 @@ proc toHex*(ba: seq[byte]): string {.noSideEffect, noInit.}=
     result[2*i+1] = hexChars[int ba[i] and 0xF]
 
 proc toByteArrayBE*[T: SomeInteger](num: T): ByteArrayBE[T.sizeof] {.noSideEffect, noInit, inline.}=
-  ## Convert an UInt256 (in native host endianness) to a big-endian byte array
+  ## Convert an int (in native host endianness) to a big-endian byte array
   # Note: only works on devel
 
   when system.cpuEndian == bigEndian:
diff --git a/tests/all_tests.nim b/tests/all_tests.nim
index 1b14cd9..34d1ce0 100644
--- a/tests/all_tests.nim
+++ b/tests/all_tests.nim
@@ -166,12 +166,12 @@ suite "Dagger hashimoto computation":
 
     check: $calc_dataset_item(cache, 0) == expected
 
-  # test "Real dataset and recomputation from cache matches":
-  #   # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/test/c/test.cpp#L360-L374
-  #   for i in 0 ..< full_size div sizeof(Hash[512]):
-  #     for j in 0 ..< 32:
-  #       let expected = calc_dataset_item(cache, j)
-  #       check: full[j] == expected
+  test "Real dataset and recomputation from cache matches":
+    # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/test/c/test.cpp#L360-L374
+    for i in 0 ..< full_size div sizeof(Hash[512]):
+      for j in 0 ..< 32:
+        let expected = calc_dataset_item(cache, j)
+        check: full[j] == expected
 
   test "Light and full Hashimoto agree":
     # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/test/python/test_pyethash.py#L44-L58
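
For illustration, a minimal standalone Nim sketch of the seed construction that the first hunk implements: the 32-byte header hash is copied as-is (already big-endian bytes) and the 8-byte nonce is appended in its little-endian byte order, which is what the reference tests expect. The proc name buildSeed and the fixed array sizes are illustrative assumptions, not the library's API; the shift-based byte extraction is a portable stand-in for the cast used in the patch.

  proc buildSeed(headerBE: array[32, byte], nonce: uint64): array[40, byte] =
    # First 32 bytes: the big-endian header hash, copied unchanged.
    for i in 0 ..< 32:
      result[i] = headerBE[i]
    # Last 8 bytes: the nonce in little-endian order, least-significant byte
    # first, regardless of host endianness (e.g. nonce 0x0102030405060708
    # appends 08 07 06 05 04 03 02 01).
    for i in 0 ..< 8:
      result[32 + i] = byte((nonce shr (8 * i)) and 0xFF'u64)

  when isMainModule:
    var header: array[32, byte]                     # stand-in for the Keccak-256 header hash
    let seed = buildSeed(header, 0x0102030405060708'u64)
    doAssert seed[32] == 0x08'u8 and seed[39] == 0x01'u8  # nonce tail is little-endian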