From ba7b983d6bdb36f5c65173fe18ae4283abd7c0e4 Mon Sep 17 00:00:00 2001 From: tersec Date: Tue, 20 Feb 2024 21:39:23 +0000 Subject: [PATCH] use non-EOL macOS in CI; apply styleCheck:error; prefer func to proc(){.noSideEffect.}; check refc in Nim 2.0+ --- .github/workflows/ci.yml | 2 +- benchmarks/proof_of_work_keccak_tiny.nim | 36 +++++++++---------- ethash.nimble | 23 ++++++------ spec/test_internal_c.nim | 6 ++-- src/ethash.nim | 8 ++--- src/mining.nim | 10 +++--- src/private/conversion.nim | 18 +++++----- src/private/functional.nim | 4 +-- src/private/intmath.nim | 8 ++--- src/private/primes.nim | 8 ++--- src/proof_of_work.nim | 38 ++++++++++---------- tests/test_mining.nim | 6 ++-- tests/test_proof_of_work.nim | 46 ++++++++++++------------ 13 files changed, 105 insertions(+), 108 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6c5a806..7e79eb7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,7 +24,7 @@ jobs: builder: ubuntu-20.04 - target: os: macos - builder: macos-11 + builder: macos-12 - target: os: windows builder: windows-latest diff --git a/benchmarks/proof_of_work_keccak_tiny.nim b/benchmarks/proof_of_work_keccak_tiny.nim index f608ed0..ff2ba81 100644 --- a/benchmarks/proof_of_work_keccak_tiny.nim +++ b/benchmarks/proof_of_work_keccak_tiny.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0). import math, endians, @@ -28,7 +28,7 @@ const # ############################################################################### # Parameters -proc get_cache_size*(block_number: uint): uint {.noSideEffect.}= +func get_cache_size*(block_number: uint): uint = result = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number div EPOCH_LENGTH) result -= HASH_BYTES while (let dm = divmod(result, HASH_BYTES); @@ -37,7 +37,7 @@ proc get_cache_size*(block_number: uint): uint {.noSideEffect.}= # Means checking that reminder == 0 and quotient is prime result -= 2 * HASH_BYTES -proc get_data_size*(block_number: uint): uint {.noSideEffect.}= +func get_data_size*(block_number: uint): uint = result = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number div EPOCH_LENGTH) result -= MIX_BYTES while (let dm = divmod(result, MIX_BYTES); @@ -47,16 +47,16 @@ proc get_data_size*(block_number: uint): uint {.noSideEffect.}= # ############################################################################### # Fetch from lookup tables of 2048 epochs of data sizes and cache sizes -proc get_datasize_lut*(block_number: Natural): uint64 {.noSideEffect, inline.} = +func get_datasize_lut*(block_number: Natural): uint64 {.inline.} = data_sizes[block_number div EPOCH_LENGTH] -proc get_cachesize_lut*(block_number: Natural): uint64 {.noSideEffect, inline.} = +func get_cachesize_lut*(block_number: Natural): uint64 {.inline.} = cache_sizes[block_number div EPOCH_LENGTH] # ############################################################################### # Cache generation -proc mkcache*(cache_size: uint64, seed: Hash[256]): seq[Hash[512]] {.noSideEffect.}= +func mkcache*(cache_size: uint64, seed: Hash[256]): seq[Hash[512]] = # Cache size let n = int(cache_size div HASH_BYTES) @@ -82,7 +82,7 @@ proc mkcache*(cache_size: uint64, seed: Hash[256]): seq[Hash[512]] {.noSideEffec const FNV_PRIME = 0x01000193 -proc fnv*[T: SomeUnsignedInt or Natural](v1, v2: T): uint32 {.inline, 
noSideEffect.}= +func fnv*[T: SomeUnsignedInt or Natural](v1, v2: T): uint32 {.inline.}= # Original formula is ((v1 * FNV_PRIME) xor v2) mod 2^32 # However contrary to Python and depending on the type T, @@ -103,7 +103,7 @@ proc fnv*[T: SomeUnsignedInt or Natural](v1, v2: T): uint32 {.inline, noSideEffe # ############################################################################### # Full dataset calculation -proc calc_dataset_item*(cache: seq[Hash[512]], i: Natural): Hash[512] {.noSideEffect, noInit.} = +func calc_dataset_item*(cache: seq[Hash[512]], i: Natural): Hash[512] {.noinit.} = let n = cache.len const r: uint32 = HASH_BYTES div WORD_BYTES @@ -159,14 +159,14 @@ template hashimoto(header: Hash[256], assert MIX_BYTES mod HASH_BYTES == 0 # combine header+nonce into a 64 byte seed - var s{.noInit.}: Hash[512] + var s{.noinit.}: Hash[512] let s_bytes = cast[ptr array[64, byte]](addr s) # Alias for to interpret s as a byte array let s_words = cast[ptr array[16, uint32]](addr s) # Alias for to interpret s as an uint32 array s_bytes[][0..<32] = header.data # We first populate the first 40 bytes of s with the concatenation # In template we need to dereference first otherwise it's not considered as var - var nonceLE{.noInit.}: array[8, byte] # the nonce should be concatenated with its LITTLE ENDIAN representation + var nonceLE{.noinit.}: array[8, byte] # the nonce should be concatenated with its LITTLE ENDIAN representation littleEndian64(addr nonceLE, unsafeAddr nonce) s_bytes[][32..<40] = cast[array[8,byte]](nonceLE) @@ -174,7 +174,7 @@ template hashimoto(header: Hash[256], # start the mix with replicated s assert MIX_BYTES div HASH_BYTES == 2 - var mix{.noInit.}: array[32, uint32] + var mix{.noinit.}: array[32, uint32] mix[0..<16] = s_words[] mix[16..<32] = s_words[] @@ -184,7 +184,7 @@ template hashimoto(header: Hash[256], let p1{.inject.} = p + 1 # Unrolled: for j in range(MIX_BYTES / HASH_BYTES): => for j in 0 ..< 2 - var newdata{.noInit.}: type mix + var newdata{.noinit.}: type mix newdata[0..<16] = cast[array[16, uint32]](dataset_lookup_p) newdata[16..<32] = cast[array[16, uint32]](dataset_lookup_p1) @@ -198,13 +198,13 @@ template hashimoto(header: Hash[256], for i in countup(0, mix.len - 1, 4): cmix[i div 4] = mix[i].fnv(mix[i+1]).fnv(mix[i+2]).fnv(mix[i+3]) - var concat{.noInit.}: array[64 + 32, byte] + var concat{.noinit.}: array[64 + 32, byte] concat[0..<64] = s_bytes[] concat[64..<96] = cast[array[32, byte]](result.mix_digest) result.value = keccak_256(concat) -proc hashimoto_light*(full_size:Natural, cache: seq[Hash[512]], - header: Hash[256], nonce: uint64): HashimotoHash {.noSideEffect.} = +func hashimoto_light*(full_size:Natural, cache: seq[Hash[512]], + header: Hash[256], nonce: uint64): HashimotoHash = hashimoto(header, nonce, @@ -213,8 +213,8 @@ proc hashimoto_light*(full_size:Natural, cache: seq[Hash[512]], calc_data_set_item(cache, p1), result) -proc hashimoto_full*(full_size:Natural, dataset: seq[Hash[512]], - header: Hash[256], nonce: uint64): HashimotoHash {.noSideEffect.} = +func hashimoto_full*(full_size:Natural, dataset: seq[Hash[512]], + header: Hash[256], nonce: uint64): HashimotoHash = # TODO spec mentions full_size but I don't think we need it (retrieve it from dataset.len) hashimoto(header, nonce, @@ -225,6 +225,6 @@ proc hashimoto_full*(full_size:Natural, dataset: seq[Hash[512]], # ############################################################################### # Defining the seed hash -proc get_seedhash*(block_number: uint64): Hash[256] {.noSideEffect.} = 
+func get_seedhash*(block_number: uint64): Hash[256] = for i in 0 ..< int(block_number div EPOCH_LENGTH): result = keccak256 result.data diff --git a/ethash.nimble b/ethash.nimble index 145a570..82389f3 100644 --- a/ethash.nimble +++ b/ethash.nimble @@ -1,3 +1,5 @@ +mode = ScriptMode.Verbose + packageName = "ethash" version = "0.0.1" author = "Status Research & Development GmbH" @@ -7,25 +9,20 @@ srcDir = "src" ### Dependencies -requires "nim >= 0.18.0", "nimcrypto >= 0.1.0" +requires "nim >= 1.6.0", "nimcrypto >= 0.1.0" -proc test(name: string, lang: string = "c") = +proc test(name: string, args: string) = if not dirExists "build": mkDir "build" - --run - switch("out", ("./build/" & name)) - setCommand lang, "tests/" & name & ".nim" + exec "nim c --styleCheck:usages --styleCheck:error --outdir:./build/ " & args & " --run tests/" & name & ".nim" + if (NimMajor, NimMinor) > (1, 6): + exec "nim c --styleCheck:usages --styleCheck:error --mm:refc --outdir:./build/ " & args & " --run tests/" & name & ".nim" task test, "Run Proof-of-Work tests (without mining)": - test "all_tests" + test "all_tests", "" task testRelease, "test release mode": - switch("define", "release") - testTask() + test "all_tests", "-d:release" task test_mining, "Run Proof-of-Work and mining tests (test in release mode + OpenMP + march=native)": - switch("define", "release") - switch("define", "openmp") - switch("define", "march_native") - switch("define", "ethash_mining") - test "all_tests" + test "all_tests", "-d:release -d:openmp -d:march_native -d:ethash_mining" diff --git a/spec/test_internal_c.nim b/spec/test_internal_c.nim index fa6f8d5..666ba5d 100644 --- a/spec/test_internal_c.nim +++ b/spec/test_internal_c.nim @@ -6,7 +6,7 @@ import ./internal ############################################### -proc toHex*[N: static[int]](ba: array[N, byte]): string {.noSideEffect.}= +func toHex*[N: static[int]](ba: array[N, byte]): string = ## Convert a big-endian byte array to its hex representation ## Output is in lowercase ## @@ -19,7 +19,7 @@ proc toHex*[N: static[int]](ba: array[N, byte]): string {.noSideEffect.}= result[2*i+1] = hexChars[int ba[i] and 0xF] -proc readHexChar(c: char): byte {.noSideEffect.}= +func readHexChar(c: char): byte = ## Converts an hex char to a byte case c of '0'..'9': result = byte(ord(c) - ord('0')) @@ -28,7 +28,7 @@ proc readHexChar(c: char): byte {.noSideEffect.}= else: raise newException(ValueError, $c & "is not a hexademical character") -proc hexToByteArrayBE*[N: static[int]](hexStr: string): array[N, byte] {.noSideEffect, noInit.}= +func hexToByteArrayBE*[N: static[int]](hexStr: string): array[N, byte] {.noinit.}= ## Read an hex string and store it in a Byte Array in Big-Endian order var i = 0 if hexStr[i] == '0' and (hexStr[i+1] == 'x' or hexStr[i+1] == 'X'): diff --git a/src/ethash.nim b/src/ethash.nim index ab5d321..04b836a 100644 --- a/src/ethash.nim +++ b/src/ethash.nim @@ -1,12 +1,12 @@ -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0). 
when defined(openmp): - {.passC: "-fopenmp".} - {.passL: "-fopenmp".} + {.passc: "-fopenmp".} + {.passl: "-fopenmp".} when defined(march_native): - {.passC: "-march=native".} + {.passc: "-march=native".} import ./proof_of_work export proof_of_work diff --git a/src/mining.nim b/src/mining.nim index 71ca080..e9e983e 100644 --- a/src/mining.nim +++ b/src/mining.nim @@ -60,11 +60,11 @@ proc mulCarry(a, b: uint64): tuple[carry, unit: uint64] = result.unit += z0 result.carry = (result.unit < z0).uint64 + z2 + z1 shr 32 -proc isValid(nonce: uint64, - difficulty: uint64, - full_size: Natural, - dataset: seq[MDigest[512]], - header: MDigest[256]): bool {.noSideEffect.}= +func isValid(nonce: uint64, + difficulty: uint64, + full_size: Natural, + dataset: seq[MDigest[512]], + header: MDigest[256]): bool = # Boundary is 2^256/difficulty # A valid nonce will have: hashimoto < 2^256/difficulty # We can't represent 2^256 as an uint256 so as a workaround we use: diff --git a/src/private/conversion.nim b/src/private/conversion.nim index 1f4b2ca..c3f1b1c 100644 --- a/src/private/conversion.nim +++ b/src/private/conversion.nim @@ -1,13 +1,13 @@ -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0). import nimcrypto -proc as_u32_words*[bits: static[int]](x: MDigest[bits]): array[bits div 32, uint32] {.inline, noSideEffect, noInit.}= +func as_u32_words*[bits: static[int]](x: MDigest[bits]): array[bits div 32, uint32] {.inline, noinit.}= # Convert an hash to its uint32 representation cast[type result](x) -proc readHexChar(c: char): byte {.noSideEffect.}= +func readHexChar(c: char): byte = ## Converts an hex char to a byte case c of '0'..'9': result = byte(ord(c) - ord('0')) @@ -16,7 +16,7 @@ proc readHexChar(c: char): byte {.noSideEffect.}= else: raise newException(ValueError, $c & "is not a hexademical character") -proc hexToByteArrayBE*[N: static[int]](hexStr: string): array[N, byte] {.noSideEffect, noInit.}= +func hexToByteArrayBE*[N: static[int]](hexStr: string): array[N, byte] {.noinit.}= ## Read an hex string and store it in a Byte Array in Big-Endian order var i = 0 if hexStr[i] == '0' and (hexStr[i+1] == 'x' or hexStr[i+1] == 'X'): @@ -28,7 +28,7 @@ proc hexToByteArrayBE*[N: static[int]](hexStr: string): array[N, byte] {.noSideE result[i] = hexStr[2*i].readHexChar shl 4 or hexStr[2*i+1].readHexChar inc(i) -proc hexToSeqBytesBE*(hexStr: string): seq[byte] {.noSideEffect.}= +func hexToSeqBytesBE*(hexStr: string): seq[byte] = ## Read an hex string and store it in a sequence of bytes in Big-Endian order var i = 0 if hexStr[i] == '0' and (hexStr[i+1] == 'x' or hexStr[i+1] == 'X'): @@ -41,7 +41,7 @@ proc hexToSeqBytesBE*(hexStr: string): seq[byte] {.noSideEffect.}= result[i] = hexStr[2*i].readHexChar shl 4 or hexStr[2*i+1].readHexChar inc(i) -proc toHex*[N: static[int]](ba: array[N, byte]): string {.noSideEffect.}= +func toHex*[N: static[int]](ba: array[N, byte]): string = ## Convert a big-endian byte array to its hex representation ## Output is in lowercase @@ -52,7 +52,7 @@ proc toHex*[N: static[int]](ba: array[N, byte]): string {.noSideEffect.}= result[2*i] = hexChars[int ba[i] shr 4 and 0xF] result[2*i+1] = hexChars[int ba[i] and 0xF] -proc toHex*(ba: seq[byte]): string {.noSideEffect, noInit.}= +func toHex*(ba: seq[byte]): string {.noinit.}= ## Convert a big-endian byte sequence to its hex representation ## Output is in lowercase @@ 
-65,7 +65,7 @@ proc toHex*(ba: seq[byte]): string {.noSideEffect, noInit.}= result[2*i] = hexChars[int ba[i] shr 4 and 0xF] result[2*i+1] = hexChars[int ba[i] and 0xF] -proc toByteArrayBE*[T: SomeInteger](num: T): array[T.sizeof, byte] {.noSideEffect, noInit, inline.}= +func toByteArrayBE*[T: SomeInteger](num: T): array[T.sizeof, byte] {.noinit, inline.}= ## Convert an int (in native host endianness) to a big-endian byte array # Note: only works on devel @@ -78,5 +78,5 @@ proc toByteArrayBE*[T: SomeInteger](num: T): array[T.sizeof, byte] {.noSideEffec for i in 0 ..< N: result[i] = byte(num shr T((N-1-i) * 8)) -proc toByteArrayBE*[bits: static[int]](x: MDigest[bits]): array[bits div 8, byte] {.inline, noSideEffect, noInit.}= +func toByteArrayBE*[bits: static[int]](x: MDigest[bits]): array[bits div 8, byte] {.inline, noinit.}= cast[type result](x.data) diff --git a/src/private/functional.nim b/src/private/functional.nim index c82b28d..ca61b87 100644 --- a/src/private/functional.nim +++ b/src/private/functional.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0). @@ -27,7 +27,7 @@ template zipMap*[N: static[int], T, U]( )) {.pragma: align64, codegenDecl: "$# $# __attribute__((aligned(64)))".} - var result{.noInit, align64.}: array[N, outType] + var result{.noinit, align64.}: array[N, outType] for i, x {.inject.}, y {.inject.} in enumerateZip(a, b): {.unroll: 4.} # This is a no-op at the moment diff --git a/src/private/intmath.nim b/src/private/intmath.nim index 9108eb4..725a563 100644 --- a/src/private/intmath.nim +++ b/src/private/intmath.nim @@ -1,6 +1,6 @@ # From https://github.com/numforge/number-theory/ # MIT Licence -# Copyright (c) 2016 Mamy Ratsimbazafy +# Copyright (c) 2016-2024 Mamy Ratsimbazafy # ########### Number of bits to represent a number @@ -59,16 +59,16 @@ type proc ldiv(a, b: clong): ldiv_t {.importc: "ldiv", header: "".} proc lldiv(a, b: clonglong): lldiv_t {.importc: "lldiv", header: "".} -proc divmod*(a, b: int32): tuple[quot, rem: clong] {.inline, noSideEffect, noInit.}= +func divmod*(a, b: int32): tuple[quot, rem: clong] {.inline, noinit.}= ## Compute quotient and reminder of integer division in a single intrinsics operation # TODO: changing clong to int32 poses an issue for some reason cast[type result](ldiv(a,b)) -proc divmod*(a, b: int64): tuple[quot, rem: int64] {.inline, noSideEffect, noInit.}= +func divmod*(a, b: int64): tuple[quot, rem: int64] {.inline, noinit.}= ## Compute quotient and reminder of integer division in a single intrinsicsoperation cast[type result](lldiv(a,b)) -proc divmod*[T: SomeUnsignedInt](a, b: T): tuple[quot, rem: T] {.inline, noSideEffect, noInit.}= +func divmod*[T: SomeUnsignedInt](a, b: T): tuple[quot, rem: T] {.inline, noinit.}= # There is no single instruction for unsigned ints # Hopefully the compiler does its work properly (a div b, a mod b) diff --git a/src/private/primes.nim b/src/private/primes.nim index d37889c..538a61f 100644 --- a/src/private/primes.nim +++ b/src/private/primes.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0). # Primality testing. TODO: a scalable implementation (i.e. 
Miller-Rabin) @@ -6,14 +6,14 @@ import ./intmath -proc isPrime*[T: SomeUnsignedInt](x: T): bool {.noSideEffect.}= +func isPrime*[T: SomeUnsignedInt](x: T): bool = for i in 2.T .. isqrt x: if x mod i == 0: return false return true -proc isPrime*(x: Natural): bool {.noSideEffect.}= +func isPrime*(x: Natural): bool = for i in 2 .. isqrt x: if x mod i == 0: return false - return true \ No newline at end of file + return true diff --git a/src/proof_of_work.nim b/src/proof_of_work.nim index e3c0b9b..1417fe8 100644 --- a/src/proof_of_work.nim +++ b/src/proof_of_work.nim @@ -28,7 +28,7 @@ const # ############################################################################### # Parameters -proc get_cache_size*(block_number: uint64): uint64 {.noSideEffect.}= +func get_cache_size*(block_number: uint64): uint64 = result = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number div EPOCH_LENGTH) result -= HASH_BYTES while (let dm = intmath.divmod(result, HASH_BYTES); @@ -37,7 +37,7 @@ proc get_cache_size*(block_number: uint64): uint64 {.noSideEffect.}= # means checking that remainder == 0 and quotient is prime result -= 2 * HASH_BYTES -proc get_data_size*(block_number: uint64): uint64 {.noSideEffect.}= +func get_data_size*(block_number: uint64): uint64 = result = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number div EPOCH_LENGTH) result -= MIX_BYTES while (let dm = intmath.divmod(result, MIX_BYTES); @@ -48,16 +48,16 @@ proc get_data_size*(block_number: uint64): uint64 {.noSideEffect.}= # Fetch from lookup tables of 2048 epochs of data sizes and cache sizes import ./data_sizes -proc get_datasize_lut*(block_number: Natural): uint64 {.noSideEffect, inline.} = +func get_datasize_lut*(block_number: Natural): uint64 {.inline.} = data_sizes[block_number div EPOCH_LENGTH] -proc get_cachesize_lut*(block_number: Natural): uint64 {.noSideEffect, inline.} = +func get_cachesize_lut*(block_number: Natural): uint64 {.inline.} = cache_sizes[block_number div EPOCH_LENGTH] # ############################################################################### # Cache generation -proc mkcache*(cache_size: uint64, seed: MDigest[256]): seq[MDigest[512]] {.noSideEffect.}= +func mkcache*(cache_size: uint64, seed: MDigest[256]): seq[MDigest[512]] = # Cache size let n = int(cache_size div HASH_BYTES) @@ -83,7 +83,7 @@ proc mkcache*(cache_size: uint64, seed: MDigest[256]): seq[MDigest[512]] {.noSid const FNV_PRIME = 0x01000193 -proc fnv*[T: SomeUnsignedInt or Natural](v1, v2: T): uint32 {.inline, noSideEffect.}= +func fnv*[T: SomeUnsignedInt or Natural](v1, v2: T): uint32 {.inline.}= # Original formula is ((v1 * FNV_PRIME) xor v2) mod 2^32 # However contrary to Python and depending on the type T, @@ -104,7 +104,7 @@ proc fnv*[T: SomeUnsignedInt or Natural](v1, v2: T): uint32 {.inline, noSideEffe # ############################################################################### # Full dataset calculation -proc calc_dataset_item*(cache: seq[MDigest[512]], i: Natural): MDigest[512] {.noSideEffect, noInit.} = +func calc_dataset_item*(cache: seq[MDigest[512]], i: Natural): MDigest[512] {.noinit.} = let n = cache.len const r: uint32 = HASH_BYTES div WORD_BYTES @@ -161,14 +161,14 @@ template hashimoto(header: MDigest[256], # combine header+nonce into a 64 byte seed {.pragma: align64, codegenDecl: "$# $# __attribute__((aligned(64)))".} - var s{.align64, noInit.}: MDigest[512] + var s{.align64, noinit.}: MDigest[512] let s_bytes = cast[ptr array[64, byte]](addr s) # Alias to interpret s as a byte array let s_words = cast[ptr array[16, 
uint32]](addr s) # Alias to interpret s as an uint32 array s_bytes[][0..<32] = header.data # We first populate the first 40 bytes of s with the concatenation # In template we need to dereference first otherwise it's not considered as var - var nonceLE{.noInit.}: array[8, byte] # the nonce should be concatenated with its LITTLE ENDIAN representation + var nonceLE{.noinit.}: array[8, byte] # the nonce should be concatenated with its LITTLE ENDIAN representation littleEndian64(addr nonceLE, unsafeAddr nonce) s_bytes[][32..<40] = nonceLE @@ -176,7 +176,7 @@ template hashimoto(header: MDigest[256], # start the mix with replicated s assert MIX_BYTES div HASH_BYTES == 2 - var mix{.align64, noInit.}: array[32, uint32] + var mix{.align64, noinit.}: array[32, uint32] mix[0..<16] = s_words[] mix[16..<32] = s_words[] @@ -186,7 +186,7 @@ template hashimoto(header: MDigest[256], let p1{.inject.} = p + 1 # Unrolled: for j in range(MIX_BYTES / HASH_BYTES): => for j in 0 ..< 2 - var newdata{.noInit.}: type mix + var newdata{.noinit.}: type mix newdata[0..<16] = cast[array[16, uint32]](dataset_lookup_p) newdata[16..<32] = cast[array[16, uint32]](dataset_lookup_p1) @@ -200,23 +200,23 @@ template hashimoto(header: MDigest[256], for i in countup(0, mix.len - 1, 4): cmix[i div 4] = mix[i].fnv(mix[i+1]).fnv(mix[i+2]).fnv(mix[i+3]) - var concat{.noInit.}: array[64 + 32, byte] + var concat{.noinit.}: array[64 + 32, byte] concat[0..<64] = s_bytes[] concat[64..<96] = cast[array[32, byte]](result.mix_digest) result.value = keccak_256.digest concat -proc hashimoto_light*(full_size:Natural, cache: seq[MDigest[512]], - header: MDigest[256], nonce: uint64): HashimotoHash {.noSideEffect.} = +func hashimoto_light*(full_size:Natural, cache: seq[MDigest[512]], + header: MDigest[256], nonce: uint64): HashimotoHash = hashimoto(header, nonce, full_size, - calc_data_set_item(cache, p), - calc_data_set_item(cache, p1), + calc_dataset_item(cache, p), + calc_dataset_item(cache, p1), result) -proc hashimoto_full*(full_size:Natural, dataset: seq[MDigest[512]], - header: MDigest[256], nonce: uint64): HashimotoHash {.noSideEffect.} = +func hashimoto_full*(full_size:Natural, dataset: seq[MDigest[512]], + header: MDigest[256], nonce: uint64): HashimotoHash = # TODO spec mentions full_size but I don't think we need it (retrieve it from dataset.len) hashimoto(header, nonce, @@ -227,6 +227,6 @@ proc hashimoto_full*(full_size:Natural, dataset: seq[MDigest[512]], # ############################################################################### # Defining the seed hash -proc get_seedhash*(block_number: uint64): MDigest[256] {.noSideEffect.} = +func get_seedhash*(block_number: uint64): MDigest[256] = for i in 0 ..< int(block_number div EPOCH_LENGTH): result = keccak256.digest result.data diff --git a/tests/test_mining.nim b/tests/test_mining.nim index 34af184..9740bf5 100644 --- a/tests/test_mining.nim +++ b/tests/test_mining.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0). 
import ../src/ethash, unittest, times, strutils, nimcrypto @@ -11,12 +11,12 @@ suite "Test mining": # POC-9 testnet, epoch 0 let blck = 22'u # block number - cache = mkcache(get_cachesize(blck), get_seedhash(blck)) + cache = mkcache(get_cache_size(blck), get_seedhash(blck)) header = cast[MDigest[256]]( hexToByteArrayBE[32]("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d") ) difficulty = 132416'u64 - full_size = get_datasize(blck) + full_size = get_data_size(blck) echo "\nGenerating dataset" var start = epochTime() diff --git a/tests/test_proof_of_work.nim b/tests/test_proof_of_work.nim index 5de3ce5..e669ae5 100644 --- a/tests/test_proof_of_work.nim +++ b/tests/test_proof_of_work.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0). import ../src/ethash, unittest, strutils, algorithm, random, sequtils, nimcrypto @@ -43,8 +43,8 @@ suite "Endianness (not implemented)": suite "Genesis parameters": # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/test/c/test.cpp#L155-L180 let - full_size = get_datasize(0) - cache_size = get_cachesize(0) + full_size = get_data_size(0) + cache_size = get_cache_size(0) test "Full dataset size should be less or equal DATASET_BYTES_INIT": check: full_size <= DATASET_BYTES_INIT @@ -79,19 +79,19 @@ suite "Epoch change": check: get_cache_size(EPOCH_LENGTH * 2048 - 1) == 285081536'u test "Full dataset size at the change of epochs - Look-up tables": - check: get_data_size_lut(EPOCH_LENGTH - 1) == 1073739904'u - check: get_data_size_lut(EPOCH_LENGTH) == 1082130304'u - check: get_data_size_lut(EPOCH_LENGTH + 1) == 1082130304'u - check: get_data_size_lut(EPOCH_LENGTH * 2046) == 18236833408'u - check: get_data_size_lut(EPOCH_LENGTH * 2047) == 18245220736'u + check: get_datasize_lut(EPOCH_LENGTH - 1) == 1073739904'u + check: get_datasize_lut(EPOCH_LENGTH) == 1082130304'u + check: get_datasize_lut(EPOCH_LENGTH + 1) == 1082130304'u + check: get_datasize_lut(EPOCH_LENGTH * 2046) == 18236833408'u + check: get_datasize_lut(EPOCH_LENGTH * 2047) == 18245220736'u test "Cache size at the change of epochs - Look-up tables": - check: get_cache_size_lut(EPOCH_LENGTH - 1) == 16776896'u - check: get_cache_size_lut(EPOCH_LENGTH) == 16907456'u - check: get_cache_size_lut(EPOCH_LENGTH + 1) == 16907456'u - check: get_cache_size_lut(EPOCH_LENGTH * 2046) == 284950208'u - check: get_cache_size_lut(EPOCH_LENGTH * 2047) == 285081536'u - check: get_cache_size_lut(EPOCH_LENGTH * 2048 - 1) == 285081536'u + check: get_cachesize_lut(EPOCH_LENGTH - 1) == 16776896'u + check: get_cachesize_lut(EPOCH_LENGTH) == 16907456'u + check: get_cachesize_lut(EPOCH_LENGTH + 1) == 16907456'u + check: get_cachesize_lut(EPOCH_LENGTH * 2046) == 284950208'u + check: get_cachesize_lut(EPOCH_LENGTH * 2047) == 285081536'u + check: get_cachesize_lut(EPOCH_LENGTH * 2048 - 1) == 285081536'u test "Random testing of full size": # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/test/python/test_pyethash.py#L23-L28 @@ -191,13 +191,13 @@ suite "Real blocks test": # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/test/c/test.cpp#L603-L617 # POC-9 testnet, epoch 0 let blck = 22'u # block number - let cache = mkcache(get_cachesize(blck), get_seedhash(blck)) + let cache = mkcache(get_cache_size(blck), get_seedhash(blck)) let 
header = cast[MDigest[256]]( hexToByteArrayBE[32]("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d") ) let light = hashimoto_light( - get_datasize(blck), + get_data_size(blck), cache, header, 0x495732e0ed7a801c'u @@ -206,7 +206,7 @@ suite "Real blocks test": check: light.value == cast[MDigest[256]]( hexToByteArrayBE[32]("00000b184f1fdd88bfd94c86c39e65db0c36144d5e43f745f722196e730cb614") ) - check: light.mixDigest == cast[MDigest[256]]( + check: light.mix_digest == cast[MDigest[256]]( hexToByteArrayBE[32]("2f74cdeb198af0b9abe65d22d372e22fb2d474371774a9583c1cc427a07939f5") ) @@ -214,19 +214,19 @@ suite "Real blocks test": # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/ethash_test.go#L63-L69 # POC-9 testnet, epoch 1 let blck = 30001'u # block number - let cache = mkcache(get_cachesize(blck), get_seedhash(blck)) + let cache = mkcache(get_cache_size(blck), get_seedhash(blck)) let header = cast[MDigest[256]]( hexToByteArrayBE[32]("7e44356ee3441623bc72a683fd3708fdf75e971bbe294f33e539eedad4b92b34") ) let light = hashimoto_light( - get_datasize(blck), + get_data_size(blck), cache, header, 0x318df1c8adef7e5e'u ) - check: light.mixDigest == cast[MDigest[256]]( + check: light.mix_digest == cast[MDigest[256]]( hexToByteArrayBE[32]("144b180aad09ae3c81fb07be92c8e6351b5646dda80e6844ae1b697e55ddde84") ) @@ -234,18 +234,18 @@ suite "Real blocks test": # https://github.com/ethereum/ethash/blob/f5f0a8b1962544d2b6f40df8e4b0d9a32faf8f8e/ethash_test.go#L70-L78 # POC-9 testnet, epoch 2 let blck = 60000'u # block number - let cache = mkcache(get_cachesize(blck), get_seedhash(blck)) + let cache = mkcache(get_cache_size(blck), get_seedhash(blck)) let header = cast[MDigest[256]]( hexToByteArrayBE[32]("5fc898f16035bf5ac9c6d9077ae1e3d5fc1ecc3c9fd5bee8bb00e810fdacbaa0") ) let light = hashimoto_light( - get_datasize(blck), + get_data_size(blck), cache, header, 0x50377003e5d830ca'u ) - check: light.mixDigest == cast[MDigest[256]]( + check: light.mix_digest == cast[MDigest[256]]( hexToByteArrayBE[32]("ab546a5b73c452ae86dadd36f0ed83a6745226717d3798832d1b20b489e82063") )
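
For reference, the core equivalence behind the proc {.noSideEffect.} -> func conversion in this patch, as a minimal standalone sketch (the `square` example below is hypothetical illustration, not code from this repository):

# In Nim, `func` is shorthand for `proc` with the `{.noSideEffect.}` pragma,
# so the signature rewrites in this patch do not change behavior.
proc squareOld(x: int): int {.noSideEffect.} =
  x * x

func squareNew(x: int): int =
  x * x

static: doAssert squareOld(3) == squareNew(3)

# Nim 2.0 made ORC the default memory manager; the nimble test helper above
# therefore re-runs each test binary with the older refc GC whenever the
# compiler is newer than 1.6, using the same version check sketched here:
when (NimMajor, NimMinor) > (1, 6):
  echo "would also be exercised with --mm:refc"

The identifier renames in the tests (get_cachesize -> get_cache_size, get_datasize -> get_data_size, mixDigest -> mix_digest, and so on) follow from --styleCheck:usages --styleCheck:error, which makes the compiler reject any usage spelled differently from its declaration.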