diff --git a/src/private/conversion.nim b/src/private/conversion.nim
index 3430853..53ffc57 100644
--- a/src/private/conversion.nim
+++ b/src/private/conversion.nim
@@ -10,22 +10,27 @@
 import ./uint_type, macros
 
+# converter mpUint64*(x: MpUintImpl[uint32]): uint64 =
+#   cast[ptr uint64](unsafeAddr x)[]
 
-proc toSubtype*[T: SomeInteger](b: bool, typ: typedesc[T]): T {.noSideEffect, inline.}=
+proc initMpUintImpl*[T: BaseUint](x: T): MpUintImpl[T] {.inline, noSideEffect.} =
+  result.lo = x
+
+proc toSubtype*[T: SomeInteger](b: bool, _: typedesc[T]): T {.noSideEffect, inline.}=
   b.T
 
-proc toSubtype*[T: MpUintImpl](b: bool, typ: typedesc[T]): T {.noSideEffect, inline.}=
+proc toSubtype*[T: MpUintImpl](b: bool, _: typedesc[T]): T {.noSideEffect, inline.}=
   type SubTy = type result.lo
   result.lo = toSubtype(b, SubTy)
 
-proc zero*(typ: typedesc[BaseUint]): typ {.compileTime.} =
-  typ()
+proc zero*[T: BaseUint](_: typedesc[T]): T {.noSideEffect, inline.}=
+  result = T(0)
 
-proc one*[T: BaseUint](typ: typedesc[T]): T {.noSideEffect, inline.}=
+proc one*[T: BaseUint](_: typedesc[T]): T {.noSideEffect, inline.}=
   when T is SomeUnsignedInt:
-    T(1)
+    result = T(1)
   else:
-    result.lo = 1
+    result.lo = one(type result.lo)
 
 proc toUint*(n: MpUIntImpl): auto {.noSideEffect, inline.}=
   ## Casts a multiprecision integer to an uint of the same size
diff --git a/src/private/stdlib_bitops.nim b/src/private/stdlib_bitops.nim
index 6bd60d6..c56df67 100644
--- a/src/private/stdlib_bitops.nim
+++ b/src/private/stdlib_bitops.nim
@@ -25,7 +25,7 @@
 ## may return undefined and/or platform dependant value if given invalid input.
 
 const useBuiltins* = not defined(noIntrinsicsBitOpts)
-const noUndefined* = defined(noUndefinedBitOpts)
+# const noUndefined* = defined(noUndefinedBitOpts)
 const useGCC_builtins* = (defined(gcc) or defined(llvm_gcc) or defined(clang)) and useBuiltins
 const useICC_builtins* = defined(icc) and useBuiltins
 const useVCC_builtins* = defined(vcc) and useBuiltins
@@ -88,3 +88,24 @@ elif useICC_builtins:
     var index: uint32
     discard fnc(index.addr, v)
     index.int
+
+
+proc countLeadingZeroBits*(x: SomeInteger): int {.inline, noSideEffect.} =
+  ## Returns the number of leading zero bits in an integer.
+  ## If `x` is zero the result is 0: the ``noUndefinedBitOpts`` guard is
+  ## commented out below, so the zero check is always performed.
+
+  # when noUndefined:
+  if x == 0:
+    return 0
+
+  when nimvm:
+    when sizeof(x) <= 4: result = sizeof(x)*8 - 1 - fastlog2_nim(x.uint32)
+    else: result = sizeof(x)*8 - 1 - fastlog2_nim(x.uint64)
+  else:
+    when useGCC_builtins:
+      when sizeof(x) <= 4: result = builtin_clz(x.uint32).int - (32 - sizeof(x)*8)
+      else: result = builtin_clzll(x.uint64).int
+    else:
+      when sizeof(x) <= 4: result = sizeof(x)*8 - 1 - fastlog2_nim(x.uint32)
+      else: result = sizeof(x)*8 - 1 - fastlog2_nim(x.uint64)
diff --git a/src/private/uint_binary_ops.nim b/src/private/uint_binary_ops.nim
index d1e4152..8c5b5f2 100644
--- a/src/private/uint_binary_ops.nim
+++ b/src/private/uint_binary_ops.nim
@@ -7,10 +7,13 @@
 #
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
-import ./bithacks, ./conversion,
+import ./bithacks, ./conversion, ./stdlib_bitops,
        ./uint_type, ./uint_comparison,
-       ./uint_bitwise_ops
+       ./uint_bitwise_ops,
+       ./size_mpuintimpl
+
+# ############ Addition & Subtraction ############ #
 
 proc `+=`*(x: var MpUintImpl, y: MpUintImpl) {.noSideEffect, inline.}=
   ## In-place addition for multi-precision unsigned int
@@ -45,6 +48,8 @@ proc `-`*(x, y: MpUintImpl): MpUintImpl {.noSideEffect, noInit, inline.}=
 
   result -= y
 
+# ################### Multiplication ################### #
+
 proc naiveMulImpl[T: MpUintImpl](x, y: T): MpUintImpl[T] {.noSideEffect, noInit, inline.}
   # Forward declaration
 
@@ -57,10 +62,10 @@ proc naiveMul[T: BaseUint](x, y: T): MpUintImpl[T] {.noSideEffect, noInit, inlin
 
   elif T.sizeof == 8: # uint64 or MpUint[uint32]
     # We cannot double uint64 to uint128
-    naiveMulImpl(x.toMpUintImpl, y.toMpUintImpl)
+    cast[type result](naiveMulImpl(x.toMpUintImpl, y.toMpUintImpl))
   else:
     # Case: at least uint128 * uint128 --> uint256
-    naiveMulImpl(x, y)
+    cast[type result](naiveMulImpl(x, y))
 
 proc naiveMulImpl[T: MpUintImpl](x, y: T): MpUintImpl[T] {.noSideEffect, noInit, inline.}=
 
@@ -78,7 +83,7 @@ proc naiveMulImpl[T: MpUintImpl](x, y: T): MpUintImpl[T] {.noSideEffect, noInit,
   #     and introduce branching
   #   - More total operations means more register moves
 
-  const halfSize = x.size_mpuintimpl div 2
+  const halfSize = size_mpuintimpl(x) div 2
   let
     z0 = naiveMul(x.lo, y.lo)
     tmp = naiveMul(x.hi, y.lo)
@@ -87,10 +92,10 @@ proc naiveMulImpl[T: MpUintImpl](x, y: T): MpUintImpl[T] {.noSideEffect, noInit,
   z1 += naiveMul(x.hi, y.lo)
   let z2 = (z1 < tmp).toSubtype(T) + naiveMul(x.hi, y.hi)
 
-  let tmp2 = z1.lo shl halfSize
+  let tmp2 = initMpUintImpl(z1.lo shl halfSize)
   result.lo = tmp2
   result.lo += z0
-  result.hi = (result.lo < tmp2).toSubtype(T) + z2 + z1.hi
+  result.hi = (result.lo < tmp2).toSubtype(T) + z2 + initMpUintImpl(z1.hi)
 
 proc `*`*(x, y: MpUintImpl): MpUintImpl {.noSideEffect, noInit.}=
   ## Multiplication for multi-precision unsigned uint
@@ -111,44 +116,115 @@ proc `*`*(x, y: MpUintImpl): MpUintImpl {.noSideEffect, noInit.}=
   #   If T is a type
   #   For T * T --> T we don't need to compute z2 as it always overflow
   #   For T * T --> 2T (uint64 * uint64 --> uint128) we use extra precision multiplication
-
   result = naiveMul(x.lo, y.lo)
   result.hi += (naiveMul(x.hi, y.lo) + naiveMul(x.lo, y.hi)).lo
 
-proc divmod*(x, y: MpUintImpl): tuple[quot, rem: MpUintImpl] {.noSideEffect.}=
-  ## Division for multi-precision unsigned uint
-  ## Returns quotient + reminder in a (quot, rem) tuple
-  #
-  # Implementation through binary shift division
-  if unlikely(y.isZero):
-    raise newException(DivByZeroError, "You attempted to divide by zero")
 
-  type SubTy = type x.lo
+# ################### Division ################### #
+from ./primitive_divmod import divmod
+
+proc divmod*(x, y: MpUintImpl): tuple[quot, rem: MpUintImpl] {.noSideEffect.}
+
+proc div2n1n[T: BaseUint](x_hi, x_lo, y: T): tuple[quot, rem: T] {.noSideEffect, noInit.} =
+  const
+    size = size_mpuintimpl(x_hi)
+    halfSize = size div 2
+    halfMask = (one(T) shl halfSize) - one(T)
+    base = one(T) shl halfSize
+
+  if unlikely(x_hi >= y):
+    raise newException(ValueError, "Division overflow")
+
+  let clz = countLeadingZeroBits(y) # We assume that clz returns 0 for an input of 0
+
+  # Normalization: shift so that the divisor's most significant bit is set
+  let xn = MpUintImpl[T](hi: x_hi, lo: x_lo) shl clz
+  let yn = y shl clz
+
+  # Break divisor in 2 and dividend in 4
+  let yn_hi = yn shr halfSize
+  let yn_lo = yn and halfMask
+
+  let xnlohi = xn.lo shr halfSize
+  let xnlolo = xn.lo and halfMask
+
+  # First half of the quotient
+  var (q1, r) = divmod(xn.hi, yn_hi)
+
+  while (q1 >= base) or ((q1 * yn_lo) > (base * r + xnlohi)):
+    q1 -= one(T)
+    r += yn_hi
+    if r >= base:
+      break
+
+  # Remove the first partial quotient's contribution (q1 * yn) from the dividend
+  let xn_rest = xn.hi shl halfSize + xnlohi - (q1 * yn)
+
+  # Second half of the quotient
+  var (q2, s) = divmod(xn_rest, yn_hi)
   var
-    shift = x.bit_length - y.bit_length
-    d = y shl shift
+    q2_ynlo = q2 * yn_lo
+    sbase_xnlolo = (s shl halfSize) or xnlolo
 
-  result.rem = x
+  while (q2 >= base) or (q2_ynlo > sbase_xnlolo):
+    q2 -= one(T)
+    s += yn_hi
+    if s >= base:
+      break
+    else:
+      q2_ynlo -= yn_lo
+      sbase_xnlolo = (s shl halfSize) or xnlolo
 
-  while shift >= 0:
-    result.quot += result.quot
-    if result.rem >= d:
-      result.rem -= d
-      result.quot.lo = result.quot.lo or one(SubTy)
-    d = d shr 1
-    dec(shift)
+  result.quot = (q1 shl halfSize) or q2
+  result.rem = ((xn_rest shl halfSize) + xnlolo - q2 * yn) shr clz
 
-  # Performance note:
-  # The performance of this implementation is extremely dependant on shl and shr.
-  #
-  # Probably the most efficient algorithm that can benefit from MpUInt data structure is
-  # the recursive fast division by Burnikel and Ziegler (http://www.mpi-sb.mpg.de/~ziegler/TechRep.ps.gz):
-  #   - Python implementation: https://bugs.python.org/file11060/fast_div.py and discussion https://bugs.python.org/issue3451
-  #   - C++ implementation: https://github.com/linbox-team/givaro/blob/master/src/kernel/recint/rudiv.h
-  #   - The Handbook of Elliptic and Hyperelliptic Cryptography Algorithm 10.35 on page 188 has a more explicit version of the div2NxN algorithm. This algorithm is directly recursive and avoids the mutual recursion of the original paper's calls between div2NxN and div3Nx2N.
-  #   - Comparison of fast division algorithms fro large integers: http://bioinfo.ict.ac.cn/~dbu/AlgorithmCourses/Lectures/Hasselstrom2003.pdf
+proc divmod*(x, y: MpUintImpl): tuple[quot, rem: MpUintImpl] {.noSideEffect.}=
+
+  # Using a divide and conquer algorithm.
+  if y.hi.isZero:
+    if x.hi < y.lo: # Bit length of quotient x/y < bit_length(MpUintImpl) / 2
+      (result.quot.lo, result.rem.lo) = div2n1n(x.hi, x.lo, y.lo)
+    else: # Quotient can overflow the subtype so we split work
+      (result.quot.hi, result.rem.hi) = divmod(x.hi, y.lo)
+      (result.quot.lo, result.rem.lo) = div2n1n(result.rem.hi, x.lo, y.lo)
+      result.rem.hi = zero(type result.rem.hi)
+    return
+
+  const
+    size = size_mpuintimpl(x)
+    halfSize = size div 2
+
+  block:
+    # Normalization of divisor
+    let clz = countLeadingZeroBits(y.hi)
+    let yn = (y shl clz)
+
+    # Prevent overflows
+    let xn = x shr 1
+
+    # Get the quotient
+    block:
+      let (qlo, _) = div2n1n(xn.hi, xn.lo, yn.hi)
+      result.quot.lo = qlo
+
+    # Undo normalization
+    result.quot = result.quot shr (halfSize - 1 - clz) # -1 to correct for xn shift
+
+  if not result.quot.isZero:
+    result.quot -= one(type result.quot)
+    # Quotient is correct or too small by one
+    # We will fix that once we know the remainder
+
+  # Remainder
+  result.rem = x - (y * result.quot)
+
+  # Fix quotient and remainder if we're off by one
+  if result.rem >= y:
+    # one more division round
+    result.quot += one(type result.quot)
+    result.rem -= y
 
 proc `div`*(x, y: MpUintImpl): MpUintImpl {.inline, noSideEffect.} =
   ## Division operation for multi-precision unsigned uint
@@ -157,3 +233,72 @@ proc `mod`*(x, y: MpUintImpl): MpUintImpl {.inline, noSideEffect.} =
   ## Division operation for multi-precision unsigned uint
   divmod(x,y).rem
+
+
+# ######################################################################
+# Division implementations
+#
+# Division is the most costly operation
+# and is also of critical importance for cryptography applications
+
+# ##### Research #####
+
+# Overview of division algorithms:
+# - https://gmplib.org/manual/Division-Algorithms.html#Division-Algorithms
+# - https://gmplib.org/~tege/division-paper.pdf
+# - Comparison of fast division algorithms for large integers: http://bioinfo.ict.ac.cn/~dbu/AlgorithmCourses/Lectures/Hasselstrom2003.pdf
+
+# Libdivide has implementations faster than hardware division if repeatedly dividing by the same number
+# - http://libdivide.com/documentation.html
+# - https://github.com/ridiculousfish/libdivide/blob/master/libdivide.h
+# Furthermore libdivide also has branchless implementations
+
+# Current implementation
+# Currently we use a divide and conquer algorithm. Implementations can be found in:
+# - Hacker's delight: http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt
+# - Libdivide
+# - Code project: https://www.codeproject.com/Tips/785014/UInt-Division-Modulus
+# - Cuda-uint128 (unfinished): https://github.com/curtisseizert/CUDA-uint128/blob/master/cuda_uint128.h
+# - Mpdecimal: https://github.com/status-im/nim-decimal/blob/9b65e95299cb582b14e0ae9a656984a2ce0bab03/decimal/mpdecimal_wrapper/generated/basearith.c#L305-L412
+
+# Probably the most efficient algorithm that can benefit from the MpUInt recursive data structure is
+# the recursive fast division by Burnikel and Ziegler (http://www.mpi-sb.mpg.de/~ziegler/TechRep.ps.gz):
+# - Python implementation: https://bugs.python.org/file11060/fast_div.py and discussion https://bugs.python.org/issue3451
+# - C++ implementation: https://github.com/linbox-team/givaro/blob/master/src/kernel/recint/rudiv.h
+# - The Handbook of Elliptic and Hyperelliptic Cryptography Algorithm 10.35 on page 188 has a more explicit version of the div2NxN algorithm. This algorithm is directly recursive and avoids the mutual recursion of the original paper's calls between div2NxN and div3Nx2N.
+
+# Other libraries that can be used as reference for alternative (?) implementations:
+# - TTMath: https://github.com/status-im/nim-ttmath/blob/8f6ff2e57b65a350479c4012a53699e262b19975/src/headers/ttmathuint.h#L1530-L2383
+# - LibTomMath: https://github.com/libtom/libtommath
+# - Google Abseil: https://github.com/abseil/abseil-cpp/tree/master/absl/numeric
+# - Crypto libraries like libsecp256k1, OpenSSL, ... though they are not generic (uint256 only, for example)
+# Note: GMP/MPFR are GPL. The papers can be used but not their code.
+
+# ######################################################################
+# School division
+
+# proc divmod*(x, y: MpUintImpl): tuple[quot, rem: MpUintImpl] {.noSideEffect.}=
+#   ## Division for multi-precision unsigned uint
+#   ## Returns quotient + remainder in a (quot, rem) tuple
+#   #
+#   # Implementation through binary shift division
+#   if unlikely(y.isZero):
+#     raise newException(DivByZeroError, "You attempted to divide by zero")
+
+#   type SubTy = type x.lo
+
+#   var
+#     shift = x.bit_length - y.bit_length
+#     d = y shl shift
+
+#   result.rem = x
+
+#   while shift >= 0:
+#     result.quot += result.quot
+#     if result.rem >= d:
+#       result.rem -= d
+#       result.quot.lo = result.quot.lo or one(SubTy)
+
+#     d = d shr 1
+#     dec(shift)
+
diff --git a/src/private/uint_bitwise_ops.nim b/src/private/uint_bitwise_ops.nim
index 61b5c24..d1302d1 100644
--- a/src/private/uint_bitwise_ops.nim
+++ b/src/private/uint_bitwise_ops.nim
@@ -30,7 +30,7 @@ proc `xor`*(x, y: MpUintImpl): MpUintImpl {.noInit, noSideEffect, inline.}=
   result.lo = x.lo xor y.lo
   result.hi = x.hi xor y.hi
 
-proc `shl`*[T: MpUintImpl](x: T, y: SomeInteger): T {.noInit, inline, noSideEffect.}=
+proc `shl`*(x: MpUintImpl, y: SomeInteger): MpUintImpl {.noInit, inline, noSideEffect.}=
   ## Compute the `shift left` operation of x and y
   # Note: inlining this poses codegen/aliasing issue when doing `x = x shl 1`
   const halfSize = size_mpuintimpl(x) div 2
@@ -42,7 +42,7 @@ proc `shl`*[T: MpUintImpl](x: T, y: SomeInteger): T {.noInit, inline, noSideEffe
   else:
     0.SubTy
 
-proc `shr`*[T: MpUintImpl](x: T, y: SomeInteger): T {.noInit, inline, noSideEffect.}=
+proc `shr`*(x: MpUintImpl, y: SomeInteger): MpUintImpl {.noInit, inline, noSideEffect.}=
   ## Compute the `shift right` operation of x and y
   # Note: inlining this poses codegen/aliasing issue when doing `x = x shl 1`
   const halfSize = size_mpuintimpl(x) div 2
@@ -52,4 +52,3 @@ proc `shr`*[T: MpUintImpl](x: T, y: SomeInteger): T {.noInit, inline, noSideEffe
     result.lo = (x.lo shr y) or (x.hi shl (y - halfSize)) # the shl is not a mistake
     result.hi = if y < halfSize: x.hi shr y else: 0.SubTy
-
diff --git a/tests/test_muldiv.nim b/tests/test_muldiv.nim
index 65555f8..10a0a21 100644
--- a/tests/test_muldiv.nim
+++ b/tests/test_muldiv.nim
@@ -34,7 +34,7 @@ suite "Testing multiplication implementation":
 
 suite "Testing division and modulo implementation":
-  test "Divmod returns the correct result":
+  test "Divmod(100, 13) returns the correct result":
 
     let a = 100.initMpUint(64)
     let b = 13.initMpUint(64)
@@ -42,3 +42,17 @@ suite "Testing division and modulo implementation":
 
     check: cast[uint64](qr.quot) == 7'u64
     check: cast[uint64](qr.rem) == 9'u64
+
+  test "Divmod(2^64, 3) returns the correct result":
+    let a = 1.initMpUint(128) shl 64
+    let b = 3.initMpUint(128)
+
+    let qr = divmod(a, b)
+
+    let q = cast[MpUintImpl[uint64]](qr.quot)
+    let r = cast[MpUintImpl[uint64]](qr.rem)
+
+    check: q.lo == 6148914691236517205'u64
+    check: q.hi == 0'u64
+    check: r.lo == 1'u64
+    check: r.hi == 0'u64
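
Reviewer notes (not part of the patch): the three Nim sketches below are minimal, standalone illustrations of the machinery this diff introduces. All names (naiveClz, div2n1nDemo, divmodByWordDemo) are hypothetical, for discussion only; each snippet compiles on its own with `nim c -r`.

The division code relies on the convention that countLeadingZeroBits returns 0 for an input of 0 (see the commented-out noUndefined guard in stdlib_bitops.nim and the clz comment in div2n1n). A naive reference for that convention:

proc naiveClz(x: uint32): int =
  ## Bit-by-bit count of leading zero bits; returns 0 for x == 0,
  ## matching countLeadingZeroBits above (not 32, as a raw hardware
  ## lzcnt would give).
  if x == 0: return 0
  var v = x
  while (v and 0x8000_0000'u32) == 0:
    inc result
    v = v shl 1

when isMainModule:
  doAssert naiveClz(1'u32) == 31
  doAssert naiveClz(0x8000_0000'u32) == 0
  doAssert naiveClz(0'u32) == 0 # zero maps to 0 by convention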
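To make the div2n1n contract concrete: it divides the double-word dividend (x_hi, x_lo) by a single-word divisor y, under the precondition x_hi < y so the quotient fits in one word. A sketch against plain uint32, where a native uint64 can do the work directly; the library's "word" may itself be a uint64 or an MpUintImpl, which is why the real proc must normalize, split the divisor, and correct q1/q2 in loops instead:

proc div2n1nDemo(xHi, xLo, y: uint32): tuple[quot, rem: uint32] =
  ## Divides the 64-bit value (xHi shl 32) or xLo by y,
  ## requiring xHi < y so the quotient fits in 32 bits.
  doAssert xHi < y, "Division overflow"
  let x = (xHi.uint64 shl 32) or xLo.uint64
  result.quot = uint32(x div y.uint64)
  result.rem = uint32(x mod y.uint64)

when isMainModule:
  # 2^32 div 3 == 1431655765 rem 1, mirroring the 2^64 div 3 test above
  let (q, r) = div2n1nDemo(1, 0, 3)
  doAssert q == 1431655765'u32 and r == 1'u32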
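The y.hi.isZero fast path of the new divmod chains two such divisions: the high word is divided first, and its remainder becomes the high word of the low division — which is why the recursive call must divide by y.lo, never by the (zero) y.hi. The same shape in miniature, reusing div2n1nDemo from the sketch above:

proc divmodByWordDemo(xHi, xLo, y: uint32): tuple[qHi, qLo, rem: uint32] =
  ## Divides the double word (xHi, xLo) by a single-word divisor y.
  result.qHi = xHi div y
  # xHi mod y < y always holds, so div2n1nDemo's precondition is met
  let (qLo, rem) = div2n1nDemo(xHi mod y, xLo, y)
  result.qLo = qLo
  result.rem = rem

when isMainModule:
  let x = 5'u64 shl 32 or 7'u64 # dividend with xHi = 5, xLo = 7
  let (qHi, qLo, rem) = divmodByWordDemo(5, 7, 3)
  doAssert (qHi.uint64 shl 32 or qLo.uint64) == x div 3
  doAssert rem.uint64 == x mod 3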