From 102a1b0f0c680139df6964c8eeb081c6a8a3c302 Mon Sep 17 00:00:00 2001 From: Mamy Ratsimbazafy Date: Wed, 25 Apr 2018 12:52:00 +0200 Subject: [PATCH] Rename the library to Stint (#26) --- README.md | 27 ++++++---- benchmarks/bench_mod.nim | 6 +-- src/debug/debugutils.nim | 2 +- src/private/as_words.nim | 8 +-- src/private/bithacks.nim | 2 +- src/private/conversion.nim | 26 ++++----- src/private/uint_addsub.nim | 8 +-- src/private/uint_bitwise_ops.nim | 14 ++--- src/private/uint_comparison.nim | 8 +-- src/private/uint_div.nim | 44 +++++++-------- src/private/uint_mul.nim | 24 ++++----- src/private/uint_type.nim | 34 ++++++------ src/uint_init.nim | 8 +-- src/uint_public.nim | 46 ++++++++-------- mpint.nimble => stint.nimble | 12 ++--- tests/property_based.nim | 92 ++++++++++++++++---------------- tests/property_based_uint256.nim | 46 ++++++++-------- tests/test_addsub.nim | 30 +++++------ tests/test_bitwise.nim | 12 ++--- tests/test_comparison.nim | 18 +++---- tests/test_endianness.nim | 2 +- tests/test_muldiv.nim | 26 ++++----- 22 files changed, 252 insertions(+), 243 deletions(-) rename mpint.nimble => stint.nimble (85%) diff --git a/README.md b/README.md index 419fb60..d6fb781 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,29 @@ -# Mpint (Multi-precision integers) +# Stint (Stack-based multiprecision integers) -[![Build Status (Travis)](https://img.shields.io/travis/status-im/mpint/master.svg?label=Linux%20/%20macOS "Linux/macOS build status (Travis)")](https://travis-ci.org/status-im/mpint) +[![Build Status (Travis)](https://img.shields.io/travis/status-im/stint/master.svg?label=Linux%20/%20macOS "Linux/macOS build status (Travis)")](https://travis-ci.org/status-im/stint) [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) ![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg) -A fast and portable multi-precision integer library in pure Nim +A fast and portable stack-based multi-precision integer library in pure Nim Main focus: - - no heap/dynamic allocation - - uint256 for cryptographic and ethereum blockchain usage. - - ARM portability for usage on mobile phones + - Portability + - 32 and 64 bit arch + - ARM for usage on mobile phones + - Additionally RISC-V and MIPS for open hardware and low power IoT devices. + - Speed, library is carefully tuned to produce the best assembly given the current compilers. + However, the library itself does not resort to assembly for portability. + - No heap/dynamic allocation + - Ethereum applications + - Uint256/Int256 for Ethereum Virtual Machine usage. + - Uint2048 for Ethereum Bloom filters - Ease of use: - - casting to and from array of bytes - - converting to and from Hex - - converting to and from decimal strings + - Use traditional `+`, `-`, `+=`, etc operators like on native types + - Representation of numbers in memory is the exact same as native types and endianness aware. + - In practice that means that interfacing with binary blobs representing numbers from cryptographic libraries can be done with a `cast` if it represents a Uint256, Uint512, Uint1024, Uint2048. 
+ - converting to and from Hex + - converting to and from decimal strings ## License diff --git a/benchmarks/bench_mod.nim b/benchmarks/bench_mod.nim index 221e1d6..f09fc6c 100644 --- a/benchmarks/bench_mod.nim +++ b/benchmarks/bench_mod.nim @@ -18,10 +18,10 @@ echo "Warmup: " & $(stop - start) & "s" start = cpuTime() block: - var foo = 123.initMpUint(256) + var foo = 123.u(256) for i in 0 ..< 10_000_000: - foo += i.initMpUint(256) * i.initMpUint(256) mod 456.initMpUint(256) - foo = foo mod 789.initMpUint(256) + foo += i.u(256) * i.u(256) mod 456.u(256) + foo = foo mod 789.u(256) stop = cpuTime() echo "Library: " & $(stop - start) & "s" diff --git a/src/debug/debugutils.nim b/src/debug/debugutils.nim index 76ae902..0382850 100644 --- a/src/debug/debugutils.nim +++ b/src/debug/debugutils.nim @@ -27,7 +27,7 @@ func tohexBE*[T: uint8 or uint16 or uint32 or uint64](x: T): string = for i in 0 ..< T.sizeof: result.add toHex(bytes[i]) -func tohexBE*(x: MpUintImpl): string = +func tohexBE*(x: UintImpl): string = ## Stringify an uint to hex, Most significant byte on the left ## i.e. a (2.uint128)^64 + 1 will be 0000000100000001 diff --git a/src/private/as_words.nim b/src/private/as_words.nim index 961f5be..0a3c83f 100644 --- a/src/private/as_words.nim +++ b/src/private/as_words.nim @@ -60,7 +60,7 @@ proc isUint(x: NimNode): static[bool] = elif eqIdent(x, "uint8"): true else: false -macro asWords*[T](n: MpUintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = +macro asWords*[T](n: UintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = ## Iterates over n, as an array of words. ## Input: ## - n: The Multiprecision int @@ -96,7 +96,7 @@ macro asWords*[T](n: MpUintImpl[T], ignoreEndianness: static[bool], loopBody: un else: assert false, "Not implemented" -macro asWordsZip*[T](x, y: MpUintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = +macro asWordsZip*[T](x, y: UintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = ## Iterates over x and y, as an array of words. ## Input: ## - x, y: The multiprecision ints @@ -155,7 +155,7 @@ macro asWordsZip*[T](x, y: MpUintImpl[T], ignoreEndianness: static[bool], loopBo for `idx` in countdown(`inner_x`[].len - 1, 0): `replacedAST` -macro m_asWordsZip*[T](m: var MpUintImpl[T], x: MpUintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = +macro m_asWordsZip*[T](m: var UintImpl[T], x: UintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = ## Iterates over a mutable int m and x as an array of words. ## returning a !! Pointer !! of the proper type to m. ## Input: @@ -217,7 +217,7 @@ macro m_asWordsZip*[T](m: var MpUintImpl[T], x: MpUintImpl[T], ignoreEndianness: `replacedAST` -macro m_asWordsZip*[T](m: var MpUintImpl[T], x, y: MpUintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = +macro m_asWordsZip*[T](m: var UintImpl[T], x, y: UintImpl[T], ignoreEndianness: static[bool], loopBody: untyped): untyped = ## Iterates over a mutable int m and x as an array of words. ## returning a !! Pointer !! of the proper type to m. ## Input: diff --git a/src/private/bithacks.nim b/src/private/bithacks.nim index 61ca8b0..f7b4ea3 100644 --- a/src/private/bithacks.nim +++ b/src/private/bithacks.nim @@ -14,7 +14,7 @@ export stdlib_bitops # MpInt rely on no undefined behaviour as often we scan 0. 
(if 1 is stored in a uint128 for example) # Also countLeadingZeroBits must return the size of the type and not 0 like in the stdlib -func countLeadingZeroBits*(n: MpUintImpl): int {.inline.} = +func countLeadingZeroBits*(n: UintImpl): int {.inline.} = ## Returns the number of leading zero bits in integer. const maxHalfRepr = getSize(n) div 2 diff --git a/src/private/conversion.nim b/src/private/conversion.nim index 31b353c..a54e61b 100644 --- a/src/private/conversion.nim +++ b/src/private/conversion.nim @@ -10,7 +10,7 @@ import ./uint_type, macros, typetraits -func initMpUintImpl*[InType, OutType](x: InType, _: typedesc[OutType]): OutType {.inline.} = +func initUintImpl*[InType, OutType](x: InType, _: typedesc[OutType]): OutType {.inline.} = const size_in = getSize(x) @@ -27,12 +27,12 @@ func initMpUintImpl*[InType, OutType](x: InType, _: typedesc[OutType]): OutType elif size_in == size_out: result = cast[type result](x) else: - result.lo = initMpUintImpl(x, type result.lo) + result.lo = initUintImpl(x, type result.lo) func toSubtype*[T: SomeInteger](b: bool, _: typedesc[T]): T {.inline.}= b.T -func toSubtype*[T: MpUintImpl](b: bool, _: typedesc[T]): T {.inline.}= +func toSubtype*[T: UintImpl](b: bool, _: typedesc[T]): T {.inline.}= type SubTy = type result.lo result.lo = toSubtype(b, SubTy) @@ -49,12 +49,12 @@ func one*[T: BaseUint](_: typedesc[T]): T {.inline.}= else: r_ptr[r_ptr[].len - 1] = 1 -func toUint*(n: MpUIntImpl): auto {.inline.}= +func toUint*(n: UintImpl): auto {.inline.}= ## Casts a multiprecision integer to an uint of the same size # TODO: uint128 support when n.sizeof > 8: - raise newException("Unreachable. You are trying to cast a MpUint with more than 64-bit of precision") + raise newException("Unreachable. You are trying to cast a StUint with more than 64-bit of precision") elif n.sizeof == 8: cast[uint64](n) elif n.sizeof == 4: @@ -62,14 +62,14 @@ func toUint*(n: MpUIntImpl): auto {.inline.}= elif n.sizeof == 2: cast[uint16](n) else: - raise newException("Unreachable. MpUInt must be 16-bit minimum and a power of 2") + raise newException("Unreachable. 
StUint must be 16-bit minimum and a power of 2") func toUint*(n: SomeUnsignedInt): SomeUnsignedInt {.inline.}= ## No-op overload of multi-precision int casting n func asDoubleUint*(n: BaseUint): auto {.inline.} = - ## Convert an integer or MpUint to an uint with double the size + ## Convert an integer or StUint to an uint with double the size type Double = ( when n.sizeof == 4: uint64 @@ -80,17 +80,17 @@ func asDoubleUint*(n: BaseUint): auto {.inline.} = n.toUint.Double -func toMpUintImpl*(n: uint16|uint32|uint64): auto {.inline.} = - ## Cast an integer to the corresponding size MpUintImpl +func toUintImpl*(n: uint16|uint32|uint64): auto {.inline.} = + ## Cast an integer to the corresponding size UintImpl # Sometimes direct casting doesn't work and we must cast through a pointer when n is uint64: - return (cast[ptr [MpUintImpl[uint32]]](unsafeAddr n))[] + return (cast[ptr [UintImpl[uint32]]](unsafeAddr n))[] elif n is uint32: - return (cast[ptr [MpUintImpl[uint16]]](unsafeAddr n))[] + return (cast[ptr [UintImpl[uint16]]](unsafeAddr n))[] elif n is uint16: - return (cast[ptr [MpUintImpl[uint8]]](unsafeAddr n))[] + return (cast[ptr [UintImpl[uint8]]](unsafeAddr n))[] -func toMpUintImpl*(n: MpUintImpl): MpUintImpl {.inline.} = +func toUintImpl*(n: UintImpl): UintImpl {.inline.} = ## No op n diff --git a/src/private/uint_addsub.nim b/src/private/uint_addsub.nim index a8e1c95..b83b5fe 100644 --- a/src/private/uint_addsub.nim +++ b/src/private/uint_addsub.nim @@ -14,25 +14,25 @@ import ./bithacks, ./conversion, # ############ Addition & Substraction ############ # -proc `+=`*(x: var MpUintImpl, y: MpUintImpl) {.noSideEffect, inline.}= +proc `+=`*(x: var UintImpl, y: UintImpl) {.noSideEffect, inline.}= ## In-place addition for multi-precision unsigned int type SubTy = type x.lo x.lo += y.lo x.hi += (x.lo < y.lo).toSubtype(SubTy) + y.hi -proc `+`*(x, y: MpUintImpl): MpUintImpl {.noSideEffect, noInit, inline.}= +proc `+`*(x, y: UintImpl): UintImpl {.noSideEffect, noInit, inline.}= # Addition for multi-precision unsigned int result = x result += y -proc `-`*(x, y: MpUintImpl): MpUintImpl {.noSideEffect, noInit, inline.}= +proc `-`*(x, y: UintImpl): UintImpl {.noSideEffect, noInit, inline.}= # Substraction for multi-precision unsigned int type SubTy = type x.lo result.lo = x.lo - y.lo result.hi = x.hi - y.hi - (x.lo < y.lo).toSubtype(SubTy) -proc `-=`*(x: var MpUintImpl, y: MpUintImpl) {.noSideEffect, inline.}= +proc `-=`*(x: var UintImpl, y: UintImpl) {.noSideEffect, inline.}= ## In-place substraction for multi-precision unsigned int x = x - y diff --git a/src/private/uint_bitwise_ops.nim b/src/private/uint_bitwise_ops.nim index 581765e..b724de8 100644 --- a/src/private/uint_bitwise_ops.nim +++ b/src/private/uint_bitwise_ops.nim @@ -10,30 +10,30 @@ import ./uint_type, ./as_words -func `not`*(x: MpUintImpl): MpUintImpl {.noInit, inline.}= +func `not`*(x: UintImpl): UintImpl {.noInit, inline.}= ## Bitwise complement of unsigned integer x m_asWordsZip(result, x, ignoreEndianness = true): result = not x -func `or`*(x, y: MpUintImpl): MpUintImpl {.noInit, inline.}= +func `or`*(x, y: UintImpl): UintImpl {.noInit, inline.}= ## `Bitwise or` of numbers x and y m_asWordsZip(result, x, y, ignoreEndianness = true): result = x or y -func `and`*(x, y: MpUintImpl): MpUintImpl {.noInit, inline.}= +func `and`*(x, y: UintImpl): UintImpl {.noInit, inline.}= ## `Bitwise and` of numbers x and y m_asWordsZip(result, x, y, ignoreEndianness = true): result = x and y -func `xor`*(x, y: MpUintImpl): MpUintImpl {.noInit, 
inline.}= +func `xor`*(x, y: UintImpl): UintImpl {.noInit, inline.}= ## `Bitwise xor` of numbers x and y m_asWordsZip(result, x, y, ignoreEndianness = true): result = x xor y -func `shr`*(x: MpUintImpl, y: SomeInteger): MpUintImpl {.inline.} +func `shr`*(x: UintImpl, y: SomeInteger): UintImpl {.inline.} # Forward declaration -func `shl`*(x: MpUintImpl, y: SomeInteger): MpUintImpl {.inline.}= +func `shl`*(x: UintImpl, y: SomeInteger): UintImpl {.inline.}= ## Compute the `shift left` operation of x and y # Note: inlining this poses codegen/aliasing issue when doing `x = x shl 1` @@ -51,7 +51,7 @@ func `shl`*(x: MpUintImpl, y: SomeInteger): MpUintImpl {.inline.}= else: result.hi = x.lo shl (y - halfSize) -func `shr`*(x: MpUintImpl, y: SomeInteger): MpUintImpl {.inline.}= +func `shr`*(x: UintImpl, y: SomeInteger): UintImpl {.inline.}= ## Compute the `shift right` operation of x and y const halfSize = getSize(x) div 2 diff --git a/src/private/uint_comparison.nim b/src/private/uint_comparison.nim index 9271a43..21e1894 100644 --- a/src/private/uint_comparison.nim +++ b/src/private/uint_comparison.nim @@ -12,27 +12,27 @@ import ./uint_type, ./as_words func isZero*(n: SomeUnsignedInt): bool {.inline.} = n == 0 -func isZero*(n: MpUintImpl): bool {.inline.} = +func isZero*(n: UintImpl): bool {.inline.} = asWords(n, ignoreEndianness = true): if n != 0: return false return true -func `<`*(x, y: MpUintImpl): bool {.inline.}= +func `<`*(x, y: UintImpl): bool {.inline.}= # Lower comparison for multi-precision integers asWordsZip(x, y, ignoreEndianness = false): if x != y: return x < y return false # they're equal -func `==`*(x, y: MpUintImpl): bool {.inline.}= +func `==`*(x, y: UintImpl): bool {.inline.}= # Equal comparison for multi-precision integers asWordsZip(x, y, ignoreEndianness = true): if x != y: return false return true # they're equal -func `<=`*(x, y: MpUintImpl): bool {.inline.}= +func `<=`*(x, y: UintImpl): bool {.inline.}= # Lower or equal comparison for multi-precision integers asWordsZip(x, y, ignoreEndianness = false): if x != y: diff --git a/src/private/uint_div.nim b/src/private/uint_div.nim index 7cc09b1..61b3452 100644 --- a/src/private/uint_div.nim +++ b/src/private/uint_div.nim @@ -45,24 +45,24 @@ import ./bithacks, ./conversion, ################################################################################################################### func div2n1n[T: SomeunsignedInt](q, r: var T, n_hi, n_lo, d: T) -func div2n1n(q, r: var MpUintImpl, ah, al, b: MpUintImpl) +func div2n1n(q, r: var UintImpl, ah, al, b: UintImpl) # Forward declaration proc divmod*(x, y: SomeInteger): tuple[quot, rem: SomeInteger] {.noSideEffect, inline.}= # hopefully the compiler fuse that in a single op (x div y, x mod y) -func divmod*[T](x, y: MpUintImpl[T]): tuple[quot, rem: MpUintImpl[T]] +func divmod*[T](x, y: UintImpl[T]): tuple[quot, rem: UintImpl[T]] # Forward declaration -func div3n2n[T]( q: var MpUintImpl[T], - r: var MpUintImpl[MpUintImpl[T]], - a2, a1, a0: MpUintImpl[T], - b: MpUintImpl[MpUintImpl[T]]) = +func div3n2n[T]( q: var UintImpl[T], + r: var UintImpl[UintImpl[T]], + a2, a1, a0: UintImpl[T], + b: UintImpl[UintImpl[T]]) = var - c: MpUintImpl[T] - d: MpUintImpl[MpUintImpl[T]] + c: UintImpl[T] + d: UintImpl[UintImpl[T]] carry: bool if a2 < b.hi: @@ -74,7 +74,7 @@ func div3n2n[T]( q: var MpUintImpl[T], carry = true extPrecMul[T](d, q, b.lo) - let ca0 = MpUintImpl[type c](hi: c, lo: a0) + let ca0 = UintImpl[type c](hi: c, lo: a0) r = ca0 - d @@ -89,13 +89,13 @@ func div3n2n[T]( q: var 
MpUintImpl[T], proc div3n2n[T: SomeUnsignedInt]( q: var T, - r: var MpUintImpl[T], + r: var UintImpl[T], a2, a1, a0: T, - b: MpUintImpl[T]) = + b: UintImpl[T]) = var c: T - d: MpUintImpl[T] + d: UintImpl[T] carry: bool if a2 < b.hi: @@ -108,7 +108,7 @@ proc div3n2n[T: SomeUnsignedInt]( carry = true extPrecMul[T](d, q, b.lo) - let ca0 = MpUintImpl[T](hi: c, lo: a0) + let ca0 = UintImpl[T](hi: c, lo: a0) r = ca0 - d if (not carry) and d > ca0: @@ -120,11 +120,11 @@ proc div3n2n[T: SomeUnsignedInt]( dec q r += b -func div2n1n(q, r: var MpUintImpl, ah, al, b: MpUintImpl) = +func div2n1n(q, r: var UintImpl, ah, al, b: UintImpl) = # assert countLeadingZeroBits(b) == 0, "Divisor was not normalized" - var s: MpUintImpl + var s: UintImpl div3n2n(q.hi, s, ah.hi, ah.lo, al.hi, b) div3n2n(q.lo, r, s.hi, s.lo, al.lo, b) @@ -168,7 +168,7 @@ func div2n1n[T: SomeunsignedInt](q, r: var T, n_hi, n_lo, d: T) = q = (q1 shl halfSize) or q2 r = r2 -func divmodBZ[T](x, y: MpUintImpl[T], q, r: var MpUintImpl[T])= +func divmodBZ[T](x, y: UintImpl[T], q, r: var UintImpl[T])= assert y.isZero.not() # This should be checked on release mode in the divmod caller proc @@ -209,7 +209,7 @@ func divmodBZ[T](x, y: MpUintImpl[T], q, r: var MpUintImpl[T])= let clz = countLeadingZeroBits(y) let - xx = MpUintImpl[type x](lo: x) shl clz + xx = UintImpl[type x](lo: x) shl clz yy = y shl clz # Compute @@ -218,7 +218,7 @@ func divmodBZ[T](x, y: MpUintImpl[T], q, r: var MpUintImpl[T])= # Undo normalization r = r shr clz -func divmodBS(x, y: MpUintImpl, q, r: var MpuintImpl) = +func divmodBS(x, y: UintImpl, q, r: var UintImpl) = ## Division for multi-precision unsigned uint ## Implementation through binary shift division @@ -244,7 +244,7 @@ func divmodBS(x, y: MpUintImpl, q, r: var MpuintImpl) = const BinaryShiftThreshold = 8 # If the difference in bit-length is below 8 # binary shift is probably faster -func divmod*[T](x, y: MpUintImpl[T]): tuple[quot, rem: MpUintImpl[T]]= +func divmod*[T](x, y: UintImpl[T]): tuple[quot, rem: UintImpl[T]]= let x_clz = x.countLeadingZeroBits let y_clz = y.countLeadingZeroBits @@ -266,7 +266,7 @@ func divmod*[T](x, y: MpUintImpl[T]): tuple[quot, rem: MpUintImpl[T]]= # It is a bit tricky with recursive types. 
An empty n.lo means 0 or sizeof(n.lo) let y_ctz = getSize(y) - y_clz - 1 result.quot = x shr y_ctz - result.rem = y_ctz.initMpUintImpl(MpUintImpl[T]) + result.rem = y_ctz.initUintImpl(UintImpl[T]) result.rem = result.rem and x elif x == y: result.quot.lo = one(T) @@ -277,11 +277,11 @@ func divmod*[T](x, y: MpUintImpl[T]): tuple[quot, rem: MpUintImpl[T]]= else: divmodBZ(x, y, result.quot, result.rem) -func `div`*(x, y: MpUintImpl): MpUintImpl {.inline.} = +func `div`*(x, y: UintImpl): UintImpl {.inline.} = ## Division operation for multi-precision unsigned uint divmod(x,y).quot -func `mod`*(x, y: MpUintImpl): MpUintImpl {.inline.} = +func `mod`*(x, y: UintImpl): UintImpl {.inline.} = ## Division operation for multi-precision unsigned uint divmod(x,y).rem diff --git a/src/private/uint_mul.nim b/src/private/uint_mul.nim index 4d64fb7..1740149 100644 --- a/src/private/uint_mul.nim +++ b/src/private/uint_mul.nim @@ -26,24 +26,24 @@ func hi[T:SomeUnsignedInt](x: T): T {.inline.} = p = T.sizeof * 8 div 2 result = x shr p -# No generic, somehow Nim is given ambiguous call with the T: MpUintImpl overload -func extPrecMul*(result: var MpUintImpl[uint8], x, y: uint8) = +# No generic, somehow Nim is given ambiguous call with the T: UintImpl overload +func extPrecMul*(result: var UintImpl[uint8], x, y: uint8) = ## Extended precision multiplication result = cast[type result](x.asDoubleUint * y.asDoubleUint) -func extPrecMul*(result: var MpUintImpl[uint16], x, y: uint16) = +func extPrecMul*(result: var UintImpl[uint16], x, y: uint16) = ## Extended precision multiplication result = cast[type result](x.asDoubleUint * y.asDoubleUint) -func extPrecMul*(result: var MpUintImpl[uint32], x, y: uint32) = +func extPrecMul*(result: var UintImpl[uint32], x, y: uint32) = ## Extended precision multiplication result = cast[type result](x.asDoubleUint * y.asDoubleUint) -func extPrecAddMul[T: uint8 or uint16 or uint32](result: var MpUintImpl[T], x, y: T) = +func extPrecAddMul[T: uint8 or uint16 or uint32](result: var UintImpl[T], x, y: T) = ## Extended precision fused in-place addition & multiplication result += cast[type result](x.asDoubleUint * y.asDoubleUint) -template extPrecMulImpl(result: var MpUintImpl[uint64], op: untyped, u, v: uint64) = +template extPrecMulImpl(result: var UintImpl[uint64], op: untyped, u, v: uint64) = const p = 64 div 2 base = 1 shl p @@ -70,15 +70,15 @@ template extPrecMulImpl(result: var MpUintImpl[uint64], op: untyped, u, v: uint6 op(result.hi, x3 + x1.hi) op(result.lo, (x1 shl p) or x0.lo) -func extPrecMul*(result: var MpUintImpl[uint64], u, v: uint64) = +func extPrecMul*(result: var UintImpl[uint64], u, v: uint64) = ## Extended precision multiplication extPrecMulImpl(result, `=`, u, v) -func extPrecAddMul(result: var MpUintImpl[uint64], u, v: uint64) = +func extPrecAddMul(result: var UintImpl[uint64], u, v: uint64) = ## Extended precision fused in-place addition & multiplication extPrecMulImpl(result, `+=`, u, v) -func extPrecMul*[T](result: var MpUintImpl[MpUintImpl[T]], x, y: MpUintImpl[T]) = +func extPrecMul*[T](result: var UintImpl[UintImpl[T]], x, y: UintImpl[T]) = # See details at # https://en.wikipedia.org/wiki/Karatsuba_algorithm # https://locklessinc.com/articles/256bit_arithmetic/ @@ -93,7 +93,7 @@ func extPrecMul*[T](result: var MpUintImpl[MpUintImpl[T]], x, y: MpUintImpl[T]) # and introduce branching # - More total operations means more register moves - var z1: MpUintImpl[T] + var z1: UintImpl[T] # Low part - z0 extPrecMul(result.lo, x.lo, y.lo) @@ -112,9 +112,9 @@ func 
extPrecMul*[T](result: var MpUintImpl[MpUintImpl[T]], x, y: MpUintImpl[T]) # Finalize low part result.lo.hi += z1.lo if result.lo.hi < z1.lo: - result.hi += one(MpUintImpl[T]) + result.hi += one(UintImpl[T]) -func `*`*[T](x, y: MpUintImpl[T]): MpUintImpl[T] {.inline.}= +func `*`*[T](x, y: UintImpl[T]): UintImpl[T] {.inline.}= ## Multiplication for multi-precision unsigned uint # # For our representation, it is similar to school grade multiplication diff --git a/src/private/uint_type.nim b/src/private/uint_type.nim index 2587df1..68c0703 100644 --- a/src/private/uint_type.nim +++ b/src/private/uint_type.nim @@ -12,34 +12,34 @@ import macros -# The macro getMpUintImpl must be exported +# The macro getUintImpl must be exported when defined(mpint_test): - macro getMpUintImpl*(bits: static[int]): untyped = - # Test version, mpuint[64] = 2 uint32. Test the logic of the library + macro getUintImpl*(bits: static[int]): untyped = + # Test version, StUint[64] = 2 uint32. Test the logic of the library assert (bits and (bits-1)) == 0, $bits & " is not a power of 2" assert bits >= 16, "The number of bits in a should be greater or equal to 16" if bits >= 128: - let inner = getAST(getMpUintImpl(bits div 2)) - result = newTree(nnkBracketExpr, ident("MpUintImpl"), inner) + let inner = getAST(getUintImpl(bits div 2)) + result = newTree(nnkBracketExpr, ident("UintImpl"), inner) elif bits == 64: - result = newTree(nnkBracketExpr, ident("MpUintImpl"), ident("uint32")) + result = newTree(nnkBracketExpr, ident("UintImpl"), ident("uint32")) elif bits == 32: - result = newTree(nnkBracketExpr, ident("MpUintImpl"), ident("uint16")) + result = newTree(nnkBracketExpr, ident("UintImpl"), ident("uint16")) elif bits == 16: - result = newTree(nnkBracketExpr, ident("MpUintImpl"), ident("uint8")) + result = newTree(nnkBracketExpr, ident("UintImpl"), ident("uint8")) else: error "Fatal: unreachable" else: - macro getMpUintImpl*(bits: static[int]): untyped = - # Release version, mpuint[64] = uint64. + macro getUintImpl*(bits: static[int]): untyped = + # Release version, StUint[64] = uint64. 
assert (bits and (bits-1)) == 0, $bits & " is not a power of 2" assert bits >= 8, "The number of bits in a should be greater or equal to 8" if bits >= 128: - let inner = getAST(getMpUintImpl(bits div 2)) - result = newTree(nnkBracketExpr, ident("MpUintImpl"), inner) + let inner = getAST(getUintImpl(bits div 2)) + result = newTree(nnkBracketExpr, ident("UintImpl"), inner) elif bits == 64: result = ident("uint64") elif bits == 32: @@ -59,7 +59,7 @@ proc getSize*(x: NimNode): static[int] = var node = x.getTypeInst while node.kind == nnkBracketExpr: - assert eqIdent(node[0], "MpuintImpl") + assert eqIdent(node[0], "UintImpl") multiplier *= 2 node = node[1] @@ -86,15 +86,15 @@ type # ### Private ### # # If this is not in the same type section # the compiler has trouble - BaseUint* = MpUintImpl or SomeUnsignedInt + BaseUint* = UintImpl or SomeUnsignedInt - MpUintImpl*[Baseuint] = object + UintImpl*[Baseuint] = object when system.cpuEndian == littleEndian: lo*, hi*: BaseUint else: hi*, lo*: BaseUint # ### Private ### # - MpUint*[bits: static[int]] = object - data*: getMpUintImpl(bits) + StUint*[bits: static[int]] = object + data*: getUintImpl(bits) # wrapped in object to avoid recursive calls diff --git a/src/uint_init.nim b/src/uint_init.nim index 6da78c3..3a46478 100644 --- a/src/uint_init.nim +++ b/src/uint_init.nim @@ -13,17 +13,17 @@ import ./private/uint_type import typetraits -func initMpUint*[T: SomeInteger](n: T, bits: static[int]): MpUint[bits] {.inline.}= +func u*[T: SomeInteger](n: T, bits: static[int]): StUint[bits] {.inline.}= assert n >= 0.T - when result.data is MpuintImpl: + when result.data is UintImpl: when getSize(n) > bits: - # To avoid a costly runtime check, we refuse storing into MpUint types smaller + # To avoid a costly runtime check, we refuse storing into StUint types smaller # than the input type. raise newException(ValueError, "Input " & $n & " (" & $T & ") cannot be stored in a multi-precision " & $bits & "-bit integer." & "\nUse a smaller input type instead. This is a compile-time check" & - " to avoid a costly run-time bit_length check at each MpUint initialization.") + " to avoid a costly run-time bit_length check at each StUint initialization.") else: let r_ptr = cast[ptr array[bits div (sizeof(T) * 8), T]](result.addr) when system.cpuEndian == littleEndian: diff --git a/src/uint_public.nim b/src/uint_public.nim index 52e756a..05c2b60 100644 --- a/src/uint_public.nim +++ b/src/uint_public.nim @@ -8,55 +8,55 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. 
import ./private/uint_type, macros -export MpUint, MpUintImpl, getMpUintImpl # TODO remove the need to export MpUintImpl and this macro +export StUint, UintImpl, getUintImpl # TODO remove the need to export UintImpl and this macro type - UInt128* = MpUint[128] - UInt256* = MpUint[256] + UInt128* = StUint[128] + UInt256* = StUint[256] template make_conv(conv_name: untyped, size: int): untyped = - func `convname`*(n: SomeInteger): MpUint[size] {.inline, noInit.}= - n.initMpUint(size) + func `convname`*(n: SomeInteger): StUint[size] {.inline, noInit.}= + n.u(size) make_conv(u128, 128) make_conv(u256, 256) template make_unary(op, ResultTy): untyped = - func `op`*(x: MpUint): ResultTy {.noInit, inline.} = - when ResultTy is MpUint: + func `op`*(x: StUint): ResultTy {.noInit, inline.} = + when ResultTy is StUint: result.data = op(x.data) else: op(x.data) export op template make_binary(op, ResultTy): untyped = - func `op`*(x, y: MpUint): ResultTy {.noInit, inline.} = - when ResultTy is MpUint: + func `op`*(x, y: StUint): ResultTy {.noInit, inline.} = + when ResultTy is StUint: result.data = op(x.data, y.data) else: op(x.data, y.data) export `op` template make_binary_inplace(op): untyped = - func `op`*(x: var MpUint, y: MpUint) {.inline.} = + func `op`*(x: var StUint, y: StUint) {.inline.} = op(x.data, y.data) export op import ./private/uint_addsub -make_binary(`+`, MpUint) +make_binary(`+`, StUint) make_binary_inplace(`+=`) -make_binary(`-`, MpUint) +make_binary(`-`, StUint) make_binary_inplace(`-=`) import ./private/uint_mul -make_binary(`*`, MpUint) +make_binary(`*`, StUint) import ./private/uint_div -make_binary(`div`, MpUint) -make_binary(`mod`, MpUint) -func divmod*(x, y: MpUint): tuple[quot, rem: MpUint] {.noInit, inline.} = +make_binary(`div`, StUint) +make_binary(`mod`, StUint) +func divmod*(x, y: StUint): tuple[quot, rem: StUint] {.noInit, inline.} = (result.quot.data, result.rem.data) = divmod(x.data, y.data) import ./private/uint_comparison @@ -64,15 +64,15 @@ import ./private/uint_comparison make_binary(`<`, bool) make_binary(`<=`, bool) make_binary(`==`, bool) -func isZero*(x: MpUint): bool {.inline.} = isZero x.data +func isZero*(x: StUint): bool {.inline.} = isZero x.data import ./private/uint_bitwise_ops -make_unary(`not`, MpUint) -make_binary(`or`, MpUint) -make_binary(`and`, MpUint) -make_binary(`xor`, MpUint) -proc `shr`*(x: Mpuint, y: SomeInteger): MpUint {.noInit, inline, noSideEffect.} = +make_unary(`not`, StUint) +make_binary(`or`, StUint) +make_binary(`and`, StUint) +make_binary(`xor`, StUint) +proc `shr`*(x: StUint, y: SomeInteger): StUint {.noInit, inline, noSideEffect.} = result.data = x.data shr y -proc `shl`*(x: Mpuint, y: SomeInteger): MpUint {.noInit, inline, noSideEffect.} = +proc `shl`*(x: StUint, y: SomeInteger): StUint {.noInit, inline, noSideEffect.} = result.data = x.data shl y diff --git a/mpint.nimble b/stint.nimble similarity index 85% rename from mpint.nimble rename to stint.nimble index d5b7d2c..cb26c3c 100644 --- a/mpint.nimble +++ b/stint.nimble @@ -1,7 +1,7 @@ -packageName = "mpint" +packageName = "stint" version = "0.0.1" author = "Status Research & Development GmbH" -description = "Efficient multiprecision int in Nim" +description = "Efficient stack-based multiprecision int in Nim" license = "Apache License 2.0 or MIT" srcDir = "src" @@ -20,19 +20,19 @@ proc test(name: string, lang: string = "c") = switch("out", ("./build/" & name)) setCommand lang, "tests/" & name & ".nim" -task test_debug, "Run all tests - test implementation (MpUint[64] = 2x 
uint32": +task test_debug, "Run all tests - test implementation (StUint[64] = 2x uint32": switch("define", "mpint_test") test "all_tests" -task test_release, "Run all tests - prod implementation (MpUint[64] = uint64": +task test_release, "Run all tests - prod implementation (StUint[64] = uint64": test "all_tests" -task test_property_debug, "Run random tests (normal mode) - test implementation (MpUint[64] = 2x uint32)": +task test_property_debug, "Run random tests (normal mode) - test implementation (StUint[64] = 2x uint32)": requires "quicktest > 0.0.8" switch("define", "mpint_test") test "property_based" -task test_property_release, "Run random tests (release mode) - test implementation (MpUint[64] = 2x uint32)": +task test_property_release, "Run random tests (release mode) - test implementation (StUint[64] = 2x uint32)": requires "quicktest > 0.0.8" switch("define", "mpint_test") switch("define", "release") diff --git a/tests/property_based.nim b/tests/property_based.nim index 4a05838..5cc884e 100644 --- a/tests/property_based.nim +++ b/tests/property_based.nim @@ -24,13 +24,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx or ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx or ty @@ -41,13 +41,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx and ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx and ty check(cast[uint](tz) == (x and y)) @@ -56,13 +56,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx xor ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx xor ty check(cast[uint](tz) == (x xor y)) @@ -71,11 +71,11 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) + tx = cast[StUint[64]](x) tz = not tx else: let - tx = cast[MpUint[32]](x) + tx = cast[StUint[32]](x) tz = not tx check(cast[uint](tz) == (not x)) @@ -84,13 +84,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx < ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx < ty check(tz == (x < y)) @@ -100,13 +100,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx <= ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx <= ty check(tz == (x <= y)) @@ -115,13 +115,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = 
cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx + ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx + ty check(cast[uint](tz) == x+y) @@ -131,13 +131,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx - ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx - ty check(cast[uint](tz) == x-y) @@ -146,13 +146,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx * ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx * ty check(cast[uint](tz) == x*y) @@ -161,11 +161,11 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) + tx = cast[StUint[64]](x) tz = tx shl y else: let - tx = cast[MpUint[32]](x) + tx = cast[StUint[32]](x) tz = tx shl y check(cast[uint](tz) == x shl y) @@ -174,11 +174,11 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) + tx = cast[StUint[64]](x) tz = tx shr y else: let - tx = cast[MpUint[32]](x) + tx = cast[StUint[32]](x) tz = tx shr y check(cast[uint](tz) == x shr y) @@ -187,13 +187,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx mod ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx mod ty check(cast[uint](tz) == x mod y) @@ -202,13 +202,13 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / when sizeof(int) == 8: let - tx = cast[MpUint[64]](x) - ty = cast[MpUint[64]](y) + tx = cast[StUint[64]](x) + ty = cast[StUint[64]](y) tz = tx div ty else: let - tx = cast[MpUint[32]](x) - ty = cast[MpUint[32]](y) + tx = cast[StUint[32]](x) + ty = cast[StUint[32]](y) tz = tx div ty check(cast[uint](tz) == x div y) diff --git a/tests/property_based_uint256.nim b/tests/property_based_uint256.nim index e84e107..6a2e4b1 100644 --- a/tests/property_based_uint256.nim +++ b/tests/property_based_uint256.nim @@ -35,8 +35,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x or ttm_y @@ -60,8 +60,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x and ttm_y @@ -84,8 +84,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = 
cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x xor ttm_y @@ -104,7 +104,7 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / # y = [y0, y1, y2, y3] # ttm_x = cast[ttmath.UInt256](x) - # mp_x = cast[MpUint[256]](x) + # mp_x = cast[StUint[256]](x) # let # ttm_z = not ttm_x @@ -127,8 +127,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x < ttm_y @@ -152,8 +152,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x <= ttm_y @@ -176,8 +176,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x + ttm_y @@ -200,8 +200,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x - ttm_y @@ -224,8 +224,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x * ttm_y @@ -243,7 +243,7 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / x = [x0, x1, x2, x3] ttm_x = cast[ttmath.UInt256](x) - mp_x = cast[MpUint[256]](x) + mp_x = cast[StUint[256]](x) let ttm_z = ttm_x shl y @@ -261,7 +261,7 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / x = [x0, x1, x2, x3] ttm_x = cast[ttmath.UInt256](x) - mp_x = cast[MpUint[256]](x) + mp_x = cast[StUint[256]](x) let ttm_z = ttm_x shr y @@ -284,8 +284,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x mod ttm_y @@ -306,8 +306,8 @@ suite "Property-based testing (testing with random inputs) - uint64 on 64-bit / ttm_x = cast[ttmath.UInt256](x) ttm_y = cast[ttmath.UInt256](y) - mp_x = cast[MpUint[256]](x) - mp_y = cast[MpUint[256]](y) + mp_x = cast[StUint[256]](x) + mp_y = cast[StUint[256]](y) let ttm_z = ttm_x div ttm_y diff --git a/tests/test_addsub.nim b/tests/test_addsub.nim index 6bae122..15a822f 100644 --- a/tests/test_addsub.nim +++ b/tests/test_addsub.nim @@ -12,8 +12,8 @@ import ../src/mpint, unittest suite "Testing addition implementation": test "In-place addition gives expected result": - var a = 20182018.initMpUint(64) - let b = 20172017.initMpUint(64) + var a = 20182018.u(64) + let b = 20172017.u(64) a += b @@ -21,23 +21,23 @@ suite "Testing addition implementation": test "Addition gives expected result": - let a = 20182018.initMpUint(64) - let b 
= 20172017.initMpUint(64) + let a = 20182018.u(64) + let b = 20172017.u(64) check: cast[uint64](a+b) == 20182018'u64 + 20172017'u64 test "When the low half overflows, it is properly carried": # uint8 (low half) overflow at 255 - let a = 100'u16.initMpUint(16) - let b = 100'u16.initMpUint(16) + let a = 100'u16.u(16) + let b = 100'u16.u(16) check: cast[uint16](a+b) == 200 test "Full overflow is handled like native unsigned types": # uint16 overflows after 65535 - let a = 100'u16.initMpUint(16) - var z = 0'u16.initMpUint(16) - let o = 36'u16.initMpUint(16) + let a = 100'u16.u(16) + var z = 0'u16.u(16) + let o = 36'u16.u(16) for _ in 0 ..< 655: z += a @@ -54,8 +54,8 @@ suite "Testing addition implementation": suite "Testing substraction implementation": test "In-place substraction gives expected result": - var a = 20182018.initMpUint(64) - let b = 20172017.initMpUint(64) + var a = 20182018.u(64) + let b = 20172017.u(64) a -= b @@ -63,14 +63,14 @@ suite "Testing substraction implementation": test "Substraction gives expected result": - let a = 20182018.initMpUint(64) - let b = 20172017.initMpUint(64) + let a = 20182018.u(64) + let b = 20172017.u(64) check: cast[uint64](a-b) == 20182018'u64 - 20172017'u64 test "Full overflow is handled like native unsigned types": # uint16 overflows after 65535 - let a = 100'u16.initMpUint(16) - let b = 101'u16.initMpUint(16) + let a = 100'u16.u(16) + let b = 101'u16.u(16) check: cast[uint16](a-b) == high(uint16) diff --git a/tests/test_bitwise.nim b/tests/test_bitwise.nim index 181e2e9..5339a23 100644 --- a/tests/test_bitwise.nim +++ b/tests/test_bitwise.nim @@ -10,14 +10,14 @@ import ../src/mpint, unittest suite "Testing bitwise operations": - let a = 100'i16.initMpUint(16) + let a = 100'i16.u(16) let b = a * a let z = 10000'u16 assert cast[uint16](b) == z, "Test cannot proceed, something is wrong with the multiplication implementation" - let u = 10000.initMpUint(64) + let u = 10000.u(64) let v = 10000'u64 let clz = 30 @@ -35,11 +35,11 @@ suite "Testing bitwise operations": check: cast[uint16](b) == z # Sanity check check: cast[uint16](b shl 8) == z shl 8 - block: # Testing shl for nested MpUintImpl - let p2_64 = MpUintImpl[uint64](hi:1, lo:0) - let p = 1.initMpUint(128) shl 64 + block: # Testing shl for nested UintImpl + let p2_64 = UintImpl[uint64](hi:1, lo:0) + let p = 1.u(128) shl 64 - check: p == cast[MpUint[128]](p2_64) + check: p == cast[StUint[128]](p2_64) test "Shift right - by less than half the size of the integer": check: cast[uint16](b) == z # Sanity check diff --git a/tests/test_comparison.nim b/tests/test_comparison.nim index 5e3bbbb..6d92932 100644 --- a/tests/test_comparison.nim +++ b/tests/test_comparison.nim @@ -11,19 +11,19 @@ import ../src/mpint, unittest suite "Testing comparison operators": let - a = 10'i16.initMpUint(16) - b = 15'i16.initMpUint(16) + a = 10'i16.u(16) + b = 15'i16.u(16) c = 150'u16 - d = 4.initMpUint(128) shl 64 - e = 4.initMpUint(128) - f = 4.initMpUint(128) shl 65 + d = 4.u(128) shl 64 + e = 4.u(128) + f = 4.u(128) shl 65 test "< operator": check: a < b not (a + b < b) not (a + a + a < b + b) - not (a * b < cast[MpUint[16]](c)) + not (a * b < cast[StUint[16]](c)) e < d d < f @@ -32,7 +32,7 @@ suite "Testing comparison operators": a <= b not (a + b <= b) a + a + a <= b + b - a * b <= cast[MpUint[16]](c) + a * b <= cast[StUint[16]](c) e <= d d <= f @@ -41,7 +41,7 @@ suite "Testing comparison operators": b > a not (b > a + b) not (b + b > a + a + a) - not (cast[Mpuint[16]](c) > a * b) + not (cast[StUint[16]](c) > a * b) d 
> e f > d @@ -50,6 +50,6 @@ suite "Testing comparison operators": b >= a not (b >= a + b) b + b >= a + a + a - cast[MpUint[16]](c) >= a * b + cast[StUint[16]](c) >= a * b d >= e f >= d diff --git a/tests/test_endianness.nim b/tests/test_endianness.nim index f17c2ff..58ebc6e 100644 --- a/tests/test_endianness.nim +++ b/tests/test_endianness.nim @@ -11,7 +11,7 @@ import ../src/mpint, unittest suite "Testing byte representation": test "Byte representation conforms to the platform endianness": - let a = 20182018.initMpUint(64) + let a = 20182018.u(64) let b = 20182018'u64 type AsBytes = array[8, byte] diff --git a/tests/test_muldiv.nim b/tests/test_muldiv.nim index 10a0a21..dc3c462 100644 --- a/tests/test_muldiv.nim +++ b/tests/test_muldiv.nim @@ -12,23 +12,23 @@ import ../src/mpint, unittest suite "Testing multiplication implementation": test "Multiplication with result fitting in low half": - let a = 10000.initMpUint(64) - let b = 10000.initMpUint(64) + let a = 10000.u(64) + let b = 10000.u(64) check: cast[uint64](a*b) == 100_000_000'u64 # need 27-bits test "Multiplication with result overflowing low half": - let a = 1_000_000.initMpUint(64) - let b = 1_000_000.initMpUint(64) + let a = 1_000_000.u(64) + let b = 1_000_000.u(64) check: cast[uint64](a*b) == 1_000_000_000_000'u64 # need 40 bits test "Full overflow is handled like native unsigned types": - let a = 1_000_000_000.initMpUint(64) - let b = 1_000_000_000.initMpUint(64) - let c = 1_000.initMpUint(64) + let a = 1_000_000_000.u(64) + let b = 1_000_000_000.u(64) + let c = 1_000.u(64) check: cast[uint64](a*b*c) == 1_000_000_000_000_000_000_000'u64 # need 70-bits @@ -36,21 +36,21 @@ suite "Testing multiplication implementation": suite "Testing division and modulo implementation": test "Divmod(100, 13) returns the correct result": - let a = 100.initMpUint(64) - let b = 13.initMpUint(64) + let a = 100.u(64) + let b = 13.u(64) let qr = divmod(a, b) check: cast[uint64](qr.quot) == 7'u64 check: cast[uint64](qr.rem) == 9'u64 test "Divmod(2^64, 3) returns the correct result": - let a = 1.initMpUint(128) shl 64 - let b = 3.initMpUint(128) + let a = 1.u(128) shl 64 + let b = 3.u(128) let qr = divmod(a, b) - let q = cast[MpUintImpl[uint64]](qr.quot) - let r = cast[MpUintImpl[uint64]](qr.rem) + let q = cast[UintImpl[uint64]](qr.quot) + let r = cast[UintImpl[uint64]](qr.rem) check: q.lo == 6148914691236517205'u64 check: q.hi == 0'u64
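
For readers skimming this rename, a minimal usage sketch of the API as it stands after this patch follows; it is not part of the diff itself. It only uses calls that appear in the patched sources and tests (the `u` initializer, the wrapped operators, `divmod`, and casts to the exported `UintImpl`/`StUint` types), and it assumes the repository layout used by the test files — the top-level module is still imported as `../src/mpint` in this patch; only the nimble package and the type names are renamed.

# Minimal usage sketch of the renamed API (not part of the patch above).
# Assumption: run from the tests/ layout of this repo; the top-level module
# is still named `mpint` here, only the package and the types were renamed.
import ../src/mpint

let a = 100.u(128)   # StUint[128] built with the new `u` initializer
let b = 13.u(128)

# Arithmetic uses the usual operators, as exercised in tests/test_addsub.nim.
doAssert cast[UintImpl[uint64]](a + b).lo == 113'u64

# divmod returns a (quot, rem) tuple, as in tests/test_muldiv.nim.
let qr = divmod(a, b)
doAssert cast[UintImpl[uint64]](qr.quot).lo == 7'u64   # 100 div 13
doAssert cast[UintImpl[uint64]](qr.rem).lo == 9'u64    # 100 mod 13

# Shifts behave like native unsigned types, as in tests/test_bitwise.nim.
let p = 1.u(128) shl 64
doAssert p == cast[StUint[128]](UintImpl[uint64](hi: 1, lo: 0))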