nim-stint/src/uint_binary_ops.nim

# Mpint
# Copyright 2018 Status Research & Development GmbH
# Licensed under either of
#
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
#
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import  ./private/[bithacks, casting],
        uint_type,
        uint_comparison,
        uint_bitwise_ops  # assumed sibling module providing `shl`/`shr` on MpUint, used by divmod below

proc `+=`*[T: MpUint](x: var T, y: T) {.noSideEffect.}=
  ## In-place addition for multi-precision unsigned integers
  #
  # Optimized assembly should contain an adc instruction (add with carry).
  # Clang on macOS emits it with the -d:release switch for MpUint[uint32] (i.e. uint64 arithmetic).
  type SubT = type x.lo
  let tmp = x.lo

  x.lo += y.lo
  x.hi += SubT(x.lo < tmp) + y.hi
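
  # Carry illustration (hedged example, values chosen here, not from the original sources):
  # with MpUint[uint8], adding (hi: 0, lo: 0xFF) and (hi: 0, lo: 1) wraps x.lo to 0x00,
  # so `x.lo < tmp` is true and a carry of 1 is added to x.hi, giving (hi: 1, lo: 0) == 256.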

proc `+`*[T: MpUint](x, y: T): T {.noSideEffect, noInit, inline.}=
  ## Addition for multi-precision unsigned integers
  result = x
  result += y

proc `-=`*[T: MpUint](x: var T, y: T) {.noSideEffect.}=
  ## In-place subtraction for multi-precision unsigned integers
  #
  # Optimized assembly should contain an sbb instruction (subtract with borrow).
  # Clang on macOS emits it with the -d:release switch for MpUint[uint32] (i.e. uint64 arithmetic).
  type SubT = type x.lo
  let tmp = x.lo

  x.lo -= y.lo
  x.hi -= SubT(x.lo > tmp) + y.hi
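
  # Borrow illustration (hedged example, values chosen here, not from the original sources):
  # with MpUint[uint8], subtracting (hi: 0, lo: 1) from (hi: 1, lo: 0) wraps x.lo to 0xFF,
  # so `x.lo > tmp` is true and a borrow of 1 is subtracted from x.hi, giving (hi: 0, lo: 0xFF) == 255.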

proc `-`*[T: MpUint](x, y: T): T {.noSideEffect, noInit, inline.}=
  ## Subtraction for multi-precision unsigned integers
  result = x
  result -= y

proc naiveMul[T: BaseUint](x, y: T): MpUint[T] {.noSideEffect, noInit, inline.}
  # Forward declaration

proc `*`*[T: MpUint](x, y: T): T {.noSideEffect, noInit.}=
  ## Multiplication for multi-precision unsigned integers
  #
  # For our representation, it is similar to school grade multiplication.
  # Consider hi and lo as if they were digits:
  #
  #       12
  #     x 15
  #     -----
  #       10   lo*lo -> z0 (units)
  #      5     hi*lo -> z1 (tens)
  #      2     lo*hi -> z1 (tens)
  #     1      hi*hi -> z2 (hundreds)
  #     -----
  #      180
  #
  # If T is a type:
  # - for T * T --> T we don't need to compute z2, as it always overflows
  # - for T * T --> 2T (uint64 * uint64 --> uint128) we use extended precision multiplication

  result = naiveMul(x.lo, y.lo)
  result.hi += (naiveMul(x.hi, y.lo) + naiveMul(x.lo, y.hi)).lo
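
  # Worked example (hedged illustration, values chosen here, not from the original sources):
  # with MpUint[uint8] and h = 2^8, take x = 0x0102 (hi: 0x01, lo: 0x02) and y = 0x0103 (hi: 0x01, lo: 0x03).
  #   z0 = lo*lo = 0x0006, z1 = hi*lo + lo*hi = 0x03 + 0x02 = 0x05, z2 = hi*hi = 0x01
  #   x*y mod 2^16 = z0 + z1*h = 0x0506, while the full product is 0x1_0506;
  #   z2 only contributes to bits >= 2^16, which is why it is not computed here.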

template naiveMulImpl[T: MpUint](x, y: T): MpUint[T] =
  # See details at
  # https://en.wikipedia.org/wiki/Karatsuba_algorithm
  # https://locklessinc.com/articles/256bit_arithmetic/
  # https://www.miracl.com/press/missing-a-trick-karatsuba-variations-michael-scott
  #
  # We use naive school grade multiplication instead of Karatsuba, i.e.
  # z1 = x.hi * y.lo + x.lo * y.hi (Naive) = (x.lo - x.hi)(y.hi - y.lo) + z0 + z2 (Karatsuba)
  #
  # On modern architectures:
  # - addition and multiplication have the same cost
  # - Karatsuba would require dealing with potentially negative intermediate results
  #   and introduce branching
  # - more total operations means more register moves

  let
    halfSize = T.sizeof * 4
    z0 = naiveMul(x.lo, y.lo)
    tmp = naiveMul(x.hi, y.lo)

  var z1 = tmp
  z1 += naiveMul(x.lo, y.hi)  # second cross product: per the comment above this must be x.lo * y.hi
  let z2 = (z1 < tmp).T + naiveMul(x.hi, y.hi)

  let tmp2 = z1.lo shl halfSize
  result.lo = tmp2
  result.lo += z0
  result.hi = (result.lo < tmp2).T + z2 + z1.hi
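
  # Recombination sketch (illustrative note, not from the original sources): with h = 2^halfSize,
  # the full product is z0 + z1*h + z2*h^2. The low half packs z0 plus the low part of z1
  # shifted into place; the high half collects the carry from that addition, z1's high part and z2.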

proc naiveMul[T: BaseUint](x, y: T): MpUint[T] {.noSideEffect, noInit, inline.}=
  ## Naive multiplication algorithm with extended precision

  when T.sizeof in {1, 2, 4}:
    # Use a type twice as wide to do the multiplication
    cast[type result](x.asDoubleUint * y.asDoubleUint)
  elif T.sizeof == 8: # uint64 or MpUint[uint32]
    # We cannot double uint64 to uint128
    naiveMulImpl(x.toMpUint, y.toMpUint)
  else:
    # Case: at least uint128 * uint128 --> uint256
    naiveMulImpl(x, y)
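
  # Dispatch example (illustrative, assuming common platform sizes): multiplying two uint32
  # values takes the first branch, widening them to uint64 with asDoubleUint and reinterpreting
  # the 64-bit product as MpUint[uint32]. Two uint64 values cannot be widened (no native uint128),
  # so they are split into MpUint[uint32] halves with toMpUint and handled by naiveMulImpl.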

proc divmod*[T: BaseUint](x, y: T): tuple[quot, rem: T] {.noSideEffect.}=
  ## Division for multi-precision unsigned integers
  ## Returns the quotient and remainder in a (quot, rem) tuple
  #
  # Implementation through binary shift division
  const zero = T()

  when x.lo is MpUint:
    const one = T(lo: getSubType(T)(1))
  else:
    const one: getSubType(T) = 1

  if unlikely(y.isZero):
    raise newException(DivByZeroError, "You attempted to divide by zero")

  var
    shift = x.bit_length - y.bit_length
    d = y shl shift

  result.rem = x

  while shift >= 0:
    result.quot += result.quot
    if result.rem >= d:
      result.rem -= d
      result.quot.lo = result.quot.lo or one

    d = d shr 1
    dec(shift)

  # Performance note:
  # The performance of this implementation is extremely dependent on shl and shr.
  #
  # Probably the most efficient algorithm that can benefit from the MpUint data structure is
  # the recursive fast division by Burnikel and Ziegler (http://www.mpi-sb.mpg.de/~ziegler/TechRep.ps.gz):
  # - Python implementation: https://bugs.python.org/file11060/fast_div.py and discussion https://bugs.python.org/issue3451
  # - C++ implementation: https://github.com/linbox-team/givaro/blob/master/src/kernel/recint/rudiv.h
  # - The Handbook of Elliptic and Hyperelliptic Cryptography, Algorithm 10.35 on page 188, has a more explicit version of the div2NxN algorithm. That algorithm is directly recursive and avoids the mutual recursion between div2NxN and div3Nx2N of the original paper.
  # - Comparison of fast division algorithms for large integers: http://bioinfo.ict.ac.cn/~dbu/AlgorithmCourses/Lectures/Hasselstrom2003.pdf
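
  # Worked example (illustrative trace, numbers chosen here for clarity): divmod(15, 4)
  #   shift = bit_length(15) - bit_length(4) = 4 - 3 = 1, d = 4 shl 1 = 8, rem = 15
  #   shift = 1: quot doubles to 0; rem (15) >= 8, so rem = 7 and quot = 1; d = 4
  #   shift = 0: quot doubles to 2; rem (7)  >= 4, so rem = 3 and quot = 3; d = 2
  #   returns (quot: 3, rem: 3), i.e. 15 = 3*4 + 3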

proc `div`*[T: BaseUint](x, y: T): T {.inline, noSideEffect.} =
  ## Division operation for multi-precision unsigned integers
  divmod(x,y).quot

proc `mod`*[T: BaseUint](x, y: T): T {.inline, noSideEffect.} =
  ## Remainder operation for multi-precision unsigned integers
  divmod(x,y).rem
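
when isMainModule:
  # Minimal smoke test sketch (illustrative addition, not part of the original module).
  # Assumptions: MpUint and its lo/hi fields are exported by uint_type, and `==` for
  # MpUint is provided by uint_comparison.
  let
    a = MpUint[uint32](hi: 0'u32, lo: 0xFFFF_FFFF'u32)  # 2^32 - 1
    b = MpUint[uint32](hi: 0'u32, lo: 1'u32)

  doAssert a + b == MpUint[uint32](hi: 1'u32, lo: 0'u32)  # addition carries into the high limb
  doAssert (a + b) - b == a                               # subtraction borrows it back

  let
    x = MpUint[uint32](hi: 0'u32, lo: 15'u32)
    y = MpUint[uint32](hi: 0'u32, lo: 4'u32)

  doAssert x * y == MpUint[uint32](hi: 0'u32, lo: 60'u32)
  doAssert x div y == MpUint[uint32](hi: 0'u32, lo: 3'u32)
  doAssert x mod y == MpUint[uint32](hi: 0'u32, lo: 3'u32)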