Ethereum KZG polynomial commitments / EIP-4844 (part 1) (#239)

* common error model for serialization of BLS signatures and KZG objects

* [KZG] add Ethereum's test vectors [skip ci]

* dump progress on KZG

* Stash: trusted setup generator

* implement cache optimized bit-reversal-permutation

* Add generator for the Ethereum test trusted setups

* implement naive deserialization for the trusted setup interchange format

* implement verify_kzg_proof

* Add test skeleton of verify KZG proof

* rebase import fixes
Mamy Ratsimbazafy 2023-08-13 15:08:04 +02:00 committed by GitHub
parent 47b4f48dfb
commit f57d071f11
277 changed files with 4936 additions and 754 deletions

View File

@ -36,16 +36,15 @@ proc demoKeyGen(): tuple[seckey: SecretKey, pubkey: PublicKey] =
# The API for keygen is not ready in ethereum_bls_signatures
let ikm = rng.random_byte_seq(32)
doAssert cast[ptr BigInt[255]](result.seckey.addr)[].derive_master_secretKey(ikm)
let ok = result.pubkey.derive_pubkey(result.seckey)
doAssert ok == cttBLS_Success
result.pubkey.derive_pubkey(result.seckey)
proc benchDeserPubkey*(iters: int) =
let (sk, pk) = demoKeyGen()
var pk_comp{.noInit.}: array[48, byte]
# Serialize compressed
let ok = pk_comp.serialize_pubkey_compressed(pk)
doAssert ok == cttBLS_Success
let status = pk_comp.serialize_pubkey_compressed(pk)
doAssert status == cttCodecEcc_Success
var pk2{.noInit.}: PublicKey
@ -57,8 +56,8 @@ proc benchDeserPubkeyUnchecked*(iters: int) =
var pk_comp{.noInit.}: array[48, byte]
# Serialize compressed
let ok = pk_comp.serialize_pubkey_compressed(pk)
doAssert ok == cttBLS_Success
let status = pk_comp.serialize_pubkey_compressed(pk)
doAssert status == cttCodecEcc_Success
var pk2{.noInit.}: PublicKey
@ -73,12 +72,11 @@ proc benchDeserSig*(iters: int) =
sig_comp{.noInit.}: array[96, byte]
sig {.noInit.}: Signature
let status = sig.sign(sk, msg)
doAssert status == cttBLS_Success
sig.sign(sk, msg)
# Serialize compressed
let ok = sig_comp.serialize_signature_compressed(sig)
doAssert ok == cttBLS_Success
let status = sig_comp.serialize_signature_compressed(sig)
doAssert status == cttCodecEcc_Success
var sig2{.noInit.}: Signature
@ -93,12 +91,11 @@ proc benchDeserSigUnchecked*(iters: int) =
sig_comp{.noInit.}: array[96, byte]
sig {.noInit.}: Signature
let status = sig.sign(sk, msg)
doAssert status == cttBLS_Success
sig.sign(sk, msg)
# Serialize compressed
let ok = sig_comp.serialize_signature_compressed(sig)
doAssert ok == cttBLS_Success
let status = sig_comp.serialize_signature_compressed(sig)
doAssert status == cttCodecEcc_Success
var sig2{.noInit.}: Signature
@ -112,15 +109,14 @@ proc benchSign*(iters: int) =
var sig: Signature
bench("BLS signature", "BLS12_381 G2", iters):
let status = sig.sign(sk, msg)
sig.sign(sk, msg)
proc benchVerify*(iters: int) =
let (sk, pk) = demoKeyGen()
let msg = "Mr F was here"
var sig: Signature
let ok = sig.sign(sk, msg)
doAssert ok == cttBLS_Success
sig.sign(sk, msg)
bench("BLS verification", "BLS12_381", iters):
let valid = pk.verify(msg, sig)
@ -136,8 +132,7 @@ proc benchFastAggregateVerify*(numKeys, iters: int) =
for i in 0 ..< numKeys:
let (sk, pk) = demoKeyGen()
validators[i] = pk
let status = sigs[i].sign(sk, msg)
doAssert status == cttBLS_Success
sigs[i].sign(sk, msg)
aggSig.aggregate_signatures_unstable_api(sigs)
@ -155,8 +150,7 @@ proc benchVerifyMulti*(numSigs, iters: int) =
for i in 0 ..< numSigs:
let (sk, pk) = demoKeyGen()
sha256.hash(hashedMsg, "msg" & $i)
let status = sig.sign(sk, hashedMsg)
doAssert status == cttBLS_Success
sig.sign(sk, hashedMsg)
triplets.add (pk, hashedMsg, sig)
bench("BLS verif of " & $numSigs & " msgs by "& $numSigs & " pubkeys", "BLS12_381", iters):
@ -178,8 +172,7 @@ proc benchVerifyBatched*(numSigs, iters: int) =
for i in 0 ..< numSigs:
let (sk, pk) = demoKeyGen()
sha256.hash(hashedMsg, "msg" & $i)
let status = sig.sign(sk, hashedMsg)
doAssert status == cttBLS_Success
sig.sign(sk, hashedMsg)
pubkeys.add pk
messages.add hashedMsg

View File

@ -2,7 +2,7 @@ import
../constantine/ethereum_evm_precompiles,
./platforms, ./bench_blueprint,
../constantine/platforms/codecs
../constantine/serialization/codecs
proc report(op: string, elapsedNs: int64, elapsedCycles: int64, iters: int) =
let ns = elapsedNs div iters

View File

@ -2,7 +2,8 @@ import
../constantine/math/arithmetic,
../constantine/math/io/[io_bigints, io_fields],
../constantine/math/config/curves,
../constantine/platforms/[abstractions, codecs],
../constantine/platforms/abstractions,
../constantine/serialization/codecs,
../constantine/math_arbitrary_precision/arithmetic/bigints_views,
../helpers/prng_unsafe,
./platforms, ./bench_blueprint

View File

@ -23,11 +23,11 @@ template genBindingsField*(Field: untyped) =
else:
{.push noconv, exportc, raises: [].} # No exceptions allowed
func `ctt _ Field _ unmarshalBE`(dst: var Field, src: openarray[byte]) =
func `ctt _ Field _ unmarshalBE`(dst: var Field, src: openarray[byte]): bool =
## Deserialize
unmarshalBE(dst, src)
func `ctt _ Field _ marshalBE`(dst: var openarray[byte], src: Field) =
func `ctt _ Field _ marshalBE`(dst: var openarray[byte], src: Field): bool =
marshalBE(dst, src)
# --------------------------------------------------------------------------------------
func `ctt _ Field _ is_eq`(a, b: Field): SecretBool =

View File

@ -68,6 +68,12 @@ typedef __UINT64_TYPE__ uint64_t;
#else
#include <stdint.h>
#endif
#if defined(__STDC_VERSION__) && __STDC_VERSION__>=199901
# define bool _Bool
#else
# define bool unsigned char
#endif
"""
proc genCttBaseTypedef*(): string =
@ -115,6 +121,7 @@ void ctt_{libName}_init_NimMain(void);"""
# -------------------------------------------
let TypeMap {.compileTime.} = newStringTable({
"bool": "bool",
"SecretBool": "secret_bool",
"SecretWord": "secret_word"
})
@ -201,7 +208,10 @@ macro collectBindings*(cBindingsStr: untyped, body: typed): untyped =
var name = $paramDef[j]
cBindings &= toCparam(name.split('`')[0], pType)
cBindings &= ");"
if fnDef.params[0].eqIdent"bool":
cBindings &= ") __attribute__((warn_unused_result));"
else:
cBindings &= ");"
if defined(CTT_GENERATE_HEADERS):
result = newConstStmt(cBindingsStr, newLit cBindings)

View File

@ -1,6 +1,6 @@
packageName = "constantine"
version = "0.0.1"
author = "Status Research & Development GmbH"
author = "Mamy Ratsimbazafy"
description = "This library provides thoroughly tested and highly-optimized implementations of cryptography protocols."
license = "MIT or Apache License 2.0"

View File

@ -6,7 +6,9 @@
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import ../platforms/[endians, views]
import
../platforms/views,
../serialization/endians
# ############################################################
#

View File

@ -0,0 +1,27 @@
# Commitment schemes
https://en.wikipedia.org/wiki/Commitment_scheme
> A commitment scheme is a cryptographic primitive that allows one to commit to a chosen value (or chosen statement) while keeping it hidden to others, with the ability to reveal the committed value later. Commitment schemes are designed so that a party cannot change the value or statement after they have committed to it: that is, commitment schemes are binding.
## Use-cases
An important use-case missing from the Wikipedia article is:
"There exists a bundle of transactions that change the state of my database/ledger/blockchain to this state.". The whole bundle is not needed, only a short proof.
## KZG Polynomial Commitments
- Constant-Size Commitments to Polynomials and Their Applications\
Kate, Zaverucha, Goldberg, 2010\
https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf\
https://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
- KZG commitments from the Lagrange basis without FFTs\
Drake, 2020\
https://ethresear.ch/t/kate-commitments-from-the-lagrange-basis-without-ffts/6950
- KZG Multiproofs\
Feist, Khovratovich, 2020\
https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html\
https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
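
For orientation, the identity a KZG opening proof certifies and the pairing check a verifier performs (the same relations documented in this repo's `commitments/kzg_polynomial_commitments` module) can be summarized as:

$$
\pi = \left[\frac{p(\tau) - p(z)}{\tau - z}\right]_1
\qquad\qquad
e\big(\pi,\ [\tau]_2 - [z]_2\big) = e\big(C - [p(z)]_1,\ [1]_2\big)
$$

where $C = [p(\tau)]_1$ is the commitment, $z$ is the verifier's challenge and $\tau$ is the secret behind the trusted setup (only $[\tau^i]_1$ and $[\tau]_2$ are public).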

View File

@ -0,0 +1,324 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
../math/config/curves,
../math/[ec_shortweierstrass, arithmetic, extension_fields],
../math/elliptic/[ec_scalar_mul, ec_multi_scalar_mul],
../math/pairings/pairings_generic,
../math/constants/zoo_generators,
../math/polynomials/polynomials,
../platforms/[abstractions, views]
## ############################################################
##
## KZG Polynomial Commitments
##
## ############################################################
##
## This module implements KZG-inspired Polynomial commitments (Kate, Zaverucha, Goldberg)
##
## - KZG Paper:
## Constant-Size Commitments to Polynomials and Their Applications
## Kate, Zaverucha, Goldberg, 2010
## https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf
## https://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
##
##
## KZG-inspired protocol
## ------------------------------------------------------------
##
## Quick algebra refresher for developers
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
## - A group is a set of elements:
## - with a binary operation to combine them called the group law
## - with a neutral element
## - with an inverse, applying the group law on an element and its inverse results in the neutral element.
##
## - the group order or cardinality is the number of elements in the set
## - the group can use the additive or multiplicative notation.
## - the group can be cyclic. i.e. all elements of the group can be generated
## by repeatedly applying the group law.
##
## The additive/multiplicative notation is chosen by social consensus,
## hence the confusion between scalar multiplication [a]P and exponentiation Pᵃ for elliptic curves.
##
## - A field is a set of elements
## - with two group laws, addition and multiplication
## - and the corresponding group properties (additive/multiplicative inverse and neutral elements)
##
## - A field can be finite (modular arithmetic modulo a prime) or infinite (the real numbers)
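##
## For example, integer arithmetic modulo a prime gives a finite field:
## modulo 5, addition and multiplication stay within {0, 1, 2, 3, 4},
## 0 and 1 are the neutral elements, and every non-zero element has a
## multiplicative inverse (2·3 = 6 ≡ 1 (mod 5), so 2 and 3 are inverses of each other).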
##
## Sigil refreshers for developers
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
## - ∃: there exists
## - ∀: for all
## - ∈: element of
##
## Notation
## ~~~~~~~~
##
## - 𝔽r is a finite-field of prime order r
## - 𝔾1 is an additive group of prime order r
## - 𝔾2 is an additive group of prime order r
## - 𝔾t is a multiplicative group of prime order r
##
## In practice:
## - the solutions (x, y) of y² = x³ + b contain a cyclic subgroup of order r: the group 𝔾1 (of the elliptic curve E1)
## - the solutions (x', y') of y'² = x'³ + b' contain a cyclic subgroup of order r: the group 𝔾2 (of the elliptic curve E2)
## - 𝔾t is also a cyclic subgroup of order r
## - r is the (large prime) number of elements in all those subgroups.
##
## - Implementation details (for the very curious)
## - For 𝔾1, (x, y) ∈ (𝔽p, 𝔽p)
## - For 𝔾2, (x', y') ∈ (𝔽pⁿ, 𝔽pⁿ) with n = 2 usually (BN and BLS12 curves), but it can be 1 (BW6 curves), 4 (BLS24 curves) or ...
## - 𝔾t is the cyclotomic subgroup over 𝔽pᵏ, k being the curve embedding degree, with k = 12 usually (BN and BLS12 curves) but it can be 6 (BW6 curves), 24 (BLS24 curves) or ...
## - p is completely unused in the protocol so don't use mental space to keep these details.
##
## We use the notation:
## [a]P to represent P+P+ .... + P
## Applying the group law `a` times, i.e. the scalar multiplication.
##
## There exists a pairing function (bilinear map)
## e: 𝔾1 x 𝔾2 -> 𝔾t
## That map is bilinear
## ∀a ∈ 𝔽r, ∀b ∈ 𝔽r, ∀P ∈ 𝔾1, ∀Q ∈ 𝔾2,
## e([a]P, [b]Q) = e(P, Q)ᵃᵇ
##
## We use the notation:
## G₁ for the protocol-chosen generator of 𝔾1
## G₂ for the protocol-chosen generator of 𝔾2
## [a]₁ for the scalar multiplication of the 𝔾1 generator by a, a ∈ 𝔽r
## [b]₂ for the scalar multiplication of the 𝔾2 generator by b, b ∈ 𝔽r
##
## Polynomial Commitment Scheme
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
## We have 2 parties, a Prover and a Verifier.
##
## They share a public Structured Reference String (SRS), also called trusted setup:
## srs_g1: [[1]₁, [τ]₁, [τ²]₁, ... [τⁿ]₁] also called powers of tau, with a bounded degree n
## srs_g2: [[1]₂, [τ]₂]
##
## τ and its powers are secrets that no one knows; we only work with [τⁱ]₁ and [τ]₂
## not with τ directly. (τ cannot be deduced due to the elliptic curve discrete logarithm problem)
##
## Info
## τ and its powers are created through a secure multi-party computation (MPC) ceremony
## called powers of tau. Each participant contributes randomness.
## Only one honest participant (one who ensures that the randomness they created cannot be recreated)
## is necessary for the ceremony's success.
##
## Protocol
##
## 0. A data blob is interpreted as up to n 𝔽r elements
## corresponding to a polynomial p(x) = blob₀ + blob₁ x + blob₂ x² + ... + blobₙ₋₁ xⁿ⁻¹
## p(x) = ∑ blobᵢ xⁱ
##
## So we can commit/prove up to n·log₂(r) bits of data.
## For Ethereum, n = 4096 and log₂(r) ≈ 255 bits,
## i.e. 4096 × 255 / 8 = 130560 bytes (≈ 130.5 kB) of transaction data committed per 48-byte proof stored in the blockchain
##
## 1. commit(srs_g1, blob) -> commitment C = ∑ blobᵢ.srs_g1ᵢ = ∑ [blobᵢ.τⁱ]₁ = [p(τ)]₁
##
## 2. The verifier chooses a random challenge `z` in 𝔽r that the prover does not control.
## To make the protocol non-interactive, z may be computed via the Fiat-Shamir heuristic.
##
## 3. compute_proof(blob, [commitment]₁, challenge) -> (eval_at_challenge, [proof]₁)
## blob: p(x)
## [commitment]₁: [p(τ)]₁
## challenge: z
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## -> The prover needs to provide a proof that it knows a polynomial p(x)
## such that p(z) = y. With the proof, the verifier doesn't need access to the polynomial to verify the claim.
## -> Compute a witness polynomial w(x, z) = (p(x) - p(z)) / (x-z)
## We can evaluate it at τ from the public SRS and challenge point `z` chosen by the verifier (indifferentiable from random).
## We don't know τ, but we know [τ]₁ so we transport the problem from 𝔽r to 𝔾1
## => The proof is the evaluation of the witness polynomial for a challenge `z` of the verifier choosing.
## w(τ, z) = proof
## We output [proof]₁ = [proof]G₁
##
## 4. verify_commitment([commitment]₁, challenge, eval_at_challenge, [proof]₁) -> bool
## [commitment]₁: [p(τ)]₁
## challenge: z
## eval_at_challenge: p(z) = y
## [proof]₁: [(p(τ) - p(z)) / (τ-z)]₁
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## -> proof = w(τ, z) = (p(τ) - p(z)) / (τ-z) = (p(τ) - y) / (τ-z)
## hence proof.(τ-z) = p(τ) - y
##
## => using a bilinear pairing function e(𝔾1, 𝔾2)->𝔾t we can rewrite this equality to
## e([proof]₁, [τ]₂ - [z]₂) = e(C - [y]₁, [1]₂)
##
## According to the Schwartz-Zippel lemma it is cryptographically unlikely
## that this equation holds unless the prover indeed provided [commitment]₁ = [p(τ)]₁
##
## Variants
## - srs_g1 and blob may either be polynomials in the monomial basis
## p(x) = blob₀ + blob₁ x + blob₂ x² + ... + blobₙ₋₁ xⁿ⁻¹
## - or polynomials in the Lagrange basis, defined over tuples
## [(ω⁰, p(ω⁰)), (ω¹, p(ω¹)), (ω², p(ω²)), ..., (ωⁿ⁻¹, p(ωⁿ⁻¹))]
## with ω ∈ 𝔽r a root of unity of order n, i.e. ωⁿ = 1
type
PowersOfTauCoef[D: static int, F; G: static Subgroup] = object
coefs: array[D, ECP_ShortW_Aff[F, G]]
PowersOfTauEval[D: static int, F; G: static Subgroup] = object
evals: array[D, ECP_ShortW_Aff[F, G]]
G1aff[C: static Curve] = ECP_ShortW_Aff[Fp[C], G1]
G1jac[C: static Curve] = ECP_ShortW_Jac[Fp[C], G1]
# Helper functions
# ------------------------------------------------------------
func g1_lincomb[C: static Curve](r: var G1jac[C],
points: ptr UncheckedArray[G1aff[C]],
scalars: ptr UncheckedArray[matchingOrderBigInt(C)],
len: int) =
## Multi-scalar-multiplication / linear combination
r.raw.multiScalarMul_vartime(
scalars,
cast[ptr UncheckedArray[typeof points[0].raw]](points),
len)
func g1_lincomb[C: static Curve](r: var G1jac[C],
points: ptr UncheckedArray[G1aff[C]],
scalars: ptr UncheckedArray[Fr[C]],
len: int) =
## Multi-scalar-multiplication / linear combination
let scalars2 = allocHeapArray(matchingOrderBigInt(C), len)
for i in 0 ..< len:
scalars2[i].fromField(scalars[i])
r.g1_lincomb(points, scalars2, len)
scalars2.freeHeap()
# KZG - Prover - Lagrange basis
# ------------------------------------------------------------
#
# For now we assume that the input polynomial always has the same degree
# as the powers of τ
func kzg_commit*[N: static int, C: static Curve](
commitment: var ECP_ShortW_Jac[Fp[C], G1],
poly_evals: array[N, matchingOrderBigInt(C)],
powers_of_tau: PowersOfTauEval[N, Fp[C], G1]) =
commitment.g1_lincomb(powers_of_tau.evals.asUnchecked(), poly_evals.asUnchecked(), N)
func kzg_prove*[N: static int, C: static Curve](
proof: var ECP_ShortW_Jac[Fp[C], G1],
eval_at_challenge: var Fr[C],
poly: PolynomialEval[N, Fr[C]],
domain: PolyDomainEval[N, Fr[C]],
challenge: Fr[C],
powers_of_tau: PowersOfTauEval[N, Fp[C], G1]) =
# Note:
# The order of inputs in
# `kzg_prove`, `evalPolyAt_vartime`, `differenceQuotientEvalOffDomain`, `differenceQuotientEvalInDomain`
# minimizes register changes when parameter passing.
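#
# In evaluation (Lagrange) form, the quotient q(x) = (p(x) - p(z)) / (x - z)
# is computed pointwise over the evaluation domain {ωⁱ}:
#   q(ωⁱ) = (pᵢ - p(z)) · (ωⁱ - z)⁻¹
# using the inverses 1/(ωⁱ - z) stored in invRootsMinusZ.
# If the challenge z happens to equal a root of unity ωⁱ (zIndex != -1),
# that term would be 0/0 and the dedicated in-domain formula
# (differenceQuotientEvalInDomain) is used instead.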
# z = challenge
let invRootsMinusZ = allocHeap(array[N, Fr[C]])
let diffQuotientPoly = allocHeap(PolynomialEval[N, Fr[C]])
let zIndex = invRootsMinusZ.inverseRootsMinusZ_vartime(domain, challenge)
if zIndex == -1:
# p(z)
eval_at_challenge.evalPolyAt_vartime(
invRootsMinusZ,
poly, domain,
challenge)
# q(x) = (p(x) - p(z)) / (x - z)
diffQuotientPoly.differenceQuotientEvalOffDomain(
invRootsMinusZ, poly, eval_at_challenge)
else:
# p(z)
# But the challenge z is equal to one of the roots of unity (how likely is that?)
eval_at_challenge = poly[zIndex]
# q(x) = (p(x) - p(z)) / (x - z)
diffQuotientPoly.differenceQuotientEvalInDomain(
invRootsMinusZ, poly, domain, zIndex)
proof.g1_lincomb(powers_of_tau.evals.asUnchecked(), diffQuotientPoly.asUnchecked(), N)
freeHeap(diffQuotientPoly)
freeHeap(invRootsMinusZ)
# KZG - Verifier
# ------------------------------------------------------------
func kzg_verify*[F2; C: static Curve](
commitment: ECP_ShortW_Aff[Fp[C], G1],
challenge: BigInt, # matchingOrderBigInt(C),
eval_at_challenge: BigInt, # matchingOrderBigInt(C),
proof: ECP_ShortW_Aff[Fp[C], G1],
tauG2: ECP_ShortW_Aff[F2, G2]): bool =
## Verify a short KZG proof that ``p(challenge) = eval_at_challenge``
## without doing the whole p(challenge) computation
#
# Scalar inputs
# challenge
# eval_at_challenge = p(challenge)
#
# Group inputs
# [commitment]₁ = [p(τ)]G
# [proof]₁ = [proof]G
# [τ]₂ = [τ]H in the trusted setup
#
# With z = challenge, we want to verify
# proof.(τ - z) = p(τ)-p(z)
#
# However τ is a secret from the trusted setup that cannot be used raw.
# We transport the equation in the pairing group 𝔾T with bilinear pairings e
#
# e([proof]₁, [τ]₂ - [z]₂) = e([p(τ)]₁ - [p(z)]₁, [1]₂)
# e([proof]₁, [τ]₂ - [z]₂) . e([p(τ)]₁ - [p(z)]₁, [-1]₂) = 1
#
# Finally
# e([proof]₁, [τ]₂ - [challenge]₂) . e([commitment]₁ - [eval_at_challenge]₁, [-1]₂) = 1
var
tau_minus_challenge_G2 {.noInit.}: ECP_ShortW_Jac[F2, G2]
commitment_minus_eval_at_challenge_G1 {.noInit.}: ECP_ShortW_Jac[Fp[C], G1]
negG2 {.noInit.}: ECP_ShortW_Aff[F2, G2]
tauG2Jac {.noInit.}: ECP_ShortW_Jac[F2, G2]
commitmentJac {.noInit.}: ECP_ShortW_Jac[Fp[C], G1]
tau_minus_challenge_G2.fromAffine(C.getGenerator("G2"))
commitment_minus_eval_at_challenge_G1.fromAffine(C.getGenerator("G1"))
negG2.neg(C.getGenerator("G2"))
tauG2Jac.fromAffine(tauG2)
commitmentJac.fromAffine(commitment)
tau_minus_challenge_G2.scalarMul(challenge)
tau_minus_challenge_G2.diff(tauG2Jac, tau_minus_challenge_G2)
commitment_minus_eval_at_challenge_G1.scalarMul(eval_at_challenge)
commitment_minus_eval_at_challenge_G1.diff(commitmentJac, commitment_minus_eval_at_challenge_G1)
var tmzG2 {.noInit.}: ECP_ShortW_Aff[F2, G2]
var cmyG1 {.noInit.}: ECP_ShortW_Aff[Fp[C], G1]
tmzG2.affine(tau_minus_challenge_G2)
cmyG1.affine(commitment_minus_eval_at_challenge_G1)
# e([proof]₁, [τ]₂ - [challenge]₂) * e([commitment]₁ - [eval_at_challenge]₁, [-1]₂)
var gt {.noInit.}: C.getGT()
gt.pairing([proof, cmyG1], [tmzG2, negG2])
return gt.isOne().bool()

View File

@ -50,15 +50,22 @@ export
type_ff.Fr,
type_ff.FF
func unmarshalBE*(dst: var FF, src: openarray[byte]) =
func unmarshalBE*(dst: var FF, src: openarray[byte]): bool =
## Return true on success
## Return false if destination is too small compared to source
var raw {.noInit.}: typeof dst.mres
raw.unmarshal(src, bigEndian)
let ok = raw.unmarshal(src, bigEndian)
if not ok:
return false
dst.fromBig(raw)
return true
func marshalBE*(dst: var openarray[byte], src: FF) =
func marshalBE*(dst: var openarray[byte], src: FF): bool =
## Return true on success
## Return false if destination is too small compared to source
var raw {.noInit.}: typeof src.mres
raw.fromField(src)
dst.marshal(src, bigEndian)
return dst.marshal(src, bigEndian)
export arithmetic.ccopy
export arithmetic.cswap

View File

@ -39,7 +39,6 @@
## in the blockchain consensus protocol, hence PopProve and PopVerify
## as defined in the IETF spec are not needed.
const DST = "BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"
const prefix_ffi = "ctt_eth_bls_"
# Dependencies exports for C FFI
@ -48,7 +47,7 @@ const prefix_ffi = "ctt_eth_bls_"
import ./zoo_exports
static:
# Xxport SHA256 routines with a protocol specific prefix
# Export SHA256 routines with a protocol specific prefix
# This exports sha256.init(), sha256.update(), sha256.finish() and sha256.clear()
prefix_sha256 = prefix_ffi & "sha256_"
@ -80,13 +79,19 @@ import
constants/zoo_subgroups
],
./math/io/[io_bigints, io_fields],
signatures/bls_signatures
signatures/bls_signatures,
serialization/codecs_bls12_381
export
abstractions, # generic sandwich on SecretBool and SecretBool in Jacobian sumImpl
curves, # generic sandwich on matchingBigInt
extension_fields, # generic sandwich on extension field access
ec_shortweierstrass # generic sandwich on affine
ec_shortweierstrass, # generic sandwich on affine
CttCodecScalarStatus,
CttCodecEccStatus
const DomainSeparationTag = asBytes"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"
# Protocol types
# ------------------------------------------------------------------------------------------------
@ -109,13 +114,7 @@ type
CttBLSStatus* = enum
cttBLS_Success
cttBLS_VerificationFailure
cttBLS_InvalidEncoding
cttBLS_CoordinateGreaterOrEqualThanModulus
cttBLS_PointAtInfinity
cttBLS_PointNotOnCurve
cttBLS_PointNotInSubgroup
cttBLS_ZeroSecretKey
cttBLS_SecretKeyLargerThanCurveOrder
cttBLS_ZeroLengthAggregation
cttBLS_InconsistentLengthsOfInputs
@ -141,263 +140,93 @@ func signatures_are_equal*(a, b: Signature): bool {.libPrefix: prefix_ffi.} =
# Input validation
# ------------------------------------------------------------------------------------------------
func validate_seckey*(secret_key: SecretKey): CttBLSStatus {.libPrefix: prefix_ffi.} =
func validate_seckey*(secret_key: SecretKey): CttCodecScalarStatus {.libPrefix: prefix_ffi.} =
## Validate the secret key.
## Regarding timing attacks, this will leak timing information only if the key is invalid.
## Namely, the secret key is 0 or the secret key is too large.
if secret_key.raw.isZero().bool():
return cttBLS_ZeroSecretKey
if bool(secret_key.raw >= BLS12_381.getCurveOrder()):
return cttBLS_SecretKeyLargerThanCurveOrder
return cttBLS_Success
return secret_key.raw.validate_scalar()
func validate_pubkey*(public_key: PublicKey): CttBLSStatus {.libPrefix: prefix_ffi.} =
func validate_pubkey*(public_key: PublicKey): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Validate the public key.
## This is an expensive operation that can be cached
if public_key.raw.isInf().bool():
return cttBLS_PointAtInfinity
if not isOnCurve(public_key.raw.x, public_key.raw.y, G1).bool():
return cttBLS_PointNotOnCurve
if not public_key.raw.isInSubgroup().bool():
return cttBLS_PointNotInSubgroup
return public_key.raw.validate_g1()
func validate_signature*(signature: Signature): CttBLSStatus {.libPrefix: prefix_ffi.} =
func validate_signature*(signature: Signature): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Validate the signature.
## This is an expensive operation that can be cached
if signature.raw.isInf().bool():
return cttBLS_PointAtInfinity
if not isOnCurve(signature.raw.x, signature.raw.y, G2).bool():
return cttBLS_PointNotOnCurve
if not signature.raw.isInSubgroup().bool():
return cttBLS_PointNotInSubgroup
return signature.raw.validate_g2()
# Codecs
# ------------------------------------------------------------------------------------------------
## BLS12-381 serialization
##
## 𝔽p elements are encoded in big-endian form. They occupy 48 bytes in this form.
## 𝔽p2 elements are encoded in big-endian form, meaning that the 𝔽p2 element c0+c1u
## is represented by the 𝔽p element c1 followed by the 𝔽p element c0.
## This means 𝔽p2 elements occupy 96 bytes in this form.
## The group 𝔾1 uses 𝔽p elements for coordinates. The group 𝔾2 uses 𝔽p2 elements for coordinates.
## 𝔾1 and 𝔾2 elements can be encoded in uncompressed form (the x-coordinate followed by the y-coordinate) or in compressed form (just the x-coordinate).
## 𝔾1 elements occupy 96 bytes in uncompressed form, and 48 bytes in compressed form.
## 𝔾2 elements occupy 192 bytes in uncompressed form, and 96 bytes in compressed form.
##
## The most-significant three bits of a 𝔾1 or 𝔾2 encoding should be masked away before the coordinate(s) are interpreted. These bits are used to unambiguously represent the underlying element:
##
## The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form.
## The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group elements encoding should be set to zero.
## The third-most significant bit is set if (and only if) this point is in compressed form
## and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate.
##
## - https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-05#appendix-A
## - https://docs.rs/bls12_381/latest/bls12_381/notes/serialization/index.html
## - https://github.com/zkcrypto/bls12_381/blob/0.6.0/src/notes/serialization.rs
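##
## Concretely, the three flag bits of the leading byte can be extracted with:
##
##   let isCompressed   = (src[0] and 0b10000000'u8) != 0
##   let isInfinity     = (src[0] and 0b01000000'u8) != 0
##   let isLexLargestY  = (src[0] and 0b00100000'u8) != 0
##
## so a compressed point at infinity starts with the byte 0b11000000 (0xC0)
## and all remaining bytes are zero.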
func serialize_seckey*(dst: var array[32, byte], secret_key: SecretKey): CttBLSStatus {.libPrefix: prefix_ffi.} =
func serialize_seckey*(dst: var array[32, byte], secret_key: SecretKey): CttCodecScalarStatus {.libPrefix: prefix_ffi.} =
## Serialize a secret key
## Returns cttBLS_Success if successful
dst.marshal(secret_key.raw, bigEndian)
return cttBLS_Success
## Returns cttCodecScalar_Success if successful
return dst.serialize_scalar(secret_key.raw)
func serialize_pubkey_compressed*(dst: var array[48, byte], public_key: PublicKey): CttBLSStatus {.libPrefix: prefix_ffi.} =
func serialize_pubkey_compressed*(dst: var array[48, byte], public_key: PublicKey): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Serialize a public key in compressed (Zcash) format
##
## Returns cttBLS_Success if successful
if public_key.raw.isInf().bool():
for i in 0 ..< dst.len:
dst[i] = byte 0
dst[0] = byte 0b11000000 # Compressed + Infinity
return cttBLS_Success
## Returns cttCodecEcc_Success if successful
return dst.serialize_g1_compressed(public_key.raw)
dst.marshal(public_key.raw.x, bigEndian)
# The curve equation has 2 solutions for y² = x³ + 4 with y unknown and x known
# The lexicographically largest will have bit 381 set to 1
# (and bit 383 for the compressed representation)
# The solutions are {y, p-y} hence the lexicographically largest is greater than p/2
# so with exact integers, as p is odd, greater or equal to (p+1)/2
let lexicographicallyLargest = byte(public_key.raw.y.toBig() >= Fp[BLS12_381].getPrimePlus1div2())
dst[0] = dst[0] or (0b10000000 or (lexicographicallyLargest shl 5))
return cttBLS_Success
func serialize_signature_compressed*(dst: var array[96, byte], signature: Signature): CttBLSStatus {.libPrefix: prefix_ffi.} =
func serialize_signature_compressed*(dst: var array[96, byte], signature: Signature): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Serialize a signature in compressed (Zcash) format
##
## Returns cttBLS_Success if successful
if signature.raw.isInf().bool():
for i in 0 ..< dst.len:
dst[i] = byte 0
dst[0] = byte 0b11000000 # Compressed + Infinity
return cttBLS_Success
return dst.serialize_g2_compressed(signature.raw)
dst.toOpenArray(0, 48-1).marshal(signature.raw.x.c1, bigEndian)
dst.toOpenArray(48, 96-1).marshal(signature.raw.x.c0, bigEndian)
let isLexicographicallyLargest =
if signature.raw.y.c1.isZero().bool():
byte(signature.raw.y.c0.toBig() >= Fp[BLS12_381].getPrimePlus1div2())
else:
byte(signature.raw.y.c1.toBig() >= Fp[BLS12_381].getPrimePlus1div2())
dst[0] = dst[0] or (byte 0b10000000 or (isLexicographicallyLargest shl 5))
return cttBLS_Success
func deserialize_seckey*(dst: var SecretKey, src: array[32, byte]): CttBLSStatus {.libPrefix: prefix_ffi.} =
func deserialize_seckey*(dst: var SecretKey, src: array[32, byte]): CttCodecScalarStatus {.libPrefix: prefix_ffi.} =
## Deserialize a secret key
## This also validates the secret key.
##
## This is protected against side-channels unless your key is invalid.
## In that case it will leak whether it's all zeros or larger than the curve order.
dst.raw.unmarshal(src, bigEndian)
let status = validate_seckey(dst)
if status != cttBLS_Success:
dst.raw.setZero()
return status
return cttBLS_Success
return dst.raw.deserialize_scalar(src)
func deserialize_pubkey_compressed_unchecked*(dst: var PublicKey, src: array[48, byte]): CttBLSStatus {.libPrefix: prefix_ffi.} =
func deserialize_pubkey_compressed_unchecked*(dst: var PublicKey, src: array[48, byte]): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Deserialize a public_key in compressed (Zcash) format.
##
## Warning ⚠:
## This procedure skips the very expensive subgroup checks.
## Not checking subgroup exposes a protocol to small subgroup attacks.
##
## Returns cttBLS_Success if successful
## Returns cttCodecEcc_Success if successful
return dst.raw.deserialize_g1_compressed_unchecked(src)
# src must have the compressed flag
if (src[0] and byte 0b10000000) == byte 0:
return cttBLS_InvalidEncoding
# if infinity, src must be all zeros
if (src[0] and byte 0b01000000) != 0:
if (src[0] and byte 0b00111111) != 0: # Check all the remaining bytes in MSB
return cttBLS_InvalidEncoding
for i in 1 ..< src.len:
if src[i] != byte 0:
return cttBLS_InvalidEncoding
dst.raw.setInf()
return cttBLS_PointAtInfinity
# General case
var t{.noInit.}: matchingBigInt(BLS12_381)
t.unmarshal(src, bigEndian)
t.limbs[t.limbs.len-1] = t.limbs[t.limbs.len-1] and (MaxWord shr 3) # The first 3 bytes contain metadata to mask out
if bool(t >= BLS12_381.Mod()):
return cttBLS_CoordinateGreaterOrEqualThanModulus
var x{.noInit.}: Fp[BLS12_381]
x.fromBig(t)
let onCurve = dst.raw.trySetFromCoordX(x)
if not(bool onCurve):
return cttBLS_PointNotOnCurve
let isLexicographicallyLargest = dst.raw.y.toBig() >= Fp[BLS12_381].getPrimePlus1div2()
let srcIsLargest = SecretBool((src[0] shr 5) and byte 1)
dst.raw.y.cneg(isLexicographicallyLargest xor srcIsLargest)
func deserialize_pubkey_compressed*(dst: var PublicKey, src: array[48, byte]): CttBLSStatus {.libPrefix: prefix_ffi.} =
func deserialize_pubkey_compressed*(dst: var PublicKey, src: array[48, byte]): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Deserialize a public_key in compressed (Zcash) format
## This also validates the public key.
##
## Returns cttBLS_Success if successful
## Returns cttCodecEcc_Success if successful
return dst.raw.deserialize_g1_compressed(src)
result = deserialize_pubkey_compressed_unchecked(dst, src)
if result != cttBLS_Success:
return result
if not(bool dst.raw.isInSubgroup()):
return cttBLS_PointNotInSubgroup
func deserialize_signature_compressed_unchecked*(dst: var Signature, src: array[96, byte]): CttBLSStatus {.libPrefix: prefix_ffi.} =
func deserialize_signature_compressed_unchecked*(dst: var Signature, src: array[96, byte]): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Deserialize a signature in compressed (Zcash) format.
##
## Warning ⚠:
## This procedure skips the very expensive subgroup checks.
## Not checking subgroup exposes a protocol to small subgroup attacks.
##
## Returns cttBLS_Success if successful
## Returns cttCodecEcc_Success if successful
return dst.raw.deserialize_g2_compressed_unchecked(src)
# src must have the compressed flag
if (src[0] and byte 0b10000000) == byte 0:
return cttBLS_InvalidEncoding
# if infinity, src must be all zeros
if (src[0] and byte 0b01000000) != 0:
if (src[0] and byte 0b00111111) != 0: # Check all the remaining bytes in MSB
return cttBLS_InvalidEncoding
for i in 1 ..< src.len:
if src[i] != byte 0:
return cttBLS_InvalidEncoding
dst.raw.setInf()
return cttBLS_PointAtInfinity
# General case
var t{.noInit.}: matchingBigInt(BLS12_381)
t.unmarshal(src.toOpenArray(0, 48-1), bigEndian)
t.limbs[t.limbs.len-1] = t.limbs[t.limbs.len-1] and (MaxWord shr 3) # The first 3 bytes contain metadata to mask out
if bool(t >= BLS12_381.Mod()):
return cttBLS_CoordinateGreaterOrEqualThanModulus
var x{.noInit.}: Fp2[BLS12_381]
x.c1.fromBig(t)
t.unmarshal(src.toOpenArray(48, 96-1), bigEndian)
if bool(t >= BLS12_381.Mod()):
return cttBLS_CoordinateGreaterOrEqualThanModulus
x.c0.fromBig(t)
let onCurve = dst.raw.trySetFromCoordX(x)
if not(bool onCurve):
return cttBLS_PointNotOnCurve
let isLexicographicallyLargest =
if dst.raw.y.c1.isZero().bool():
dst.raw.y.c0.toBig() >= Fp[BLS12_381].getPrimePlus1div2()
else:
dst.raw.y.c1.toBig() >= Fp[BLS12_381].getPrimePlus1div2()
let srcIsLargest = SecretBool((src[0] shr 5) and byte 1)
dst.raw.y.cneg(isLexicographicallyLargest xor srcIsLargest)
func deserialize_signature_compressed*(dst: var Signature, src: array[96, byte]): CttBLSStatus {.libPrefix: prefix_ffi.} =
func deserialize_signature_compressed*(dst: var Signature, src: array[96, byte]): CttCodecEccStatus {.libPrefix: prefix_ffi.} =
## Deserialize a signature in compressed (Zcash) format
##
## Returns cttBLS_Success if successful
result = deserialize_signature_compressed_unchecked(dst, src)
if result != cttBLS_Success:
return result
if not(bool dst.raw.isInSubgroup()):
return cttBLS_PointNotInSubgroup
## Returns cttCodecEcc_Success if successful
return dst.raw.deserialize_g2_compressed(src)
# BLS Signatures
# ------------------------------------------------------------------------------------------------
func derive_pubkey*(public_key: var PublicKey, secret_key: SecretKey): CttBLSStatus {.libPrefix: prefix_ffi.} =
func derive_pubkey*(public_key: var PublicKey, secret_key: SecretKey) {.libPrefix: prefix_ffi.} =
## Derive the public key matching with a secret key
##
## Secret protection:
## - A valid secret key will only leak that it is valid.
## - An invalid secret key will leak whether it's all zero or larger than the curve order.
let status = validate_seckey(secret_key)
if status != cttBLS_Success:
return status
## The secret_key MUST be validated
public_key.raw.derivePubkey(secret_key.raw)
let ok = public_key.raw.derivePubkey(secret_key.raw)
if not ok:
# This is unreachable since validate_seckey would have caught those
return cttBLS_InvalidEncoding
return cttBLS_Success
func sign*(signature: var Signature, secret_key: SecretKey, message: openArray[byte]): CttBLSStatus {.libPrefix: prefix_ffi, genCharAPI.} =
func sign*(signature: var Signature, secret_key: SecretKey, message: openArray[byte]) {.libPrefix: prefix_ffi, genCharAPI.} =
## Produce a signature for the message under the specified secret key
## Signature is on BLS12-381 G2 (and public key on G1)
##
@ -407,21 +236,12 @@ func sign*(signature: var Signature, secret_key: SecretKey, message: openArray[b
## - A secret key
## - A message
##
## The secret_key MUST be validated
##
## Output:
## - `signature` is overwritten with `message` signed with `secretKey`
## with the scheme
## - A status code indicating success or if the secret key is invalid.
##
## Secret protection:
## - A valid secret key will only leak that it is valid.
## - An invalid secret key will leak whether it's all zero or larger than the curve order.
let status = validate_seckey(secret_key)
if status != cttBLS_Success:
signature.raw.setInf()
return status
coreSign(signature.raw, secretKey.raw, message, sha256, 128, augmentation = "", DST)
return cttBLS_Success
coreSign(signature.raw, secretKey.raw, message, sha256, 128, augmentation = "", DomainSeparationTag)
func verify*(public_key: PublicKey, message: openArray[byte], signature: Signature): CttBLSStatus {.libPrefix: prefix_ffi, genCharAPI.} =
## Check that a signature is valid for a message
@ -447,7 +267,7 @@ func verify*(public_key: PublicKey, message: openArray[byte], signature: Signatu
if bool(public_key.raw.isInf() or signature.raw.isInf()):
return cttBLS_PointAtInfinity
let verified = coreVerify(public_key.raw, message, signature.raw, sha256, 128, augmentation = "", DST)
let verified = coreVerify(public_key.raw, message, signature.raw, sha256, 128, augmentation = "", DomainSeparationTag)
if verified:
return cttBLS_Success
return cttBLS_VerificationFailure
@ -509,7 +329,7 @@ func fast_aggregate_verify*(pubkeys: openArray[PublicKey], message: openArray[by
let verified = fastAggregateVerify(
pubkeys.unwrap(),
message, aggregate_sig.raw,
sha256, 128, DST)
sha256, 128, DomainSeparationTag)
if verified:
return cttBLS_Success
return cttBLS_VerificationFailure
@ -553,7 +373,7 @@ func aggregate_verify*(pubkeys: ptr UncheckedArray[PublicKey],
pubkeys.toOpenArray(len).unwrap(),
messages.toOpenArray(len),
aggregate_sig.raw,
sha256, 128, DST)
sha256, 128, DomainSeparationTag)
if verified:
return cttBLS_Success
return cttBLS_VerificationFailure
@ -596,7 +416,7 @@ func aggregate_verify*[Msg](pubkeys: openArray[PublicKey], messages: openArray[M
let verified = aggregateVerify(
pubkeys.unwrap(),
messages, aggregate_sig.raw,
sha256, 128, DST)
sha256, 128, DomainSeparationTag)
if verified:
return cttBLS_Success
return cttBLS_VerificationFailure
@ -647,7 +467,7 @@ func batch_verify*[Msg](pubkeys: ptr UncheckedArray[PublicKey],
pubkeys.toOpenArray(len).unwrap(),
messages,
signatures.toOpenArray(len).unwrap(),
sha256, 128, DST, secureRandomBytes)
sha256, 128, DomainSeparationTag, secureRandomBytes)
if verified:
return cttBLS_Success
return cttBLS_VerificationFailure
@ -697,7 +517,7 @@ func batch_verify*[Msg](pubkeys: openArray[PublicKey], messages: openarray[Msg],
pubkeys.unwrap(),
messages,
signatures.unwrap(),
sha256, 128, DST, secureRandomBytes)
sha256, 128, DomainSeparationTag, secureRandomBytes)
if verified:
return cttBLS_Success
return cttBLS_VerificationFailure

View File

@ -12,7 +12,8 @@ import
./math/config/[curves, type_ff],
./math/arithmetic/[bigints, limbs_montgomery],
./math/io/io_bigints,
./platforms/[primitives, endians]
./platforms/primitives,
./serialization/endians
# EIP2333: BLS12-381 Key Generation
# ------------------------------------------------------------
@ -51,7 +52,7 @@ func hkdf_mod_r(secretKey: var SecretKey, ikm: openArray[byte], key_info: openAr
# 6. OKM = HKDF-Expand(PRK, key_info || I2OSP(L, 2), L)
const L = 48
var okm{.noInit.}: array[L, byte]
const L_octetstring = L.uint16.toBytesBE()
const L_octetstring = L.uint16.toBytes(bigEndian)
ctx.hkdfExpand(okm, prk, key_info, append = L_octetstring, clearMem = true)
# 7. x = OS2IP(OKM) mod r
# We reduce mod r via Montgomery reduction, instead of bigint division
@ -105,7 +106,7 @@ func parent_SK_to_lamport_PK(
## from the parent SecretKey
# 0. salt = I2OSP(index, 4)
let salt{.noInit.} = index.toBytesBE()
let salt{.noInit.} = index.toBytes(bigEndian)
# 1. IKM = I2OSP(parent_SK, 32)
var ikm {.noinit.}: array[32, byte]

View File

@ -8,7 +8,8 @@
import
./platforms/abstractions,
./math/config/[curves, precompute],
./serialization/io_limbs,
./math/config/curves,
./math/[arithmetic, extension_fields],
./math/arithmetic/limbs_montgomery,
./math/ec_shortweierstrass,
@ -138,11 +139,8 @@ func eth_evm_ecadd*(r: var openArray[byte], inputs: openarray[byte]): CttEVMStat
var aff{.noInit.}: ECP_ShortW_Aff[Fp[BN254_Snarks], G1]
aff.affine(R)
r.toOpenArray(0, 31).marshal(
aff.x, bigEndian)
r.toOpenArray(32, 63).marshal(
aff.y, bigEndian)
r.toOpenArray(0, 31).marshal(aff.x, bigEndian)
r.toOpenArray(32, 63).marshal(aff.y, bigEndian)
return cttEVM_Success
func eth_evm_ecmul*(r: var openArray[byte], inputs: openarray[byte]): CttEVMStatus =
@ -211,11 +209,8 @@ func eth_evm_ecmul*(r: var openArray[byte], inputs: openarray[byte]): CttEVMStat
var aff{.noInit.}: ECP_ShortW_Aff[Fp[BN254_Snarks], G1]
aff.affine(P)
r.toOpenArray(0, 31).marshal(
aff.x, bigEndian)
r.toOpenArray(32, 63).marshal(
aff.y, bigEndian)
r.toOpenArray(0, 31).marshal(aff.x, bigEndian)
r.toOpenArray(32, 63).marshal(aff.y, bigEndian)
return cttEVM_Success
func subgroupCheck(P: ECP_ShortW_Aff[Fp2[BN254_Snarks], G2]): bool =

View File

@ -0,0 +1,308 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
math/config/curves,
math/io/io_bigints,
math/[ec_shortweierstrass, arithmetic, extension_fields],
math/arithmetic/limbs_montgomery,
math/elliptic/ec_multi_scalar_mul,
math/polynomials/polynomials,
commitments/kzg_polynomial_commitments,
hashes,
platforms/[abstractions, views, allocs],
serialization/[codecs_bls12_381, endians],
trusted_setups/ethereum_kzg_srs
export loadTrustedSetup, TrustedSetupStatus, EthereumKZGContext
## ############################################################
##
## KZG Polynomial Commitments for Ethereum
##
## ############################################################
##
## This module implements KZG Polynomial commitments (Kate, Zaverucha, Goldberg)
## for the Ethereum blockchain.
##
## References:
## - Ethereum spec:
## https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/polynomial-commitments.md
## - KZG Paper:
## Constant-Size Commitments to Polynomials and Their Applications
## Kate, Zaverucha, Goldberg, 2010
## https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf
## https://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
## - Audited reference implementation
## https://github.com/ethereum/c-kzg-4844
# Constants
# ------------------------------------------------------------
# Spec "ENDIANNESS" for deserialization is little-endian
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#misc
const BYTES_PER_COMMITMENT = 48
const BYTES_PER_PROOF = 48
const BYTES_PER_FIELD_ELEMENT = 32
# Presets
# ------------------------------------------------------------
const FIELD_ELEMENTS_PER_BLOB {.intdefine.} = 4096
const FIAT_SHAMIR_PROTOCOL_DOMAIN = asBytes"FSBLOBVERIFY_V1_"
const RANDOM_CHALLENGE_KZG_BATCH_DOMAIN = asBytes"RCKZGBATCH___V1_"
# Derived
# ------------------------------------------------------------
const BYTES_PER_BLOB = BYTES_PER_FIELD_ELEMENT*FIELD_ELEMENTS_PER_BLOB
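# With the default preset: 4096 * 32 = 131072 bytes (128 KiB) per blob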
# Protocol Types
# ------------------------------------------------------------
type
Blob* = array[BYTES_PER_BLOB, byte]
KZGCommitment* = object
raw: ECP_ShortW_Aff[Fp[BLS12_381], G1]
KZGProof* = object
raw: ECP_ShortW_Aff[Fp[BLS12_381], G1]
CttEthKzgStatus* = enum
cttEthKZG_Success
cttEthKZG_VerificationFailure
cttEthKZG_ScalarZero
cttEthKZG_ScalarLargerThanCurveOrder
cttEthKZG_EccInvalidEncoding
cttEthKZG_EccCoordinateGreaterThanOrEqualModulus
cttEthKZG_EccPointNotOnCurve
cttEthKZG_EccPointNotInSubGroup
# Fiat-Shamir challenges
# ------------------------------------------------------------
# https://en.wikipedia.org/wiki/Fiat%E2%80%93Shamir_heuristic
func fromDigest(dst: var Fr[BLS12_381], src: array[32, byte]) =
## Convert a SHA256 digest to an element in the scalar field Fr[BLS12-381]
## hash_to_bls_field: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/polynomial-commitments.md#hash_to_bls_field
var scalar {.noInit.}: BigInt[256]
scalar.unmarshal(src, littleEndian)
# Due to mismatch between the BigInt[256] input
# and Fr[BLS12_381] being built on top of BigInt[255]
# we use the low-level getMont instead of 'fromBig'
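# getMont(a, R², ...) computes a·R²·R⁻¹ ≡ a·R (mod r): it reduces the 256-bit
# digest modulo r and converts it to the Montgomery residue form used by Fr
# in a single step.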
getMont(dst.mres.limbs, scalar.limbs,
Fr[BLS12_381].fieldMod().limbs,
Fr[BLS12_381].getR2modP().limbs,
Fr[BLS12_381].getNegInvModWord(),
Fr[BLS12_381].getSpareBits())
func fiatShamirChallenge(dst: var Fr[BLS12_381], blob: Blob, commitmentBytes: array[BYTES_PER_COMMITMENT, byte]) =
## Compute a Fiat-Shamir challenge
## compute_challenge: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/polynomial-commitments.md#compute_challenge
var transcript {.noInit.}: sha256
transcript.init()
transcript.update(FIAT_SHAMIR_PROTOCOL_DOMAIN)
# Append the degree of polynomial as a domain separator
transcript.update(FIELD_ELEMENTS_PER_BLOB.uint64.toBytes(littleEndian))
transcript.update(default(array[16-sizeof(uint64), byte]))
transcript.update(blob)
transcript.update(commitmentBytes)
var challenge {.noInit.}: array[32, byte]
transcript.finish(challenge)
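# Transcript layout: "FSBLOBVERIFY_V1_" (16 bytes) ‖ FIELD_ELEMENTS_PER_BLOB as a
# little-endian uint64 zero-padded to 16 bytes ‖ blob ‖ serialized commitment.
# The 32-byte SHA256 digest is then mapped to Fr (hash_to_bls_field).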
dst.fromDigest(challenge)
func computePowers(dst: MutableView[Fr[BLS12_381]], base: Fr[BLS12_381]) =
## We need linearly independent random numbers
## for batch proof sampling.
## Powers are linearly independent.
## It's also likely faster than calling a fast RNG + modular reduction
## to be in 0 < number < curve_order
## since modular reduction needs modular multiplication anyway.
let N = dst.len
if N >= 1:
dst[0].setOne()
if N >= 2:
dst[1] = base
if N >= 3:
for i in 2 ..< N:
dst[i].prod(dst[i-1], base)
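# dst now holds [1, base, base², ..., base^(N-1)]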
# Conversion
# ------------------------------------------------------------
func bytes_to_bls_bigint(dst: var matchingOrderBigInt(BLS12_381), src: array[32, byte]): CttCodecScalarStatus =
## Convert untrusted bytes to a trusted and validated BLS scalar field element.
## This function does not accept inputs greater than the BLS modulus.
let status = dst.deserialize_scalar(src)
if status notin {cttCodecScalar_Success, cttCodecScalar_Zero}:
return status
return cttCodecScalar_Success
func bytes_to_bls_field(dst: var Fr[BLS12_381], src: array[32, byte]): CttCodecScalarStatus =
## Convert untrusted bytes to a trusted and validated BLS scalar field element.
## This function does not accept inputs greater than the BLS modulus.
var scalar {.noInit.}: matchingOrderBigInt(BLS12_381)
let status = scalar.deserialize_scalar(src)
if status notin {cttCodecScalar_Success, cttCodecScalar_Zero}:
return status
dst.fromBig(scalar)
return cttCodecScalar_Success
func bytes_to_kzg_commitment(dst: var KZGCommitment, src: array[48, byte]): CttCodecEccStatus =
## Convert untrusted bytes into a trusted and validated KZGCommitment.
let status = dst.raw.deserialize_g1_compressed(src)
if status == cttCodecEcc_PointAtInfinity:
# Point at infinity is allowed
return cttCodecEcc_Success
return status
func bytes_to_kzg_proof(dst: var KZGProof, src: array[48, byte]): CttCodecEccStatus =
## Convert untrusted bytes into a trusted and validated KZGProof.
let status = dst.raw.deserialize_g1_compressed(src)
if status == cttCodecEcc_PointAtInfinity:
# Point at infinity is allowed
return cttCodecEcc_Success
return status
func blob_to_bigint_polynomial(
dst: ptr PolynomialEval[FIELD_ELEMENTS_PER_BLOB, matchingOrderBigInt(BLS12_381)],
blob: ptr Blob): CttCodecScalarStatus =
## Convert a blob to a polynomial in evaluation form
static:
doAssert sizeof(dst[]) == sizeof(Blob)
doAssert sizeof(array[FIELD_ELEMENTS_PER_BLOB, array[32, byte]]) == sizeof(Blob)
let view = cast[ptr array[FIELD_ELEMENTS_PER_BLOB, array[32, byte]]](blob.unsafeAddr())
for i in 0 ..< FIELD_ELEMENTS_PER_BLOB:
let status = dst.evals[i].bytes_to_bls_bigint(view[i])
if status != cttCodecScalar_Success:
return status
return cttCodecScalar_Success
func blob_to_field_polynomial(
dst: ptr PolynomialEval[FIELD_ELEMENTS_PER_BLOB, Fr[BLS12_381]],
blob: ptr Blob): CttCodecScalarStatus =
## Convert a blob to a polynomial in evaluation form
static:
doAssert sizeof(dst[]) == sizeof(Blob)
doAssert sizeof(array[FIELD_ELEMENTS_PER_BLOB, array[32, byte]]) == sizeof(Blob)
let view = cast[ptr array[FIELD_ELEMENTS_PER_BLOB, array[32, byte]]](blob.unsafeAddr())
for i in 0 ..< FIELD_ELEMENTS_PER_BLOB:
let status = dst.evals[i].bytes_to_bls_field(view[i])
if status != cttCodecScalar_Success:
return status
return cttCodecScalar_Success
# Ethereum KZG public API
# ------------------------------------------------------------
template check(evalExpr: CttCodecScalarStatus): untyped =
block:
let status = evalExpr # Ensure single evaluation
case status
of cttCodecScalar_Success: discard
of cttCodecScalar_Zero: discard
of cttCodecScalar_ScalarLargerThanCurveOrder: return cttEthKZG_ScalarLargerThanCurveOrder
template check(evalExpr: CttCodecEccStatus): untyped =
block:
let status = evalExpr # Ensure single evaluation
case status
of cttCodecEcc_Success: discard
of cttCodecEcc_InvalidEncoding: return cttEthKZG_EccInvalidEncoding
of cttCodecEcc_CoordinateGreaterThanOrEqualModulus: return cttEthKZG_EccCoordinateGreaterThanOrEqualModulus
of cttCodecEcc_PointNotOnCurve: return cttEthKZG_EccPointNotOnCurve
of cttCodecEcc_PointNotInSubgroup: return cttEthKZG_EccPointNotInSubGroup
of cttCodecEcc_PointAtInfinity: discard
func blob_to_kzg_commitment*(
ctx: ptr EthereumKZGContext,
dst: var array[48, byte],
blob: ptr Blob): CttEthKzgStatus =
let poly = allocHeapAligned(PolynomialEval[FIELD_ELEMENTS_PER_BLOB, matchingOrderBigInt(BLS12_381)], 64)
let status = poly.blob_to_bigint_polynomial(blob)
if status == cttCodecScalar_Zero:
return cttEthKZG_ScalarZero
elif status == cttCodecScalar_ScalarLargerThanCurveOrder:
return cttEthKZG_ScalarLargerThanCurveOrder
var r {.noInit.}: ECP_ShortW_Jac[Fp[BLS12_381], G1]
r.multiScalarMul_vartime(poly.evals, ctx.srs_lagrange_g1)
var r_aff {.noinit.}: ECP_ShortW_Aff[Fp[BLS12_381], G1]
r_aff.affine(r)
discard dst.serialize_g1_compressed(r_aff)
freeHeap(poly)
return cttEthKZG_Success
func verify_kzg_proof*(
ctx: ptr EthereumKZGContext,
commitment_bytes: array[48, byte],
z_bytes: array[32, byte],
y_bytes: array[32, byte],
proof_bytes: array[48, byte]): CttEthKzgStatus =
## Verify KZG proof that p(z) == y where p(z) is the polynomial represented by "polynomial_kzg"
var commitment {.noInit.}: KZGCommitment
check commitment.bytes_to_kzg_commitment(commitment_bytes)
var challenge {.noInit.}: matchingOrderBigInt(BLS12_381)
check challenge.bytes_to_bls_bigint(z_bytes)
var eval_at_challenge {.noInit.}: matchingOrderBigInt(BLS12_381)
check eval_at_challenge.bytes_to_bls_bigint(y_bytes)
var proof {.noInit.}: KZGProof
check proof.bytes_to_kzg_proof(proof_bytes)
let verif = kzg_verify(commitment.raw, challenge, eval_at_challenge, proof.raw, ctx.srs_monomial_g2[1])
if verif:
return cttEthKZG_Success
else:
return cttEthKZG_VerificationFailure
# Ethereum Trusted Setup
# ------------------------------------------------------------
# Temporary workaround, hardcoding the testing trusted setups
# To be removed, no modules that use heap allocation are used at runtime
import std/[os, strutils]
const TrustedSetupMainnet =
currentSourcePath.rsplit(DirSep, 1)[0] /
"trusted_setups" /
"trusted_setup_ethereum_kzg_test_mainnet.tsif"
proc load_ethereum_kzg_test_trusted_setup_mainnet*(): ptr EthereumKZGContext =
## This is a convenience function for the Ethereum mainnet testing trusted setups.
## It is insecure and will be replaced once the KZG ceremony is done.
let ctx = allocHeapAligned(EthereumKZGContext, alignment = 64)
let tsStatus = ctx.loadTrustedSetup(TrustedSetupMainnet)
doAssert tsStatus == tsSuccess, "\n[Trusted Setup Error] " & $tsStatus
echo "Trusted Setup loaded successfully"
return ctx
proc delete*(ctx: ptr EthereumKZGContext) =
freeHeapAligned(ctx)
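# Usage sketch (illustrative only, with the API defined above; `blob` is a `var Blob`
# and `z_bytes`, `y_bytes`, `proof_bytes` are 32/32/48-byte inputs from the caller):
#
#   let ctx = load_ethereum_kzg_test_trusted_setup_mainnet()
#   var commitment: array[48, byte]
#   doAssert ctx.blob_to_kzg_commitment(commitment, blob.addr) == cttEthKZG_Success
#   doAssert ctx.verify_kzg_proof(commitment, z_bytes, y_bytes, proof_bytes) == cttEthKZG_Success
#   ctx.delete()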

View File

@ -8,7 +8,8 @@
import
# Internals
../platforms/[abstractions, endians, views],
../platforms/[abstractions, views],
../serialization/endians,
../hashes,
../math/io/[io_bigints, io_fields],
../math/config/curves,

View File

@ -9,7 +9,8 @@
import ../zoo_exports
import
../platforms/[abstractions, endians, views],
../platforms/[abstractions, views],
../serialization/endians,
./sha256/sha256_generic
when UseASM_X86_32:

View File

@ -742,7 +742,7 @@ func batchedDivsteps_vartime(
while true:
# Count zeros up to bitsLeft and process a batch of divsteps up to that number
let zeros = (g.BaseType or (1.BaseType shl bitsLeft)).countTrailingZeroBits_vartime()
let zeros = (BaseType(g) or (BaseType(1) shl bitsLeft)).countTrailingZeroBits_vartime()
g = g shr zeros
u = u shl zeros
v = v shl zeros

View File

@ -409,7 +409,7 @@ func primeMinus3div4_BE*[bits: static int](
discard tmp.sub(3)
tmp.shiftRight(2)
result.marshal(tmp, bigEndian)
discard result.marshal(tmp, bigEndian)
func primeMinus5div8_BE*[bits: static int](
P: BigInt[bits]
@ -428,7 +428,7 @@ func primeMinus5div8_BE*[bits: static int](
discard tmp.sub(5)
tmp.shiftRight(3)
result.marshal(tmp, bigEndian)
discard result.marshal(tmp, bigEndian)
# ############################################################
#

View File

@ -442,6 +442,16 @@ func multiScalarMul_dispatch_vartime[bits: static int, F, G](
else:
unreachable()
func multiScalarMul_vartime*[bits: static int, F, G](
r: var ECP_ShortW[F, G],
coefs: ptr UncheckedArray[BigInt[bits]],
points: ptr UncheckedArray[ECP_ShortW_Aff[F, G]],
len: int) {.tags:[VarTime, Alloca, HeapAlloc], meter.} =
## Multiscalar multiplication:
## r <- [a₀]P₀ + [a₁]P₁ + ... + [aₙ]Pₙ
multiScalarMul_dispatch_vartime(r, coefs, points, len)
func multiScalarMul_vartime*[bits: static int, F, G](
r: var ECP_ShortW[F, G],
coefs: openArray[BigInt[bits]],

View File

@ -645,10 +645,17 @@ func `-=`*(P: var ECP_ShortW_Jac, Q: ECP_ShortW_Aff) {.inline.} =
nQ.neg(Q)
P.madd(P, nQ)
# Conversions
# -----------
template affine*[F, G](_: type ECP_ShortW_Jac[F, G]): typedesc =
## Returns the affine type that corresponds to the Jacobian type input
ECP_ShortW_Aff[F, G]
template jacobian*[F, G](_: type ECP_ShortW_Aff[F, G]): typedesc =
## Returns the jacobian type that corresponds to the affine type input
ECP_ShortW_Jac[F, G]
func affine*[F; G](
aff: var ECP_ShortW_Aff[F, G],
jac: ECP_ShortW_Jac[F, G]) {.meter.} =

View File

@ -439,6 +439,11 @@ template affine*[F, G](_: type ECP_ShortW_Prj[F, G]): typedesc =
## Returns the affine type that corresponds to the projective type input
ECP_ShortW_Aff[F, G]
template projective*[F, G](_: type ECP_ShortW_Aff[F, G]): typedesc =
## Returns the projective type that corresponds to the affine type input
ECP_ShortW_Prj[F, G]
func affine*[F, G](
aff: var ECP_ShortW_Aff[F, G],
proj: ECP_ShortW_Prj[F, G]) {.meter.} =

View File

@ -11,7 +11,8 @@
# - Burning memory to ensure secrets are not left after dealloc.
import
../../platforms/[abstractions, endians, codecs],
../../platforms/abstractions,
../../serialization/[codecs, io_limbs],
../arithmetic/bigints,
../config/type_bigint
@ -28,129 +29,10 @@ export BigInt, wordsRequired
# that contains secret data
{.push raises: [], checks: off.}
# Note: the parsing/serialization routines were initially developed
# with an internal representation that used 31 bits out of a uint32
# or 63-bits out of an uint64
# TODO: the in-place API should return a bool
# to indicate success.
# the out-of-place APIs are for configuration,
# prototyping, research and debugging purposes,
# and can use exceptions.
func unmarshalLE[T](
dst: var openArray[T],
src: openarray[byte],
wordBitWidth: static int) =
## Parse an unsigned integer from its canonical
## little-endian unsigned representation
## and store it into a BigInt
##
## Constant-Time:
## - no leaks
##
## Can work at compile-time
##
## It is possible to use a 63-bit representation out of a 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
# TODO: error on destination too small
var
dst_idx = 0
acc = T(0)
acc_len = 0
for src_idx in 0 ..< src.len:
let src_byte = T(src[src_idx])
# buffer reads
acc = acc or (src_byte shl acc_len)
acc_len += 8 # We count bit by bit
# if full, dump
if acc_len >= wordBitWidth:
dst[dst_idx] = acc
inc dst_idx
acc_len -= wordBitWidth
acc = src_byte shr (8 - acc_len)
if dst_idx < dst.len:
dst[dst_idx] = acc
for i in dst_idx + 1 ..< dst.len:
dst[i] = T(0)
func unmarshalBE[T](
dst: var openArray[T],
src: openarray[byte],
wordBitWidth: static int) =
## Parse an unsigned integer from its canonical
## big-endian unsigned representation (octet string)
## and store it into a BigInt.
##
## In cryptography specifications, this is often called
## "Octet string to Integer"
##
## Constant-Time:
## - no leaks
##
## Can work at compile-time
##
## It is possible to use a 63-bit representation out of a 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
var
dst_idx = 0
acc = T(0)
acc_len = 0
const wordBitWidth = sizeof(T) * 8
for src_idx in countdown(src.len-1, 0):
let src_byte = T(src[src_idx])
# buffer reads
acc = acc or (src_byte shl acc_len)
acc_len += 8 # We count bit by bit
# if full, dump
if acc_len >= wordBitWidth:
dst[dst_idx] = acc
inc dst_idx
acc_len -= wordBitWidth
acc = src_byte shr (8 - acc_len)
if dst_idx < dst.len:
dst[dst_idx] = acc
for i in dst_idx + 1 ..< dst.len:
dst[i] = T(0)
func unmarshal*[T](
dst: var openArray[T],
src: openarray[byte],
wordBitWidth: static int,
srcEndianness: static Endianness) {.inline.} =
## Parse an unsigned integer from its canonical
## big-endian or little-endian unsigned representation
##
## Constant-Time:
## - no leaks
##
## Can work at compile-time to embed curve moduli
## from a canonical integer representation
when srcEndianness == littleEndian:
dst.unmarshalLE(src, wordBitWidth)
else:
dst.unmarshalBE(src, wordBitWidth)
func unmarshal*(
dst: var BigInt,
src: openarray[byte],
srcEndianness: static Endianness) {.inline.}=
srcEndianness: static Endianness): bool {.discardable, inline.} =
## Parse an unsigned integer from its canonical
## big-endian or little-endian unsigned representation
## And store it into a BigInt of size `bits`
@ -160,12 +42,26 @@ func unmarshal*(
##
## Can work at compile-time to embed curve moduli
## from a canonical integer representation
dst.limbs.unmarshal(src, WordBitWidth, srcEndianness)
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
##
## As this is used internally to build serialization primitives
## we assume that the buffer is properly-sized
## and hence the result is discardable
debug:
doAssert BigInt.bits.ceilDiv_vartime(8) >= src.len, block:
"Raw int -> BigInt conversion: destination buffer is too small\n" &
" bits: " & $BigInt.bits & "\n" &
" input bytes: " & $src.len & '\n'
return dst.limbs.unmarshal(src, WordBitWidth, srcEndianness)
func unmarshal*(
T: type BigInt,
src: openarray[byte],
srcEndianness: static Endianness): T {.inline.}=
srcEndianness: static Endianness): T {.inline.} =
## Parse an unsigned integer from its canonical
## big-endian or little-endian unsigned representation
## And store it into a BigInt of size `bits`
@ -175,21 +71,21 @@ func unmarshal*(
##
## Can work at compile-time to embed curve moduli
## from a canonical integer representation
result.limbs.unmarshal(src, WordBitWidth, srcEndianness)
discard result.limbs.unmarshal(src, WordBitWidth, srcEndianness)
func fromUint*(
T: type BigInt,
src: SomeUnsignedInt): T {.inline.}=
src: SomeUnsignedInt): T {.inline.} =
## Parse a regular unsigned integer
## and store it into a BigInt of size `bits`
result.unmarshal(cast[array[sizeof(src), byte]](src), cpuEndian)
discard result.unmarshal(cast[array[sizeof(src), byte]](src), cpuEndian)
func fromUint*(
dst: var BigInt,
src: SomeUnsignedInt) {.inline.}=
src: SomeUnsignedInt) {.inline.} =
## Parse a regular unsigned integer
## and store it into a BigInt of size `bits`
dst.unmarshal(cast[array[sizeof(src), byte]](src), cpuEndian)
discard dst.unmarshal(cast[array[sizeof(src), byte]](src), cpuEndian)
# ############################################################
#
@ -197,153 +93,10 @@ func fromUint*(
#
# ############################################################
func marshalLE[T](
dst: var openarray[byte],
src: openArray[T],
wordBitWidth: static int) =
## Serialize a bigint into its canonical little-endian representation
## I.e least significant bit first
##
## It is possible to use a 63-bit representation out of a 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
var
src_idx, dst_idx = 0
acc_len = 0
when sizeof(T) == 8:
type BT = uint64
elif sizeof(T) == 4:
type BT = uint32
else:
{.error "Unsupported word size uint" & $(sizeof(T) * 8).}
var acc = BT(0)
var tail = dst.len
while tail > 0:
let w = if src_idx < src.len: BT(src[src_idx])
else: 0
inc src_idx
if acc_len == 0:
# We need to refill the buffer to output 64-bit
acc = w
acc_len = wordBitWidth
else:
when wordBitWidth == sizeof(T) * 8:
let lo = acc
acc = w
else: # If using 63-bit (or less) out of uint64
let lo = (w shl acc_len) or acc
dec acc_len
acc = w shr (wordBitWidth - acc_len)
if tail >= sizeof(T):
# Unrolled copy
dst.blobFrom(src = lo, dst_idx, littleEndian)
dst_idx += sizeof(T)
tail -= sizeof(T)
else:
# Process the tail and exit
when cpuEndian == littleEndian:
# When requesting little-endian on little-endian platform
# we can just copy each byte
# tail is inclusive
for i in 0 ..< tail:
dst[dst_idx+i] = toByte(lo shr (i*8))
else: # TODO check this
# We need to copy from the end
for i in 0 ..< tail:
dst[dst_idx+i] = toByte(lo shr ((tail-i)*8))
return
func marshalBE[T](
dst: var openarray[byte],
src: openArray[T],
wordBitWidth: static int) =
## Serialize a bigint into its canonical big-endian representation
## (octet string)
## I.e most significant bit first
##
## In cryptography specifications, this is often called
## "Octet string to Integer"
##
## It is possible to use a 63-bit representation out of a 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
var
src_idx = 0
acc_len = 0
when sizeof(T) == 8:
type BT = uint64
elif sizeof(T) == 4:
type BT = uint32
else:
{.error "Unsupported word size uint" & $(sizeof(T) * 8).}
var acc = BT(0)
var tail = dst.len
while tail > 0:
let w = if src_idx < src.len: BT(src[src_idx])
else: 0
inc src_idx
if acc_len == 0:
# We need to refill the buffer to output 64-bit
acc = w
acc_len = wordBitWidth
else:
when wordBitWidth == sizeof(T) * 8:
let lo = acc
acc = w
else: # If using 63-bit (or less) out of uint64
let lo = (w shl acc_len) or acc
dec acc_len
acc = w shr (wordBitWidth - acc_len)
if tail >= sizeof(T):
# Unrolled copy
tail -= sizeof(T)
dst.blobFrom(src = lo, tail, bigEndian)
else:
# Process the tail and exit
when cpuEndian == littleEndian:
# When requesting little-endian on little-endian platform
# we can just copy each byte
# tail is inclusive
for i in 0 ..< tail:
dst[tail-1-i] = toByte(lo shr (i*8))
else: # TODO check this
# We need to copy from the end
for i in 0 ..< tail:
dst[tail-1-i] = toByte(lo shr ((tail-i)*8))
return
func marshal*[T](
dst: var openArray[byte],
src: openArray[T],
wordBitWidth: static int,
dstEndianness: static Endianness) {.inline.} =
## Serialize a bigint into its canonical big-endian or little endian
## representation.
##
## If the buffer is bigger, output will be zero-padded left for big-endian
## or zero-padded right for little-endian.
## I.e least significant bit is aligned to buffer boundary
when dstEndianness == littleEndian:
marshalLE(dst, src, wordBitWidth)
else:
marshalBE(dst, src, wordBitWidth)
func marshal*(
dst: var openArray[byte],
src: BigInt,
dstEndianness: static Endianness) {.inline.} =
dstEndianness: static Endianness): bool {.discardable, inline.} =
## Serialize a bigint into its canonical big-endian or little endian
## representation.
## A destination buffer of size "(BigInt.bits + 7) div 8" at minimum is needed,
@ -352,6 +105,13 @@ func marshal*(
## If the buffer is bigger, output will be zero-padded left for big-endian
## or zero-padded right for little-endian.
## I.e least significant bit is aligned to buffer boundary
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
##
## As this is used internally to build serialization primitives
## we assume that the buffer is properly-sized
## and hence the result is discardable
debug:
doAssert dst.len >= BigInt.bits.ceilDiv_vartime(8), block:
"BigInt -> Raw int conversion: destination buffer is too small\n" &
@ -361,7 +121,7 @@ func marshal*(
when BigInt.bits == 0:
zeroMem(dst, dst.len)
dst.marshal(src.limbs, WordBitWidth, dstEndianness)
return dst.marshal(src.limbs, WordBitWidth, dstEndianness)
{.pop.} # {.push raises: [].}
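# Minimal round-trip sketch (illustrative; the 256-bit size and 32-byte buffer are assumptions):
# the in-place APIs now report success so callers parsing untrusted input can check it,
# while internal callers with known-good sizes may discard the result.
when isMainModule:
  var x: BigInt[256]
  var raw: array[32, byte]
  doAssert x.unmarshal(raw, bigEndian)  # destination large enough -> true
  doAssert raw.marshal(x, bigEndian)    # buffer large enough -> true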

View File

@ -48,7 +48,7 @@ func fromInt*(dst: var FF,
func marshal*(dst: var openarray[byte],
src: FF,
dstEndianness: static Endianness) =
dstEndianness: static Endianness): bool {.discardable.} =
## Serialize a finite field element to its canonical big-endian or little-endian
## representation
## With `bits` the number of bits of the field modulus

View File

@ -622,7 +622,7 @@ func fromFpk*[Fpkdiv6, Fpk](
#
# with z = SNR¹ᐟ⁶
#
# The cubic over quadatric towering
# The cubic over quadratic towering
# ---------------------------------
#
# (a₀ + a₁ u) + (a₂ + a₃u) v + (a₄ + a₅u) v²

View File

@ -0,0 +1,460 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
../config/curves,
../arithmetic,
../ec_shortweierstrass,
../elliptic/ec_scalar_mul_vartime,
../../platforms/[abstractions, allocs, views]
# ############################################################
#
# Fast Fourier Transform
#
# ############################################################
# Elliptic curve Fast Fourier Transform
# ----------------------------------------------------------------
type
FFTStatus* = enum
FFTS_Success
FFTS_TooManyValues = "Input length greater than the field 2-adicity (number of roots of unity)"
FFTS_SizeNotPowerOfTwo = "Input must be of a power of 2 length"
ECFFT_Descriptor*[EC] = object
## Metadata for FFT on Elliptic Curve
order*: int
rootsOfUnity*: ptr UncheckedArray[matchingOrderBigInt(EC.F.C)]
## domain, starting and ending with 1, length is cardinality+1
## This allows FFT and inverse FFT to use the same buffer for roots.
func computeRootsOfUnity[EC](ctx: var ECFFT_Descriptor[EC], generatorRootOfUnity: auto) =
static: doAssert typeof(generatorRootOfUnity) is Fr[EC.F.C]
ctx.rootsOfUnity[0].setOne()
var cur = generatorRootOfUnity
for i in 1 .. ctx.order:
ctx.rootsOfUnity[i].fromField(cur)
cur *= generatorRootOfUnity
doAssert ctx.rootsOfUnity[ctx.order].isOne().bool()
func new(T: type ECFFT_Descriptor, order: int, generatorRootOfUnity: auto): T =
result.order = order
result.rootsOfUnity = allocHeapArrayAligned(matchingOrderBigInt(T.EC.F.C), order+1, alignment = 64)
result.computeRootsOfUnity(generatorRootOfUnity)
func simpleFT[EC; bits: static int](
output: var StridedView[EC],
vals: StridedView[EC],
rootsOfUnity: StridedView[BigInt[bits]]) =
# FFT is a recursive algorithm
# This is the base-case using a O(n²) algorithm
# TODO: endomorphism acceleration for windowed-NAF
let L = output.len
var last {.noInit.}, v {.noInit.}: EC
for i in 0 ..< L:
last = vals[0]
last.scalarMul_minHammingWeight_windowed_vartime(rootsOfUnity[0], window = 5)
for j in 1 ..< L:
v = vals[j]
v.scalarMul_minHammingWeight_windowed_vartime(rootsOfUnity[(i*j) mod L], window = 5)
last += v
output[i] = last
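# In equation form, the base case above is the direct DFT over the curve group:
#   outputᵢ = ∑ⱼ [roots[(i·j) mod L]] · valsⱼ    for i, j in [0, L)
# every term being a variable-time scalar multiplication of an EC point.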
func fft_internal[EC; bits: static int](
output: var StridedView[EC],
vals: StridedView[EC],
rootsOfUnity: StridedView[BigInt[bits]]) =
if output.len <= 4:
simpleFT(output, vals, rootsOfUnity)
return
# Recursive Divide-and-Conquer
let (evenVals, oddVals) = vals.splitAlternate()
var (outLeft, outRight) = output.splitMiddle()
let halfROI = rootsOfUnity.skipHalf()
fft_internal(outLeft, evenVals, halfROI)
fft_internal(outRight, oddVals, halfROI)
let half = outLeft.len
var y_times_root{.noinit.}: EC
for i in 0 ..< half:
# FFT Butterfly
y_times_root = output[i+half]
y_times_root .scalarMul_minHammingWeight_windowed_vartime(rootsOfUnity[i], window = 5)
output[i+half] .diff(output[i], y_times_root)
output[i] += y_times_root
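# Butterfly recombination in equation form, with E/O the transforms of the
# even/odd-indexed halves computed by the recursive calls above:
#   output[i]        ← E[i] + [ωⁱ] O[i]
#   output[i + half] ← E[i] − [ωⁱ] O[i]
# i.e. the standard radix-2 Cooley-Tukey step, lifted to elliptic curve points.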
func fft*[EC](
desc: ECFFT_Descriptor[EC],
output: var openarray[EC],
vals: openarray[EC]): FFT_Status =
if vals.len > desc.order:
return FFTS_TooManyValues
if not vals.len.uint64.isPowerOf2_vartime():
return FFTS_SizeNotPowerOfTwo
let rootz = desc.rootsOfUnity
.toStridedView(desc.order)
.slice(0, desc.order-1, desc.order div vals.len)
var voutput = output.toStridedView()
fft_internal(voutput, vals.toStridedView(), rootz)
return FFTS_Success
func ifft*[EC](
desc: ECFFT_Descriptor[EC],
output: var openarray[EC],
vals: openarray[EC]): FFT_Status =
## Inverse FFT
if vals.len > desc.order:
return FFTS_TooManyValues
if not vals.len.uint64.isPowerOf2_vartime():
return FFTS_SizeNotPowerOfTwo
let rootz = desc.rootsOfUnity
.toStridedView(desc.order+1) # Extra 1 at the end so that when reversed the buffer starts with 1
.reversed()
.slice(0, desc.order-1, desc.order div vals.len)
var voutput = output.toStridedView()
fft_internal(voutput, vals.toStridedView(), rootz)
var invLen {.noInit.}: Fr[EC.F.C]
invLen.fromUint(vals.len.uint64)
invLen.inv_vartime()
let inv = invLen.toBig()
for i in 0 ..< output.len:
output[i].scalarMul_minHammingWeight_windowed_vartime(inv, window = 5)
return FFTS_Success
# ############################################################
#
# Bit reversal permutations
#
# ############################################################
# - Towards an Optimal Bit-Reversal Permutation Program
# Larry Carter and Kang Su Gatlin, 1998
# https://csaws.cs.technion.ac.il/~itai/Courses/Cache/bit.pdf
#
# - Practically efficient methods for performing bit-reversed
# permutation in C++11 on the x86-64 architecture
# Knauth, Adas, Whitfield, Wang, Ickler, Conrad, Serang, 2017
# https://arxiv.org/pdf/1708.01873.pdf
func optimalLogTileSize(T: type): uint =
## Returns the optimal log of the tile size
## depending on the type and common L1 cache size
# `lscpu` can return desired cache values.
# We underestimate modern cache sizes so that performance is good even on older architectures.
# 1. Derive ideal size depending on the type
const cacheLine = 64'u # Size of a cache line
const l1Size = 32'u * 1024 # Size of L1 cache
const elems_per_cacheline = max(1'u, cacheLine div T.sizeof().uint)
var q = l1Size div T.sizeof().uint
q = q div 2 # use only half of the cache, this limits cache eviction, especially with hyperthreading.
q = q.nextPowerOfTwo_vartime().log2_vartime()
q = q div 2 # 2²𐞥 should be smaller than the cache
# If the cache line can accommodate spare elements
# increment the tile size
while 1'u shl q < elems_per_cacheline:
q += 1
return q
func deriveLogTileSize(T: type, logN: uint): uint =
## Returns the log of the tile size
# 1. Compute the optimal tile size
# type typ = T # Workaround "cannot evaluate at compile-time"
# var q = static(optimalLogTileSize(T)) # crashes the compiler in Error: internal error: nightlies/nim-1.6.12/compiler/semtypes.nim(1921, 22)
var q = optimalLogTileSize(T)
# 2. We want to ensure logN - 2*q > 0
while int(logN) - int(q+q) < 0:
q -= 1
return q
func bit_reversal_permutation*[T](buf: var openArray[T]) =
## In-place bit reversal permutation using a cache-blocking algorithm
#
# We adapt the following out-of-place algorithm to in-place.
#
# for b = 0 to 2ˆ(lgN-2q) - 1
# b = r(b)
# for a = 0 to 2ˆq - 1
# a = r(a)
# for c = 0 to 2ˆq - 1
# T[ac] = A[abc]
#
# for c = 0 to 2ˆq - 1
# c = r(c) <- Note: typo in paper, they say c'=r(a)
# for a = 0 to 2ˆq - 1
# B[cba] = T[ac]
#
# As we are in-place, A and B refer to the same buffer and
# we don't want to destructively write to B.
# Instead we swap B and T to save the overwritten slot.
#
# Due to bitreversal being an involution, we can redo the first loop
# to place the overwritten data in their correct slot.
#
# Hence
#
# for b = 0 to 2ˆ(lgN-2q) - 1
# b = r(b)
# for a = 0 to 2ˆq - 1
# a = r(a)
# for c = 0 to 2ˆq - 1
# T[ac] = A[abc]
#
# for c = 0 to 2ˆq - 1
# c = r(c)
# for a = 0 to 2ˆq - 1
# if abc < c'b'a'
# swap(A[cba], T[ac])
#
# for a = 0 to 2ˆq - 1
# a = r(a)
# for c = 0 to 2ˆq - 1
# c = r(c)
# if abc < c'b'a'
# swap(A[abc], T[ac])
debug: doAssert buf.len.uint.isPowerOf2_vartime()
let logN = log2_vartime(uint buf.len)
let logTileSize = deriveLogTileSize(T, logN)
let logBLen = logN - 2*logTileSize
let bLen = 1'u shl logBlen
let tileSize = 1'u shl logTileSize
let t = allocHeapArray(T, tileSize*tileSize)
for b in 0'u ..< bLen:
let bRev = reverseBits(b, logBLen)
for a in 0'u ..< tileSize:
let aRev = reverseBits(a, logTileSize)
for c in 0'u ..< tileSize:
# T[ac] = A[abc]
let tIdx = (aRev shl logTileSize) or c
let idx = (a shl (logBLen+logTileSize)) or
(b shl logTileSize) or c
t[tIdx] = buf[idx]
for c in 0'u ..< tileSize:
let cRev = reverseBits(c, logTileSize)
for aRev in 0'u ..< tileSize:
let a = reverseBits(aRev, logTileSize)
let idx = (a shl (logBLen+logTileSize)) or
(b shl logTileSize) or c
let idxRev = (cRev shl (logBLen+logTileSize)) or
(bRev shl logTileSize) or aRev
if idx < idxRev:
let tIdx = (aRev shl logTileSize) or c
swap(buf[idxRev], t[tIdx])
for a in 0'u ..< tileSize:
let aRev = reverseBits(a, logTileSize)
for c in 0'u ..< tileSize:
let cRev = reverseBits(c, logTileSize)
let idx = (a shl (logBLen+logTileSize)) or
(b shl logTileSize) or c
let idxRev = (cRev shl (logBLen+logTileSize)) or
(bRev shl logTileSize) or aRev
if idx < idxRev:
let tIdx = (aRev shl logTileSize) or c
swap(buf[idx], t[tIdx])
freeHeap(t)
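# Reference behaviour this cache-blocked version must match (naive, out-of-place sketch):
#   for i in 0 ..< buf.len:
#     permuted[i] = buf[int reverseBits(uint64 i, uint64 logN)]
# e.g. for N = 8: [a₀,a₁,a₂,a₃,a₄,a₅,a₆,a₇] ↦ [a₀,a₄,a₂,a₆,a₁,a₅,a₃,a₇]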
# ############################################################
#
# Sanity checks
#
# ############################################################
when isMainModule:
import
std/[times, monotimes, strformat],
../../../helpers/prng_unsafe,
../constants/zoo_generators,
../io/[io_fields, io_ec],
../../platforms/static_for
const ctt_eth_kzg_fr_pow2_roots_of_unity = [
# primitive_root⁽ᵐᵒᵈᵘˡᵘˢ⁻¹⁾/⁽²^ⁱ⁾ for i in [0, 32)
# The primitive root chosen is 7
Fr[BLS12_381].fromHex"0x1",
Fr[BLS12_381].fromHex"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000",
Fr[BLS12_381].fromHex"0x8d51ccce760304d0ec030002760300000001000000000000",
Fr[BLS12_381].fromHex"0x345766f603fa66e78c0625cd70d77ce2b38b21c28713b7007228fd3397743f7a",
Fr[BLS12_381].fromHex"0x20b1ce9140267af9dd1c0af834cec32c17beb312f20b6f7653ea61d87742bcce",
Fr[BLS12_381].fromHex"0x50e0903a157988bab4bcd40e22f55448bf6e88fb4c38fb8a360c60997369df4e",
Fr[BLS12_381].fromHex"0x45af6345ec055e4d14a1e27164d8fdbd2d967f4be2f951558140d032f0a9ee53",
Fr[BLS12_381].fromHex"0x6898111413588742b7c68b4d7fdd60d098d0caac87f5713c5130c2c1660125be",
Fr[BLS12_381].fromHex"0x4f9b4098e2e9f12e6b368121ac0cf4ad0a0865a899e8deff4935bd2f817f694b",
Fr[BLS12_381].fromHex"0x95166525526a65439feec240d80689fd697168a3a6000fe4541b8ff2ee0434e",
Fr[BLS12_381].fromHex"0x325db5c3debf77a18f4de02c0f776af3ea437f9626fc085e3c28d666a5c2d854",
Fr[BLS12_381].fromHex"0x6d031f1b5c49c83409f1ca610a08f16655ea6811be9c622d4a838b5d59cd79e5",
Fr[BLS12_381].fromHex"0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306",
Fr[BLS12_381].fromHex"0x485d512737b1da3d2ccddea2972e89ed146b58bc434906ac6fdd00bfc78c8967",
Fr[BLS12_381].fromHex"0x56624634b500a166dc86b01c0d477fa6ae4622f6a9152435034d2ff22a5ad9e1",
Fr[BLS12_381].fromHex"0x3291357ee558b50d483405417a0cbe39c8d5f51db3f32699fbd047e11279bb6e",
Fr[BLS12_381].fromHex"0x2155379d12180caa88f39a78f1aeb57867a665ae1fcadc91d7118f85cd96b8ad",
Fr[BLS12_381].fromHex"0x224262332d8acbf4473a2eef772c33d6cd7f2bd6d0711b7d08692405f3b70f10",
Fr[BLS12_381].fromHex"0x2d3056a530794f01652f717ae1c34bb0bb97a3bf30ce40fd6f421a7d8ef674fb",
Fr[BLS12_381].fromHex"0x520e587a724a6955df625e80d0adef90ad8e16e84419c750194e8c62ecb38d9d",
Fr[BLS12_381].fromHex"0x3e1c54bcb947035a57a6e07cb98de4a2f69e02d265e09d9fece7e0e39898d4b",
Fr[BLS12_381].fromHex"0x47c8b5817018af4fc70d0874b0691d4e46b3105f04db5844cd3979122d3ea03a",
Fr[BLS12_381].fromHex"0xabe6a5e5abcaa32f2d38f10fbb8d1bbe08fec7c86389beec6e7a6ffb08e3363",
Fr[BLS12_381].fromHex"0x73560252aa0655b25121af06a3b51e3cc631ffb2585a72db5616c57de0ec9eae",
Fr[BLS12_381].fromHex"0x291cf6d68823e6876e0bcd91ee76273072cf6a8029b7d7bc92cf4deb77bd779c",
Fr[BLS12_381].fromHex"0x19fe632fd3287390454dc1edc61a1a3c0ba12bb3da64ca5ce32ef844e11a51e",
Fr[BLS12_381].fromHex"0xa0a77a3b1980c0d116168bffbedc11d02c8118402867ddc531a11a0d2d75182",
Fr[BLS12_381].fromHex"0x23397a9300f8f98bece8ea224f31d25db94f1101b1d7a628e2d0a7869f0319ed",
Fr[BLS12_381].fromHex"0x52dd465e2f09425699e276b571905a7d6558e9e3f6ac7b41d7b688830a4f2089",
Fr[BLS12_381].fromHex"0xc83ea7744bf1bee8da40c1ef2bb459884d37b826214abc6474650359d8e211b",
Fr[BLS12_381].fromHex"0x2c6d4e4511657e1e1339a815da8b398fed3a181fabb30adc694341f608c9dd56",
Fr[BLS12_381].fromHex"0x4b5371495990693fad1715b02e5713b5f070bb00e28a193d63e7cb4906ffc93f"
]
type EC_G1 = ECP_ShortW_Prj[Fp[BLS12_381], G1]
proc roundtrip() =
let fftDesc = ECFFT_Descriptor[EC_G1].new(order = 1 shl 4, ctt_eth_kzg_fr_pow2_roots_of_unity[4])
var data = newSeq[EC_G1](fftDesc.order)
data[0].fromAffine(BLS12_381.getGenerator("G1"))
for i in 1 ..< fftDesc.order:
data[i].madd(data[i-1], BLS12_381.getGenerator("G1"))
var coefs = newSeq[EC_G1](data.len)
let fftOk = fft(fftDesc, coefs, data)
doAssert fftOk == FFTS_Success
# display("coefs", 0, coefs)
var res = newSeq[EC_G1](data.len)
let ifftOk = ifft(fftDesc, res, coefs)
doAssert ifftOk == FFTS_Success
# display("res", 0, res)
for i in 0 ..< res.len:
if bool(res[i] != data[i]):
echo "Error: expected ", data[i].toHex(), " but got ", res[i].toHex()
quit 1
echo "FFT round-trip check SUCCESS"
proc warmup() =
# Warmup - make sure cpu is on max perf
let start = cpuTime()
var foo = 123
for i in 0 ..< 300_000_000:
foo += i*i mod 456
foo = foo mod 789
# Compiler shouldn't optimize away the results as cpuTime relies on side effects
let stop = cpuTime()
echo &"Warmup: {stop - start:>4.4f} s, result {foo} (displayed to avoid compiler optimizing warmup away)\n"
proc bench() =
echo "Starting benchmark ..."
const NumIters = 3
var rng: RngState
rng.seed 0x1234
# TODO: view types complain about mutable borrow
# in `random_unsafe` due to pseudo view type LimbsViewMut
# (which was views before Nim properly supported them)
warmup()
for scale in 4 ..< 10:
# Setup
let fftDesc = ECFFTDescriptor[EC_G1].new(order = 1 shl scale, ctt_eth_kzg_fr_pow2_roots_of_unity[scale])
var data = newSeq[EC_G1](fftDesc.order)
data[0].fromAffine(BLS12_381.getGenerator("G1"))
for i in 1 ..< fftDesc.order:
data[i].madd(data[i-1], BLS12_381.getGenerator("G1"))
var coefsOut = newSeq[EC_G1](data.len)
# Bench
let start = getMonotime()
for i in 0 ..< NumIters:
let status = fftDesc.fft(coefsOut, data)
doAssert status == FFTS_Success
let stop = getMonotime()
let ns = inNanoseconds((stop-start) div NumIters)
echo &"FFT scale {scale:>2} {ns:>8} ns/op"
proc bit_reversal() =
let k = 28
echo "Bit-reversal permutation 2^", k, " = ", 1 shl k, " int64"
var a = newSeq[int64](1 shl k)
for i in 0 ..< a.len:
a[i] = i
var b = newSeq[int64](1 shl k)
let startNaive = getMonotime()
for i in 0 ..< a.len:
# It's better to make prefetching easy on the write side
b[i] = a[int reverseBits(uint64 i, uint64 k)]
let stopNaive = getMonotime()
echo "Naive bit-reversal: ", inMilliseconds(stopNaive-startNaive), " ms"
let startOpt = getMonotime()
a.bit_reversal_permutation()
let stopOpt = getMonotime()
echo "Optimized bit-reversal: ", inMilliseconds(stopOpt-startOpt), " ms"
doAssert a == b
echo "SUCCESS bit reversal permutation"
block:
let optTile = 1 shl optimalLogTileSize(uint64)
echo "optimal tile size for uint64: ", optTile, "x", optTile," (", sizeof(uint64) * optTile * optTile, " bytes)"
block:
let optTile = 1 shl optimalLogTileSize(ECP_ShortW_Aff[Fp[BLS12_381], G1])
echo "optimal tile size for ECP_ShortW_Aff[Fp[BLS12_381], G1]: ", optTile, "x", optTile," (", sizeof(ECP_ShortW_Aff[Fp[BLS12_381], G1]) * optTile * optTile, " bytes)"
roundtrip()
warmup()
bench()
bit_reversal()

View File

@ -0,0 +1,192 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
../config/curves,
../arithmetic,
../../platforms/bithacks
## ############################################################
##
## Polynomials
##
## ############################################################
type
PolynomialCoef*[N: static int, Field] = object
## A polynomial in monomial basis
## [a₀, a₁, a₂, ..., aₙ]
##
## mapping to the canonical formula
## p(x) = a₀ + a₁ x + a₂ x² + ... + aₙ xⁿ
coefs*{.align: 64.}: array[N, Field]
PolynomialEval*[N: static int, Field] = object
## A polynomial in Lagrange basis (evaluation form)
## [f(0), f(ω), ..., f(ωⁿ⁻¹)]
## with n < 2³² and ω a root of unity
##
## mapping to the barycentric Lagrange formula
## p(z) = (1-zⁿ)/n ∑ ωⁱ/(ωⁱ-z) . p(ωⁱ)
##
## https://ethresear.ch/t/kate-commitments-from-the-lagrange-basis-without-ffts/6950
## https://en.wikipedia.org/wiki/Lagrange_polynomial#Barycentric_form
evals*{.align: 64.}: array[N, Field]
PolyDomainEval*[N: static int, Field] = object
## Metadata for polynomial in Lagrange basis (evaluation form)
rootsOfUnity*{.align: 64.}: array[N, Field]
invMaxDegree*: Field
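# Toy example of the two representations (illustrative, over the rationals rather than Fr):
# p(x) = 1 + 2x has monomial coefs [1, 2]; over the order-2 domain {ω⁰, ω¹} = {1, −1}
# its evaluation form is evals = [p(1), p(−1)] = [3, −1].
# Both describe the same polynomial; the evaluation form is what the FFT/KZG code manipulates.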
func inverseRootsMinusZ_vartime*[N: static int, Field](
invRootsMinusZ: var array[N, Field],
domain: PolyDomainEval[N, Field],
z: Field): int =
## Compute 1/(ωⁱ-z) for i in [0, N)
##
## Returns -1 if z ∉ {1, ω, ω², ... , ωⁿ⁻¹}
## Returns the index of ωⁱ==z otherwise
##
## If ωⁱ-z == 0, the other inverses are still computed
## and 0 is returned at that index.
# Montgomery's batch inversion
# ω is a root of unity of order N,
# so if ωⁱ-z == 0, it can only happen in one place
var accInv{.noInit.}: Field
var index0 = -1
for i in 0 ..< N:
invRootsMinusZ[i].diff(domain.rootsOfUnity[i], z)
if invRootsMinusZ[i].isZero().bool():
index0 = i
continue
if i == 0:
accInv = invRootsMinusZ[i]
else:
accInv *= invRootsMinusZ[i]
accInv.inv_vartime()
for i in countdown(N-1, 1):
if i == index0:
invRootsMinusZ[i].setZero()
continue
invRootsMinusZ[i] *= accInv
accInv *= domain.rootsOfUnity[i]
invRootsMinusZ[0] *= accInv
return index0
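# Montgomery's trick in its textbook form (sketch, ignoring the ωⁱ == z special case):
# to invert a, b, c with a single inversion, compute acc = a·b·c and inv = acc⁻¹, then
#   c⁻¹ = inv·(a·b),  inv ← inv·c
#   b⁻¹ = inv·a,      inv ← inv·b
#   a⁻¹ = inv
# i.e. one field inversion plus O(N) multiplications instead of N inversions.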
func evalPolyAt_vartime*[N: static int, Field](
r: var Field,
poly: PolynomialEval[N, Field],
domain: PolyDomainEval[N, Field],
invRootsMinusZ: array[N, Field],
z: Field) =
## Evaluate a polynomial in evaluation form
## at the point z
## z MUST NOT be one of the roots of unity
# p(z) = (1-zⁿ)/n ∑ ωⁱ/(ωⁱ-z) . p(ωⁱ)
static: doAssert N.isPowerOf2_vartime()
r.setZero()
for i in 0 ..< N:
var summand {.noInit.}: Field
summand.prod(domain.rootsOfUnity[i], invRootsMinusZ[i])
summand *= poly.evals[i]
r += summand
var t {.noInit.}: Field
t = z
const numDoublings = log2_vartime(N) # N is a power of 2
t.square_repeated(numDoublings) # exponentiation by a power of 2
t.diff(Field(mres: Field.getMontyOne()), t) # TODO: refactor getMontyOne to getOne and return a field element.
r *= t
r *= domain.invMaxDegree
func differenceQuotientEvalOffDomain*[N: static int, Field](
r: var PolynomialEval[N, Field],
invRootsMinusZ: array[N, Field],
poly: PolynomialEval[N, Field],
pZ: Field) =
## Compute r(x) = (p(x) - p(z)) / (x - z)
##
## for z != ωⁱ a power of a root of unity
##
## Input:
## - invRootsMinusZ: 1/(ωⁱ-z)
## - poly: p(x) a polynomial in evaluation form as an array of p(ωⁱ)
## - rootsOfUnity: ωⁱ
## - p(z)
for i in 0 ..< N:
# qᵢ = (p(ωⁱ) - p(z))/(ωⁱ-z)
var qi {.noinit.}: Field
qi.diff(poly.evals[i], pZ)
r.evals[i].prod(qi, invRootsMinusZ[i])
func differenceQuotientEvalInDomain*[N: static int, Field](
r: var PolynomialEval[N, Field],
invRootsMinusZ: array[N, Field],
poly: PolynomialEval[N, Field],
domain: PolyDomainEval[N, Field],
zIndex: int) =
## Compute r(x) = (p(x) - p(z)) / (x - z)
##
## for z = ωⁱ a power of a root of unity
##
## Input:
## - poly: p(x) a polynomial in evaluation form as an array of p(ωⁱ)
## - rootsOfUnity: ωⁱ
## - invRootsMinusZ: 1/(ωⁱ-z)
## - zIndex: the index of the root of unity power that matches z = ωⁱᵈˣ
r.evals[zIndex].setZero()
template invZ(): untyped =
# 1/z
# from ωⁿ = 1 and z = ωⁱᵈˣ
# hence ωⁿ⁻ⁱᵈˣ = 1/z
# Note if using bit-reversal permutation (BRP):
# BRP maintains the relationship
# that the inverse of ωⁱ is at position n-i (mod n) in the array of roots of unity
static: doAssert N.isPowerOf2_vartime()
domain.rootsOfUnity[(N-zIndex) and (N-1)]
for i in 0 ..< N:
if i == zIndex:
# https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html
# section "Dividing when one of the points is zero".
continue
# qᵢ = (p(ωⁱ) - p(z))/(ωⁱ-z)
var qi {.noinit.}: Field
qi.diff(poly.evals[i], poly.evals[zIndex])
r.evals[i].prod(qi, invRootsMinusZ[i])
# q'ᵢ = -qᵢ * ωⁱ/z
# q'idx = ∑ q'ᵢ
# since z is a power of ω, ωⁱ/z = ωⁱ⁻ⁱᵈˣ
# However some protocols use bit-reversal permutation (brp) to store the ωⁱ
# Hence retrieving the data would require roots[brp((brp(i)-brp(index)) mod n)] for those
# But is this fast? There is no single instruction for reversing bits of an integer.
# and the reversal depends on N.
# - https://stackoverflow.com/questions/746171/efficient-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c
# - https://stackoverflow.com/questions/52226858/bit-reversal-algorithm-by-rutkowska
# - https://www.hpl.hp.com/techreports/93/HPL-93-89.pdf
# - https://graphics.stanford.edu/~seander/bithacks.html#BitReverseObvious
# The C version from Stanford's bithacks need log₂(n) loop iterations
# A 254~255-bit multiplication takes 38 cycles, we need 3 brp so at most ~13 cycles per brp
# For small Ethereum KZG, n = 2¹² = 4096, we're already at the breaking point
# even if an iteration takes a single cycle with instruction-level parallelism
var ri {.noinit.}: Field
ri.neg(domain.rootsOfUnity[i])
ri *= invZ
ri *= qi
r.evals[zIndex] += ri
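# Sketch of why the diagonal term is a plain sum: over the domain Xᴺ − 1 one has
# A'(ωⁱ) = N·ω⁻ⁱ, hence
#   q(ωⁱᵈˣ) = −∑_{i≠idx} qᵢ·ωⁱ⁻ⁱᵈˣ = ∑_{i≠idx} (−qᵢ·ωⁱ)/z
# which is what the loop accumulates into r.evals[zIndex].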

View File

@ -153,7 +153,46 @@ func isPowerOf2_vartime*(n: SomeUnsignedInt): bool {.inline.} =
## for compile-time or explicit vartime proc only.
(n and (n - 1)) == 0 and n > 0
func nextPowerOfTwo_vartime*(n: uint32): uint32 {.inline.} =
func nextPowerOfTwo_vartime*(n: SomeUnsignedInt): SomeUnsignedInt {.inline.} =
## Returns x if x is a power of 2
## or the next biggest power of 2
1'u32 shl (log2_vartime(n-1) + 1)
1.SomeUnsignedInt shl (log2_vartime(n-1) + 1)
func swapBytes_impl(n: uint32): uint32 {.inline.} =
result = n
result = ((result shl 8) and 0xff00ff00'u32) or ((result shr 8) and 0x00ff00ff'u32)
result = (result shl 16) or (result shr 16)
func swapBytes_impl(n: uint64): uint64 {.inline.} =
result = n
result = ((result shl 8) and 0xff00ff00ff00ff00'u64) or ((result shr 8) and 0x00ff00ff00ff00ff'u64)
result = ((result shl 16) and 0xffff0000ffff0000'u64) or ((result shr 16) and 0x0000ffff0000ffff'u64)
result = (result shl 32) or (result shr 32)
func swapBytes*(n: SomeUnsignedInt): SomeUnsignedInt {.inline.} =
# Note:
# using the raw Nim implementation:
# - leads to vectorized code if swapping an array
# - leads to builtin swap on modern compilers
when nimvm:
swapBytes_impl(n)
else:
swapBytes_c_compiler(n)
func reverseBits*(n, k : uint32): uint32 {.inline.} =
## Bit reversal permutation with n ∈ [0, 2ᵏ)
# Swap bytes - allow vectorization by using raw Nim impl instead of compiler builtin
var n = swapBytes_impl(n)
n = ((n and 0x55555555'u32) shl 1) or ((n and 0xaaaaaaaa'u32) shr 1)
n = ((n and 0x33333333'u32) shl 2) or ((n and 0xcccccccc'u32) shr 2)
n = ((n and 0x0f0f0f0f'u32) shl 4) or ((n and 0xf0f0f0f0'u32) shr 4)
return n shr (32 - k)
func reverseBits*(n, k: uint64): uint64 {.inline.} =
## Bit reversal permutation with n ∈ [0, 2ᵏ)
# Swap bytes - allow vectorization by using raw Nim impl instead of compiler builtin
var n = swapBytes_impl(n)
n = ((n and 0x5555555555555555'u64) shl 1) or ((n and 0xaaaaaaaaaaaaaaaa'u64) shr 1)
n = ((n and 0x3333333333333333'u64) shl 2) or ((n and 0xcccccccccccccccc'u64) shr 2)
n = ((n and 0x0f0f0f0f0f0f0f0f'u64) shl 4) or ((n and 0xf0f0f0f0f0f0f0f0'u64) shr 4)
return n shr (64 - k)
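# Example values, assuming a 4-bit index space (illustrative):
when isMainModule:
  doAssert reverseBits(0b0001'u32, 4) == 0b1000'u32
  doAssert reverseBits(0b0110'u32, 4) == 0b0110'u32 # palindromic bit patterns are fixed points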

View File

@ -9,7 +9,8 @@
import
../../math/config/[curves, precompute],
../../math/io/io_bigints,
../primitives, ../bithacks, ../endians, ../codecs,
../primitives, ../bithacks,
../../serialization/[endians, codecs],
./llvm
# ############################################################

View File

@ -49,6 +49,12 @@ when GCC_Compatible:
else:
builtin_ctz(n.uint32)
func builtin_swapBytes(n: uint32): uint32 {.importc: "__builtin_bswap32", nodecl.}
func builtin_swapBytes(n: uint64): uint64 {.importc: "__builtin_bswap64", nodecl.}
func swapBytes_c_compiler*(n: SomeUnsignedInt): SomeUnsignedInt {.inline.} =
builtin_swapBytes(n)
elif defined(icc):
func bitScanReverse(r: var uint32, n: uint32): uint8 {.importc: "_BitScanReverse", header: "<immintrin.h>".}
## Returns 0 if n is zero and non-zero otherwise
@ -93,6 +99,12 @@ elif defined(icc):
else:
bitscan(bitScanForward, c.uint32, default = 0)
func builtin_swapBytes(n: uint32): uint32 {.importc: "_bswap", nodecl.}
func builtin_swapBytes(n: uint64): uint64 {.importc: "_bswap64", nodecl.}
func swapBytes_c_compiler*(n: SomeUnsignedInt): SomeUnsignedInt {.inline.} =
builtin_swapBytes(n)
elif defined(vcc):
func bitScanReverse(p: ptr uint32, b: uint32): uint8 {.importc: "_BitScanReverse", header: "<intrin.h>".}
## Returns 0 if n has no set bit and non-zero otherwise
@ -137,5 +149,11 @@ elif defined(vcc):
else:
bitscan(bitScanForward, c.uint32, default = sizeof(n) * 8)
func builtin_swapBytes(n: uint32): uint32 {.importc: "_byteswap_ulong", cdecl, header: "<intrin.h>".}
func builtin_swapBytes(n: uint64): uint64 {.importc: "_byteswap_uint64", cdecl, header: "<intrin.h>".}
func swapBytes_c_compiler*(n: SomeUnsignedInt): SomeUnsignedInt {.inline.} =
builtin_swapBytes(n)
else:
{. error: "Unsupported compiler".}

View File

@ -139,18 +139,11 @@ func toString*(nimSymbol: NimNode): string =
let nimSymbol = if isPtr: nimSymbol[0]
elif isAddr: nimSymbol[1]
else: nimSymbol
{.noSideEffect.}:
try: # Why does this raise a generic exception?
return $nimSymbol
except:
raise newException(Defect, "Broke Nim!")
return $nimSymbol
func hash(od: OperandDesc): Hash =
{.noSideEffect.}:
try: # Why does this raise a generic exception?
hash(od.nimSymbol.toString())
except:
raise newException(Defect, "Broke Nim")
hash(od.nimSymbol.toString())
func len*(opArray: OperandArray): int =
opArray.buf.len
@ -211,11 +204,7 @@ func genMemClobber(nimSymbol: NimNode, len: int, memIndirect: MemIndirectAccess)
doAssert false, "Indirect access kind not specified"
func asmValue*(nimSymbol: NimNode, rm: RM, constraint: Constraint): Operand =
{.noSideEffect.}:
let symStr = try: # Why does this raise a generic exception?
$nimSymbol
except:
raise newException(Defect, "Broke Nim!")
let symStr = $nimSymbol
let desc = OperandDesc(
asmId: "[" & symStr & "]",
@ -361,11 +350,7 @@ func setToCarryFlag*(a: var Assembler_x86, carry: NimNode) =
let isHiddenDeref = carry.kind == nnkHiddenDeref
let nimSymbol = if isHiddenDeref: carry[0]
else: carry
{.noSideEffect.}:
let symStr = try: # Why does this raise a generic exception?
$nimSymbol
except:
raise newException(Defect, "Broke Nim!")
let symStr = $nimSymbol
let desc = OperandDesc(
asmId: "",

View File

@ -90,6 +90,14 @@ func setOne*(a: var openArray[SomeNumber]){.inline.} =
a[0] = 1
for i in 1 ..< a.len:
a[i] = 0
func asBytes*(s: static string): auto =
## Reinterpret a compile-time string as an array of bytes
const N = s.len
var r: array[N, byte]
for i in 0 ..< s.len:
r[i] = byte s[i]
return r
func rawCopy*(
dst: var openArray[byte],
@ -107,7 +115,7 @@ func rawCopy*(
{.push checks: off.} # No OverflowError or IndexError allowed
for i in 0 ..< len:
dst[dStart + i] = byte src[sStart + i]
dst[dStart + i] = src[sStart + i]
func rotateRight*[N: static int, T](a: var array[N, T]) {.inline.} =
# Rotate right (Somehow we can't use a generic template here)

View File

@ -24,11 +24,196 @@ template toOpenArray*[T](p: ptr UncheckedArray[T], len: int): openArray[T] =
type View*[T] = object
# TODO, use `lent UncheckedArray[T]` for proper borrow-checking - https://github.com/nim-lang/Nim/issues/21674
data: ptr UncheckedArray[T]
len: int
len*: int
template toOpenArray*[T](v: View[T]): openArray[T] =
v.data.toOpenArray(0, v.len-1)
func toView*[T](data: ptr UncheckedArray[T], len: int): View[T] {.inline.} =
View[T](data: data, len: len)
func `[]`*[T](v: View[T], idx: int): lent T {.inline.} =
v.data[idx]
type MutableView*[T] {.borrow: `.`.} = distinct View[T]
template toOpenArray*[T](v: MutableView[T]): openArray[T] =
v.data.toOpenArray(0, v.len-1)
func toMutableView*[T](data: ptr UncheckedArray[T], len: int): MutableView[T] {.inline.} =
MutableView[T](View[T](data: data, len: len))
func `[]`*[T](v: MutableView[T], idx: int): var T {.inline.} =
v.data[idx]
func `[]=`*[T](v: MutableView[T], idx: int, val: T) {.inline.} =
v.data[idx] = val
# StridedView type
# ---------------------------------------------------------
# using the borrow checker with `lent` requires a recent Nim
# https://github.com/nim-lang/Nim/issues/21674
type
StridedView*[T] = object
## A strided view over an (unowned) data buffer
len*: int
stride: int
offset: int
data: ptr UncheckedArray[T]
func `[]`*[T](v: StridedView[T], idx: int): lent T {.inline.} =
v.data[v.offset + idx*v.stride]
func `[]`*[T](v: var StridedView[T], idx: int): var T {.inline.} =
v.data[v.offset + idx*v.stride]
func `[]=`*[T](v: var StridedView[T], idx: int, val: T) {.inline.} =
v.data[v.offset + idx*v.stride] = val
func toStridedView*[T](oa: openArray[T]): StridedView[T] {.inline.} =
result.len = oa.len
result.stride = 1
result.offset = 0
result.data = cast[ptr UncheckedArray[T]](oa[0].unsafeAddr)
func toStridedView*[T](p: ptr UncheckedArray[T], len: int): StridedView[T] {.inline.} =
result.len = len
result.stride = 1
result.offset = 0
result.data = p
iterator items*[T](v: StridedView[T]): lent T =
var cur = v.offset
for _ in 0 ..< v.len:
yield v.data[cur]
cur += v.stride
func `$`*(v: StridedView): string =
result = "StridedView["
var first = true
for elem in v:
if not first:
result &= ", "
else:
first = false
result &= $elem
result &= ']'
func toHex*(v: StridedView): string =
mixin toHex
result = "StridedView["
var first = true
for elem in v:
if not first:
result &= ", "
else:
first = false
result &= elem.toHex()
result &= ']'
# FFT-specific splitting
# -------------------------------------------------------------------------------
func splitAlternate*(t: StridedView): tuple[even, odd: StridedView] {.inline.} =
## Split the tensor into 2
## partitioning the input every other index
## even: indices [0, 2, 4, ...]
## odd: indices [ 1, 3, 5, ...]
assert (t.len and 1) == 0, "The tensor must contain an even number of elements"
let half = t.len shr 1
let skipHalf = t.stride shl 1
result.even.len = half
result.even.stride = skipHalf
result.even.offset = t.offset
result.even.data = t.data
result.odd.len = half
result.odd.stride = skipHalf
result.odd.offset = t.offset + t.stride
result.odd.data = t.data
func splitMiddle*(t: StridedView): tuple[left, right: StridedView] {.inline.} =
## Split the tensor into 2
## partitioning into left and right halves.
## left: indices [0, 1, 2, 3]
## right: indices [4, 5, 6, 7]
assert (t.len and 1) == 0, "The tensor must contain an even number of elements"
let half = t.len shr 1
result.left.len = half
result.left.stride = t.stride
result.left.offset = t.offset
result.left.data = t.data
result.right.len = half
result.right.stride = t.stride
result.right.offset = t.offset + half
result.right.data = t.data
func skipHalf*(t: StridedView): StridedView {.inline.} =
## Pick one every other indices
## output: [0, 2, 4, ...]
assert (t.len and 1) == 0, "The tensor must contain an even number of elements"
result.len = t.len shr 1
result.stride = t.stride shl 1
result.offset = t.offset
result.data = t.data
func slice*(v: StridedView, start, stop, step: int): StridedView {.inline.} =
## Slice a view
## stop is inclusive
# General tensor slicing algorithm is
# https://github.com/mratsim/Arraymancer/blob/71cf616/src/arraymancer/tensor/private/p_accessors_macros_read.nim#L26-L56
#
# for i, slice in slices:
# # Check if we start from the end
# let a = if slice.a_from_end: result.shape[i] - slice.a
# else: slice.a
#
# let b = if slice.b_from_end: result.shape[i] - slice.b
# else: slice.b
#
# # Compute offset:
# result.offset += a * result.strides[i]
# # Now change shape and strides
# result.strides[i] *= slice.step
# result.shape[i] = abs((b-a) div slice.step) + 1
#
# with slices being of size 1, as we have a monodimensional Tensor
# and the slice being a..<b with the reverse case: len-1 -> 0
#
# result is preinitialized with a copy of v (shape, stride, offset, data)
result.offset = v.offset + start * v.stride
result.stride = v.stride * step
result.len = abs((stop-start) div step) + 1
result.data = v.data
func reversed*(v: StridedView): StridedView {.inline.} =
# Hopefully the compiler optimizes div by -1
v.slice(v.len-1, 0, -1)
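# Quick sketch of the FFT-oriented helpers (illustrative values for an 8-element buffer):
#   [0,1,2,3,4,5,6,7].toStridedView().splitAlternate() -> even = 0,2,4,6 ; odd = 1,3,5,7
#   [0,1,2,3,4,5,6,7].toStridedView().reversed()       -> 7,6,5,4,3,2,1,0
#   [0,1,2,3,4,5,6,7].toStridedView().skipHalf()       -> 0,2,4,6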
# Debugging helpers
# ---------------------------------------------------------
when defined(debugConstantine):
import std/[strformat, strutils]
func display*[F](name: string, indent: int, oa: openArray[F]) =
debugEcho strutils.indent(name & ", openarray of " & $F & " of length " & $oa.len, indent)
for i in 0 ..< oa.len:
debugEcho strutils.indent(&" {i:>2}: {oa[i].toHex()}", indent)
debugEcho strutils.indent(name & " " & $F & " -- FIN\n", indent)
func display*[F](name: string, indent: int, v: StridedView[F]) =
debugEcho strutils.indent(name & ", view of " & $F & " of length " & $v.len, indent)
for i in 0 ..< v.len:
debugEcho strutils.indent(&" {i:>2}: {v[i].toHex()}", indent)
debugEcho strutils.indent(name & " " & $F & " -- FIN\n", indent)
# Binary blob API
# ---------------------------------------------------------
#

View File

@ -6,7 +6,7 @@
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import ./abstractions
import ../platforms/abstractions
# ############################################################
#

View File

@ -0,0 +1,300 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
## ############################################################
##
## BLS12-381 Serialization
##
## ############################################################
##
## Blockchains have standardized BLS12-381 serialization on Zcash format.
## That format is mentioned in Appendix-A of the IETF BLS signatures draft
##
## BLS12-381 serialization
##
## 𝔽p elements are encoded in big-endian form. They occupy 48 bytes in this form.
## 𝔽p2 elements are encoded in big-endian form, meaning that the 𝔽p2 element c0+c1u
## is represented by the 𝔽p element c1 followed by the 𝔽p element c0.
## This means 𝔽p2 elements occupy 96 bytes in this form.
## The group 𝔾1 uses 𝔽p elements for coordinates. The group 𝔾2 uses 𝔽p2 elements for coordinates.
## 𝔾1 and 𝔾2 elements can be encoded in uncompressed form (the x-coordinate followed by the y-coordinate) or in compressed form (just the x-coordinate).
## 𝔾1 elements occupy 96 bytes in uncompressed form, and 48 bytes in compressed form.
## 𝔾2 elements occupy 192 bytes in uncompressed form, and 96 bytes in compressed form.
##
## The most-significant three bits of a 𝔾1 or 𝔾2 encoding should be masked away before the coordinate(s) are interpreted. These bits are used to unambiguously represent the underlying element:
##
## The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form.
## The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group elements encoding should be set to zero.
## The third-most significant bit is set if (and only if) this point is in compressed form
## and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate.
##
## - https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-05#appendix-A
## - https://docs.rs/bls12_381/latest/bls12_381/notes/serialization/index.html
## - https://github.com/zkcrypto/bls12_381/blob/0.6.0/src/notes/serialization.rs
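## A hypothetical helper illustrating the flag layout above (names are illustrative,
## not part of this module's API):
##
##   func decodeFlags(b0: byte): tuple[compressed, infinity, largestY: bool] =
##     (compressed: (b0 and 0b1000_0000) != 0,
##      infinity:   (b0 and 0b0100_0000) != 0,
##      largestY:   (b0 and 0b0010_0000) != 0)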
import
../platforms/abstractions,
../math/config/curves,
../math/[
ec_shortweierstrass,
extension_fields,
arithmetic,
constants/zoo_subgroups],
../math/io/[io_bigints, io_fields]
type
CttCodecScalarStatus* = enum
cttCodecScalar_Success
cttCodecScalar_Zero
cttCodecScalar_ScalarLargerThanCurveOrder
CttCodecEccStatus* = enum
cttCodecEcc_Success
cttCodecEcc_InvalidEncoding
cttCodecEcc_CoordinateGreaterThanOrEqualModulus
cttCodecEcc_PointNotOnCurve
cttCodecEcc_PointNotInSubgroup
cttCodecEcc_PointAtInfinity
Scalar* = matchingOrderBigInt(BLS12_381)
G1P* = ECP_ShortW_Aff[Fp[BLS12_381], G1]
G2P* = ECP_ShortW_Aff[Fp2[BLS12_381], G2]
# Input validation
# ------------------------------------------------------------------------------------------------
func validate_scalar*(scalar: Scalar): CttCodecScalarStatus =
## Validate a scalar
## Regarding timing attacks, this will leak information
## if the scalar is 0 or larger than the curve order.
if scalar.isZero().bool():
return cttCodecScalar_Zero
if bool(scalar >= BLS12_381.getCurveOrder()):
return cttCodecScalar_ScalarLargerThanCurveOrder
return cttCodecScalar_Success
func validate_g1*(g1Point: G1P): CttCodecEccStatus =
## Validate a G1 point
## This is an expensive operation that can be cached
if g1Point.isInf().bool():
return cttCodecEcc_PointAtInfinity
if not isOnCurve(g1Point.x, g1Point.y, G1).bool():
return cttCodecEcc_PointNotOnCurve
if not g1Point.isInSubgroup().bool():
return cttCodecEcc_PointNotInSubgroup
return cttCodecEcc_Success
func validate_g2*(g2Point: G2P): CttCodecEccStatus =
## Validate a G2 point.
## This is an expensive operation that can be cached
if g2Point.isInf().bool():
return cttCodecEcc_PointAtInfinity
if not isOnCurve(g2Point.x, g2Point.y, G2).bool():
return cttCodecEcc_PointNotOnCurve
if not g2Point.isInSubgroup().bool():
return cttCodecEcc_PointNotInSubgroup
return cttCodecEcc_Success
# Codecs
# ------------------------------------------------------------------------------------------------
func serialize_scalar*(dst: var array[32, byte], scalar: Scalar): CttCodecScalarStatus =
## Serialize a scalar
## Returns cttCodecScalar_Success if successful
dst.marshal(scalar, bigEndian)
return cttCodecScalar_Success
func deserialize_scalar*(dst: var Scalar, src: array[32, byte]): CttCodecScalarStatus =
## Deserialize a scalar
## Also validates the scalar range
##
## This is protected against side-channel unless the scalar is invalid.
## In that case it will leak whether it's all zeros or larger than the curve order.
##
## This special-cases (and leaks) 0 scalar as this is a special-case in most protocols
## or completely invalid (for secret keys).
dst.unmarshal(src, bigEndian)
let status = validate_scalar(dst)
if status != cttCodecScalar_Success:
dst.setZero()
return status
return cttCodecScalar_Success
func serialize_g1_compressed*(dst: var array[48, byte], g1Point: G1P): CttCodecEccStatus =
## Serialize a BLS12-381 G1 point in compressed (Zcash) format
##
## Returns cttCodecEcc_Success if successful
if g1Point.isInf().bool():
for i in 0 ..< dst.len:
dst[i] = byte 0
dst[0] = byte 0b11000000 # Compressed + Infinity
return cttCodecEcc_Success
dst.marshal(g1Point.x, bigEndian)
# The curve equation has 2 solutions for y² = x³ + 4 with y unknown and x known
# The lexicographically largest will have bit 381 set to 1
# (and bit 383 for the compressed representation)
# The solutions are {y, p-y} hence the lexicographically largest is greater than p/2
# so with exact integers, as p is odd, it is greater than or equal to (p+1)/2
let lexicographicallyLargest = byte(g1Point.y.toBig() >= Fp[BLS12_381].getPrimePlus1div2())
dst[0] = dst[0] or (0b10000000 or (lexicographicallyLargest shl 5))
return cttCodecEcc_Success
func deserialize_g1_compressed_unchecked*(dst: var G1P, src: array[48, byte]): CttCodecEccStatus =
## Deserialize a BLS12-381 G1 point in compressed (Zcash) format.
##
## Warning ⚠:
## This procedure skips the very expensive subgroup checks.
## Not checking subgroup exposes a protocol to small subgroup attacks.
##
## Returns cttCodecEcc_Success if successful
# src must have the compressed flag
if (src[0] and byte 0b10000000) == byte 0:
return cttCodecEcc_InvalidEncoding
# if infinity, src must be all zeros
if (src[0] and byte 0b01000000) != 0:
if (src[0] and byte 0b00111111) != 0: # Check all the remaining bytes in MSB
return cttCodecEcc_InvalidEncoding
for i in 1 ..< src.len:
if src[i] != byte 0:
return cttCodecEcc_InvalidEncoding
dst.setInf()
return cttCodecEcc_PointAtInfinity
# General case
var t{.noInit.}: matchingBigInt(BLS12_381)
t.unmarshal(src, bigEndian)
t.limbs[t.limbs.len-1] = t.limbs[t.limbs.len-1] and (MaxWord shr 3) # The 3 most significant bits hold the encoding flags, mask them out
if bool(t >= BLS12_381.Mod()):
return cttCodecEcc_CoordinateGreaterThanOrEqualModulus
var x{.noInit.}: Fp[BLS12_381]
x.fromBig(t)
let onCurve = dst.trySetFromCoordX(x)
if not(bool onCurve):
return cttCodecEcc_PointNotOnCurve
let isLexicographicallyLargest = dst.y.toBig() >= Fp[BLS12_381].getPrimePlus1div2()
let srcIsLargest = SecretBool((src[0] shr 5) and byte 1)
dst.y.cneg(isLexicographicallyLargest xor srcIsLargest)
return cttCodecEcc_Success
func deserialize_g1_compressed*(dst: var G1P, src: array[48, byte]): CttCodecEccStatus =
## Deserialize a BLS12-381 G1 point in compressed (Zcash) format
## This also validates the G1 point
##
## Returns cttCodecEcc_Success if successful
result = deserialize_g1_compressed_unchecked(dst, src)
if result != cttCodecEcc_Success:
return result
if not(bool dst.isInSubgroup()):
return cttCodecEcc_PointNotInSubgroup
return cttCodecEcc_Success
func serialize_g2_compressed*(dst: var array[96, byte], g2Point: G2P): CttCodecEccStatus =
## Serialize a BLS12-381 G2 point in compressed (Zcash) format
##
## Returns cttCodecEcc_Success if successful
if g2Point.isInf().bool():
for i in 0 ..< dst.len:
dst[i] = byte 0
dst[0] = byte 0b11000000 # Compressed + Infinity
return cttCodecEcc_Success
dst.toOpenArray(0, 48-1).marshal(g2Point.x.c1, bigEndian)
dst.toOpenArray(48, 96-1).marshal(g2Point.x.c0, bigEndian)
let isLexicographicallyLargest =
if g2Point.y.c1.isZero().bool():
byte(g2Point.y.c0.toBig() >= Fp[BLS12_381].getPrimePlus1div2())
else:
byte(g2Point.y.c1.toBig() >= Fp[BLS12_381].getPrimePlus1div2())
dst[0] = dst[0] or (byte 0b10000000 or (isLexicographicallyLargest shl 5))
return cttCodecEcc_Success
func deserialize_g2_compressed_unchecked*(dst: var G2P, src: array[96, byte]): CttCodecEccStatus =
## Deserialize a BLS12-381 G2 point in compressed (Zcash) format.
##
## Warning ⚠:
## This procedure skips the very expensive subgroup checks.
## Not checking subgroup exposes a protocol to small subgroup attacks.
##
## Returns cttCodecEcc_Success if successful
# src must have the compressed flag
if (src[0] and byte 0b10000000) == byte 0:
return cttCodecEcc_InvalidEncoding
# if infinity, src must be all zeros
if (src[0] and byte 0b01000000) != 0:
if (src[0] and byte 0b00111111) != 0: # Check all the remaining bytes in MSB
return cttCodecEcc_InvalidEncoding
for i in 1 ..< src.len:
if src[i] != byte 0:
return cttCodecEcc_InvalidEncoding
dst.setInf()
return cttCodecEcc_PointAtInfinity
# General case
var t{.noInit.}: matchingBigInt(BLS12_381)
t.unmarshal(src.toOpenArray(0, 48-1), bigEndian)
t.limbs[t.limbs.len-1] = t.limbs[t.limbs.len-1] and (MaxWord shr 3) # The 3 most significant bits hold the encoding flags, mask them out
if bool(t >= BLS12_381.Mod()):
return cttCodecEcc_CoordinateGreaterThanOrEqualModulus
var x{.noInit.}: Fp2[BLS12_381]
x.c1.fromBig(t)
t.unmarshal(src.toOpenArray(48, 96-1), bigEndian)
if bool(t >= BLS12_381.Mod()):
return cttCodecEcc_CoordinateGreaterThanOrEqualModulus
x.c0.fromBig(t)
let onCurve = dst.trySetFromCoordX(x)
if not(bool onCurve):
return cttCodecEcc_PointNotOnCurve
let isLexicographicallyLargest =
if dst.y.c1.isZero().bool():
dst.y.c0.toBig() >= Fp[BLS12_381].getPrimePlus1div2()
else:
dst.y.c1.toBig() >= Fp[BLS12_381].getPrimePlus1div2()
let srcIsLargest = SecretBool((src[0] shr 5) and byte 1)
dst.y.cneg(isLexicographicallyLargest xor srcIsLargest)
return cttCodecEcc_Success
func deserialize_g2_compressed*(dst: var G2P, src: array[96, byte]): CttCodecEccStatus =
## Deserialize a BLS12-381 G2 point in compressed (Zcash) format
##
## Returns cttCodecEcc_Success if successful
result = deserialize_g2_compressed_unchecked(dst, src)
if result != cttCodecEcc_Success:
return result
if not(bool dst.isInSubgroup()):
return cttCodecEcc_PointNotInSubgroup
return cttCodecEcc_Success

View File

@ -6,7 +6,7 @@
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import ./abstractions
import ../platforms/abstractions
# perf critical we don't want bound checks here
# So no checks and we avoid signed int to ensure no exceptions.
@ -88,9 +88,26 @@ func dumpRawInt*(
for i in 0'u ..< L:
dst[cursor+i] = toByte(src shr ((L-i-1) * 8))
func toBytesBE*(num: SomeUnsignedInt): array[sizeof(num), byte] {.noInit, inline.}=
func toBytes*(num: SomeUnsignedInt, endianness: static Endianness): array[sizeof(num), byte] {.noInit, inline.}=
## Store an integer into an array of bytes
## in the requested endianness
const L = sizeof(num)
for i in 0 ..< L:
result[i] = toByte(num shr ((L-1-i) * 8))
when endianness == bigEndian:
for i in 0 ..< L:
result[i] = toByte(num shr ((L-1-i) * 8))
else:
for i in 0 ..< L:
result[i] = toByte(num shr (i * 8))
func fromBytes*(T: type SomeUnsignedInt, bytes: openArray[byte], endianness: static Endianness): T {.inline.} =
const L = sizeof(T)
debug:
doAssert bytes.len == L
# Note: result is zero-init
when endianness == cpuEndian:
for i in 0 ..< L:
result = result or (T(bytes[i]) shl (i*8))
else:
for i in 0 ..< L:
result = result or (T(bytes[i]) shl ((L-1-i) * 8))
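# Round-trip sketch (illustrative):
when isMainModule:
  doAssert 0x01020304'u32.toBytes(bigEndian) == [byte 0x01, 0x02, 0x03, 0x04]
  doAssert uint32.fromBytes([byte 0x01, 0x02, 0x03, 0x04], bigEndian) == 0x01020304'u32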

View File

@ -0,0 +1,335 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# TODO ⚠️:
# - Constant-time validation for parsing secret keys
# - Burning memory to ensure secrets are not left after dealloc.
import
../platforms/abstractions,
./endians
# ############################################################
#
# Parsing from canonical inputs to internal representation
#
# ############################################################
# No exceptions for the byte API.
# In particular we don't want if-branches when indexing an array
# that contains secret data
{.push raises: [], checks: off.}
# Note: the parsing/serialization routines were initially developed
# with an internal representation that used 31 bits out of a uint32
# or 63-bits out of an uint64
func unmarshalLE[T](
dst: var openArray[T],
src: openarray[byte],
wordBitWidth: static int): bool =
## Parse an unsigned integer from its canonical
## little-endian unsigned representation
## and store it into a BigInt
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
##
## Constant-Time:
## - no leaks
##
## Can work at compile-time
##
## It is possible to use a 63-bit representation out of a 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
var
dst_idx = 0
acc = T(0)
acc_len = 0
for src_idx in 0 ..< src.len:
let src_byte = T(src[src_idx])
# buffer reads
acc = acc or (src_byte shl acc_len)
acc_len += 8 # We count bit by bit
# if full, dump
if acc_len >= wordBitWidth:
if dst_idx == dst.len:
return false
dst[dst_idx] = acc
inc dst_idx
acc_len -= wordBitWidth
acc = src_byte shr (8 - acc_len)
if dst_idx < dst.len:
dst[dst_idx] = acc
for i in dst_idx + 1 ..< dst.len:
dst[i] = T(0)
return true
func unmarshalBE[T](
dst: var openArray[T],
src: openarray[byte],
wordBitWidth: static int): bool =
## Parse an unsigned integer from its canonical
## big-endian unsigned representation (octet string)
## and store it into a BigInt.
##
## In cryptography specifications, this is often called
## "Octet string to Integer"
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
##
## Constant-Time:
## - no leaks
##
## Can work at compile-time
##
## It is possible to use a 63-bit representation out of 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
var
dst_idx = 0
acc = T(0)
acc_len = 0
for src_idx in countdown(src.len-1, 0):
let src_byte = T(src[src_idx])
# buffer reads
acc = acc or (src_byte shl acc_len)
acc_len += 8 # We count bit by bit
# if full, dump
if acc_len >= wordBitWidth:
if dst_idx == dst.len:
return false
dst[dst_idx] = acc
inc dst_idx
acc_len -= wordBitWidth
acc = src_byte shr (8 - acc_len)
if dst_idx < dst.len:
dst[dst_idx] = acc
for i in dst_idx + 1 ..< dst.len:
dst[i] = T(0)
return true
func unmarshal*[T](
dst: var openArray[T],
src: openarray[byte],
wordBitWidth: static int,
srcEndianness: static Endianness): bool {.inline, discardable.} =
## Parse an unsigned integer from its canonical
## big-endian or little-endian unsigned representation
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
##
## Constant-Time:
## - no leaks
##
## Can work at compile-time to embed curve moduli
## from a canonical integer representation
when srcEndianness == littleEndian:
return dst.unmarshalLE(src, wordBitWidth)
else:
return dst.unmarshalBE(src, wordBitWidth)
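
A minimal usage sketch (editorial, assuming this module is in scope): parse a 32-byte big-endian octet string into four 64-bit limbs, least significant limb first:

```nim
var limbs: array[4, uint64]
var octets: array[32, byte]
octets[31] = 42                     # the integer 42 in big-endian
doAssert limbs.unmarshal(octets, 64, bigEndian)
doAssert limbs == [42'u64, 0, 0, 0]
```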
# ############################################################
#
# Serialising from internal representation to canonical format
#
# ############################################################
func marshalLE[T](
dst: var openarray[byte],
src: openArray[T],
wordBitWidth: static int): bool =
## Serialize a bigint into its canonical little-endian representation
## I.e least significant bit first
##
## It is possible to use a 63-bit representation out of 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
var
src_idx, dst_idx = 0
acc_len = 0
when sizeof(T) == 8:
type BT = uint64
elif sizeof(T) == 4:
type BT = uint32
else:
{.error "Unsupported word size uint" & $(sizeof(T) * 8).}
var acc = BT(0)
var tail = dst.len
while tail > 0:
let w = if src_idx < src.len: BT(src[src_idx])
else: 0
inc src_idx
if acc_len == 0:
# We need to refill the buffer to output 64-bit
acc = w
acc_len = wordBitWidth
else:
when wordBitWidth == sizeof(T) * 8:
let lo = acc
acc = w
else: # If using 63-bit (or less) out of uint64
let lo = (w shl acc_len) or acc
dec acc_len
acc = w shr (wordBitWidth - acc_len)
if tail >= sizeof(T):
# Unrolled copy
dst.blobFrom(src = lo, dst_idx, littleEndian)
dst_idx += sizeof(T)
tail -= sizeof(T)
else:
# Process the tail and exit
when cpuEndian == littleEndian:
# When requesting little-endian on little-endian platform
# we can just copy each byte
# tail is inclusive
for i in 0 ..< tail:
dst[dst_idx+i] = toByte(lo shr (i*8))
else: # TODO check this
# We need to copy from the end
for i in 0 ..< tail:
dst[dst_idx+i] = toByte(lo shr ((tail-i)*8))
if src_idx < src.len:
return false
else:
return true
if src_idx < src.len:
return false
else:
return true
func marshalBE[T](
dst: var openarray[byte],
src: openArray[T],
wordBitWidth: static int): bool =
## Serialize a bigint into its canonical big-endian representation
## (octet string)
## I.e most significant bit first
##
## In cryptography specifications, this is often called
## "Integer to Octet string" (I2OSP)
##
## It is possible to use a 63-bit representation out of 64-bit words
## by setting `wordBitWidth` to something different from sizeof(T) * 8
## This might be useful for architectures with no add-with-carry instructions.
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
var
src_idx = 0
acc_len = 0
when sizeof(T) == 8:
type BT = uint64
elif sizeof(T) == 4:
type BT = uint32
else:
{.error "Unsupported word size uint" & $(sizeof(T) * 8).}
var acc = BT(0)
var tail = dst.len
while tail > 0:
let w = if src_idx < src.len: BT(src[src_idx])
else: 0
inc src_idx
if acc_len == 0:
# We need to refill the buffer to output 64-bit
acc = w
acc_len = wordBitWidth
else:
when wordBitWidth == sizeof(T) * 8:
let lo = acc
acc = w
else: # If using 63-bit (or less) out of uint64
let lo = (w shl acc_len) or acc
dec acc_len
acc = w shr (wordBitWidth - acc_len)
if tail >= sizeof(T):
# Unrolled copy
tail -= sizeof(T)
dst.blobFrom(src = lo, tail, bigEndian)
else:
# Process the tail and exit
when cpuEndian == littleEndian:
# When requesting little-endian on little-endian platform
# we can just copy each byte
# tail is inclusive
for i in 0 ..< tail:
dst[tail-1-i] = toByte(lo shr (i*8))
else: # TODO check this
# We need to copy from the end
for i in 0 ..< tail:
dst[tail-1-i] = toByte(lo shr ((tail-i)*8))
if src_idx < src.len:
return false
else:
return true
if src_idx < src.len:
return false
else:
return true
func marshal*[T](
dst: var openArray[byte],
src: openArray[T],
wordBitWidth: static int,
dstEndianness: static Endianness): bool {.inline, discardable.} =
## Serialize a bigint into its canonical big-endian or little-endian
## representation.
##
## If the buffer is bigger, output will be zero-padded left for big-endian
## or zero-padded right for little-endian.
## I.e. the least significant byte is aligned to the buffer boundary.
##
## Returns "true" on success
## Returns "false" if destination buffer is too small
when dstEndianness == littleEndian:
return marshalLE(dst, src, wordBitWidth)
else:
return marshalBE(dst, src, wordBitWidth)
{.pop.} # {.push raises: [].}
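
A round-trip sketch (editorial, assuming the module is in scope) pairing `marshal` with `unmarshal`: four 64-bit limbs are serialized to a 32-byte big-endian octet string and parsed back:

```nim
var limbs: array[4, uint64]
limbs[0] = 1337                     # 0x0539
var octets: array[32, byte]
doAssert octets.marshal(limbs, 64, bigEndian)
doAssert octets[30] == 0x05 and octets[31] == 0x39
var roundtrip: array[4, uint64]
doAssert roundtrip.unmarshal(octets, 64, bigEndian)
doAssert roundtrip == limbs
```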

View File

@ -34,28 +34,19 @@ import
{.push raises: [].} # No exceptions allowed in core cryptographic operations
{.push checks: off.} # No defects due to array bound checking or signed integer overflow allowed
func derivePubkey*[Pubkey, SecKey](pubkey: var Pubkey, seckey: SecKey): bool =
func derivePubkey*[Pubkey, SecKey](pubkey: var Pubkey, seckey: SecKey) =
## Generates the public key associated with the input secret key.
##
## Returns:
## - false if the secret key is invalid (SK == 0 or >= BLS12-381 curve order),
## true otherwise
## By construction no public API should ever instantiate
## an invalid secret key in the first place.
## The secret key MUST be in range (0, curve order)
## 0 is INVALID
const Group = Pubkey.G
type Field = Pubkey.F
const EC = Field.C
if seckey.isZero().bool:
return false
if bool(seckey >= EC.getCurveOrder()):
return false
var pk {.noInit.}: ECP_ShortW_Jac[Field, Group]
pk.fromAffine(EC.getGenerator($Group))
pk.scalarMul(seckey)
pubkey.affine(pk)
return true
func coreSign*[Sig, SecKey](
signature: var Sig,

View File

@ -0,0 +1,306 @@
# Trusted Setup Interchange Format
## Table of contents
<!-- TOC -->
- [Trusted Setup Interchange Format](#trusted-setup-interchange-format)
- [Table of contents](#table-of-contents)
- [Overview](#overview)
- [Metadata](#metadata)
- [Schema items descriptors](#schema-items-descriptors)
- [Quick algebra refresher](#quick-algebra-refresher)
- [Notation](#notation)
- [Schema items](#schema-items)
- [Recommendation](#recommendation)
- [Data](#data)
- [𝔾1 and 𝔾2: Elliptic curve serialization](#%F0%9D%94%BE1-and-%F0%9D%94%BE2-elliptic-curve-serialization)
- [𝔽r and 𝔽p: Finite Fields serialization](#%F0%9D%94%BDr-and-%F0%9D%94%BDp-finite-fields-serialization)
- [Representation](#representation)
- [Montgomery 32-bit vs 64-bit](#montgomery-32-bit-vs-64-bit)
- [Special-form primes [unspecified]](#special-form-primes-unspecified)
- [𝔽p² serialization](#%F0%9D%94%BDp%C2%B2-serialization)
- [Larger extension field serialization [unspecified]](#larger-extension-field-serialization-unspecified)
- [𝔽p⁴](#%F0%9D%94%BDp%E2%81%B4)
- [𝔽p¹² / 𝔾t](#%F0%9D%94%BDp%C2%B9%C2%B2--%F0%9D%94%BEt)
- [Copyright](#copyright)
- [Citation](#citation)
<!-- /TOC -->
## Overview
- Format name: `Trusted setup interchange format`
- Format extension: `.tsif`
The format is chosen to allow:
- efficient copying,
- using the trusted setups as mmap-ed files on little-endian 64-bit machines,
- parallel processing
Hence the metadata is separated from the data, and data appears at precisely computable positions
without needing to scan the file first.
As little-endian 64-bit systems are significantly more likely to use trusted setups, this format optimizes operations for those machines.
This covers:
- x86-64 (Intel and AMD CPUs after 2003)
- ARM64 (e.g. Apple Macs after 2020, phones after 2014)
- RISC-V
- Nvidia, AMD, Intel GPUs
Furthermore, besides word-level (int32, int64) endianness,
most (all?) big-integer backends, cryptographic or not (GMP, LLVM APInt, Go bigints, Java bigints, ...), use a little-endian ordering of limbs.
## Metadata
We describe the format with `n` schema items, `i` being an integer in the range `[0, n)`.
| Offset (byte) | Name | Description | Size (in bytes) | Syntax | Example | Rationale |
|---------------|--------------|-------------------------------------------|-----------------|-------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
| 0 | Magic number | Fixed bytes at the beginning of each file | 12 | Hex E28883 E28B83 E28888 E2888E | Unicode string "∃⋃∈∎". Read as "There exists an union of elements of proofs" Unicode: [U+2203, U+222A, U+2208, U+220E] encoded in UTF-8 | Distinguish the file format even with incorrect extension. |
| 12 | version | format version | 4 | v{major}.{minor} | `v1.0` | Compatibility and graceful decoding failures. |
| 16 | protocol | a protocol name | 32 | any lowercase a-z 0-9 and underscore, padded with NUL bytes | `ethereum_deneb_kzg` | Graceful errors. For namespacing it is recommended to use `{application}_{fork/version/proposal that introduced the trusted setup}_{protocol}` |
| 48 | Curve | Elliptic curve name | 15 | any lowercase a-z 0-9 and underscore, padded with NUL bytes | `bls12_381` or `bn254_snarks` or `bandersnatch` or `edwards25519` or `montgomery25519` | Size chosen to fit long curve names like `bandersnatch` or `edwards25519`. Ideally the name uniquely identifies the curve; for example there are multiple BN254 curves in the literature (but only one used in trusted setups) and there are multiple representations of Curve25519 (Montgomery or Twisted Edwards). |
| 63 | fields | number of data fields `n` | 1 | {n}, `n` is encoded as an 8-bit integer | `3` | Compute byte offsets and buffer(s) size |
| 64 | 1ˢᵗ schema item | Metadata | 32 | see dedicated section | see dedicated section | |
| 64 + i*32 | iᵗʰ schema item | Metadata | 32 | see dedicated section | see dedicated section | |
| 64 + n*32 | Padding | Padding | `n*32 mod 64`: 0 or 32 | Either nothing or 0x00 repeated 32 times | | Ensure the data starts at 64-byte boundary for SIMD processing (can help for bit-reversal permutation, coordinates copy between serialized and memory representation, big-endian/little-endian conversion) |
| 64 + n\*32 + (n\*32 mod 64) | Data | Data | see dedicated section | see dedicated section | | |
## Schema items descriptors
### Quick algebra refresher
- A group is a set of elements:
- with a binary operation to combine them called the group law
- with a neutral element
- with an inverse, applying the group law on an element and its inverse results in the neutral element.
- the group order or cardinality is the number of elements in the set.
- the group can use the additive or multiplicative notation.
- the group can be cyclic, i.e. all elements of the group can be generated
by repeatedly applying the group law to a generator element.
The additive/multiplicative notation is chosen by social consensus,
hence the confusion between scalar multiplication \[a\]P and exponentiation Pᵃ for elliptic curves.
- A field is a set of elements
- with two group laws, named addition and multiplication
- and the corresponding group properties (additive/multiplicative inverse and neutral elements)
- A field can be finite (modular arithmetic modulo a prime) or infinite (the real numbers)
### Notation
- 𝔽r is a finite-field of prime order r with laws: modular addition and modular multiplication (modulo `r`)
- 𝔾1 is an additive group of prime order r with law: elliptic curve addition
- 𝔾2 is an additive group of prime order r with law: elliptic curve addition
For an additive group, we use the notation:
[a]P to represent P+P+...+P\
applying the group law `a` times, i.e. the scalar multiplication.
For a multiplicative group, we use the notation:
Pᵃ to represent P\*P\*...\*P\
applying the group law `a` times, i.e. the exponentiation
Furthermore we use the notation
- [a]₁ for the scalar multiplication of the 𝔾1 generator by a, a ∈ 𝔽r
- [b]₂ for the scalar multiplication of the 𝔾2 generator by b, b ∈ 𝔽r
### Schema items
Each schema item is described by 32 bytes of metadata, either
- `srs_monomial` + {`g1` or `g2`} + {`asc` or `brp`} + {sizeof(element)} + {number of elements}
- `srs_lagrange` + {`g1` or `g2`} + {`asc` or `brp`} + {sizeof(element)} + {number of elements}
- `roots_unity` + `fr` + {`asc` or `brp`} + {sizeof(element)} + {number of elements}
i.e.
- 15 bytes for the field description in lower-case \[a-z\], numbers and underscore. Padded right with NUL bytes.
- 2 bytes for the group or field of each element
- a 3-byte tag indicating if the srs or roots of unity are stored
- in ascending order of powers of tau (τ), the trusted setup secret.
i.e.
- for monomial storage: `[[1]₁, [τ]₁, [τ²]₁, ... [τⁿ⁻¹]₁]`
- for lagrange storage: `[[𝐿ₜₐᵤ(ω⁰)]₁, [𝐿ₜₐᵤ(ω¹)]₁, [𝐿ₜₐᵤ(ω²)]₁, ... [𝐿ₜₐᵤ(ωⁿ⁻¹)]₁]`
- for roots of unity: `[ω⁰, ω¹, ..., ωⁿ⁻¹]`
- or in [bit-reversal permutation](https://en.wikipedia.org/wiki/Bit-reversal_permutation)
- 4 bytes for the size of a single element, serialized as a little-endian 32-bit integer.
- 8 bytes for the number of elements, serialized as a little-endian 64-bit integer.
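
To make the 32-byte descriptor layout concrete, here is a hypothetical decoding sketch (editorial illustration; the `SchemaItem` type and helper are not part of the specification, they merely mirror the byte offsets described above):

```nim
type SchemaItem = object
  name, group, ordering: string   # e.g. "srs_lagrange", "g1", "brp"
  elemSize: uint32                # size of one serialized element
  numElems: uint64                # number of elements in the data section

proc parseSchemaItem(meta: array[32, byte]): SchemaItem =
  for i in 0 ..< 15:                       # bytes 0..14: NUL-padded item name
    if meta[i] != 0: result.name.add char(meta[i])
  for i in 15 .. 16: result.group.add char(meta[i])      # bytes 15..16: group/field
  for i in 17 .. 19: result.ordering.add char(meta[i])   # bytes 17..19: "asc" or "brp"
  for i in 0 ..< 4:                        # bytes 20..23: little-endian uint32
    result.elemSize = result.elemSize or (uint32(meta[20+i]) shl (8*i))
  for i in 0 ..< 8:                        # bytes 24..31: little-endian uint64
    result.numElems = result.numElems or (uint64(meta[24+i]) shl (8*i))
```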
#### Recommendation
Some protocols use the same curves but different generators `[1]₁` (𝔾1 generator) and `[1]₂` (𝔾2 generator),
and most libraries hardcode the generator as a constant.
For example for the Pasta curves:
- Pallas
- Arkworks and Zcash: (-1, 2)
- Mina: (1,12418654782883325593414442427049395787963493412651469444558597405572177144507)
- Vesta
- Arkworks and Zcash: (-1, 2)
- Mina: (1,11426906929455361843568202299992114520848200991084027513389447476559454104162)
Check that the first element of the deserialized SRS matches the library generator.
## Data
Data sections are guaranteed to start at 64-byte boundaries. Padding is done with NUL bytes (0x00).
Data is stored in little-endian for words and limbs, and in ascending order of powers of the extension-field non-residue for extension fields.
Items are stored adjacent to each other; item size and number of items are described in the schema items.
Beyond 𝔽r, 𝔾1, 𝔾2 introduced in the metadata section, we introduce:
- p, the prime modulus of the curve
p is distinct from the curve order r
- 𝔽p a finite field with prime modulus p
- 𝔽pⁿ, an extension field of characteristic p, with n coordinates, each an element of 𝔽p
### 𝔾1 and 𝔾2: Elliptic curve serialization
Elliptic curve point coordinates for:
- a short Weierstrass curve with equation `y² = x³ + ax + b` are stored in order (x, y).
- a twisted Edwards curve with equation `ax² + y² = 1+dx²y²` are stored in order (x, y).
x and y are elements of 𝔽p or 𝔽pⁿ
It is possible to store only x and recover y from the curve equation.
However:
- this prevents memory copying or memory mapping
- recovery involves a square root which is extremely slow.
- Deserialization of a compressed BLS12-381 𝔾1 point (without subgroup check) is in the order of 40000 cycles.
A memcpy would take ~1.5 cycles so about 26666x faster.
- Deserialization of a compressed BLS12-381 𝔾2 point (without subgroup check) is in the order of 70000 cycles.
A memcpy would take ~3 cycles so about 23333x faster.
- Some trusted setups have hundreds of millions of points (e.g. Filecoin 2²⁷ = 134 217 728 BLS12-381 𝔾1 points)
- A compressed representation would need, on a 4GHz CPU, 2²⁷ points × 40000 cycles / 4×10⁹ cycles/s ≈ 1342 seconds to decompress, without post-processing like bit-reversal permutation, compared to ~5µs uncompressed.
- The doubled size (12.88GB instead of 6.44GB with 96-byte BLS12-381 𝔾1 points)
is a reasonable price as it is not even stored in the blockchain.
Furthermore, memory-constrained devices can use memory-mapping instead of spending their RAM.
### 𝔽r and 𝔽p: Finite Fields serialization
Each element of 𝔽r or 𝔽p is stored:
- in little-endian for limb-endianness, i.e. least significant word first.
- in little-endian for word-endianness, i.e. within a word, least significant byte first.
- rounded to an 8-byte boundary, padded with NUL bytes.
This ensures that on little-endian machines, the bit representation is the same whether words are 32 or 64 bits:
- word₀, word₁, word₂, word₃ for 64-bit words.
- word₀, word₁, word₂, word₃, word₄, word₅, word₆, word₇ for 32-bit words.
For example, a 224-bit modulus (for the P224 curve) would need 7 uint32 = 28 bytes or 4 uint64 = 32 bytes for its in-memory representation.
#### Representation
For fields defined over generic primes, field elements are stored in `Montgomery representation`.
i.e. for all a ∈ 𝔽p, we store a' = aR (mod p), with:
- `R = (2^WordBitWidth)^numWords`
- WordBitWidth = 64
- numWords = ceil_division(log₂(p), WordBitWidth) = (log₂(p) + 63)/64. `log₂(p)` is the number of bits in the prime p
Rationale:
All libraries use the Montgomery representation for general primes, for efficient modular reduction without division.
Storing directly in Montgomery representation allows as-is memory copies or memory mapping on little-endian 64-bit CPUs.
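
As a small worked illustration (editorial; values assumed for BLS12-381's 255-bit scalar field r):

```nim
const
  primeBits    = 255                # ⌈log₂(r)⌉ for BLS12-381 Fr
  wordBitWidth = 64
  numWords     = (primeBits + wordBitWidth - 1) div wordBitWidth  # = 4
static: doAssert numWords == 4
# R = (2^64)^4 = 2^256, and each element is stored as a' = a·R (mod r)
```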
##### Montgomery 32-bit vs 64-bit
Note that the Montgomery representation may differ between 32-bit and 64-bit if the number of words in 32-bit is not double the number of words in 64-bit, i.e. if `32*numWords₃₂ != 64*numWords₆₄`.
This is the case for P224, but not for any curves used in zero-knowledge proofs at the time of writing (May 2023)
##### Special-form primes [unspecified]
Fields defined over pseudo-Mersenne primes (Crandall primes) in the form 2ᵏ-c like 2²⁵⁵-19
or generalized Mersenne primes (Solinas primes) in the form of a polynomial p(x) with x = 2ᵐ like secp256k1, P256, ...
can use a fast modular reduction and do not need the Montgomery representation.
So serializing them in Montgomery form is unnecessary.
However, at the time of writing (May 2023), no special-form primes are used in trusted setups: trusted setups are quite costly to create, so they need to provide significant benefits, for example short fixed-size proofs with sublinear verification time, which requires pairing-friendly curves.
### 𝔽p² serialization
Field-endianness is little-endian.
When 𝔾1 and/or 𝔾2 are defined over 𝔽p², with p the prime modulus of the curve,
a field element a = (x, y) ∈ 𝔽p² is represented as x+𝘫y, with 𝘫 a quadratic non-residue in 𝔽p,
and serialized `[x, y]`.
### Larger extension field serialization [unspecified]
For now, this is unspecified. Here are relevant comments.
#### 𝔽p⁴
This is relevant for BLS24 curves as 𝔾2 is defined over 𝔽p⁴.
The efficient in-memory storage is as a tower of extension fields
with 𝘶 a quadratic non-residue of 𝔽p to define 𝔽p² over 𝔽p (i.e. 𝘶 is not a square in 𝔽p)
and 𝘷 a quadratic non-residue of 𝔽p² to define 𝔽p⁴ over 𝔽p² (i.e. 𝘷 is not a square in 𝔽p²)
i.e. x ∈ 𝔽p⁴ is written x = (a + 𝘶b) + (c + 𝘶d)𝘷 = a + 𝘶b + 𝘷c + 𝘶𝘷d
And the canonical representation would use
μ, a quartic non-residue of 𝔽p, to define 𝔽p⁴ over 𝔽p (i.e. x⁴ = μ has no solution x ∈ 𝔽p)
with x ∈ 𝔽p⁴ written x = a' + μb' + μ²c' + μ³d'
For 𝔽p⁴, the efficient in-memory storage and the canonical representation match.
#### 𝔽p¹² / 𝔾t
For common curves of embedding degree 12 (BN254_Snarks, BLS12_381, BLS12_377),
are there situations which need to serialize 𝔾t elements, defined over 𝔽p¹²?
Given a sextic twist, we can express all elements in terms of z = SNR¹ᐟ⁶ (sextic non-residue)
The canonical direct sextic representation uses coefficients
c₀ + c₁ z + c₂ z² + c₃ z³ + c₄ z⁴ + c₅ z⁵
with z = SNR¹ᐟ⁶
__The cubic over quadratic towering__
(a₀ + a₁ u) + (a₂ + a₃u) v + (a₄ + a₅u) v²
with u = (SNR)¹ᐟ² and v = z = u¹ᐟ³ = (SNR)¹ᐟ⁶
__The quadratic over cubic towering__
(b₀ + b₁x + b₂x²) + (b₃ + b₄x + b₅x²)y
with x = (SNR)¹ᐟ³ and y = z = x¹ᐟ² = (SNR)¹ᐟ⁶
__Mapping between towering schemes__
```
canonical <=> cubic over quadratic <=> quadratic over cubic
c₀ <=> a₀ <=> b₀
c₁ <=> a₂ <=> b₃
c₂ <=> a₄ <=> b₁
c₃ <=> a₁ <=> b₄
c₄ <=> a₃ <=> b₂
c₅ <=> a₅ <=> b₅
```
In that scheme, all coordinates are defined as 𝔽p² elements.
Hence specifying an 𝔽p¹² extension field representation requires agreeing on:
- Towering serialization (cube over quad or quad over cube) vs direct sextic representation
- For direct representation, ascending or descending in powers of the sextic non-residue?
Furthermore 𝔾t elements have special properties and can be stored in compressed form using trace-based or torus-based compression, with compression ratios from 1/3 to 4/6 and varying decompression costs (from not decompressible but usable for pairing computations, to decompressible at the cost of an inversion, to decompressible at the cost of tens of 𝔽p multiplications).
## Copyright
Copyright and related rights waived via CC0.
## Citation
Please cite this document as:
Mamy Ratsimbazafy, "Trusted Setup Interchange Format [DRAFT]", May 2023, Available: https://github.com/mratsim/constantine/tree/master/constantine/trusted_setups/README.md

View File

@ -0,0 +1,266 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
../math/config/curves,
../math/[ec_shortweierstrass, arithmetic, extension_fields],
../platforms/abstractions,
../serialization/endians,
../math/constants/zoo_generators,
../math/polynomials/polynomials,
../math/io/io_fields,
std/streams
# Ensure all exceptions are converted to error codes
{.push raises: [], checks: off.}
# Aliases
# ------------------------------------------------------------
type
G1Point = ECP_ShortW_Aff[Fp[BLS12_381], G1]
G2Point = ECP_ShortW_Aff[Fp2[BLS12_381], G2]
# Presets
# ------------------------------------------------------------
const FIELD_ELEMENTS_PER_BLOB* {.intdefine.} = 4096
# Trusted setup
# ------------------------------------------------------------
const KZG_SETUP_G2_LENGTH = 65
# On the number of 𝔾2 points:
# - In the Deneb specs, https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/polynomial-commitments.md
# only KZG_SETUP_G2[1] is used.
# - In SONIC, section 6.2, https://eprint.iacr.org/2019/099.pdf
# H and [α]H, the generator of 𝔾2 and its scalar multiplication by a random secret from trusted setup, are needed.
# - In Marlin, section 2.5, https://eprint.iacr.org/2019/1047.pdf
# H and [β]H, the generator of 𝔾2 and its scalar multiplication by a random secret from trusted setup, are needed.
# - In Plonk, section 3.1, https://eprint.iacr.org/2019/953
# [1]₂ and [x]₂, i.e. [1] scalar multiplied by the generator of 𝔾2 and [x] scalar multiplied by the generator of 𝔾2, x a random secret from trusted setup, are needed.
# - In Vitalik's Plonk article, section Polynomial commitments, https://vitalik.ca/general/2019/09/22/plonk.html#polynomial-commitments
# [s]G₂, i.e a random secret [s] scalar multiplied by the generator of 𝔾2, is needed
#
# The extra 63 points are expected to be used for sharding https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/_features/sharding/polynomial-commitments.md
# for KZG multiproofs for 64 shards: https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html
#
# Note:
# The batched proofs (different polynomials) used in Deneb specs
# are different from multiproofs
type
EthereumKZGContext* = object
## KZG commitment context
# Trusted setup, see https://vitalik.ca/general/2022/03/14/trustedsetup.html
srs_lagrange_g1*{.align: 64.}: array[FIELD_ELEMENTS_PER_BLOB, G1Point]
# Part of the Structured Reference String (SRS) holding the 𝔾1 points
# This is used for committing to polynomials and producing an opening proof at
# a random value (chosen via Fiat-Shamir heuristic)
#
# Referring to the 𝔾1 generator as G, in monomial basis / coefficient form we would store:
# [G, [τ]G, [τ²]G, ... [τ⁴⁰⁹⁶]G]
# with τ a random secret derived from a multi-party computation ceremony
# with at least one honest random secret contributor (also called KZG ceremony or powers-of-tau ceremony)
#
# For efficiency we operate only on the evaluation form of polynomials over 𝔾1 (i.e. the Lagrange basis)
# i.e. for agreed upon [ω⁰, ω¹, ..., ω⁴⁰⁹⁶]
# we store [f(ω⁰), f(ω¹), ..., f(ω⁴⁰⁹⁶)]
#
# https://en.wikipedia.org/wiki/Lagrange_polynomial#Barycentric_form
#
# Conversion can be done with a discrete Fourier transform.
srs_monomial_g2*{.align: 64.}: array[KZG_SETUP_G2_LENGTH, G2Point]
# Part of the SRS holding the 𝔾2 points
#
# Referring to the 𝔾2 generator as H, we store
# [H, [τ]H, [τ²]H, ..., [τ⁶⁴]H]
# with τ a random secret derived from a multi-party computation ceremony
# with at least one honest random secret contributor (also called KZG ceremony or powers-of-tau ceremony)
#
# This is used to verify commitments.
# For most schemes (Marlin, Plonk, Sonic, Ethereum's Deneb), only [τ]H is needed
# but Ethereum's sharding will need 64 (65 with the generator H)
domain*{.align: 64.}: PolyDomainEval[FIELD_ELEMENTS_PER_BLOB, Fr[BLS12_381]]
TrustedSetupStatus* = enum
tsSuccess
tsMissingFile
tsWrongPreset
tsUnsupportedFileVersion
tsInvalidFile
tsLowLevelReadError
proc skipMod64(f: FileStream): TrustedSetupStatus =
## Skip to a 64-byte boundary
try:
let pos = f.getPosition()
let posMod64 = pos and 63
if posMod64 != 0: f.setPosition(pos + 64 - posMod64)
return tsSuccess
except IOError, OSError:
return tsInvalidFile
proc loadTrustedSetup*(ctx: ptr EthereumKZGContext, filePath: string): TrustedSetupStatus =
static: doAssert cpuEndian == littleEndian, "Trusted setup loading is only supported on little-endian CPUs at the moment."
let f = try: openFileStream(filePath, fmRead)
except IOError, OSError: return tsMissingFile
defer:
try:
f.close()
except Exception: # For some reason close can raise a bare Exception.
quit "Unrecoverable error while closing trusted setup file."
try:
var buf = newSeqOfCap[byte](32)
var len = 0
buf.setLen(12)
len = f.readData(buf[0].addr, 12)
if len != 12:
return tsInvalidFile
if buf != static(@[byte 0xE2, 0x88, 0x83, 0xE2, 0x8B, 0x83, 0xE2, 0x88, 0x88, 0xE2, 0x88, 0x8E]):
# ∃⋃∈∎ in UTF-8
return tsInvalidFile
if f.readChar() != 'v':
return tsInvalidFile
if f.readUint8() != 1:
return tsUnsupportedFileVersion
if f.readChar() != '.':
return tsUnsupportedFileVersion
if f.readUint8() != 0:
return tsUnsupportedFileVersion
buf.setLen(32)
len = f.readData(buf[0].addr, 32)
if len != 32:
return tsInvalidFile
if buf.toOpenArray(0, 17) != asBytes"ethereum_deneb_kzg":
return tsWrongPreset
if buf.toOpenArray(18, 31) != default(array[18..31, byte]):
debugEcho buf.toOpenArray(18, 31)
return tsWrongPreset
buf.setLen(15)
len = f.readData(buf[0].addr, 15)
if len != 15:
return tsInvalidFile
if buf.toOpenArray(0, 8) != asBytes"bls12_381":
return tsWrongPreset
if buf.toOpenArray(9, 14) != default(array[9..14, byte]):
return tsWrongPreset
let num_fields = f.readUint8()
if num_fields != 3:
return tsWrongPreset
block: # Read 1st metadata
buf.setLen(32)
len = f.readData(buf[0].addr, 32)
if len != 32:
return tsInvalidFile
if buf.toOpenArray(0, 11) != asBytes"srs_lagrange":
return tsWrongPreset
if buf.toOpenArray(12, 14) != default(array[12..14, byte]):
return tsWrongPreset
if buf.toOpenArray(15, 19) != asBytes"g1brp":
return tsWrongPreset
let elemSize = uint32.fromBytes(buf.toOpenArray(20, 23), littleEndian)
if elemSize != uint32 sizeof(ECP_ShortW_Aff[Fp[BLS12_381], G1]):
return tsWrongPreset
let numElems = uint64.fromBytes(buf.toOpenArray(24, 31), littleEndian)
if numElems != FIELD_ELEMENTS_PER_BLOB:
return tsWrongPreset
block: # Read 2nd metadata
buf.setLen(32)
len = f.readData(buf[0].addr, 32)
if len != 32:
return tsInvalidFile
if buf.toOpenArray(0, 11) != asBytes"srs_monomial":
return tsWrongPreset
if buf.toOpenArray(12, 14) != default(array[12..14, byte]):
return tsWrongPreset
if buf.toOpenArray(15, 19) != asBytes"g2asc":
return tsWrongPreset
let elemSize = uint32.fromBytes(buf.toOpenArray(20, 23), littleEndian)
if elemSize != uint32 sizeof(ECP_ShortW_Aff[Fp2[BLS12_381], G2]):
return tsWrongPreset
let numElems = uint64.fromBytes(buf.toOpenArray(24, 31), littleEndian)
if numElems != KZG_SETUP_G2_LENGTH:
return tsWrongPreset
block: # Read 3rd metadata
buf.setLen(32)
len = f.readData(buf[0].addr, 32)
if len != 32:
return tsInvalidFile
if buf.toOpenArray(0, 10) != asBytes"roots_unity":
return tsWrongPreset
if buf.toOpenArray(11, 14) != default(array[11..14, byte]):
return tsWrongPreset
if buf.toOpenArray(15, 19) != asBytes"frbrp":
return tsWrongPreset
let elemSize = uint32.fromBytes(buf.toOpenArray(20, 23), littleEndian)
if elemSize != uint32 sizeof(Fr[BLS12_381]):
return tsWrongPreset
let numElems = uint64.fromBytes(buf.toOpenArray(24, 31), littleEndian)
if numElems != FIELD_ELEMENTS_PER_BLOB:
return tsWrongPreset
block: # Read 1st data, assume little-endian
let status64Balign = f.skipMod64()
if status64Balign != tsSuccess:
return status64Balign
len = f.readData(ctx.srs_lagrange_g1.addr, sizeof(ctx.srs_lagrange_g1))
if len != sizeof(ctx.srs_lagrange_g1):
return tsInvalidFile
block: # Read 2nd data, assume little-endian
let status64Balign = f.skipMod64()
if status64Balign != tsSuccess:
return status64Balign
len = f.readData(ctx.srs_monomial_g2.addr, sizeof(ctx.srs_monomial_g2))
if len != sizeof(ctx.srs_monomial_g2):
return tsInvalidFile
block: # Read 3rd data, assume little-endian
let status64Balign = f.skipMod64()
if status64Balign != tsSuccess:
return status64Balign
len = f.readData(ctx.domain.rootsOfUnity.addr, sizeof(ctx.domain.rootsOfUnity))
if len != sizeof(ctx.domain.rootsOfUnity):
return tsInvalidFile
# Compute the inverse of the domain degree
ctx.domain.invMaxDegree.fromUint(ctx.domain.rootsOfUnity.len.uint64)
ctx.domain.invMaxDegree.inv_vartime()
block: # Last sanity check
# When the srs is in monomial form we can check that
# the first point is the generator
if bool(ctx.srs_monomial_g2[0] != BLS12_381.getGenerator"G2"):
return tsWrongPreset
return tsSuccess
except IOError, OSError:
return tsLowLevelReadError
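
A hedged usage sketch (editorial, assuming this module is importable and a `.tsif` file produced by the generator later in this commit is on disk); the context is roughly half a megabyte, so it is heap-allocated here:

```nim
var ctx = create(EthereumKZGContext)   # zero-initialized heap allocation
let status = ctx.loadTrustedSetup("trusted_setup_ethereum_kzg_test_mainnet.tsif")
if status != tsSuccess:
  echo "trusted setup loading failed: ", status
dealloc(ctx)
```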

View File

@ -0,0 +1,253 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
../math/polynomials/fft,
../math/[arithmetic, extension_fields, ec_shortweierstrass],
../math/config/curves,
../math/elliptic/[ec_scalar_mul_vartime, ec_shortweierstrass_batch_ops],
../math/io/io_fields,
../math/constants/zoo_generators,
../platforms/abstractions,
../serialization/endians,
std/streams
# This tool generates the same testing setups that are used in Ethereum consensus-spec
# in a Constantine-specific format specified in README.md
# Trusted setup source:
#
# - Minimal preset: https://github.com/ethereum/consensus-specs/blob/v1.3.0/presets/minimal/trusted_setups/testing_trusted_setups.json
# - Mainnet preset: https://github.com/ethereum/consensus-specs/blob/v1.3.0/presets/mainnet/trusted_setups/testing_trusted_setups.json
#
# The upstream trusted setups are stored in `./tests/protocol_ethereum_deneb_kzg`
#
# The upstream testing setup generator is:
# - dump_kzg_trusted_setup_files
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/tests/core/pyspec/eth2spec/utils/kzg.py#L96-L123
# Called with
# - python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4 --g2-length=65
# python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4096 --g2-length=65
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/Makefile#L209-L210
# Roots of unity
# ------------------------------------------------------------
#
# Computation:
# Reference: https://crypto.stanford.edu/pbc/notes/numbertheory/gen.html
#
# 1. Find a primitive root of the finite field of modulus q
# i.e. root^k != 1 for all k < q-1 so powers of root generate the field.
#
# sagemath: GF(r).multiplicative_generator()
#
# 2. primitive_root⁽ᵐᵒᵈᵘˡᵘˢ⁻¹⁾/⁽²^ⁱ⁾ for i in [0, 32)
#
# sagemath: [primitive_root^((r-1)//(1 << i)) for i in range(32)]
#
# Usage:
# The roots of unity ω allow usage of polynomials in evaluation form (Lagrange basis)
# see ω https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html
#
# Where does the 32 come from?
# Recall the definition of the BLS12-381 curve:
# sagemath:
# x = -(2^63 + 2^62 + 2^60 + 2^57 + 2^48 + 2^16)
# order = x^4 - x^2 + 1
#
# and check the 2-adicity
# factor(order-1)
# => 2^32 * 3 * 11 * 19 * 10177 * 125527 * 859267 * 906349^2 * 2508409 * 2529403 * 52437899 * 254760293^2
#
# BLS12-381 was chosen for its high 2-adicity, as 2^32 is a factor of its order-1
const ctt_eth_kzg_fr_pow2_roots_of_unity = [
# primitive_root⁽ᵐᵒᵈᵘˡᵘˢ⁻¹⁾/⁽²^ⁱ⁾ for i in [0, 32)
# The primitive root chosen is 7
Fr[BLS12_381].fromHex"0x1",
Fr[BLS12_381].fromHex"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000",
Fr[BLS12_381].fromHex"0x8d51ccce760304d0ec030002760300000001000000000000",
Fr[BLS12_381].fromHex"0x345766f603fa66e78c0625cd70d77ce2b38b21c28713b7007228fd3397743f7a",
Fr[BLS12_381].fromHex"0x20b1ce9140267af9dd1c0af834cec32c17beb312f20b6f7653ea61d87742bcce",
Fr[BLS12_381].fromHex"0x50e0903a157988bab4bcd40e22f55448bf6e88fb4c38fb8a360c60997369df4e",
Fr[BLS12_381].fromHex"0x45af6345ec055e4d14a1e27164d8fdbd2d967f4be2f951558140d032f0a9ee53",
Fr[BLS12_381].fromHex"0x6898111413588742b7c68b4d7fdd60d098d0caac87f5713c5130c2c1660125be",
Fr[BLS12_381].fromHex"0x4f9b4098e2e9f12e6b368121ac0cf4ad0a0865a899e8deff4935bd2f817f694b",
Fr[BLS12_381].fromHex"0x95166525526a65439feec240d80689fd697168a3a6000fe4541b8ff2ee0434e",
Fr[BLS12_381].fromHex"0x325db5c3debf77a18f4de02c0f776af3ea437f9626fc085e3c28d666a5c2d854",
Fr[BLS12_381].fromHex"0x6d031f1b5c49c83409f1ca610a08f16655ea6811be9c622d4a838b5d59cd79e5",
Fr[BLS12_381].fromHex"0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306",
Fr[BLS12_381].fromHex"0x485d512737b1da3d2ccddea2972e89ed146b58bc434906ac6fdd00bfc78c8967",
Fr[BLS12_381].fromHex"0x56624634b500a166dc86b01c0d477fa6ae4622f6a9152435034d2ff22a5ad9e1",
Fr[BLS12_381].fromHex"0x3291357ee558b50d483405417a0cbe39c8d5f51db3f32699fbd047e11279bb6e",
Fr[BLS12_381].fromHex"0x2155379d12180caa88f39a78f1aeb57867a665ae1fcadc91d7118f85cd96b8ad",
Fr[BLS12_381].fromHex"0x224262332d8acbf4473a2eef772c33d6cd7f2bd6d0711b7d08692405f3b70f10",
Fr[BLS12_381].fromHex"0x2d3056a530794f01652f717ae1c34bb0bb97a3bf30ce40fd6f421a7d8ef674fb",
Fr[BLS12_381].fromHex"0x520e587a724a6955df625e80d0adef90ad8e16e84419c750194e8c62ecb38d9d",
Fr[BLS12_381].fromHex"0x3e1c54bcb947035a57a6e07cb98de4a2f69e02d265e09d9fece7e0e39898d4b",
Fr[BLS12_381].fromHex"0x47c8b5817018af4fc70d0874b0691d4e46b3105f04db5844cd3979122d3ea03a",
Fr[BLS12_381].fromHex"0xabe6a5e5abcaa32f2d38f10fbb8d1bbe08fec7c86389beec6e7a6ffb08e3363",
Fr[BLS12_381].fromHex"0x73560252aa0655b25121af06a3b51e3cc631ffb2585a72db5616c57de0ec9eae",
Fr[BLS12_381].fromHex"0x291cf6d68823e6876e0bcd91ee76273072cf6a8029b7d7bc92cf4deb77bd779c",
Fr[BLS12_381].fromHex"0x19fe632fd3287390454dc1edc61a1a3c0ba12bb3da64ca5ce32ef844e11a51e",
Fr[BLS12_381].fromHex"0xa0a77a3b1980c0d116168bffbedc11d02c8118402867ddc531a11a0d2d75182",
Fr[BLS12_381].fromHex"0x23397a9300f8f98bece8ea224f31d25db94f1101b1d7a628e2d0a7869f0319ed",
Fr[BLS12_381].fromHex"0x52dd465e2f09425699e276b571905a7d6558e9e3f6ac7b41d7b688830a4f2089",
Fr[BLS12_381].fromHex"0xc83ea7744bf1bee8da40c1ef2bb459884d37b826214abc6474650359d8e211b",
Fr[BLS12_381].fromHex"0x2c6d4e4511657e1e1339a815da8b398fed3a181fabb30adc694341f608c9dd56",
Fr[BLS12_381].fromHex"0x4b5371495990693fad1715b02e5713b5f070bb00e28a193d63e7cb4906ffc93f"
]
func newTrustedSetupImpl(
EC: typedesc, secret: auto, length: int): seq[EC] =
result.setLen(length)
var P {.noInit.}: EC
P.fromAffine(EC.F.C.getGenerator($EC.G))
result[0] = P
for i in 1 ..< length:
P.scalarMul_minHammingWeight_windowed_vartime(secret, window = 5)
result[i] = P
func newTrustedSetupMonomial(EC: typedesc, secret: auto, length: int): seq[EC] =
let ts = newTrustedSetupImpl(projective(EC), secret, length)
result.setLen(length)
batchAffine(result.asUnchecked(), ts.asUnchecked(), length)
func getLagrange[EC](fftDesc: ECFFT_Descriptor[EC], monomial: seq[EC]): seq[EC] =
## Get a polynomial in lagrange basis from a polynomial in monomial form.
## The polynomial is also bit-reversal permuted.
result.setLen(monomial.len)
let status = fftDesc.ifft(result, monomial)
doAssert status == FFTS_Success, "Ethereum testing trusted setup failure during Lagrange form: " & $status
result.bit_reversal_permutation()
func newTrustedSetupLagrange[EC](fftDesc: ECFFT_Descriptor[EC], secret: auto, length: int): auto =
let ts = newTrustedSetupImpl(EC, secret, length)
let ts2 = fftDesc.getLagrange(ts)
let tsAffine = newSeq[affine(EC)](length)
batchAffine(tsAffine.asUnchecked(), ts2.asUnchecked(), length)
return tsAffine
proc padNUL64(f: FileStream) =
## Pad NUL bytes until we reach a 64-byte boundary
let pos = f.getPosition()
let posMod64 = pos and 63
let pad = default(array[63, byte])
if posMod64 != 0:
f.writeData(pad[0].unsafeAddr, 64-posMod64)
proc genEthereumKzgTestingTrustedSetup(filepath: string, secret: auto, length: int) =
## Generate an Ethereum KZG testing trusted setup
## in the Trusted Setup Interchange Format
## `length` is the length of the SRS 𝔾1
## the SRS 𝔾2 is fixed at 65.
## SRS 𝔾1 and roots of unity are bit-reversal permuted
static: doAssert cpuEndian == littleEndian, "Trusted setup creation is only supported on little-endian CPUs at the moment."
doAssert length.uint.isPowerOf2_vartime(), "Expected power of 2 but found length " & $length
let f = openFileStream(filepath, fmWrite)
defer: f.close()
f.write"∃⋃∈∎" # ∃⋃∈∎ in UTF-8. (magic bytes)
# v1.0
f.write 'v'
f.write uint8 1
f.write '.'
f.write uint8 0
# Protocol
const proto = "ethereum_deneb_kzg"
f.write proto
let padProto = default(array[32 - proto.len, byte]) # zero-init padding
f.writeData(padProto[0].unsafeAddr, padProto.len)
# Curve
const curve = "bls12_381"
f.write curve
let padCurve = default(array[15 - curve.len, byte]) # zero-init padding
f.writeData(padCurve[0].unsafeAddr, padCurve.len)
# Number of fields
f.write uint8 3
block: # Metadata 1 - srs 𝔾1 points - bit-reversal permuted
var meta: array[32, byte]
meta[0..<12] = asBytes"srs_lagrange"
meta[15..<17] = asBytes"g1"
meta[17..<20] = asBytes"brp"
meta[20..<24] = toBytes(uint32 sizeof(ECP_ShortW_Aff[Fp[BLS12_381], G1]), littleEndian)
meta[24..<32] = toBytes(uint64 length, littleEndian)
f.write meta
block: # Metadata 2 - srs 𝔾2 points (hardcoded to 65)
var meta: array[32, byte]
meta[0..<12] = asBytes"srs_monomial"
meta[15..<17] = asBytes"g2"
meta[17..<20] = asBytes"asc"
meta[20..<24] = toBytes(uint32 sizeof(ECP_ShortW_Aff[Fp2[BLS12_381], G2]), littleEndian)
meta[24..<32] = toBytes(65'u64, littleEndian)
f.write meta
# Projective coordinates are slightly faster than jacobian on 𝔾1
var fftDesc = ECFFTDescriptor[ECP_ShortW_Prj[Fp[BLS12_381], G1]].new(
order = length, ctt_eth_kzg_fr_pow2_roots_of_unity[log2_vartime(length.uint)])
block: # Metadata 3 - roots of unity - bit-reversal permuted
var meta: array[32, byte]
meta[0..<11] = asBytes"roots_unity"
meta[15..<17] = asBytes"fr"
meta[17..<20] = asBytes"brp"
meta[20..<24] = toBytes(uint32 sizeof(fftDesc.rootsOfUnity[0]), littleEndian)
meta[24..<32] = toBytes(fftDesc.order.uint64, littleEndian)
f.write meta
f.padNUL64()
block: # Data 1 - srs 𝔾1 points - bit-reversal permuted
let ts1 = fftDesc.newTrustedSetupLagrange(secret, length)
# Raw dump requires little-endian
f.writeData(ts1[0].unsafeAddr, sizeof(ts1[0]) * length)
f.padNUL64()
block: # Data 2 - srs 𝔾2 points - monomial form, ascending powers of τ
const g2Length = 65
let ts2 = ECP_ShortW_Aff[Fp2[BLS12_381], G2].newTrustedSetupMonomial(secret, g2Length)
# Raw dump requires little-endian
f.writeData(ts2[0].unsafeAddr, sizeof(ts2[0]) * g2Length)
f.padNUL64()
bit_reversal_permutation(fftDesc.rootsOfUnity.toOpenArray(0, fftDesc.order-1))
block: # Data 3 - roots of unity - bit-reversal permuted
# Raw dump requires little-endian
# and we convert them all to Montgomery form
for i in 0 ..< fftDesc.order:
let t = Fr[BLS12_381].fromBig(fftDesc.rootsOfUnity[i])
f.writeData(t.unsafeAddr, sizeof(t))
when isMainModule:
import ../math/io/io_bigints
let testSecret = BigInt[11].fromUint(1337'u64)
genEthereumKzgTestingTrustedSetup("trusted_setup_ethereum_kzg_test_minimal.tsif", testSecret, 4)
genEthereumKzgTestingTrustedSetup("trusted_setup_ethereum_kzg_test_mainnet.tsif", testSecret, 4096)

View File

@ -0,0 +1,245 @@
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
../math/polynomials/fft,
../math/[arithmetic, extension_fields, ec_shortweierstrass],
../math/config/curves,
../math/elliptic/[ec_scalar_mul_vartime, ec_shortweierstrass_batch_ops],
../math/io/io_fields,
../math/constants/zoo_generators,
../platforms/abstractions,
../serialization/endians,
std/streams
# This tool generates the same testing setups that are used in Ethereum consensus-spec
# in a Constantine-specific format specified in README.md
# Trusted setup source:
#
# - Minimal preset: https://github.com/ethereum/consensus-specs/blob/v1.3.0/presets/minimal/trusted_setups/testing_trusted_setups.json
# - Mainnet preset: https://github.com/ethereum/consensus-specs/blob/v1.3.0/presets/mainnet/trusted_setups/testing_trusted_setups.json
#
# The upstream trusted setups are stored in `./tests/protocol_ethereum_deneb_kzg`
#
# The upstream testing setup generator is:
# - dump_kzg_trusted_setup_files
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/tests/core/pyspec/eth2spec/utils/kzg.py#L96-L123
# Called with
# - python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4 --g2-length=65
# python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4096 --g2-length=65
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/Makefile#L209-L210
# Roots of unity
# ------------------------------------------------------------
#
# Computation:
# Reference: https://crypto.stanford.edu/pbc/notes/numbertheory/gen.html
#
# 1. Find a primitive root of the finite field of modulus q
# i.e. root^k != 1 for all k < q-1 so powers of root generate the field.
#
# sagemath: GF(r).multiplicative_generator()
#
# 2. primitive_root⁽ᵐᵒᵈᵘˡᵘˢ⁻¹⁾/⁽²^ⁱ⁾ for i in [0, 32)
#
# sagemath: [primitive_root^((r-1)//(1 << i)) for i in range(32)]
#
# Usage:
# The roots of unity ω allow usage of polynomials in evaluation form (Lagrange basis)
# see ω https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html
#
# Where does the 32 come from?
# Recall the definition of the BLS12-381 curve:
# sagemath:
# x = -(2^63 + 2^62 + 2^60 + 2^57 + 2^48 + 2^16)
# order = x^4 - x^2 + 1
#
# and check the 2-adicity
# factor(order-1)
# => 2^32 * 3 * 11 * 19 * 10177 * 125527 * 859267 * 906349^2 * 2508409 * 2529403 * 52437899 * 254760293^2
#
# BLS12-381 was chosen for its high 2-adicity, as 2^32 is a factor of its order-1
const ctt_eth_kzg_fr_pow2_roots_of_unity = [
# primitive_root⁽ᵐᵒᵈᵘˡᵘˢ⁻¹⁾/⁽²^ⁱ⁾ for i in [0, 32)
# The primitive root chosen is 7
Fr[BLS12_381].fromHex"0x1",
Fr[BLS12_381].fromHex"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000",
Fr[BLS12_381].fromHex"0x8d51ccce760304d0ec030002760300000001000000000000",
Fr[BLS12_381].fromHex"0x345766f603fa66e78c0625cd70d77ce2b38b21c28713b7007228fd3397743f7a",
Fr[BLS12_381].fromHex"0x20b1ce9140267af9dd1c0af834cec32c17beb312f20b6f7653ea61d87742bcce",
Fr[BLS12_381].fromHex"0x50e0903a157988bab4bcd40e22f55448bf6e88fb4c38fb8a360c60997369df4e",
Fr[BLS12_381].fromHex"0x45af6345ec055e4d14a1e27164d8fdbd2d967f4be2f951558140d032f0a9ee53",
Fr[BLS12_381].fromHex"0x6898111413588742b7c68b4d7fdd60d098d0caac87f5713c5130c2c1660125be",
Fr[BLS12_381].fromHex"0x4f9b4098e2e9f12e6b368121ac0cf4ad0a0865a899e8deff4935bd2f817f694b",
Fr[BLS12_381].fromHex"0x95166525526a65439feec240d80689fd697168a3a6000fe4541b8ff2ee0434e",
Fr[BLS12_381].fromHex"0x325db5c3debf77a18f4de02c0f776af3ea437f9626fc085e3c28d666a5c2d854",
Fr[BLS12_381].fromHex"0x6d031f1b5c49c83409f1ca610a08f16655ea6811be9c622d4a838b5d59cd79e5",
Fr[BLS12_381].fromHex"0x564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d36306",
Fr[BLS12_381].fromHex"0x485d512737b1da3d2ccddea2972e89ed146b58bc434906ac6fdd00bfc78c8967",
Fr[BLS12_381].fromHex"0x56624634b500a166dc86b01c0d477fa6ae4622f6a9152435034d2ff22a5ad9e1",
Fr[BLS12_381].fromHex"0x3291357ee558b50d483405417a0cbe39c8d5f51db3f32699fbd047e11279bb6e",
Fr[BLS12_381].fromHex"0x2155379d12180caa88f39a78f1aeb57867a665ae1fcadc91d7118f85cd96b8ad",
Fr[BLS12_381].fromHex"0x224262332d8acbf4473a2eef772c33d6cd7f2bd6d0711b7d08692405f3b70f10",
Fr[BLS12_381].fromHex"0x2d3056a530794f01652f717ae1c34bb0bb97a3bf30ce40fd6f421a7d8ef674fb",
Fr[BLS12_381].fromHex"0x520e587a724a6955df625e80d0adef90ad8e16e84419c750194e8c62ecb38d9d",
Fr[BLS12_381].fromHex"0x3e1c54bcb947035a57a6e07cb98de4a2f69e02d265e09d9fece7e0e39898d4b",
Fr[BLS12_381].fromHex"0x47c8b5817018af4fc70d0874b0691d4e46b3105f04db5844cd3979122d3ea03a",
Fr[BLS12_381].fromHex"0xabe6a5e5abcaa32f2d38f10fbb8d1bbe08fec7c86389beec6e7a6ffb08e3363",
Fr[BLS12_381].fromHex"0x73560252aa0655b25121af06a3b51e3cc631ffb2585a72db5616c57de0ec9eae",
Fr[BLS12_381].fromHex"0x291cf6d68823e6876e0bcd91ee76273072cf6a8029b7d7bc92cf4deb77bd779c",
Fr[BLS12_381].fromHex"0x19fe632fd3287390454dc1edc61a1a3c0ba12bb3da64ca5ce32ef844e11a51e",
Fr[BLS12_381].fromHex"0xa0a77a3b1980c0d116168bffbedc11d02c8118402867ddc531a11a0d2d75182",
Fr[BLS12_381].fromHex"0x23397a9300f8f98bece8ea224f31d25db94f1101b1d7a628e2d0a7869f0319ed",
Fr[BLS12_381].fromHex"0x52dd465e2f09425699e276b571905a7d6558e9e3f6ac7b41d7b688830a4f2089",
Fr[BLS12_381].fromHex"0xc83ea7744bf1bee8da40c1ef2bb459884d37b826214abc6474650359d8e211b",
Fr[BLS12_381].fromHex"0x2c6d4e4511657e1e1339a815da8b398fed3a181fabb30adc694341f608c9dd56",
Fr[BLS12_381].fromHex"0x4b5371495990693fad1715b02e5713b5f070bb00e28a193d63e7cb4906ffc93f"
]
func newTrustedSetupImpl(
EC: typedesc, secret: auto, length: int): seq[EC] =
result.setLen(length)
var P {.noInit.}: EC
P.fromAffine(EC.F.C.getGenerator($EC.G))
result[0] = P
for i in 1 ..< length:
P.scalarMul_minHammingWeight_windowed_vartime(secret, window = 5)
result[i] = P
func newTrustedSetupMonomial(EC: typedesc, secret: auto, length: int): seq[EC] =
let ts = newTrustedSetupImpl(projective(EC), secret, length)
result.setLen(length)
batchAffine(result.asUnchecked(), ts.asUnchecked(), length)
func getLagrange[EC](fftDesc: ECFFT_Descriptor[EC], monomial: seq[EC]): seq[EC] =
## Get a polynomial in lagrange basis from a polynomial in monomial form.
## The polynomial is also bit-reversal permuted.
result.setLen(monomial.len)
let status = fftDesc.ifft(result, monomial)
doAssert status == FFTS_Success, "Ethereum testing trusted setup failure during Lagrange form: " & $status
result.bit_reversal_permutation()
func newTrustedSetupLagrange[EC](fftDesc: ECFFT_Descriptor[EC], secret: auto, length: int): auto =
let ts = newTrustedSetupImpl(EC, secret, length)
let ts2 = fftDesc.getLagrange(ts)
let tsAffine = newSeq[affine(EC)](length)
batchAffine(tsAffine.asUnchecked(), ts2.asUnchecked(), length)
return tsAffine
proc padNUL64(f: FileStream) =
## Pad NUL bytes until we reach a 64-byte boundary
let pos = f.getPosition()
let posMod64 = pos and 63
let pad = default(array[63, byte])
if posMod64 != 0:
f.writeData(pad[0].unsafeAddr, 64-posMod64)
proc genEthereumKzgTestingTrustedSetup(filepath: string, secret: auto, length: int) =
## Generate an Ethereum KZG testing trusted setup
## in the Trusted Setup Interchange Format
## `length` is the length of the SRS 𝔾1
## the SRS 𝔾2 is fixed at 65.
## SRS 𝔾1 and roots of unity are bit-reversal permuted
static: doAssert cpuEndian == littleEndian, "Trusted setup creation is only supported on little-endian CPUs at the moment."
doAssert length.uint.isPowerOf2_vartime(), "Expected power of 2 but found length " & $length
let f = openFileStream(filepath, fmWrite)
f.write"∃⋃∈∎" # ∃⋃∈∎ in UTF-8. (magic bytes)
# v1.0
f.write 'v'
f.write uint8 1
f.write '.'
f.write uint8 0
# Protocol
f.write"ethereum_deneb_kzg"
# Curve
const curve = "bls12_381"
f.write curve
let padCurve = default(array[15 - curve.len, byte]) # zero-init padding
f.writeData(padCurve[0].unsafeAddr, padCurve.len)
# Number of fields
f.write uint8 3
block: # Metadata 1 - srs 𝔾1 points - bit-reversal permuted
var meta: array[32, byte]
meta[0..<12] = asBytes"srs_lagrange"
meta[15..<17] = asBytes"g1"
meta[17..<20] = asBytes"brp"
meta[20..<24] = toBytes(uint32 sizeof(ECP_ShortW_Aff[Fp[BLS12_381], G1]), littleEndian)
meta[24..<32] = toBytes(uint64 length, littleEndian)
f.write meta
block: # Metadata 2 - srs 𝔾2 points (hardcoded to 65)
var meta: array[32, byte]
meta[0..<12] = asBytes"srs_monomial"
meta[15..<17] = asBytes"g2"
meta[17..<20] = asBytes"asc"
meta[20..<24] = toBytes(uint32 sizeof(ECP_ShortW_Aff[Fp2[BLS12_381], G2]), littleEndian)
meta[24..<32] = toBytes(65'u64, littleEndian)
f.write meta
# Projective coordinates are slightly faster than jacobian on 𝔾1
var fftDesc = ECFFTDescriptor[ECP_ShortW_Prj[Fp[BLS12_381], G1]].new(
order = length, ctt_eth_kzg_fr_pow2_roots_of_unity[log2_vartime(length.uint)])
block: # Metadata 3 - roots of unity - bit-reversal permuted
var meta: array[32, byte]
meta[0..<11] = asBytes"roots_unity"
meta[15..<17] = asBytes"fr"
meta[17..<20] = asBytes"brp"
meta[20..<24] = toBytes(uint32 sizeof(fftDesc.rootsOfUnity[0]), littleEndian)
meta[24..<32] = toBytes(fftDesc.order.uint64, littleEndian)
f.write meta
f.padNUL64()
block: # Data 1 - srs 𝔾1 points - bit-reversal permuted
let ts1 = fftDesc.newTrustedSetupLagrange(secret, length)
# Raw dump requires little-endian
f.writeData(ts1[0].unsafeAddr, sizeof(ts1[0]) * length)
f.padNUL64()
block: # Data 2 - srs 𝔾2 points - monomial form, ascending powers of τ
const g2Length = 65
let ts2 = ECP_ShortW_Aff[Fp2[BLS12_381], G2].newTrustedSetupMonomial(secret, g2Length)
# Raw dump requires little-endian
f.writeData(ts2[0].unsafeAddr, sizeof(ts2[0]) * g2Length)
f.padNUL64()
bit_reversal_permutation(fftDesc.rootsOfUnity.toOpenArray(0, fftDesc.order-1))
block: # Data 3 - roots of unity - bit-reversal permuted
# Raw dump requires little-endian
f.writeData(fftDesc.rootsOfUnity, sizeof(fftDesc.rootsOfUnity[0]) * fftDesc.order)
when isMainModule:
import ../math/io/io_bigints
let testSecret = BigInt[11].fromUint(1337'u64)
genEthereumKzgTestingTrustedSetup("trusted_setup_ethereum_kzg_test_minimal.tsif", testSecret, 4)
genEthereumKzgTestingTrustedSetup("trusted_setup_ethereum_kzg_test_mainnet.tsif", testSecret, 4096)

View File

@ -18,43 +18,39 @@ int main(){
// Initialize the runtime. For Constantine, it populates the CPU runtime detection dispatch.
ctt_eth_bls_init_NimMain();
ctt_eth_bls_status status;
// Protocol and deserialization statuses
ctt_eth_bls_status bls_status;
ctt_codec_scalar_status scalar_status;
ctt_codec_ecc_status ecc_status;
// Declare an example insecure non-cryptographically random non-secret key. DO NOT USE IN PRODUCTION.
byte raw_seckey[32] = "Security pb becomes key mgmt pb!";
ctt_eth_bls_seckey seckey;
status = ctt_eth_bls_deserialize_seckey(&seckey, raw_seckey);
if (status != cttBLS_Success) {
printf("Secret key deserialization failure: status %d - %s\n", status, ctt_eth_bls_status_to_string(status));
scalar_status = ctt_eth_bls_deserialize_seckey(&seckey, raw_seckey);
if (scalar_status != cttCodecScalar_Success) {
printf(
"Secret key deserialization failure: status %d - %s\n",
scalar_status,
ctt_codec_scalar_status_to_string(scalar_status)
);
exit(1);
}
// Derive the matching public key
ctt_eth_bls_pubkey pubkey;
status = ctt_eth_bls_derive_pubkey(&pubkey, &seckey);
if (status != cttBLS_Success) {
printf("Public key derivation failure: status %d - %s\n", status, ctt_eth_bls_status_to_string(status));
exit(1);
}
ctt_eth_bls_derive_pubkey(&pubkey, &seckey);
// Sign a message
byte message[32];
ctt_eth_bls_signature sig;
ctt_eth_bls_sha256_hash(message, "Mr F was here", 13, /* clear_memory = */ 0);
status = ctt_eth_bls_sign(&sig, &seckey, message, 32);
if (status != cttBLS_Success) {
printf("Message signing failure: status %d - %s\n", status, ctt_eth_bls_status_to_string(status));
exit(1);
}
ctt_eth_bls_sign(&sig, &seckey, message, 32);
// Verify that a signature is valid for a message under the provided public key
status = ctt_eth_bls_verify(&pubkey, message, 32, &sig);
if (status != cttBLS_Success) {
printf("Signature verification failure: status %d - %s\n", status, ctt_eth_bls_status_to_string(status));
bls_status = ctt_eth_bls_verify(&pubkey, message, 32, &sig);
if (bls_status != cttBLS_Success) {
printf("Signature verification failure: status %d - %s\n", bls_status, ctt_eth_bls_status_to_string(bls_status));
exit(1);
}

View File

@ -28,6 +28,12 @@ typedef __UINT64_TYPE__ uint64_t;
#include <stdint.h>
#endif
#if defined(__STDC_VERSION__) && __STDC_VERSION__>=199901
# define bool _Bool
#else
# define bool unsigned char
#endif
typedef size_t secret_word;
typedef size_t secret_bool;
typedef uint8_t byte;
@ -51,8 +57,8 @@ typedef struct { bls12381_fp2 x, y, z; } bls12381_ec_g2_prj;
*/
void ctt_bls12381_init_NimMain(void);
void ctt_bls12381_fr_unmarshalBE(bls12381_fr* dst, const byte src[], ptrdiff_t src_len);
void ctt_bls12381_fr_marshalBE(byte dst[], ptrdiff_t dst_len, const bls12381_fr* src);
bool ctt_bls12381_fr_unmarshalBE(bls12381_fr* dst, const byte src[], ptrdiff_t src_len) __attribute__((warn_unused_result));
bool ctt_bls12381_fr_marshalBE(byte dst[], ptrdiff_t dst_len, const bls12381_fr* src) __attribute__((warn_unused_result));
secret_bool ctt_bls12381_fr_is_eq(const bls12381_fr* a, const bls12381_fr* b);
secret_bool ctt_bls12381_fr_is_zero(const bls12381_fr* a);
secret_bool ctt_bls12381_fr_is_one(const bls12381_fr* a);
@ -82,8 +88,8 @@ void ctt_bls12381_fr_cset_one(bls12381_fr* a, const secret_bool ctl);
void ctt_bls12381_fr_cneg_in_place(bls12381_fr* a, const secret_bool ctl);
void ctt_bls12381_fr_cadd_in_place(bls12381_fr* a, const bls12381_fr* b, const secret_bool ctl);
void ctt_bls12381_fr_csub_in_place(bls12381_fr* a, const bls12381_fr* b, const secret_bool ctl);
void ctt_bls12381_fp_unmarshalBE(bls12381_fp* dst, const byte src[], ptrdiff_t src_len);
void ctt_bls12381_fp_marshalBE(byte dst[], ptrdiff_t dst_len, const bls12381_fp* src);
bool ctt_bls12381_fp_unmarshalBE(bls12381_fp* dst, const byte src[], ptrdiff_t src_len) __attribute__((warn_unused_result));
bool ctt_bls12381_fp_marshalBE(byte dst[], ptrdiff_t dst_len, const bls12381_fp* src) __attribute__((warn_unused_result));
secret_bool ctt_bls12381_fp_is_eq(const bls12381_fp* a, const bls12381_fp* b);
secret_bool ctt_bls12381_fp_is_zero(const bls12381_fp* a);
secret_bool ctt_bls12381_fp_is_one(const bls12381_fp* a);
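// Usage sketch for the bool-returning codecs: with warn_unused_result, every
// (un)marshal is expected to be checked explicitly. `buf` (a 32-byte big-endian
// scalar) and a prior call to ctt_bls12381_init_NimMain() are assumptions here.
//   bls12381_fr x;
//   if (!ctt_bls12381_fr_unmarshalBE(&x, buf, 32)) { /* reject malformed input */ }
//   byte out[32];
//   if (!ctt_bls12381_fr_marshalBE(out, 32, &x)) { /* handle encoding failure */ }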


@ -74,13 +74,7 @@ typedef struct { struct ctt_eth_bls_fp2 x, y; } ctt_eth_bls_signature;
typedef enum __attribute__((__packed__)) {
cttBLS_Success,
cttBLS_VerificationFailure,
cttBLS_InvalidEncoding,
cttBLS_CoordinateGreaterOrEqualThanModulus,
cttBLS_PointAtInfinity,
cttBLS_PointNotOnCurve,
cttBLS_PointNotInSubgroup,
cttBLS_ZeroSecretKey,
cttBLS_SecretKeyLargerThanCurveOrder,
cttBLS_ZeroLengthAggregation,
cttBLS_InconsistentLengthsOfInputs,
} ctt_eth_bls_status;
@ -89,13 +83,7 @@ static const char* ctt_eth_bls_status_to_string(ctt_eth_bls_status status) {
static const char* const statuses[] = {
"cttBLS_Success",
"cttBLS_VerificationFailure",
"cttBLS_InvalidEncoding",
"cttBLS_CoordinateGreaterOrEqualThanModulus",
"cttBLS_PointAtInfinity",
"cttBLS_PointNotOnCurve",
"cttBLS_PointNotInSubgroup",
"cttBLS_ZeroSecretKey",
"cttBLS_SecretKeyLargerThanCurveOrder",
"cttBLS_ZeroLengthAggregation",
"cttBLS_InconsistentLengthsOfInputs",
};
@ -106,6 +94,50 @@ static const char* ctt_eth_bls_status_to_string(ctt_eth_bls_status status) {
return "cttBLS_InvalidStatusCode";
}
typedef enum __attribute__((__packed__)) {
cttCodecScalar_Success,
cttCodecScalar_Zero,
cttCodecScalar_ScalarLargerThanCurveOrder,
} ctt_codec_scalar_status;
static const char* ctt_codec_scalar_status_to_string(ctt_codec_scalar_status status) {
static const char* const statuses[] = {
"cttCodecScalar_Success",
"cttCodecScalar_Zero",
"cttCodecScalar_ScalarLargerThanCurveOrder",
};
size_t length = sizeof statuses / sizeof *statuses;
if (0 <= status && status < length) {
return statuses[status];
}
return "cttCodecScalar_InvalidStatusCode";
}
typedef enum __attribute__((__packed__)) {
cttCodecEcc_Success,
cttCodecEcc_InvalidEncoding,
cttCodecEcc_CoordinateGreaterThanOrEqualModulus,
cttCodecEcc_PointNotOnCurve,
cttCodecEcc_PointNotInSubgroup,
cttCodecEcc_PointAtInfinity,
} ctt_codec_ecc_status;
static const char* ctt_codec_ecc_status_to_string(ctt_codec_ecc_status status) {
static const char* const statuses[] = {
"cttCodecEcc_Success",
"cttCodecEcc_InvalidEncoding",
"cttCodecEcc_CoordinateGreaterThanOrEqualModulus",
"cttCodecEcc_PointNotOnCurve",
"cttCodecEcc_PointNotInSubgroup",
"cttCodecEcc_PointAtInfinity",
};
size_t length = sizeof statuses / sizeof *statuses;
if (0 <= status && status < length) {
return statuses[status];
}
return "cttCodecEcc_InvalidStatusCode";
}
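// Usage sketch: each status family now has its own stringifier, so codec failures
// are reported separately from protocol-level BLS statuses, e.g.
//   printf("decode error: %s\n", ctt_codec_ecc_status_to_string(cttCodecEcc_PointNotOnCurve));
//   printf("seckey error: %s\n", ctt_codec_scalar_status_to_string(cttCodecScalar_Zero));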
// Initialization
// ------------------------------------------------------------------------------------------------
@ -171,13 +203,13 @@ void ctt_eth_bls_sha256_hash(byte digest[32], const byte* message, ptrdiff_t mes
// Comparisons
// ------------------------------------------------------------------------------------------------
ctt_pure bool ctt_eth_bls_pubkey_is_zero(const ctt_eth_bls_pubkey* pubkey);
ctt_pure bool ctt_eth_bls_signature_is_zero(const ctt_eth_bls_signature* sig);
ctt_pure bool ctt_eth_bls_pubkey_is_zero(const ctt_eth_bls_pubkey* pubkey) __attribute__((warn_unused_result));
ctt_pure bool ctt_eth_bls_signature_is_zero(const ctt_eth_bls_signature* sig) __attribute__((warn_unused_result));
ctt_pure bool ctt_eth_bls_pubkeys_are_equal(const ctt_eth_bls_pubkey* a,
const ctt_eth_bls_pubkey* b);
const ctt_eth_bls_pubkey* b) __attribute__((warn_unused_result));
ctt_pure bool ctt_eth_bls_signatures_are_equal(const ctt_eth_bls_signature* a,
const ctt_eth_bls_signature* b);
const ctt_eth_bls_signature* b) __attribute__((warn_unused_result));
// Input validation
// ------------------------------------------------------------------------------------------------
@ -187,39 +219,39 @@ ctt_pure bool ctt_eth_bls_signatures_are_equal(const ctt_eth_bls_signature* a,
* Regarding timing attacks, this will leak timing information only if the key is invalid.
 * Namely, whether the secret key is 0 or larger than the curve order.
*/
ctt_pure ctt_eth_bls_status ctt_eth_bls_validate_seckey(const ctt_eth_bls_seckey* seckey);
ctt_pure ctt_codec_scalar_status ctt_eth_bls_validate_seckey(const ctt_eth_bls_seckey* seckey) __attribute__((warn_unused_result));
/** Validate the public key.
*
* This is an expensive operation that can be cached.
*/
ctt_pure ctt_eth_bls_status ctt_eth_bls_validate_pubkey(const ctt_eth_bls_pubkey* pubkey);
ctt_pure ctt_codec_ecc_status ctt_eth_bls_validate_pubkey(const ctt_eth_bls_pubkey* pubkey) __attribute__((warn_unused_result));
/** Validate the signature.
*
* This is an expensive operation that can be cached.
*/
ctt_pure ctt_eth_bls_status ctt_eth_bls_validate_signature(const ctt_eth_bls_signature* pubkey);
ctt_pure ctt_codec_ecc_status ctt_eth_bls_validate_signature(const ctt_eth_bls_signature* sig) __attribute__((warn_unused_result));
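// Validation sketch: the intended split is cheap deserialization first, then the
// expensive (cacheable) validation. `raw_pk`, a 48-byte compressed encoding, is an
// assumption of this sketch.
//   ctt_eth_bls_pubkey pk;
//   if (ctt_eth_bls_deserialize_pubkey_compressed_unchecked(&pk, raw_pk) != cttCodecEcc_Success) { /* bad encoding */ }
//   if (ctt_eth_bls_validate_pubkey(&pk) != cttCodecEcc_Success) { /* e.g. point at infinity or not in the subgroup */ }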
// Codecs
// ------------------------------------------------------------------------------------------------
/** Serialize a secret key
*
* Returns cttBLS_Success if successful
* Returns cttCodecScalar_Success if successful
*/
ctt_eth_bls_status ctt_eth_bls_serialize_seckey(byte dst[32], const ctt_eth_bls_seckey* seckey);
ctt_codec_scalar_status ctt_eth_bls_serialize_seckey(byte dst[32], const ctt_eth_bls_seckey* seckey) __attribute__((warn_unused_result));
/** Serialize a public key in compressed (Zcash) format
*
* Returns cttBLS_Success if successful
* Returns cttCodecEcc_Success if successful
*/
ctt_eth_bls_status ctt_eth_bls_serialize_pubkey_compressed(byte dst[48], const ctt_eth_bls_pubkey* pubkey);
ctt_codec_ecc_status ctt_eth_bls_serialize_pubkey_compressed(byte dst[48], const ctt_eth_bls_pubkey* pubkey) __attribute__((warn_unused_result));
/** Serialize a signature in compressed (Zcash) format
*
* Returns cttBLS_Success if successful
* Returns cttCodecEcc_Success if successful
*/
ctt_eth_bls_status ctt_eth_bls_serialize_signature_compressed(byte dst[96], const ctt_eth_bls_signature* sig);
ctt_codec_ecc_status ctt_eth_bls_serialize_signature_compressed(byte dst[96], const ctt_eth_bls_signature* sig) __attribute__((warn_unused_result));
/** Deserialize a secret key
* This also validates the secret key.
@ -227,7 +259,7 @@ ctt_eth_bls_status ctt_eth_bls_serialize_signature_compressed(byte dst[96], cons
 * This is protected against side-channels unless your key is invalid.
 * In that case it will leak whether it's all zeros or larger than the curve order.
*/
ctt_eth_bls_status ctt_eth_bls_deserialize_seckey(ctt_eth_bls_seckey* seckey, const byte src[32]);
ctt_codec_scalar_status ctt_eth_bls_deserialize_seckey(ctt_eth_bls_seckey* seckey, const byte src[32]) __attribute__((warn_unused_result));
/** Deserialize a public key in compressed (Zcash) format.
* This does not validate the public key.
@ -238,16 +270,16 @@ ctt_eth_bls_status ctt_eth_bls_deserialize_seckey(ctt_eth_bls_seckey* seckey, co
* This procedure skips the very expensive subgroup checks.
 * Not checking subgroup membership exposes a protocol to small subgroup attacks.
*
* Returns cttBLS_Success if successful
* Returns cttCodecEcc_Success if successful
*/
ctt_eth_bls_status ctt_eth_bls_deserialize_pubkey_compressed_unchecked(ctt_eth_bls_pubkey* pubkey, const byte src[48]);
ctt_codec_ecc_status ctt_eth_bls_deserialize_pubkey_compressed_unchecked(ctt_eth_bls_pubkey* pubkey, const byte src[48]) __attribute__((warn_unused_result));
/** Deserialize a public key in compressed (Zcash) format.
* This also validates the public key.
*
* Returns cttBLS_Success if successful
* Returns cttCodecEcc_Success if successful
*/
ctt_eth_bls_status ctt_eth_bls_deserialize_pubkey_compressed(ctt_eth_bls_pubkey* pubkey, const byte src[48]);
ctt_codec_ecc_status ctt_eth_bls_deserialize_pubkey_compressed(ctt_eth_bls_pubkey* pubkey, const byte src[48]) __attribute__((warn_unused_result));
/** Deserialize a signature in compressed (Zcash) format.
* This does not validate the signature.
@ -258,16 +290,16 @@ ctt_eth_bls_status ctt_eth_bls_deserialize_pubkey_compressed(ctt_eth_bls_pubkey*
* This procedure skips the very expensive subgroup checks.
 * Not checking subgroup membership exposes a protocol to small subgroup attacks.
*
* Returns cttBLS_Success if successful
* Returns cttCodecEcc_Success if successful
*/
ctt_eth_bls_status ctt_eth_bls_deserialize_signature_compressed_unchecked(ctt_eth_bls_signature* sig, const byte src[96]);
ctt_codec_ecc_status ctt_eth_bls_deserialize_signature_compressed_unchecked(ctt_eth_bls_signature* sig, const byte src[96]) __attribute__((warn_unused_result));
/** Deserialize a signature in compressed (Zcash) format.
* This also validates the signature.
*
* Returns cttBLS_Success if successful
* Returns cttCodecEcc_Success if successful
*/
ctt_eth_bls_status ctt_eth_bls_deserialize_signature_compressed(ctt_eth_bls_signature* sig, const byte src[96]);
ctt_codec_ecc_status ctt_eth_bls_deserialize_signature_compressed(ctt_eth_bls_signature* sig, const byte src[96]) __attribute__((warn_unused_result));
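// Roundtrip sketch with the codec statuses; `pk` is assumed to be an existing,
// valid public key.
//   byte buf[48];
//   if (ctt_eth_bls_serialize_pubkey_compressed(buf, &pk) != cttCodecEcc_Success) { /* ... */ }
//   ctt_eth_bls_pubkey pk2;
//   if (ctt_eth_bls_deserialize_pubkey_compressed(&pk2, buf) != cttCodecEcc_Success) { /* ... */ }
//   bool same = ctt_eth_bls_pubkeys_are_equal(&pk, &pk2);   // expected to hold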
// BLS signatures
// ------------------------------------------------------------------------------------------------
@ -278,7 +310,7 @@ ctt_eth_bls_status ctt_eth_bls_deserialize_signature_compressed(ctt_eth_bls_sign
* - A valid secret key will only leak that it is valid.
* - An invalid secret key will leak whether it's all zero or larger than the curve order.
*/
ctt_eth_bls_status ctt_eth_bls_derive_pubkey(ctt_eth_bls_pubkey* pubkey, const ctt_eth_bls_seckey* seckey);
void ctt_eth_bls_derive_pubkey(ctt_eth_bls_pubkey* pubkey, const ctt_eth_bls_seckey* seckey);
/** Produce a signature for the message under the specified secret key
* Signature is on BLS12-381 G2 (and public key on G1)
@ -298,9 +330,9 @@ ctt_eth_bls_status ctt_eth_bls_derive_pubkey(ctt_eth_bls_pubkey* pubkey, const c
* - A valid secret key will only leak that it is valid.
* - An invalid secret key will leak whether it's all zero or larger than the curve order.
*/
ctt_eth_bls_status ctt_eth_bls_sign(ctt_eth_bls_signature* sig,
const ctt_eth_bls_seckey* seckey,
const byte* message, ptrdiff_t message_len);
void ctt_eth_bls_sign(ctt_eth_bls_signature* sig,
const ctt_eth_bls_seckey* seckey,
const byte* message, ptrdiff_t message_len);
/** Check that a signature is valid for a message
* under the provided public key.
@ -323,7 +355,7 @@ ctt_eth_bls_status ctt_eth_bls_sign(ctt_eth_bls_signature* sig,
*/
ctt_pure ctt_eth_bls_status ctt_eth_bls_verify(const ctt_eth_bls_pubkey* pubkey,
const byte* message, ptrdiff_t message_len,
const ctt_eth_bls_signature* sig);
const ctt_eth_bls_signature* sig) __attribute__((warn_unused_result));
// TODO: API for pubkeys and signature aggregation. Return a bool or a status code or nothing?
@ -344,7 +376,7 @@ ctt_pure ctt_eth_bls_status ctt_eth_bls_verify(const ctt_eth_bls_pubkey* pubkey,
*/
ctt_pure ctt_eth_bls_status ctt_eth_bls_fast_aggregate_verify(const ctt_eth_bls_pubkey pubkeys[], ptrdiff_t pubkeys_len,
const byte* message, ptrdiff_t message_len,
const ctt_eth_bls_signature* aggregate_sig);
const ctt_eth_bls_signature* aggregate_sig) __attribute__((warn_unused_result));
#ifdef __cplusplus
}
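// Aggregate-verification sketch: all of pubkeys[0..n) signed the same message and
// agg_sig is their aggregated signature (the aggregation API itself is still marked
// TODO above, so agg_sig is assumed to come from elsewhere).
//   ctt_eth_bls_status s = ctt_eth_bls_fast_aggregate_verify(pubkeys, n, message, message_len, &agg_sig);
//   if (s != cttBLS_Success) { /* reject */ }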


@ -28,6 +28,12 @@ typedef __UINT64_TYPE__ uint64_t;
#include <stdint.h>
#endif
#if defined(__STDC_VERSION__) && __STDC_VERSION__>=199901
# define bool _Bool
#else
# define bool unsigned char
#endif
typedef size_t secret_word;
typedef size_t secret_bool;
typedef uint8_t byte;
@ -52,8 +58,8 @@ typedef struct { vesta_fp x, y, z; } vesta_ec_prj;
*/
void ctt_pasta_init_NimMain(void);
void ctt_pallas_fr_unmarshalBE(pallas_fr* dst, const byte src[], ptrdiff_t src_len);
void ctt_pallas_fr_marshalBE(byte dst[], ptrdiff_t dst_len, const pallas_fr* src);
bool ctt_pallas_fr_unmarshalBE(pallas_fr* dst, const byte src[], ptrdiff_t src_len) __attribute__((warn_unused_result));
bool ctt_pallas_fr_marshalBE(byte dst[], ptrdiff_t dst_len, const pallas_fr* src) __attribute__((warn_unused_result));
secret_bool ctt_pallas_fr_is_eq(const pallas_fr* a, const pallas_fr* b);
secret_bool ctt_pallas_fr_is_zero(const pallas_fr* a);
secret_bool ctt_pallas_fr_is_one(const pallas_fr* a);
@ -83,8 +89,8 @@ void ctt_pallas_fr_cset_one(pallas_fr* a, const secret_bool ctl);
void ctt_pallas_fr_cneg_in_place(pallas_fr* a, const secret_bool ctl);
void ctt_pallas_fr_cadd_in_place(pallas_fr* a, const pallas_fr* b, const secret_bool ctl);
void ctt_pallas_fr_csub_in_place(pallas_fr* a, const pallas_fr* b, const secret_bool ctl);
void ctt_pallas_fp_unmarshalBE(pallas_fp* dst, const byte src[], ptrdiff_t src_len);
void ctt_pallas_fp_marshalBE(byte dst[], ptrdiff_t dst_len, const pallas_fp* src);
bool ctt_pallas_fp_unmarshalBE(pallas_fp* dst, const byte src[], ptrdiff_t src_len) __attribute__((warn_unused_result));
bool ctt_pallas_fp_marshalBE(byte dst[], ptrdiff_t dst_len, const pallas_fp* src) __attribute__((warn_unused_result));
secret_bool ctt_pallas_fp_is_eq(const pallas_fp* a, const pallas_fp* b);
secret_bool ctt_pallas_fp_is_zero(const pallas_fp* a);
secret_bool ctt_pallas_fp_is_one(const pallas_fp* a);
@ -122,8 +128,8 @@ secret_bool ctt_pallas_fp_sqrt_if_square_in_place(pallas_fp* a);
void ctt_pallas_fp_sqrt_invsqrt(pallas_fp* sqrt, pallas_fp* invsqrt, const pallas_fp* a);
secret_bool ctt_pallas_fp_sqrt_invsqrt_if_square(pallas_fp* sqrt, pallas_fp* invsqrt, const pallas_fp* a);
secret_bool ctt_pallas_fp_sqrt_ratio_if_square(pallas_fp* r, const pallas_fp* u, const pallas_fp* v);
void ctt_vesta_fr_unmarshalBE(vesta_fr* dst, const byte src[], ptrdiff_t src_len);
void ctt_vesta_fr_marshalBE(byte dst[], ptrdiff_t dst_len, const vesta_fr* src);
bool ctt_vesta_fr_unmarshalBE(vesta_fr* dst, const byte src[], ptrdiff_t src_len) __attribute__((warn_unused_result));
bool ctt_vesta_fr_marshalBE(byte dst[], ptrdiff_t dst_len, const vesta_fr* src) __attribute__((warn_unused_result));
secret_bool ctt_vesta_fr_is_eq(const vesta_fr* a, const vesta_fr* b);
secret_bool ctt_vesta_fr_is_zero(const vesta_fr* a);
secret_bool ctt_vesta_fr_is_one(const vesta_fr* a);
@ -153,8 +159,8 @@ void ctt_vesta_fr_cset_one(vesta_fr* a, const secret_bool ctl);
void ctt_vesta_fr_cneg_in_place(vesta_fr* a, const secret_bool ctl);
void ctt_vesta_fr_cadd_in_place(vesta_fr* a, const vesta_fr* b, const secret_bool ctl);
void ctt_vesta_fr_csub_in_place(vesta_fr* a, const vesta_fr* b, const secret_bool ctl);
void ctt_vesta_fp_unmarshalBE(vesta_fp* dst, const byte src[], ptrdiff_t src_len);
void ctt_vesta_fp_marshalBE(byte dst[], ptrdiff_t dst_len, const vesta_fp* src);
bool ctt_vesta_fp_unmarshalBE(vesta_fp* dst, const byte src[], ptrdiff_t src_len) __attribute__((warn_unused_result));
bool ctt_vesta_fp_marshalBE(byte dst[], ptrdiff_t dst_len, const vesta_fp* src) __attribute__((warn_unused_result));
secret_bool ctt_vesta_fp_is_eq(const vesta_fp* a, const vesta_fp* b);
secret_bool ctt_vesta_fp_is_zero(const vesta_fp* a);
secret_bool ctt_vesta_fp_is_one(const vesta_fp* a);


@ -192,8 +192,6 @@ proc init*(T: type FFTDescriptor, maxScale: uint8): T =
#
# ############################################################
{.experimental: "views".}
when isMainModule:
import
std/[times, monotimes, strformat],


@ -11,7 +11,7 @@ import
../../constantine/math/config/curves,
../../constantine/math/arithmetic,
../../constantine/math/ec_shortweierstrass,
../../constantine/math/io/[io_fields, io_ec],
../../constantine/math/io/[io_fields, io_ec, io_bigints],
# Research
./strided_views,
./fft_lut
@ -31,12 +31,12 @@ import
# - https://github.com/zkcrypto/bellman/blob/10c5010/src/domain.rs#L272-L315
# - Modern Computer Arithmetic, Brent and Zimmermann, p53 algorithm 2.2
# https://members.loria.fr/PZimmermann/mca/mca-cup-0.5.9.pdf
# ############################################################
#
# Finite-Field Fast Fourier Transform
#
# ############################################################
#
# This is an unoptimized research implementation of
# the finite-field Fast Fourier Transform
@ -176,7 +176,7 @@ func ifft*[EC](
var invLen {.noInit.}: Fr[EC.F.C]
invLen.fromUint(vals.len.uint64)
invLen.inv()
invLen.inv_vartime()
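# (inv_vartime is fine here: the vector length is public, not secret data)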
let inv = invLen.toBig()
for i in 0..< output.len:
@ -201,8 +201,6 @@ proc init*(T: type FFTDescriptor, maxScale: uint8): T =
#
# ############################################################
{.experimental: "views".}
when isMainModule:
import
std/[times, monotimes, strformat],
@ -230,7 +228,7 @@ when isMainModule:
var res = newSeq[EC_G1](data.len)
let ifftOk = ifft(fftDesc, res, coefs)
doAssert ifftOk == FFTS_Success
# display("res", 0, coefs)
# display("res", 0, res)
for i in 0 ..< res.len:
if bool(res[i] != data[i]):


@ -17,7 +17,7 @@ import
# we can precompute everything in Sage
# and auto-generate the file.
const BLS12_381_Fr_primitive_root = 5
const BLS12_381_Fr_primitive_root = 7
func buildRootLUT(F: type Fr): array[32, F] =
## [pow(PRIMITIVE_ROOT, (MODULUS - 1) // (2**i), MODULUS) for i in range(32)]
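## e.g. with PRIMITIVE_ROOT = 7 on BLS12-381 Fr: result[0] = 1, result[1] = r - 1
## (i.e. -1 mod r), and in general result[i]^(2^i) == 1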


@ -18,15 +18,13 @@
# Or the minimal tensor implementation challenge:
# https://github.com/SimonDanisch/julia-challenge/blob/b8ed3b6/nim/nim_sol_mratsim.nim#L4-L26
{.experimental: "views".}
type
View*[T] = object
## A strided view over an (unowned) data buffer
len*: int
stride: int
offset: int
data: lent UncheckedArray[T]
data: ptr UncheckedArray[T]
func `[]`*[T](v: View[T], idx: int): lent T {.inline.} =
v.data[v.offset + idx*v.stride]
@ -43,7 +41,7 @@ func toView*[T](oa: openArray[T]): View[T] {.inline.} =
result.len = oa.len
result.stride = 1
result.offset = 0
result.data = cast[lent UncheckedArray[T]](oa[0].unsafeAddr)
result.data = cast[ptr UncheckedArray[T]](oa[0].unsafeAddr)
iterator items*[T](v: View[T]): lent T =
var cur = v.offset
@ -168,16 +166,16 @@ func reversed*(v: View): View {.inline.} =
import strformat, strutils
func display*[F](name: string, indent: int, oa: openArray[F]) =
debugEcho indent(name & ", openarray of " & $F & " of length " & $oa.len, indent)
debugEcho strutils.indent(name & ", openarray of " & $F & " of length " & $oa.len, indent)
for i in 0 ..< oa.len:
debugEcho indent(&" {i:>2}: {oa[i].toHex()}", indent)
debugEcho indent(name & " " & $F & " -- FIN\n", indent)
debugEcho strutils.indent(&" {i:>2}: {oa[i].toHex()}", indent)
debugEcho strutils.indent(name & " " & $F & " -- FIN\n", indent)
func display*[F](name: string, indent: int, v: View[F]) =
debugEcho indent(name & ", view of " & $F & " of length " & $v.len, indent)
debugEcho strutils.indent(name & ", view of " & $F & " of length " & $v.len, indent)
for i in 0 ..< v.len:
debugEcho indent(&" {i:>2}: {v[i].toHex()}", indent)
debugEcho indent(name & " " & $F & " -- FIN\n", indent)
debugEcho strutils.indent(&" {i:>2}: {v[i].toHex()}", indent)
debugEcho strutils.indent(name & " " & $F & " -- FIN\n", indent)
# ############################################################
#


@ -267,7 +267,7 @@ if __name__ == "__main__":
f.write('\n\n')
f.write(inspect.cleandoc(f"""
import
../config/curves,,
../config/curves,
../io/[io_bigints, io_fields]
# {curve} G1

sage/ethereum_kzg.sage (new file, +109 lines)

@ -0,0 +1,109 @@
#!/usr/bin/sage
# vim: syntax=python
# vim: set ts=2 sw=2 et:
# Constantine
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2020-Present Mamy André-Ratsimbazafy
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# ############################################################
#
# Pairing constants
#
# ############################################################
# Imports
# ---------------------------------------------------------
import os
import inspect, textwrap
# Working directory
# ---------------------------------------------------------
os.chdir(os.path.dirname(__file__))
# Sage imports
# ---------------------------------------------------------
# Accelerate arithmetic by accepting probabilistic proofs
from sage.structure.proof.all import arithmetic
arithmetic(False)
load('curves.sage')
# Roots of unity
# ---------------------------------------------------------
def gen_pow2_roots_of_unity(field, num_powers):
"""
Generate the 2^i'th roots of unity
with i in [0, num_powers)
"""
# Find a primitive root of the finite field of modulus q
  # i.e. root^k != 1 for all 0 < k < q-1, so powers of root generate the whole multiplicative group.
# https://crypto.stanford.edu/pbc/notes/numbertheory/gen.html
#
  # Usage: see how ω is used for polynomials in evaluation form:
# https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html
primitive_root = field.multiplicative_generator()
assert primitive_root == 7, (
'The ref implementation c-kzg-4844 uses 7.'
    + ' Any primitive root is correct, but the order of coefficients'
    + ' won\'t be the same, which makes debugging harder.'
)
return [primitive_root^((field.characteristic()-1)//(1 << i)) for i in range(num_powers)]
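# Sanity check (sketch), with Fr = GF(r) and r the BLS12-381 Fr order as in the
# CLI section below:
#   roots = gen_pow2_roots_of_unity(Fr, 32)
#   assert roots[0] == 1 and roots[1] == r - 1    # 7^(r-1) = 1 and 7^((r-1)/2) = -1
#   assert all(roots[i]^(2^i) == 1 for i in range(32))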
# Dump
# ---------------------------------------------------------
def dumpConst(name, inner):
result = f'const {name}* = (\n'
result += inner
result += ')\n'
return result
def dumpRoots(vec):
result = f' # primitive_root⁽ᵐᵒᵈᵘˡᵘˢ⁻¹⁾/⁽²^ⁱ⁾ for i in [0, {len(vec)})\n'
lastRow = len(vec) - 1
for rowID, val in enumerate(vec):
result += ' '
result += f'BigInt[{max(1, int(val).bit_length())}].fromHex"0x{Integer(int(val)).hex()}"'
result += ',\n' if rowID != lastRow else '\n'
return result
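# For example, the first emitted row (the 2^0-th root of unity, i.e. 1) renders as:
#     BigInt[1].fromHex"0x1",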
# CLI
# ---------------------------------------------------------
if __name__ == "__main__":
with open(f'ethereum_kzg_constants.nim', 'w') as f:
f.write(copyright())
f.write('\n\n')
f.write(inspect.cleandoc(f"""
import
../config/curves,
../io/io_bigints
# Roots of unity
# ------------------------------------------------------------
"""))
f.write('\n\n')
r = Curves['BLS12_381']['field']['order']
Fr = GF(r)
f.write(dumpConst(
'ctt_eth_kzg_bls12_381_fr_pow2_roots_of_unity',
dumpRoots(gen_pow2_roots_of_unity(Fr, 32))
))
f.write('\n\n')


@ -13,7 +13,8 @@ import
gmp,
# Internal
../../constantine/math/[arithmetic, io/io_bigints],
../../constantine/platforms/[primitives, codecs],
../../constantine/platforms/primitives,
../../constantine/serialization/codecs,
# Test utilities
../../helpers/prng_unsafe


@ -14,11 +14,6 @@ import
std/[times, strformat],
gmp
# debug
import
../../constantine/platforms/codecs,
../../constantine/math/io/io_bigints
const # https://gmplib.org/manual/Integer-Import-and-Export.html
GMP_WordLittleEndian = -1'i32
GMP_WordNativeEndian = 0'i32


@ -16,7 +16,7 @@ import
../../constantine/math/arithmetic,
../../constantine/platforms/abstractions,
# Test utilities
../../constantine/platforms/codecs,
../../constantine/serialization/codecs,
../../helpers/prng_unsafe
echo "\n------------------------------------------------------\n"


@ -12,7 +12,8 @@ import
# Third-party
gmp,
# Internal
../../constantine/platforms/[abstractions, codecs],
../../constantine/platforms/abstractions,
../../constantine/serialization/codecs,
../../constantine/math/io/[io_bigints, io_fields],
../../constantine/math/arithmetic,
../../constantine/math/config/curves,


@ -35,9 +35,6 @@ func random_bigint*(rng: var RngState, curve: static Curve, gen: static RandomGe
else:
rng.random_long01Seq(matchingBigInt(curve))
# debug
import std/strutils
proc testRoundtrip(curve: static Curve, gen: static RandomGen) =
const bits = curve.getCurveBitwidth()
const Excess = 2


@ -0,0 +1,10 @@
# KZG polynomial commitment for Ethereum's Deneb hardfork
Test vector sources:
- https://github.com/ethereum/consensus-spec-tests/tree/v1.3.0/tests/general/deneb/kzg
- https://github.com/ethereum/c-kzg-4844/blob/v0.2.0/tests/
Trusted setup sources:
- Minimal preset: https://github.com/ethereum/consensus-specs/blob/v1.3.0/presets/minimal/trusted_setups/testing_trusted_setups.json
- Mainnet preset: https://github.com/ethereum/consensus-specs/blob/v1.3.0/presets/mainnet/trusted_setups/testing_trusted_setups.json

Some files were not shown because too many files have changed in this diff.