mirror of
https://github.com/logos-storage/plonky2.git
synced 2026-01-03 14:23:07 +00:00
merge
This commit is contained in:
commit
59ae7103a8
@ -45,6 +45,7 @@ jobs:
|
||||
args: --all
|
||||
env:
|
||||
RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -Cprefer-dynamic=y
|
||||
RUST_LOG: 1
|
||||
CARGO_INCREMENTAL: 1
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
|
||||
@ -3,13 +3,13 @@ members = ["evm", "field", "maybe_rayon", "plonky2", "starky", "util"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
incremental = true
|
||||
#lto = "fat"
|
||||
#codegen-units = 1
|
||||
|
||||
[profile.bench]
|
||||
opt-level = 3
|
||||
|
||||
|
||||
[patch.crates-io]
|
||||
plonky2_evm = { path = "evm" }
|
||||
plonky2_field = { path = "field" }
|
||||
|
||||
@ -13,7 +13,7 @@ edition = "2021"
|
||||
anyhow = "1.0.40"
|
||||
blake2 = "0.10.5"
|
||||
env_logger = "0.10.0"
|
||||
eth_trie_utils = "0.5.0"
|
||||
eth_trie_utils = "0.6.0"
|
||||
ethereum-types = "0.14.0"
|
||||
hex = { version = "0.4.3", optional = true }
|
||||
hex-literal = "0.3.4"
|
||||
@ -36,6 +36,7 @@ serde = { version = "1.0.144", features = ["derive"] }
|
||||
static_assertions = "1.1.0"
|
||||
hashbrown = { version = "0.12.3" }
|
||||
tiny-keccak = "2.0.2"
|
||||
serde_json = "1.0"
|
||||
|
||||
[target.'cfg(not(target_env = "msvc"))'.dependencies]
|
||||
jemallocator = "0.5.0"
|
||||
|
||||
@ -16,6 +16,7 @@ pub(crate) fn combined_kernel() -> Kernel {
|
||||
include_str!("asm/bignum/add.asm"),
|
||||
include_str!("asm/bignum/addmul.asm"),
|
||||
include_str!("asm/bignum/cmp.asm"),
|
||||
include_str!("asm/bignum/isone.asm"),
|
||||
include_str!("asm/bignum/iszero.asm"),
|
||||
include_str!("asm/bignum/modexp.asm"),
|
||||
include_str!("asm/bignum/modmul.asm"),
|
||||
@ -54,15 +55,17 @@ pub(crate) fn combined_kernel() -> Kernel {
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/constants.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/curve_add.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/curve_mul.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/final_exponent.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/glv.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/invariant_exponent.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/miller_loop.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/msm.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/pairing.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/precomputation.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/tate_pairing.asm"),
|
||||
include_str!("asm/curve/bn254/field_arithmetic/inverse.asm"),
|
||||
include_str!("asm/curve/bn254/curve_arithmetic/twisted_curve.asm"),
|
||||
include_str!("asm/curve/bn254/field_arithmetic/degree_6_mul.asm"),
|
||||
include_str!("asm/curve/bn254/field_arithmetic/degree_12_mul.asm"),
|
||||
include_str!("asm/curve/bn254/field_arithmetic/frobenius.asm"),
|
||||
include_str!("asm/curve/bn254/field_arithmetic/inverse.asm"),
|
||||
include_str!("asm/curve/bn254/field_arithmetic/util.asm"),
|
||||
include_str!("asm/curve/common.asm"),
|
||||
include_str!("asm/curve/secp256k1/curve_add.asm"),
|
||||
@ -75,14 +78,15 @@ pub(crate) fn combined_kernel() -> Kernel {
|
||||
include_str!("asm/curve/wnaf.asm"),
|
||||
include_str!("asm/exp.asm"),
|
||||
include_str!("asm/halt.asm"),
|
||||
include_str!("asm/hash/blake2b/addresses.asm"),
|
||||
include_str!("asm/hash/blake2b/compression.asm"),
|
||||
include_str!("asm/hash/blake2b/g_functions.asm"),
|
||||
include_str!("asm/hash/blake2b/hash.asm"),
|
||||
include_str!("asm/hash/blake2b/iv.asm"),
|
||||
include_str!("asm/hash/blake2b/main.asm"),
|
||||
include_str!("asm/hash/blake2b/ops.asm"),
|
||||
include_str!("asm/hash/blake2b/permutations.asm"),
|
||||
include_str!("asm/hash/blake2/addresses.asm"),
|
||||
include_str!("asm/hash/blake2/blake2_f.asm"),
|
||||
// include_str!("asm/hash/blake2/blake2b.asm"),
|
||||
// include_str!("asm/hash/blake2/compression.asm"),
|
||||
include_str!("asm/hash/blake2/g_functions.asm"),
|
||||
include_str!("asm/hash/blake2/hash.asm"),
|
||||
include_str!("asm/hash/blake2/iv.asm"),
|
||||
include_str!("asm/hash/blake2/ops.asm"),
|
||||
include_str!("asm/hash/blake2/permutations.asm"),
|
||||
include_str!("asm/hash/ripemd/box.asm"),
|
||||
include_str!("asm/hash/ripemd/compression.asm"),
|
||||
include_str!("asm/hash/ripemd/constants.asm"),
|
||||
|
||||
35
evm/src/cpu/kernel/asm/bignum/isone.asm
Normal file
35
evm/src/cpu/kernel/asm/bignum/isone.asm
Normal file
@ -0,0 +1,35 @@
|
||||
// Arithmetic on little-endian integers represented with 128-bit limbs.
|
||||
// All integers must be under a given length bound, and are padded with leading zeroes.
|
||||
|
||||
global isone_bignum:
|
||||
// stack: len, start_loc, retdest
|
||||
DUP1
|
||||
// stack: len, len, start_loc, retdest
|
||||
ISZERO
|
||||
%jumpi(eqzero)
|
||||
// stack: len, start_loc, retdest
|
||||
DUP2
|
||||
// stack: start_loc, len, start_loc, retdest
|
||||
%mload_kernel_general
|
||||
// stack: start_val, len, start_loc, retdest
|
||||
%eq_const(1)
|
||||
%jumpi(starts_with_one)
|
||||
// Does not start with one, so not equal to one.
|
||||
// stack: len, start_loc, retdest
|
||||
%stack (vals: 2, retdest) -> (retdest, 0)
|
||||
JUMP
|
||||
eqzero:
|
||||
// Is zero, so not equal to one.
|
||||
// stack: cur_loc, end_loc, retdest
|
||||
%stack (vals: 2, retdest) -> (retdest, 0)
|
||||
// stack: retdest, 0
|
||||
JUMP
|
||||
starts_with_one:
|
||||
// Starts with one, so check that the remaining limbs are zero.
|
||||
// stack: len, start_loc, retdest
|
||||
%decrement
|
||||
SWAP1
|
||||
%increment
|
||||
SWAP1
|
||||
// stack: len-1, start_loc+1, retdest
|
||||
%jump(iszero_bignum)
|
||||
@ -8,10 +8,55 @@
|
||||
// All of scratch_2..scratch_5 must have size 2 * length and be initialized with zeroes.
|
||||
// Also, scratch_2..scratch_5 must be CONSECUTIVE in memory.
|
||||
global modexp_bignum:
|
||||
// stack: len, b_loc, e_loc, m_loc, out_loc, s1 (=scratch_1), s2, s3, s4, s5, retdest
|
||||
DUP1
|
||||
ISZERO
|
||||
%jumpi(len_zero)
|
||||
// Special input cases:
|
||||
|
||||
// (1) Modulus is zero (also covers len=0 case).
|
||||
PUSH modulus_zero_return
|
||||
// stack: modulus_zero_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP5
|
||||
// stack: m_loc, modulus_zero_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP3
|
||||
// stack: len, m_loc, modulus_zero_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%jump(iszero_bignum)
|
||||
modulus_zero_return:
|
||||
// stack: m==0, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%jumpi(modulus_zero_or_one)
|
||||
|
||||
// (2) Modulus is one.
|
||||
PUSH modulus_one_return
|
||||
// stack: modulus_one_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP5
|
||||
// stack: m_loc, modulus_one_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP3
|
||||
// stack: len, m_loc, modulus_one_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%jump(isone_bignum)
|
||||
modulus_one_return:
|
||||
// stack: m==1, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%jumpi(modulus_zero_or_one)
|
||||
|
||||
// (3) Both b and e are zero.
|
||||
PUSH b_zero_return
|
||||
// stack: b_zero_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP3
|
||||
// stack: b_loc, b_zero_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP3
|
||||
// stack: len, b_loc, b_zero_return, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%jump(iszero_bignum)
|
||||
b_zero_return:
|
||||
// stack: b==0, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
PUSH e_zero_return
|
||||
// stack: e_zero_return, b==0, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP5
|
||||
// stack: e_loc, e_zero_return, b==0, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
DUP4
|
||||
// stack: len, e_loc, e_zero_return, b==0, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%jump(iszero_bignum)
|
||||
e_zero_return:
|
||||
// stack: e==0, b==0, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
MUL // logical and
|
||||
%jumpi(b_and_e_zero)
|
||||
|
||||
// End of special cases.
|
||||
|
||||
// We store the repeated-squares accumulator x_i in scratch_1, starting with x_0 := b.
|
||||
DUP1
|
||||
@ -128,8 +173,18 @@ modexp_iszero_return:
|
||||
// stack: e != 0, len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%jumpi(modexp_loop)
|
||||
// end of modexp_loop
|
||||
len_zero:
|
||||
modulus_zero_or_one:
|
||||
// If modulus is zero or one, return 0.
|
||||
// stack: len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
%pop10
|
||||
// stack: retdest
|
||||
JUMP
|
||||
b_and_e_zero:
|
||||
// If base and exponent are zero (and modulus > 1), return 1.
|
||||
// stack: len, b_loc, e_loc, m_loc, out_loc, s1, s2, s3, s4, s5, retdest
|
||||
PUSH 1
|
||||
DUP6
|
||||
%mstore_kernel_general
|
||||
%pop10
|
||||
// stack: retdest
|
||||
JUMP
|
||||
|
||||
@ -54,49 +54,55 @@ insert_accessed_addresses_found:
|
||||
|
||||
|
||||
%macro insert_accessed_storage_keys
|
||||
%stack (addr, key) -> (addr, key, %%after)
|
||||
%stack (addr, key, value) -> (addr, key, value, %%after)
|
||||
%jump(insert_accessed_storage_keys)
|
||||
%%after:
|
||||
// stack: cold_access
|
||||
%endmacro
|
||||
|
||||
/// Inserts the storage key into the access list if it is not already present.
|
||||
/// Return 1 if the storage key was inserted, 0 if it was already present.
|
||||
/// Inserts the storage key and value into the access list if it is not already present.
|
||||
/// `value` should be the current storage value at the slot `(addr, key)`.
|
||||
/// Return `1, original_value` if the storage key was inserted, `0, original_value` if it was already present.
|
||||
global insert_accessed_storage_keys:
|
||||
// stack: addr, key, retdest
|
||||
// stack: addr, key, value, retdest
|
||||
%mload_global_metadata(@GLOBAL_METADATA_ACCESSED_STORAGE_KEYS_LEN)
|
||||
// stack: len, addr, key, retdest
|
||||
// stack: len, addr, key, value, retdest
|
||||
PUSH 0
|
||||
insert_accessed_storage_keys_loop:
|
||||
%stack (i, len, addr, key, retdest) -> (i, len, i, len, addr, key, retdest)
|
||||
%stack (i, len, addr, key, value, retdest) -> (i, len, i, len, addr, key, value, retdest)
|
||||
EQ %jumpi(insert_storage_key)
|
||||
// stack: i, len, addr, key, retdest
|
||||
// stack: i, len, addr, key, value, retdest
|
||||
DUP1 %increment %mload_kernel(@SEGMENT_ACCESSED_STORAGE_KEYS)
|
||||
// stack: loaded_key, i, len, addr, key, retdest
|
||||
// stack: loaded_key, i, len, addr, key, value, retdest
|
||||
DUP2 %mload_kernel(@SEGMENT_ACCESSED_STORAGE_KEYS)
|
||||
// stack: loaded_addr, loaded_key, i, len, addr, key, retdest
|
||||
// stack: loaded_addr, loaded_key, i, len, addr, key, value, retdest
|
||||
DUP5 EQ
|
||||
// stack: loaded_addr==addr, loaded_key, i, len, addr, key, retdest
|
||||
// stack: loaded_addr==addr, loaded_key, i, len, addr, key, value, retdest
|
||||
SWAP1 DUP6 EQ
|
||||
// stack: loaded_key==key, loaded_addr==addr, i, len, addr, key, retdest
|
||||
// stack: loaded_key==key, loaded_addr==addr, i, len, addr, key, value, retdest
|
||||
MUL // AND
|
||||
%jumpi(insert_accessed_storage_keys_found)
|
||||
// stack: i, len, addr, key, retdest
|
||||
%add_const(2)
|
||||
// stack: i, len, addr, key, value, retdest
|
||||
%add_const(3)
|
||||
%jump(insert_accessed_storage_keys_loop)
|
||||
|
||||
insert_storage_key:
|
||||
// stack: i, len, addr, key, retdest
|
||||
// stack: i, len, addr, key, value, retdest
|
||||
DUP1 %increment
|
||||
%stack (i_plus_1, i, len, addr, key, retdest) -> (i, addr, i_plus_1, key, i_plus_1, retdest)
|
||||
DUP1 %increment
|
||||
%stack (i_plus_2, i_plus_1, i, len, addr, key, value) -> (i, addr, i_plus_1, key, i_plus_2, value, i_plus_2, value)
|
||||
%mstore_kernel(@SEGMENT_ACCESSED_STORAGE_KEYS) // Store new address at the end of the array.
|
||||
%mstore_kernel(@SEGMENT_ACCESSED_STORAGE_KEYS) // Store new key after that
|
||||
// stack: i_plus_1, retdest
|
||||
%mstore_kernel(@SEGMENT_ACCESSED_STORAGE_KEYS) // Store new value after that
|
||||
// stack: i_plus_2, value, retdest
|
||||
%increment
|
||||
%mstore_global_metadata(@GLOBAL_METADATA_ACCESSED_STORAGE_KEYS_LEN) // Store new length in front of the array.
|
||||
PUSH 1 // Return 1 to indicate that the storage key was inserted.
|
||||
SWAP1 JUMP
|
||||
%mstore_global_metadata(@GLOBAL_METADATA_ACCESSED_STORAGE_KEYS_LEN) // Store new length.
|
||||
%stack (value, retdest) -> (retdest, 1, value) // Return 1 to indicate that the storage key was inserted.
|
||||
JUMP
|
||||
|
||||
insert_accessed_storage_keys_found:
|
||||
%stack (i, len, addr, key, retdest) -> (retdest, 0) // Return 0 to indicate that the storage key was already present.
|
||||
// stack: i, len, addr, key, value, retdest
|
||||
%add_const(2)
|
||||
%mload_kernel(@SEGMENT_ACCESSED_STORAGE_KEYS)
|
||||
%stack (original_value, len, addr, key, value, retdest) -> (retdest, 0, original_value) // Return 0 to indicate that the storage key was already present.
|
||||
JUMP
|
||||
|
||||
@ -5,8 +5,16 @@
|
||||
// Post stack: address
|
||||
global sys_create:
|
||||
%check_static
|
||||
|
||||
%stack (kexit_info, value, code_offset, code_len) -> (code_len, code_offset, kexit_info, value, code_offset, code_len)
|
||||
%checked_mem_expansion
|
||||
// stack: kexit_info, value, code_offset, code_len
|
||||
// TODO: Charge gas.
|
||||
%charge_gas_const(@GAS_CREATE)
|
||||
// stack: kexit_info, value, code_offset, code_len
|
||||
DUP4
|
||||
// stack: code_len, kexit_info, value, code_offset, code_len
|
||||
%check_initcode_size
|
||||
|
||||
%stack (kexit_info, value, code_offset, code_len)
|
||||
-> (sys_create_got_address, value, code_offset, code_len, kexit_info)
|
||||
%address
|
||||
@ -27,8 +35,19 @@ sys_create_got_address:
|
||||
// Post stack: address
|
||||
global sys_create2:
|
||||
%check_static
|
||||
|
||||
// stack: kexit_info, value, code_offset, code_len, salt
|
||||
// TODO: Charge gas.
|
||||
%stack (kexit_info, value, code_offset, code_len) -> (code_len, code_offset, kexit_info, value, code_offset, code_len)
|
||||
%checked_mem_expansion
|
||||
// stack: kexit_info, value, code_offset, code_len, salt
|
||||
DUP4 %num_bytes_to_num_words
|
||||
%mul_const(@GAS_KECCAK256WORD) %add_const(@GAS_CREATE) %charge_gas
|
||||
// stack: kexit_info, value, code_offset, code_len, salt
|
||||
DUP4
|
||||
// stack: code_len, kexit_info, value, code_offset, code_len, salt
|
||||
%check_initcode_size
|
||||
|
||||
|
||||
SWAP4
|
||||
%stack (salt) -> (salt, create_common)
|
||||
// stack: salt, create_common, value, code_offset, code_len, kexit_info
|
||||
@ -78,7 +97,10 @@ global create_common:
|
||||
GET_CONTEXT
|
||||
// stack: src_ctx, new_ctx, address, value, code_offset, code_len, kexit_info
|
||||
|
||||
// Copy the code from txdata to the new context's code segment.
|
||||
%stack (src_ctx, new_ctx, address, value, code_offset, code_len) ->
|
||||
(code_len, new_ctx, src_ctx, new_ctx, address, value, code_offset, code_len)
|
||||
%set_new_ctx_code_size POP
|
||||
// Copy the code from memory to the new context's code segment.
|
||||
%stack (src_ctx, new_ctx, address, value, code_offset, code_len)
|
||||
-> (new_ctx, @SEGMENT_CODE, 0, // DST
|
||||
src_ctx, @SEGMENT_MAIN_MEMORY, code_offset, // SRC
|
||||
@ -113,7 +135,38 @@ after_constructor:
|
||||
// stack: success, leftover_gas, new_ctx, address, kexit_info
|
||||
SWAP2
|
||||
// stack: new_ctx, leftover_gas, success, address, kexit_info
|
||||
POP // TODO: Ignoring new_ctx for now, but we will need it to store code that was returned, if any.
|
||||
POP
|
||||
|
||||
|
||||
// TODO: Skip blocks below if success is false.
|
||||
// EIP-3541: Reject new contract code starting with the 0xEF byte
|
||||
PUSH 0 %mload_current(@SEGMENT_RETURNDATA) %eq_const(0xEF) %jumpi(fault_exception)
|
||||
|
||||
// Charge gas for the code size.
|
||||
SWAP3
|
||||
// stack: kexit_info, success, address, leftover_gas
|
||||
%returndatasize // Size of the code.
|
||||
// stack: code_size, kexit_info, success, address, leftover_gas
|
||||
DUP1 %gt_const(@MAX_CODE_SIZE)
|
||||
%jumpi(fault_exception)
|
||||
// stack: code_size, kexit_info, success, address, leftover_gas
|
||||
%mul_const(@GAS_CODEDEPOSIT) %charge_gas
|
||||
SWAP3
|
||||
|
||||
// Store the code hash of the new contract.
|
||||
GET_CONTEXT
|
||||
%returndatasize
|
||||
%stack (size, ctx) -> (ctx, @SEGMENT_RETURNDATA, 0, size) // context, segment, offset, len
|
||||
KECCAK_GENERAL
|
||||
// stack: codehash, leftover_gas, success, address, kexit_info
|
||||
%observe_new_contract
|
||||
DUP4
|
||||
// stack: address, codehash, leftover_gas, success, address, kexit_info
|
||||
%set_codehash
|
||||
|
||||
// Set the return data size to 0.
|
||||
%mstore_context_metadata(@CTX_METADATA_RETURNDATA_SIZE, 0)
|
||||
|
||||
// stack: leftover_gas, success, address, kexit_info
|
||||
%shl_const(192)
|
||||
// stack: leftover_gas << 192, success, address, kexit_info
|
||||
@ -123,6 +176,53 @@ after_constructor:
|
||||
// stack: address_if_success, leftover_gas << 192, kexit_info
|
||||
SWAP2
|
||||
// stack: kexit_info, leftover_gas << 192, address_if_success
|
||||
ADD
|
||||
SUB
|
||||
// stack: kexit_info, address_if_success
|
||||
EXIT_KERNEL
|
||||
|
||||
%macro set_codehash
|
||||
%stack (addr, codehash) -> (addr, codehash, %%after)
|
||||
%jump(set_codehash)
|
||||
%%after:
|
||||
// stack: (empty)
|
||||
%endmacro
|
||||
|
||||
// Pre stack: addr, codehash, redest
|
||||
// Post stack: (empty)
|
||||
// TODO: Should it be copy-on-write (with make_account_copy) instead of mutating the trie?
|
||||
global set_codehash:
|
||||
// stack: addr, codehash, retdest
|
||||
%mpt_read_state_trie
|
||||
// stack: account_ptr, codehash, retdest
|
||||
%add_const(3)
|
||||
// stack: codehash_ptr, codehash, retdest
|
||||
%mstore_trie_data
|
||||
// stack: retdest
|
||||
JUMP
|
||||
|
||||
// Check and charge gas cost for initcode size. See EIP-3860.
|
||||
// Pre stack: code_size, kexit_info
|
||||
// Post stack: kexit_info
|
||||
%macro check_initcode_size
|
||||
DUP1 %gt_const(@MAX_INITCODE_SIZE) %jumpi(fault_exception)
|
||||
// stack: code_size, kexit_info
|
||||
%num_bytes_to_num_words %mul_const(@INITCODE_WORD_COST)
|
||||
%charge_gas
|
||||
%endmacro
|
||||
|
||||
|
||||
// This should be called whenever a new contract is created.
|
||||
// It does nothing, but just provides a single hook where code can react to newly created contracts.
|
||||
// When called, the code corresponding to `codehash` should be stored in the return data.
|
||||
// Pre stack: codehash, retdest
|
||||
// Post stack: codehash
|
||||
global observe_new_contract:
|
||||
// stack codehash, retdest
|
||||
SWAP1 JUMP
|
||||
|
||||
%macro observe_new_contract
|
||||
%stack (codehash) -> (codehash, %%after)
|
||||
%jump(observe_new_contract)
|
||||
%%after:
|
||||
// stack: codehash
|
||||
%endmacro
|
||||
|
||||
@ -45,7 +45,21 @@ count_zeros_finish:
|
||||
// stack: gas_txndata, retdest
|
||||
|
||||
%is_contract_creation
|
||||
DUP1
|
||||
%mul_const(@GAS_TXCREATE)
|
||||
// stack: gas_creation, is_creation, gas_txndata, retdest
|
||||
SWAP1
|
||||
// stack: is_creation, gas_creation, gas_txndata, retdest
|
||||
DUP1
|
||||
// stack: is_creation, is_creation, gas_creation, gas_txndata, retdest
|
||||
%mload_txn_field(@TXN_FIELD_DATA_LEN) %gt_const(@MAX_INITCODE_SIZE)
|
||||
// stack: initcode_size > max, is_creation, is_creation, gas_creation, gas_txndata, retdest
|
||||
MUL // Cheaper than AND
|
||||
%assert_zero
|
||||
// stack: is_creation, gas_creation, gas_txndata, retdest
|
||||
%mload_txn_field(@TXN_FIELD_DATA_LEN) %num_bytes_to_num_words
|
||||
// stack: initcode_words, is_creation, gas_creation, gas_txndata, retdest
|
||||
%mul_const(@INITCODE_WORD_COST) MUL ADD
|
||||
// stack: gas_creation, gas_txndata, retdest
|
||||
|
||||
PUSH @GAS_TRANSACTION
|
||||
|
||||
@ -1,3 +1,136 @@
|
||||
global precompile_blake2_f:
|
||||
// TODO
|
||||
PANIC
|
||||
// stack: retdest, new_ctx, (old stack)
|
||||
POP
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
PUSH 0x100000000 // = 2^32 (is_kernel = true)
|
||||
// stack: kexit_info
|
||||
|
||||
PUSH blake2_f_contd
|
||||
// stack: blake2_f_contd, kexit_info
|
||||
|
||||
// Load inputs from calldata memory into stack.
|
||||
|
||||
%calldatasize
|
||||
// stack: calldatasize, blake2_f_contd, kexit_info
|
||||
DUP1
|
||||
// stack: calldatasize, calldatasize, blake2_f_contd, kexit_info
|
||||
%eq_const(213) ISZERO %jumpi(fault_exception)
|
||||
// stack: calldatasize, blake2_f_contd, kexit_info
|
||||
%decrement
|
||||
// stack: flag_addr=212, blake2_f_contd, kexit_info
|
||||
DUP1
|
||||
// stack: flag_addr, flag_addr, blake2_f_contd, kexit_info
|
||||
PUSH @SEGMENT_CALLDATA
|
||||
GET_CONTEXT
|
||||
// stack: ctx, @SEGMENT_CALLDATA, flag_addr, flag_addr, blake2_f_contd, kexit_info
|
||||
MLOAD_GENERAL
|
||||
// stack: flag, flag_addr, blake2_f_contd, kexit_info
|
||||
DUP1
|
||||
// stack: flag, flag, flag_addr, blake2_f_contd, kexit_info
|
||||
%gt_const(1) %jumpi(fault_exception) // Check flag < 2 (flag = 0 or flag = 1)
|
||||
// stack: flag, flag_addr, blake2_f_contd, kexit_info
|
||||
SWAP1
|
||||
// stack: flag_addr, flag, blake2_f_contd, kexit_info
|
||||
%sub_const(8)
|
||||
// stack: t1_addr=flag_addr-8, flag, blake2_f_contd, kexit_info
|
||||
|
||||
%stack (t1_addr) -> (@SEGMENT_CALLDATA, t1_addr, 8, t1_addr)
|
||||
// stack: @SEGMENT_CALLDATA, t1_addr, 8, t1_addr, flag, blake2_f_contd, kexit_info
|
||||
GET_CONTEXT
|
||||
// stack: ctx, @SEGMENT_CALLDATA, t1_addr, 8, t1_addr, flag, blake2_f_contd, kexit_info
|
||||
%mload_packing
|
||||
// stack: t_1, t1_addr, flag, blake2_f_contd, kexit_info
|
||||
SWAP1
|
||||
// stack: t1_addr, t_1, flag, blake2_f_contd, kexit_info
|
||||
%sub_const(8)
|
||||
// stack: t0_addr=t1_addr-8, t_1, flag, blake2_f_contd, kexit_info
|
||||
|
||||
%stack (t0_addr) -> (@SEGMENT_CALLDATA, t0_addr, 8, t0_addr)
|
||||
// stack: @SEGMENT_CALLDATA, t0_addr, 8, t0_addr, t_1, flag, blake2_f_contd, kexit_info
|
||||
GET_CONTEXT
|
||||
// stack: ctx, @SEGMENT_CALLDATA, t0_addr, 8, t0_addr, t_1, flag, blake2_f_contd, kexit_info
|
||||
%mload_packing
|
||||
// stack: t_0, t0_addr, t_1, flag, blake2_f_contd, kexit_info
|
||||
SWAP1
|
||||
// stack: t0_addr, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%sub_const(128) // 16 * 8
|
||||
// stack: m0_addr=t0_addr-128, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
|
||||
%rep 16
|
||||
// stack: 68 + 8 * i, m_(i-1), ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
PUSH 8
|
||||
// stack: 8, 68 + 8 * i, m_(i-1), ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
DUP2
|
||||
// stack: 68 + 8 * i, 8, 68 + 8 * i, m_(i-1), ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
PUSH @SEGMENT_CALLDATA
|
||||
// stack: @SEGMENT_CALLDATA, 68 + 8 * i, 8, 68 + 8 * i, m_(i-1), ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
GET_CONTEXT
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 68 + 8 * i, 8, 68 + 8 * i, m_(i-1), ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%mload_packing
|
||||
// stack: m_i, 68 + 8 * i, m_(i-1), ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
SWAP1
|
||||
// stack: 68 + 8 * i, m_i, m_(i-1), ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%add_const(8)
|
||||
%endrep
|
||||
// stack: 68 + 8 * 16 = 196, m_15, ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%sub_const(192) // 16 * 8 (m values) + 8 * 8 (h values)
|
||||
// stack: h0_addr, m_15, ..., m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
|
||||
%rep 8
|
||||
// stack: 4 + 8 * i, h_(i-1), ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
PUSH 8
|
||||
// stack: 8, 4 + 8 * i, h_(i-1), ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
DUP2
|
||||
// stack: 4 + 8 * i, 8, 4 + 8 * i, h_(i-1), ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
PUSH @SEGMENT_CALLDATA
|
||||
// stack: @SEGMENT_CALLDATA, 4 + 8 * i, 8, 4 + 8 * i, h_(i-1), ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
GET_CONTEXT
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 4 + 8 * i, 8, 4 + 8 * i, h_(i-1), ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%mload_packing
|
||||
// stack: h_i, 4 + 8 * i, h_(i-1), ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
SWAP1
|
||||
// stack: 4 + 8 * i, h_i, h_(i-1), ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%add_const(8)
|
||||
%endrep
|
||||
// stack: 4 + 8 * 8 = 68, h_7, ..., h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
POP
|
||||
|
||||
%stack () -> (@SEGMENT_CALLDATA, 0, 4)
|
||||
GET_CONTEXT
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 0, 4, h_7..h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%mload_packing
|
||||
// stack: rounds, h_7..h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
|
||||
DUP1
|
||||
// stack: rounds, rounds, h_7..h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%charge_gas
|
||||
|
||||
// stack: rounds, h_7..h_0, m_15..m_0, t_0, t_1, flag, blake2_f_contd, kexit_info
|
||||
%jump(blake2_f)
|
||||
blake2_f_contd:
|
||||
// stack: h_0', h_1', h_2', h_3', h_4', h_5', h_6', h_7', kexit_info
|
||||
// Store the result hash to the parent's return data using `mstore_unpacking`.
|
||||
|
||||
%mstore_parent_context_metadata(@CTX_METADATA_RETURNDATA_SIZE, 32)
|
||||
PUSH 0
|
||||
// stack: addr_0=0, h_0', h_1', h_2', h_3', h_4', h_5', h_6', h_7', kexit_info
|
||||
%mload_context_metadata(@CTX_METADATA_PARENT_CONTEXT)
|
||||
// stack: parent_ctx, addr_0=0, h_0', h_1', h_2', h_3', h_4', h_5', h_6', h_7', kexit_info
|
||||
|
||||
%rep 8
|
||||
// stack: parent_ctx, addr_i, h_i', ..., h_7', kexit_info
|
||||
%stack (ctx, addr, h_i) -> (ctx, @SEGMENT_RETURNDATA, addr, h_i, 4, addr, ctx)
|
||||
// stack: parent_ctx, @SEGMENT_RETURNDATA, addr_i, h_i', 4, addr_i, parent_ctx, h_(i+1)', ..., h_7', kexit_info
|
||||
%mstore_unpacking
|
||||
// stack: addr_i, parent_ctx, h_(i+1)', ..., h_7', kexit_info
|
||||
%add_const(4)
|
||||
// stack: addr_(i+1), parent_ctx, h_(i+1)', ..., h_7', kexit_info
|
||||
SWAP1
|
||||
// stack: parent_ctx, addr_(i+1), h_(i+1)', ..., h_7', kexit_info
|
||||
%endrep
|
||||
|
||||
// stack: kexit_info
|
||||
%jump(pop_and_return_success)
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
global precompile_bn_add:
|
||||
// stack: address, retdest, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
%pop2
|
||||
// stack: new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
@ -11,25 +11,30 @@ global precompile_bn_add:
|
||||
%charge_gas_const(@BN_ADD_GAS)
|
||||
|
||||
// Load x0, y0, x1, y1 from the call data using `mload_packing`.
|
||||
PUSH bn_add_return
|
||||
// stack: bn_add_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 96, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 96, 32, bn_add_contd, kexit_info)
|
||||
%jump(mload_packing)
|
||||
bn_add_contd:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 96, 32, bn_add_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: y1, bn_add_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 64, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, y1, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 64, 32, bn_add_contd2, y1, kexit_info)
|
||||
%jump(mload_packing)
|
||||
bn_add_contd2:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 64, 32, y1, bn_add_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: x1, y1, bn_add_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 32, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, x1, y1, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 32, 32, bn_add_contd3, x1, y1, kexit_info)
|
||||
%jump(mload_packing)
|
||||
bn_add_contd3:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 32, 32, x1, y1, bn_add_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: y0, x1, y1, bn_add_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 0, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, y0, x1, y1, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 0, 32, bn_add_contd4, y0, x1, y1, kexit_info)
|
||||
%jump(mload_packing)
|
||||
bn_add_contd4:
|
||||
%stack (x0, y0, x1, y1, kexit_info) -> (x0, y0, x1, y1, bn_add_contd5, kexit_info)
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 0, 32, y0, x1, y1, bn_add_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: x0, y0, x1, y1, bn_add_return, kexit_info
|
||||
%jump(bn_add)
|
||||
bn_add_contd5:
|
||||
bn_add_return:
|
||||
// stack: x, y, kexit_info
|
||||
DUP2 %eq_const(@U256_MAX) // bn_add returns (U256_MAX, U256_MAX) on bad input.
|
||||
DUP2 %eq_const(@U256_MAX) // bn_add returns (U256_MAX, U256_MAX) on bad input.
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
global precompile_bn_mul:
|
||||
// stack: address, retdest, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
%pop2
|
||||
// stack: new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
@ -11,21 +11,25 @@ global precompile_bn_mul:
|
||||
%charge_gas_const(@BN_MUL_GAS)
|
||||
|
||||
// Load x, y, n from the call data using `mload_packing`.
|
||||
PUSH bn_mul_return
|
||||
// stack: bn_mul_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 64, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 64, 32, bn_mul_contd, kexit_info)
|
||||
%jump(mload_packing)
|
||||
bn_mul_contd:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 64, 32, bn_mul_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: n, bn_mul_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 32, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, n, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 32, 32, bn_mul_contd2, n, kexit_info)
|
||||
%jump(mload_packing)
|
||||
bn_mul_contd2:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 32, 32, n, bn_mul_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: y, n, bn_mul_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 0, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, y, n, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 0, 32, bn_mul_contd3, y, n, kexit_info)
|
||||
%jump(mload_packing)
|
||||
bn_mul_contd3:
|
||||
%stack (x, y, n, kexit_info) -> (x, y, n, bn_mul_contd4, kexit_info)
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 0, 32, y, n, bn_mul_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: x, y, n, bn_mul_return, kexit_info
|
||||
%jump(bn_mul)
|
||||
bn_mul_contd4:
|
||||
bn_mul_return:
|
||||
// stack: Px, Py, kexit_info
|
||||
DUP2 %eq_const(@U256_MAX) // bn_mul returns (U256_MAX, U256_MAX) on bad input.
|
||||
DUP2 %eq_const(@U256_MAX) // bn_mul returns (U256_MAX, U256_MAX) on bad input.
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
global precompile_ecrec:
|
||||
// stack: address, retdest, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
%pop2
|
||||
// stack: new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
@ -11,25 +11,30 @@ global precompile_ecrec:
|
||||
%charge_gas_const(@ECREC_GAS)
|
||||
|
||||
// Load hash, v, r, s from the call data using `mload_packing`.
|
||||
PUSH ecrec_return
|
||||
// stack: ecrec_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 96, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 96, 32, ecrec_contd, kexit_info)
|
||||
%jump(mload_packing)
|
||||
ecrec_contd:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 96, 32, ecrec_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: s, ecrec_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 64, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, s, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 64, 32, ecrec_contd2, s, kexit_info)
|
||||
%jump(mload_packing)
|
||||
ecrec_contd2:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 64, 32, s, ecrec_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: r, s, ecrec_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 32, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, r, s, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 32, 32, ecrec_contd3, r, s, kexit_info)
|
||||
%jump(mload_packing)
|
||||
ecrec_contd3:
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 32, 32, r, s, ecrec_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: v, r, s, ecrec_return, kexit_info
|
||||
%stack () -> (@SEGMENT_CALLDATA, 0, 32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, v, r, s, kexit_info) -> (ctx, @SEGMENT_CALLDATA, 0, 32, ecrec_contd4, v, r, s, kexit_info)
|
||||
%jump(mload_packing)
|
||||
ecrec_contd4:
|
||||
%stack (hash, v, r, s, kexit_info) -> (hash, v, r, s, ecrec_contd5, kexit_info)
|
||||
// stack: ctx, @SEGMENT_CALLDATA, 0, 32, v, r, s, ecrec_return, kexit_info
|
||||
%mload_packing
|
||||
// stack: hash, v, r, s, ecrec_return, kexit_info
|
||||
%jump(ecrecover)
|
||||
ecrec_contd5:
|
||||
ecrec_return:
|
||||
// stack: address, kexit_info
|
||||
DUP1 %eq_const(@U256_MAX) %jumpi(ecrec_bad_input) // ecrecover returns U256_MAX on bad input.
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
global precompile_id:
|
||||
// stack: address, retdest, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
%pop2
|
||||
// stack: new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
|
||||
@ -1,16 +1,16 @@
|
||||
%macro handle_precompiles
|
||||
// stack: address, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, new_ctx, (old stack)
|
||||
PUSH %%after
|
||||
SWAP1
|
||||
// stack: address, %%after, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, %%after, new_ctx, (old stack)
|
||||
%jump(handle_precompiles)
|
||||
%%after:
|
||||
// stack: new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: new_ctx, (old stack)
|
||||
%pop4
|
||||
%endmacro
|
||||
|
||||
global handle_precompiles:
|
||||
// stack: address, retdest, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
DUP1 %eq_const(@ECREC) %jumpi(precompile_ecrec)
|
||||
DUP1 %eq_const(@SHA256) %jumpi(precompile_sha256)
|
||||
DUP1 %eq_const(@RIP160) %jumpi(precompile_rip160)
|
||||
@ -31,6 +31,36 @@ global pop_and_return_success:
|
||||
PUSH 1 // success
|
||||
%jump(terminate_common)
|
||||
|
||||
%macro handle_precompiles_from_eoa
|
||||
// stack: retdest
|
||||
%mload_txn_field(@TXN_FIELD_TO)
|
||||
// stack: addr, retdest
|
||||
DUP1 %is_precompile
|
||||
%jumpi(handle_precompiles_from_eoa)
|
||||
// stack: addr, retdest
|
||||
POP
|
||||
%endmacro
|
||||
|
||||
global handle_precompiles_from_eoa:
|
||||
// stack: addr, retdest
|
||||
%create_context
|
||||
// stack: new_ctx, addr, retdest
|
||||
%set_new_ctx_parent_pc(process_message_txn_after_call)
|
||||
%non_intrinisic_gas %set_new_ctx_gas_limit
|
||||
// stack: new_ctx, addr, retdest
|
||||
|
||||
// Set calldatasize and copy txn data to calldata.
|
||||
%mload_txn_field(@TXN_FIELD_DATA_LEN)
|
||||
%stack (calldata_size, new_ctx) -> (calldata_size, new_ctx, calldata_size)
|
||||
%set_new_ctx_calldata_size
|
||||
%stack (new_ctx, calldata_size) -> (new_ctx, @SEGMENT_CALLDATA, 0, 0, @SEGMENT_TXN_DATA, 0, calldata_size, handle_precompiles_from_eoa_finish, new_ctx)
|
||||
%jump(memcpy)
|
||||
|
||||
handle_precompiles_from_eoa_finish:
|
||||
%stack (new_ctx, addr, retdest) -> (addr, new_ctx, retdest)
|
||||
%handle_precompiles
|
||||
PANIC // We already checked that a precompile is called, so this should be unreachable.
|
||||
|
||||
%macro zero_out_kernel_general
|
||||
PUSH 0 PUSH 0 %mstore_kernel_general
|
||||
PUSH 0 PUSH 1 %mstore_kernel_general
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
global precompile_rip160:
|
||||
// stack: address, retdest, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
%pop2
|
||||
// stack: new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
@ -20,13 +20,29 @@ global precompile_rip160:
|
||||
// Copy the call data to the kernel general segment (ripemd expects it there) and call ripemd.
|
||||
%calldatasize
|
||||
GET_CONTEXT
|
||||
%stack (ctx, size) ->
|
||||
(
|
||||
0, @SEGMENT_KERNEL_GENERAL, 200, // DST
|
||||
ctx, @SEGMENT_CALLDATA, 0, // SRC
|
||||
size, ripemd, // count, retdest
|
||||
200, size, rip160_contd // ripemd input: virt, num_bytes, retdest
|
||||
)
|
||||
|
||||
// The next block of code is equivalent to the following %stack macro call
|
||||
// (unfortunately the macro call takes too long to expand dynamically).
|
||||
//
|
||||
// %stack (ctx, size) ->
|
||||
// (
|
||||
// 0, @SEGMENT_KERNEL_GENERAL, 200, // DST
|
||||
// ctx, @SEGMENT_CALLDATA, 0, // SRC
|
||||
// size, ripemd, // count, retdest
|
||||
// 200, size, rip160_contd // ripemd input: virt, num_bytes, retdest
|
||||
// )
|
||||
PUSH 200
|
||||
PUSH ripemd
|
||||
DUP4
|
||||
PUSH 0
|
||||
PUSH @SEGMENT_CALLDATA
|
||||
PUSH rip160_contd
|
||||
SWAP7
|
||||
SWAP6
|
||||
PUSH 200
|
||||
PUSH @SEGMENT_KERNEL_GENERAL
|
||||
PUSH 0
|
||||
|
||||
%jump(memcpy)
|
||||
|
||||
rip160_contd:
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
global precompile_sha256:
|
||||
// stack: address, retdest, new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
%pop2
|
||||
// stack: new_ctx, kexit_info, ret_offset, ret_size
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
@ -23,13 +23,31 @@ global precompile_sha256:
|
||||
// Copy the call data to the kernel general segment (sha2 expects it there) and call sha2.
|
||||
%calldatasize
|
||||
GET_CONTEXT
|
||||
%stack (ctx, size) ->
|
||||
(
|
||||
0, @SEGMENT_KERNEL_GENERAL, 1, // DST
|
||||
ctx, @SEGMENT_CALLDATA, 0, // SRC
|
||||
size, sha2, // count, retdest
|
||||
0, size, sha256_contd // sha2 input: virt, num_bytes, retdest
|
||||
)
|
||||
// stack: ctx, size
|
||||
|
||||
// The next block of code is equivalent to the following %stack macro call
|
||||
// (unfortunately the macro call takes too long to expand dynamically).
|
||||
//
|
||||
// %stack (ctx, size) ->
|
||||
// (
|
||||
// 0, @SEGMENT_KERNEL_GENERAL, 1, // DST
|
||||
// ctx, @SEGMENT_CALLDATA, 0, // SRC
|
||||
// size, sha2, // count, retdest
|
||||
// 0, size, sha256_contd // sha2 input: virt, num_bytes, retdest
|
||||
// )
|
||||
//
|
||||
PUSH 0
|
||||
PUSH sha2
|
||||
DUP4
|
||||
PUSH 0
|
||||
PUSH @SEGMENT_CALLDATA
|
||||
PUSH sha256_contd
|
||||
SWAP7
|
||||
SWAP6
|
||||
PUSH 1
|
||||
PUSH @SEGMENT_KERNEL_GENERAL
|
||||
PUSH 0
|
||||
|
||||
%jump(memcpy)
|
||||
|
||||
sha256_contd:
|
||||
|
||||
@ -1,3 +1,118 @@
|
||||
global precompile_snarkv:
|
||||
// TODO
|
||||
PANIC
|
||||
// stack: address, retdest, new_ctx, (old stack)
|
||||
%pop2
|
||||
// stack: new_ctx, (old stack)
|
||||
DUP1
|
||||
SET_CONTEXT
|
||||
// stack: (empty)
|
||||
PUSH 0x100000000 // = 2^32 (is_kernel = true)
|
||||
// stack: kexit_info
|
||||
|
||||
PUSH 192 %calldatasize DUP2 DUP2
|
||||
// stack: calldata_size, 192, calldata_size, 192, kexit_info
|
||||
MOD %jumpi(fault_exception) // calldata_size should be a multiple of 192
|
||||
DIV
|
||||
// stack: k, kexit_info
|
||||
DUP1 %mul_const(@SNARKV_DYNAMIC_GAS) %add_const(@SNARKV_STATIC_GAS)
|
||||
%stack (gas, k, kexit_info) -> (gas, kexit_info, k)
|
||||
%charge_gas
|
||||
SWAP1
|
||||
// stack: k, kexit_info
|
||||
PUSH 0
|
||||
loading_loop:
|
||||
// stack: i, k, kexit_info
|
||||
DUP2 DUP2 EQ %jumpi(loading_done)
|
||||
// stack: i, k, kexit_info
|
||||
DUP1 %mul_const(192)
|
||||
// stack: px, i, k, kexit_info
|
||||
GET_CONTEXT
|
||||
%stack (ctx, px) -> (ctx, @SEGMENT_CALLDATA, px, 32, loading_loop_contd, px)
|
||||
%jump(mload_packing)
|
||||
loading_loop_contd:
|
||||
// stack: x, px, i, k, kexit_info
|
||||
SWAP1 %add_const(32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, py) -> (ctx, @SEGMENT_CALLDATA, py, 32, loading_loop_contd2, py)
|
||||
%jump(mload_packing)
|
||||
loading_loop_contd2:
|
||||
// stack: y, py, x, i, k, kexit_info
|
||||
SWAP1 %add_const(32)
|
||||
GET_CONTEXT
|
||||
%stack (ctx, px_im) -> (ctx, @SEGMENT_CALLDATA, px_im, 32, loading_loop_contd3, px_im)
|
||||
%jump(mload_packing)
|
||||
loading_loop_contd3:
|
||||
// stack: x_im, px_im, y, x, i, k, kexit_info
|
||||
SWAP1 %add_const(32)
|
||||
// stack: px_re, x_im, y, x, i, k, kexit_info
|
||||
GET_CONTEXT
|
||||
%stack (ctx, px_re) -> (ctx, @SEGMENT_CALLDATA, px_re, 32, loading_loop_contd4, px_re)
|
||||
%jump(mload_packing)
|
||||
loading_loop_contd4:
|
||||
// stack: x_re, px_re, x_im, y, x, i, k, kexit_info
|
||||
SWAP1 %add_const(32)
|
||||
// stack: py_im, x_re, x_im, y, x, i, k, kexit_info
|
||||
GET_CONTEXT
|
||||
%stack (ctx, py_im) -> (ctx, @SEGMENT_CALLDATA, py_im, 32, loading_loop_contd5, py_im)
|
||||
%jump(mload_packing)
|
||||
loading_loop_contd5:
|
||||
// stack: y_im, py_im, x_re, x_im, y, x, i, k, kexit_info
|
||||
SWAP1 %add_const(32)
|
||||
// stack: py_re, y_im, x_re, x_im, y, x, i, k, kexit_info
|
||||
GET_CONTEXT
|
||||
%stack (ctx, py_re) -> (ctx, @SEGMENT_CALLDATA, py_re, 32, loading_loop_contd6)
|
||||
%jump(mload_packing)
|
||||
loading_loop_contd6:
|
||||
// stack: y_re, y_im, x_re, x_im, y, x, i, k, kexit_info
|
||||
SWAP1 // the EVM serializes the imaginary part first
|
||||
// stack: y_im, y_re, x_re, x_im, y, x, i, k, kexit_info
|
||||
DUP7
|
||||
// stack: i, y_im, y_re, x_re, x_im, y, x, i, k, kexit_info
|
||||
%mul_const(6) %add_const(@SNARKV_INP)
|
||||
%add_const(5)
|
||||
%mstore_kernel_bn254_pairing
|
||||
// stack: y_re, x_re, x_im, y, x, i, k, kexit_info
|
||||
DUP6
|
||||
// stack: i, y_re, x_re, x_im, y, x, i, k, kexit_info
|
||||
%mul_const(6) %add_const(@SNARKV_INP)
|
||||
%add_const(4)
|
||||
%mstore_kernel_bn254_pairing
|
||||
SWAP1 // the EVM serializes the imaginary part first
|
||||
// stack: x_im, x_re, y, x, i, k, kexit_info
|
||||
DUP5
|
||||
// stack: i, x_im, x_re, y, x, i, k, kexit_info
|
||||
%mul_const(6) %add_const(@SNARKV_INP)
|
||||
%add_const(3)
|
||||
%mstore_kernel_bn254_pairing
|
||||
// stack: x_re, y, x, i, k, kexit_info
|
||||
DUP4
|
||||
// stack: i, x_re, y, x, i, k, kexit_info
|
||||
%mul_const(6) %add_const(@SNARKV_INP)
|
||||
%add_const(2)
|
||||
%mstore_kernel_bn254_pairing
|
||||
// stack: y, x, i, k, kexit_info
|
||||
DUP3
|
||||
// stack: i, y, x, i, k, kexit_info
|
||||
%mul_const(6) %add_const(@SNARKV_INP)
|
||||
%add_const(1)
|
||||
%mstore_kernel_bn254_pairing
|
||||
// stack: x, i, k, kexit_info
|
||||
DUP2
|
||||
// stack: i, x, i, k, kexit_info
|
||||
%mul_const(6) %add_const(@SNARKV_INP)
|
||||
%mstore_kernel_bn254_pairing
|
||||
// stack: i, k, kexit_info
|
||||
%increment
|
||||
%jump(loading_loop)
|
||||
|
||||
loading_done:
|
||||
%stack (i, k) -> (k, @SNARKV_INP, @SNARKV_OUT, got_result)
|
||||
%jump(bn254_pairing)
|
||||
got_result:
|
||||
// stack: result, kexit_info
|
||||
DUP1 %eq_const(@U256_MAX) %jumpi(fault_exception)
|
||||
// stack: result, kexit_info
|
||||
// Store the result bool (repr. by a U256) to the parent's return data using `mstore_unpacking`.
|
||||
%mstore_parent_context_metadata(@CTX_METADATA_RETURNDATA_SIZE, 32)
|
||||
%mload_context_metadata(@CTX_METADATA_PARENT_CONTEXT)
|
||||
%stack (parent_ctx, address) -> (parent_ctx, @SEGMENT_RETURNDATA, 0, address, 32, pop_and_return_success)
|
||||
%jump(mstore_unpacking)
|
||||
|
||||
@ -144,6 +144,33 @@ process_contract_creation_txn_after_code_loaded:
|
||||
global process_contract_creation_txn_after_constructor:
|
||||
// stack: success, leftover_gas, new_ctx, address, retdest
|
||||
POP // TODO: Success will go into the receipt when we support that.
|
||||
|
||||
// EIP-3541: Reject new contract code starting with the 0xEF byte
|
||||
PUSH 0 %mload_current(@SEGMENT_RETURNDATA) %eq_const(0xEF) %assert_zero // TODO: need to revert changes here.
|
||||
|
||||
// stack: leftover_gas, new_ctx, address, retdest
|
||||
%returndatasize // Size of the code.
|
||||
// stack: code_size, leftover_gas, new_ctx, address, retdest
|
||||
DUP1 %gt_const(@MAX_CODE_SIZE) %jumpi(panic) // TODO: need to revert changes here.
|
||||
// stack: code_size, leftover_gas, new_ctx, address, retdest
|
||||
%mul_const(@GAS_CODEDEPOSIT) SWAP1
|
||||
// stack: leftover_gas, codedeposit_cost, new_ctx, address, retdest
|
||||
DUP2 DUP2 LT %jumpi(panic) // TODO: need to revert changes here.
|
||||
// stack: leftover_gas, codedeposit_cost, new_ctx, address, retdest
|
||||
SUB
|
||||
|
||||
// Store the code hash of the new contract.
|
||||
// stack: leftover_gas, new_ctx, address, retdest
|
||||
GET_CONTEXT
|
||||
%returndatasize
|
||||
%stack (size, ctx) -> (ctx, @SEGMENT_RETURNDATA, 0, size) // context, segment, offset, len
|
||||
KECCAK_GENERAL
|
||||
// stack: codehash, leftover_gas, new_ctx, address, retdest
|
||||
%observe_new_contract
|
||||
DUP4
|
||||
// stack: address, codehash, leftover_gas, new_ctx, address, retdest
|
||||
%set_codehash
|
||||
|
||||
// stack: leftover_gas, new_ctx, address, retdest
|
||||
%pay_coinbase_and_refund_sender
|
||||
// TODO: Delete accounts in self-destruct list and empty touched addresses.
|
||||
@ -165,6 +192,13 @@ global process_message_txn:
|
||||
%jumpi(process_message_txn_insufficient_balance)
|
||||
// stack: retdest
|
||||
|
||||
%handle_precompiles_from_eoa
|
||||
|
||||
// If to's code is empty, return.
|
||||
%mload_txn_field(@TXN_FIELD_TO) %ext_code_empty
|
||||
// stack: code_empty, retdest
|
||||
%jumpi(process_message_txn_return)
|
||||
|
||||
// Add precompiles to accessed addresses.
|
||||
PUSH @ECREC %insert_accessed_addresses_no_return
|
||||
PUSH @SHA256 %insert_accessed_addresses_no_return
|
||||
@ -175,12 +209,6 @@ global process_message_txn:
|
||||
PUSH @BN_MUL %insert_accessed_addresses_no_return
|
||||
PUSH @SNARKV %insert_accessed_addresses_no_return
|
||||
PUSH @BLAKE2_F %insert_accessed_addresses_no_return
|
||||
// TODO: Handle precompiles.
|
||||
|
||||
// If to's code is empty, return.
|
||||
%mload_txn_field(@TXN_FIELD_TO) %ext_code_empty
|
||||
// stack: code_empty, retdest
|
||||
%jumpi(process_message_txn_return)
|
||||
|
||||
// Otherwise, load to's code and execute it in a new context.
|
||||
// stack: retdest
|
||||
|
||||
@ -28,11 +28,24 @@
|
||||
// stack: to == 0
|
||||
%endmacro
|
||||
|
||||
%macro is_precompile
|
||||
// stack: addr
|
||||
DUP1 %ge_const(@ECREC) SWAP1 %le_const(@BLAKE2_F)
|
||||
// stack: addr>=1, addr<=9
|
||||
MUL // Cheaper than AND
|
||||
%endmacro
|
||||
|
||||
// Returns 1 if the account is non-existent, 0 otherwise.
|
||||
%macro is_non_existent
|
||||
// stack: addr
|
||||
%mpt_read_state_trie
|
||||
ISZERO
|
||||
DUP1
|
||||
// stack: addr, addr
|
||||
%mpt_read_state_trie ISZERO
|
||||
SWAP1
|
||||
// stack: addr, zero_state_trie
|
||||
%is_precompile ISZERO
|
||||
// stack: not_precompile, zero_state_trie
|
||||
MUL // Cheaper than AND
|
||||
%endmacro
|
||||
|
||||
// Returns 1 if the account is empty, 0 otherwise.
|
||||
@ -65,5 +78,5 @@
|
||||
// stack: addr
|
||||
DUP1 %is_non_existent
|
||||
SWAP1 %is_empty
|
||||
ADD // OR
|
||||
OR
|
||||
%endmacro
|
||||
|
||||
@ -201,8 +201,8 @@ global bn_double:
|
||||
%jump(bn_add_equal_points)
|
||||
|
||||
// Check if (x,y) is a valid curve point.
|
||||
// Returns (range & curve) || is_identity
|
||||
// where
|
||||
// Returns (range & curve) || ident
|
||||
// where
|
||||
// range = (x < N) & (y < N)
|
||||
// curve = y^2 == (x^3 + 3)
|
||||
// ident = (x,y) == (0,0)
|
||||
|
||||
@ -0,0 +1,319 @@
|
||||
/// To make the Tate pairing an invariant, the final step is to exponentiate by
|
||||
/// (p^12 - 1)/N = (p^6 - 1) * (p^2 + 1) * (p^4 - p^2 + 1)/N
|
||||
/// and thus we can exponentiate by each factor sequentially.
|
||||
///
|
||||
/// def bn254_final_exponent(y: Fp12):
|
||||
/// y = first_exp(y)
|
||||
/// y = second_exp(y)
|
||||
/// return final_exp(y)
|
||||
|
||||
global bn254_final_exponent:
|
||||
|
||||
/// first, exponentiate by (p^6 - 1) via
|
||||
/// def first_exp(y):
|
||||
/// return y.frob(6) / y
|
||||
// stack: k, inp, out, retdest {out: y}
|
||||
%stack (k, inp, out) -> (out, 0, first_exp, out)
|
||||
// stack: out, 0, first_exp, out, retdest {out: y}
|
||||
%jump(inv_fp254_12)
|
||||
first_exp:
|
||||
// stack: out, retdest {out: y , 0: y^-1}
|
||||
%frob_fp254_12_6
|
||||
// stack: out, retdest {out: y_6, 0: y^-1}
|
||||
%stack (out) -> (out, 0, out, second_exp, out)
|
||||
// stack: out, 0, out, second_exp, out, retdest {out: y_6, 0: y^-1}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
/// second, exponentiate by (p^2 + 1) via
|
||||
/// def second_exp(y):
|
||||
/// return y.frob(2) * y
|
||||
second_exp:
|
||||
// stack: out, retdest {out: y}
|
||||
%stack (out) -> (out, 0, out, out, final_exp, out)
|
||||
// stack: out, 0, out, out, final_exp, out, retdest {out: y}
|
||||
%frob_fp254_12_2_
|
||||
// stack: 0, out, out, final_exp, out, retdest {out: y, 0: y_2}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
/// Finally, we must exponentiate by (p^4 - p^2 + 1)/N
|
||||
/// To do so efficiently, we can express this power as
|
||||
/// (p^4 - p^2 + 1)/N = p^3 + (a2)p^2 - (a1)p - a0
|
||||
/// and simultaneously compute y^a4, y^a2, y^a0 where
|
||||
/// a1 = a4 + 2a2 - a0
|
||||
/// We first initialize these powers as 1 and then use
|
||||
/// binary algorithms for exponentiation.
|
||||
///
|
||||
/// def final_exp(y):
|
||||
/// y4, y2, y0 = 1, 1, 1
|
||||
/// power_loop_4()
|
||||
/// power_loop_2()
|
||||
/// power_loop_0()
|
||||
/// custom_powers()
|
||||
/// final_power()
|
||||
|
||||
final_exp:
|
||||
// stack: val, retdest
|
||||
%stack (val) -> (val, 0, val)
|
||||
// stack: val, 0, val, retdest
|
||||
%move_fp254_12
|
||||
// stack: 0, val, retdest {0: sqr}
|
||||
%stack () -> (1, 1, 1)
|
||||
// stack: 1, 1, 1, 0, val, retdest
|
||||
%mstore_kernel_bn254_pairing(12)
|
||||
%mstore_kernel_bn254_pairing(24)
|
||||
%mstore_kernel_bn254_pairing(36)
|
||||
// stack: 0, val, retdest {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (64, 62, 65)
|
||||
// stack: 64, 62, 65, 0, val, retdest {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(power_loop_4)
|
||||
|
||||
/// After computing the powers
|
||||
/// y^a4, y^a2, y^a0
|
||||
/// we would like to transform them to
|
||||
/// y^a2, y^-a1, y^-a0
|
||||
///
|
||||
/// def custom_powers()
|
||||
/// y0 = y0^{-1}
|
||||
/// y1 = y4 * y2^2 * y0
|
||||
/// return y2, y1, y0
|
||||
///
|
||||
/// And finally, upon doing so, compute the final power
|
||||
/// y^(p^3) * (y^a2)^(p^2) * (y^-a1)^p * (y^-a0)
|
||||
///
|
||||
/// def final_power()
|
||||
/// y = y.frob(3)
|
||||
/// y2 = y2.frob(2)
|
||||
/// y1 = y1.frob(1)
|
||||
/// return y * y2 * y1 * y0
|
||||
|
||||
custom_powers:
|
||||
// stack: val, retdest {12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (12, 48, make_term_1)
|
||||
// stack: 12, 48, make_term_1, val, retdest {12: y0, 24: y2, 36: y4}
|
||||
%jump(inv_fp254_12)
|
||||
make_term_1:
|
||||
// stack: val, retdest {24: y2, 36: y4, 48: y0^-1}
|
||||
%stack () -> (24, 36, 36, make_term_2)
|
||||
// stack: 24, 36, 36, make_term_2, val, retdest {24: y2, 36: y4, 48: y0^-1}
|
||||
%jump(mul_fp254_12)
|
||||
make_term_2:
|
||||
// stack: val, retdest {24: y2, 36: y4 * y2, 48: y0^-1}
|
||||
%stack () -> (24, 36, 36, make_term_3)
|
||||
// stack: 24, 36, 36, make_term_3, val, retdest {24: y2, 36: y4 * y2, 48: y0^-1}
|
||||
%jump(mul_fp254_12)
|
||||
make_term_3:
|
||||
// stack: val, retdest {24: y2, 36: y4 * y2^2, 48: y0^-1}
|
||||
%stack () -> (48, 36, 36, final_power)
|
||||
// stack: 48, 36, 36, final_power, val, retdest {24: y2, 36: y4 * y2^2, 48: y0^-1}
|
||||
%jump(mul_fp254_12)
|
||||
final_power:
|
||||
// stack: val, retdest {val: y , 24: y^a2 , 36: y^a1 , 48: y^a0}
|
||||
%frob_fp254_12_3
|
||||
// stack: val, retdest {val: y_3, 24: y^a2 , 36: y^a1 , 48: y^a0}
|
||||
%stack () -> (24, 24)
|
||||
%frob_fp254_12_2_
|
||||
POP
|
||||
// stack: val, retdest {val: y_3, 24: (y^a2)_2, 36: y^a1 , 48: y^a0}
|
||||
PUSH 36
|
||||
%frob_fp254_12_1
|
||||
POP
|
||||
// stack: val, retdest {val: y_3, 24: (y^a2)_2, 36: (y^a1)_1, 48: y^a0}
|
||||
%stack (val) -> (24, val, val, penult_mul, val)
|
||||
// stack: 24, val, val, penult_mul, val, retdest {val: y_3, 24: (y^a2)_2, 36: (y^a1)_1, 48: y^a0}
|
||||
%jump(mul_fp254_12)
|
||||
penult_mul:
|
||||
// stack: val, retdest {val: y_3 * (y^a2)_2, 36: (y^a1)_1, 48: y^a0}
|
||||
%stack (val) -> (36, val, val, final_mul, val)
|
||||
// stack: 36, val, val, final_mul, val, retdest {val: y_3 * (y^a2)_2, 36: (y^a1)_1, 48: y^a0}
|
||||
%jump(mul_fp254_12)
|
||||
final_mul:
|
||||
// stack: val, retdest {val: y_3 * (y^a2)_2 * (y^a1)_1, 48: y^a0}
|
||||
%stack (val) -> (48, val, val)
|
||||
// stack: 48, val, val, retdest {val: y_3 * (y^a2)_2 * (y^a1)_1, 48: y^a0}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
|
||||
/// def power_loop_4():
|
||||
/// for i in range(64):
|
||||
/// abc = load(i, power_data_4)
|
||||
/// if a:
|
||||
/// y4 *= acc
|
||||
/// if b:
|
||||
/// y2 *= acc
|
||||
/// if c:
|
||||
/// y0 *= acc
|
||||
/// acc = square_fp254_12(acc)
|
||||
/// y4 *= acc
|
||||
///
|
||||
/// def power_loop_2():
|
||||
/// for i in range(62):
|
||||
/// ab = load(i, power_data_2)
|
||||
/// if a:
|
||||
/// y2 *= acc
|
||||
/// if b:
|
||||
/// y0 *= acc
|
||||
/// acc = square_fp254_12(acc)
|
||||
/// y2 *= acc
|
||||
///
|
||||
/// def power_loop_0():
|
||||
/// for i in range(65):
|
||||
/// a = load(i, power_data_0)
|
||||
/// if a:
|
||||
/// y0 *= acc
|
||||
/// acc = square_fp254_12(acc)
|
||||
/// y0 *= acc
|
||||
|
||||
power_loop_4:
|
||||
// stack: i , j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: break?, i , j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_4_end)
|
||||
// stack: i , j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%sub_const(1)
|
||||
// stack: i-1, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
%mload_kernel_code(power_data_4)
|
||||
// stack: abc, i-1, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
%lt_const(100)
|
||||
// stack: skip?, abc, i-1, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_4_b)
|
||||
// stack: abc, i-1, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%sub_const(100)
|
||||
// stack: bc, i-1, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (36, 36, power_loop_4_b)
|
||||
// stack: 36, 36, power_loop_4_b, bc, i-1, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP8
|
||||
// stack: sqr, 36, 36, power_loop_4_b, bc, i-1, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_4_b:
|
||||
// stack: bc, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
%lt_const(10)
|
||||
// stack: skip?, bc, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_4_c)
|
||||
// stack: bc, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%sub_const(10)
|
||||
// stack: c, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (24, 24, power_loop_4_c)
|
||||
// stack: 24, 24, power_loop_4_c, c, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP8
|
||||
// stack: sqr, 24, 24, power_loop_4_c, c, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_4_c:
|
||||
// stack: c, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
ISZERO
|
||||
// stack: skip?, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_4_sq)
|
||||
// stack: i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (12, 12, power_loop_4_sq)
|
||||
// stack: 12, 12, power_loop_4_sq, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP7
|
||||
// stack: sqr, 12, 12, power_loop_4_sq, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_4_sq:
|
||||
// stack: i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
PUSH power_loop_4
|
||||
// stack: power_loop_4, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP5
|
||||
DUP1
|
||||
// stack: sqr, sqr, power_loop_4, i, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(square_fp254_12)
|
||||
power_loop_4_end:
|
||||
// stack: 0, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
POP
|
||||
// stack: j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (36, 36, power_loop_2)
|
||||
// stack: 36, 36, power_loop_2, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP6
|
||||
// stack: sqr, 36, 36, power_loop_2, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
power_loop_2:
|
||||
// stack: j , k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: break?, j , k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_2_end)
|
||||
// stack: j , k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%sub_const(1)
|
||||
// stack: j-1, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
%mload_kernel_code(power_data_2)
|
||||
// stack: ab, j-1, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
%lt_const(10)
|
||||
// stack: skip?, ab, j-1, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_2_b)
|
||||
// stack: ab, j-1, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%sub_const(10)
|
||||
// stack: b, j-1, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (24, 24, power_loop_2_b)
|
||||
// stack: 24, 24, power_loop_2_b, b, j-1, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP7
|
||||
// stack: sqr, 24, 24, power_loop_2_b, b, j-1, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_2_b:
|
||||
// stack: b, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
ISZERO
|
||||
// stack: skip?, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_2_sq)
|
||||
// stack: j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (12, 12, power_loop_2_sq)
|
||||
// stack: 12, 12, power_loop_2_sq, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP6
|
||||
// stack: sqr, 12, 12, power_loop_2_sq, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_2_sq:
|
||||
// stack: j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
PUSH power_loop_2
|
||||
// stack: power_loop_2, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP4
|
||||
DUP1
|
||||
// stack: sqr, sqr, power_loop_2, j, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(square_fp254_12)
|
||||
power_loop_2_end:
|
||||
// stack: 0, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
POP
|
||||
// stack: k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (24, 24, power_loop_0)
|
||||
// stack: 24, 24, power_loop_0, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP5
|
||||
// stack: sqr, 24, 24, power_loop_0, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
power_loop_0:
|
||||
// stack: k , sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: break?, k , sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_0_end)
|
||||
// stack: k , sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%sub_const(1)
|
||||
// stack: k-1, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP1
|
||||
%mload_kernel_code(power_data_0)
|
||||
// stack: a, k-1, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
ISZERO
|
||||
// stack: skip?, k-1, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jumpi(power_loop_0_sq)
|
||||
// stack: k-1, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack () -> (12, 12, power_loop_0_sq)
|
||||
// stack: 12, 12, power_loop_0_sq, k-1, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP5
|
||||
// stack: sqr, 12, 12, power_loop_0_sq, k-1, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_0_sq:
|
||||
// stack: k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
PUSH power_loop_0
|
||||
// stack: power_loop_0, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
DUP3
|
||||
DUP1
|
||||
// stack: sqr, sqr, power_loop_0, k, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(square_fp254_12)
|
||||
power_loop_0_end:
|
||||
// stack: 0, sqr {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%stack (i, sqr) -> (12, sqr, 12, custom_powers)
|
||||
// stack: 12, sqr, 12, custom_powers {0: sqr, 12: y0, 24: y2, 36: y4}
|
||||
%jump(mul_fp254_12)
|
||||
@ -1,319 +0,0 @@
|
||||
/// To make the Tate pairing output an invariant (independent of the coset
/// representative returned by the Miller loop), the final step is to exponentiate by
///     (p^12 - 1)/N = (p^6 - 1) * (p^2 + 1) * (p^4 - p^2 + 1)/N
/// and thus we can exponentiate by each factor sequentially.
///
/// def bn254_invariant_exponent(y: Fp12):
///     y = first_exp(y)
///     y = second_exp(y)
///     return final_exp(y)
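/// For reference, a minimal Python-style sketch of the same three factors,
/// assuming an Fp12 type with frobenius(n) and inverse() (illustrative names,
/// not the kernel's memory-based routines):
///
///     def invariant_exponent_sketch(y):
///         y = y.frobenius(6) * y.inverse()   # raise to the power (p^6 - 1)
///         y = y.frobenius(2) * y             # raise to the power (p^2 + 1)
///         return final_exp(y)                # raise to the power (p^4 - p^2 + 1)/N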
global bn254_invariant_exponent:
|
||||
|
||||
/// first, exponentiate by (p^6 - 1) via
|
||||
/// def first_exp(y):
|
||||
/// return y.frob(6) / y
|
||||
// stack: out, retdest {out: y}
|
||||
%stack (out) -> (out, 0, first_exp, out)
|
||||
// stack: out, 0, first_exp, out, retdest {out: y}
|
||||
%jump(inv_fp254_12)
|
||||
first_exp:
|
||||
// stack: out, retdest {out: y , 0: y^-1}
|
||||
%frob_fp254_12_6
|
||||
// stack: out, retdest {out: y_6, 0: y^-1}
|
||||
%stack (out) -> (out, 0, out, second_exp, out)
|
||||
// stack: out, 0, out, second_exp, out, retdest {out: y_6, 0: y^-1}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
/// second, exponentiate by (p^2 + 1) via
|
||||
/// def second_exp(y):
|
||||
/// return y.frob(2) * y
|
||||
second_exp:
|
||||
// stack: out, retdest {out: y}
|
||||
%stack (out) -> (out, 0, out, out, final_exp, out)
|
||||
// stack: out, 0, out, out, final_exp, out, retdest {out: y}
|
||||
%frob_fp254_12_2_
|
||||
// stack: 0, out, out, final_exp, out, retdest {out: y, 0: y_2}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
/// Finally, we must exponentiate by (p^4 - p^2 + 1)/N
|
||||
/// To do so efficiently, we can express this power as
|
||||
/// (p^4 - p^2 + 1)/N = p^3 + (a2)p^2 - (a1)p - a0
|
||||
/// and simultaneously compute y^a4, y^a2, y^a0 where
|
||||
/// a1 = a4 + 2a2 - a0
|
||||
/// We first initialize these powers as 1 and then use
|
||||
/// binary algorithms for exponentiation.
|
||||
///
|
||||
/// def final_exp(y):
|
||||
/// y4, y2, y0 = 1, 1, 1
|
||||
/// power_loop_4()
|
||||
/// power_loop_2()
|
||||
/// power_loop_0()
|
||||
/// custom_powers()
|
||||
/// final_power()
|
||||
|
||||
final_exp:
|
||||
// stack: val, retdest
|
||||
%stack (val) -> (val, 12, val)
|
||||
// stack: val, 12, val, retdest
|
||||
%move_fp254_12
|
||||
// stack: 12, val, retdest {12: sqr}
|
||||
%stack () -> (1, 1, 1)
|
||||
// stack: 1, 1, 1, 12, val, retdest
|
||||
%mstore_kernel_bn254_pairing(24)
|
||||
%mstore_kernel_bn254_pairing(36)
|
||||
%mstore_kernel_bn254_pairing(48)
|
||||
// stack: 12, val, retdest {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (64, 62, 65)
|
||||
// stack: 64, 62, 65, 12, val, retdest {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(power_loop_4)
|
||||
|
||||
/// After computing the powers
|
||||
/// y^a4, y^a2, y^a0
|
||||
/// we would like to transform them to
|
||||
/// y^a2, y^-a1, y^-a0
|
||||
///
|
||||
/// def custom_powers()
|
||||
/// y0 = y0^{-1}
|
||||
/// y1 = y4 * y2^2 * y0
|
||||
/// return y2, y1, y0
|
||||
///
|
||||
/// And finally, upon doing so, compute the final power
|
||||
/// y^(p^3) * (y^a2)^(p^2) * (y^-a1)^p * (y^-a0)
|
||||
///
|
||||
/// def final_power()
|
||||
/// y = y.frob(3)
|
||||
/// y2 = y2.frob(2)
|
||||
/// y1 = y1.frob(1)
|
||||
/// return y * y2 * y1 * y0
|
||||
|
||||
custom_powers:
|
||||
// stack: val, retdest {24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (24, 60, make_term_1)
|
||||
// stack: 24, 60, make_term_1, val, retdest {24: y0, 36: y2, 48: y4}
|
||||
%jump(inv_fp254_12)
|
||||
make_term_1:
|
||||
// stack: val, retdest {36: y2, 48: y4, 60: y0^-1}
|
||||
%stack () -> (36, 48, 48, make_term_2)
|
||||
// stack: 36, 48, 48, make_term_2, val, retdest {36: y2, 48: y4, 60: y0^-1}
|
||||
%jump(mul_fp254_12)
|
||||
make_term_2:
|
||||
// stack: val, retdest {36: y2, 48: y4 * y2, 60: y0^-1}
|
||||
%stack () -> (36, 48, 48, make_term_3)
|
||||
// stack: 36, 48, 48, make_term_3, val, retdest {36: y2, 48: y4 * y2, 60: y0^-1}
|
||||
%jump(mul_fp254_12)
|
||||
make_term_3:
|
||||
// stack: val, retdest {36: y2, 48: y4 * y2^2, 60: y0^-1}
|
||||
%stack () -> (60, 48, 48, final_power)
|
||||
// stack: 60, 48, 48, final_power, val, retdest {36: y2, 48: y4 * y2^2, 60: y0^-1}
|
||||
%jump(mul_fp254_12)
|
||||
final_power:
|
||||
// stack: val, retdest {val: y , 36: y^a2 , 48: y^a1 , 60: y^a0}
|
||||
%frob_fp254_12_3
|
||||
// stack: val, retdest {val: y_3, 36: y^a2 , 48: y^a1 , 60: y^a0}
|
||||
%stack () -> (36, 36)
|
||||
%frob_fp254_12_2_
|
||||
POP
|
||||
// stack: val, retdest {val: y_3, 36: (y^a2)_2, 48: y^a1 , 60: y^a0}
|
||||
PUSH 48
|
||||
%frob_fp254_12_1
|
||||
POP
|
||||
// stack: val, retdest {val: y_3, 36: (y^a2)_2, 48: (y^a1)_1, 60: y^a0}
|
||||
%stack (val) -> (36, val, val, penult_mul, val)
|
||||
// stack: 36, val, val, penult_mul, val, retdest {val: y_3, 36: (y^a2)_2, 48: (y^a1)_1, 60: y^a0}
|
||||
%jump(mul_fp254_12)
|
||||
penult_mul:
|
||||
// stack: val, retdest {val: y_3 * (y^a2)_2, 48: (y^a1)_1, 60: y^a0}
|
||||
%stack (val) -> (48, val, val, final_mul, val)
|
||||
// stack: 48, val, val, final_mul, val, retdest {val: y_3 * (y^a2)_2, 48: (y^a1)_1, 60: y^a0}
|
||||
%jump(mul_fp254_12)
|
||||
final_mul:
|
||||
// stack: val, retdest {val: y_3 * (y^a2)_2 * (y^a1)_1, 60: y^a0}
|
||||
%stack (val) -> (60, val, val)
|
||||
// stack: 60, val, val, retdest {val: y_3 * (y^a2)_2 * (y^a1)_1, 60: y^a0}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
|
||||
/// def power_loop_4():
|
||||
/// for i in range(64):
|
||||
/// abc = load(i, power_data_4)
|
||||
/// if a:
|
||||
/// y4 *= acc
|
||||
/// if b:
|
||||
/// y2 *= acc
|
||||
/// if c:
|
||||
/// y0 *= acc
|
||||
/// acc = square_fp254_12(acc)
|
||||
/// y4 *= acc
|
||||
///
|
||||
/// def power_loop_2():
|
||||
/// for i in range(62):
|
||||
/// ab = load(i, power_data_2)
|
||||
/// if a:
|
||||
/// y2 *= acc
|
||||
/// if b:
|
||||
/// y0 *= acc
|
||||
/// acc = square_fp254_12(acc)
|
||||
/// y2 *= acc
|
||||
///
|
||||
/// def power_loop_0():
|
||||
/// for i in range(65):
|
||||
/// a = load(i, power_data_0)
|
||||
/// if a:
|
||||
/// y0 *= acc
|
||||
/// acc = square_fp254_12(acc)
|
||||
/// y0 *= acc
|
||||
|
||||
power_loop_4:
|
||||
// stack: i , j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: break?, i , j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_4_end)
|
||||
// stack: i , j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%sub_const(1)
|
||||
// stack: i-1, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
%mload_kernel_code(power_data_4)
|
||||
// stack: abc, i-1, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
%lt_const(100)
|
||||
// stack: skip?, abc, i-1, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_4_b)
|
||||
// stack: abc, i-1, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%sub_const(100)
|
||||
// stack: bc, i-1, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (48, 48, power_loop_4_b)
|
||||
// stack: 48, 48, power_loop_4_b, bc, i-1, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP8
|
||||
// stack: sqr, 48, 48, power_loop_4_b, bc, i-1, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_4_b:
|
||||
// stack: bc, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
%lt_const(10)
|
||||
// stack: skip?, bc, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_4_c)
|
||||
// stack: bc, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%sub_const(10)
|
||||
// stack: c, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (36, 36, power_loop_4_c)
|
||||
// stack: 36, 36, power_loop_4_c, c, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP8
|
||||
// stack: sqr, 36, 36, power_loop_4_c, c, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_4_c:
|
||||
// stack: c, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
ISZERO
|
||||
// stack: skip?, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_4_sq)
|
||||
// stack: i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (24, 24, power_loop_4_sq)
|
||||
// stack: 24, 24, power_loop_4_sq, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP7
|
||||
// stack: sqr, 24, 24, power_loop_4_sq, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_4_sq:
|
||||
// stack: i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
PUSH power_loop_4
|
||||
// stack: power_loop_4, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP5
|
||||
DUP1
|
||||
// stack: sqr, sqr, power_loop_4, i, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(square_fp254_12)
|
||||
power_loop_4_end:
|
||||
// stack: 0, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
POP
|
||||
// stack: j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (48, 48, power_loop_2)
|
||||
// stack: 48, 48, power_loop_2, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP6
|
||||
// stack: sqr, 48, 48, power_loop_2, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
power_loop_2:
|
||||
// stack: j , k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: break?, j , k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_2_end)
|
||||
// stack: j , k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%sub_const(1)
|
||||
// stack: j-1, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
%mload_kernel_code(power_data_2)
|
||||
// stack: ab, j-1, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
%lt_const(10)
|
||||
// stack: skip?, ab, j-1, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_2_b)
|
||||
// stack: ab, j-1, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%sub_const(10)
|
||||
// stack: b, j-1, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (36, 36, power_loop_2_b)
|
||||
// stack: 36, 36, power_loop_2_b, b, j-1, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP7
|
||||
// stack: sqr, 36, 36, power_loop_2_b, b, j-1, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_2_b:
|
||||
// stack: b, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
ISZERO
|
||||
// stack: skip?, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_2_sq)
|
||||
// stack: j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (24, 24, power_loop_2_sq)
|
||||
// stack: 24, 24, power_loop_2_sq, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP6
|
||||
// stack: sqr, 24, 24, power_loop_2_sq, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_2_sq:
|
||||
// stack: j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
PUSH power_loop_2
|
||||
// stack: power_loop_2, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP4
|
||||
DUP1
|
||||
// stack: sqr, sqr, power_loop_2, j, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(square_fp254_12)
|
||||
power_loop_2_end:
|
||||
// stack: 0, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
POP
|
||||
// stack: k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (36, 36, power_loop_0)
|
||||
// stack: 36, 36, power_loop_0, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP5
|
||||
// stack: sqr, 36, 36, power_loop_0, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
|
||||
power_loop_0:
|
||||
// stack: k , sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: break?, k , sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_0_end)
|
||||
// stack: k , sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%sub_const(1)
|
||||
// stack: k-1, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP1
|
||||
%mload_kernel_code(power_data_0)
|
||||
// stack: a, k-1, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
ISZERO
|
||||
// stack: skip?, k-1, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jumpi(power_loop_0_sq)
|
||||
// stack: k-1, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack () -> (24, 24, power_loop_0_sq)
|
||||
// stack: 24, 24, power_loop_0_sq, k-1, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP5
|
||||
// stack: sqr, 24, 24, power_loop_0_sq, k-1, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
power_loop_0_sq:
|
||||
// stack: k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
PUSH power_loop_0
|
||||
// stack: power_loop_0, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
DUP3
|
||||
DUP1
|
||||
// stack: sqr, sqr, power_loop_0, k, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(square_fp254_12)
|
||||
power_loop_0_end:
|
||||
// stack: 0, sqr {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%stack (i, sqr) -> (24, sqr, 24, custom_powers)
|
||||
// stack: 24, sqr, 24, custom_powers {12: sqr, 24: y0, 36: y2, 48: y4}
|
||||
%jump(mul_fp254_12)
|
||||
@ -1,12 +1,3 @@
|
||||
/// def tate(P: Curve, Q: TwistedCurve) -> Fp12:
|
||||
/// out = miller_loop(P, Q)
|
||||
/// return bn254_invariant_exponent(out)
|
||||
global bn254_tate:
|
||||
// stack: inp, out, retdest
|
||||
%stack (inp, out) -> (inp, out, bn254_invariant_exponent, out)
|
||||
// stack: inp, out, bn254_invariant_exponent, out, retdest
|
||||
%jump(bn254_miller)
|
||||
|
||||
/// def miller(P, Q):
|
||||
/// miller_init()
|
||||
/// miller_loop()
|
||||
@ -35,13 +26,13 @@ global bn254_tate:
|
||||
/// mul_tangent()
|
||||
|
||||
global bn254_miller:
|
||||
// stack: ptr, out, retdest
|
||||
%stack (ptr, out) -> (out, 1, ptr, out)
|
||||
// stack: out, 1, ptr, out, retdest
|
||||
%mstore_kernel_bn254_pairing
|
||||
// stack: ptr, out, retdest
|
||||
// stack: ptr, out, retdest
|
||||
%stack (ptr, out) -> (out, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ptr, out)
|
||||
// stack: out, unit, ptr, out, retdest
|
||||
%store_fp254_12
|
||||
// stack: ptr, out, retdest
|
||||
%load_fp254_6
|
||||
// stack: P, Q, out, retdest
|
||||
// stack: P, Q, out, retdest
|
||||
%stack (P: 2) -> (0, 53, P, P)
|
||||
// stack: 0, 53, O, P, Q, out, retdest
|
||||
// the head 0 lets miller_loop start with POP
|
||||
@ -64,6 +55,7 @@ miller_return:
|
||||
// stack: times, O, P, Q, out, retdest
|
||||
%stack (times, O: 2, P: 2, Q: 4, out, retdest) -> (retdest)
|
||||
// stack: retdest
|
||||
%clear_line
|
||||
JUMP
|
||||
|
||||
miller_one:
|
||||
@ -109,35 +101,35 @@ mul_tangent:
|
||||
// stack: out, out, mul_tangent_1, out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out
|
||||
%jump(square_fp254_12)
|
||||
mul_tangent_1:
|
||||
// stack: out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out
|
||||
// stack: out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out
|
||||
DUP13
|
||||
DUP13
|
||||
DUP13
|
||||
DUP13
|
||||
// stack: Q, out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out
|
||||
// stack: Q, out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out
|
||||
DUP11
|
||||
DUP11
|
||||
// stack: O, Q, out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out
|
||||
// stack: O, Q, out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out
|
||||
%tangent
|
||||
// stack: out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out {0: line}
|
||||
%stack (out) -> (out, 0, out)
|
||||
// stack: out, 0, out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out {0: line}
|
||||
// stack: out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out {12: line}
|
||||
%stack (out) -> (out, 12, out)
|
||||
// stack: out, 12, out, mul_tangent_2, retdest, 0xnm, times, O, P, Q, out {12: line}
|
||||
%jump(mul_fp254_12_sparse)
|
||||
mul_tangent_2:
|
||||
// stack: retdest, 0xnm, times, O, P, Q, out {0: line}
|
||||
// stack: retdest, 0xnm, times, O, P, Q, out {12: line}
|
||||
PUSH after_double
|
||||
// stack: after_double, retdest, 0xnm, times, O, P, Q, out {0: line}
|
||||
// stack: after_double, retdest, 0xnm, times, O, P, Q, out {12: line}
|
||||
DUP6
|
||||
DUP6
|
||||
// stack: O, after_double, retdest, 0xnm, times, O, P, Q, out {0: line}
|
||||
// stack: O, after_double, retdest, 0xnm, times, O, P, Q, out {12: line}
|
||||
%jump(bn_double)
|
||||
after_double:
|
||||
// stack: 2*O, retdest, 0xnm, times, O, P, Q, out {0: line}
|
||||
// stack: 2*O, retdest, 0xnm, times, O, P, Q, out {12: line}
|
||||
SWAP5
|
||||
POP
|
||||
SWAP5
|
||||
POP
|
||||
// stack: retdest, 0xnm, times, 2*O, P, Q, out {0: line}
|
||||
// stack: retdest, 0xnm, times, 2*O, P, Q, out {12: line}
|
||||
JUMP
|
||||
|
||||
/// def mul_cord()
|
||||
@ -146,26 +138,26 @@ after_double:
|
||||
/// O += P
|
||||
|
||||
mul_cord:
|
||||
// stack: 0xnm, times, O, P, Q, out
|
||||
// stack: 0xnm, times, O, P, Q, out
|
||||
PUSH mul_cord_1
|
||||
// stack: mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
// stack: mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
DUP11
|
||||
DUP11
|
||||
DUP11
|
||||
DUP11
|
||||
// stack: Q, mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
// stack: Q, mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
DUP9
|
||||
DUP9
|
||||
// stack: O, Q, mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
// stack: O, Q, mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
DUP13
|
||||
DUP13
|
||||
// stack: P, O, Q, mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
// stack: P, O, Q, mul_cord_1, 0xnm, times, O, P, Q, out
|
||||
%cord
|
||||
// stack: mul_cord_1, 0xnm, times, O, P, Q, out {0: line}
|
||||
// stack: mul_cord_1, 0xnm, times, O, P, Q, out {12: line}
|
||||
DUP12
|
||||
// stack: out, mul_cord_1, 0xnm, times, O, P, Q, out {0: line}
|
||||
%stack (out) -> (out, 0, out)
|
||||
// stack: out, 0, out, mul_cord_1, 0xnm, times, O, P, Q, out {0: line}
|
||||
// stack: out, mul_cord_1, 0xnm, times, O, P, Q, out {12: line}
|
||||
%stack (out) -> (out, 12, out)
|
||||
// stack: out, 12, out, mul_cord_1, 0xnm, times, O, P, Q, out {12: line}
|
||||
%jump(mul_fp254_12_sparse)
|
||||
mul_cord_1:
|
||||
// stack: 0xnm, times, O , P, Q, out
|
||||
@ -202,7 +194,7 @@ after_add:
|
||||
// stack: py^2 , 9, px, py, qx, qx_, qy, qy_
|
||||
SUBFP254
|
||||
// stack: py^2 - 9, px, py, qx, qx_, qy, qy_
|
||||
%mstore_kernel_bn254_pairing(0)
|
||||
%mstore_kernel_bn254_pairing(12)
|
||||
// stack: px, py, qx, qx_, qy, qy_
|
||||
DUP1
|
||||
MULFP254
|
||||
@ -218,7 +210,7 @@ after_add:
|
||||
DUP3
|
||||
MULFP254
|
||||
// stack: (-3*px^2)qx, py, -3px^2, qx_, qy, qy_
|
||||
%mstore_kernel_bn254_pairing(2)
|
||||
%mstore_kernel_bn254_pairing(14)
|
||||
// stack: py, -3px^2, qx_, qy, qy_
|
||||
PUSH 2
|
||||
MULFP254
|
||||
@ -228,15 +220,15 @@ after_add:
|
||||
DUP4
|
||||
MULFP254
|
||||
// stack: (2py)qy, -3px^2, qx_, 2py, qy_
|
||||
%mstore_kernel_bn254_pairing(8)
|
||||
%mstore_kernel_bn254_pairing(20)
|
||||
// stack: -3px^2, qx_, 2py, qy_
|
||||
MULFP254
|
||||
// stack: (-3px^2)*qx_, 2py, qy_
|
||||
%mstore_kernel_bn254_pairing(3)
|
||||
%mstore_kernel_bn254_pairing(15)
|
||||
// stack: 2py, qy_
|
||||
MULFP254
|
||||
// stack: (2py)*qy_
|
||||
%mstore_kernel_bn254_pairing(9)
|
||||
%mstore_kernel_bn254_pairing(21)
|
||||
%endmacro
|
||||
|
||||
/// def cord(p1x, p1y, p2x, p2y, qx, qy):
|
||||
@ -258,7 +250,7 @@ after_add:
|
||||
// stack: p1y*p2x , p2y*p1x, p1x , p1y, p2x , p2y, qx, qx_, qy, qy_
|
||||
SUBFP254
|
||||
// stack: p1y*p2x - p2y*p1x, p1x , p1y, p2x , p2y, qx, qx_, qy, qy_
|
||||
%mstore_kernel_bn254_pairing(0)
|
||||
%mstore_kernel_bn254_pairing(12)
|
||||
// stack: p1x , p1y, p2x , p2y, qx, qx_, qy, qy_
|
||||
SWAP3
|
||||
// stack: p2y , p1y, p2x , p1x, qx, qx_, qy, qy_
|
||||
@ -273,20 +265,29 @@ after_add:
|
||||
DUP5
|
||||
MULFP254
|
||||
// stack: (p1x - p2x)qy, p2y - p1y, qx, qx_, p1x - p2x, qy_
|
||||
%mstore_kernel_bn254_pairing(8)
|
||||
%mstore_kernel_bn254_pairing(20)
|
||||
// stack: p2y - p1y, qx, qx_, p1x - p2x, qy_
|
||||
SWAP1
|
||||
// stack: qx, p2y - p1y, qx_, p1x - p2x, qy_
|
||||
DUP2
|
||||
MULFP254
|
||||
// stack: (p2y - p1y)qx, p2y - p1y, qx_, p1x - p2x, qy_
|
||||
%mstore_kernel_bn254_pairing(2)
|
||||
%mstore_kernel_bn254_pairing(14)
|
||||
// stack: p2y - p1y, qx_, p1x - p2x, qy_
|
||||
MULFP254
|
||||
// stack: (p2y - p1y)qx_, p1x - p2x, qy_
|
||||
%mstore_kernel_bn254_pairing(3)
|
||||
%mstore_kernel_bn254_pairing(15)
|
||||
// stack: p1x - p2x, qy_
|
||||
MULFP254
|
||||
// stack: (p1x - p2x)*qy_
|
||||
%mstore_kernel_bn254_pairing(9)
|
||||
%mstore_kernel_bn254_pairing(21)
|
||||
%endmacro
|
||||
|
||||
%macro clear_line
|
||||
%stack () -> (0, 0, 0, 0, 0)
|
||||
%mstore_kernel_bn254_pairing(12)
|
||||
%mstore_kernel_bn254_pairing(14)
|
||||
%mstore_kernel_bn254_pairing(15)
|
||||
%mstore_kernel_bn254_pairing(20)
|
||||
%mstore_kernel_bn254_pairing(21)
|
||||
%endmacro
|
||||
193
evm/src/cpu/kernel/asm/curve/bn254/curve_arithmetic/pairing.asm
Normal file
@ -0,0 +1,193 @@
|
||||
/// The input to the pairing script is a list of points
///     P_i = n_i*G: Curve, Q_i = m_i*H: TwistedCurve
/// where G, H are the respective generators, such that
///     sum_i n_i*m_i = 0
/// and therefore, due to bilinearity of the pairing:
///     prod_i e(P_i, Q_i)
///         = prod_i e(n_i G, m_i H)
///         = prod_i e(G,H)^{n_i * m_i}
///         = e(G,H)^{sum_i n_i * m_i}
///         = e(G,H)^0
///         = 1: Fp12

/// def bn254_pairing(pairs: List((Curve, TwistedCurve))) -> Bool:
///
///     for P, Q in pairs:
///         if not (P.is_valid and Q.is_valid):
///             return @U256_MAX
///
///     out = 1
///     for P, Q in pairs:
///         if P != 0 and Q != 0:
///             out *= miller_loop(P, Q)
///
///     result = bn254_final_exponent(out)
///     return result == unit_fp12
/// The following is a key to this API
///
/// - k is the number of inputs
/// - each input is given by a pair of points, one on the curve and one on the twisted curve
/// - each input consists of 6 stack terms: 2 for the curve point and 4 for the twisted curve point
/// - the inputs are presumed to be placed contiguously in kernel memory
/// - the output (as defined above) is an Fp12 element
/// - out and inp are the BnPairing segment offsets for the output element and input
/// - the assembly code currently uses offsets 0-78 for scratch space
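/// For illustration, the assumed memory layout of input j (a sketch; `mem`
/// stands for the BnPairing kernel segment and is not a kernel identifier):
///
///     def write_input(mem, inp, j, P, Q):
///         mem[inp + 6*j], mem[inp + 6*j + 1] = P       # curve point (x, y)
///         mem[inp + 6*j + 2 : inp + 6*j + 6] = Q       # twisted point (x, x_, y, y_)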
global bn254_pairing:
|
||||
// stack: k, inp, out, retdest
|
||||
DUP1
|
||||
|
||||
bn254_input_check:
|
||||
// stack: j , k, inp
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: end?, j , k, inp
|
||||
%jumpi(bn254_pairing_start)
|
||||
// stack: j , k, inp
|
||||
%sub_const(1)
|
||||
// stack: j=j-1, k, inp
|
||||
|
||||
%stack (j, k, inp) -> (j, inp, j, k, inp)
|
||||
// stack: j, inp, j, k, inp
|
||||
%mul_const(6)
|
||||
ADD
|
||||
// stack: inp_j=inp+6j, j, k, inp
|
||||
DUP1
|
||||
// stack: inp_j, inp_j, j, k, inp
|
||||
%load_fp254_2
|
||||
// stack: P_j, inp_j, j, k, inp
|
||||
%bn_check
|
||||
// stack: valid?, inp_j, j, k, inp
|
||||
ISZERO
|
||||
%jumpi(bn_pairing_invalid_input)
|
||||
// stack: inp_j, j, k, inp
|
||||
DUP1
|
||||
// stack: inp_j , inp_j, j, k, inp
|
||||
%add_const(2)
|
||||
// stack: inp_j', inp_j, j, k, inp
|
||||
%load_fp254_4
|
||||
// stack: Q_j, inp_j, j, k, inp
|
||||
%bn_check_twisted
|
||||
// stack: valid?, inp_j, j, k, inp
|
||||
ISZERO
|
||||
%jumpi(bn_pairing_invalid_input)
|
||||
// stack: inp_j, j, k, inp
|
||||
POP
|
||||
%jump(bn254_input_check)
|
||||
|
||||
bn_pairing_invalid_input:
|
||||
// stack: inp_j, j, k, inp, out, retdest
|
||||
%stack (inp_j, j, k, inp, out, retdest) -> (retdest, inp_j)
|
||||
JUMP
|
||||
|
||||
bn254_pairing_start:
|
||||
// stack: 0, k, inp, out, retdest
|
||||
%stack (j, k, inp, out) -> (out, 1, k, inp, out, bn254_pairing_output_validation, out)
|
||||
// stack: out, 1, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%mstore_kernel_bn254_pairing
|
||||
// stack: k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
|
||||
bn254_pairing_loop:
|
||||
// stack: k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
DUP1
|
||||
ISZERO
|
||||
// stack: end?, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%jumpi(bn254_final_exponent)
|
||||
// stack: k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%sub_const(1)
|
||||
// stack: k=k-1, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%stack (k, inp) -> (k, inp, k, inp)
|
||||
// stack: k, inp, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%mul_const(6)
|
||||
ADD
|
||||
// stack: inp_k, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
DUP1
|
||||
%load_fp254_6
|
||||
// stack: P, Q, inp_k, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%neutral_input
|
||||
// stack: skip?, inp_k, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%jumpi(bn_skip_input)
|
||||
// stack: inp_k, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%stack (inp_k, k, inp, out) -> (bn254_miller, inp_k, 0, mul_fp254_12, 0, out, out, bn254_pairing_loop, k, inp, out)
|
||||
// stack: bn254_miller, inp_k, 0,
|
||||
// mul_fp254_12, 0, out, out,
|
||||
// bn254_pairing_loop, k, inp, out,
|
||||
// bn254_pairing_output_validation, out, retdest
|
||||
JUMP
|
||||
|
||||
bn_skip_input:
|
||||
// stack: inp_k, k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
POP
|
||||
// stack: k, inp, out, bn254_pairing_output_validation, out, retdest
|
||||
%jump(bn254_pairing_loop)
|
||||
|
||||
|
||||
bn254_pairing_output_validation:
|
||||
// stack: out, retdest
|
||||
PUSH 1
|
||||
// stack: check, out, retdest
|
||||
%check_output_term
|
||||
%check_output_term(1)
|
||||
%check_output_term(2)
|
||||
%check_output_term(3)
|
||||
%check_output_term(4)
|
||||
%check_output_term(5)
|
||||
%check_output_term(6)
|
||||
%check_output_term(7)
|
||||
%check_output_term(8)
|
||||
%check_output_term(9)
|
||||
%check_output_term(10)
|
||||
%check_output_term(11)
|
||||
// stack: check, out, retdest
|
||||
%stack (check, out, retdest) -> (retdest, check)
|
||||
JUMP
|
||||
|
||||
%macro check_output_term
|
||||
// stack: check, out
|
||||
DUP2
|
||||
// stack: out0, check, out
|
||||
%mload_kernel_bn254_pairing
|
||||
// stack: f0, check, out
|
||||
%eq_const(1)
|
||||
// stack: check0, check, out
|
||||
MUL
|
||||
// stack: check, out
|
||||
%endmacro
|
||||
|
||||
%macro check_output_term(j)
|
||||
// stack: check, out
|
||||
DUP2
|
||||
%add_const($j)
|
||||
// stack: outj, check, out
|
||||
%mload_kernel_bn254_pairing
|
||||
// stack: fj, check, out
|
||||
ISZERO
|
||||
// stack: checkj, check, out
|
||||
MUL
|
||||
// stack: check, out
|
||||
%endmacro
|
||||
|
||||
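// In Python terms, the two macros above implement the following check on the
// 12 output coefficients f (a sketch, names illustrative):
//
//     def is_unit(f):
//         return f[0] == 1 and all(c == 0 for c in f[1:])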
%macro neutral_input
|
||||
// stack: P , Q
|
||||
ISZERO
|
||||
SWAP1
|
||||
ISZERO
|
||||
MUL
|
||||
// stack: P==0, Q
|
||||
SWAP4
|
||||
// stack: Q , P==0
|
||||
ISZERO
|
||||
SWAP1
|
||||
ISZERO
|
||||
MUL
|
||||
SWAP1
|
||||
ISZERO
|
||||
MUL
|
||||
SWAP1
|
||||
ISZERO
|
||||
MUL
|
||||
// stack: Q==0, P==0
|
||||
OR
|
||||
// stack: Q==0||P==0
|
||||
%endmacro
|
||||
@ -0,0 +1,94 @@
|
||||
// Check if (X,Y) is a valid point on the twisted curve.
// Returns (range & curve) || ident
// where
//     range = (x < N) & (x_ < N) & (y < N) & (y_ < N)
//     curve = Y^2 == X^3 + 3/(9+i)
//     ident = (X,Y) == (0,0)
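// A plain Python sketch of the same predicate, assuming Fp2 helper functions
// and the twisted-curve constant B2 = 3/(9+i) (illustrative names only):
//
//     def bn_check_twisted_sketch(X, Y):
//         in_range = all(c < N for c in (*X, *Y))
//         on_curve = fp2_mul(Y, Y) == fp2_add(fp2_mul(X, fp2_mul(X, X)), B2)
//         is_ident = X == (0, 0) and Y == (0, 0)
//         return (in_range and on_curve) or is_ident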
%macro bn_check_twisted
|
||||
// stack: x, x_, y, y_
|
||||
%bn_check_twisted_range
|
||||
// stack: range, x, x_, y, y_
|
||||
%bn_check_twisted_curve
|
||||
// stack: curve , range, x, x_, y, y_
|
||||
MUL // Cheaper than AND
|
||||
// stack: curve & range, x, x_, y, y_
|
||||
SWAP4
|
||||
// stack: y_, x, x_, y, curve & range
|
||||
%bn_check_twisted_ident
|
||||
// stack: ident , curve & range
|
||||
OR
|
||||
// stack: ident || (curve & range)
|
||||
%endmacro
|
||||
|
||||
%macro bn_check_twisted_range
|
||||
// stack: x, x_, y, y_
|
||||
PUSH @BN_BASE
|
||||
// stack: N, x, x_, y, y_
|
||||
%stack (N) -> (N, N, N, N)
|
||||
// stack: N, N, N, N, x, x_, y, y_
|
||||
DUP8
|
||||
// stack: y_ , N, N, N, N, x, x_, y, y_
|
||||
LT
|
||||
// stack: y_ < N, N, N, N, x, x_, y, y_
|
||||
SWAP3
|
||||
// stack: N, N, N, y_ < N, x, x_, y, y_
|
||||
DUP7
|
||||
// stack: y , N, N, N, y_ < N, x, x_, y, y_
|
||||
LT
|
||||
// stack: y < N, N, N, y_ < N, x, x_, y, y_
|
||||
SWAP2
|
||||
// stack: N, N, y < N, y_ < N, x, x_, y, y_
|
||||
DUP6
|
||||
// stack: x_ , N, N, y < N, y_ < N, x, x_, y, y_
|
||||
LT
|
||||
// stack: x_ < N, N, y < N, y_ < N, x, x_, y, y_
|
||||
SWAP1
|
||||
// stack: N, x_ < N, y < N, y_ < N, x, x_, y, y_
|
||||
DUP5
|
||||
// stack: x , N, x_ < N, y < N, y_ < N, x, x_, y, y_
|
||||
LT
|
||||
// stack: x < N, x_ < N, y < N, y_ < N, x, x_, y, y_
|
||||
MUL // Cheaper than AND
|
||||
MUL // Cheaper than AND
|
||||
MUL // Cheaper than AND
|
||||
// stack: range, x, x_, y, y_
|
||||
%endmacro
|
||||
|
||||
%macro bn_check_twisted_curve
|
||||
// stack: range, X, Y
|
||||
%stack (range, X: 2, Y: 2) -> (Y, Y, range, X, Y)
|
||||
// stack: Y, Y, range, X, Y
|
||||
%mul_fp254_2
|
||||
// stack: Y^2, range, X, Y
|
||||
%stack () -> (@BN_TWISTED_RE, @BN_TWISTED_IM)
|
||||
// stack: A, Y^2, range, X, Y
|
||||
%stack (A: 2, Y2: 2, range, X: 2) -> (X, X, X, A, Y2, range, X)
|
||||
// stack: X, X, X, A, Y^2, range, X, Y
|
||||
%mul_fp254_2
|
||||
%mul_fp254_2
|
||||
// stack: X^3 , A, Y^2, range, X, Y
|
||||
%add_fp254_2
|
||||
// stack: X^3 + A, Y^2, range, X, Y
|
||||
%eq_fp254_2
|
||||
// stack: curve, range, X, Y
|
||||
%endmacro
|
||||
|
||||
%macro bn_check_twisted_ident
|
||||
SWAP2
|
||||
// stack: a , b , c , d
|
||||
ISZERO
|
||||
SWAP3
|
||||
// stack: d , b , c , a==0
|
||||
ISZERO
|
||||
SWAP2
|
||||
// stack: c , b , d==0, a==0
|
||||
ISZERO
|
||||
SWAP1
|
||||
// stack: b , c==0, d==0, a==0
|
||||
ISZERO
|
||||
// stack: b==0, c==0, d==0, a==0
|
||||
MUL // Cheaper than AND
|
||||
MUL // Cheaper than AND
|
||||
MUL // Cheaper than AND
|
||||
%endmacro
|
||||
@ -2,32 +2,6 @@
|
||||
///// GENERAL FP12 MULTIPLICATION /////
|
||||
///////////////////////////////////////
|
||||
|
||||
/// cost: 1063
|
||||
|
||||
/// fp254_6 functions:
|
||||
/// fn | num | ops | cost
|
||||
/// -------------------------
|
||||
/// load | 8 | 40 | 320
|
||||
/// store | 5 | 40 | 200
|
||||
/// dup | 5 | 6 | 30
|
||||
/// swap | 4 | 16 | 64
|
||||
/// add | 4 | 16 | 64
|
||||
/// subr | 1 | 17 | 17
|
||||
/// mul | 3 | 157 | 471
|
||||
/// i9 | 1 | 9 | 9
|
||||
///
|
||||
/// lone stack operations:
|
||||
/// op | num
|
||||
/// ------------
|
||||
/// ADD | 3
|
||||
/// SWAP | 2
|
||||
/// DUP | 6
|
||||
/// PUSH | 6
|
||||
/// POP | 2
|
||||
/// JUMP | 6
|
||||
///
|
||||
/// TOTAL: 1201
|
||||
|
||||
/// inputs:
///     F = f + f'z
///     G = g + g'z
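/// The hunk below computes F*G with one Karatsuba step over Fp6. A Python-style
/// sketch, where sh() mirrors the %store_fp254_6_sh shift (multiplication by the
/// non-residue) used in the code (illustrative names):
///
///     def mul_fp12_sketch(f, fp, g, gp):
///         a = f * g
///         b = fp * gp
///         c = (f + fp) * (g + gp)
///         return a + sh(b), c - a - b    # constant and z coefficients of F*G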
@ -66,73 +40,73 @@ mul_fp254_12_1:
|
||||
// stack: f'g', g' , f', inA, inB, out
|
||||
%dup_fp254_6_0
|
||||
// stack: f'g', f'g', g' , f', inA, inB, out
|
||||
%store_fp254_6_sh(84)
|
||||
// stack: f'g', g' , f', inA, inB, out {84: sh(f'g')}
|
||||
%store_fp254_6(90)
|
||||
// stack: g' , f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
%store_fp254_6_sh(60)
|
||||
// stack: f'g', g' , f', inA, inB, out {60: sh(f'g')}
|
||||
%store_fp254_6(66)
|
||||
// stack: g' , f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
DUP13
|
||||
// stack: inA, g' , f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: inA, g' , f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
DUP15
|
||||
// stack: inB, inA, g' , f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: inB, inA, g' , f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%load_fp254_6
|
||||
// stack: g , inA, g' , f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: g , inA, g' , f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%stack (f: 6, x, g: 6) -> (g, x, f)
|
||||
// stack: g', inA, g , f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: g', inA, g , f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%dup_fp254_6_7
|
||||
// stack: g,g', inA, g , f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: g,g', inA, g , f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%add_fp254_6
|
||||
// stack: g+g', inA, g , f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: g+g', inA, g , f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%stack (f: 6, x, g: 6) -> (g, x, f)
|
||||
// stack: g, inA, g+g', f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: g, inA, g+g', f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
PUSH mul_fp254_12_2
|
||||
// stack: mul_fp254_12_2, g, inA, g+g', f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: mul_fp254_12_2, g, inA, g+g', f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
SWAP7
|
||||
// stack: inA, g, mul_fp254_12_2, g+g', f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: inA, g, mul_fp254_12_2, g+g', f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%load_fp254_6
|
||||
// stack: f, g, mul_fp254_12_2, g+g', f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
// stack: f, g, mul_fp254_12_2, g+g', f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%jump(mul_fp254_6)
|
||||
mul_fp254_12_2:
|
||||
// stack: fg, g+g', f', inA, inB, out {84: sh(f'g'), 90: f'g'}
|
||||
%store_fp254_6(96)
|
||||
// stack: g+g', f', inA, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: fg, g+g', f', inA, inB, out {60: sh(f'g'), 66: f'g'}
|
||||
%store_fp254_6(72)
|
||||
// stack: g+g', f', inA, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%stack (x: 6, y: 6) -> (y, x)
|
||||
// stack: f', g+g', inA, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: f', g+g', inA, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
PUSH mul_fp254_12_3
|
||||
// stack: mul_fp254_12_3, f', g+g', inA, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: mul_fp254_12_3, f', g+g', inA, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
SWAP13
|
||||
// stack: inA, f', g+g', mul_fp254_12_3, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: inA, f', g+g', mul_fp254_12_3, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%load_fp254_6
|
||||
// stack: f,f', g+g', mul_fp254_12_3, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: f,f', g+g', mul_fp254_12_3, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%add_fp254_6
|
||||
// stack: f+f', g+g', mul_fp254_12_3, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: f+f', g+g', mul_fp254_12_3, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%jump(mul_fp254_6)
|
||||
mul_fp254_12_3:
|
||||
// stack: (f+f')(g+g'), inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
%load_fp254_6(96)
|
||||
// stack: fg, (f+f')(g+g'), inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: (f+f')(g+g'), inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%load_fp254_6(72)
|
||||
// stack: fg, (f+f')(g+g'), inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%stack (x: 6, y: 6) -> (y, x)
|
||||
// stack: (f+f')(g+g'), fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: (f+f')(g+g'), fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%dup_fp254_6_6
|
||||
// stack: fg, (f+f')(g+g'), fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
%load_fp254_6(90)
|
||||
// stack: f'g',fg, (f+f')(g+g'), fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: fg, (f+f')(g+g'), fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%load_fp254_6(66)
|
||||
// stack: f'g',fg, (f+f')(g+g'), fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%add_fp254_6
|
||||
// stack: f'g'+fg, (f+f')(g+g'), fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: f'g'+fg, (f+f')(g+g'), fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%subr_fp254_6
|
||||
// stack: (f+f')(g+g') - (f'g'+fg), fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: (f+f')(g+g') - (f'g'+fg), fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
DUP14
|
||||
%add_const(6)
|
||||
// stack: out', (f+f')(g+g') - (f'g'+fg), fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: out', (f+f')(g+g') - (f'g'+fg), fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%store_fp254_6
|
||||
// stack: fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
%load_fp254_6(84)
|
||||
// stack: sh(f'g') , fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%load_fp254_6(60)
|
||||
// stack: sh(f'g') , fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%add_fp254_6
|
||||
// stack: sh(f'g') + fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: sh(f'g') + fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
DUP8
|
||||
// stack: out, sh(f'g') + fg, inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: out, sh(f'g') + fg, inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%store_fp254_6
|
||||
// stack: inB, out {84: sh(f'g'), 90: f'g', 96: fg}
|
||||
// stack: inB, out {60: sh(f'g'), 66: f'g', 72: fg}
|
||||
%pop2
|
||||
JUMP
|
||||
|
||||
@ -141,29 +115,6 @@ mul_fp254_12_3:
|
||||
///// SPARSE FP12 MULTIPLICATION /////
|
||||
//////////////////////////////////////
|
||||
|
||||
/// cost: 645
|
||||
|
||||
/// fp254_6 functions:
|
||||
/// fn | num | ops | cost
|
||||
/// ---------------------------
|
||||
/// load | 2 | 40 | 80
|
||||
/// store | 2 | 40 | 80
|
||||
/// dup | 4 | 6 | 24
|
||||
/// swap | 4 | 16 | 64
|
||||
/// add | 4 | 16 | 64
|
||||
/// mul_fp254_ | 2 | 21 | 42
|
||||
/// mul_fp254_2 | 4 | 59 | 236
|
||||
///
|
||||
/// lone stack operations:
|
||||
/// op | num
|
||||
/// ------------
|
||||
/// ADD | 6
|
||||
/// DUP | 9
|
||||
/// PUSH | 6
|
||||
/// POP | 5
|
||||
///
|
||||
/// TOTAL: 618
|
||||
|
||||
/// input:
|
||||
/// F = f + f'z
|
||||
/// G = g0 + (G1)t + (G2)tz
|
||||
@ -277,32 +228,6 @@ global mul_fp254_12_sparse:
|
||||
///// FP12 SQUARING /////
|
||||
/////////////////////////
|
||||
|
||||
/// cost: 646
|
||||
|
||||
/// fp254_6 functions:
|
||||
/// fn | num | ops | cost
|
||||
/// -------------------------
|
||||
/// load | 2 | 40 | 80
|
||||
/// store | 2 | 40 | 80
|
||||
/// dup | 2 | 6 | 12
|
||||
/// swap | 2 | 16 | 32
|
||||
/// add | 1 | 16 | 16
|
||||
/// mul | 1 | 157 | 157
|
||||
/// sq | 2 | 101 | 202
|
||||
/// dbl | 1 | 13 | 13
|
||||
///
|
||||
/// lone stack operations:
|
||||
/// op | num
|
||||
/// ------------
|
||||
/// ADD | 3
|
||||
/// SWAP | 4
|
||||
/// DUP | 5
|
||||
/// PUSH | 6
|
||||
/// POP | 3
|
||||
/// JUMP | 4
|
||||
///
|
||||
/// TOTAL:
|
||||
|
||||
/// input:
|
||||
/// F = f + f'z
|
||||
///
|
||||
|
||||
@ -32,16 +32,19 @@ global inv_fp254_12:
|
||||
// stack: out, f^-1, inp, out, retdest
|
||||
%store_fp254_12
|
||||
// stack: inp, out, retdest
|
||||
%stack (inp, out) -> (inp, out, 72, check_inv_fp254_12)
|
||||
// stack: inp, out, 72, check_inv_fp254_12, retdest
|
||||
%stack (inp, out) -> (inp, out, 60, check_inv_fp254_12)
|
||||
// stack: inp, out, 60, check_inv_fp254_12, retdest
|
||||
%jump(mul_fp254_12)
|
||||
check_inv_fp254_12:
|
||||
// stack: retdest
|
||||
PUSH 72
|
||||
PUSH 60
|
||||
%load_fp254_12
|
||||
// stack: unit?, retdest
|
||||
%assert_eq_unit_fp254_12
|
||||
// stack: retdest
|
||||
PUSH 0
|
||||
// stack: 0, retdest
|
||||
%mstore_kernel_bn254_pairing(60)
|
||||
JUMP
|
||||
|
||||
%macro prover_inv_fp254_12
|
||||
|
||||
@ -67,6 +67,31 @@
|
||||
// stack: cx, cy
|
||||
%endmacro
|
||||
|
||||
%macro eq_fp254_2
|
||||
// stack: x, x_, y, y_
|
||||
SWAP3
|
||||
// stack: y_, x_, y, x
|
||||
EQ
|
||||
// stack: y_==x_, y, x
|
||||
SWAP2
|
||||
// stack: x, y, y_==x_
|
||||
EQ
|
||||
// stack: x==y, y_==x_
|
||||
AND
|
||||
%endmacro
|
||||
|
||||
%macro add_fp254_2
|
||||
// stack: x, x_, y, y_
|
||||
SWAP3
|
||||
// stack: y_, x_, y, x
|
||||
ADDFP254
|
||||
// stack: z_, y, x
|
||||
SWAP2
|
||||
// stack: x, y, z_
|
||||
ADDFP254
|
||||
// stack: z, z_
|
||||
%endmacro
|
||||
|
||||
/// Given z = x + iy: Fp254_2, return complex conjugate z': Fp254_2
|
||||
/// where input is represented z.re, z.im and output as z'.im, z'.re
|
||||
/// cost: 9; note this returns y, x for the output x + yi
|
||||
@ -116,6 +141,31 @@
|
||||
// stack: ac - bd, bc + ad
|
||||
%endmacro
|
||||
|
||||
// load twisted curve
|
||||
|
||||
%macro load_fp254_4
|
||||
// stack: ptr
|
||||
DUP1
|
||||
%add_const(2)
|
||||
// stack: ind2, ptr
|
||||
%mload_kernel_bn254_pairing
|
||||
// stack: x2, ptr
|
||||
DUP2
|
||||
%add_const(1)
|
||||
// stack: ind1, x2, ptr
|
||||
%mload_kernel_bn254_pairing
|
||||
// stack: x1, x2, ptr
|
||||
DUP3
|
||||
%add_const(3)
|
||||
// stack: ind3, x1, x2, ptr
|
||||
%mload_kernel_bn254_pairing
|
||||
// stack: x3, x1, x2, ptr
|
||||
SWAP3
|
||||
// stack: ind0, x1, x2, x3
|
||||
%mload_kernel_bn254_pairing
|
||||
// stack: x0, x1, x2, x3
|
||||
%endmacro
|
||||
|
||||
// fp254_6 macros
|
||||
|
||||
%macro load_fp254_6
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
// Address where the working version of the hash value is stored.
|
||||
%macro blake2b_hash_value_addr
|
||||
%macro blake2_hash_value_addr
|
||||
PUSH 0
|
||||
// stack: 0
|
||||
%mload_kernel_general
|
||||
@ -10,14 +10,14 @@
|
||||
%endmacro
|
||||
|
||||
// Address where the working version of the compression internal state is stored.
|
||||
%macro blake2b_internal_state_addr
|
||||
%blake2b_hash_value_addr
|
||||
%macro blake2_internal_state_addr
|
||||
%blake2_hash_value_addr
|
||||
%add_const(8)
|
||||
%endmacro
|
||||
|
||||
// Address where the current message block is stored.
|
||||
%macro blake2b_message_addr
|
||||
%blake2b_internal_state_addr
|
||||
%macro blake2_message_addr
|
||||
%blake2_internal_state_addr
|
||||
%add_const(16)
|
||||
%endmacro
|
||||
|
||||
143
evm/src/cpu/kernel/asm/hash/blake2/blake2_f.asm
Normal file
@ -0,0 +1,143 @@
|
||||
global blake2_f:
|
||||
// stack: rounds, h0...h7, m0...m15, t0, t1, flag, retdest
|
||||
|
||||
// Store the hash values.
|
||||
%blake2_hash_value_addr
|
||||
// stack: addr, rounds, h0...h7, m0...m15, t0, t1, flag, retdest
|
||||
%rep 8
|
||||
// stack: addr, rounds, h_i, ...
|
||||
%stack (addr, rounds, h_i) -> (addr, h_i, addr, rounds)
|
||||
// stack: addr, h_i, addr, rounds, ...
|
||||
%mstore_kernel_general
|
||||
%increment
|
||||
%endrep
|
||||
|
||||
// stack: addr, rounds, m0...m15, t0, t1, flag, retdest
|
||||
POP
|
||||
// stack: rounds, m0...m15, t0, t1, flag, retdest
|
||||
|
||||
// Save the message to the message working space.
|
||||
%blake2_message_addr
|
||||
// stack: message_addr, rounds, m0...m15, t0, t1, flag, retdest
|
||||
%rep 16
|
||||
// stack: message_addr, rounds, m_i, ...
|
||||
%stack (message_addr, rounds, m_i) -> (message_addr, m_i, message_addr, rounds)
|
||||
// stack: message_addr, m_i, message_addr, rounds, ...
|
||||
%mstore_kernel_general
|
||||
%increment
|
||||
%endrep
|
||||
|
||||
// stack: message_addr, rounds, t0, t1, flag, retdest
|
||||
POP
|
||||
// stack: rounds, t0, t1, flag, retdest
|
||||
|
||||
%blake2_hash_value_addr
|
||||
%add_const(7)
|
||||
%rep 8
|
||||
// stack: addr, ...
|
||||
DUP1
|
||||
// stack: addr, addr, ...
|
||||
%mload_kernel_general
|
||||
// stack: val, addr, ...
|
||||
SWAP1
|
||||
// stack: addr, val, ...
|
||||
%decrement
|
||||
%endrep
|
||||
// stack: addr, h_0, ..., h_7, rounds, t0, t1, flag, retdest
|
||||
POP
|
||||
// stack: h_0, ..., h_7, rounds, t0, t1, flag, retdest
|
||||
|
||||
// Store the initial 16 values of the internal state.
|
||||
%blake2_internal_state_addr
|
||||
// stack: start, h_0, ..., h_7, rounds, t0, t1, flag, retdest
|
||||
|
||||
// First eight words of the internal state: current hash value h_0, ..., h_7.
|
||||
%rep 8
|
||||
SWAP1
|
||||
DUP2
|
||||
%mstore_kernel_general
|
||||
%increment
|
||||
%endrep
|
||||
// stack: start + 8, rounds, t0, t1, flag, retdest
|
||||
|
||||
// Next four values of the internal state: first four IV values.
|
||||
PUSH 0
|
||||
// stack: 0, start + 8, rounds, t0, t1, flag, retdest
|
||||
%rep 4
|
||||
// stack: i, loc, ...
|
||||
DUP1
|
||||
// stack: i, i, loc, ...
|
||||
%blake2_iv
|
||||
// stack: IV_i, i, loc, ...
|
||||
DUP3
|
||||
// stack: loc, IV_i, i, loc, ...
|
||||
%mstore_kernel_general
|
||||
// stack: i, loc, ...
|
||||
%increment
|
||||
SWAP1
|
||||
%increment
|
||||
SWAP1
|
||||
// stack: i + 1, loc + 1,...
|
||||
%endrep
|
||||
// stack: 4, start + 12, rounds, t0, t1, flag, retdest
|
||||
POP
|
||||
// stack: start + 12, rounds, t0, t1, flag, retdest
|
||||
SWAP4
|
||||
// stack: flag, rounds, t0, t1, start + 12, retdest
|
||||
%mul_const(0xFFFFFFFFFFFFFFFF)
|
||||
// stack: invert_if_flag, rounds, t0, t1, start + 12, retdest
|
||||
%stack (inv, r, t0, t1, s) -> (4, s, t0, t1, inv, 0, r)
|
||||
// stack: 4, start + 12, t0, t1, invert_if_flag, 0, rounds, retdest
|
||||
|
||||
// Last four values of the internal state: last four IV values, XOR'd with
|
||||
// the values (t0, t1, invert_if_flag, 0).
|
||||
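// In standard BLAKE2b terms, the loop below computes (IV[4..7] are the last
// four IV words; a sketch, not kernel code):
//
//     v[12] = IV[4] ^ t0
//     v[13] = IV[5] ^ t1
//     v[14] = IV[6] ^ invert_if_flag
//     v[15] = IV[7] ^ 0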
%rep 4
|
||||
// stack: i, loc, val, next_val,...
|
||||
DUP1
|
||||
// stack: i, i, loc, val, next_val,...
|
||||
%blake2_iv
|
||||
// stack: IV_i, i, loc, val, next_val,...
|
||||
DUP4
|
||||
// stack: val, IV_i, i, loc, val, next_val,...
|
||||
XOR
|
||||
// stack: val ^ IV_i, i, loc, val, next_val,...
|
||||
DUP3
|
||||
// stack: loc, val ^ IV_i, i, loc, val, next_val,...
|
||||
%mstore_kernel_general
|
||||
// stack: i, loc, val, next_val,...
|
||||
%increment
|
||||
// stack: i + 1, loc, val, next_val,...
|
||||
SWAP2
|
||||
// stack: val, loc, i + 1, next_val,...
|
||||
POP
|
||||
// stack: loc, i + 1, next_val,...
|
||||
%increment
|
||||
// stack: loc + 1, i + 1, next_val,...
|
||||
SWAP1
|
||||
// stack: i + 1, loc + 1, next_val,...
|
||||
%endrep
|
||||
// stack: 8, start + 16, rounds, retdest
|
||||
%pop2
|
||||
// stack: rounds, retdest
|
||||
|
||||
// Run rounds of G functions.
|
||||
PUSH g_functions_return
|
||||
// stack: g_functions_return, rounds, retdest
|
||||
SWAP1
|
||||
// stack: rounds, g_functions_return, retdest
|
||||
%blake2_internal_state_addr
|
||||
// stack: start, rounds, g_functions_return, retdest
|
||||
PUSH 0
|
||||
// stack: current_round=0, start, rounds, g_functions_return, retdest
|
||||
%jump(run_rounds_g_function)
|
||||
g_functions_return:
|
||||
// Finalize hash value.
|
||||
// stack: retdest
|
||||
PUSH hash_generate_return
|
||||
// stack: hash_generate_return, retdest
|
||||
%jump(blake2_generate_all_hash_values)
|
||||
hash_generate_return:
|
||||
// stack: h_0', h_1', h_2', h_3', h_4', h_5', h_6', h_7', retdest
|
||||
%stack (h: 8, retdest) -> (retdest, h)
|
||||
// stack: retdest, h_0', h_1', h_2', h_3', h_4', h_5', h_6', h_7'
|
||||
JUMP
|
||||
@ -12,4 +12,4 @@ global blake2b:
|
||||
%add_const(1)
|
||||
%mstore_kernel_general
|
||||
// stack: retdest
|
||||
%jump(blake2b_compression)
|
||||
%jump(blake2_compression)
|
||||
@ -1,15 +1,15 @@
|
||||
global blake2b_compression:
|
||||
global blake2_compression:
|
||||
// stack: retdest
|
||||
PUSH 0
|
||||
// stack: cur_block = 0, retdest
|
||||
PUSH compression_loop
|
||||
// stack: compression_loop, cur_block, retdest
|
||||
%jump(blake2b_initial_hash_value)
|
||||
%jump(blake2_initial_hash_value)
|
||||
compression_loop:
|
||||
// stack: h_0, ..., h_7, cur_block, retdest
|
||||
|
||||
// Store the hash values.
|
||||
%blake2b_hash_value_addr
|
||||
%blake2_hash_value_addr
|
||||
// stack: addr, h_0, ..., h_7, cur_block, retdest
|
||||
%rep 8
|
||||
SWAP1
|
||||
@ -63,7 +63,7 @@ compression_loop:
|
||||
// stack: cur_block_start_byte, t, cur_block, is_last_block, retdest
|
||||
|
||||
// Copy the message from the input space to the message working space.
|
||||
%blake2b_message_addr
|
||||
%blake2_message_addr
|
||||
// stack: message_addr, cur_block_start_byte, t, cur_block, is_last_block, retdest
|
||||
%rep 16
|
||||
// stack: cur_message_addr, cur_block_byte, ...
|
||||
@ -93,7 +93,7 @@ compression_loop:
|
||||
// stack: is_last_block, t, cur_block, retdest
|
||||
%mul_const(0xFFFFFFFFFFFFFFFF)
|
||||
// stack: invert_if_last_block, t, cur_block, retdest
|
||||
%blake2b_hash_value_addr
|
||||
%blake2_hash_value_addr
|
||||
%add_const(7)
|
||||
%rep 8
|
||||
// stack: addr, ...
|
||||
@ -110,7 +110,7 @@ compression_loop:
|
||||
// stack: h_0, ..., h_7, invert_if_last_block, t, cur_block, retdest
|
||||
|
||||
// Store the initial 16 values of the internal state.
|
||||
%blake2b_internal_state_addr
|
||||
%blake2_internal_state_addr
|
||||
// stack: start, h_0, ..., h_7, invert_if_last_block, t, cur_block, retdest
|
||||
|
||||
// First eight words of the internal state: current hash value h_0, ..., h_7.
|
||||
@ -129,7 +129,7 @@ compression_loop:
|
||||
// stack: i, loc, ...
|
||||
DUP1
|
||||
// stack: i, i, loc, ...
|
||||
%blake2b_iv
|
||||
%blake2_iv
|
||||
// stack: IV_i, i, loc, ...
|
||||
DUP3
|
||||
// stack: loc, IV_i, i, loc, ...
|
||||
@ -159,7 +159,7 @@ compression_loop:
|
||||
// stack: i, loc, val, next_val,...
|
||||
DUP1
|
||||
// stack: i, i, loc, val, next_val,...
|
||||
%blake2b_iv
|
||||
%blake2_iv
|
||||
// stack: IV_i, i, loc, val, next_val,...
|
||||
DUP4
|
||||
// stack: val, IV_i, i, loc, val, next_val,...
|
||||
@ -187,15 +187,18 @@ compression_loop:
|
||||
// Run 12 rounds of G functions.
|
||||
PUSH g_functions_return
|
||||
// stack: g_functions_return, cur_block, retdest
|
||||
%blake2b_internal_state_addr
|
||||
// stack: start, g_functions_return, cur_block, retdest
|
||||
%jump(run_12_rounds_g_function)
|
||||
PUSH 12
|
||||
%blake2_internal_state_addr
|
||||
// stack: start, 12, g_functions_return, cur_block, retdest
|
||||
PUSH 0
|
||||
// stack: current_round=0, start, 12, g_functions_return, cur_block, retdest
|
||||
%jump(run_rounds_g_function)
|
||||
g_functions_return:
|
||||
// Finalize hash value.
|
||||
// stack: cur_block, retdest
|
||||
PUSH hash_generate_return
|
||||
// stack: hash_generate_return, cur_block, retdest
|
||||
%jump(blake2b_generate_all_hash_values)
|
||||
%jump(blake2_generate_all_hash_values)
|
||||
hash_generate_return:
|
||||
// stack: h_0', h_1', h_2', h_3', h_4', h_5', h_6', h_7', cur_block, retdest
|
||||
DUP9
|
||||
@ -1,4 +1,4 @@
|
||||
%macro blake2b_g_function
|
||||
%macro blake2_g_function
|
||||
// Function to mix two input words, x and y, into the four words indexed by a, b, c, d (which
|
||||
// are in the range 0..16) in the internal state.
|
||||
// The internal state is stored in memory starting at the address start.
|
||||
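// For reference, the standard BLAKE2b mixing function G that this macro
// corresponds to, written as a Python sketch (64-bit words, rotation amounts
// 32/24/16/63; v is the 16-word internal state):
//
//     def g(v, a, b, c, d, x, y):
//         M = 2**64 - 1
//         rotr = lambda w, r: ((w >> r) | (w << (64 - r))) & M
//         v[a] = (v[a] + v[b] + x) & M; v[d] = rotr(v[d] ^ v[a], 32)
//         v[c] = (v[c] + v[d]) & M;     v[b] = rotr(v[b] ^ v[c], 24)
//         v[a] = (v[a] + v[b] + y) & M; v[d] = rotr(v[d] ^ v[a], 16)
//         v[c] = (v[c] + v[d]) & M;     v[b] = rotr(v[b] ^ v[c], 63)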
@ -104,23 +104,23 @@
|
||||
%mstore_kernel_general
|
||||
%endmacro
|
||||
|
||||
%macro call_blake2b_g_function(a, b, c, d, x_idx, y_idx)
|
||||
%macro call_blake2_g_function(a, b, c, d, x_idx, y_idx)
|
||||
// stack: round, start
|
||||
PUSH $y_idx
|
||||
DUP2
|
||||
// stack: round, y_idx, round, start
|
||||
%blake2b_permutation
|
||||
%blake2_permutation
|
||||
// stack: s[y_idx], round, start
|
||||
%blake2b_message_addr
|
||||
%blake2_message_addr
|
||||
ADD
|
||||
%mload_kernel_general
|
||||
// stack: m[s[y_idx]], round, start
|
||||
PUSH $x_idx
|
||||
DUP3
|
||||
// stack: round, x_idx, m[s[y_idx]], round, start
|
||||
%blake2b_permutation
|
||||
%blake2_permutation
|
||||
// stack: s[x_idx], m[s[y_idx]], round, start
|
||||
%blake2b_message_addr
|
||||
%blake2_message_addr
|
||||
ADD
|
||||
%mload_kernel_general
|
||||
// stack: m[s[x_idx]], m[s[y_idx]], round, start
|
||||
@ -131,48 +131,45 @@
|
||||
PUSH $b
|
||||
PUSH $a
|
||||
// stack: a, b, c, d, m[s[x_idx]], m[s[y_idx]], start, round, start
|
||||
%blake2b_g_function
|
||||
%blake2_g_function
|
||||
// stack: round, start
|
||||
%endmacro
|
||||
|
||||
run_g_function_round:
|
||||
// stack: round, start, retdest
|
||||
%call_blake2b_g_function(0, 4, 8, 12, 0, 1)
|
||||
%call_blake2b_g_function(1, 5, 9, 13, 2, 3)
|
||||
%call_blake2b_g_function(2, 6, 10, 14, 4, 5)
|
||||
%call_blake2b_g_function(3, 7, 11, 15, 6, 7)
|
||||
%call_blake2b_g_function(0, 5, 10, 15, 8, 9)
|
||||
%call_blake2b_g_function(1, 6, 11, 12, 10, 11)
|
||||
%call_blake2b_g_function(2, 7, 8, 13, 12, 13)
|
||||
%call_blake2b_g_function(3, 4, 9, 14, 14, 15)
|
||||
%call_blake2_g_function(0, 4, 8, 12, 0, 1)
|
||||
%call_blake2_g_function(1, 5, 9, 13, 2, 3)
|
||||
%call_blake2_g_function(2, 6, 10, 14, 4, 5)
|
||||
%call_blake2_g_function(3, 7, 11, 15, 6, 7)
|
||||
%call_blake2_g_function(0, 5, 10, 15, 8, 9)
|
||||
%call_blake2_g_function(1, 6, 11, 12, 10, 11)
|
||||
%call_blake2_g_function(2, 7, 8, 13, 12, 13)
|
||||
%call_blake2_g_function(3, 4, 9, 14, 14, 15)
|
||||
%stack (r, s, ret) -> (ret, r, s)
|
||||
// stack: retdest, round, start
|
||||
JUMP
|
||||
|
||||
global run_12_rounds_g_function:
|
||||
// stack: start, retdest
|
||||
PUSH 0
|
||||
// stack: round=0, start, retdest
|
||||
run_next_round_g_function:
|
||||
// stack: round, start, retdest
|
||||
PUSH run_next_round_g_function_return
|
||||
// stack: run_next_round_g_function_return, round, start, retdest
|
||||
SWAP2
|
||||
// stack: start, round, run_next_round_g_function_return, retdest
|
||||
SWAP1
|
||||
// stack: round, start, run_next_round_g_function_return, retdest
|
||||
global run_rounds_g_function:
|
||||
// stack: current_round, start, rounds, retdest
|
||||
DUP3
|
||||
// stack: rounds, current_round, start, rounds, retdest
|
||||
DUP2
|
||||
// stack: current_round, rounds, current_round, start, rounds, retdest
|
||||
EQ
|
||||
%jumpi(run_rounds_g_function_end)
|
||||
// stack: current_round, start, rounds, retdest
|
||||
PUSH run_rounds_g_function_return
|
||||
// stack: run_rounds_g_function_return, current_round, start, rounds, retdest
|
||||
%stack (ret, r, s) -> (r, s, ret)
|
||||
// stack: current_round, start, run_rounds_g_function_return, rounds, retdest
|
||||
%jump(run_g_function_round)
|
||||
run_next_round_g_function_return:
|
||||
// stack: round, start, retdest
|
||||
run_rounds_g_function_return:
|
||||
// stack: round, start, rounds, retdest
|
||||
%increment
|
||||
// stack: round+1, start, retdest
|
||||
DUP1
|
||||
// stack: round+1, round+1, start, retdest
|
||||
%lt_const(12)
|
||||
// stack: round+1 < 12, round+1, start, retdest
|
||||
%jumpi(run_next_round_g_function)
|
||||
// stack: round+1, start, retdest
|
||||
%pop2
|
||||
// stack: round + 1, start, rounds, retdest
|
||||
%jump(run_rounds_g_function)
|
||||
run_rounds_g_function_end:
|
||||
// stack: current_round, start, rounds, retdest
|
||||
%pop3
|
||||
// stack: retdest
|
||||
JUMP
|
||||
|
||||
@ -1,18 +1,19 @@
|
||||
blake2b_generate_new_hash_value:
|
||||
// Generate a new hash value from the previous hash value and two elements of the internal state.
|
||||
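// (Reviewer note, stated from standard BLAKE2 (RFC 7693) as an assumption about this routine:
// finalization XORs both halves of the internal state into the chaining value,
// i.e. h_i' = h_i XOR v_i XOR v_(i+8) for i in 0..8.)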
blake2_generate_new_hash_value:
|
||||
// stack: i, retdest
|
||||
%blake2b_hash_value_addr
|
||||
%blake2_hash_value_addr
|
||||
// stack: addr, i, retdest
|
||||
DUP2
|
||||
ADD
|
||||
%mload_kernel_general
|
||||
// stack: h_i, i, retdest
|
||||
%blake2b_internal_state_addr
|
||||
%blake2_internal_state_addr
|
||||
// stack: addr, h_i, i, retdest
|
||||
DUP3
|
||||
ADD
|
||||
%mload_kernel_general
|
||||
// stack: v_i, h_i, i, retdest
|
||||
%blake2b_internal_state_addr
|
||||
%blake2_internal_state_addr
|
||||
// stack: addr, v_i, h_i, i, retdest
|
||||
SWAP1
|
||||
// stack: v_i, addr, h_i, i, retdest
|
||||
@ -28,26 +29,26 @@ blake2b_generate_new_hash_value:
|
||||
SWAP1
|
||||
JUMP
|
||||
|
||||
global blake2b_generate_all_hash_values:
|
||||
global blake2_generate_all_hash_values:
|
||||
// stack: retdest
|
||||
PUSH 8
|
||||
// stack: i=8, retdest
|
||||
blake2b_generate_hash_loop:
|
||||
blake2_generate_hash_loop:
|
||||
// stack: i, h_i', ..., h_7', retdest
|
||||
%decrement
|
||||
// stack: i-1, h_i', ..., h_7', retdest
|
||||
PUSH blake2b_generate_hash_return
|
||||
// stack: blake2b_generate_hash_return, i-1, h_i', ..., h_7', retdest
|
||||
PUSH blake2_generate_hash_return
|
||||
// stack: blake2_generate_hash_return, i-1, h_i', ..., h_7', retdest
|
||||
DUP2
|
||||
// stack: i-1, blake2b_generate_hash_return, i-1, h_i', ..., h_7', retdest
|
||||
%jump(blake2b_generate_new_hash_value)
|
||||
blake2b_generate_hash_return:
|
||||
// stack: i-1, blake2_generate_hash_return, i-1, h_i', ..., h_7', retdest
|
||||
%jump(blake2_generate_new_hash_value)
|
||||
blake2_generate_hash_return:
|
||||
// stack: h_(i-1)', i-1, h_i', ..., h_7', retdest
|
||||
SWAP1
|
||||
// stack: i-1, h_(i-1)', h_i', ..., h_7', retdest
|
||||
DUP1
|
||||
// stack: i-1, i-1, h_(i-1)', ..., h_7', retdest
|
||||
%jumpi(blake2b_generate_hash_loop)
|
||||
%jumpi(blake2_generate_hash_loop)
|
||||
// stack: i-1=0, h_0', ..., h_7', retdest
|
||||
%stack (i, h: 8, ret) -> (ret, h)
|
||||
// stack: retdest, h_0'...h_7'
|
||||
@ -1,4 +1,4 @@
|
||||
global blake2b_iv_const:
|
||||
global blake2_iv_const:
|
||||
// IV constants (big-endian)
|
||||
|
||||
// IV_0
|
||||
@ -33,19 +33,19 @@ global blake2b_iv_const:
|
||||
BYTES 91, 224, 205, 25
|
||||
BYTES 19, 126, 33, 121
|
||||
|
||||
global blake2b_iv:
|
||||
global blake2_iv:
|
||||
// stack: i, retdest
|
||||
PUSH blake2b_iv_const
|
||||
// stack: blake2b_iv_const, i, retdest
|
||||
PUSH blake2_iv_const
|
||||
// stack: blake2_iv_const, i, retdest
|
||||
SWAP1
|
||||
// stack: i, blake2b_iv_const, retdest
|
||||
// stack: i, blake2_iv_const, retdest
|
||||
%mul_const(8)
|
||||
ADD
|
||||
// stack: blake2b_iv_const + 2 * i, retdest
|
||||
// stack: blake2_iv_const + 2 * i, retdest
|
||||
DUP1
|
||||
// stack: blake2b_iv_const + 2 * i, blake2b_iv_const + 2 * i, retdest
|
||||
// stack: blake2_iv_const + 2 * i, blake2_iv_const + 2 * i, retdest
|
||||
%add_const(4)
|
||||
// stack: blake2b_iv_const + 2 * i + 1, blake2b_iv_const + 2 * i, retdest
|
||||
// stack: blake2_iv_const + 2 * i + 1, blake2_iv_const + 2 * i, retdest
|
||||
%mload_kernel_code_u32
|
||||
SWAP1
|
||||
%mload_kernel_code_u32
|
||||
@ -57,33 +57,33 @@ global blake2b_iv:
|
||||
SWAP1
|
||||
JUMP
|
||||
|
||||
%macro blake2b_iv
|
||||
%macro blake2_iv
|
||||
%stack (i) -> (i, %%after)
|
||||
%jump(blake2b_iv)
|
||||
%jump(blake2_iv)
|
||||
%%after:
|
||||
%endmacro
|
||||
|
||||
// Load the initial hash value (the IV, but with params XOR'd into the first word).
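// (Reviewer note, per RFC 7693 and stated as context rather than taken from this diff: for an
// unkeyed 64-byte digest the parameter XOR amounts to h_0 = IV_0 ^ 0x01010040, i.e.
// 0x01010000 ^ (key_len << 8) ^ digest_len with key_len = 0 and digest_len = 64.)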
global blake2b_initial_hash_value:
|
||||
global blake2_initial_hash_value:
|
||||
// stack: retdest
|
||||
PUSH 8
|
||||
// stack: i=8, retdest
|
||||
blake2b_initial_hash_loop:
|
||||
blake2_initial_hash_loop:
|
||||
// stack: i, IV_i, ..., IV_7, retdest
|
||||
%decrement
|
||||
// stack: i-1, IV_i, ..., IV_7, retdest
|
||||
PUSH blake2b_initial_hash_return
|
||||
// stack: blake2b_initial_hash_return, i-1, IV_i, ..., IV_7, retdest
|
||||
PUSH blake2_initial_hash_return
|
||||
// stack: blake2_initial_hash_return, i-1, IV_i, ..., IV_7, retdest
|
||||
DUP2
|
||||
// stack: i-1, blake2b_initial_hash_return, i-1, IV_i, ..., IV_7, retdest
|
||||
%jump(blake2b_iv)
|
||||
blake2b_initial_hash_return:
|
||||
// stack: i-1, blake2_initial_hash_return, i-1, IV_i, ..., IV_7, retdest
|
||||
%jump(blake2_iv)
|
||||
blake2_initial_hash_return:
|
||||
// stack: IV_(i-1), i-1, IV_i, ..., IV_7, retdest
|
||||
SWAP1
|
||||
// stack: i-1, IV_(i-1), IV_i, ..., IV_7, retdest
|
||||
DUP1
|
||||
// stack: i-1, i-1, IV_(i-1), ..., IV_7, retdest
|
||||
%jumpi(blake2b_initial_hash_loop)
|
||||
%jumpi(blake2_initial_hash_loop)
|
||||
// stack: i-1=0, IV_0, ..., IV_7, retdest
|
||||
POP
|
||||
// stack: IV_0, ..., IV_7, retdest
|
||||
@ -58,7 +58,7 @@ global permutation_9_constants:
|
||||
BYTES 15, 11, 9, 14
|
||||
BYTES 3, 12, 13, 0
|
||||
|
||||
global blake2b_permutation:
|
||||
global blake2_permutation:
|
||||
// stack: i, round, retdest
|
||||
PUSH permutation_0_constants
|
||||
// stack: permutation_0_constants, i, round, retdest
|
||||
@ -74,12 +74,12 @@ global blake2b_permutation:
|
||||
SWAP1
|
||||
JUMP
|
||||
|
||||
%macro blake2b_permutation
|
||||
%macro blake2_permutation
|
||||
// stack: round, i
|
||||
PUSH %%after
|
||||
// stack: %%after, round, i
|
||||
SWAP2
|
||||
// stack: i, round, %%after
|
||||
%jump(blake2b_permutation)
|
||||
%jump(blake2_permutation)
|
||||
%%after:
|
||||
%endmacro
|
||||
@ -34,6 +34,15 @@
|
||||
// stack: (empty)
|
||||
%endmacro
|
||||
|
||||
// Store the given context metadata field to memory.
|
||||
%macro mstore_context_metadata(field, value)
|
||||
PUSH $value
|
||||
PUSH $field
|
||||
// stack: offset, value
|
||||
%mstore_current(@SEGMENT_CONTEXT_METADATA)
|
||||
// stack: (empty)
|
||||
%endmacro
|
||||
|
||||
%macro mstore_parent_context_metadata(field)
|
||||
// stack: value
|
||||
%mload_context_metadata(@CTX_METADATA_PARENT_CONTEXT)
|
||||
|
||||
@ -43,6 +43,12 @@ mload_packing_return:
|
||||
%stack (packed_value, addr: 3, len, retdest) -> (retdest, packed_value)
|
||||
JUMP
|
||||
|
||||
%macro mload_packing
|
||||
%stack (addr: 3, len) -> (addr, len, %%after)
|
||||
%jump(mload_packing)
|
||||
%%after:
|
||||
%endmacro
|
||||
|
||||
// Pre stack: context, segment, offset, value, len, retdest
|
||||
// Post stack: offset'
|
||||
global mstore_unpacking:
|
||||
@ -82,3 +88,9 @@ mstore_unpacking_finish:
|
||||
%pop3
|
||||
%stack (offset, value, len, retdest) -> (retdest, offset)
|
||||
JUMP
|
||||
|
||||
%macro mstore_unpacking
|
||||
%stack (addr: 3, value, len) -> (addr, value, len, %%after)
|
||||
%jump(mstore_unpacking)
|
||||
%%after:
|
||||
%endmacro
|
||||
|
||||
@ -17,6 +17,7 @@ global mpt_insert:
|
||||
|
||||
// There's still the MPT_NODE_HASH case, but if we hit a hash node,
|
||||
// it means the prover failed to provide necessary Merkle data, so panic.
|
||||
global mpt_insert_hash_node:
|
||||
PANIC
|
||||
|
||||
mpt_insert_empty:
|
||||
|
||||
@ -42,6 +42,7 @@ global mpt_read:
|
||||
|
||||
// There's still the MPT_NODE_HASH case, but if we hit a hash node,
|
||||
// it means the prover failed to provide necessary Merkle data, so panic.
|
||||
global mpt_read_hash_node:
|
||||
PANIC
|
||||
|
||||
global mpt_read_empty:
|
||||
|
||||
@ -1,3 +1,34 @@
|
||||
%macro sload_current
|
||||
%stack (slot) -> (slot, %%after)
|
||||
%jump(sload_current)
|
||||
%%after:
|
||||
%endmacro
|
||||
|
||||
global sload_current:
|
||||
%stack (slot) -> (slot, after_storage_read)
|
||||
%slot_to_storage_key
|
||||
// stack: storage_key, after_storage_read
|
||||
PUSH 64 // storage_key has 64 nibbles
|
||||
%current_storage_trie
|
||||
// stack: storage_root_ptr, 64, storage_key, after_storage_read
|
||||
%jump(mpt_read)
|
||||
|
||||
global after_storage_read:
|
||||
// stack: value_ptr, retdest
|
||||
DUP1 %jumpi(storage_key_exists)
|
||||
|
||||
// Storage key not found. Return default value_ptr = 0,
|
||||
// which derefs to 0 since @SEGMENT_TRIE_DATA[0] = 0.
|
||||
%stack (value_ptr, retdest) -> (retdest, 0)
|
||||
JUMP
|
||||
|
||||
global storage_key_exists:
|
||||
// stack: value_ptr, retdest
|
||||
%mload_trie_data
|
||||
// stack: value, retdest
|
||||
SWAP1
|
||||
JUMP
|
||||
|
||||
// Read a word from the current account's storage trie.
|
||||
//
|
||||
// Pre stack: kexit_info, slot
|
||||
@ -6,38 +37,20 @@
|
||||
global sys_sload:
|
||||
// stack: kexit_info, slot
|
||||
SWAP1
|
||||
// stack: slot, kexit_info
|
||||
DUP1 %address
|
||||
// stack: addr, slot, slot, kexit_info
|
||||
%insert_accessed_storage_keys PUSH @GAS_COLDSLOAD_MINUS_WARMACCESS
|
||||
MUL
|
||||
PUSH @GAS_WARMACCESS
|
||||
ADD
|
||||
%stack (gas, slot, kexit_info) -> (gas, kexit_info, slot)
|
||||
DUP1
|
||||
// stack: slot, slot, kexit_info
|
||||
%sload_current
|
||||
|
||||
%stack (value, slot, kexit_info) -> (slot, value, kexit_info, value)
|
||||
%address
|
||||
// stack: addr, slot, value, kexit_info, value
|
||||
%insert_accessed_storage_keys
|
||||
// stack: cold_access, old_value, kexit_info, value
|
||||
SWAP1 POP
|
||||
// stack: cold_access, kexit_info, value
|
||||
%mul_const(@GAS_COLDSLOAD_MINUS_WARMACCESS)
|
||||
%add_const(@GAS_WARMACCESS)
|
||||
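// (Reviewer note, assuming the Berlin/EIP-2929 constants GAS_COLDSLOAD = 2100 and
// GAS_WARMACCESS = 100: since cold_access is 0 or 1, the two lines above charge 2100 gas
// for a cold slot and 100 gas for a warm one.)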
%charge_gas
|
||||
// stack: kexit_info, slot
|
||||
|
||||
SWAP1
|
||||
%stack (slot) -> (slot, after_storage_read)
|
||||
%slot_to_storage_key
|
||||
// stack: storage_key, after_storage_read, kexit_info
|
||||
PUSH 64 // storage_key has 64 nibbles
|
||||
%current_storage_trie
|
||||
// stack: storage_root_ptr, 64, storage_key, after_storage_read, kexit_info
|
||||
%jump(mpt_read)
|
||||
|
||||
after_storage_read:
|
||||
// stack: value_ptr, kexit_info
|
||||
DUP1 %jumpi(storage_key_exists)
|
||||
|
||||
// Storage key not found. Return default value_ptr = 0,
|
||||
// which derefs to 0 since @SEGMENT_TRIE_DATA[0] = 0.
|
||||
%stack (value_ptr, kexit_info) -> (kexit_info, 0)
|
||||
// stack: kexit_info, value
|
||||
EXIT_KERNEL
|
||||
|
||||
storage_key_exists:
|
||||
// stack: value_ptr, kexit_info
|
||||
%mload_trie_data
|
||||
// stack: value, kexit_info
|
||||
SWAP1
|
||||
EXIT_KERNEL
|
||||
|
||||
@ -6,14 +6,42 @@
|
||||
global sys_sstore:
|
||||
%check_static
|
||||
%stack (kexit_info, slot, value) -> (slot, kexit_info, slot, value)
|
||||
%address %insert_accessed_storage_keys POP // TODO: Use return value in gas calculation.
|
||||
// TODO: Assuming a cold zero -> nonzero write for now.
|
||||
PUSH @GAS_COLDSLOAD
|
||||
PUSH @GAS_SSET
|
||||
ADD
|
||||
%sload_current
|
||||
%address
|
||||
%stack (addr, current_value, kexit_info, slot, value) -> (addr, slot, current_value, current_value, kexit_info, slot, value)
|
||||
%insert_accessed_storage_keys
|
||||
// stack: cold_access, original_value, current_value, kexit_info, slot, value
|
||||
%mul_const(@GAS_COLDSLOAD)
|
||||
|
||||
// Check for warm access.
|
||||
%stack (gas, original_value, current_value, kexit_info, slot, value) ->
|
||||
(value, current_value, current_value, original_value, gas, original_value, current_value, kexit_info, slot, value)
|
||||
EQ SWAP2 EQ ISZERO
|
||||
// stack: current_value!=original_value, value==current_value, gas, original_value, current_value, kexit_info, slot, value
|
||||
ADD // OR
|
||||
%jumpi(sstore_warm)
|
||||
|
||||
// Check for sset (set a zero storage slot to a non-zero value).
|
||||
// stack: gas, original_value, current_value, kexit_info, slot, value
|
||||
DUP2 ISZERO %mul_const(@GAS_SSET) ADD
|
||||
|
||||
// Check for sreset (set a non-zero storage slot to a non-zero value).
|
||||
// stack: gas, original_value, current_value, kexit_info, slot, value
|
||||
DUP2 ISZERO ISZERO %mul_const(@GAS_SRESET) ADD
|
||||
%jump(sstore_charge_gas)
|
||||
|
||||
sstore_warm:
|
||||
// stack: gas, original_value, current_value, kexit_info, slot, value
|
||||
%add_const(@GAS_WARMACCESS)
|
||||
|
||||
sstore_charge_gas:
|
||||
%stack (gas, original_value, current_value, kexit_info, slot, value) -> (gas, kexit_info, current_value, slot, value)
|
||||
%charge_gas
|
||||
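// (Reviewer summary of the branches above, mirroring EIP-2200-style metering: a cold slot
// always adds GAS_COLDSLOAD; if the slot is already dirty (current_value != original_value)
// or the write is a no-op (value == current_value), only GAS_WARMACCESS is added; otherwise
// GAS_SSET is added when the original value was zero and GAS_SRESET when it was non-zero.)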
|
||||
%stack (kexit_info, slot, value) -> (slot, value, kexit_info)
|
||||
// Check if `value` is equal to `current_value`, and if so exit the kernel early.
|
||||
%stack (kexit_info, current_value, slot, value) -> (value, current_value, slot, value, kexit_info)
|
||||
EQ %jumpi(sstore_noop)
|
||||
|
||||
// TODO: If value = 0, delete the key instead of inserting 0.
|
||||
// stack: slot, value, kexit_info
|
||||
|
||||
@ -57,3 +85,8 @@ after_storage_insert:
|
||||
after_state_insert:
|
||||
// stack: kexit_info
|
||||
EXIT_KERNEL
|
||||
|
||||
sstore_noop:
|
||||
// stack: slot, value, kexit_info
|
||||
%pop2
|
||||
EXIT_KERNEL
|
||||
|
||||
@ -1,10 +1,12 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::time::Instant;
|
||||
|
||||
use ethereum_types::U256;
|
||||
use itertools::{izip, Itertools};
|
||||
use keccak_hash::keccak;
|
||||
use log::debug;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::ast::PushTarget;
|
||||
use crate::cpu::kernel::ast::Item::LocalLabelDeclaration;
|
||||
@ -20,7 +22,7 @@ use crate::generation::prover_input::ProverInputFn;
|
||||
/// nontrivial given the circular dependency between an offset and its size.
|
||||
pub(crate) const BYTES_PER_OFFSET: u8 = 3;
|
||||
|
||||
#[derive(PartialEq, Eq, Debug)]
|
||||
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
|
||||
pub struct Kernel {
|
||||
pub(crate) code: Vec<u8>,
|
||||
|
||||
@ -60,6 +62,16 @@ impl Kernel {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_file(&self, path: &str) {
|
||||
let kernel_serialized = serde_json::to_string(self).unwrap();
|
||||
fs::write(path, kernel_serialized).expect("Unable to write kernel to file");
|
||||
}
|
||||
|
||||
pub fn from_file(path: &str) -> Self {
|
||||
let bytes = fs::read(path).expect("Unable to read kernel file");
|
||||
serde_json::from_slice(&bytes).unwrap()
|
||||
}
|
||||
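// (Reviewer sketch of the intended round-trip; the file path is hypothetical:)
// let kernel = combined_kernel();
// kernel.to_file("kernel.json");
// assert_eq!(kernel, Kernel::from_file("kernel.json")); // Kernel now derives PartialEq, Serialize, Deserialize.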
|
||||
/// Get a string representation of the current offset for debugging purposes.
|
||||
pub(crate) fn offset_name(&self, offset: usize) -> String {
|
||||
match self
|
||||
|
||||
@ -40,6 +40,14 @@ pub fn evm_constants() -> HashMap<String, U256> {
|
||||
c.insert(name.into(), U256::from(value));
|
||||
}
|
||||
|
||||
for (name, value) in CODE_SIZE_LIMIT {
|
||||
c.insert(name.into(), U256::from(value));
|
||||
}
|
||||
|
||||
for (name, value) in SNARKV_POINTERS {
|
||||
c.insert(name.into(), U256::from(value));
|
||||
}
|
||||
|
||||
for segment in Segment::all() {
|
||||
c.insert(segment.var_name().into(), (segment as u32).into());
|
||||
}
|
||||
@ -83,7 +91,7 @@ const HASH_CONSTANTS: [(&str, [u8; 32]); 2] = [
|
||||
),
|
||||
];
|
||||
|
||||
const EC_CONSTANTS: [(&str, [u8; 32]); 18] = [
|
||||
const EC_CONSTANTS: [(&str, [u8; 32]); 20] = [
|
||||
(
|
||||
"U256_MAX",
|
||||
hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
|
||||
@ -92,6 +100,14 @@ const EC_CONSTANTS: [(&str, [u8; 32]); 18] = [
|
||||
"BN_BASE",
|
||||
hex!("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47"),
|
||||
),
|
||||
(
|
||||
"BN_TWISTED_RE",
|
||||
hex!("2b149d40ceb8aaae81be18991be06ac3b5b4c5e559dbefa33267e6dc24a138e5"),
|
||||
),
|
||||
(
|
||||
"BN_TWISTED_IM",
|
||||
hex!("009713b03af0fed4cd2cafadeed8fdf4a74fa084e52d1852e4a2bd0685c315d2"),
|
||||
),
|
||||
(
|
||||
"BN_SCALAR",
|
||||
hex!("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"),
|
||||
@ -225,5 +241,13 @@ const PRECOMPILES_GAS: [(&str, u16); 13] = [
|
||||
("BN_MUL_GAS", 6_000),
|
||||
("SNARKV_STATIC_GAS", 45_000),
|
||||
("SNARKV_DYNAMIC_GAS", 34_000),
|
||||
("BLAKE2_F_DYNAMIC_GAS", 1),
|
||||
("BLAKE2_F__GAS", 1),
|
||||
];
|
||||
|
||||
const SNARKV_POINTERS: [(&str, u64); 2] = [("SNARKV_INP", 112), ("SNARKV_OUT", 100)];
|
||||
|
||||
const CODE_SIZE_LIMIT: [(&str, u64); 3] = [
|
||||
("MAX_CODE_SIZE", 0x6000),
|
||||
("MAX_INITCODE_SIZE", 0xc000),
|
||||
("INITCODE_WORD_COST", 2),
|
||||
];
|
||||
|
||||
@ -332,21 +332,21 @@ impl<'a> Interpreter<'a> {
|
||||
0x1d => self.run_sar(), // "SAR",
|
||||
0x20 => self.run_keccak256(), // "KECCAK256",
|
||||
0x21 => self.run_keccak_general(), // "KECCAK_GENERAL",
|
||||
0x30 => todo!(), // "ADDRESS",
|
||||
0x30 => self.run_address(), // "ADDRESS",
|
||||
0x31 => todo!(), // "BALANCE",
|
||||
0x32 => todo!(), // "ORIGIN",
|
||||
0x33 => todo!(), // "CALLER",
|
||||
0x32 => self.run_origin(), // "ORIGIN",
|
||||
0x33 => self.run_caller(), // "CALLER",
|
||||
0x34 => self.run_callvalue(), // "CALLVALUE",
|
||||
0x35 => self.run_calldataload(), // "CALLDATALOAD",
|
||||
0x36 => self.run_calldatasize(), // "CALLDATASIZE",
|
||||
0x37 => self.run_calldatacopy(), // "CALLDATACOPY",
|
||||
0x38 => todo!(), // "CODESIZE",
|
||||
0x39 => todo!(), // "CODECOPY",
|
||||
0x3a => todo!(), // "GASPRICE",
|
||||
0x38 => self.run_codesize(), // "CODESIZE",
|
||||
0x39 => self.run_codecopy(), // "CODECOPY",
|
||||
0x3a => self.run_gasprice(), // "GASPRICE",
|
||||
0x3b => todo!(), // "EXTCODESIZE",
|
||||
0x3c => todo!(), // "EXTCODECOPY",
|
||||
0x3d => todo!(), // "RETURNDATASIZE",
|
||||
0x3e => todo!(), // "RETURNDATACOPY",
|
||||
0x3d => self.run_returndatasize(), // "RETURNDATASIZE",
|
||||
0x3e => self.run_returndatacopy(), // "RETURNDATACOPY",
|
||||
0x3f => todo!(), // "EXTCODEHASH",
|
||||
0x40 => todo!(), // "BLOCKHASH",
|
||||
0x41 => self.run_coinbase(), // "COINBASE",
|
||||
@ -734,6 +734,26 @@ impl<'a> Interpreter<'a> {
|
||||
self.push(U256::from_big_endian(hash.as_bytes()));
|
||||
}
|
||||
|
||||
fn run_address(&mut self) {
|
||||
self.push(
|
||||
self.generation_state.memory.contexts[self.context].segments
|
||||
[Segment::ContextMetadata as usize]
|
||||
.get(ContextMetadata::Address as usize),
|
||||
)
|
||||
}
|
||||
|
||||
fn run_origin(&mut self) {
|
||||
self.push(self.get_txn_field(NormalizedTxnField::Origin))
|
||||
}
|
||||
|
||||
fn run_caller(&mut self) {
|
||||
self.push(
|
||||
self.generation_state.memory.contexts[self.context].segments
|
||||
[Segment::ContextMetadata as usize]
|
||||
.get(ContextMetadata::Caller as usize),
|
||||
)
|
||||
}
|
||||
|
||||
fn run_callvalue(&mut self) {
|
||||
self.push(
|
||||
self.generation_state.memory.contexts[self.context].segments
|
||||
@ -784,6 +804,63 @@ impl<'a> Interpreter<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
fn run_codesize(&mut self) {
|
||||
self.push(
|
||||
self.generation_state.memory.contexts[self.context].segments
|
||||
[Segment::ContextMetadata as usize]
|
||||
.get(ContextMetadata::CodeSize as usize),
|
||||
)
|
||||
}
|
||||
|
||||
fn run_codecopy(&mut self) {
|
||||
let dest_offset = self.pop().as_usize();
|
||||
let offset = self.pop().as_usize();
|
||||
let size = self.pop().as_usize();
|
||||
for i in 0..size {
|
||||
let code_byte =
|
||||
self.generation_state
|
||||
.memory
|
||||
.mload_general(self.context, Segment::Code, offset + i);
|
||||
self.generation_state.memory.mstore_general(
|
||||
self.context,
|
||||
Segment::MainMemory,
|
||||
dest_offset + i,
|
||||
code_byte,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn run_gasprice(&mut self) {
|
||||
self.push(self.get_txn_field(NormalizedTxnField::ComputedFeePerGas))
|
||||
}
|
||||
|
||||
fn run_returndatasize(&mut self) {
|
||||
self.push(
|
||||
self.generation_state.memory.contexts[self.context].segments
|
||||
[Segment::ContextMetadata as usize]
|
||||
.get(ContextMetadata::ReturndataSize as usize),
|
||||
)
|
||||
}
|
||||
|
||||
fn run_returndatacopy(&mut self) {
|
||||
let dest_offset = self.pop().as_usize();
|
||||
let offset = self.pop().as_usize();
|
||||
let size = self.pop().as_usize();
|
||||
for i in 0..size {
|
||||
let returndata_byte = self.generation_state.memory.mload_general(
|
||||
self.context,
|
||||
Segment::Returndata,
|
||||
offset + i,
|
||||
);
|
||||
self.generation_state.memory.mstore_general(
|
||||
self.context,
|
||||
Segment::MainMemory,
|
||||
dest_offset + i,
|
||||
returndata_byte,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn run_coinbase(&mut self) {
|
||||
self.push(self.get_global_metadata_field(GlobalMetadata::BlockBeneficiary))
|
||||
}
|
||||
|
||||
@ -29,6 +29,7 @@ const TEST_DATA_ADDMUL_OUTPUTS: &str = "addmul_outputs";
|
||||
const TEST_DATA_MUL_OUTPUTS: &str = "mul_outputs";
|
||||
const TEST_DATA_MODMUL_OUTPUTS: &str = "modmul_outputs";
|
||||
const TEST_DATA_MODEXP_OUTPUTS: &str = "modexp_outputs";
|
||||
const TEST_DATA_MODEXP_OUTPUTS_FULL: &str = "modexp_outputs_full";
|
||||
|
||||
const BIT_SIZES_TO_TEST: [usize; 15] = [
|
||||
0, 1, 2, 127, 128, 129, 255, 256, 257, 512, 1000, 1023, 1024, 1025, 31415,
|
||||
@ -282,7 +283,7 @@ fn test_modexp_bignum(b: BigUint, e: BigUint, m: BigUint, expected_output: BigUi
|
||||
let scratch_3 = 7 * len; // size 2*len
|
||||
let scratch_4 = 9 * len; // size 2*len
|
||||
let scratch_5 = 11 * len; // size 2*len
|
||||
let (new_memory, _new_stack) = run_test(
|
||||
let (mut new_memory, _new_stack) = run_test(
|
||||
"modexp_bignum",
|
||||
memory,
|
||||
vec![
|
||||
@ -298,6 +299,10 @@ fn test_modexp_bignum(b: BigUint, e: BigUint, m: BigUint, expected_output: BigUi
|
||||
scratch_5.into(),
|
||||
],
|
||||
)?;
|
||||
new_memory.resize(
|
||||
new_memory.len().max(output_start_loc + output_len),
|
||||
0.into(),
|
||||
);
|
||||
|
||||
let output = mem_vec_to_biguint(&new_memory[output_start_loc..output_start_loc + output_len]);
|
||||
assert_eq!(output, expected_output);
|
||||
@ -506,6 +511,47 @@ fn test_modmul_bignum_all() -> Result<()> {
|
||||
|
||||
#[test]
|
||||
fn test_modexp_bignum_all() -> Result<()> {
|
||||
let exp_bit_sizes = vec![2, 9, 11, 16];
|
||||
|
||||
for bit_size in &BIT_SIZES_TO_TEST[3..7] {
|
||||
for exp_bit_size in &exp_bit_sizes {
|
||||
let b = gen_bignum(*bit_size);
|
||||
let e = gen_bignum(*exp_bit_size);
|
||||
let m = gen_bignum(*bit_size);
|
||||
if !m.is_zero() {
|
||||
let output = b.clone().modpow(&e, &m);
|
||||
test_modexp_bignum(b, e, m, output)?;
|
||||
}
|
||||
|
||||
let b = max_bignum(*bit_size);
|
||||
let e = max_bignum(*exp_bit_size);
|
||||
let m = max_bignum(*bit_size);
|
||||
if !m.is_zero() {
|
||||
let output = b.modpow(&e, &m);
|
||||
test_modexp_bignum(b, e, m, output)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let inputs = test_data_biguint(TEST_DATA_BIGNUM_INPUTS);
|
||||
let modexp_outputs = test_data_biguint(TEST_DATA_MODEXP_OUTPUTS);
|
||||
let mut modexp_outputs_iter = modexp_outputs.into_iter();
|
||||
for b in &inputs[..9] {
|
||||
// Include only smaller exponents, to keep tests from becoming too slow.
|
||||
for e in &inputs[..6] {
|
||||
for m in &inputs[..9] {
|
||||
let output = modexp_outputs_iter.next().unwrap();
|
||||
test_modexp_bignum(b.clone(), e.clone(), m.clone(), output)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore] // Too slow to run on CI.
|
||||
fn test_modexp_bignum_all_full() -> Result<()> {
|
||||
// Only test smaller values for exponent.
|
||||
let exp_bit_sizes = vec![2, 100, 127, 128, 129];
|
||||
|
||||
@ -530,13 +576,12 @@ fn test_modexp_bignum_all() -> Result<()> {
|
||||
}
|
||||
|
||||
let inputs = test_data_biguint(TEST_DATA_BIGNUM_INPUTS);
|
||||
let modexp_outputs = test_data_biguint(TEST_DATA_MODEXP_OUTPUTS);
|
||||
let modexp_outputs = test_data_biguint(TEST_DATA_MODEXP_OUTPUTS_FULL);
|
||||
let mut modexp_outputs_iter = modexp_outputs.into_iter();
|
||||
for b in &inputs {
|
||||
// Include only smaller exponents, to keep tests from becoming too slow.
|
||||
for e in &inputs[..7] {
|
||||
// For m, skip the first input, which is zero.
|
||||
for m in &inputs[1..] {
|
||||
for m in &inputs {
|
||||
let output = modexp_outputs_iter.next().unwrap();
|
||||
test_modexp_bignum(b.clone(), e.clone(), m.clone(), output)?;
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large (Load Diff)
1575  evm/src/cpu/kernel/tests/bignum/test_data/modexp_outputs_full (new file)
File diff suppressed because it is too large (Load Diff)
132  evm/src/cpu/kernel/tests/blake2_f.rs (new file)
@ -0,0 +1,132 @@
|
||||
use anyhow::Result;
|
||||
|
||||
use crate::cpu::kernel::interpreter::{
|
||||
run_interpreter_with_memory, InterpreterMemoryInitialization,
|
||||
};
|
||||
use crate::memory::segments::Segment::KernelGeneral;
|
||||
|
||||
fn reverse_bytes_u64(input: u64) -> u64 {
|
||||
let mut result = 0;
|
||||
for i in 0..8 {
|
||||
result |= ((input >> (i * 8)) & 0xff) << ((7 - i) * 8);
|
||||
}
|
||||
result
|
||||
}
|
||||
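// (Reviewer note: this helper is equivalent to the standard library's u64::swap_bytes, e.g.
// reverse_bytes_u64(0x0123456789abcdef) == 0x0123456789abcdefu64.swap_bytes() == 0xefcdab8967452301.)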
|
||||
fn convert_input(input: &str) -> Result<(u32, [u64; 8], [u64; 16], u64, u64, bool)> {
|
||||
let rounds = u32::from_str_radix(&input[..8], 16).unwrap();
|
||||
|
||||
let mut h = [0u64; 8];
|
||||
for i in 0..8 {
|
||||
h[i] = reverse_bytes_u64(
|
||||
u64::from_str_radix(&input[8 + i * 16..8 + (i + 1) * 16], 16).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut m = [0u64; 16];
|
||||
for i in 0..16 {
|
||||
m[i] = reverse_bytes_u64(
|
||||
u64::from_str_radix(&input[136 + i * 16..136 + (i + 1) * 16], 16).unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
let t_0 = reverse_bytes_u64(u64::from_str_radix(&input[392..408], 16).unwrap());
|
||||
let t_1 = reverse_bytes_u64(u64::from_str_radix(&input[408..424], 16).unwrap());
|
||||
let flag = u8::from_str_radix(&input[424..426], 16).unwrap() != 0;
|
||||
|
||||
Ok((rounds, h, m, t_0, t_1, flag))
|
||||
}
|
||||
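// (Reviewer note on the slicing above, assuming the EIP-152 call-data layout: the hex string
// encodes rounds (4 bytes, big-endian), h (64 bytes), m (128 bytes), t_0 and t_1 (8 bytes each)
// and the final-block flag (1 byte), 213 bytes in total (426 hex characters), with the 64-bit
// words stored little-endian, hence the reverse_bytes_u64 calls.)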
|
||||
fn convert_output(output: [u64; 8]) -> String {
|
||||
output
|
||||
.iter()
|
||||
.map(|&x| format!("{:016x}", reverse_bytes_u64(x)))
|
||||
.collect::<Vec<_>>()
|
||||
.join("")
|
||||
}
|
||||
|
||||
fn run_blake2_f(
|
||||
rounds: u32,
|
||||
h: [u64; 8],
|
||||
m: [u64; 16],
|
||||
t_0: u64,
|
||||
t_1: u64,
|
||||
flag: bool,
|
||||
) -> Result<[u64; 8]> {
|
||||
let mut stack = vec![];
|
||||
stack.push(rounds.into());
|
||||
stack.append(&mut h.iter().map(|&x| x.into()).collect());
|
||||
stack.append(&mut m.iter().map(|&x| x.into()).collect());
|
||||
stack.push(t_0.into());
|
||||
stack.push(t_1.into());
|
||||
stack.push(u8::from(flag).into());
|
||||
stack.push(0xDEADBEEFu32.into());
|
||||
|
||||
let interpreter_setup = InterpreterMemoryInitialization {
|
||||
label: "blake2_f".to_string(),
|
||||
stack,
|
||||
segment: KernelGeneral,
|
||||
memory: vec![],
|
||||
};
|
||||
|
||||
let result = run_interpreter_with_memory(interpreter_setup).unwrap();
|
||||
let mut hash = result.stack().to_vec();
|
||||
hash.reverse();
|
||||
|
||||
Ok(hash
|
||||
.iter()
|
||||
.map(|&x| x.as_u64())
|
||||
.collect::<Vec<_>>()
|
||||
.try_into()
|
||||
.unwrap())
|
||||
}
|
||||
|
||||
// Test data from EIP-152.
|
||||
|
||||
fn test_blake2_f_eip(input: &str, output: &str) -> Result<()> {
|
||||
let (rounds, h, m, t_0, t_1, flag) = convert_input(input).unwrap();
|
||||
let result = run_blake2_f(rounds, h, m, t_0, t_1, flag).unwrap();
|
||||
assert_eq!(convert_output(result), output);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blake2_f_4() -> Result<()> {
|
||||
test_blake2_f_eip(
|
||||
"0000000048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001",
|
||||
"08c9bcf367e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d282e6ad7f520e511f6c3e2b8c68059b9442be0454267ce079217e1319cde05b",
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blake2_f_5() -> Result<()> {
|
||||
test_blake2_f_eip(
|
||||
"0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001",
|
||||
"ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d17d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923",
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blake2_f_6() -> Result<()> {
|
||||
test_blake2_f_eip(
|
||||
"0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000",
|
||||
"75ab69d3190a562c51aef8d88f1c2775876944407270c42c9844252c26d2875298743e7f6d5ea2f2d3e8d226039cd31b4e426ac4f2d3d666a610c2116fde4735",
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blake2_f_7() -> Result<()> {
|
||||
test_blake2_f_eip(
|
||||
"0000000148c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001",
|
||||
"b63a380cb2897d521994a85234ee2c181b5f844d2c624c002677e9703449d2fba551b3a8333bcdf5f2f7e08993d53923de3d64fcc68c034e717b9293fed7a421",
|
||||
)
|
||||
}
|
||||
|
||||
#[ignore]
|
||||
#[test]
|
||||
fn test_blake2_f_8() -> Result<()> {
|
||||
test_blake2_f_eip(
|
||||
"ffffffff48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001",
|
||||
"fc59093aafa9ab43daae0e914c57635c5402d8e3d2130eb9b3cc181de7f0ecf9b22bf99a7815ce16419e200e01846e6b5df8cc7703041bbceb571de6631d2615",
|
||||
)
|
||||
}
|
||||
@ -2,17 +2,26 @@ use anyhow::Result;
|
||||
use ethereum_types::U256;
|
||||
use rand::Rng;
|
||||
|
||||
use crate::bn254_pairing::{
|
||||
gen_fp12_sparse, invariant_exponent, miller_loop, tate, Curve, TwistedCurve,
|
||||
};
|
||||
use crate::cpu::kernel::interpreter::{
|
||||
run_interpreter_with_memory, Interpreter, InterpreterMemoryInitialization,
|
||||
};
|
||||
use crate::curve_pairings::{
|
||||
bn_final_exponent, bn_miller_loop, gen_bn_fp12_sparse, Curve, CyclicGroup,
|
||||
};
|
||||
use crate::extension_tower::{FieldExt, Fp12, Fp2, Fp6, Stack, BN254};
|
||||
use crate::memory::segments::Segment::BnPairing;
|
||||
|
||||
fn run_bn_mul_fp6(f: Fp6<BN254>, g: Fp6<BN254>, label: &str) -> Fp6<BN254> {
|
||||
let mut stack = f.to_stack().to_vec();
|
||||
fn extract_stack(interpreter: Interpreter<'static>) -> Vec<U256> {
|
||||
interpreter
|
||||
.stack()
|
||||
.iter()
|
||||
.rev()
|
||||
.cloned()
|
||||
.collect::<Vec<U256>>()
|
||||
}
|
||||
|
||||
fn run_bn_mul_fp6(f: Fp6<BN254>, g: Fp6<BN254>, label: &str) -> Vec<U256> {
|
||||
let mut stack = f.to_stack();
|
||||
if label == "mul_fp254_6" {
|
||||
stack.extend(g.to_stack().to_vec());
|
||||
}
|
||||
@ -43,10 +52,10 @@ fn test_bn_mul_fp6() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_bn_mul_fp12(f: Fp12<BN254>, g: Fp12<BN254>, label: &str) -> Fp12<BN254> {
|
||||
let in0: usize = 200;
|
||||
let in1: usize = 212;
|
||||
let out: usize = 224;
|
||||
fn run_bn_mul_fp12(f: Fp12<BN254>, g: Fp12<BN254>, label: &str) -> Vec<U256> {
|
||||
let in0: usize = 100;
|
||||
let in1: usize = 112;
|
||||
let out: usize = 124;
|
||||
|
||||
let mut stack = vec![
|
||||
U256::from(in0),
|
||||
@ -73,7 +82,7 @@ fn test_bn_mul_fp12() -> Result<()> {
|
||||
let mut rng = rand::thread_rng();
|
||||
let f: Fp12<BN254> = rng.gen::<Fp12<BN254>>();
|
||||
let g: Fp12<BN254> = rng.gen::<Fp12<BN254>>();
|
||||
let h: Fp12<BN254> = gen_fp12_sparse(&mut rng);
|
||||
let h: Fp12<BN254> = gen_bn_fp12_sparse(&mut rng);
|
||||
|
||||
let output_normal = run_bn_mul_fp12(f, g, "mul_fp254_12");
|
||||
let output_sparse = run_bn_mul_fp12(f, h, "mul_fp254_12_sparse");
|
||||
@ -109,8 +118,8 @@ fn test_bn_frob_fp6() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_bn_frob_fp12(n: usize, f: Fp12<BN254>) -> Fp12<BN254> {
|
||||
let ptr: usize = 200;
|
||||
fn run_bn_frob_fp12(f: Fp12<BN254>, n: usize) -> Vec<U256> {
|
||||
let ptr: usize = 100;
|
||||
let setup = InterpreterMemoryInitialization {
|
||||
label: format!("test_frob_fp254_12_{}", n),
|
||||
stack: vec![U256::from(ptr)],
|
||||
@ -126,8 +135,9 @@ fn run_bn_frob_fp12(n: usize, f: Fp12<BN254>) -> Fp12<BN254> {
|
||||
fn test_frob_fp12() -> Result<()> {
|
||||
let mut rng = rand::thread_rng();
|
||||
let f: Fp12<BN254> = rng.gen::<Fp12<BN254>>();
|
||||
|
||||
for n in [1, 2, 3, 6] {
|
||||
let output = run_bn_frob_fp12(n, f);
|
||||
let output = run_bn_frob_fp12(f, n);
|
||||
assert_eq!(output, f.frob(n));
|
||||
}
|
||||
Ok(())
|
||||
@ -135,8 +145,8 @@ fn test_frob_fp12() -> Result<()> {
|
||||
|
||||
#[test]
|
||||
fn test_bn_inv_fp12() -> Result<()> {
|
||||
let ptr: usize = 200;
|
||||
let inv: usize = 212;
|
||||
let ptr: usize = 100;
|
||||
let inv: usize = 112;
|
||||
let mut rng = rand::thread_rng();
|
||||
let f: Fp12<BN254> = rng.gen::<Fp12<BN254>>();
|
||||
|
||||
@ -156,103 +166,54 @@ fn test_bn_inv_fp12() -> Result<()> {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bn_final_exponentiation() -> Result<()> {
|
||||
let ptr: usize = 200;
|
||||
fn test_bn_final_exponent() -> Result<()> {
|
||||
let ptr: usize = 100;
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
let f: Fp12<BN254> = rng.gen::<Fp12<BN254>>();
|
||||
|
||||
let setup = InterpreterMemoryInitialization {
|
||||
label: "bn254_invariant_exponent".to_string(),
|
||||
stack: vec![U256::from(ptr), U256::from(0xdeadbeefu32)],
|
||||
label: "bn254_final_exponent".to_string(),
|
||||
stack: vec![
|
||||
U256::zero(),
|
||||
U256::zero(),
|
||||
U256::from(ptr),
|
||||
U256::from(0xdeadbeefu32),
|
||||
],
|
||||
segment: BnPairing,
|
||||
memory: vec![(ptr, f.to_stack().to_vec())],
|
||||
};
|
||||
|
||||
let interpreter: Interpreter = run_interpreter_with_memory(setup).unwrap();
|
||||
let output: Vec<U256> = interpreter.extract_kernel_memory(BnPairing, ptr..ptr + 12);
|
||||
let expected: Vec<U256> = invariant_exponent(f).to_stack().to_vec();
|
||||
let expected: Vec<U256> = bn_final_exponent(f).to_stack();
|
||||
|
||||
assert_eq!(output, expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// The curve is cyclic with generator (1, 2)
|
||||
pub const CURVE_GENERATOR: Curve = {
|
||||
Curve {
|
||||
x: BN254 { val: U256::one() },
|
||||
y: BN254 {
|
||||
val: U256([2, 0, 0, 0]),
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
// The twisted curve is cyclic with generator (x, y) as follows
|
||||
pub const TWISTED_GENERATOR: TwistedCurve = {
|
||||
TwistedCurve {
|
||||
x: Fp2 {
|
||||
re: BN254 {
|
||||
val: U256([
|
||||
0x46debd5cd992f6ed,
|
||||
0x674322d4f75edadd,
|
||||
0x426a00665e5c4479,
|
||||
0x1800deef121f1e76,
|
||||
]),
|
||||
},
|
||||
im: BN254 {
|
||||
val: U256([
|
||||
0x97e485b7aef312c2,
|
||||
0xf1aa493335a9e712,
|
||||
0x7260bfb731fb5d25,
|
||||
0x198e9393920d483a,
|
||||
]),
|
||||
},
|
||||
},
|
||||
y: Fp2 {
|
||||
re: BN254 {
|
||||
val: U256([
|
||||
0x4ce6cc0166fa7daa,
|
||||
0xe3d1e7690c43d37b,
|
||||
0x4aab71808dcb408f,
|
||||
0x12c85ea5db8c6deb,
|
||||
]),
|
||||
},
|
||||
im: BN254 {
|
||||
val: U256([
|
||||
0x55acdadcd122975b,
|
||||
0xbc4b313370b38ef3,
|
||||
0xec9e99ad690c3395,
|
||||
0x090689d0585ff075,
|
||||
]),
|
||||
},
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_bn_miller_loop() -> Result<()> {
|
||||
let ptr: usize = 200;
|
||||
let out: usize = 206;
|
||||
let inputs: Vec<U256> = vec![
|
||||
CURVE_GENERATOR.x.val,
|
||||
CURVE_GENERATOR.y.val,
|
||||
TWISTED_GENERATOR.x.re.val,
|
||||
TWISTED_GENERATOR.x.im.val,
|
||||
TWISTED_GENERATOR.y.re.val,
|
||||
TWISTED_GENERATOR.y.im.val,
|
||||
];
|
||||
fn test_bn_miller() -> Result<()> {
|
||||
let ptr: usize = 100;
|
||||
let out: usize = 106;
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
let p: Curve<BN254> = rng.gen::<Curve<BN254>>();
|
||||
let q: Curve<Fp2<BN254>> = rng.gen::<Curve<Fp2<BN254>>>();
|
||||
|
||||
let mut input = p.to_stack();
|
||||
input.extend(q.to_stack());
|
||||
|
||||
let setup = InterpreterMemoryInitialization {
|
||||
label: "bn254_miller".to_string(),
|
||||
stack: vec![U256::from(ptr), U256::from(out), U256::from(0xdeadbeefu32)],
|
||||
segment: BnPairing,
|
||||
memory: vec![(ptr, inputs)],
|
||||
memory: vec![(ptr, input)],
|
||||
};
|
||||
let interpreter = run_interpreter_with_memory(setup).unwrap();
|
||||
let output: Vec<U256> = interpreter.extract_kernel_memory(BnPairing, out..out + 12);
|
||||
let expected = miller_loop(CURVE_GENERATOR, TWISTED_GENERATOR)
|
||||
.to_stack()
|
||||
.to_vec();
|
||||
let expected = bn_miller_loop(p, q).to_stack();
|
||||
|
||||
assert_eq!(output, expected);
|
||||
|
||||
@ -260,29 +221,41 @@ fn test_bn_miller_loop() -> Result<()> {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bn_tate_pairing() -> Result<()> {
|
||||
let ptr: usize = 200;
|
||||
let out: usize = 206;
|
||||
let inputs: Vec<U256> = vec![
|
||||
CURVE_GENERATOR.x.val,
|
||||
CURVE_GENERATOR.y.val,
|
||||
TWISTED_GENERATOR.x.re.val,
|
||||
TWISTED_GENERATOR.x.im.val,
|
||||
TWISTED_GENERATOR.y.re.val,
|
||||
TWISTED_GENERATOR.y.im.val,
|
||||
];
|
||||
fn test_bn_pairing() -> Result<()> {
|
||||
let out: usize = 100;
|
||||
let ptr: usize = 112;
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
let k: usize = rng.gen_range(1..10);
|
||||
let mut acc: i32 = 0;
|
||||
let mut input: Vec<U256> = vec![];
|
||||
for _ in 1..k {
|
||||
let m: i32 = rng.gen_range(-8..8);
|
||||
let n: i32 = rng.gen_range(-8..8);
|
||||
acc -= m * n;
|
||||
|
||||
let p: Curve<BN254> = Curve::<BN254>::int(m);
|
||||
let q: Curve<Fp2<BN254>> = Curve::<Fp2<BN254>>::int(n);
|
||||
input.extend(p.to_stack());
|
||||
input.extend(q.to_stack());
|
||||
}
|
||||
let p: Curve<BN254> = Curve::<BN254>::int(acc);
|
||||
let q: Curve<Fp2<BN254>> = Curve::<Fp2<BN254>>::GENERATOR;
|
||||
input.extend(p.to_stack());
|
||||
input.extend(q.to_stack());
|
||||
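// (Reviewer note on why the final stack check expects 1: by bilinearity,
// prod_i e(m_i * G1, n_i * G2) = e(G1, G2)^(sum_i m_i * n_i), and the last pair is chosen as
// (acc * G1, G2) with acc = -sum_i m_i * n_i, so the product of all k pairings is the identity.)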
|
||||
let setup = InterpreterMemoryInitialization {
|
||||
label: "bn254_tate".to_string(),
|
||||
stack: vec![U256::from(ptr), U256::from(out), U256::from(0xdeadbeefu32)],
|
||||
label: "bn254_pairing".to_string(),
|
||||
stack: vec![
|
||||
U256::from(k),
|
||||
U256::from(ptr),
|
||||
U256::from(out),
|
||||
U256::from(0xdeadbeefu32),
|
||||
],
|
||||
segment: BnPairing,
|
||||
memory: vec![(ptr, inputs)],
|
||||
memory: vec![(ptr, input)],
|
||||
};
|
||||
let interpreter = run_interpreter_with_memory(setup).unwrap();
|
||||
let output: Vec<U256> = interpreter.extract_kernel_memory(BnPairing, out..out + 12);
|
||||
let expected = tate(CURVE_GENERATOR, TWISTED_GENERATOR).to_stack().to_vec();
|
||||
|
||||
assert_eq!(output, expected);
|
||||
|
||||
assert_eq!(interpreter.stack()[0], U256::one());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -99,12 +99,12 @@ fn test_insert_accessed_storage_keys() -> Result<()> {
|
||||
let mut rng = thread_rng();
|
||||
let n = rng.gen_range(1..10);
|
||||
let storage_keys = (0..n)
|
||||
.map(|_| (rng.gen::<Address>(), U256(rng.gen())))
|
||||
.map(|_| (rng.gen::<Address>(), U256(rng.gen()), U256(rng.gen())))
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect::<Vec<(Address, U256)>>();
|
||||
.collect::<Vec<(Address, U256, U256)>>();
|
||||
let storage_key_in_list = storage_keys[rng.gen_range(0..n)];
|
||||
let storage_key_not_in_list = (rng.gen::<Address>(), U256(rng.gen()));
|
||||
let storage_key_not_in_list = (rng.gen::<Address>(), U256(rng.gen()), U256(rng.gen()));
|
||||
assert!(
|
||||
!storage_keys.contains(&storage_key_not_in_list),
|
||||
"Cosmic luck or bad RNG?"
|
||||
@ -113,6 +113,7 @@ fn test_insert_accessed_storage_keys() -> Result<()> {
|
||||
// Test for storage key already in list.
|
||||
let initial_stack = vec![
|
||||
retaddr,
|
||||
storage_key_in_list.2,
|
||||
storage_key_in_list.1,
|
||||
U256::from(storage_key_in_list.0 .0.as_slice()),
|
||||
];
|
||||
@ -122,30 +123,35 @@ fn test_insert_accessed_storage_keys() -> Result<()> {
|
||||
interpreter
|
||||
.generation_state
|
||||
.memory
|
||||
.set(MemoryAddress::new(0, AccessedStorageKeys, 2 * i), addr);
|
||||
.set(MemoryAddress::new(0, AccessedStorageKeys, 3 * i), addr);
|
||||
interpreter.generation_state.memory.set(
|
||||
MemoryAddress::new(0, AccessedStorageKeys, 2 * i + 1),
|
||||
MemoryAddress::new(0, AccessedStorageKeys, 3 * i + 1),
|
||||
storage_keys[i].1,
|
||||
);
|
||||
interpreter.generation_state.memory.set(
|
||||
MemoryAddress::new(0, AccessedStorageKeys, 3 * i + 2),
|
||||
storage_keys[i].2,
|
||||
);
|
||||
}
|
||||
interpreter.generation_state.memory.set(
|
||||
MemoryAddress::new(0, GlobalMetadata, AccessedStorageKeysLen as usize),
|
||||
U256::from(2 * n),
|
||||
U256::from(3 * n),
|
||||
);
|
||||
interpreter.run()?;
|
||||
assert_eq!(interpreter.stack(), &[U256::zero()]);
|
||||
assert_eq!(interpreter.stack(), &[storage_key_in_list.2, U256::zero()]);
|
||||
assert_eq!(
|
||||
interpreter.generation_state.memory.get(MemoryAddress::new(
|
||||
0,
|
||||
GlobalMetadata,
|
||||
AccessedStorageKeysLen as usize
|
||||
)),
|
||||
U256::from(2 * n)
|
||||
U256::from(3 * n)
|
||||
);
|
||||
|
||||
// Test for storage key not in list.
|
||||
let initial_stack = vec![
|
||||
retaddr,
|
||||
storage_key_not_in_list.2,
|
||||
storage_key_not_in_list.1,
|
||||
U256::from(storage_key_not_in_list.0 .0.as_slice()),
|
||||
];
|
||||
@ -155,41 +161,56 @@ fn test_insert_accessed_storage_keys() -> Result<()> {
|
||||
interpreter
|
||||
.generation_state
|
||||
.memory
|
||||
.set(MemoryAddress::new(0, AccessedStorageKeys, 2 * i), addr);
|
||||
.set(MemoryAddress::new(0, AccessedStorageKeys, 3 * i), addr);
|
||||
interpreter.generation_state.memory.set(
|
||||
MemoryAddress::new(0, AccessedStorageKeys, 2 * i + 1),
|
||||
MemoryAddress::new(0, AccessedStorageKeys, 3 * i + 1),
|
||||
storage_keys[i].1,
|
||||
);
|
||||
interpreter.generation_state.memory.set(
|
||||
MemoryAddress::new(0, AccessedStorageKeys, 3 * i + 2),
|
||||
storage_keys[i].2,
|
||||
);
|
||||
}
|
||||
interpreter.generation_state.memory.set(
|
||||
MemoryAddress::new(0, GlobalMetadata, AccessedStorageKeysLen as usize),
|
||||
U256::from(2 * n),
|
||||
U256::from(3 * n),
|
||||
);
|
||||
interpreter.run()?;
|
||||
assert_eq!(interpreter.stack(), &[U256::one()]);
|
||||
assert_eq!(
|
||||
interpreter.stack(),
|
||||
&[storage_key_not_in_list.2, U256::one()]
|
||||
);
|
||||
assert_eq!(
|
||||
interpreter.generation_state.memory.get(MemoryAddress::new(
|
||||
0,
|
||||
GlobalMetadata,
|
||||
AccessedStorageKeysLen as usize
|
||||
)),
|
||||
U256::from(2 * (n + 1))
|
||||
U256::from(3 * (n + 1))
|
||||
);
|
||||
assert_eq!(
|
||||
interpreter
|
||||
.generation_state
|
||||
.memory
|
||||
.get(MemoryAddress::new(0, AccessedStorageKeys, 2 * n,)),
|
||||
.get(MemoryAddress::new(0, AccessedStorageKeys, 3 * n,)),
|
||||
U256::from(storage_key_not_in_list.0 .0.as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
interpreter.generation_state.memory.get(MemoryAddress::new(
|
||||
0,
|
||||
AccessedStorageKeys,
|
||||
2 * n + 1,
|
||||
3 * n + 1,
|
||||
)),
|
||||
storage_key_not_in_list.1
|
||||
);
|
||||
assert_eq!(
|
||||
interpreter.generation_state.memory.get(MemoryAddress::new(
|
||||
0,
|
||||
AccessedStorageKeys,
|
||||
3 * n + 2,
|
||||
)),
|
||||
storage_key_not_in_list.2
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
use anyhow::Result;
|
||||
use blake2::Blake2b512;
|
||||
use ethereum_types::{U256, U512};
|
||||
// use blake2::Blake2b512;
|
||||
use ethereum_types::U256;
|
||||
use rand::{thread_rng, Rng};
|
||||
use ripemd::{Digest, Ripemd160};
|
||||
use sha2::Sha256;
|
||||
@ -10,13 +10,6 @@ use crate::cpu::kernel::interpreter::{
|
||||
};
|
||||
use crate::memory::segments::Segment::KernelGeneral;
|
||||
|
||||
/// Standard Blake2b implementation.
|
||||
fn blake2b(input: Vec<u8>) -> U512 {
|
||||
let mut hasher = Blake2b512::new();
|
||||
hasher.update(input);
|
||||
U512::from(&hasher.finalize()[..])
|
||||
}
|
||||
|
||||
/// Standard RipeMD implementation.
|
||||
fn ripemd(input: Vec<u8>) -> U256 {
|
||||
let mut hasher = Ripemd160::new();
|
||||
@ -58,10 +51,6 @@ fn make_interpreter_setup(
|
||||
}
|
||||
}
|
||||
|
||||
fn combine_u256s(hi: U256, lo: U256) -> U512 {
|
||||
U512::from(lo) + (U512::from(hi) << 256)
|
||||
}
|
||||
|
||||
fn prepare_test<T>(
|
||||
hash_fn_label: &str,
|
||||
hash_input_virt: (usize, usize),
|
||||
@ -99,28 +88,6 @@ fn test_hash_256(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn test_hash_512(
|
||||
hash_fn_label: &str,
|
||||
hash_input_virt: (usize, usize),
|
||||
standard_implementation: &dyn Fn(Vec<u8>) -> U512,
|
||||
) -> Result<()> {
|
||||
let (expected, result_stack) =
|
||||
prepare_test(hash_fn_label, hash_input_virt, standard_implementation).unwrap();
|
||||
|
||||
// Extract the final output.
|
||||
let actual = combine_u256s(result_stack[0], result_stack[1]);
|
||||
|
||||
// Check that the result is correct.
|
||||
assert_eq!(expected, actual);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blake2b() -> Result<()> {
|
||||
test_hash_512("blake2b", (0, 2), &blake2b)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ripemd() -> Result<()> {
|
||||
test_hash_256("ripemd", (200, 200), &ripemd)
|
||||
@ -130,3 +97,40 @@ fn test_ripemd() -> Result<()> {
|
||||
fn test_sha2() -> Result<()> {
|
||||
test_hash_256("sha2", (0, 1), &sha2)
|
||||
}
|
||||
|
||||
// Since the Blake precompile requires only the blake2_f compression function instead of the full blake2b hash,
|
||||
// the full hash function is not included in the kernel. To include it, blake2/compression.asm and blake2/main.asm
|
||||
// must be added to the kernel.
|
||||
|
||||
// /// Standard Blake2b implementation.
|
||||
// fn blake2b(input: Vec<u8>) -> U512 {
|
||||
// let mut hasher = Blake2b512::new();
|
||||
// hasher.update(input);
|
||||
// U512::from(&hasher.finalize()[..])
|
||||
// }
|
||||
|
||||
// fn combine_u256s(hi: U256, lo: U256) -> U512 {
|
||||
// U512::from(lo) + (U512::from(hi) << 256)
|
||||
// }
|
||||
|
||||
// fn test_hash_512(
|
||||
// hash_fn_label: &str,
|
||||
// hash_input_virt: (usize, usize),
|
||||
// standard_implementation: &dyn Fn(Vec<u8>) -> U512,
|
||||
// ) -> Result<()> {
|
||||
// let (expected, result_stack) =
|
||||
// prepare_test(hash_fn_label, hash_input_virt, standard_implementation).unwrap();
|
||||
|
||||
// // Extract the final output.
|
||||
// let actual = combine_u256s(result_stack[0], result_stack[1]);
|
||||
|
||||
// // Check that the result is correct.
|
||||
// assert_eq!(expected, actual);
|
||||
|
||||
// Ok(())
|
||||
// }
|
||||
|
||||
// #[test]
|
||||
// fn test_blake2b() -> Result<()> {
|
||||
// test_hash_512("blake2b", (0, 2), &blake2b)
|
||||
// }
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
mod account_code;
|
||||
mod balance;
|
||||
mod bignum;
|
||||
mod blake2_f;
|
||||
mod bls381;
|
||||
mod bn254;
|
||||
mod core;
|
||||
|
||||
@ -1,25 +1,75 @@
|
||||
use std::ops::Add;
|
||||
use std::ops::{Add, Mul, Neg};
|
||||
|
||||
use ethereum_types::U256;
|
||||
use rand::distributions::Standard;
|
||||
use rand::prelude::Distribution;
|
||||
use rand::Rng;
|
||||
|
||||
use crate::extension_tower::{FieldExt, Fp12, Fp2, Fp6, BN254};
|
||||
use crate::extension_tower::{FieldExt, Fp12, Fp2, Fp6, Stack, BN254};
|
||||
|
||||
// The curve consists of pairs (x, y): (BN254, BN254) | y^2 = x^3 + 2
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub struct Curve {
|
||||
pub x: BN254,
|
||||
pub y: BN254,
|
||||
pub struct Curve<T>
|
||||
where
|
||||
T: FieldExt,
|
||||
{
|
||||
pub x: T,
|
||||
pub y: T,
|
||||
}
|
||||
|
||||
impl<T: FieldExt> Curve<T> {
|
||||
pub fn unit() -> Self {
|
||||
Curve {
|
||||
x: T::ZERO,
|
||||
y: T::ZERO,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: FieldExt + Stack> Curve<T> {
|
||||
pub fn on_stack(self) -> Vec<U256> {
|
||||
let mut stack = self.x.on_stack();
|
||||
stack.extend(self.y.on_stack());
|
||||
stack
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Curve<T>
|
||||
where
|
||||
T: FieldExt,
|
||||
Curve<T>: CyclicGroup,
|
||||
{
|
||||
pub fn int(z: i32) -> Self {
|
||||
Curve::<T>::GENERATOR * z
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Distribution<Curve<T>> for Standard
|
||||
where
|
||||
T: FieldExt,
|
||||
Curve<T>: CyclicGroup,
|
||||
{
|
||||
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Curve<T> {
|
||||
Curve::<T>::GENERATOR * rng.gen::<i32>()
|
||||
}
|
||||
}
|
||||
|
||||
/// Standard addition formula for elliptic curves, restricted to the cases
|
||||
/// where neither inputs nor output would ever be the identity O. source:
|
||||
/// https://en.wikipedia.org/wiki/Elliptic_curve#Algebraic_interpretation
|
||||
impl Add for Curve {
|
||||
impl<T: FieldExt> Add for Curve<T> {
|
||||
type Output = Self;
|
||||
|
||||
fn add(self, other: Self) -> Self {
|
||||
if self == Curve::<T>::unit() {
|
||||
return other;
|
||||
}
|
||||
if other == Curve::<T>::unit() {
|
||||
return self;
|
||||
}
|
||||
if self == -other {
|
||||
return Curve::<T>::unit();
|
||||
}
|
||||
let m = if self == other {
|
||||
BN254::new(3) * self.x * self.x / (BN254::new(2) * self.y)
|
||||
T::new(3) * self.x * self.x / (T::new(2) * self.y)
|
||||
} else {
|
||||
(other.y - self.y) / (other.x - self.x)
|
||||
};
|
||||
@ -31,33 +81,130 @@ impl Add for Curve {
|
||||
}
|
||||
}
|
||||
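// (Reviewer note: the slope computed in `add` above is the standard chord-and-tangent formula,
// m = (y_2 - y_1) / (x_2 - x_1) for distinct points and m = 3 x_1^2 / (2 y_1) for doubling,
// valid here because both BN254 curves have the short Weierstrass form y^2 = x^3 + b with a = 0;
// the result is then x_3 = m^2 - x_1 - x_2 and y_3 = m (x_1 - x_3) - y_1.)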
|
||||
// The twisted curve consists of pairs (x, y): (Fp2, Fp2) | y^2 = x^3 + 3/(9 + i)
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub struct TwistedCurve {
|
||||
pub x: Fp2<BN254>,
|
||||
pub y: Fp2<BN254>,
|
||||
impl<T: FieldExt> Neg for Curve<T> {
|
||||
type Output = Curve<T>;
|
||||
|
||||
fn neg(self) -> Self {
|
||||
Curve {
|
||||
x: self.x,
|
||||
y: -self.y,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait CyclicGroup {
|
||||
const GENERATOR: Self;
|
||||
}
|
||||
|
||||
/// The BN curve consists of pairs
|
||||
/// (x, y): (BN254, BN254) | y^2 = x^3 + 2
|
||||
// with generator given by (1, 2)
|
||||
impl CyclicGroup for Curve<BN254> {
|
||||
const GENERATOR: Curve<BN254> = Curve {
|
||||
x: BN254 { val: U256::one() },
|
||||
y: BN254 {
|
||||
val: U256([2, 0, 0, 0]),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
impl<T> Mul<i32> for Curve<T>
|
||||
where
|
||||
T: FieldExt,
|
||||
Curve<T>: CyclicGroup,
|
||||
{
|
||||
type Output = Curve<T>;
|
||||
|
||||
fn mul(self, other: i32) -> Self {
|
||||
if other == 0 {
|
||||
return Curve::<T>::unit();
|
||||
}
|
||||
if self == Curve::<T>::unit() {
|
||||
return Curve::<T>::unit();
|
||||
}
|
||||
|
||||
let mut x: Curve<T> = self;
|
||||
if other.is_negative() {
|
||||
x = -x;
|
||||
}
|
||||
let mut result = Curve::<T>::unit();
|
||||
|
||||
let mut exp = other.unsigned_abs() as usize;
|
||||
while exp > 0 {
|
||||
if exp % 2 == 1 {
|
||||
result = result + x;
|
||||
}
|
||||
exp >>= 1;
|
||||
x = x + x;
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
/// The twisted curve consists of pairs
|
||||
/// (x, y): (Fp2<BN254>, Fp2<BN254>) | y^2 = x^3 + 3/(9 + i)
|
||||
/// with generator given as follows
|
||||
impl CyclicGroup for Curve<Fp2<BN254>> {
|
||||
const GENERATOR: Curve<Fp2<BN254>> = Curve {
|
||||
x: Fp2 {
|
||||
re: BN254 {
|
||||
val: U256([
|
||||
0x46debd5cd992f6ed,
|
||||
0x674322d4f75edadd,
|
||||
0x426a00665e5c4479,
|
||||
0x1800deef121f1e76,
|
||||
]),
|
||||
},
|
||||
im: BN254 {
|
||||
val: U256([
|
||||
0x97e485b7aef312c2,
|
||||
0xf1aa493335a9e712,
|
||||
0x7260bfb731fb5d25,
|
||||
0x198e9393920d483a,
|
||||
]),
|
||||
},
|
||||
},
|
||||
y: Fp2 {
|
||||
re: BN254 {
|
||||
val: U256([
|
||||
0x4ce6cc0166fa7daa,
|
||||
0xe3d1e7690c43d37b,
|
||||
0x4aab71808dcb408f,
|
||||
0x12c85ea5db8c6deb,
|
||||
]),
|
||||
},
|
||||
im: BN254 {
|
||||
val: U256([
|
||||
0x55acdadcd122975b,
|
||||
0xbc4b313370b38ef3,
|
||||
0xec9e99ad690c3395,
|
||||
0x090689d0585ff075,
|
||||
]),
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// The tate pairing takes a point each from the curve and its twist and outputs an Fp12 element
|
||||
pub fn tate(p: Curve, q: TwistedCurve) -> Fp12<BN254> {
|
||||
let miller_output = miller_loop(p, q);
|
||||
invariant_exponent(miller_output)
|
||||
pub fn bn_tate(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
|
||||
let miller_output = bn_miller_loop(p, q);
|
||||
bn_final_exponent(miller_output)
|
||||
}
|
||||
|
||||
/// Standard code for miller loop, can be found on page 99 at this url:
|
||||
/// https://static1.squarespace.com/static/5fdbb09f31d71c1227082339/t/5ff394720493bd28278889c6/1609798774687/PairingsForBeginners.pdf#page=107
|
||||
/// where EXP is a hardcoding of the array of Booleans that the loop traverses
|
||||
pub fn miller_loop(p: Curve, q: TwistedCurve) -> Fp12<BN254> {
|
||||
/// where BN_EXP is a hardcoding of the array of Booleans that the loop traverses
|
||||
pub fn bn_miller_loop(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
|
||||
let mut r = p;
|
||||
let mut acc: Fp12<BN254> = Fp12::<BN254>::UNIT;
|
||||
let mut line: Fp12<BN254>;
|
||||
|
||||
for i in EXP {
|
||||
line = tangent(r, q);
|
||||
for i in BN_EXP {
|
||||
line = bn_tangent(r, q);
|
||||
r = r + r;
|
||||
acc = line * acc * acc;
|
||||
if i {
|
||||
line = cord(p, r, q);
|
||||
line = bn_cord(p, r, q);
|
||||
r = r + p;
|
||||
acc = line * acc;
|
||||
}
|
||||
@ -66,22 +213,22 @@ pub fn miller_loop(p: Curve, q: TwistedCurve) -> Fp12<BN254> {
|
||||
}
|
||||
|
||||
/// The sloped line function for doubling a point
pub fn tangent(p: Curve, q: TwistedCurve) -> Fp12<BN254> {
pub fn bn_tangent(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
    let cx = -BN254::new(3) * p.x * p.x;
    let cy = BN254::new(2) * p.y;
    sparse_embed(p.y * p.y - BN254::new(9), q.x * cx, q.y * cy)
    bn_sparse_embed(p.y * p.y - BN254::new(9), q.x * cx, q.y * cy)
}

/// The sloped line function for adding two points
pub fn cord(p1: Curve, p2: Curve, q: TwistedCurve) -> Fp12<BN254> {
pub fn bn_cord(p1: Curve<BN254>, p2: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
    let cx = p2.y - p1.y;
    let cy = p1.x - p2.x;
    sparse_embed(p1.y * p2.x - p2.y * p1.x, q.x * cx, q.y * cy)
    bn_sparse_embed(p1.y * p2.x - p2.y * p1.x, q.x * cx, q.y * cy)
}

/// The tangent and cord functions output sparse Fp12 elements.
/// This map embeds the nonzero coefficients into an Fp12.
pub fn sparse_embed(g000: BN254, g01: Fp2<BN254>, g11: Fp2<BN254>) -> Fp12<BN254> {
pub fn bn_sparse_embed(g000: BN254, g01: Fp2<BN254>, g11: Fp2<BN254>) -> Fp12<BN254> {
    let g0 = Fp6 {
        t0: Fp2 {
            re: g000,
@ -100,8 +247,8 @@ pub fn sparse_embed(g000: BN254, g01: Fp2<BN254>, g11: Fp2<BN254>) -> Fp12<BN254
    Fp12 { z0: g0, z1: g1 }
}

pub fn gen_fp12_sparse<R: Rng + ?Sized>(rng: &mut R) -> Fp12<BN254> {
    sparse_embed(
pub fn gen_bn_fp12_sparse<R: Rng + ?Sized>(rng: &mut R) -> Fp12<BN254> {
    bn_sparse_embed(
        rng.gen::<BN254>(),
        rng.gen::<Fp2<BN254>>(),
        rng.gen::<Fp2<BN254>>(),
@ -120,10 +267,10 @@ pub fn gen_fp12_sparse<R: Rng + ?Sized>(rng: &mut R) -> Fp12<BN254> {
/// (p^4 - p^2 + 1)/N = p^3 + (a2)p^2 - (a1)p - a0
/// where 0 < a0, a1, a2 < p. Then the final power is given by
/// y = y_3 * (y^a2)_2 * (y^-a1)_1 * (y^-a0)
pub fn invariant_exponent(f: Fp12<BN254>) -> Fp12<BN254> {
pub fn bn_final_exponent(f: Fp12<BN254>) -> Fp12<BN254> {
    let mut y = f.frob(6) / f;
    y = y.frob(2) * y;
    let (y_a2, y_a1, y_a0) = get_custom_powers(y);
    let (y_a2, y_a1, y_a0) = get_bn_custom_powers(y);
    y.frob(3) * y_a2.frob(2) * y_a1.frob(1) * y_a0
}

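
The comment above compresses the usual BN final-exponentiation argument; written out (standard background, with p the field characteristic and N the group order):

    (p^{12} - 1)/N = (p^6 - 1)(p^2 + 1) \cdot (p^4 - p^2 + 1)/N

The first two factors are the easy part computed by the two lines above: `f.frob(6) / f` raises f to p^6 - 1, and `y.frob(2) * y` raises the result to p^2 + 1. The hard part then uses the base-p digits a2, a1, a0:

    (p^4 - p^2 + 1)/N = p^3 + a_2 p^2 - a_1 p - a_0
    y^{(p^4 - p^2 + 1)/N} = y^{p^3} \cdot (y^{a_2})^{p^2} \cdot (y^{-a_1})^{p} \cdot y^{-a_0}

which is exactly the product `y.frob(3) * y_a2.frob(2) * y_a1.frob(1) * y_a0` returned above.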
@ -135,10 +282,10 @@ pub fn invariant_exponent(f: Fp12<BN254>) -> Fp12<BN254> {
/// y^a2, y^a1 = y^a4 * y^a2 * y^a2 * y^(-a0), y^(-a0)
///
/// Representing a4, a2, a0 in *little endian* binary, define
/// EXPS4 = [(a4[i], a2[i], a0[i]) for i in 0..len(a4)]
/// EXPS2 = [ (a2[i], a0[i]) for i in len(a4)..len(a2)]
/// EXPS0 = [ a0[i] for i in len(a2)..len(a0)]
fn get_custom_powers(f: Fp12<BN254>) -> (Fp12<BN254>, Fp12<BN254>, Fp12<BN254>) {
/// BN_EXPS4 = [(a4[i], a2[i], a0[i]) for i in 0..len(a4)]
/// BN_EXPS2 = [ (a2[i], a0[i]) for i in len(a4)..len(a2)]
/// BN_EXPS0 = [ a0[i] for i in len(a2)..len(a0)]
fn get_bn_custom_powers(f: Fp12<BN254>) -> (Fp12<BN254>, Fp12<BN254>, Fp12<BN254>) {
    let mut sq: Fp12<BN254> = f;
    let mut y0: Fp12<BN254> = Fp12::<BN254>::UNIT;
    let mut y2: Fp12<BN254> = Fp12::<BN254>::UNIT;
@ -147,7 +294,7 @@ fn get_custom_powers(f: Fp12<BN254>) -> (Fp12<BN254>, Fp12<BN254>, Fp12<BN254>)
    // proceed via standard squaring algorithm for exponentiation

    // must keep multiplying all three values: a4, a2, a0
    for (a, b, c) in EXPS4 {
    for (a, b, c) in BN_EXPS4 {
        if a {
            y4 = y4 * sq;
        }
@ -163,7 +310,7 @@ fn get_custom_powers(f: Fp12<BN254>) -> (Fp12<BN254>, Fp12<BN254>, Fp12<BN254>)
    y4 = y4 * sq;

    // must keep multiplying remaining two values: a2, a0
    for (a, b) in EXPS2 {
    for (a, b) in BN_EXPS2 {
        if a {
            y2 = y2 * sq;
        }
@ -176,7 +323,7 @@ fn get_custom_powers(f: Fp12<BN254>) -> (Fp12<BN254>, Fp12<BN254>, Fp12<BN254>)
    y2 = y2 * sq;

    // must keep multiplying final remaining value: a0
    for a in EXPS0 {
    for a in BN_EXPS0 {
        if a {
            y0 = y0 * sq;
        }
@ -192,7 +339,7 @@ fn get_custom_powers(f: Fp12<BN254>) -> (Fp12<BN254>, Fp12<BN254>, Fp12<BN254>)
    (y2, y4 * y2 * y2 * y0_inv, y0_inv)
}

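
The three loops above share a single chain of squarings across several exponents. A tiny, self-contained u64 analogue of that trick follows; the function names and the toy modulus are made up for illustration and are not part of this codebase:

    /// Compute base^e mod m for three exponents at once, reusing one shared
    /// running square `sq` and scanning the exponent bits little-endian.
    fn multi_pow(base: u64, exps: [u64; 3], m: u64) -> [u64; 3] {
        let mut sq = base % m;
        let mut acc = [1u64; 3];
        for bit in 0..64 {
            for (a, e) in acc.iter_mut().zip(exps) {
                if (e >> bit) & 1 == 1 {
                    *a = *a * sq % m;
                }
            }
            sq = sq * sq % m; // one squaring, paid for once, reused by all three
        }
        acc
    }

    /// Reference single-exponent square-and-multiply, for checking the result.
    fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
        let mut r = 1;
        b %= m;
        while e > 0 {
            if e & 1 == 1 {
                r = r * b % m;
            }
            b = b * b % m;
            e >>= 1;
        }
        r
    }

    fn main() {
        let m = 1_000_003; // small toy modulus so u64 arithmetic cannot overflow
        assert_eq!(
            multi_pow(3, [64, 82, 253], m),
            [pow_mod(3, 64, m), pow_mod(3, 82, m), pow_mod(3, 253, m)]
        );
    }

The layout of BN_EXPS4/BN_EXPS2/BN_EXPS0 serves the same purpose: each squaring of `sq` is shared by every exponent that still has bits remaining.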
const EXP: [bool; 253] = [
const BN_EXP: [bool; 253] = [
    true, false, false, false, false, false, true, true, false, false, true, false, false, false,
    true, false, false, true, true, true, false, false, true, true, true, false, false, true,
    false, true, true, true, false, false, false, false, true, false, false, true, true, false,
@ -216,7 +363,7 @@ const EXP: [bool; 253] = [

// The following constants are defined above get_custom_powers

const EXPS4: [(bool, bool, bool); 64] = [
const BN_EXPS4: [(bool, bool, bool); 64] = [
    (true, true, false),
    (true, true, true),
    (true, true, true),
@ -283,7 +430,7 @@ const EXPS4: [(bool, bool, bool); 64] = [
    (true, true, true),
];

const EXPS2: [(bool, bool); 62] = [
const BN_EXPS2: [(bool, bool); 62] = [
    (true, false),
    (true, true),
    (false, false),
@ -348,7 +495,7 @@ const EXPS2: [(bool, bool); 62] = [
    (true, true),
];

const EXPS0: [bool; 65] = [
const BN_EXPS0: [bool; 65] = [
    false, false, true, false, false, true, true, false, true, false, true, true, true, false,
    true, false, false, false, true, false, false, true, false, true, false, true, true, false,
    false, false, false, false, true, false, true, false, true, true, true, false, false, true,
@ -1,3 +1,4 @@
use std::fmt::Debug;
use std::ops::{Add, Div, Mul, Neg, Sub};

use ethereum_types::{U256, U512};
@ -6,6 +7,8 @@ use rand::Rng;

pub trait FieldExt:
    Copy
    + std::fmt::Debug
    + std::cmp::PartialEq
    + std::ops::Add<Output = Self>
    + std::ops::Neg<Output = Self>
    + std::ops::Sub<Output = Self>
@ -14,6 +17,7 @@ pub trait FieldExt:
{
    const ZERO: Self;
    const UNIT: Self;
    fn new(val: usize) -> Self;
    fn inv(self) -> Self;
}

@ -29,14 +33,6 @@ pub struct BN254 {
|
||||
pub val: U256,
|
||||
}
|
||||
|
||||
impl BN254 {
|
||||
pub fn new(val: usize) -> BN254 {
|
||||
BN254 {
|
||||
val: U256::from(val),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Distribution<BN254> for Standard {
|
||||
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BN254 {
|
||||
let xs = rng.gen::<[u64; 4]>();
|
||||
@ -90,6 +86,11 @@ impl Mul for BN254 {
|
||||
impl FieldExt for BN254 {
|
||||
const ZERO: Self = BN254 { val: U256::zero() };
|
||||
const UNIT: Self = BN254 { val: U256::one() };
|
||||
fn new(val: usize) -> BN254 {
|
||||
BN254 {
|
||||
val: U256::from(val),
|
||||
}
|
||||
}
|
||||
fn inv(self) -> BN254 {
|
||||
let exp = BN_BASE - 2;
|
||||
let mut current = self;
|
||||
@ -130,12 +131,6 @@ pub struct BLS381 {
|
||||
}
|
||||
|
||||
impl BLS381 {
|
||||
pub fn new(val: usize) -> BLS381 {
|
||||
BLS381 {
|
||||
val: U512::from(val),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn lo(self) -> U256 {
|
||||
U256(self.val.0[..4].try_into().unwrap())
|
||||
}
|
||||
@ -233,6 +228,11 @@ impl Mul for BLS381 {
|
||||
impl FieldExt for BLS381 {
|
||||
const ZERO: Self = BLS381 { val: U512::zero() };
|
||||
const UNIT: Self = BLS381 { val: U512::one() };
|
||||
fn new(val: usize) -> BLS381 {
|
||||
BLS381 {
|
||||
val: U512::from(val),
|
||||
}
|
||||
}
|
||||
fn inv(self) -> BLS381 {
|
||||
let exp = BLS_BASE - 2;
|
||||
let mut current = self;
|
||||
@ -364,6 +364,14 @@ impl<T: FieldExt> FieldExt for Fp2<T> {
|
||||
re: T::UNIT,
|
||||
im: T::ZERO,
|
||||
};
|
||||
|
||||
fn new(val: usize) -> Fp2<T> {
|
||||
Fp2 {
|
||||
re: T::new(val),
|
||||
im: T::ZERO,
|
||||
}
|
||||
}
|
||||
|
||||
/// The inverse of z is given by z'/||z||^2 since ||z||^2 = zz'
|
||||
fn inv(self) -> Fp2<T> {
|
||||
let norm_sq = self.norm_sq();
|
||||
@ -974,6 +982,14 @@ where
|
||||
t2: Fp2::<T>::ZERO,
|
||||
};
|
||||
|
||||
fn new(val: usize) -> Fp6<T> {
|
||||
Fp6 {
|
||||
t0: Fp2::<T>::new(val),
|
||||
t1: Fp2::<T>::ZERO,
|
||||
t2: Fp2::<T>::ZERO,
|
||||
}
|
||||
}
|
||||
|
||||
/// Let x_n = x^(p^n) and note that
|
||||
/// x_0 = x^(p^0) = x^1 = x
|
||||
/// (x_n)_m = (x^(p^n))^(p^m) = x^(p^n * p^m) = x^(p^(n+m)) = x_{n+m}
|
||||
@ -1039,6 +1055,13 @@ where
|
||||
z1: Fp6::<T>::ZERO,
|
||||
};
|
||||
|
||||
fn new(val: usize) -> Fp12<T> {
|
||||
Fp12 {
|
||||
z0: Fp6::<T>::new(val),
|
||||
z1: Fp6::<T>::ZERO,
|
||||
}
|
||||
}
|
||||
|
||||
/// By Galois Theory, given x: Fp12, the product
|
||||
/// phi = Prod_{i=0}^11 x_i
|
||||
/// lands in BN254, and hence the inverse of x is given by
|
||||
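
The doc comment above is cut short by the diff context; the standard completion (the product of all conjugates is the norm, which lands in the base field) is:

    x^{-1} = \left( \prod_{i=1}^{11} x_i \right) / \varphi,   where  \varphi = \prod_{i=0}^{11} x_i \in F_p.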
@ -1220,6 +1243,12 @@ impl Stack for BN254 {
|
||||
}
|
||||
}
|
||||
|
||||
impl Stack for BN254 {
|
||||
fn on_stack(self) -> Vec<U256> {
|
||||
vec![self.val]
|
||||
}
|
||||
}
|
||||
|
||||
impl Stack for BLS381 {
|
||||
const SIZE: usize = 2;
|
||||
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
use core::mem::{self, MaybeUninit};
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops::Range;
|
||||
|
||||
@ -19,6 +20,9 @@ use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
|
||||
use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
|
||||
use plonky2::recursion::cyclic_recursion::check_cyclic_proof_verifier_data;
|
||||
use plonky2::recursion::dummy_circuit::cyclic_base_proof;
|
||||
use plonky2::util::serialization::{
|
||||
Buffer, GateSerializer, IoResult, Read, WitnessGeneratorSerializer, Write,
|
||||
};
|
||||
use plonky2::util::timing::TimingTree;
|
||||
use plonky2_util::log2_ceil;
|
||||
|
||||
@ -47,6 +51,7 @@ const THRESHOLD_DEGREE_BITS: usize = 13;
|
||||
/// `degree_bits`, this contains a chain of recursive circuits for shrinking that STARK from
|
||||
/// `degree_bits` to a constant `THRESHOLD_DEGREE_BITS`. It also contains a special root circuit
|
||||
/// for combining each STARK's shrunk wrapper proof into a single proof.
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct AllRecursiveCircuits<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
@ -64,12 +69,13 @@ where
|
||||
|
||||
/// Data for the EVM root circuit, which is used to combine each STARK's shrunk wrapper proof
|
||||
/// into a single proof.
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct RootCircuitData<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
{
|
||||
circuit: CircuitData<F, C, D>,
|
||||
pub circuit: CircuitData<F, C, D>,
|
||||
proof_with_pis: [ProofWithPublicInputsTarget<D>; NUM_TABLES],
|
||||
/// For each table, various inner circuits may be used depending on the initial table size.
|
||||
/// This target holds the index of the circuit (within `final_circuits()`) that was used.
|
||||
@ -79,37 +85,182 @@ where
|
||||
cyclic_vk: VerifierCircuitTarget,
|
||||
}
|
||||
|
||||
impl<F, C, const D: usize> RootCircuitData<F, C, D>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
{
|
||||
pub fn to_buffer(
|
||||
&self,
|
||||
buffer: &mut Vec<u8>,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<()> {
|
||||
buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
|
||||
for proof in &self.proof_with_pis {
|
||||
buffer.write_target_proof_with_public_inputs(proof)?;
|
||||
}
|
||||
for index in self.index_verifier_data {
|
||||
buffer.write_target(index)?;
|
||||
}
|
||||
buffer.write_target_verifier_circuit(&self.cyclic_vk)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(
|
||||
buffer: &mut Buffer,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Self> {
|
||||
let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
|
||||
let mut proof_with_pis = Vec::with_capacity(NUM_TABLES);
|
||||
for _ in 0..NUM_TABLES {
|
||||
proof_with_pis.push(buffer.read_target_proof_with_public_inputs()?);
|
||||
}
|
||||
let mut index_verifier_data = Vec::with_capacity(NUM_TABLES);
|
||||
for _ in 0..NUM_TABLES {
|
||||
index_verifier_data.push(buffer.read_target()?);
|
||||
}
|
||||
let cyclic_vk = buffer.read_target_verifier_circuit()?;
|
||||
|
||||
Ok(Self {
|
||||
circuit,
|
||||
proof_with_pis: proof_with_pis.try_into().unwrap(),
|
||||
index_verifier_data: index_verifier_data.try_into().unwrap(),
|
||||
cyclic_vk,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Data for the aggregation circuit, which is used to compress two proofs into one. Each inner
|
||||
/// proof can be either an EVM root proof or another aggregation proof.
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct AggregationCircuitData<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
{
|
||||
circuit: CircuitData<F, C, D>,
|
||||
pub circuit: CircuitData<F, C, D>,
|
||||
lhs: AggregationChildTarget<D>,
|
||||
rhs: AggregationChildTarget<D>,
|
||||
cyclic_vk: VerifierCircuitTarget,
|
||||
}
|
||||
|
||||
impl<F, C, const D: usize> AggregationCircuitData<F, C, D>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
{
|
||||
pub fn to_buffer(
|
||||
&self,
|
||||
buffer: &mut Vec<u8>,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<()> {
|
||||
buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
|
||||
buffer.write_target_verifier_circuit(&self.cyclic_vk)?;
|
||||
self.lhs.to_buffer(buffer)?;
|
||||
self.rhs.to_buffer(buffer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(
|
||||
buffer: &mut Buffer,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Self> {
|
||||
let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
|
||||
let cyclic_vk = buffer.read_target_verifier_circuit()?;
|
||||
let lhs = AggregationChildTarget::from_buffer(buffer)?;
|
||||
let rhs = AggregationChildTarget::from_buffer(buffer)?;
|
||||
Ok(Self {
|
||||
circuit,
|
||||
lhs,
|
||||
rhs,
|
||||
cyclic_vk,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct AggregationChildTarget<const D: usize> {
|
||||
is_agg: BoolTarget,
|
||||
agg_proof: ProofWithPublicInputsTarget<D>,
|
||||
evm_proof: ProofWithPublicInputsTarget<D>,
|
||||
}
|
||||
|
||||
impl<const D: usize> AggregationChildTarget<D> {
|
||||
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
|
||||
buffer.write_target_bool(self.is_agg)?;
|
||||
buffer.write_target_proof_with_public_inputs(&self.agg_proof)?;
|
||||
buffer.write_target_proof_with_public_inputs(&self.evm_proof)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
|
||||
let is_agg = buffer.read_target_bool()?;
|
||||
let agg_proof = buffer.read_target_proof_with_public_inputs()?;
|
||||
let evm_proof = buffer.read_target_proof_with_public_inputs()?;
|
||||
Ok(Self {
|
||||
is_agg,
|
||||
agg_proof,
|
||||
evm_proof,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct BlockCircuitData<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
{
|
||||
circuit: CircuitData<F, C, D>,
|
||||
pub circuit: CircuitData<F, C, D>,
|
||||
has_parent_block: BoolTarget,
|
||||
parent_block_proof: ProofWithPublicInputsTarget<D>,
|
||||
agg_root_proof: ProofWithPublicInputsTarget<D>,
|
||||
cyclic_vk: VerifierCircuitTarget,
|
||||
}
|
||||
|
||||
impl<F, C, const D: usize> BlockCircuitData<F, C, D>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
{
|
||||
pub fn to_buffer(
|
||||
&self,
|
||||
buffer: &mut Vec<u8>,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<()> {
|
||||
buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
|
||||
buffer.write_target_bool(self.has_parent_block)?;
|
||||
buffer.write_target_proof_with_public_inputs(&self.parent_block_proof)?;
|
||||
buffer.write_target_proof_with_public_inputs(&self.agg_root_proof)?;
|
||||
buffer.write_target_verifier_circuit(&self.cyclic_vk)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(
|
||||
buffer: &mut Buffer,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Self> {
|
||||
let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
|
||||
let has_parent_block = buffer.read_target_bool()?;
|
||||
let parent_block_proof = buffer.read_target_proof_with_public_inputs()?;
|
||||
let agg_root_proof = buffer.read_target_proof_with_public_inputs()?;
|
||||
let cyclic_vk = buffer.read_target_verifier_circuit()?;
|
||||
Ok(Self {
|
||||
circuit,
|
||||
has_parent_block,
|
||||
parent_block_proof,
|
||||
agg_root_proof,
|
||||
cyclic_vk,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, C, const D: usize> AllRecursiveCircuits<F, C, D>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
@ -124,44 +275,105 @@ where
|
||||
[(); C::HCO::WIDTH]:,
|
||||
[(); C::HCI::WIDTH]:,
|
||||
{
|
||||
pub fn to_bytes(
|
||||
&self,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Vec<u8>> {
|
||||
// TODO: would be better to initialize it dynamically based on the supported max degree.
|
||||
let mut buffer = Vec::with_capacity(1 << 34);
|
||||
self.root
|
||||
.to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
|
||||
self.aggregation
|
||||
.to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
|
||||
self.block
|
||||
.to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
|
||||
for table in &self.by_table {
|
||||
table.to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
|
||||
}
|
||||
Ok(buffer)
|
||||
}
|
||||
|
||||
pub fn from_bytes(
|
||||
bytes: &[u8],
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Self> {
|
||||
let mut buffer = Buffer::new(bytes.to_vec());
|
||||
let root =
|
||||
RootCircuitData::from_buffer(&mut buffer, gate_serializer, generator_serializer)?;
|
||||
let aggregation = AggregationCircuitData::from_buffer(
|
||||
&mut buffer,
|
||||
gate_serializer,
|
||||
generator_serializer,
|
||||
)?;
|
||||
let block =
|
||||
BlockCircuitData::from_buffer(&mut buffer, gate_serializer, generator_serializer)?;
|
||||
|
||||
// Tricky use of MaybeUninit to remove the need for implementing Debug
|
||||
// for all underlying types, necessary to convert a by_table Vec to an array.
|
||||
let by_table = {
|
||||
let mut by_table: [MaybeUninit<RecursiveCircuitsForTable<F, C, D>>; NUM_TABLES] =
|
||||
unsafe { MaybeUninit::uninit().assume_init() };
|
||||
for table in &mut by_table[..] {
|
||||
let value = RecursiveCircuitsForTable::from_buffer(
|
||||
&mut buffer,
|
||||
gate_serializer,
|
||||
generator_serializer,
|
||||
)?;
|
||||
*table = MaybeUninit::new(value);
|
||||
}
|
||||
unsafe {
|
||||
mem::transmute::<_, [RecursiveCircuitsForTable<F, C, D>; NUM_TABLES]>(by_table)
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
root,
|
||||
aggregation,
|
||||
block,
|
||||
by_table,
|
||||
})
|
||||
}
|
||||
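
Because the surrounding lines are diff context, here is a minimal round-trip sketch for the pair of methods above, mirroring the `test_empty_txn_list` usage later in this change; the `?`-propagating context and the concrete `F`, `C`, `D` instantiations are assumed:

    use std::marker::PhantomData;

    use plonky2::util::serialization::{DefaultGateSerializer, DefaultGeneratorSerializer};

    // `all_circuits: AllRecursiveCircuits<F, C, D>` is assumed to already exist.
    let gate_serializer = DefaultGateSerializer;
    let generator_serializer = DefaultGeneratorSerializer {
        _phantom: PhantomData::<C>,
    };
    let bytes = all_circuits.to_bytes(&gate_serializer, &generator_serializer)?;
    let restored = AllRecursiveCircuits::<F, C, D>::from_bytes(
        &bytes,
        &gate_serializer,
        &generator_serializer,
    )?;
    assert_eq!(all_circuits, restored);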
|
||||
/// Preprocess all recursive circuits used by the system.
|
||||
pub fn new(
|
||||
all_stark: &AllStark<F, D>,
|
||||
degree_bits_range: Range<usize>,
|
||||
degree_bits_ranges: &[Range<usize>; NUM_TABLES],
|
||||
stark_config: &StarkConfig,
|
||||
) -> Self {
|
||||
let cpu = RecursiveCircuitsForTable::new(
|
||||
Table::Cpu,
|
||||
&all_stark.cpu_stark,
|
||||
degree_bits_range.clone(),
|
||||
degree_bits_ranges[0].clone(),
|
||||
&all_stark.cross_table_lookups,
|
||||
stark_config,
|
||||
);
|
||||
let keccak = RecursiveCircuitsForTable::new(
|
||||
Table::Keccak,
|
||||
&all_stark.keccak_stark,
|
||||
degree_bits_range.clone(),
|
||||
degree_bits_ranges[1].clone(),
|
||||
&all_stark.cross_table_lookups,
|
||||
stark_config,
|
||||
);
|
||||
let keccak_sponge = RecursiveCircuitsForTable::new(
|
||||
Table::KeccakSponge,
|
||||
&all_stark.keccak_sponge_stark,
|
||||
degree_bits_range.clone(),
|
||||
degree_bits_ranges[2].clone(),
|
||||
&all_stark.cross_table_lookups,
|
||||
stark_config,
|
||||
);
|
||||
let logic = RecursiveCircuitsForTable::new(
|
||||
Table::Logic,
|
||||
&all_stark.logic_stark,
|
||||
degree_bits_range.clone(),
|
||||
degree_bits_ranges[3].clone(),
|
||||
&all_stark.cross_table_lookups,
|
||||
stark_config,
|
||||
);
|
||||
let memory = RecursiveCircuitsForTable::new(
|
||||
Table::Memory,
|
||||
&all_stark.memory_stark,
|
||||
degree_bits_range,
|
||||
degree_bits_ranges[4].clone(),
|
||||
&all_stark.cross_table_lookups,
|
||||
stark_config,
|
||||
);
|
||||
@ -484,7 +696,8 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
struct RecursiveCircuitsForTable<F, C, const D: usize>
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct RecursiveCircuitsForTable<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
@ -504,6 +717,39 @@ where
|
||||
[(); C::HCO::WIDTH]:,
|
||||
[(); C::HCI::WIDTH]:,
|
||||
{
|
||||
pub fn to_buffer(
|
||||
&self,
|
||||
buffer: &mut Vec<u8>,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<()> {
|
||||
buffer.write_usize(self.by_stark_size.len())?;
|
||||
for (&size, table) in &self.by_stark_size {
|
||||
buffer.write_usize(size)?;
|
||||
table.to_buffer(buffer, gate_serializer, generator_serializer)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(
|
||||
buffer: &mut Buffer,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Self> {
|
||||
let length = buffer.read_usize()?;
|
||||
let mut by_stark_size = BTreeMap::new();
|
||||
for _ in 0..length {
|
||||
let key = buffer.read_usize()?;
|
||||
let table = RecursiveCircuitsForTableSize::from_buffer(
|
||||
buffer,
|
||||
gate_serializer,
|
||||
generator_serializer,
|
||||
)?;
|
||||
by_stark_size.insert(key, table);
|
||||
}
|
||||
Ok(Self { by_stark_size })
|
||||
}
|
||||
|
||||
fn new<S: Stark<F, D>>(
|
||||
table: Table,
|
||||
stark: &S,
|
||||
@ -549,6 +795,7 @@ where
|
||||
|
||||
/// A chain of shrinking wrapper circuits, ending with a final circuit whose `degree_bits` is
/// `THRESHOLD_DEGREE_BITS`.
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
struct RecursiveCircuitsForTableSize<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
@ -568,6 +815,67 @@ where
|
||||
[(); C::HCO::WIDTH]:,
|
||||
[(); C::HCI::WIDTH]:,
|
||||
{
|
||||
pub fn to_buffer(
|
||||
&self,
|
||||
buffer: &mut Vec<u8>,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<()> {
|
||||
buffer.write_usize(self.shrinking_wrappers.len())?;
|
||||
if !self.shrinking_wrappers.is_empty() {
|
||||
buffer.write_common_circuit_data(
|
||||
&self.shrinking_wrappers[0].circuit.common,
|
||||
gate_serializer,
|
||||
)?;
|
||||
}
|
||||
for wrapper in &self.shrinking_wrappers {
|
||||
buffer.write_prover_only_circuit_data(
|
||||
&wrapper.circuit.prover_only,
|
||||
generator_serializer,
|
||||
)?;
|
||||
buffer.write_verifier_only_circuit_data(&wrapper.circuit.verifier_only)?;
|
||||
buffer.write_target_proof_with_public_inputs(&wrapper.proof_with_pis_target)?;
|
||||
}
|
||||
self.initial_wrapper
|
||||
.to_buffer(buffer, gate_serializer, generator_serializer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(
|
||||
buffer: &mut Buffer,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Self> {
|
||||
let length = buffer.read_usize()?;
|
||||
let mut shrinking_wrappers = Vec::with_capacity(length);
|
||||
if length != 0 {
|
||||
let common = buffer.read_common_circuit_data(gate_serializer)?;
|
||||
|
||||
for _ in 0..length {
|
||||
let prover_only =
|
||||
buffer.read_prover_only_circuit_data(generator_serializer, &common)?;
|
||||
let verifier_only = buffer.read_verifier_only_circuit_data()?;
|
||||
let proof_with_pis_target = buffer.read_target_proof_with_public_inputs()?;
|
||||
shrinking_wrappers.push(PlonkWrapperCircuit {
|
||||
circuit: CircuitData {
|
||||
common: common.clone(),
|
||||
prover_only,
|
||||
verifier_only,
|
||||
},
|
||||
proof_with_pis_target,
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
let initial_wrapper =
|
||||
StarkWrapperCircuit::from_buffer(buffer, gate_serializer, generator_serializer)?;
|
||||
|
||||
Ok(Self {
|
||||
initial_wrapper,
|
||||
shrinking_wrappers,
|
||||
})
|
||||
}
|
||||
|
||||
fn new<S: Stark<F, D>>(
|
||||
table: Table,
|
||||
stark: &S,
|
||||
|
||||
@ -5,6 +5,7 @@ use anyhow::{bail, Error};
|
||||
use ethereum_types::{BigEndianHash, H256, U256, U512};
|
||||
use itertools::Itertools;
|
||||
use plonky2::field::types::Field;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::extension_tower::{FieldExt, Fp12, BLS381, BN254};
|
||||
use crate::generation::prover_input::EvmField::{
|
||||
@ -19,7 +20,7 @@ use crate::witness::util::{kernel_peek, stack_peek};
|
||||
|
||||
/// Prover input function represented as a scoped function name.
|
||||
/// Example: `PROVER_INPUT(ff::bn254_base::inverse)` is represented as `ProverInputFn([ff, bn254_base, inverse])`.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ProverInputFn(Vec<String>);
|
||||
|
||||
impl From<Vec<String>> for ProverInputFn {
|
||||
|
||||
@ -5,10 +5,12 @@ use keccak_hash::keccak;
|
||||
use plonky2::field::types::Field;
|
||||
|
||||
use crate::cpu::kernel::aggregator::KERNEL;
|
||||
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
|
||||
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
|
||||
use crate::generation::rlp::all_rlp_prover_inputs_reversed;
|
||||
use crate::generation::GenerationInputs;
|
||||
use crate::witness::memory::MemoryState;
|
||||
use crate::memory::segments::Segment;
|
||||
use crate::witness::memory::{MemoryAddress, MemoryState};
|
||||
use crate::witness::state::RegistersState;
|
||||
use crate::witness::traces::{TraceCheckpoint, Traces};
|
||||
use crate::witness::util::stack_peek;
|
||||
@ -83,6 +85,10 @@ impl<F: Field> GenerationState<F> {
|
||||
let tip_h256 = H256::from_uint(&tip_u256);
|
||||
let tip_h160 = H160::from(tip_h256);
|
||||
self.observe_address(tip_h160);
|
||||
} else if dst == KERNEL.global_labels["observe_new_contract"] {
|
||||
let tip_u256 = stack_peek(self, 0).expect("Empty stack");
|
||||
let tip_h256 = H256::from_uint(&tip_u256);
|
||||
self.observe_contract(tip_h256);
|
||||
}
|
||||
}
|
||||
|
||||
@ -93,6 +99,30 @@ impl<F: Field> GenerationState<F> {
|
||||
self.state_key_to_address.insert(state_key, address);
|
||||
}
|
||||
|
||||
/// Observe the given code hash and store the associated code.
|
||||
/// When called, the code corresponding to `codehash` should be stored in the return data.
|
||||
pub fn observe_contract(&mut self, codehash: H256) {
|
||||
if self.inputs.contract_code.contains_key(&codehash) {
|
||||
return; // Return early if the code hash has already been observed.
|
||||
}
|
||||
|
||||
let ctx = self.registers.context;
|
||||
let returndata_size_addr = MemoryAddress::new(
|
||||
ctx,
|
||||
Segment::ContextMetadata,
|
||||
ContextMetadata::ReturndataSize as usize,
|
||||
);
|
||||
let returndata_size = self.memory.get(returndata_size_addr).as_usize();
|
||||
let code = self.memory.contexts[ctx].segments[Segment::Returndata as usize].content
|
||||
[..returndata_size]
|
||||
.iter()
|
||||
.map(|x| x.as_u32() as u8)
|
||||
.collect::<Vec<_>>();
|
||||
debug_assert_eq!(keccak(&code), codehash);
|
||||
|
||||
self.inputs.contract_code.insert(codehash, code);
|
||||
}
|
||||
|
||||
pub fn checkpoint(&self) -> GenerationStateCheckpoint {
|
||||
GenerationStateCheckpoint {
|
||||
registers: self.registers,
|
||||
|
||||
@ -9,11 +9,11 @@
|
||||
|
||||
pub mod all_stark;
|
||||
pub mod arithmetic;
|
||||
pub mod bn254_pairing;
|
||||
pub mod config;
|
||||
pub mod constraint_consumer;
|
||||
pub mod cpu;
|
||||
pub mod cross_table_lookup;
|
||||
pub mod curve_pairings;
|
||||
pub mod extension_tower;
|
||||
pub mod fixed_recursive_verifier;
|
||||
pub mod generation;
|
||||
|
||||
@ -17,6 +17,7 @@ use plonky2::plonk::circuit_builder::CircuitBuilder;
|
||||
use plonky2::plonk::config::{AlgebraicHasher, Hasher};
|
||||
use plonky2::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit};
|
||||
use plonky2::util::reducing::{ReducingFactor, ReducingFactorTarget};
|
||||
use plonky2::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
use plonky2_maybe_rayon::*;
|
||||
|
||||
use crate::config::StarkConfig;
|
||||
@ -89,6 +90,30 @@ pub(crate) struct GrandProductChallengeSet<T: Copy + Eq + PartialEq + Debug> {
|
||||
pub(crate) challenges: Vec<GrandProductChallenge<T>>,
|
||||
}
|
||||
|
||||
impl GrandProductChallengeSet<Target> {
|
||||
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
|
||||
buffer.write_usize(self.challenges.len())?;
|
||||
for challenge in &self.challenges {
|
||||
buffer.write_target(challenge.beta)?;
|
||||
buffer.write_target(challenge.gamma)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
|
||||
let length = buffer.read_usize()?;
|
||||
let mut challenges = Vec::with_capacity(length);
|
||||
for _ in 0..length {
|
||||
challenges.push(GrandProductChallenge {
|
||||
beta: buffer.read_target()?,
|
||||
gamma: buffer.read_target()?,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(GrandProductChallengeSet { challenges })
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute all Z polynomials (for permutation arguments).
|
||||
pub(crate) fn compute_permutation_z_polys<F, S, const D: usize>(
|
||||
stark: &S,
|
||||
|
||||
@ -12,6 +12,7 @@ use plonky2::hash::merkle_tree::MerkleCap;
|
||||
use plonky2::iop::ext_target::ExtensionTarget;
|
||||
use plonky2::iop::target::Target;
|
||||
use plonky2::plonk::config::GenericConfig;
|
||||
use plonky2::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
use plonky2_maybe_rayon::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@ -148,6 +149,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> S
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct StarkProofTarget<const D: usize> {
|
||||
pub trace_cap: MerkleCapTarget,
|
||||
pub permutation_ctl_zs_cap: MerkleCapTarget,
|
||||
@ -157,6 +159,31 @@ pub struct StarkProofTarget<const D: usize> {
|
||||
}
|
||||
|
||||
impl<const D: usize> StarkProofTarget<D> {
|
||||
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
|
||||
buffer.write_target_merkle_cap(&self.trace_cap)?;
|
||||
buffer.write_target_merkle_cap(&self.permutation_ctl_zs_cap)?;
|
||||
buffer.write_target_merkle_cap(&self.quotient_polys_cap)?;
|
||||
buffer.write_target_fri_proof(&self.opening_proof)?;
|
||||
self.openings.to_buffer(buffer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
|
||||
let trace_cap = buffer.read_target_merkle_cap()?;
|
||||
let permutation_ctl_zs_cap = buffer.read_target_merkle_cap()?;
|
||||
let quotient_polys_cap = buffer.read_target_merkle_cap()?;
|
||||
let opening_proof = buffer.read_target_fri_proof()?;
|
||||
let openings = StarkOpeningSetTarget::from_buffer(buffer)?;
|
||||
|
||||
Ok(Self {
|
||||
trace_cap,
|
||||
permutation_ctl_zs_cap,
|
||||
quotient_polys_cap,
|
||||
openings,
|
||||
opening_proof,
|
||||
})
|
||||
}
|
||||
|
||||
/// Recover the length of the trace from a STARK proof and a STARK config.
|
||||
pub fn recover_degree_bits(&self, config: &StarkConfig) -> usize {
|
||||
let initial_merkle_proof = &self.opening_proof.query_round_proofs[0]
|
||||
@ -276,6 +303,7 @@ impl<F: RichField + Extendable<D>, const D: usize> StarkOpeningSet<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct StarkOpeningSetTarget<const D: usize> {
|
||||
pub local_values: Vec<ExtensionTarget<D>>,
|
||||
pub next_values: Vec<ExtensionTarget<D>>,
|
||||
@ -286,6 +314,34 @@ pub struct StarkOpeningSetTarget<const D: usize> {
|
||||
}
|
||||
|
||||
impl<const D: usize> StarkOpeningSetTarget<D> {
|
||||
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
|
||||
buffer.write_target_ext_vec(&self.local_values)?;
|
||||
buffer.write_target_ext_vec(&self.next_values)?;
|
||||
buffer.write_target_ext_vec(&self.permutation_ctl_zs)?;
|
||||
buffer.write_target_ext_vec(&self.permutation_ctl_zs_next)?;
|
||||
buffer.write_target_vec(&self.ctl_zs_last)?;
|
||||
buffer.write_target_ext_vec(&self.quotient_polys)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
|
||||
let local_values = buffer.read_target_ext_vec::<D>()?;
|
||||
let next_values = buffer.read_target_ext_vec::<D>()?;
|
||||
let permutation_ctl_zs = buffer.read_target_ext_vec::<D>()?;
|
||||
let permutation_ctl_zs_next = buffer.read_target_ext_vec::<D>()?;
|
||||
let ctl_zs_last = buffer.read_target_vec()?;
|
||||
let quotient_polys = buffer.read_target_ext_vec::<D>()?;
|
||||
|
||||
Ok(Self {
|
||||
local_values,
|
||||
next_values,
|
||||
permutation_ctl_zs,
|
||||
permutation_ctl_zs_next,
|
||||
ctl_zs_last,
|
||||
quotient_polys,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn to_fri_openings(&self, zero: Target) -> FriOpeningsTarget<D> {
|
||||
let zeta_batch = FriOpeningBatchTarget {
|
||||
values: self
|
||||
|
||||
@ -19,6 +19,9 @@ use plonky2::plonk::circuit_data::{CircuitConfig, CircuitData, VerifierCircuitDa
|
||||
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
|
||||
use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
|
||||
use plonky2::util::reducing::ReducingFactorTarget;
|
||||
use plonky2::util::serialization::{
|
||||
Buffer, GateSerializer, IoResult, Read, WitnessGeneratorSerializer, Write,
|
||||
};
|
||||
use plonky2::with_context;
|
||||
use plonky2_util::log2_ceil;
|
||||
|
||||
@ -157,6 +160,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
}
|
||||
|
||||
/// Represents a circuit which recursively verifies a STARK proof.
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub(crate) struct StarkWrapperCircuit<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
@ -178,6 +182,39 @@ where
|
||||
[(); C::HCO::WIDTH]:,
|
||||
[(); C::HCI::WIDTH]:,
|
||||
{
|
||||
pub fn to_buffer(
|
||||
&self,
|
||||
buffer: &mut Vec<u8>,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<()> {
|
||||
buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
|
||||
buffer.write_target_vec(&self.init_challenger_state_target)?;
|
||||
buffer.write_target(self.zero_target)?;
|
||||
self.stark_proof_target.to_buffer(buffer)?;
|
||||
self.ctl_challenges_target.to_buffer(buffer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_buffer(
|
||||
buffer: &mut Buffer,
|
||||
gate_serializer: &dyn GateSerializer<F, D>,
|
||||
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
|
||||
) -> IoResult<Self> {
|
||||
let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
|
||||
let init_challenger_state_target = buffer.read_target_vec()?;
|
||||
let zero_target = buffer.read_target()?;
|
||||
let stark_proof_target = StarkProofTarget::from_buffer(buffer)?;
|
||||
let ctl_challenges_target = GrandProductChallengeSet::from_buffer(buffer)?;
|
||||
Ok(Self {
|
||||
circuit,
|
||||
stark_proof_target,
|
||||
ctl_challenges_target,
|
||||
init_challenger_state_target: init_challenger_state_target.try_into().unwrap(),
|
||||
zero_target,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn prove(
|
||||
&self,
|
||||
proof_with_metadata: &StarkProofWithMetadata<F, C, D>,
|
||||
@ -212,6 +249,7 @@ where
|
||||
}
|
||||
|
||||
/// Represents a circuit which recursively verifies a PLONK proof.
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub(crate) struct PlonkWrapperCircuit<F, C, const D: usize>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
|
||||
@ -45,9 +45,7 @@ pub(crate) fn kernel_peek<F: Field>(
|
||||
segment: Segment,
|
||||
virt: usize,
|
||||
) -> U256 {
|
||||
state
|
||||
.memory
|
||||
.get(MemoryAddress::new(state.registers.context, segment, virt))
|
||||
state.memory.get(MemoryAddress::new(0, segment, virt))
|
||||
}
|
||||
|
||||
pub(crate) fn mem_read_with_log<F: Field>(
|
||||
|
||||
@ -28,6 +28,7 @@ type C = PoseidonGoldilocksConfig;
|
||||
|
||||
/// Test a simple token transfer to a new address.
|
||||
#[test]
|
||||
#[ignore] // Too slow to run on CI.
|
||||
fn test_basic_smart_contract() -> anyhow::Result<()> {
|
||||
init_logger();
|
||||
|
||||
|
||||
@ -1,13 +1,16 @@
|
||||
#![allow(clippy::upper_case_acronyms)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Duration;
|
||||
|
||||
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
|
||||
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
|
||||
use keccak_hash::keccak;
|
||||
use log::info;
|
||||
use plonky2::field::goldilocks_field::GoldilocksField;
|
||||
use plonky2::plonk::config::PoseidonGoldilocksConfig;
|
||||
use plonky2::util::serialization::{DefaultGateSerializer, DefaultGeneratorSerializer};
|
||||
use plonky2::util::timing::TimingTree;
|
||||
use plonky2_evm::all_stark::AllStark;
|
||||
use plonky2_evm::config::StarkConfig;
|
||||
@ -92,8 +95,43 @@ fn test_empty_txn_list() -> anyhow::Result<()> {
|
||||
|
||||
verify_proof(&all_stark, proof, &config)?;
|
||||
|
||||
let all_circuits = AllRecursiveCircuits::<F, C, D>::new(&all_stark, 9..19, &config);
|
||||
let all_circuits = AllRecursiveCircuits::<F, C, D>::new(
|
||||
&all_stark,
|
||||
&[9..15, 9..15, 9..10, 9..12, 9..18], // Minimal ranges to prove an empty list
|
||||
&config,
|
||||
);
|
||||
|
||||
{
|
||||
let gate_serializer = DefaultGateSerializer;
|
||||
let generator_serializer = DefaultGeneratorSerializer {
|
||||
_phantom: PhantomData::<C>,
|
||||
};
|
||||
|
||||
let timing = TimingTree::new("serialize AllRecursiveCircuits", log::Level::Info);
|
||||
let all_circuits_bytes = all_circuits
|
||||
.to_bytes(&gate_serializer, &generator_serializer)
|
||||
.map_err(|_| anyhow::Error::msg("AllRecursiveCircuits serialization failed."))?;
|
||||
timing.filter(Duration::from_millis(100)).print();
|
||||
info!(
|
||||
"AllRecursiveCircuits length: {} bytes",
|
||||
all_circuits_bytes.len()
|
||||
);
|
||||
|
||||
let timing = TimingTree::new("deserialize AllRecursiveCircuits", log::Level::Info);
|
||||
let all_circuits_from_bytes = AllRecursiveCircuits::<F, C, D>::from_bytes(
|
||||
&all_circuits_bytes,
|
||||
&gate_serializer,
|
||||
&generator_serializer,
|
||||
)
|
||||
.map_err(|_| anyhow::Error::msg("AllRecursiveCircuits deserialization failed."))?;
|
||||
timing.filter(Duration::from_millis(100)).print();
|
||||
|
||||
assert_eq!(all_circuits, all_circuits_from_bytes);
|
||||
}
|
||||
|
||||
let mut timing = TimingTree::new("prove", log::Level::Info);
|
||||
let root_proof = all_circuits.prove_root(&all_stark, &config, inputs, &mut timing)?;
|
||||
timing.filter(Duration::from_millis(100)).print();
|
||||
all_circuits.verify_root(root_proof.clone())?;
|
||||
|
||||
let agg_proof = all_circuits.prove_aggregation(false, &root_proof, false, &root_proof)?;
|
||||
|
||||
@ -26,6 +26,7 @@ type C = PoseidonGoldilocksConfig;
|
||||
|
||||
/// The `selfBalanceGasCost` test case from https://github.com/ethereum/tests
|
||||
#[test]
|
||||
#[ignore] // Too slow to run on CI.
|
||||
fn self_balance_gas_cost() -> anyhow::Result<()> {
|
||||
init_logger();
|
||||
|
||||
|
||||
@ -46,13 +46,13 @@ impl Hash for GoldilocksField {
|
||||
|
||||
impl Display for GoldilocksField {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Display::fmt(&self.0, f)
|
||||
Display::fmt(&self.to_canonical_u64(), f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for GoldilocksField {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Debug::fmt(&self.0, f)
|
||||
Debug::fmt(&self.to_canonical_u64(), f)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -31,6 +31,7 @@ plonky2_util = { version = "0.1.0", default-features = false }
|
||||
rand = { version = "0.8.4", default-features = false }
|
||||
rand_chacha = { version = "0.3.1", optional = true, default-features = false }
|
||||
serde = { version = "1.0", default-features = false, features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
static_assertions = { version = "1.1.0", default-features = false }
|
||||
unroll = { version = "0.1.5", default-features = false }
|
||||
|
||||
|
||||
@ -4,6 +4,7 @@
|
||||
// `[dev-dependencies]`.
|
||||
|
||||
#![feature(generic_const_exprs)]
|
||||
#![allow(incomplete_features)]
|
||||
#![allow(clippy::upper_case_acronyms)]
|
||||
|
||||
use core::num::ParseIntError;
|
||||
@ -21,6 +22,7 @@ use plonky2::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierOnl
|
||||
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, PoseidonGoldilocksConfig};
|
||||
use plonky2::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs};
|
||||
use plonky2::plonk::prover::prove;
|
||||
use plonky2::util::serialization::DefaultGateSerializer;
|
||||
use plonky2::util::timing::TimingTree;
|
||||
use plonky2_field::extension::Extendable;
|
||||
use plonky2_maybe_rayon::rayon;
|
||||
@ -180,6 +182,19 @@ where
|
||||
CompressedProofWithPublicInputs::from_bytes(compressed_proof_bytes, cd)?;
|
||||
assert_eq!(compressed_proof, compressed_proof_from_bytes);
|
||||
|
||||
let gate_serializer = DefaultGateSerializer;
|
||||
let common_data_bytes = cd
|
||||
.to_bytes(&gate_serializer)
|
||||
.map_err(|_| anyhow::Error::msg("CommonCircuitData serialization failed."))?;
|
||||
info!(
|
||||
"Common circuit data length: {} bytes",
|
||||
common_data_bytes.len()
|
||||
);
|
||||
let common_data_from_bytes =
|
||||
CommonCircuitData::<F, D>::from_bytes(common_data_bytes, &gate_serializer)
|
||||
.map_err(|_| anyhow::Error::msg("CommonCircuitData deserialization failed."))?;
|
||||
assert_eq!(cd, &common_data_from_bytes);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
79
plonky2/examples/fibonacci_serialization.rs
Normal file
79
plonky2/examples/fibonacci_serialization.rs
Normal file
@ -0,0 +1,79 @@
|
||||
#![allow(clippy::upper_case_acronyms)]
|
||||
|
||||
use std::fs;
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2::field::types::Field;
|
||||
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
|
||||
use plonky2::plonk::circuit_builder::CircuitBuilder;
|
||||
use plonky2::plonk::circuit_data::CircuitConfig;
|
||||
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
|
||||
/// An example of using Plonky2 to prove a statement of the form
|
||||
/// "I know the 100th element of the Fibonacci sequence, starting with constants a and b."
|
||||
/// When a == 0 and b == 1, this is proving knowledge of the 100th (standard) Fibonacci number.
|
||||
/// This example also serializes the circuit data and proof to JSON files.
|
||||
fn main() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
// The arithmetic circuit.
|
||||
let initial_a = builder.add_virtual_target();
|
||||
let initial_b = builder.add_virtual_target();
|
||||
let mut prev_target = initial_a;
|
||||
let mut cur_target = initial_b;
|
||||
for _ in 0..99 {
|
||||
let temp = builder.add(prev_target, cur_target);
|
||||
prev_target = cur_target;
|
||||
cur_target = temp;
|
||||
}
|
||||
|
||||
// Public inputs are the two initial values (provided below) and the result (which is generated).
|
||||
builder.register_public_input(initial_a);
|
||||
builder.register_public_input(initial_b);
|
||||
builder.register_public_input(cur_target);
|
||||
|
||||
// Provide initial values.
|
||||
let mut pw = PartialWitness::new();
|
||||
pw.set_target(initial_a, F::ZERO);
|
||||
pw.set_target(initial_b, F::ONE);
|
||||
|
||||
let data = builder.build::<C>();
|
||||
|
||||
let common_circuit_data_serialized = serde_json::to_string(&data.common).unwrap();
|
||||
fs::write("common_circuit_data.json", common_circuit_data_serialized)
|
||||
.expect("Unable to write file");
|
||||
|
||||
let verifier_only_circuit_data_serialized = serde_json::to_string(&data.verifier_only).unwrap();
|
||||
fs::write(
|
||||
"verifier_only_circuit_data.json",
|
||||
verifier_only_circuit_data_serialized,
|
||||
)
|
||||
.expect("Unable to write file");
|
||||
|
||||
let proof = data.prove(pw)?;
|
||||
|
||||
let proof_serialized = serde_json::to_string(&proof).unwrap();
|
||||
fs::write("proof_with_public_inputs.json", proof_serialized).expect("Unable to write file");
|
||||
|
||||
let proof_challenges = proof
|
||||
.get_challenges(
|
||||
proof.get_public_inputs_hash(),
|
||||
&data.verifier_only.circuit_digest,
|
||||
&data.common,
|
||||
)
|
||||
.unwrap();
|
||||
let proof_challenges_serialized = serde_json::to_string(&proof_challenges).unwrap();
|
||||
fs::write("proof_challenges.json", proof_challenges_serialized).expect("Unable to write file");
|
||||
|
||||
println!(
|
||||
"100th Fibonacci number mod |F| (starting with {}, {}) is: {}",
|
||||
proof.public_inputs[0], proof.public_inputs[1], proof.public_inputs[2]
|
||||
);
|
||||
|
||||
data.verify(proof)
|
||||
}
|
||||
@ -4,18 +4,28 @@ use core::marker::PhantomData;
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2::field::types::{PrimeField, Sample};
|
||||
use plonky2::gates::arithmetic_base::ArithmeticBaseGenerator;
|
||||
use plonky2::gates::poseidon::PoseidonGenerator;
|
||||
use plonky2::gates::poseidon_mds::PoseidonMdsGenerator;
|
||||
use plonky2::hash::hash_types::RichField;
|
||||
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
|
||||
use plonky2::iop::generator::{
|
||||
ConstantGenerator, GeneratedValues, RandomValueGenerator, SimpleGenerator,
|
||||
};
|
||||
use plonky2::iop::target::Target;
|
||||
use plonky2::iop::witness::{PartialWitness, PartitionWitness, Witness, WitnessWrite};
|
||||
use plonky2::plonk::circuit_builder::CircuitBuilder;
|
||||
use plonky2::plonk::circuit_data::CircuitConfig;
|
||||
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
use plonky2::plonk::circuit_data::{CircuitConfig, CircuitData};
|
||||
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, PoseidonGoldilocksConfig};
|
||||
use plonky2::recursion::dummy_circuit::DummyProofGenerator;
|
||||
use plonky2::util::serialization::{
|
||||
Buffer, DefaultGateSerializer, IoResult, Read, WitnessGeneratorSerializer, Write,
|
||||
};
|
||||
use plonky2::{get_generator_tag_impl, impl_generator_serializer, read_generator_impl};
|
||||
use plonky2_field::extension::Extendable;
|
||||
|
||||
/// A generator used by the prover to calculate the square root (`x`) of a given value
|
||||
/// (`x_squared`), outside of the circuit, in order to supply it as an additional public input.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Default)]
|
||||
struct SquareRootGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
x: Target,
|
||||
x_squared: Target,
|
||||
@ -25,6 +35,10 @@ struct SquareRootGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for SquareRootGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"SquareRootGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
vec![self.x_squared]
|
||||
}
|
||||
@ -37,6 +51,43 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
|
||||
out_buffer.set_target(self.x, x);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_target(self.x)?;
|
||||
dst.write_target(self.x_squared)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let x = src.read_target()?;
|
||||
let x_squared = src.read_target()?;
|
||||
Ok(Self {
|
||||
x,
|
||||
x_squared,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CustomGeneratorSerializer<C: GenericConfig<D>, const D: usize> {
|
||||
pub _phantom: PhantomData<C>,
|
||||
}
|
||||
|
||||
impl<F, C, const D: usize> WitnessGeneratorSerializer<F, D> for CustomGeneratorSerializer<C, D>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F> + 'static,
|
||||
C::Hasher: AlgebraicHasher<F, C::HCO>,
|
||||
{
|
||||
impl_generator_serializer! {
|
||||
CustomGeneratorSerializer,
|
||||
DummyProofGenerator<F, C, D>,
|
||||
ArithmeticBaseGenerator<F, D>,
|
||||
ConstantGenerator<F>,
|
||||
PoseidonGenerator<F, D>,
|
||||
PoseidonMdsGenerator<D>,
|
||||
RandomValueGenerator,
|
||||
SquareRootGenerator<F, D>
|
||||
}
|
||||
}
|
||||
|
||||
/// An example of using Plonky2 to prove a statement of the form
|
||||
@ -79,5 +130,26 @@ fn main() -> Result<()> {
|
||||
let x_squared_actual = proof.public_inputs[0];
|
||||
println!("Field element (square): {x_squared_actual}");
|
||||
|
||||
// Test serialization
|
||||
{
|
||||
let gate_serializer = DefaultGateSerializer;
|
||||
let generator_serializer = CustomGeneratorSerializer {
|
||||
_phantom: PhantomData::<C>,
|
||||
};
|
||||
|
||||
let data_bytes = data
|
||||
.to_bytes(&gate_serializer, &generator_serializer)
|
||||
.map_err(|_| anyhow::Error::msg("CircuitData serialization failed."))?;
|
||||
|
||||
let data_from_bytes = CircuitData::<F, C, D>::from_bytes(
|
||||
&data_bytes,
|
||||
&gate_serializer,
|
||||
&generator_serializer,
|
||||
)
|
||||
.map_err(|_| anyhow::Error::msg("CircuitData deserialization failed."))?;
|
||||
|
||||
assert_eq!(data, data_from_bytes);
|
||||
}
|
||||
|
||||
data.verify(proof)
|
||||
}
|
||||
|
||||
@ -1,5 +1,7 @@
|
||||
use alloc::vec::Vec;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::fri::reduction_strategies::FriReductionStrategy;
|
||||
|
||||
mod challenges;
|
||||
@ -13,7 +15,7 @@ mod validate_shape;
|
||||
pub mod verifier;
|
||||
pub mod witness_util;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
|
||||
pub struct FriConfig {
|
||||
/// `rate = 2^{-rate_bits}`.
|
||||
pub rate_bits: usize,
|
||||
@ -56,7 +58,7 @@ impl FriConfig {
|
||||
|
||||
/// FRI parameters, including generated parameters which are specific to an instance size, in
|
||||
/// contrast to `FriConfig` which is user-specified and independent of instance size.
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
|
||||
pub struct FriParams {
|
||||
/// User-specified FRI configuration.
|
||||
pub config: FriConfig,
|
||||
|
||||
@ -27,6 +27,7 @@ use crate::util::{log2_strict, reverse_bits, reverse_index_bits_in_place, transp
|
||||
pub const SALT_SIZE: usize = 4;
|
||||
|
||||
/// Represents a FRI oracle, i.e. a batch of polynomials which have been Merklized.
|
||||
#[derive(Eq, PartialEq, Debug)]
|
||||
pub struct PolynomialBatch<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
{
|
||||
pub polynomials: Vec<PolynomialCoeffs<F>>,
|
||||
|
||||
@ -33,7 +33,7 @@ pub struct FriQueryStep<
|
||||
pub merkle_proof: MerkleProof<F, HC, H>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct FriQueryStepTarget<const D: usize> {
|
||||
pub evals: Vec<ExtensionTarget<D>>,
|
||||
pub merkle_proof: MerkleProofTarget,
|
||||
@ -58,7 +58,7 @@ impl<F: RichField, HC: HashConfig, H: Hasher<F, HC>> FriInitialTreeProof<F, HC,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct FriInitialTreeProofTarget {
|
||||
pub evals_proofs: Vec<(Vec<Target>, MerkleProofTarget)>,
|
||||
}
|
||||
@ -92,7 +92,7 @@ pub struct FriQueryRound<
|
||||
pub steps: Vec<FriQueryStep<F, HC, H, D>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct FriQueryRoundTarget<const D: usize> {
|
||||
pub initial_trees_proof: FriInitialTreeProofTarget,
|
||||
pub steps: Vec<FriQueryStepTarget<D>>,
|
||||
@ -129,7 +129,7 @@ pub struct FriProof<F: RichField + Extendable<D>, HC: HashConfig, H: Hasher<F, H
|
||||
pub pow_witness: F,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct FriProofTarget<const D: usize> {
|
||||
pub commit_phase_merkle_caps: Vec<MerkleCapTarget>,
|
||||
pub query_round_proofs: Vec<FriQueryRoundTarget<D>>,
|
||||
@ -393,6 +393,7 @@ impl<F: RichField + Extendable<D>, HCO: HashConfig, H: Hasher<F, HCO>, const D:
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct FriChallenges<F: RichField + Extendable<D>, const D: usize> {
|
||||
// Scaling factor to combine polynomials.
|
||||
pub fri_alpha: F::Extension,
|
||||
|
||||
@ -4,9 +4,10 @@ use alloc::vec::Vec;
|
||||
use std::time::Instant;
|
||||
|
||||
use log::debug;
|
||||
use serde::Serialize;
|
||||
|
||||
/// A method for deciding what arity to use at each reduction layer.
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
|
||||
pub enum FriReductionStrategy {
|
||||
/// Specifies the exact sequence of arities (expressed in bits) to use.
|
||||
Fixed(Vec<usize>),
|
||||
|
||||
@ -11,6 +11,7 @@ use crate::iop::generator::{GeneratedValues, SimpleGenerator};
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
/// Computes `-x`.
|
||||
@ -370,8 +371,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct EqualityGenerator {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct EqualityGenerator {
|
||||
x: Target,
|
||||
y: Target,
|
||||
equal: BoolTarget,
|
||||
@ -379,6 +380,10 @@ struct EqualityGenerator {
|
||||
}
|
||||
|
||||
impl<F: RichField> SimpleGenerator<F> for EqualityGenerator {
|
||||
fn id(&self) -> String {
|
||||
"EqualityGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
vec![self.x, self.y]
|
||||
}
|
||||
@ -392,6 +397,21 @@ impl<F: RichField> SimpleGenerator<F> for EqualityGenerator {
|
||||
out_buffer.set_bool_target(self.equal, x == y);
|
||||
out_buffer.set_target(self.inv, inv);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_target(self.x)?;
|
||||
dst.write_target(self.y)?;
|
||||
dst.write_target_bool(self.equal)?;
|
||||
dst.write_target(self.inv)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let x = src.read_target()?;
|
||||
let y = src.read_target()?;
|
||||
let equal = src.read_target_bool()?;
|
||||
let inv = src.read_target()?;
|
||||
Ok(Self { x, y, equal, inv })
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a base arithmetic operation in the circuit. Used to memoize results.
|
||||
|
||||
@ -13,6 +13,7 @@ use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::util::bits_u64;
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
pub fn arithmetic_extension(
|
||||
@ -493,8 +494,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct QuotientGeneratorExtension<const D: usize> {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct QuotientGeneratorExtension<const D: usize> {
|
||||
numerator: ExtensionTarget<D>,
|
||||
denominator: ExtensionTarget<D>,
|
||||
quotient: ExtensionTarget<D>,
|
||||
@ -503,6 +504,10 @@ struct QuotientGeneratorExtension<const D: usize> {
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for QuotientGeneratorExtension<D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"QuotientGeneratorExtension".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
let mut deps = self.numerator.to_target_array().to_vec();
|
||||
deps.extend(self.denominator.to_target_array());
|
||||
@ -515,6 +520,23 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
let quotient = num / dem;
|
||||
out_buffer.set_extension_target(self.quotient, quotient)
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_target_ext(self.numerator)?;
|
||||
dst.write_target_ext(self.denominator)?;
|
||||
dst.write_target_ext(self.quotient)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
let numerator = src.read_target_ext()?;
let denominator = src.read_target_ext()?;
let quotient = src.read_target_ext()?;
Ok(Self {
numerator,
denominator,
quotient,
})
}
}

/// An iterator over the powers of a certain base element `b`: `b^0, b^1, b^2, ...`.
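// Illustrative sketch, not from the patch: a minimal powers iterator over u64 values mirroring
// the `b^0, b^1, b^2, ...` behaviour described above. The real type works over field elements;
// wrapping multiplication stands in for field multiplication here.
struct ToyPowers {
    base: u64,
    current: u64,
}

impl Iterator for ToyPowers {
    type Item = u64;

    fn next(&mut self) -> Option<u64> {
        let result = self.current;
        self.current = self.current.wrapping_mul(self.base);
        Some(result)
    }
}

// Usage: the first four items of `ToyPowers { base: 3, current: 1 }` are 1, 3, 9, 27.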
|
||||
|
||||
@ -7,4 +7,4 @@ pub mod random_access;
|
||||
pub mod range_check;
|
||||
pub mod select;
|
||||
pub mod split_base;
|
||||
pub(crate) mod split_join;
|
||||
pub mod split_join;
|
||||
|
||||
@ -7,7 +7,7 @@ use crate::iop::target::Target;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::util::reducing::ReducingFactorTarget;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct PolynomialCoeffsExtTarget<const D: usize>(pub Vec<ExtensionTarget<D>>);
|
||||
|
||||
impl<const D: usize> PolynomialCoeffsExtTarget<D> {
|
||||
|
||||
@ -7,6 +7,7 @@ use crate::iop::generator::{GeneratedValues, SimpleGenerator};
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
/// Checks that `x < 2^n_log` using a `BaseSumGate`.
|
||||
@ -51,8 +52,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct LowHighGenerator {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct LowHighGenerator {
|
||||
integer: Target,
|
||||
n_log: usize,
|
||||
low: Target,
|
||||
@ -60,6 +61,10 @@ struct LowHighGenerator {
|
||||
}
|
||||
|
||||
impl<F: RichField> SimpleGenerator<F> for LowHighGenerator {
|
||||
fn id(&self) -> String {
|
||||
"LowHighGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
vec![self.integer]
|
||||
}
|
||||
@ -72,4 +77,24 @@ impl<F: RichField> SimpleGenerator<F> for LowHighGenerator {
|
||||
out_buffer.set_target(self.low, F::from_canonical_u64(low));
|
||||
out_buffer.set_target(self.high, F::from_canonical_u64(high));
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
dst.write_target(self.integer)?;
dst.write_usize(self.n_log)?;
dst.write_target(self.low)?;
dst.write_target(self.high)
}

fn deserialize(src: &mut Buffer) -> IoResult<Self> {
let integer = src.read_target()?;
let n_log = src.read_usize()?;
let low = src.read_target()?;
let high = src.read_target()?;
Ok(Self {
integer,
n_log,
low,
high,
})
}
}
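// Illustrative sketch, not from the patch: the low/high split that this generator fills into the
// witness. For a value `x` and bit count `n_log`, the pieces are `low = x mod 2^n_log` and
// `high = x >> n_log`; what the circuit then constrains with them is the gate's concern.
fn split_low_high(x: u64, n_log: u32) -> (u64, u64) {
    // Low n_log bits and the remaining high bits of x (n_log assumed to be below 64).
    (x & ((1u64 << n_log) - 1), x >> n_log)
}

// Example: split_low_high(300, 8) == (44, 1), since 300 = 1 * 256 + 44.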
|
||||
|
||||
@ -13,6 +13,7 @@ use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::util::log_floor;
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
/// Split the given element into a list of targets, where each one represents a
|
||||
@ -79,13 +80,17 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct BaseSumGenerator<const B: usize> {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct BaseSumGenerator<const B: usize> {
|
||||
row: usize,
|
||||
limbs: Vec<BoolTarget>,
|
||||
}
|
||||
|
||||
impl<F: Field, const B: usize> SimpleGenerator<F> for BaseSumGenerator<B> {
|
||||
fn id(&self) -> String {
|
||||
"BaseSumGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
self.limbs.iter().map(|b| b.target).collect()
|
||||
}
|
||||
@ -102,6 +107,17 @@ impl<F: Field, const B: usize> SimpleGenerator<F> for BaseSumGenerator<B> {
|
||||
|
||||
out_buffer.set_target(Target::wire(self.row, BaseSumGate::<B>::WIRE_SUM), sum);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
dst.write_target_bool_vec(&self.limbs)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let limbs = src.read_target_bool_vec()?;
|
||||
Ok(Self { row, limbs })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -9,6 +9,7 @@ use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::util::ceil_div_usize;
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
/// Split the given integer into a list of wires, where each one represents a
|
||||
@ -55,13 +56,17 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct SplitGenerator {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct SplitGenerator {
|
||||
integer: Target,
|
||||
bits: Vec<Target>,
|
||||
}
|
||||
|
||||
impl<F: RichField> SimpleGenerator<F> for SplitGenerator {
|
||||
fn id(&self) -> String {
|
||||
"SplitGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
vec![self.integer]
|
||||
}
|
||||
@ -80,16 +85,31 @@ impl<F: RichField> SimpleGenerator<F> for SplitGenerator {
|
||||
"Integer too large to fit in given number of bits"
|
||||
);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_target(self.integer)?;
|
||||
dst.write_target_vec(&self.bits)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let integer = src.read_target()?;
|
||||
let bits = src.read_target_vec()?;
|
||||
Ok(Self { integer, bits })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct WireSplitGenerator {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct WireSplitGenerator {
|
||||
integer: Target,
|
||||
gates: Vec<usize>,
|
||||
num_limbs: usize,
|
||||
}
|
||||
|
||||
impl<F: RichField> SimpleGenerator<F> for WireSplitGenerator {
|
||||
fn id(&self) -> String {
|
||||
"WireSplitGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
vec![self.integer]
|
||||
}
|
||||
@ -120,4 +140,21 @@ impl<F: RichField> SimpleGenerator<F> for WireSplitGenerator {
|
||||
self.gates.len()
|
||||
);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_target(self.integer)?;
|
||||
dst.write_usize_vec(&self.gates)?;
|
||||
dst.write_usize(self.num_limbs)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let integer = src.read_target()?;
|
||||
let gates = src.read_usize_vec()?;
|
||||
let num_limbs = src.read_usize()?;
|
||||
Ok(Self {
|
||||
integer,
|
||||
gates,
|
||||
num_limbs,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::format;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
@ -10,7 +9,7 @@ use crate::gates::packed_util::PackedEvaluableBase;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
@ -19,6 +18,7 @@ use crate::plonk::vars::{
|
||||
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
|
||||
EvaluationVarsBasePacked,
|
||||
};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// A gate which can perform a weighted multiply-add, i.e. `result = c0 x y + c1 z`. If the config
/// supports enough routed wires, it can support several such operations in one gate.
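// Illustrative sketch, not from the patch: the arithmetic this gate constrains, written out over
// plain u64 values with wrapping ops standing in for field arithmetic. Each of the gate's
// `num_ops` operations checks `result = c0 * x * y + c1 * z` for routed wires x, y, z.
fn weighted_multiply_add(c0: u64, x: u64, y: u64, c1: u64, z: u64) -> u64 {
    c0.wrapping_mul(x).wrapping_mul(y).wrapping_add(c1.wrapping_mul(z))
}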
|
||||
@ -60,6 +60,15 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticGate
|
||||
format!("{self:?}")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.num_ops)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let num_ops = src.read_usize()?;
|
||||
Ok(Self { num_ops })
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let const_0 = vars.local_constants[0];
|
||||
let const_1 = vars.local_constants[1];
|
||||
@ -117,10 +126,10 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticGate
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
(0..self.num_ops)
|
||||
.map(|i| {
|
||||
let g: Box<dyn WitnessGenerator<F>> = Box::new(
|
||||
WitnessGeneratorRef::new(
|
||||
ArithmeticBaseGenerator {
|
||||
row,
|
||||
const_0: local_constants[0],
|
||||
@ -128,8 +137,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticGate
|
||||
i,
|
||||
}
|
||||
.adapter(),
|
||||
);
|
||||
g
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@ -172,8 +180,8 @@ impl<F: RichField + Extendable<D>, const D: usize> PackedEvaluableBase<F, D> for
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ArithmeticBaseGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ArithmeticBaseGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
row: usize,
|
||||
const_0: F,
|
||||
const_1: F,
|
||||
@ -183,6 +191,10 @@ struct ArithmeticBaseGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for ArithmeticBaseGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"ArithmeticBaseGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
[
|
||||
ArithmeticGate::wire_ith_multiplicand_0(self.i),
|
||||
@ -208,6 +220,26 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
|
||||
out_buffer.set_target(output_target, computed_output)
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
dst.write_field(self.const_0)?;
|
||||
dst.write_field(self.const_1)?;
|
||||
dst.write_usize(self.i)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let const_0 = src.read_field()?;
|
||||
let const_1 = src.read_field()?;
|
||||
let i = src.read_usize()?;
|
||||
Ok(Self {
|
||||
row,
|
||||
const_0,
|
||||
const_1,
|
||||
i,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::format;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
@ -9,12 +8,13 @@ use crate::gates::gate::Gate;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// A gate which can perform a weighted multiply-add, i.e. `result = c0 x y + c1 z`. If the config
|
||||
/// supports enough routed wires, it can support several such operations in one gate.
|
||||
@ -56,6 +56,15 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticExte
|
||||
format!("{self:?}")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.num_ops)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let num_ops = src.read_usize()?;
|
||||
Ok(Self { num_ops })
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let const_0 = vars.local_constants[0];
|
||||
let const_1 = vars.local_constants[1];
|
||||
@ -122,10 +131,10 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticExte
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
(0..self.num_ops)
|
||||
.map(|i| {
|
||||
let g: Box<dyn WitnessGenerator<F>> = Box::new(
|
||||
WitnessGeneratorRef::new(
|
||||
ArithmeticExtensionGenerator {
|
||||
row,
|
||||
const_0: local_constants[0],
|
||||
@ -133,8 +142,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticExte
|
||||
i,
|
||||
}
|
||||
.adapter(),
|
||||
);
|
||||
g
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@ -156,8 +164,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticExte
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ArithmeticExtensionGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ArithmeticExtensionGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
row: usize,
|
||||
const_0: F,
|
||||
const_1: F,
|
||||
@ -167,6 +175,10 @@ struct ArithmeticExtensionGenerator<F: RichField + Extendable<D>, const D: usize
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for ArithmeticExtensionGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"ArithmeticExtensionGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
ArithmeticExtensionGate::<D>::wires_ith_multiplicand_0(self.i)
|
||||
.chain(ArithmeticExtensionGate::<D>::wires_ith_multiplicand_1(
|
||||
@ -201,6 +213,26 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
|
||||
out_buffer.set_extension_target(output_target, computed_output)
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
dst.write_field(self.const_0)?;
|
||||
dst.write_field(self.const_1)?;
|
||||
dst.write_usize(self.i)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let const_0 = src.read_field()?;
|
||||
let const_1 = src.read_field()?;
|
||||
let i = src.read_usize()?;
|
||||
Ok(Self {
|
||||
row,
|
||||
const_0,
|
||||
const_1,
|
||||
i,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
@ -12,7 +11,7 @@ use crate::gates::packed_util::PackedEvaluableBase;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
@ -23,6 +22,7 @@ use crate::plonk::vars::{
|
||||
EvaluationVarsBasePacked,
|
||||
};
|
||||
use crate::util::log_floor;
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// A gate which can decompose a number into base B little-endian limbs.
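// Illustrative sketch, not from the patch: base-B little-endian decomposition as plain Rust. The
// gate constrains `sum = limb[0] + B*limb[1] + B^2*limb[2] + ...` with each limb in [0, B); this
// helper shows the decomposition a witness generator would compute for it.
fn to_base_b_limbs(mut x: u64, b: u64, num_limbs: usize) -> Vec<u64> {
    let mut limbs = Vec::with_capacity(num_limbs);
    for _ in 0..num_limbs {
        limbs.push(x % b); // least significant limb first
        x /= b;
    }
    limbs
}

// Example: to_base_b_limbs(11, 2, 4) == vec![1, 1, 0, 1], i.e. 11 = 1 + 2 + 8.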
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
@ -55,6 +55,15 @@ impl<F: RichField + Extendable<D>, const D: usize, const B: usize> Gate<F, D> fo
|
||||
format!("{self:?} + Base: {B}")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.num_limbs)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let num_limbs = src.read_usize()?;
|
||||
Ok(Self { num_limbs })
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let sum = vars.local_wires[Self::WIRE_SUM];
|
||||
let limbs = vars.local_wires[self.limbs()].to_vec();
|
||||
@ -109,12 +118,12 @@ impl<F: RichField + Extendable<D>, const D: usize, const B: usize> Gate<F, D> fo
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
let gen = BaseSplitGenerator::<B> {
|
||||
row,
|
||||
num_limbs: self.num_limbs,
|
||||
};
|
||||
vec![Box::new(gen.adapter())]
|
||||
vec![WitnessGeneratorRef::new(gen.adapter())]
|
||||
}
|
||||
|
||||
// 1 for the sum then `num_limbs` for the limbs.
|
||||
@ -160,13 +169,17 @@ impl<F: RichField + Extendable<D>, const D: usize, const B: usize> PackedEvaluab
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Default)]
|
||||
pub struct BaseSplitGenerator<const B: usize> {
|
||||
row: usize,
|
||||
num_limbs: usize,
|
||||
}
|
||||
|
||||
impl<F: RichField, const B: usize> SimpleGenerator<F> for BaseSplitGenerator<B> {
|
||||
fn id(&self) -> String {
|
||||
"BaseSplitGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
vec![Target::wire(self.row, BaseSumGate::<B>::WIRE_SUM)]
|
||||
}
|
||||
@ -195,6 +208,17 @@ impl<F: RichField, const B: usize> SimpleGenerator<F> for BaseSplitGenerator<B>
|
||||
out_buffer.set_target(b, b_value);
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
dst.write_usize(self.num_limbs)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let num_limbs = src.read_usize()?;
|
||||
Ok(Self { row, num_limbs })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,8 +1,9 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::field::extension::Extendable;
|
||||
use crate::field::packed::PackedField;
|
||||
use crate::gates::gate::Gate;
|
||||
@ -10,15 +11,16 @@ use crate::gates::packed_util::PackedEvaluableBase;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::WitnessGenerator;
|
||||
use crate::iop::generator::WitnessGeneratorRef;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{
|
||||
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
|
||||
EvaluationVarsBasePacked,
|
||||
};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// A gate which takes a single constant parameter and outputs that value.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct ConstantGate {
|
||||
pub(crate) num_consts: usize,
|
||||
}
|
||||
@ -40,6 +42,15 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ConstantGate {
|
||||
format!("{self:?}")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.num_consts)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let num_consts = src.read_usize()?;
|
||||
Ok(Self { num_consts })
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
(0..self.num_consts)
|
||||
.map(|i| {
|
||||
@ -75,7 +86,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ConstantGate {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generators(&self, _row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, _row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
@ -13,12 +12,13 @@ use crate::gates::gate::Gate;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::{ExtensionAlgebraTarget, ExtensionTarget};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// One of the instantiations of `InterpolationGate`: allows constraints of variable
|
||||
/// degree, up to `1<<subgroup_bits`.
|
||||
@ -45,7 +45,7 @@ use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
///
|
||||
/// Then e[N] is the final interpolated value. The non-routed wires hold every (d - 1)'th
|
||||
/// intermediate value of p and e, starting at p[d] and e[d], where d is the gate degree.
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct CosetInterpolationGate<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub subgroup_bits: usize,
|
||||
pub degree: usize,
|
||||
@ -168,6 +168,26 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for CosetInterpola
|
||||
format!("{self:?}<D={D}>")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.subgroup_bits)?;
|
||||
dst.write_usize(self.degree)?;
|
||||
dst.write_usize(self.barycentric_weights.len())?;
|
||||
dst.write_field_vec(&self.barycentric_weights)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let subgroup_bits = src.read_usize()?;
|
||||
let degree = src.read_usize()?;
|
||||
let length = src.read_usize()?;
|
||||
let barycentric_weights: Vec<F> = src.read_field_vec(length)?;
|
||||
Ok(Self {
|
||||
subgroup_bits,
|
||||
degree,
|
||||
barycentric_weights,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
@ -342,9 +362,9 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for CosetInterpola
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
let gen = InterpolationGenerator::<F, D>::new(row, self.clone());
|
||||
vec![Box::new(gen.adapter())]
|
||||
vec![WitnessGeneratorRef::new(gen.adapter())]
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
@ -366,8 +386,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for CosetInterpola
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct InterpolationGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct InterpolationGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
row: usize,
|
||||
gate: CosetInterpolationGate<F, D>,
|
||||
interpolation_domain: Vec<F>,
|
||||
@ -389,6 +409,10 @@ impl<F: RichField + Extendable<D>, const D: usize> InterpolationGenerator<F, D>
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for InterpolationGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"InterpolationGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
let local_target = |column| {
|
||||
Target::Wire(Wire {
|
||||
@ -471,6 +495,17 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
let evaluation_value_wires = self.gate.wires_evaluation_value().map(local_wire);
|
||||
out_buffer.set_ext_wires(evaluation_value_wires, computed_eval);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
self.gate.serialize(dst)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let gate = CosetInterpolationGate::deserialize(src)?;
|
||||
Ok(Self::new(row, gate))
|
||||
}
|
||||
}
|
||||
|
||||
/// Interpolate the polynomial defined by its values on an arbitrary domain at the given point `x`.
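// Illustrative sketch, not from the patch: naive Lagrange interpolation over f64, evaluating at
// `x` the unique polynomial that takes `values[i]` at `domain[i]`. The gate performs the same
// computation over extension-field elements (with precomputed barycentric weights), but the
// relation being checked is this one.
fn interpolate_at(domain: &[f64], values: &[f64], x: f64) -> f64 {
    assert_eq!(domain.len(), values.len());
    let mut result = 0.0;
    for i in 0..domain.len() {
        // Lagrange basis polynomial L_i(x) = prod_{j != i} (x - x_j) / (x_i - x_j).
        let mut basis = 1.0;
        for j in 0..domain.len() {
            if j != i {
                basis *= (x - domain[j]) / (domain[i] - domain[j]);
            }
        }
        result += values[i] * basis;
    }
    result
}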
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
@ -13,7 +12,7 @@ use crate::gates::packed_util::PackedEvaluableBase;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
@ -23,9 +22,10 @@ use crate::plonk::vars::{
|
||||
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
|
||||
EvaluationVarsBasePacked,
|
||||
};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// A gate for raising a value to a power.
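// Illustrative sketch, not from the patch: exponentiation driven by the bits of the exponent,
// which is the shape of computation the gate's `num_power_bits` wires encode. Square-and-multiply
// over u128 stands in for field arithmetic; bits are taken most significant first, and the
// modulus is assumed to fit in 64 bits so the products below cannot overflow.
fn pow_by_bits(base: u128, exponent_bits: &[bool], modulus: u128) -> u128 {
    let mut acc: u128 = 1;
    for &bit in exponent_bits {
        acc = acc * acc % modulus; // square for every bit
        if bit {
            acc = acc * base % modulus; // multiply when the bit is set
        }
    }
    acc
}

// Example: pow_by_bits(3, &[true, false, true], 1_000_000_007) == 3^5 == 243.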
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ExponentiationGate<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub num_power_bits: usize,
|
||||
pub _phantom: PhantomData<F>,
|
||||
@ -76,6 +76,15 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for Exponentiation
|
||||
format!("{self:?}<D={D}>")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.num_power_bits)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let num_power_bits = src.read_usize()?;
|
||||
Ok(Self::new(num_power_bits))
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let base = vars.local_wires[self.wire_base()];
|
||||
|
||||
@ -164,12 +173,12 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for Exponentiation
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
let gen = ExponentiationGenerator::<F, D> {
|
||||
row,
|
||||
gate: self.clone(),
|
||||
};
|
||||
vec![Box::new(gen.adapter())]
|
||||
vec![WitnessGeneratorRef::new(gen.adapter())]
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
@ -228,8 +237,8 @@ impl<F: RichField + Extendable<D>, const D: usize> PackedEvaluableBase<F, D>
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ExponentiationGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ExponentiationGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
row: usize,
|
||||
gate: ExponentiationGate<F, D>,
|
||||
}
|
||||
@ -237,6 +246,10 @@ struct ExponentiationGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for ExponentiationGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"ExponentiationGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
let local_target = |column| Target::wire(self.row, column);
|
||||
|
||||
@ -281,6 +294,17 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
let output_wire = local_wire(self.gate.wire_output());
|
||||
out_buffer.set_wire(output_wire, intermediate_values[num_power_bits - 1]);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
self.gate.serialize(dst)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let gate = ExponentiationGate::deserialize(src)?;
|
||||
Ok(Self { row, gate })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,13 +1,14 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::sync::Arc;
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
use core::any::Any;
|
||||
use core::fmt::{Debug, Error, Formatter};
|
||||
use core::hash::{Hash, Hasher};
|
||||
use core::ops::Range;
|
||||
|
||||
use hashbrown::HashMap;
|
||||
use serde::{Serialize, Serializer};
|
||||
|
||||
use crate::field::batch_util::batch_multiply_inplace;
|
||||
use crate::field::extension::{Extendable, FieldExtension};
|
||||
@ -16,16 +17,23 @@ use crate::gates::selectors::UNUSED_SELECTOR;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::WitnessGenerator;
|
||||
use crate::iop::generator::WitnessGeneratorRef;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{
|
||||
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
|
||||
};
|
||||
use crate::util::serialization::{Buffer, IoResult};
|
||||
|
||||
/// A custom gate.
|
||||
pub trait Gate<F: RichField + Extendable<D>, const D: usize>: 'static + Send + Sync {
|
||||
fn id(&self) -> String;
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()>;
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self>
|
||||
where
|
||||
Self: Sized;
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension>;
|
||||
|
||||
/// Like `eval_unfiltered`, but specialized for points in the base field.
|
||||
@ -162,7 +170,7 @@ pub trait Gate<F: RichField + Extendable<D>, const D: usize>: 'static + Send + S
|
||||
|
||||
/// The generators used to populate the witness.
|
||||
/// Note: This should return exactly 1 generator per operation in the gate.
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>>;
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>>;
|
||||
|
||||
/// The number of wires used by this gate.
|
||||
fn num_wires(&self) -> usize;
|
||||
@ -191,9 +199,20 @@ pub trait Gate<F: RichField + Extendable<D>, const D: usize>: 'static + Send + S
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper around an `Rc<Gate>` which implements `PartialEq`, `Eq` and `Hash` based on gate IDs.
|
||||
/// A wrapper trait over a `Gate`, to allow for gate serialization.
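// Illustrative sketch, not from the patch: the point of the `as_any` hook declared below is to
// let a type-erased gate be downcast back to its concrete type via `core::any::Any`. A
// standalone version of that pattern, using only the standard library:
use std::any::Any;

fn downcast_example(value: &dyn Any) {
    if let Some(n) = value.downcast_ref::<u32>() {
        println!("got a u32: {n}");
    } else {
        println!("some other concrete type");
    }
}

// Usage: downcast_example(&7u32) prints "got a u32: 7".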
|
||||
pub trait AnyGate<F: RichField + Extendable<D>, const D: usize>: Gate<F, D> {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
}
|
||||
|
||||
impl<T: Gate<F, D>, F: RichField + Extendable<D>, const D: usize> AnyGate<F, D> for T {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper around an `Arc<AnyGate>` which implements `PartialEq`, `Eq` and `Hash` based on gate IDs.
|
||||
#[derive(Clone)]
|
||||
pub struct GateRef<F: RichField + Extendable<D>, const D: usize>(pub(crate) Arc<dyn Gate<F, D>>);
|
||||
pub struct GateRef<F: RichField + Extendable<D>, const D: usize>(pub(crate) Arc<dyn AnyGate<F, D>>);
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> GateRef<F, D> {
|
||||
pub fn new<G: Gate<F, D>>(gate: G) -> GateRef<F, D> {
|
||||
@ -221,6 +240,12 @@ impl<F: RichField + Extendable<D>, const D: usize> Debug for GateRef<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> Serialize for GateRef<F, D> {
|
||||
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
serializer.serialize_str(&self.0.id())
|
||||
}
|
||||
}
|
||||
|
||||
/// Map between gate parameters and available slots.
|
||||
/// An available slot is of the form `(row, op)`, meaning the current available slot
|
||||
/// is at gate index `row` in the `op`-th operation.
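// Illustrative sketch, not from the patch: one way to picture the slot map described above, keyed
// by a gate's parameters and storing the next free `(row, op)` pair. A string key stands in for
// the real parameter key, and the row bump is only conceptual; in the actual builder a fresh gate
// row is allocated once the previous one has used all of its operations.
use std::collections::HashMap;

fn next_slot(
    slots: &mut HashMap<String, (usize, usize)>,
    key: &str,
    ops_per_gate: usize,
) -> (usize, usize) {
    let entry = slots.entry(key.to_string()).or_insert((0, 0));
    let slot = *entry;
    *entry = if slot.1 + 1 == ops_per_gate {
        (slot.0 + 1, 0)
    } else {
        (slot.0, slot.1 + 1)
    };
    slot
}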
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::format;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
@ -9,12 +8,13 @@ use crate::gates::gate::Gate;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// A gate which can perform a weighted multiplication, i.e. `result = c0 x y`. If the config
|
||||
/// supports enough routed wires, it can support several such operations in one gate.
|
||||
@ -53,6 +53,15 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for MulExtensionGa
|
||||
format!("{self:?}")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.num_ops)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let num_ops = src.read_usize()?;
|
||||
Ok(Self { num_ops })
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let const_0 = vars.local_constants[0];
|
||||
|
||||
@ -110,18 +119,17 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for MulExtensionGa
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
(0..self.num_ops)
|
||||
.map(|i| {
|
||||
let g: Box<dyn WitnessGenerator<F>> = Box::new(
|
||||
WitnessGeneratorRef::new(
|
||||
MulExtensionGenerator {
|
||||
row,
|
||||
const_0: local_constants[0],
|
||||
i,
|
||||
}
|
||||
.adapter(),
|
||||
);
|
||||
g
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@ -143,8 +151,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for MulExtensionGa
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct MulExtensionGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct MulExtensionGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
row: usize,
|
||||
const_0: F,
|
||||
i: usize,
|
||||
@ -153,6 +161,10 @@ struct MulExtensionGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for MulExtensionGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"MulExtensionGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
MulExtensionGate::<D>::wires_ith_multiplicand_0(self.i)
|
||||
.chain(MulExtensionGate::<D>::wires_ith_multiplicand_1(self.i))
|
||||
@ -178,6 +190,19 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
|
||||
out_buffer.set_extension_target(output_target, computed_output)
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
dst.write_field(self.const_0)?;
|
||||
dst.write_usize(self.i)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let const_0 = src.read_field()?;
|
||||
let i = src.read_usize()?;
|
||||
Ok(Self { row, const_0, i })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
|
||||
@ -6,9 +5,10 @@ use crate::field::extension::Extendable;
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::WitnessGenerator;
|
||||
use crate::iop::generator::WitnessGeneratorRef;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBaseBatch};
|
||||
use crate::util::serialization::{Buffer, IoResult};
|
||||
|
||||
/// A gate which does nothing.
|
||||
pub struct NoopGate;
|
||||
@ -18,6 +18,14 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for NoopGate {
|
||||
"NoopGate".into()
|
||||
}
|
||||
|
||||
fn serialize(&self, _dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize(_src: &mut Buffer) -> IoResult<Self> {
|
||||
Ok(Self)
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, _vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
Vec::new()
|
||||
}
|
||||
@ -34,7 +42,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for NoopGate {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
fn generators(&self, _row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, _row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
@ -13,12 +12,13 @@ use crate::hash::hash_types::RichField;
|
||||
use crate::hash::poseidon;
|
||||
use crate::hash::poseidon::{Poseidon, SPONGE_WIDTH};
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// Evaluates a full Poseidon permutation with 12 state elements.
|
||||
///
|
||||
@ -99,6 +99,14 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for PoseidonGate<F
|
||||
format!("{self:?}<WIDTH={SPONGE_WIDTH}>")
|
||||
}
|
||||
|
||||
fn serialize(&self, _dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize(_src: &mut Buffer) -> IoResult<Self> {
|
||||
Ok(PoseidonGate::new())
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
@ -372,12 +380,12 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for PoseidonGate<F
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
let gen = PoseidonGenerator::<F, D> {
|
||||
row,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
vec![Box::new(gen.adapter())]
|
||||
vec![WitnessGeneratorRef::new(gen.adapter())]
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
@ -401,8 +409,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for PoseidonGate<F
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PoseidonGenerator<F: RichField + Extendable<D> + Poseidon, const D: usize> {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct PoseidonGenerator<F: RichField + Extendable<D> + Poseidon, const D: usize> {
|
||||
row: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
@ -410,6 +418,10 @@ struct PoseidonGenerator<F: RichField + Extendable<D> + Poseidon, const D: usize
|
||||
impl<F: RichField + Extendable<D> + Poseidon, const D: usize> SimpleGenerator<F>
|
||||
for PoseidonGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"PoseidonGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
(0..SPONGE_WIDTH)
|
||||
.map(|i| PoseidonGate::<F, D>::wire_input(i))
|
||||
@ -499,6 +511,18 @@ impl<F: RichField + Extendable<D> + Poseidon, const D: usize> SimpleGenerator<F>
|
||||
out_buffer.set_wire(local_wire(PoseidonGate::<F, D>::wire_output(i)), state[i]);
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
Ok(Self {
|
||||
row,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
@ -13,11 +12,12 @@ use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::hash::poseidon::{Poseidon, SPONGE_WIDTH};
|
||||
use crate::iop::ext_target::{ExtensionAlgebraTarget, ExtensionTarget};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// Poseidon MDS Gate
|
||||
#[derive(Debug, Default)]
|
||||
@ -118,6 +118,14 @@ impl<F: RichField + Extendable<D> + Poseidon, const D: usize> Gate<F, D> for Pos
|
||||
format!("{self:?}<WIDTH={SPONGE_WIDTH}>")
|
||||
}
|
||||
|
||||
fn serialize(&self, _dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize(_src: &mut Buffer) -> IoResult<Self> {
|
||||
Ok(PoseidonMdsGate::new())
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let inputs: [_; SPONGE_WIDTH] = (0..SPONGE_WIDTH)
|
||||
.map(|i| vars.get_local_ext_algebra(Self::wires_input(i)))
|
||||
@ -179,9 +187,9 @@ impl<F: RichField + Extendable<D> + Poseidon, const D: usize> Gate<F, D> for Pos
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
let gen = PoseidonMdsGenerator::<D> { row };
|
||||
vec![Box::new(gen.adapter())]
|
||||
vec![WitnessGeneratorRef::new(gen.adapter())]
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
@ -201,14 +209,18 @@ impl<F: RichField + Extendable<D> + Poseidon, const D: usize> Gate<F, D> for Pos
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct PoseidonMdsGenerator<const D: usize> {
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PoseidonMdsGenerator<const D: usize> {
|
||||
row: usize,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D> + Poseidon, const D: usize> SimpleGenerator<F>
|
||||
for PoseidonMdsGenerator<D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"PoseidonMdsGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
(0..SPONGE_WIDTH)
|
||||
.flat_map(|i| {
|
||||
@ -237,6 +249,15 @@ impl<F: RichField + Extendable<D> + Poseidon, const D: usize> SimpleGenerator<F>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
Ok(Self { row })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use core::ops::Range;
|
||||
@ -10,12 +9,13 @@ use crate::gates::packed_util::PackedEvaluableBase;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::WitnessGenerator;
|
||||
use crate::iop::generator::WitnessGeneratorRef;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{
|
||||
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
|
||||
EvaluationVarsBasePacked,
|
||||
};
|
||||
use crate::util::serialization::{Buffer, IoResult};
|
||||
|
||||
/// A gate whose first four wires will be equal to a hash of public inputs.
|
||||
pub struct PublicInputGate;
|
||||
@ -31,6 +31,14 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for PublicInputGat
|
||||
"PublicInputGate".into()
|
||||
}
|
||||
|
||||
fn serialize(&self, _dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize(_src: &mut Buffer) -> IoResult<Self> {
|
||||
Ok(Self)
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
Self::wires_public_inputs_hash()
|
||||
.zip(vars.public_inputs_hash.elements)
|
||||
@ -64,7 +72,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for PublicInputGat
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generators(&self, _row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, _row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
@ -14,7 +13,7 @@ use crate::gates::packed_util::PackedEvaluableBase;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
@ -24,9 +23,10 @@ use crate::plonk::vars::{
|
||||
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
|
||||
EvaluationVarsBasePacked,
|
||||
};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// A gate for checking that a particular element of a list matches a given value.
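// Illustrative sketch, not from the patch: the selection this gate enforces, done in plain Rust.
// Given the little-endian bit decomposition of the claimed index (`bits` entries for a list of
// length 2^bits), the checked element is reached by a tree of 2-way selections, which is how a
// circuit avoids data-dependent indexing.
fn select_by_index_bits(list: &[u64], index_bits: &[bool]) -> u64 {
    assert_eq!(list.len(), 1 << index_bits.len());
    let mut layer = list.to_vec();
    for &bit in index_bits {
        // Keep either the even or the odd positions, depending on the current index bit.
        layer = layer
            .chunks(2)
            .map(|pair| if bit { pair[1] } else { pair[0] })
            .collect();
    }
    layer[0]
}

// Example: select_by_index_bits(&[10, 20, 30, 40], &[true, false]) == 20 (index 0b01 = 1).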
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
#[derive(Copy, Clone, Debug, Default)]
|
||||
pub struct RandomAccessGate<F: RichField + Extendable<D>, const D: usize> {
|
||||
/// Number of bits in the index (log2 of the list size).
|
||||
pub bits: usize,
|
||||
@ -122,6 +122,20 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for RandomAccessGa
|
||||
format!("{self:?}<D={D}>")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.bits)?;
|
||||
dst.write_usize(self.num_copies)?;
|
||||
dst.write_usize(self.num_extra_constants)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let bits = src.read_usize()?;
|
||||
let num_copies = src.read_usize()?;
|
||||
let num_extra_constants = src.read_usize()?;
|
||||
Ok(Self::new(num_copies, bits, num_extra_constants))
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
@ -238,18 +252,17 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for RandomAccessGa
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
(0..self.num_copies)
|
||||
.map(|copy| {
|
||||
let g: Box<dyn WitnessGenerator<F>> = Box::new(
|
||||
WitnessGeneratorRef::new(
|
||||
RandomAccessGenerator {
|
||||
row,
|
||||
gate: *self,
|
||||
copy,
|
||||
}
|
||||
.adapter(),
|
||||
);
|
||||
g
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@ -325,8 +338,8 @@ impl<F: RichField + Extendable<D>, const D: usize> PackedEvaluableBase<F, D>
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct RandomAccessGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct RandomAccessGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
row: usize,
|
||||
gate: RandomAccessGate<F, D>,
|
||||
copy: usize,
|
||||
@ -335,6 +348,10 @@ struct RandomAccessGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for RandomAccessGenerator<F, D>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
"RandomAccessGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
let local_target = |column| Target::wire(self.row, column);
|
||||
|
||||
@ -376,6 +393,19 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
set_local_wire(self.gate.wire_bit(i, copy), bit);
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
dst.write_usize(self.copy)?;
|
||||
self.gate.serialize(dst)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let copy = src.read_usize()?;
|
||||
let gate = RandomAccessGate::<F, D>::deserialize(src)?;
|
||||
Ok(Self { row, gate, copy })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::string::String;
|
||||
use alloc::vec::Vec;
|
||||
use alloc::{format, vec};
|
||||
@ -9,14 +8,15 @@ use crate::gates::gate::Gate;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
use crate::util::serialization::{Buffer, IoResult, Read, Write};
|
||||
|
||||
/// Computes `sum alpha^i c_i` for a vector `c_i` of `num_coeffs` elements of the base field.
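// Illustrative sketch, not from the patch: the reduction the gate computes, over f64 for
// readability. Both the direct power sum and the equivalent Horner form are shown; the gate
// additionally folds the result into a running accumulator so long vectors can span several gates.
fn reduce_with_powers(alpha: f64, coeffs: &[f64]) -> f64 {
    // sum_i alpha^i * c_i
    coeffs
        .iter()
        .enumerate()
        .map(|(i, c)| alpha.powi(i as i32) * *c)
        .sum()
}

fn reduce_with_powers_horner(alpha: f64, coeffs: &[f64]) -> f64 {
    // Same value, evaluated right to left: ((c_n * a + c_{n-1}) * a + ...) * a + c_0.
    coeffs.iter().rev().fold(0.0, |acc, c| acc * alpha + *c)
}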
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct ReducingGate<const D: usize> {
|
||||
pub num_coeffs: usize,
|
||||
}
|
||||
@ -60,6 +60,19 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingGate<D
|
||||
format!("{self:?}")
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.num_coeffs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
let num_coeffs = src.read_usize()?;
|
||||
Ok(Self::new(num_coeffs))
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let alpha = vars.get_local_ext_algebra(Self::wires_alpha());
|
||||
let old_acc = vars.get_local_ext_algebra(Self::wires_old_acc());
|
||||
@ -137,8 +150,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingGate<D
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
vec![Box::new(
|
||||
fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
|
||||
vec![WitnessGeneratorRef::new(
|
||||
ReducingGenerator {
|
||||
row,
|
||||
gate: self.clone(),
|
||||
@ -164,13 +177,17 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingGate<D
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ReducingGenerator<const D: usize> {
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ReducingGenerator<const D: usize> {
|
||||
row: usize,
|
||||
gate: ReducingGate<D>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F> for ReducingGenerator<D> {
|
||||
fn id(&self) -> String {
|
||||
"ReducingGenerator".to_string()
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
ReducingGate::<D>::wires_alpha()
|
||||
.chain(ReducingGate::<D>::wires_old_acc())
|
||||
@ -207,6 +224,17 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F> for Reduci
|
||||
}
|
||||
out_buffer.set_extension_target(output, acc);
|
||||
}
|
||||
|
||||
fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
|
||||
dst.write_usize(self.row)?;
|
||||
<ReducingGate<D> as Gate<F, D>>::serialize(&self.gate, dst)
|
||||
}
|
||||
|
||||
fn deserialize(src: &mut Buffer) -> IoResult<Self> {
|
||||
let row = src.read_usize()?;
|
||||
let gate = <ReducingGate<D> as Gate<F, D>>::deserialize(src)?;
|
||||
Ok(Self { row, gate })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@ -1,4 +1,3 @@
use alloc::boxed::Box;
use alloc::string::String;
use alloc::vec::Vec;
use alloc::{format, vec};
@ -9,14 +8,15 @@ use crate::gates::gate::Gate;
use crate::gates::util::StridedConstraintConsumer;
use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGeneratorRef};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
use crate::util::serialization::{Buffer, IoResult, Read, Write};

/// Computes `sum alpha^i c_i` for a vector `c_i` of `num_coeffs` elements of the extension field.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Default)]
pub struct ReducingExtensionGate<const D: usize> {
    pub num_coeffs: usize,
}
@ -63,6 +63,19 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingExtens
        format!("{self:?}")
    }

    fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
        dst.write_usize(self.num_coeffs)?;
        Ok(())
    }

    fn deserialize(src: &mut Buffer) -> IoResult<Self>
    where
        Self: Sized,
    {
        let num_coeffs = src.read_usize()?;
        Ok(Self::new(num_coeffs))
    }

    fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
        let alpha = vars.get_local_ext_algebra(Self::wires_alpha());
        let old_acc = vars.get_local_ext_algebra(Self::wires_old_acc());
@ -137,8 +150,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingExtens
            .collect()
    }

    fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
        vec![Box::new(
    fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<WitnessGeneratorRef<F>> {
        vec![WitnessGeneratorRef::new(
            ReducingGenerator {
                row,
                gate: self.clone(),
@ -164,13 +177,17 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingExtens
    }
}

#[derive(Debug)]
struct ReducingGenerator<const D: usize> {
#[derive(Debug, Default)]
pub struct ReducingGenerator<const D: usize> {
    row: usize,
    gate: ReducingExtensionGate<D>,
}

impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F> for ReducingGenerator<D> {
    fn id(&self) -> String {
        "ReducingExtensionGenerator".to_string()
    }

    fn dependencies(&self) -> Vec<Target> {
        ReducingExtensionGate::<D>::wires_alpha()
            .chain(ReducingExtensionGate::<D>::wires_old_acc())
@ -201,6 +218,17 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F> for Reduci
            acc = computed_acc;
        }
    }

    fn serialize(&self, dst: &mut Vec<u8>) -> IoResult<()> {
        dst.write_usize(self.row)?;
        <ReducingExtensionGate<D> as Gate<F, D>>::serialize(&self.gate, dst)
    }

    fn deserialize(src: &mut Buffer) -> IoResult<Self> {
        let row = src.read_usize()?;
        let gate = <ReducingExtensionGate<D> as Gate<F, D>>::deserialize(src)?;
        Ok(Self { row, gate })
    }
}

#[cfg(test)]
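Both doc comments above describe the gates as computing `sum alpha^i c_i`; each gate enforces this by folding one coefficient per step into a running accumulator via the Horner-style recurrence `acc <- acc * alpha + c`. A plain-field sketch of that fold follows (illustration only; the helper name and the highest-degree-first ordering are assumptions, and `plonky2::field::types::Field` is the assumed trait path).

use plonky2::field::types::Field;

// Folds `coeffs = [c_0, c_1, ..., c_{n-1}]` into `old_acc`, returning
// old_acc * alpha^n + sum_i alpha^i * c_i, which is what the gate's
// accumulator wires carry from one chunk of coefficients to the next.
fn fold_coeffs<F: Field>(old_acc: F, alpha: F, coeffs: &[F]) -> F {
    coeffs
        .iter()
        .rev() // highest-degree coefficient first (Horner's rule)
        .fold(old_acc, |acc, &c| acc * alpha + c)
}

Starting from `old_acc = F::ZERO`, the fold evaluates `sum alpha^i c_i` directly.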
@ -2,6 +2,8 @@ use alloc::vec
use alloc::vec::Vec;
use core::ops::Range;

use serde::Serialize;

use crate::field::extension::Extendable;
use crate::field::polynomial::PolynomialValues;
use crate::gates::gate::{GateInstance, GateRef};
@ -10,7 +12,7 @@ use crate::hash::hash_types::RichField
/// Placeholder value to indicate that a gate doesn't use a selector polynomial.
pub(crate) const UNUSED_SELECTOR: usize = u32::MAX as usize;

#[derive(Debug, Clone, Eq, PartialEq)]
#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
pub struct SelectorsInfo {
    pub(crate) selector_indices: Vec<usize>,
    pub(crate) groups: Vec<Range<usize>>,
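The substantive change to `SelectorsInfo` is the added `Serialize` derive, which lets the selector layout be emitted alongside other circuit data. A stand-alone sketch of what the derive provides (the mirror struct, the example values, and the use of `serde_json` are assumptions for illustration; `SelectorsInfo` itself has `pub(crate)` fields and is constructed inside the crate):

use core::ops::Range;
use serde::Serialize;

// Mirrors the shape of SelectorsInfo purely to show serde's output format.
#[derive(Debug, Clone, Eq, PartialEq, Serialize)]
struct SelectorsInfoLike {
    selector_indices: Vec<usize>,
    groups: Vec<Range<usize>>,
}

fn main() {
    let info = SelectorsInfoLike {
        selector_indices: vec![0, 0, 1],
        groups: vec![0..2, 2..3],
    };
    // serde serializes Range<usize> as { "start": .., "end": .. }, so this prints:
    // {"selector_indices":[0,0,1],"groups":[{"start":0,"end":2},{"start":2,"end":3}]}
    println!("{}", serde_json::to_string(&info).unwrap());
}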
Some files were not shown because too many files have changed in this diff.