Merge branch 'main' of github.com:mir-protocol/plonky2 into bls-fp2

Dmitry Vagner 2023-04-02 22:32:42 -07:00
commit 3b607bdee8
100 changed files with 2400 additions and 923 deletions

View File

@@ -8,3 +8,12 @@ opt-level = 3
[profile.bench]
opt-level = 3
[patch.crates-io]
plonky2_evm = { path = "evm" }
plonky2_field = { path = "field" }
plonky2_maybe_rayon = { path = "maybe_rayon" }
plonky2 = { path = "plonky2" }
starky = { path = "starky" }
plonky2_util = { path = "util" }

View File

@@ -13,7 +13,7 @@ edition = "2021"
anyhow = "1.0.40"
blake2 = "0.10.5"
env_logger = "0.10.0"
eth_trie_utils = "0.4.1"
eth_trie_utils = "0.5.0"
ethereum-types = "0.14.0"
hex = { version = "0.4.3", optional = true }
hex-literal = "0.3.4"

View File

@@ -24,6 +24,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/core/call.asm"),
include_str!("asm/core/create.asm"),
include_str!("asm/core/create_addresses.asm"),
include_str!("asm/core/create_contract_account.asm"),
include_str!("asm/core/gas.asm"),
include_str!("asm/core/intrinsic_gas.asm"),
include_str!("asm/core/invalid.asm"),
@@ -36,6 +37,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/core/transfer.asm"),
include_str!("asm/core/util.asm"),
include_str!("asm/core/access_lists.asm"),
include_str!("asm/core/selfdestruct_list.asm"),
include_str!("asm/curve/bls381/util.asm"),
include_str!("asm/curve/bn254/curve_arithmetic/constants.asm"),
include_str!("asm/curve/bn254/curve_arithmetic/curve_add.asm"),
@@ -111,6 +113,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/rlp/num_bytes.asm"),
include_str!("asm/rlp/read_to_memory.asm"),
include_str!("asm/shift.asm"),
include_str!("asm/signed.asm"),
include_str!("asm/transactions/common_decoding.asm"),
include_str!("asm/transactions/router.asm"),
include_str!("asm/transactions/type_0.asm"),

View File

@@ -86,15 +86,42 @@ global extcodesize:
// Pre stack: kexit_info, address, dest_offset, offset, size
// Post stack: (empty)
global sys_extcodecopy:
// TODO: Call %update_mem_bytes to expand memory.
// TODO: Charge other gas.
%stack (kexit_info, address, dest_offset, offset, size)
-> (address, dest_offset, offset, size, kexit_info)
%u256_to_addr DUP1 %insert_accessed_addresses POP // TODO: Use return value in gas calculation.
%u256_to_addr DUP1 %insert_accessed_addresses
// stack: cold_access, address, dest_offset, offset, size, kexit_info
PUSH @GAS_COLDACCOUNTACCESS_MINUS_WARMACCESS
MUL
PUSH @GAS_WARMACCESS
ADD
// stack: Gaccess, address, dest_offset, offset, size, kexit_info
DUP5
// stack: size, Gaccess, address, dest_offset, offset, size, kexit_info
ISZERO %jumpi(sys_extcodecopy_empty)
// stack: Gaccess, address, dest_offset, offset, size, kexit_info
DUP5 %num_bytes_to_num_words %mul_const(@GAS_COPY) ADD
%stack (gas, address, dest_offset, offset, size, kexit_info) -> (gas, kexit_info, address, dest_offset, offset, size)
%charge_gas
%stack (kexit_info, address, dest_offset, offset, size) -> (dest_offset, size, kexit_info, address, dest_offset, offset, size)
ADD // TODO: check for overflow, see discussion here https://github.com/mir-protocol/plonky2/pull/930/files/a4ea0965d79561c345e2f77836c07949c7e0bc69#r1143630253
// stack: expanded_num_bytes, kexit_info, address, dest_offset, offset, size
DUP1 %ensure_reasonable_offset
%update_mem_bytes
%stack (kexit_info, address, dest_offset, offset, size) -> (address, dest_offset, offset, size, kexit_info)
%extcodecopy
// stack: kexit_info
EXIT_KERNEL
sys_extcodecopy_empty:
%stack (Gaccess, address, dest_offset, offset, size, kexit_info) -> (Gaccess, kexit_info)
%charge_gas
EXIT_KERNEL
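For reference, a minimal Rust sketch of the gas rule charged above, assuming the EIP-2929 constants (GAS_WARMACCESS = 100, GAS_COLDACCOUNTACCESS = 2600) and GAS_COPY = 3 per word; the function name is hypothetical:

fn extcodecopy_gas(cold_access: bool, size: u64) -> u64 {
    // Gaccess = GAS_WARMACCESS + cold_access * (GAS_COLDACCOUNTACCESS - GAS_WARMACCESS),
    // exactly as the MUL/ADD sequence above computes it.
    let g_access = 100 + if cold_access { 2600 - 100 } else { 0 };
    if size == 0 {
        g_access // the sys_extcodecopy_empty path charges only the access cost
    } else {
        g_access + 3 * ((size + 31) / 32) // plus GAS_COPY per 32-byte word copied
    }
}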
// Pre stack: address, dest_offset, offset, size, retdest
// Post stack: (empty)
global extcodecopy:

View File

@@ -169,6 +169,14 @@ global after_call_instruction:
// stack: new_ctx
%endmacro
%macro set_new_ctx_calldata_size
// stack: calldata_size, new_ctx
%stack (calldata_size, new_ctx)
-> (new_ctx, @SEGMENT_CONTEXT_METADATA, @CTX_METADATA_CALLDATA_SIZE, calldata_size, new_ctx)
MSTORE_GENERAL
// stack: new_ctx
%endmacro
%macro set_new_ctx_gas_limit
// stack: gas_limit, new_ctx
%stack (gas_limit, new_ctx)

View File

@@ -1,42 +1,25 @@
// TODO: This file needs to be cleaned up.
// `create` is no longer being used for contract-creation txns,
// so it can be inlined. Also need to set metadata on new ctx.
// The CREATE syscall.
// The CREATE syscall. Address will be
// address = KEC(RLP(sender, nonce))[12:]
//
// Pre stack: kexit_info, value, code_offset, code_len
// Post stack: address
global sys_create:
// stack: kexit_info, value, code_offset, code_len
// TODO: Charge gas.
%stack (kexit_info, value, code_offset, code_len)
-> (value, 0, @SEGMENT_MAIN_MEMORY, code_offset, code_len)
-> (sys_create_got_address, value, code_offset, code_len, kexit_info)
%address
// stack: sender, value, CODE_ADDR: 3, code_len, sys_create_finish, kexit_info
%jump(create)
sys_create_finish:
// stack: address, kexit_info
SWAP1
EXIT_KERNEL
// Create a new contract account with the traditional address scheme, i.e.
// address = KEC(RLP(sender, nonce))[12:]
// This can be used both for the CREATE instruction and for contract-creation
// transactions.
//
// Pre stack: sender, endowment, CODE_ADDR: 3, code_len, retdest
// Post stack: address
// Note: CODE_ADDR refers to a (context, segment, offset) tuple.
global create:
// stack: sender, endowment, CODE_ADDR, code_len, retdest
// stack: sender, sys_create_got_address, value, code_offset, code_len, kexit_info
DUP1 %nonce
// stack: nonce, sender, endowment, CODE_ADDR, code_len, retdest
// Call get_create_address and have it return to create_inner.
%stack (nonce, sender)
-> (sender, nonce, create_inner, sender)
// stack: nonce, sender, sys_create_got_address, value, code_offset, code_len, kexit_info
SWAP1
// stack: sender, nonce, sys_create_got_address, value, code_offset, code_len, kexit_info
%jump(get_create_address)
sys_create_got_address:
// stack: address, value, code_offset, code_len, kexit_info
%jump(create_common)
// CREATE2; see EIP-1014. Address will be
// The CREATE2 syscall; see EIP-1014. Address will be
// address = KEC(0xff || sender || salt || code_hash)[12:]
//
// Pre stack: kexit_info, value, code_offset, code_len, salt
@@ -45,75 +28,100 @@ global sys_create2:
// stack: kexit_info, value, code_offset, code_len, salt
// TODO: Charge gas.
SWAP4
%stack (salt) -> (salt, sys_create2_got_address)
// stack: salt, sys_create2_got_address, value, code_offset, code_len, kexit_info
%stack (salt) -> (salt, create_common)
// stack: salt, create_common, value, code_offset, code_len, kexit_info
// Hash the code.
DUP5 // code_len
DUP5 // code_offset
PUSH @SEGMENT_MAIN_MEMORY
GET_CONTEXT
KECCAK_GENERAL
// stack: hash, salt, sys_create2_got_address, value, code_offset, code_len, kexit_info
// stack: hash, salt, create_common, value, code_offset, code_len, kexit_info
%address
// stack: sender, hash, salt, sys_create2_got_address, value, code_offset, code_len, kexit_info
// stack: sender, hash, salt, create_common, value, code_offset, code_len, kexit_info
%jump(get_create2_address)
sys_create2_got_address:
// stack: address, value, code_offset, code_len, kexit_info
%address
%stack (sender, address, value, code_offset, code_len, kexit_info)
-> (address, sender, value, 0, @SEGMENT_MAIN_MEMORY, code_offset, code_len,
sys_create2_finish, kexit_info)
%jump(create_inner)
sys_create2_finish:
// stack: address, kexit_info
SWAP1
EXIT_KERNEL
// Pre stack: address, sender, endowment, CODE_ADDR, code_len, retdest
// Pre stack: address, value, code_offset, code_len, kexit_info
// Post stack: address
// Note: CODE_ADDR refers to a (context, segment, offset) tuple.
global create_inner:
// stack: address, sender, endowment, CODE_ADDR, code_len, retdest
global create_common:
// stack: address, value, code_offset, code_len, kexit_info
DUP1 %insert_accessed_addresses_no_return
%stack (address, sender, endowment)
-> (sender, address, endowment, sender, address)
%transfer_eth
// stack: transfer_eth_status, sender, address, CODE_ADDR, code_len, retdest
%jumpi(fault_exception)
// stack: sender, address, CODE_ADDR, code_len, retdest
// Increment the sender's nonce.
%address
%increment_nonce
// stack: address, CODE_ADDR, code_len, retdest
// stack: address, value, code_offset, code_len, kexit_info
// Deduct value from the caller.
DUP2
%address
// stack: sender, value, address, value, code_offset, code_len, kexit_info
%deduct_eth
// stack: deduct_eth_status, address, value, code_offset, code_len, kexit_info
%jumpi(fault_exception)
// stack: address, value, code_offset, code_len, kexit_info
// Create the new contract account in the state trie.
DUP1 DUP3
// stack: value, address, address, value, code_offset, code_len, kexit_info
%create_contract_account
// stack: status, address, value, code_offset, code_len, kexit_info
%jumpi(fault_exception)
// stack: address, value, code_offset, code_len, kexit_info
%create_context
// stack: new_ctx, address, CODE_ADDR, code_len, retdest
%stack (new_ctx, address, src_ctx, src_segment, src_offset, code_len)
-> (new_ctx, @SEGMENT_CODE, 0,
src_ctx, src_segment, src_offset,
code_len, run_constructor,
new_ctx, address)
// stack: new_ctx, address, value, code_offset, code_len, kexit_info
GET_CONTEXT
// stack: src_ctx, new_ctx, address, value, code_offset, code_len, kexit_info
// Copy the code from txdata to the new context's code segment.
%stack (src_ctx, new_ctx, address, value, code_offset, code_len)
-> (new_ctx, @SEGMENT_CODE, 0, // DST
src_ctx, @SEGMENT_MAIN_MEMORY, code_offset, // SRC
code_len,
run_constructor,
new_ctx, value, address)
%jump(memcpy)
run_constructor:
// stack: new_ctx, address, retdest
// At this point, the initialization code has been loaded.
// Save our return address in memory, so we'll be in `after_constructor`
// after the new context returns.
// Note: We can't use %mstore_context_metadata because we're writing to
// memory owned by the new context, not the current one.
%stack (new_ctx) -> (new_ctx, @SEGMENT_CONTEXT_METADATA,
@CTX_METADATA_PARENT_PC, after_constructor, new_ctx)
MSTORE_GENERAL
// stack: new_ctx, address, retdest
// stack: new_ctx, value, address, kexit_info
%set_new_ctx_value
// stack: new_ctx, address, kexit_info
// Now, switch to the new context and go to usermode with PC=0.
SET_CONTEXT
// stack: (empty, since we're in the new context)
PUSH 0
EXIT_KERNEL
// Each line in the block below does not change the stack.
DUP2 %set_new_ctx_addr
%address %set_new_ctx_caller
%set_new_ctx_parent_ctx
%set_new_ctx_parent_pc(after_constructor)
// stack: new_ctx, address, kexit_info
// All but 1/64 of the sender's remaining gas goes to the constructor.
SWAP2
// stack: kexit_info, address, new_ctx
%drain_all_but_one_64th_gas
%stack (kexit_info, drained_gas, address, new_ctx) -> (drained_gas, new_ctx, address, kexit_info)
%set_new_ctx_gas_limit
// stack: new_ctx, address, kexit_info
%enter_new_ctx
// (Old context) stack: new_ctx, address, kexit_info
after_constructor:
// stack: address, retdest
// TODO: If code was returned, store it in the account.
SWAP1
JUMP
// stack: success, leftover_gas, new_ctx, address, kexit_info
SWAP2
// stack: new_ctx, leftover_gas, success, address, kexit_info
POP // TODO: Ignoring new_ctx for now, but we will need it to store code that was returned, if any.
// stack: leftover_gas, success, address, kexit_info
%shl_const(192)
// stack: leftover_gas << 192, success, address, kexit_info
SWAP2
// stack: address, success, leftover_gas << 192, kexit_info
MUL
// stack: address_if_success, leftover_gas << 192, kexit_info
SWAP2
// stack: kexit_info, leftover_gas << 192, address_if_success
ADD
// stack: kexit_info, address_if_success
EXIT_KERNEL

View File

@@ -19,7 +19,8 @@ global get_create_address:
PUSH 0 // context
// stack: RLP_ADDR: 3, rlp_len, retdest
KECCAK_GENERAL
%mod_const(0x10000000000000000000000000000000000000000) // 2^160
// stack: hash, retdest
%u256_to_addr
// stack: address, retdest
%observe_new_address
SWAP1
@@ -54,8 +55,9 @@ get_create2_address_finish:
POP
%stack (retdest) -> (0, @SEGMENT_KERNEL_GENERAL, 0, 85, retdest) // context, segment, offset, len
KECCAK_GENERAL
// stack: hash, retdest
%u256_to_addr
// stack: address, retdest
%mod_const(0x10000000000000000000000000000000000000000) // 2^160
%observe_new_address
SWAP1
JUMP
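Host-side, the two address schemes implemented above can be sketched as follows, using the rlp and keccak_hash crates this repo's tests already depend on. A sketch only: the function names are hypothetical, and the salt (a stack word in the kernel) is shown as an H256 for simplicity.

use ethereum_types::{Address, H256, U256};
use keccak_hash::keccak;

fn create_address(sender: Address, nonce: U256) -> Address {
    let mut stream = rlp::RlpStream::new_list(2);
    stream.append(&sender);
    stream.append(&nonce);
    let hash = keccak(stream.out()); // KEC(RLP(sender, nonce))
    Address::from_slice(&hash.as_bytes()[12..]) // keep the low 20 bytes, as %u256_to_addr does
}

fn create2_address(sender: Address, salt: H256, code_hash: H256) -> Address {
    let mut buf = Vec::with_capacity(85); // 1 + 20 + 32 + 32 bytes, the kernel's 85-byte buffer
    buf.push(0xff);
    buf.extend_from_slice(sender.as_bytes());
    buf.extend_from_slice(salt.as_bytes());
    buf.extend_from_slice(code_hash.as_bytes());
    let hash = keccak(buf); // KEC(0xff || sender || salt || code_hash)
    Address::from_slice(&hash.as_bytes()[12..])
}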

View File

@@ -0,0 +1,51 @@
// Create a smart contract account with the given address and the given endowment value.
// Pre stack: value, address
// Post stack: status
%macro create_contract_account
// stack: value, address
DUP2 %mpt_read_state_trie
// stack: existing_account_ptr, value, address
// If the account doesn't exist, there's no need to check its balance or nonce,
// so we can skip ahead, setting existing_balance = existing_account_ptr = 0.
DUP1 ISZERO %jumpi(%%do_insert)
// stack: existing_account_ptr, value, address
DUP1 %mload_trie_data // nonce = account[0]
// stack: nonce, existing_account_ptr, value, address
%jumpi(%%error_nonzero_nonce)
// stack: existing_account_ptr, value, address
%increment %mload_trie_data // balance = account[1]
%%do_insert:
// stack: existing_balance, value, address
ADD
// stack: new_acct_value, address
// Write the new account's data to MPT data, and get a pointer to it.
%get_trie_data_size
// stack: account_ptr, new_acct_value, address
PUSH 1 %append_to_trie_data // nonce = 1
// stack: account_ptr, new_acct_value, address
SWAP1 %append_to_trie_data // balance = new_acct_value
// stack: account_ptr, address
PUSH 0 %append_to_trie_data // storage_root = nil
// stack: account_ptr, address
PUSH @EMPTY_STRING_HASH %append_to_trie_data // code_hash = keccak('')
// stack: account_ptr, address
SWAP1
// stack: address, account_ptr
%addr_to_state_key
// stack: state_key, account_ptr
%mpt_insert_state_trie
// stack: (empty)
PUSH 0 // success
%jump(%%end)
// If the nonce is nonzero, that means a contract has already been deployed to this address.
// (This should be impossible with contract creation transactions or CREATE, but possible with CREATE2.)
// So we return 1 to indicate an error.
%%error_nonzero_nonce:
%stack (existing_account_ptr, value, address) -> (1)
%%end:
// stack: status
%endmacro
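In terms of the AccountRlp struct used by the Rust tests in this commit, the record the macro appends to the trie data is roughly the following; a sketch, with a hypothetical helper name:

use ethereum_types::{H256, U256};
use keccak_hash::keccak;

// The four trie-data words written above: [nonce, balance, storage_root, code_hash],
// with any pre-existing balance at the address folded into the new balance.
fn new_contract_account(existing_balance: U256, value: U256) -> (U256, U256, U256, H256) {
    let nonce = U256::one();                // nonce = 1 for a fresh contract account
    let balance = existing_balance + value; // existing_balance is 0 if no account existed
    let storage_root = U256::zero();        // nil trie-data pointer, i.e. empty storage
    let code_hash = keccak([0u8; 0]);       // keccak(''), the @EMPTY_STRING_HASH constant
    (nonce, balance, storage_root, code_hash)
}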

View File

@@ -56,3 +56,45 @@ global sys_gasprice:
// stack: gas_price, kexit_info
SWAP1
EXIT_KERNEL
// Checks how much gas is remaining in this context, given the current kexit_info.
%macro leftover_gas
// stack: kexit_info
%shr_const(192)
// stack: gas_used
%mload_context_metadata(@CTX_METADATA_GAS_LIMIT)
// stack: gas_limit, gas_used
SWAP1
// stack: gas_used, gas_limit
DUP2 DUP2 LT
// stack: gas_used < gas_limit, gas_used, gas_limit
SWAP2
// stack: gas_limit, gas_used, gas_used < gas_limit
SUB
// stack: gas_limit - gas_used, gas_used < gas_limit
MUL
// stack: leftover_gas = (gas_limit - gas_used) * (gas_used < gas_limit)
%endmacro
// Given the current kexit_info, drains all but one 64th of its remaining gas.
// Returns how much gas was drained.
%macro drain_all_but_one_64th_gas
// stack: kexit_info
DUP1 %leftover_gas
// stack: leftover_gas, kexit_info
%all_but_one_64th
// stack: all_but_one_64th, kexit_info
%stack (all_but_one_64th, kexit_info) -> (all_but_one_64th, kexit_info, all_but_one_64th)
%charge_gas
// stack: kexit_info, drained_gas
%endmacro
// This is L(n), the "all but one 64th" function in the Yellow Paper, i.e.
// L(n) = n - floor(n / 64)
%macro all_but_one_64th
// stack: n
DUP1 %div_const(64)
// stack: floor(n / 64), n
SWAP1 SUB
// stack: n - floor(n / 64)
%endmacro
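A minimal Rust sketch of the two macros above, on plain integers (the kexit_info word packs gas_used into its top 64 bits, hence the shift by 192 in the kernel):

fn leftover_gas(gas_limit: u64, gas_used: u64) -> u64 {
    // Branchless max(gas_limit - gas_used, 0), exactly as the macro computes it:
    // the (gas_used < gas_limit) flag zeroes the difference when it would underflow.
    gas_limit.wrapping_sub(gas_used) * u64::from(gas_used < gas_limit)
}

fn all_but_one_64th(n: u64) -> u64 {
    n - n / 64 // L(n) from the Yellow Paper
}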

View File

@@ -20,11 +20,44 @@ global process_normalized_txn:
%mload_txn_field(@TXN_FIELD_GAS_LIMIT)
%assert_ge
// TODO: Check that txn nonce matches account nonce.
// TODO: Assert nonce is correct.
// TODO: Assert sender has no code.
// TODO: Assert sender balance >= gas_limit * gas_price + value.
// TODO: Assert chain ID matches block metadata?
%mload_txn_field(@TXN_FIELD_ORIGIN)
// stack: sender, retdest
// Check that txn nonce matches account nonce.
DUP1 %nonce
// stack: sender_nonce, sender, retdest
%mload_txn_field(@TXN_FIELD_NONCE)
// stack: tx_nonce, sender_nonce, sender, retdest
%assert_eq
// stack: sender, retdest
// Assert sender has no code.
DUP1 %ext_code_empty %assert_nonzero
// stack: sender, retdest
// Assert sender balance >= gas_limit * gas_price + value.
%balance
// stack: sender_balance, retdest
%mload_txn_field(@TXN_FIELD_COMPUTED_FEE_PER_GAS)
%mload_txn_field(@TXN_FIELD_GAS_LIMIT)
MUL
%mload_txn_field(@TXN_FIELD_VALUE)
ADD
%assert_le
// stack: retdest
// Assert chain ID matches block metadata
%mload_txn_field(@TXN_FIELD_CHAIN_ID_PRESENT)
// stack: chain_id_present, retdest
DUP1
%mload_txn_field(@TXN_FIELD_CHAIN_ID)
// stack: tx_chain_id, chain_id_present, chain_id_present, retdest
MUL SWAP1
// stack: chain_id_present, filtered_tx_chain_id, retdest
%mload_global_metadata(@GLOBAL_METADATA_BLOCK_CHAIN_ID)
MUL
// stack: filtered_block_chain_id, filtered_tx_chain_id, retdest
%assert_eq
// stack: retdest
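The MULs above implement a branchless check: both chain IDs are multiplied by the 0/1 presence flag, so pre-EIP-155 transactions (no chain ID) reduce the assertion to 0 == 0. A sketch:

fn check_chain_id(chain_id_present: u64, tx_chain_id: u64, block_chain_id: u64) {
    // chain_id_present is 0 or 1; when 0, both sides collapse to zero.
    assert_eq!(
        chain_id_present * block_chain_id, // filtered_block_chain_id
        chain_id_present * tx_chain_id,    // filtered_tx_chain_id
    );
}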
global buy_gas:
@@ -68,22 +101,15 @@ global process_contract_creation_txn:
%jumpi(panic)
// stack: address, retdest
// Write the new account's data to MPT data, and get a pointer to it.
%get_trie_data_size
// stack: account_ptr, address, retdest
PUSH 1 %append_to_trie_data // nonce = 1
// stack: account_ptr, address, retdest
DUP2 %balance %mload_txn_field(@TXN_FIELD_VALUE) ADD %append_to_trie_data // balance = old_balance + txn_value
// stack: account_ptr, address, retdest
PUSH 0 %append_to_trie_data // storage_root = nil
// stack: account_ptr, address, retdest
PUSH @EMPTY_STRING_HASH %append_to_trie_data // code_hash = keccak('')
// stack: account_ptr, address, retdest
DUP2
// stack: address, account_ptr, address, retdest
%addr_to_state_key
// stack: state_key, account_ptr, address, retdest
%mpt_insert_state_trie
// Create the new contract account in the state trie.
DUP1
%mload_txn_field(@TXN_FIELD_VALUE)
// stack: value, address, address, retdest
%create_contract_account
// stack: status, address, retdest
// It should be impossible to create address collisions with a contract creation txn,
// since the address was derived from nonce, unlike with CREATE2.
%jumpi(panic)
// stack: address, retdest
%create_context
@@ -120,6 +146,7 @@ global process_contract_creation_txn_after_constructor:
POP // TODO: Success will go into the receipt when we support that.
// stack: leftover_gas, new_ctx, address, retdest
%pay_coinbase_and_refund_sender
// TODO: Delete accounts in self-destruct list and empty touched addresses.
// stack: new_ctx, address, retdest
POP
POP
@@ -193,8 +220,14 @@ global process_message_txn_code_loaded:
%non_intrinisic_gas %set_new_ctx_gas_limit
// stack: new_ctx, retdest
// TODO: Copy TXN_DATA to CALLDATA
// Set calldatasize and copy txn data to calldata.
%mload_txn_field(@TXN_FIELD_DATA_LEN)
%stack (calldata_size, new_ctx, retdest) -> (calldata_size, new_ctx, calldata_size, retdest)
%set_new_ctx_calldata_size
%stack (new_ctx, calldata_size, retdest) -> (new_ctx, @SEGMENT_CALLDATA, 0, 0, @SEGMENT_TXN_DATA, 0, calldata_size, process_message_txn_code_loaded_finish, new_ctx, retdest)
%jump(memcpy)
process_message_txn_code_loaded_finish:
%enter_new_ctx
// (Old context) stack: new_ctx, retdest
@@ -203,6 +236,7 @@ global process_message_txn_after_call:
POP // TODO: Success will go into the receipt when we support that.
// stack: leftover_gas, new_ctx, retdest
%pay_coinbase_and_refund_sender
// TODO: Delete accounts in self-destruct list and empty touched addresses.
// stack: new_ctx, retdest
POP
JUMP

View File

@@ -0,0 +1,12 @@
/// Self-destruct list.
/// Implemented as an append-only array, with the length stored in the global metadata.
%macro insert_selfdestruct_list
// stack: addr
%mload_global_metadata(@GLOBAL_METADATA_SELFDESTRUCT_LIST_LEN)
%stack (len, addr) -> (len, addr, len)
%mstore_kernel(@SEGMENT_SELFDESTRUCT_LIST) // Store new address at the end of the array.
// stack: len
%increment
%mstore_global_metadata(@GLOBAL_METADATA_SELFDESTRUCT_LIST_LEN) // Store new length.
%endmacro
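Modulo memory layout, the macro behaves like a plain append to a growable list whose length is tracked separately in global metadata; a host-side model, as a sketch:

struct SelfdestructList {
    addrs: Vec<ethereum_types::Address>, // plays the role of @SEGMENT_SELFDESTRUCT_LIST
}

impl SelfdestructList {
    fn insert(&mut self, addr: ethereum_types::Address) {
        self.addrs.push(addr); // append-only; duplicates are allowed
        // addrs.len() plays the role of @GLOBAL_METADATA_SELFDESTRUCT_LIST_LEN
    }
}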

View File

@@ -1,18 +1,6 @@
// Labels for unimplemented syscalls to make the kernel assemble.
// Each label should be removed from this file once it is implemented.
global sys_sdiv:
PANIC
global sys_smod:
PANIC
global sys_signextend:
PANIC
global sys_slt:
PANIC
global sys_sgt:
PANIC
global sys_sar:
PANIC
global sys_blockhash:
PANIC
global sys_prevrandao:

View File

@@ -10,7 +10,10 @@ global sys_stop:
%jump(terminate_common)
global sys_return:
// stack: kexit_info
// stack: kexit_info, offset, size
// TODO: For now we're ignoring the returned data. Need to return it to the parent context.
%stack (kexit_info, offset, size) -> (kexit_info)
%leftover_gas
// stack: leftover_gas
// TODO: Set parent context's CTX_METADATA_RETURNDATA_SIZE.
@@ -19,22 +22,68 @@ global sys_return:
%jump(terminate_common)
global sys_selfdestruct:
// stack: kexit_info, address
// stack: kexit_info, recipient
SWAP1 %u256_to_addr
DUP1 %insert_accessed_addresses_no_return // TODO: Use return value in gas calculation.
// stack: address, kexit_info
SWAP1
// TODO: Charge gas.
// TODO: Add address to the access list.
%consume_gas_const(@GAS_SELFDESTRUCT)
%address DUP1 %balance
// Insert recipient into the accessed addresses list.
// stack: balance, address, recipient, kexit_info
DUP3 %insert_accessed_addresses
// Compute gas.
// stack: cold_access, balance, address, recipient, kexit_info
%mul_const(@GAS_COLDACCOUNTACCESS)
DUP2
// stack: balance, gas_coldaccess, balance, address, recipient, kexit_info
ISZERO %not_bit
// stack: balance!=0, gas_coldaccess, balance, address, recipient, kexit_info
DUP5 %is_dead MUL %mul_const(@GAS_NEWACCOUNT)
// stack: gas_newaccount, gas_coldaccess, balance, address, recipient, kexit_info
ADD %add_const(@GAS_SELFDESTRUCT)
%stack (gas, balance, address, recipient, kexit_info) -> (gas, kexit_info, balance, address, recipient)
%charge_gas
%stack (kexit_info, balance, address, recipient) -> (balance, address, recipient, kexit_info)
// Insert address into the selfdestruct set.
// stack: balance, address, recipient, kexit_info
DUP2 %insert_selfdestruct_list
// Set the balance of the address to 0.
// stack: balance, address, recipient, kexit_info
PUSH 0
// stack: 0, balance, address, recipient, kexit_info
DUP3 %mpt_read_state_trie
// stack: account_ptr, 0, balance, address, recipient, kexit_info
%add_const(1)
// stack: balance_ptr, 0, balance, address, recipient, kexit_info
%mstore_trie_data // TODO: This should be a copy-on-write operation.
// If the recipient is the same as the address, then we're done.
// Otherwise, send the balance to the recipient.
%stack (balance, address, recipient, kexit_info) -> (address, recipient, recipient, balance, kexit_info)
EQ %jumpi(sys_selfdestruct_same_addr)
// stack: recipient, balance, kexit_info
%add_eth
// stack: kexit_info
%leftover_gas
// stack: leftover_gas
PUSH 1 // success
%jump(terminate_common)
sys_selfdestruct_same_addr:
// stack: recipient, balance, kexit_info
%pop2
%leftover_gas
// stack: leftover_gas
// TODO: Destroy account.
PUSH 1 // success
%jump(terminate_common)
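A sketch of the SELFDESTRUCT gas computed above, assuming the usual constants GAS_SELFDESTRUCT = 5000, GAS_COLDACCOUNTACCESS = 2600, GAS_NEWACCOUNT = 25000; the function name is hypothetical:

fn selfdestruct_gas(cold_access: bool, balance_nonzero: bool, recipient_dead: bool) -> u64 {
    let cold = if cold_access { 2600 } else { 0 };
    // The new-account surcharge applies only when a nonzero balance would be
    // sent to a dead (empty or non-existent) recipient, per the MULs above.
    let new_account = if balance_nonzero && recipient_dead { 25000 } else { 0 };
    5000 + cold + new_account
}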
global sys_revert:
// stack: kexit_info
// stack: kexit_info, offset, size
// TODO: For now we're ignoring the returned data. Need to return it to the parent context.
%stack (kexit_info, offset, size) -> (kexit_info)
%leftover_gas
// stack: leftover_gas
// TODO: Revert state changes.
@@ -96,21 +145,3 @@ global terminate_common:
// stack: parent_pc, success, leftover_gas
JUMP
%macro leftover_gas
// stack: kexit_info
%shr_const(192)
// stack: gas_used
%mload_context_metadata(@CTX_METADATA_GAS_LIMIT)
// stack: gas_limit, gas_used
SWAP1
// stack: gas_used, gas_limit
DUP2 DUP2 LT
// stack: gas_used < gas_limit, gas_used, gas_limit
SWAP2
// stack: gas_limit, gas_used, gas_used < gas_limit
SUB
// stack: gas_limit - gas_used, gas_used < gas_limit
MUL
// stack: leftover_gas = (gas_limit - gas_used) * (gas_used < gas_limit)
%endmacro

View File

@@ -81,9 +81,10 @@ global add_eth:
// stack: retdest
JUMP
global add_eth_new_account:
// TODO: Skip creation if amount == 0?
// stack: null_account_ptr, addr, amount, retdest
POP
// stack: addr, amount, retdest
DUP2 ISZERO %jumpi(add_eth_new_account_zero)
%get_trie_data_size // pointer to new account we're about to create
// stack: new_account_ptr, addr, amount, retdest
SWAP2
@@ -98,6 +99,10 @@ global add_eth_new_account:
// stack: key, new_account_ptr, retdest
%jump(mpt_insert_state_trie)
add_eth_new_account_zero:
// stack: addr, amount, retdest
%pop2 JUMP
// Convenience macro to call add_eth and return where we left off.
%macro add_eth
%stack (addr, amount) -> (addr, amount, %%after)

View File

@@ -30,3 +30,43 @@
// If there is no "to" field, then this is a contract creation.
// stack: to == 0
%endmacro
// Returns 1 if the account is non-existent, 0 otherwise.
%macro is_non_existent
// stack: addr
%mpt_read_state_trie
ISZERO
%endmacro
// Returns 1 if the account is empty, 0 otherwise.
%macro is_empty
// stack: addr
%mpt_read_state_trie
// stack: account_ptr
DUP1 ISZERO %jumpi(%%false)
// stack: account_ptr
DUP1 %mload_trie_data
// stack: nonce, account_ptr
ISZERO %not_bit %jumpi(%%false)
%increment DUP1 %mload_trie_data
// stack: balance, balance_ptr
ISZERO %not_bit %jumpi(%%false)
%add_const(2) %mload_trie_data
// stack: code_hash
PUSH @EMPTY_STRING_HASH
EQ
%jump(%%after)
%%false:
// stack: account_ptr
POP
PUSH 0
%%after:
%endmacro
// Returns 1 if the account is dead (i.e., empty or non-existent), 0 otherwise.
%macro is_dead
// stack: addr
DUP1 %is_non_existent
SWAP1 %is_empty
ADD // OR
%endmacro
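In terms of the account fields, the predicates above can be sketched as follows (EIP-161 emptiness; the struct here is a stand-in for illustration, not the repo's AccountRlp):

use ethereum_types::{H256, U256};

struct Account { nonce: U256, balance: U256, code_hash: H256 }

fn is_empty(acct: &Account, empty_string_hash: H256) -> bool {
    acct.nonce.is_zero() && acct.balance.is_zero() && acct.code_hash == empty_string_hash
}

fn is_dead(acct: Option<&Account>, empty_string_hash: H256) -> bool {
    // Dead = non-existent (null account pointer) or empty, per EIP-161.
    match acct {
        None => true,
        Some(a) => is_empty(a, empty_string_hash),
    }
}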

View File

@@ -73,4 +73,30 @@ recursion_return:
jump
global sys_exp:
PANIC // TODO: Implement.
// stack: x, e, return_info
push 0
// stack: shift, x, e, return_info
%jump(sys_exp_gas_loop_enter)
sys_exp_gas_loop:
%add_const(8)
sys_exp_gas_loop_enter:
dup3
dup2
shr
// stack: e >> shift, shift, x, e, return_info
%jumpi(sys_exp_gas_loop)
// stack: shift_bits, x, e, return_info
%div_const(8)
// stack: byte_size_of_e := shift_bits / 8, x, e, return_info
%mul_const(@GAS_EXPBYTE)
%add_const(@GAS_EXP)
// stack: gas_cost := 10 + 50 * byte_size_of_e, x, e, return_info
%stack(gas_cost, x, e, return_info) -> (gas_cost, return_info, x, e)
%charge_gas
%stack(return_info, x, e) -> (x, e, sys_exp_return, return_info)
jump exp
sys_exp_return:
// stack: pow(x, e), return_info
swap1
exit_kernel
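The loop above finds the byte length of the exponent by shifting it right in 8-bit steps until it vanishes, i.e. ceil(bits(e) / 8). Equivalently, in Rust (a sketch):

use ethereum_types::U256;

fn exp_gas(e: U256) -> u64 {
    let byte_size_of_e = (e.bits() as u64 + 7) / 8; // 0 when e == 0
    10 + 50 * byte_size_of_e // @GAS_EXP + @GAS_EXPBYTE * byte_size_of_e
}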

View File

@@ -122,7 +122,12 @@ sys_calldataload_after_mload_packing:
// Macro for {CALLDATA,CODE,RETURNDATA}COPY (W_copy in Yellow Paper).
%macro wcopy(segment)
// stack: kexit_info, dest_offset, offset, size
DUP4 %num_bytes_to_num_words %mul_const(@GAS_COPY) %add_const(@GAS_VERYLOW) %charge_gas
PUSH @GAS_VERYLOW
DUP5
// stack: size, Gverylow, kexit_info, dest_offset, offset, size
ISZERO %jumpi(%%wcopy_empty)
// stack: Gverylow, kexit_info, dest_offset, offset, size
DUP5 %num_bytes_to_num_words %mul_const(@GAS_COPY) ADD %charge_gas
%stack (kexit_info, dest_offset, offset, size) -> (dest_offset, size, kexit_info, dest_offset, offset, size)
ADD // TODO: check for overflow, see discussion here https://github.com/mir-protocol/plonky2/pull/930/files/a4ea0965d79561c345e2f77836c07949c7e0bc69#r1143630253
@@ -137,6 +142,11 @@ sys_calldataload_after_mload_packing:
%%after:
// stack: kexit_info
EXIT_KERNEL
%%wcopy_empty:
// stack: Gverylow, kexit_info, dest_offset, offset, size
%charge_gas
%stack (kexit_info, dest_offset, offset, size) -> (kexit_info)
EXIT_KERNEL
%endmacro
global sys_calldatacopy:
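A sketch of the W_copy gas rule above, assuming GAS_VERYLOW = 3 and GAS_COPY = 3: Gverylow is charged even for empty copies, which is why the size == 0 case short-circuits to %%wcopy_empty rather than skipping %charge_gas.

fn wcopy_gas(size: u64) -> u64 {
    if size == 0 {
        3 // Gverylow only
    } else {
        3 + 3 * ((size + 31) / 32) // Gverylow + Gcopy * ceil(size / 32)
    }
}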

View File

@@ -3,6 +3,7 @@
// Mutate the state trie, inserting the given key-value pair.
// Pre stack: key, value_ptr, retdest
// Post stack: (empty)
// TODO: Have this take an address and do %mpt_insert_state_trie? To match mpt_read_state_trie.
global mpt_insert_state_trie:
// stack: key, value_ptr, retdest
%stack (key, value_ptr)

View File

@@ -0,0 +1,216 @@
// SDIV(a, b): signed division operation.
//
// If b = 0, then SDIV(a, b) = 0,
// else if a = -2^255 and b = -1, then SDIV(a, b) = -2^255
// else SDIV(a, b) = sgn(a/b) * floor(|a/b|).
global _sys_sdiv:
// stack: num, denom, return_info
DUP1
PUSH 0x8000000000000000000000000000000000000000000000000000000000000000
GT
// stack: num_is_nonneg := sign_bit > num, num, denom, return_info
DUP1
%jumpi(sys_sdiv_nonneg_num)
// stack: num_is_nonneg, num, denom, return_info
SWAP1
PUSH 0
SUB
SWAP1
// stack: num_is_nonneg, num := -num, denom, return_info
sys_sdiv_nonneg_num:
SWAP2
DUP1
PUSH 0x8000000000000000000000000000000000000000000000000000000000000000
GT
// stack: denom_is_nonneg := sign_bit > denom, denom, num, num_is_nonneg, return_info
DUP1
%jumpi(sys_sdiv_nonneg_denom)
// stack: denom_is_nonneg, denom, num, num_is_nonneg, return_info
SWAP1
PUSH 0
SUB
// stack: denom := -denom, denom_is_nonneg, num, num_is_nonneg, return_info
SWAP1
sys_sdiv_nonneg_denom:
// stack: denom_is_nonneg, denom, num, num_is_nonneg, return_info
SWAP2
DIV
// stack: num / denom, denom_is_nonneg, num_is_nonneg, return_info
SWAP2
EQ
// stack: denom_is_nonneg == num_is_nonneg, num / denom, return_info
%jumpi(sys_sdiv_same_sign)
PUSH 0
SUB
sys_sdiv_same_sign:
SWAP1
JUMP
// SMOD(a, b): signed "modulo remainder" operation.
//
// If b != 0, then SMOD(a, b) = sgn(a) * MOD(|a|, |b|),
// else SMOD(a, 0) = 0.
global _sys_smod:
// stack: x, mod, return_info
PUSH 0x8000000000000000000000000000000000000000000000000000000000000000
// stack: sign_bit, x, mod, return_info
DUP1
DUP4
LT
// stack: mod < sign_bit, sign_bit, x, mod, return_info
%jumpi(sys_smod_pos_mod)
// mod is negative, so we negate it
// stack: sign_bit, x, mod, return_info
SWAP2
PUSH 0
SUB
SWAP2
// stack: sign_bit, x, mod := 0 - mod, return_info
sys_smod_pos_mod:
// At this point, we know that mod is non-negative.
DUP2
LT
// stack: x < sign_bit, x, mod, return_info
%jumpi(sys_smod_pos_x)
// x is negative, so let's negate it
// stack: x, mod, return_info
PUSH 0
SUB
// stack: x := 0 - x, mod, return_info
MOD
// negate the result
PUSH 0
SUB
SWAP1
JUMP
sys_smod_pos_x:
// Both x and mod are non-negative
// stack: x, mod, return_info
MOD
SWAP1
JUMP
// SIGNEXTEND from the Nth byte of value, where the bytes of value are
// considered in LITTLE-endian order. Just a SHL followed by a SAR.
global _sys_signextend:
// Stack: N, value, return_info
// Handle N >= 31, which is a no-op.
PUSH 31
%min
// Stack: min(31, N), value, return_info
%increment
%mul_const(8)
// Stack: 8*(N + 1), value, return_info
PUSH 256
SUB
// Stack: 256 - 8*(N + 1), value, return_info
%stack(bits, value, return_info) -> (bits, value, bits, return_info)
SHL
SWAP1
// Stack: bits, value << bits, return_info
// fall through to sys_sar
// SAR, i.e. shift arithmetic right, shifts `value` `shift` bits to
// the right, preserving sign by filling with the most significant bit.
//
// Trick: x >>s i = (x + sign_bit >>u i) - (sign_bit >>u i),
// where >>s is arithmetic shift and >>u is logical shift.
// Reference: Hacker's Delight, 2013, 2nd edition, §2-7.
global _sys_sar:
// SAR(shift, value) is the same for all shift >= 255, so we
// replace shift with min(shift, 255).
// Stack: shift, value, return_info
PUSH 255
%min
// Stack: min(shift, 255), value, return_info
// Now assume shift < 256.
// Stack: shift, value, return_info
PUSH 0x8000000000000000000000000000000000000000000000000000000000000000
DUP2
SHR
// Stack: 2^255 >> shift, shift, value, return_info
SWAP2
%add_const(0x8000000000000000000000000000000000000000000000000000000000000000)
// Stack: 2^255 + value, shift, 2^255 >> shift, return_info
SWAP1
SHR
SUB
// Stack: ((2^255 + value) >> shift) - (2^255 >> shift), return_info
SWAP1
JUMP
// SGT, i.e. signed greater than, returns 1 if lhs > rhs as signed
// integers, 0 otherwise.
//
// Just swap argument order and fall through to signed less than.
global _sys_sgt:
SWAP1
// SLT, i.e. signed less than, returns 1 if lhs < rhs as signed
// integers, 0 otherwise.
//
// Trick: x <s y iff (x ^ sign_bit) <u (y ^ sign_bit),
// where <s is signed comparison and <u is unsigned comparison.
// Reference: Hacker's Delight, 2013, 2nd edition, §2-12.
global _sys_slt:
// Stack: lhs, rhs, return_info
%add_const(0x8000000000000000000000000000000000000000000000000000000000000000)
// Stack: 2^255 + lhs, rhs, return_info
SWAP1
%add_const(0x8000000000000000000000000000000000000000000000000000000000000000)
// Stack: 2^255 + rhs, 2^255 + lhs, return_info
GT
// Stack: 2^255 + lhs < 2^255 + rhs, return_info
SWAP1
JUMP
/// These are the global entry-points for the signed system
/// calls. They just delegate to a subroutine with the same name
/// preceded by an underscore.
///
/// NB: The only reason to structure things this way is so that the
/// test suite can call the _sys_opcode versions, since the test_suite
/// uses our interpreter which doesn't handle `EXIT_KERNEL` in a way
/// that allows for easy testing. The cost is two extra JUMPs per call.
global sys_sdiv:
%charge_gas_const(@GAS_LOW)
%stack(x, y, kernel_return) -> (_sys_sdiv, x, y, _syscall_return, kernel_return)
JUMP
global sys_smod:
%charge_gas_const(@GAS_LOW)
%stack(x, y, kernel_return) -> (_sys_smod, x, y, _syscall_return, kernel_return)
JUMP
global sys_signextend:
%charge_gas_const(@GAS_LOW)
%stack(x, y, kernel_return) -> (_sys_signextend, x, y, _syscall_return, kernel_return)
JUMP
global sys_sar:
%charge_gas_const(@GAS_VERYLOW)
%stack(x, y, kernel_return) -> (_sys_sar, x, y, _syscall_return, kernel_return)
JUMP
global sys_slt:
%charge_gas_const(@GAS_VERYLOW)
%stack(x, y, kernel_return) -> (_sys_slt, x, y, _syscall_return, kernel_return)
JUMP
global sys_sgt:
%charge_gas_const(@GAS_VERYLOW)
%stack(x, y, kernel_return) -> (_sys_sgt, x, y, _syscall_return, kernel_return)
JUMP
_syscall_return:
SWAP1
EXIT_KERNEL

View File

@@ -160,11 +160,6 @@
// stack: input >= c, ...
%endmacro
%macro consume_gas_const(c)
PUSH $c
CONSUME_GAS
%endmacro
// If pred is zero, yields z; otherwise, yields nz
%macro select
// stack: pred, nz, z
@@ -350,3 +345,11 @@
// stack: x
%mod_const(0x10000000000000000000000000000000000000000) // 2^160
%endmacro
%macro not_bit
// stack: b
PUSH 1
// stack: 1, b
SUB
// stack: 1 - b
%endmacro

View File

@@ -1,6 +1,20 @@
global sys_keccak256:
// stack: kexit_info, offset, len
// TODO: Charge gas.
PUSH @GAS_KECCAK256
DUP4
// stack: len, static_gas, kexit_info, offset, len
ISZERO %jumpi(sys_keccak256_empty)
// stack: static_gas, kexit_info, offset, len
DUP4 %num_bytes_to_num_words %mul_const(@GAS_KECCAK256WORD)
ADD
%charge_gas
// stack: kexit_info, offset, len
%stack (kexit_info, offset, len) -> (offset, len, kexit_info, offset, len)
ADD // TODO: need to check for overflow?
DUP1 %ensure_reasonable_offset
%update_mem_bytes
%stack (kexit_info, offset, len) -> (offset, len, kexit_info)
PUSH @SEGMENT_MAIN_MEMORY
GET_CONTEXT
@@ -10,6 +24,12 @@ global sys_keccak256:
SWAP1
EXIT_KERNEL
sys_keccak256_empty:
// stack: static_gas, kexit_info, offset, len
%charge_gas
%stack (kexit_info, offset, len) -> (kexit_info, @EMPTY_STRING_HASH)
EXIT_KERNEL
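A sketch of the KECCAK256 gas rule above, assuming GAS_KECCAK256 = 30 and GAS_KECCAK256WORD = 6; an empty input pays only the static cost and returns the precomputed @EMPTY_STRING_HASH without touching memory.

fn keccak256_gas(len: u64) -> u64 {
    if len == 0 {
        30 // static cost only, via the sys_keccak256_empty path
    } else {
        30 + 6 * ((len + 31) / 32) // plus the per-word cost
    }
}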
// Computes Keccak256(input_word). Clobbers @SEGMENT_KERNEL_GENERAL.
//
// Pre stack: input_word

View File

@@ -49,10 +49,12 @@ pub(crate) enum GlobalMetadata {
AccessedAddressesLen = 23,
/// Length of the storage keys access list.
AccessedStorageKeysLen = 24,
/// Length of the self-destruct list.
SelfDestructListLen = 25,
}
impl GlobalMetadata {
pub(crate) const COUNT: usize = 24;
pub(crate) const COUNT: usize = 25;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@@ -80,6 +82,7 @@ impl GlobalMetadata {
Self::RefundCounter,
Self::AccessedAddressesLen,
Self::AccessedStorageKeysLen,
Self::SelfDestructListLen,
]
}
@@ -110,6 +113,7 @@ impl GlobalMetadata {
Self::RefundCounter => "GLOBAL_METADATA_REFUND_COUNTER",
Self::AccessedAddressesLen => "GLOBAL_METADATA_ACCESSED_ADDRESSES_LEN",
Self::AccessedStorageKeysLen => "GLOBAL_METADATA_ACCESSED_STORAGE_KEYS_LEN",
Self::SelfDestructListLen => "GLOBAL_METADATA_SELFDESTRUCT_LIST_LEN",
}
}
}

View File

@@ -1,4 +1,8 @@
use eth_trie_utils::partial_trie::PartialTrie;
use std::ops::Deref;
use eth_trie_utils::partial_trie::HashedPartialTrie;
use crate::Node;
#[derive(Copy, Clone, Debug)]
pub(crate) enum PartialTrieType {
@@ -12,13 +16,13 @@ pub(crate) enum PartialTrieType {
impl PartialTrieType {
pub(crate) const COUNT: usize = 5;
pub(crate) fn of(trie: &PartialTrie) -> Self {
match trie {
PartialTrie::Empty => Self::Empty,
PartialTrie::Hash(_) => Self::Hash,
PartialTrie::Branch { .. } => Self::Branch,
PartialTrie::Extension { .. } => Self::Extension,
PartialTrie::Leaf { .. } => Self::Leaf,
pub(crate) fn of(trie: &HashedPartialTrie) -> Self {
match trie.deref() {
Node::Empty => Self::Empty,
Node::Hash(_) => Self::Hash,
Node::Branch { .. } => Self::Branch,
Node::Extension { .. } => Self::Extension,
Node::Leaf { .. } => Self::Leaf,
}
}

View File

@@ -385,7 +385,6 @@ impl<'a> Interpreter<'a> {
0xf5 => todo!(), // "CREATE2",
0xf6 => self.run_get_context(), // "GET_CONTEXT",
0xf7 => self.run_set_context(), // "SET_CONTEXT",
0xf8 => todo!(), // "CONSUME_GAS",
0xf9 => todo!(), // "EXIT_KERNEL",
0xfa => todo!(), // "STATICCALL",
0xfb => self.run_mload_general(), // "MLOAD_GENERAL",
@@ -558,7 +557,11 @@ impl<'a> Interpreter<'a> {
fn run_shl(&mut self) {
let shift = self.pop();
let value = self.pop();
self.push(value << shift);
self.push(if shift < U256::from(256usize) {
value << shift
} else {
U256::zero()
});
}
fn run_shr(&mut self) {
@@ -966,7 +969,6 @@ fn get_mnemonic(opcode: u8) -> &'static str {
0xf5 => "CREATE2",
0xf6 => "GET_CONTEXT",
0xf7 => "SET_CONTEXT",
0xf8 => "CONSUME_GAS",
0xf9 => "EXIT_KERNEL",
0xfa => "STATICCALL",
0xfb => "MLOAD_GENERAL",

View File

@@ -122,7 +122,6 @@ pub fn get_opcode(mnemonic: &str) -> u8 {
"CREATE2" => 0xf5,
"GET_CONTEXT" => 0xf6,
"SET_CONTEXT" => 0xf7,
"CONSUME_GAS" => 0xf8,
"EXIT_KERNEL" => 0xf9,
"STATICCALL" => 0xfa,
"MLOAD_GENERAL" => 0xfb,

View File

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use anyhow::Result;
use eth_trie_utils::partial_trie::PartialTrie;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::{Address, BigEndianHash, H256, U256};
use keccak_hash::keccak;
use rand::{thread_rng, Rng};
@@ -12,13 +12,14 @@ use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::mpt::nibbles_64;
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::memory::segments::Segment;
use crate::Node;
// Test account with a given code hash.
fn test_account(code: &[u8]) -> AccountRlp {
AccountRlp {
nonce: U256::from(1111),
balance: U256::from(2222),
storage_root: PartialTrie::Empty.calc_hash(),
storage_root: HashedPartialTrie::from(Node::Empty).hash(),
code_hash: keccak(code),
}
}
@@ -39,7 +40,7 @@ fn prepare_interpreter(
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_insert_state_trie = KERNEL.global_labels["mpt_insert_state_trie"];
let mpt_hash_state_trie = KERNEL.global_labels["mpt_hash_state_trie"];
let mut state_trie: PartialTrie = Default::default();
let mut state_trie: HashedPartialTrie = Default::default();
let trie_inputs = Default::default();
interpreter.generation_state.registers.program_counter = load_all_mpts;
@@ -96,7 +97,7 @@ fn prepare_interpreter(
let hash = H256::from_uint(&interpreter.stack()[0]);
state_trie.insert(k, rlp::encode(account).to_vec());
let expected_state_trie_hash = state_trie.calc_hash();
let expected_state_trie_hash = state_trie.hash();
assert_eq!(hash, expected_state_trie_hash);
Ok(())

View File

@@ -1,5 +1,5 @@
use anyhow::Result;
use eth_trie_utils::partial_trie::PartialTrie;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::{Address, BigEndianHash, H256, U256};
use keccak_hash::keccak;
use rand::{thread_rng, Rng};
@@ -9,13 +9,14 @@ use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::mpt::nibbles_64;
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::Node;
// Test account with a given code hash.
fn test_account(balance: U256) -> AccountRlp {
AccountRlp {
nonce: U256::from(1111),
balance,
storage_root: PartialTrie::Empty.calc_hash(),
storage_root: HashedPartialTrie::from(Node::Empty).hash(),
code_hash: H256::from_uint(&U256::from(8888)),
}
}
@@ -30,7 +31,7 @@ fn prepare_interpreter(
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_insert_state_trie = KERNEL.global_labels["mpt_insert_state_trie"];
let mpt_hash_state_trie = KERNEL.global_labels["mpt_hash_state_trie"];
let mut state_trie: PartialTrie = Default::default();
let mut state_trie: HashedPartialTrie = Default::default();
let trie_inputs = Default::default();
interpreter.generation_state.registers.program_counter = load_all_mpts;
@@ -87,7 +88,7 @@ fn prepare_interpreter(
let hash = H256::from_uint(&interpreter.stack()[0]);
state_trie.insert(k, rlp::encode(account).to_vec());
let expected_state_trie_hash = state_trie.calc_hash();
let expected_state_trie_hash = state_trie.hash();
assert_eq!(hash, expected_state_trie_hash);
Ok(())

View File

@@ -10,6 +10,7 @@ mod hash;
mod mpt;
mod packing;
mod rlp;
mod signed_syscalls;
mod transaction_parsing;
use std::str::FromStr;

View File

@ -7,6 +7,7 @@ use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::mpt::{extension_to_leaf, test_account_1_rlp, test_account_2_rlp};
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
use crate::generation::TrieInputs;
use crate::Node;
// TODO: Test with short leaf. Might need to be a storage trie.
@@ -24,11 +25,12 @@ fn mpt_hash_empty() -> Result<()> {
#[test]
fn mpt_hash_empty_branch() -> Result<()> {
let children = core::array::from_fn(|_| PartialTrie::Empty.into());
let state_trie = PartialTrie::Branch {
let children = core::array::from_fn(|_| Node::Empty.into());
let state_trie = Node::Branch {
children,
value: vec![],
};
}
.into();
let trie_inputs = TrieInputs {
state_trie,
transactions_trie: Default::default(),
@@ -42,7 +44,7 @@
fn mpt_hash_hash() -> Result<()> {
let hash = H256::random();
let trie_inputs = TrieInputs {
state_trie: PartialTrie::Hash(hash),
state_trie: Node::Hash(hash).into(),
transactions_trie: Default::default(),
receipts_trie: Default::default(),
storage_tries: vec![],
@@ -53,10 +55,11 @@
#[test]
fn mpt_hash_leaf() -> Result<()> {
let state_trie = PartialTrie::Leaf {
let state_trie = Node::Leaf {
nibbles: 0xABC_u64.into(),
value: test_account_1_rlp(),
};
}
.into();
let trie_inputs = TrieInputs {
state_trie,
transactions_trie: Default::default(),
@@ -80,17 +83,19 @@ fn mpt_hash_extension_to_leaf() -> Result<()> {
#[test]
fn mpt_hash_branch_to_leaf() -> Result<()> {
let leaf = PartialTrie::Leaf {
let leaf = Node::Leaf {
nibbles: 0xABC_u64.into(),
value: test_account_2_rlp(),
}
.into();
let mut children = core::array::from_fn(|_| PartialTrie::Empty.into());
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[3] = leaf;
let state_trie = PartialTrie::Branch {
let state_trie = Node::Branch {
children,
value: vec![],
};
}
.into();
let trie_inputs = TrieInputs {
state_trie,
@@ -124,7 +129,7 @@ fn test_state_trie(trie_inputs: TrieInputs) -> Result<()> {
interpreter.stack()
);
let hash = H256::from_uint(&interpreter.stack()[0]);
let expected_state_trie_hash = trie_inputs.state_trie.calc_hash();
let expected_state_trie_hash = trie_inputs.state_trie.hash();
assert_eq!(hash, expected_state_trie_hash);
Ok(())

View File

@@ -1,5 +1,6 @@
use anyhow::Result;
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use eth_trie_utils::nibbles::Nibbles;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::{BigEndianHash, H256};
use crate::cpu::kernel::aggregator::KERNEL;
@@ -10,6 +11,7 @@ use crate::cpu::kernel::tests::mpt::{
};
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::generation::TrieInputs;
use crate::Node;
#[test]
fn mpt_insert_empty() -> Result<()> {
@@ -19,58 +21,64 @@ fn mpt_insert_empty() -> Result<()> {
#[test]
fn mpt_insert_leaf_identical_keys() -> Result<()> {
let key = nibbles_64(0xABC);
let state_trie = PartialTrie::Leaf {
let state_trie = Node::Leaf {
nibbles: key,
value: test_account_1_rlp(),
};
}
.into();
test_state_trie(state_trie, key, test_account_2())
}
#[test]
fn mpt_insert_leaf_nonoverlapping_keys() -> Result<()> {
let state_trie = PartialTrie::Leaf {
let state_trie = Node::Leaf {
nibbles: nibbles_64(0xABC),
value: test_account_1_rlp(),
};
}
.into();
test_state_trie(state_trie, nibbles_64(0x123), test_account_2())
}
#[test]
fn mpt_insert_leaf_overlapping_keys() -> Result<()> {
let state_trie = PartialTrie::Leaf {
let state_trie = Node::Leaf {
nibbles: nibbles_64(0xABC),
value: test_account_1_rlp(),
};
}
.into();
test_state_trie(state_trie, nibbles_64(0xADE), test_account_2())
}
#[test]
#[ignore] // TODO: Not valid for state trie, all keys have same len.
fn mpt_insert_leaf_insert_key_extends_leaf_key() -> Result<()> {
let state_trie = PartialTrie::Leaf {
let state_trie = Node::Leaf {
nibbles: 0xABC_u64.into(),
value: test_account_1_rlp(),
};
}
.into();
test_state_trie(state_trie, nibbles_64(0xABCDE), test_account_2())
}
#[test]
#[ignore] // TODO: Not valid for state trie, all keys have same len.
fn mpt_insert_leaf_leaf_key_extends_insert_key() -> Result<()> {
let state_trie = PartialTrie::Leaf {
let state_trie = Node::Leaf {
nibbles: 0xABCDE_u64.into(),
value: test_account_1_rlp(),
};
}
.into();
test_state_trie(state_trie, nibbles_64(0xABC), test_account_2())
}
#[test]
fn mpt_insert_branch_replacing_empty_child() -> Result<()> {
let children = core::array::from_fn(|_| PartialTrie::Empty.into());
let state_trie = PartialTrie::Branch {
let children = core::array::from_fn(|_| Node::Empty.into());
let state_trie = Node::Branch {
children,
value: vec![],
};
}
.into();
test_state_trie(state_trie, nibbles_64(0xABC), test_account_2())
}
@@ -81,20 +89,21 @@ fn mpt_insert_branch_replacing_empty_child() -> Result<()> {
#[ignore]
fn mpt_insert_extension_nonoverlapping_keys() -> Result<()> {
// Existing keys are 0xABC, 0xABCDEF; inserted key is 0x12345.
let mut children = core::array::from_fn(|_| PartialTrie::Empty.into());
children[0xD] = PartialTrie::Leaf {
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[0xD] = Node::Leaf {
nibbles: 0xEF_u64.into(),
value: test_account_1_rlp(),
}
.into();
let state_trie = PartialTrie::Extension {
let state_trie = Node::Extension {
nibbles: 0xABC_u64.into(),
child: PartialTrie::Branch {
child: Node::Branch {
children,
value: test_account_1_rlp(),
}
.into(),
};
}
.into();
test_state_trie(state_trie, nibbles_64(0x12345), test_account_2())
}
@@ -104,48 +113,54 @@ fn mpt_insert_extension_nonoverlapping_keys() -> Result<()> {
#[ignore]
fn mpt_insert_extension_insert_key_extends_node_key() -> Result<()> {
// Existing keys are 0xA, 0xABCD; inserted key is 0xABCDEF.
let mut children = core::array::from_fn(|_| PartialTrie::Empty.into());
children[0xB] = PartialTrie::Leaf {
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[0xB] = Node::Leaf {
nibbles: 0xCD_u64.into(),
value: test_account_1_rlp(),
}
.into();
let state_trie = PartialTrie::Extension {
let state_trie = Node::Extension {
nibbles: 0xA_u64.into(),
child: PartialTrie::Branch {
child: Node::Branch {
children,
value: test_account_1_rlp(),
}
.into(),
};
}
.into();
test_state_trie(state_trie, nibbles_64(0xABCDEF), test_account_2())
}
#[test]
fn mpt_insert_branch_to_leaf_same_key() -> Result<()> {
let leaf = PartialTrie::Leaf {
let leaf = Node::Leaf {
nibbles: nibbles_count(0xBCD, 63),
value: test_account_1_rlp(),
}
.into();
let mut children = core::array::from_fn(|_| PartialTrie::Empty.into());
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[0] = leaf;
let state_trie = PartialTrie::Branch {
let state_trie = Node::Branch {
children,
value: vec![],
};
}
.into();
test_state_trie(state_trie, nibbles_64(0xABCD), test_account_2())
}
/// Note: The account's storage_root is ignored, as we can't insert a new storage_root without the
/// accompanying trie data. An empty trie's storage_root is used instead.
fn test_state_trie(mut state_trie: PartialTrie, k: Nibbles, mut account: AccountRlp) -> Result<()> {
fn test_state_trie(
mut state_trie: HashedPartialTrie,
k: Nibbles,
mut account: AccountRlp,
) -> Result<()> {
assert_eq!(k.count, 64);
// Ignore any storage_root; see documentation note.
account.storage_root = PartialTrie::Empty.calc_hash();
account.storage_root = HashedPartialTrie::from(Node::Empty).hash();
let trie_inputs = TrieInputs {
state_trie: state_trie.clone(),
@@ -207,7 +222,7 @@ fn test_state_trie(mut state_trie: PartialTrie, k: Nibbles, mut account: Account
let hash = H256::from_uint(&interpreter.stack()[0]);
state_trie.insert(k, rlp::encode(&account).to_vec());
let expected_state_trie_hash = state_trie.calc_hash();
let expected_state_trie_hash = state_trie.hash();
assert_eq!(hash, expected_state_trie_hash);
Ok(())

View File

@@ -1,5 +1,4 @@
use anyhow::Result;
use eth_trie_utils::partial_trie::PartialTrie;
use ethereum_types::{BigEndianHash, H256, U256};
use crate::cpu::kernel::aggregator::KERNEL;
@@ -9,6 +8,7 @@ use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::mpt::{extension_to_leaf, test_account_1, test_account_1_rlp};
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
use crate::generation::TrieInputs;
use crate::Node;
#[test]
fn load_all_mpts_empty() -> Result<()> {
@@ -48,10 +48,11 @@ fn load_all_mpts_empty() -> Result<()> {
#[test]
fn load_all_mpts_leaf() -> Result<()> {
let trie_inputs = TrieInputs {
state_trie: PartialTrie::Leaf {
state_trie: Node::Leaf {
nibbles: 0xABC_u64.into(),
value: test_account_1_rlp(),
},
}
.into(),
transactions_trie: Default::default(),
receipts_trie: Default::default(),
storage_tries: vec![],
@@ -100,7 +101,7 @@ fn load_all_mpts_leaf() -> Result<()> {
fn load_all_mpts_hash() -> Result<()> {
let hash = H256::random();
let trie_inputs = TrieInputs {
state_trie: PartialTrie::Hash(hash),
state_trie: Node::Hash(hash).into(),
transactions_trie: Default::default(),
receipts_trie: Default::default(),
storage_tries: vec![],
@@ -134,11 +135,12 @@ fn load_all_mpts_hash() -> Result<()> {
#[test]
fn load_all_mpts_empty_branch() -> Result<()> {
let children = core::array::from_fn(|_| PartialTrie::Empty.into());
let state_trie = PartialTrie::Branch {
let children = core::array::from_fn(|_| Node::Empty.into());
let state_trie = Node::Branch {
children,
value: vec![],
};
}
.into();
let trie_inputs = TrieInputs {
state_trie,
transactions_trie: Default::default(),

View File

@@ -1,7 +1,9 @@
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use eth_trie_utils::nibbles::Nibbles;
use eth_trie_utils::partial_trie::HashedPartialTrie;
use ethereum_types::{BigEndianHash, H256, U256};
use crate::generation::mpt::AccountRlp;
use crate::Node;
mod hash;
mod hex_prefix;
@@ -46,10 +48,10 @@ pub(crate) fn test_account_2_rlp() -> Vec<u8> {
}
/// A `PartialTrie` where an extension node leads to a leaf node containing an account.
pub(crate) fn extension_to_leaf(value: Vec<u8>) -> PartialTrie {
PartialTrie::Extension {
pub(crate) fn extension_to_leaf(value: Vec<u8>) -> HashedPartialTrie {
Node::Extension {
nibbles: 0xABC_u64.into(),
child: PartialTrie::Leaf {
child: Node::Leaf {
nibbles: Nibbles {
count: 3,
packed: 0xDEF.into(),
@@ -58,4 +60,5 @@ pub(crate) fn extension_to_leaf(value: Vec<u8>) -> PartialTrie {
}
.into(),
}
.into()
}

View File

@@ -0,0 +1,166 @@
use ethereum_types::U256;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::interpreter::Interpreter;
/// Generate a list of inputs suitable for testing the signed operations
///
/// The result includes 0, 2^(16i + j) for i = 1..15 and j in {-1, 0, 1},
/// and 2^255; then the negation of each of those; and then each of the
/// above, ±1. Little attempt has been made to avoid duplicates. Total
/// length is 279.
fn test_inputs() -> Vec<U256> {
let mut res = vec![U256::zero()];
for i in 1..16 {
res.push(U256::one() << (16 * i));
res.push(U256::one() << (16 * i + 1));
res.push(U256::one() << (16 * i - 1));
}
res.push(U256::one() << 255);
let n = res.len();
for i in 1..n {
// push -res[i]
res.push(res[i].overflowing_neg().0);
}
let n = res.len();
for i in 0..n {
res.push(res[i].overflowing_add(U256::one()).0);
res.push(res[i].overflowing_sub(U256::one()).0);
}
res
}
// U256_TOP_BIT == 2^255.
const U256_TOP_BIT: U256 = U256([0x0, 0x0, 0x0, 0x8000000000000000]);
/// Given a U256 `value`, interpret as a signed 256-bit number and
/// return the arithmetic right shift of `value` by `shift` bit
/// positions, i.e. the right shift of `value` with sign extension.
fn u256_sar(shift: U256, value: U256) -> U256 {
// Reference: Hacker's Delight, 2013, 2nd edition, §2-7.
let shift = shift.min(U256::from(255));
((value ^ U256_TOP_BIT) >> shift)
.overflowing_sub(U256_TOP_BIT >> shift)
.0
}
/// Given a U256 x, interpret it as a signed 256-bit number and return
/// the pair abs(x) and sign(x), where sign(x) = 1 if x < 0, and 0
/// otherwise. NB: abs(x) is interpreted as an unsigned value, so
/// u256_abs_sgn(-2^255) = (2^255, 1).
fn u256_abs_sgn(x: U256) -> (U256, bool) {
let is_neg = x.bit(255);
// negate x if it's negative
let x = if is_neg { x.overflowing_neg().0 } else { x };
(x, is_neg)
}
fn u256_sdiv(x: U256, y: U256) -> U256 {
let (abs_x, x_is_neg) = u256_abs_sgn(x);
let (abs_y, y_is_neg) = u256_abs_sgn(y);
if y.is_zero() {
U256::zero()
} else {
let quot = abs_x / abs_y;
// negate the quotient if arguments had opposite signs
if x_is_neg != y_is_neg {
quot.overflowing_neg().0
} else {
quot
}
}
}
fn u256_smod(x: U256, y: U256) -> U256 {
let (abs_x, x_is_neg) = u256_abs_sgn(x);
let (abs_y, _) = u256_abs_sgn(y);
if y.is_zero() {
U256::zero()
} else {
let rem = abs_x % abs_y;
// negate the remainder if dividend was negative
if x_is_neg {
rem.overflowing_neg().0
} else {
rem
}
}
}
// signextend is just a SHL followed by SAR.
fn u256_signextend(byte: U256, value: U256) -> U256 {
// byte = min(31, byte)
let byte: u32 = byte.min(U256::from(31)).try_into().unwrap();
let bit_offset = 256 - 8 * (byte + 1);
u256_sar(U256::from(bit_offset), value << bit_offset)
}
// Reference: Hacker's Delight, 2013, 2nd edition, §2-12.
fn u256_slt(x: U256, y: U256) -> U256 {
let top_bit: U256 = U256::one() << 255;
U256::from(((x ^ top_bit) < (y ^ top_bit)) as u32)
}
fn u256_sgt(x: U256, y: U256) -> U256 {
u256_slt(y, x)
}
fn run_test(fn_label: &str, expected_fn: fn(U256, U256) -> U256, opname: &str) {
let inputs = test_inputs();
let fn_label = KERNEL.global_labels[fn_label];
let retdest = U256::from(0xDEADBEEFu32);
for &x in &inputs {
for &y in &inputs {
let stack = vec![retdest, y, x];
let mut interpreter = Interpreter::new_with_kernel(fn_label, stack);
interpreter.run().unwrap();
assert_eq!(interpreter.stack().len(), 1usize, "unexpected stack size");
let output = interpreter.stack()[0];
let expected_output = expected_fn(x, y);
assert_eq!(
output, expected_output,
"{opname}({x}, {y}): expected {expected_output} but got {output}"
);
}
}
}
#[test]
fn test_sdiv() {
// Double-check that the expected output calculation is correct in the special case.
let x = U256::one() << 255; // -2^255
let y = U256::one().overflowing_neg().0; // -1
assert_eq!(u256_sdiv(x, y), x); // SDIV(-2^255, -1) = -2^255.
run_test("_sys_sdiv", u256_sdiv, "SDIV");
}
#[test]
fn test_smod() {
run_test("_sys_smod", u256_smod, "SMOD");
}
#[test]
fn test_signextend() {
run_test("_sys_signextend", u256_signextend, "SIGNEXTEND");
}
#[test]
fn test_sar() {
run_test("_sys_sar", u256_sar, "SAR");
}
#[test]
fn test_slt() {
run_test("_sys_slt", u256_slt, "SLT");
}
#[test]
fn test_sgt() {
run_test("_sys_sgt", u256_sgt, "SGT");
}

View File

@@ -8,6 +8,7 @@ use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::circuit_builder::CircuitBuilder;
@@ -327,7 +328,10 @@ impl<'a, F: RichField + Extendable<D>, const D: usize>
cross_table_lookups: &'a [CrossTableLookup<F>],
ctl_challenges: &'a GrandProductChallengeSet<F>,
num_permutation_zs: &[usize; NUM_TABLES],
) -> [Vec<Self>; NUM_TABLES] {
) -> [Vec<Self>; NUM_TABLES]
where
[(); C::HCO::WIDTH]:,
{
let mut ctl_zs = proofs
.iter()
.zip(num_permutation_zs)

View File

@@ -6,7 +6,7 @@ use plonky2::field::extension::Extendable;
use plonky2::fri::FriParams;
use plonky2::gates::noop::NoopGate;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::RecursiveChallenger;
use plonky2::iop::target::{BoolTarget, Target};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
@@ -49,6 +49,7 @@ pub struct AllRecursiveCircuits<F, C, const D: usize>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::HCO::WIDTH]:,
{
/// The EVM root circuit, which aggregates the (shrunk) per-table recursive proofs.
pub root: RootCircuitData<F, C, D>,
@ -111,13 +112,15 @@ impl<F, C, const D: usize> AllRecursiveCircuits<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F> + 'static,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::Hasher::HASH_SIZE]:,
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
/// Preprocess all recursive circuits used by the system.
pub fn new(
@ -184,11 +187,14 @@ where
let recursive_proofs =
core::array::from_fn(|i| builder.add_virtual_proof_with_pis(inner_common_data[i]));
let pis: [_; NUM_TABLES] = core::array::from_fn(|i| {
PublicInputs::from_vec(&recursive_proofs[i].public_inputs, stark_config)
PublicInputs::<Target, C::HCO>::from_vec(
&recursive_proofs[i].public_inputs,
stark_config,
)
});
let index_verifier_data = core::array::from_fn(|_i| builder.add_virtual_target());
let mut challenger = RecursiveChallenger::<F, C::Hasher, D>::new(&mut builder);
let mut challenger = RecursiveChallenger::<F, C::HCO, C::Hasher, D>::new(&mut builder);
for pi in &pis {
for h in &pi.trace_cap {
challenger.observe_elements(h);
@ -214,12 +220,12 @@ where
}
let state = challenger.compact(&mut builder);
for k in 0..SPONGE_WIDTH {
for k in 0..C::HCO::WIDTH {
builder.connect(state[k], pis[0].challenger_state_before[k]);
}
// Check that the challenger state is consistent between proofs.
for i in 1..NUM_TABLES {
for k in 0..SPONGE_WIDTH {
for k in 0..C::HCO::WIDTH {
builder.connect(
pis[i].challenger_state_before[k],
pis[i - 1].challenger_state_after[k],
@ -267,7 +273,7 @@ where
let cyclic_vk = builder.add_verifier_data_public_inputs();
RootCircuitData {
circuit: builder.build(),
circuit: builder.build::<C>(),
proof_with_pis: recursive_proofs,
index_verifier_data,
cyclic_vk,
@ -471,6 +477,7 @@ struct RecursiveCircuitsForTable<F, C, const D: usize>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::HCO::WIDTH]:,
{
/// A map from `log_2(height)` to a chain of shrinking recursion circuits starting at that
/// height.
@ -481,8 +488,10 @@ impl<F, C, const D: usize> RecursiveCircuitsForTable<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
fn new<S: Stark<F, D>>(
table: Table,
@ -533,6 +542,7 @@ struct RecursiveCircuitsForTableSize<F, C, const D: usize>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::HCO::WIDTH]:,
{
initial_wrapper: StarkWrapperCircuit<F, C, D>,
shrinking_wrappers: Vec<PlonkWrapperCircuit<F, C, D>>,
@ -542,8 +552,10 @@ impl<F, C, const D: usize> RecursiveCircuitsForTableSize<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
fn new<S: Stark<F, D>>(
table: Table,
@ -584,7 +596,7 @@ where
builder.verify_proof::<C>(&proof_with_pis_target, &last_vk, &last.common);
builder.register_public_inputs(&proof_with_pis_target.public_inputs); // carry PIs forward
add_common_recursion_gates(&mut builder);
let circuit = builder.build();
let circuit = builder.build::<C>();
assert!(
circuit.common.degree_bits() < last_degree_bits,

View File

@ -1,6 +1,6 @@
use std::collections::HashMap;
use eth_trie_utils::partial_trie::PartialTrie;
use eth_trie_utils::partial_trie::HashedPartialTrie;
use ethereum_types::{Address, BigEndianHash, H256, U256};
use plonky2::field::extension::Extendable;
use plonky2::field::polynomial::PolynomialValues;
@ -59,19 +59,19 @@ pub struct GenerationInputs {
pub struct TrieInputs {
/// A partial version of the state trie prior to these transactions. It should include all nodes
/// that will be accessed by these transactions.
pub state_trie: PartialTrie,
pub state_trie: HashedPartialTrie,
/// A partial version of the transaction trie prior to these transactions. It should include all
/// nodes that will be accessed by these transactions.
pub transactions_trie: PartialTrie,
pub transactions_trie: HashedPartialTrie,
/// A partial version of the receipt trie prior to these transactions. It should include all nodes
/// that will be accessed by these transactions.
pub receipts_trie: PartialTrie,
pub receipts_trie: HashedPartialTrie,
/// A partial version of each storage trie prior to these transactions. It should include all
/// storage tries, and nodes therein, that will be accessed by these transactions.
pub storage_tries: Vec<(Address, PartialTrie)>,
pub storage_tries: Vec<(Address, HashedPartialTrie)>,
}
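// Illustrative construction (mirroring the integration tests later in this
// diff): a fully-empty set of tries under the new `HashedPartialTrie` API
// looks like
//
//     TrieInputs {
//         state_trie: HashedPartialTrie::from(Node::Empty),
//         transactions_trie: Node::Empty.into(),
//         receipts_trie: Node::Empty.into(),
//         storage_tries: vec![],
//     }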
fn apply_metadata_memops<F: RichField + Extendable<D>, const D: usize>(

View File

@ -1,12 +1,15 @@
use std::collections::HashMap;
use std::ops::Deref;
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use eth_trie_utils::nibbles::Nibbles;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::{BigEndianHash, H256, U256};
use keccak_hash::keccak;
use rlp_derive::{RlpDecodable, RlpEncodable};
use crate::cpu::kernel::constants::trie_type::PartialTrieType;
use crate::generation::TrieInputs;
use crate::Node;
#[derive(RlpEncodable, RlpDecodable, Debug)]
pub struct AccountRlp {
@ -21,7 +24,7 @@ impl Default for AccountRlp {
Self {
nonce: U256::zero(),
balance: U256::zero(),
storage_root: PartialTrie::Empty.calc_hash(),
storage_root: HashedPartialTrie::from(Node::Empty).hash(),
code_hash: keccak([]),
}
}
@ -70,17 +73,18 @@ pub(crate) fn all_mpt_prover_inputs(trie_inputs: &TrieInputs) -> Vec<U256> {
/// is serialized as `(TYPE_LEAF, key, value)`, where key is a `(nibbles, depth)` pair and `value`
/// is a variable-length structure which depends on which trie we're dealing with.
pub(crate) fn mpt_prover_inputs<F>(
trie: &PartialTrie,
trie: &HashedPartialTrie,
prover_inputs: &mut Vec<U256>,
parse_value: &F,
) where
F: Fn(&[u8]) -> Vec<U256>,
{
prover_inputs.push((PartialTrieType::of(trie) as u32).into());
match trie {
PartialTrie::Empty => {}
PartialTrie::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())),
PartialTrie::Branch { children, value } => {
match trie.deref() {
Node::Empty => {}
Node::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())),
Node::Branch { children, value } => {
if value.is_empty() {
prover_inputs.push(U256::zero()); // value_present = 0
} else {
@ -92,12 +96,12 @@ pub(crate) fn mpt_prover_inputs<F>(
mpt_prover_inputs(child, prover_inputs, parse_value);
}
}
PartialTrie::Extension { nibbles, child } => {
Node::Extension { nibbles, child } => {
prover_inputs.push(nibbles.count.into());
prover_inputs.push(nibbles.packed);
mpt_prover_inputs(child, prover_inputs, parse_value);
}
PartialTrie::Leaf { nibbles, value } => {
Node::Leaf { nibbles, value } => {
prover_inputs.push(nibbles.count.into());
prover_inputs.push(nibbles.packed);
let leaf = parse_value(value);
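// Illustrative trace (not part of the diff): for a leaf whose key has
// 2 remaining nibbles packed as 0xAB, the stream emitted so far is
//   [TYPE_LEAF, 2, 0xAB, <parsed value words...>]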
@ -109,16 +113,16 @@ pub(crate) fn mpt_prover_inputs<F>(
/// Like `mpt_prover_inputs`, but for the state trie, which is a bit unique since each value
/// leads to a storage trie which we recursively traverse.
pub(crate) fn mpt_prover_inputs_state_trie(
trie: &PartialTrie,
trie: &HashedPartialTrie,
key: Nibbles,
prover_inputs: &mut Vec<U256>,
storage_tries_by_state_key: &HashMap<Nibbles, &PartialTrie>,
storage_tries_by_state_key: &HashMap<Nibbles, &HashedPartialTrie>,
) {
prover_inputs.push((PartialTrieType::of(trie) as u32).into());
match trie {
PartialTrie::Empty => {}
PartialTrie::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())),
PartialTrie::Branch { children, value } => {
match trie.deref() {
Node::Empty => {}
Node::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())),
Node::Branch { children, value } => {
assert!(value.is_empty(), "State trie should not have branch values");
prover_inputs.push(U256::zero()); // value_present = 0
@ -135,7 +139,7 @@ pub(crate) fn mpt_prover_inputs_state_trie(
);
}
}
PartialTrie::Extension { nibbles, child } => {
Node::Extension { nibbles, child } => {
prover_inputs.push(nibbles.count.into());
prover_inputs.push(nibbles.packed);
let extended_key = key.merge_nibbles(nibbles);
@ -146,7 +150,7 @@ pub(crate) fn mpt_prover_inputs_state_trie(
storage_tries_by_state_key,
);
}
PartialTrie::Leaf { nibbles, value } => {
Node::Leaf { nibbles, value } => {
let account: AccountRlp = rlp::decode(value).expect("Decoding failed");
let AccountRlp {
nonce,
@ -155,14 +159,14 @@ pub(crate) fn mpt_prover_inputs_state_trie(
code_hash,
} = account;
let storage_hash_only = PartialTrie::Hash(storage_root);
let storage_hash_only = HashedPartialTrie::new(Node::Hash(storage_root));
let merged_key = key.merge_nibbles(nibbles);
let storage_trie: &PartialTrie = storage_tries_by_state_key
let storage_trie: &HashedPartialTrie = storage_tries_by_state_key
.get(&merged_key)
.copied()
.unwrap_or(&storage_hash_only);
assert_eq!(storage_trie.calc_hash(), storage_root,
assert_eq!(storage_trie.hash(), storage_root,
"In TrieInputs, an account's storage_root didn't match the associated storage trie hash");
prover_inputs.push(nibbles.count.into());

View File

@ -2,7 +2,7 @@
use std::collections::HashMap;
use eth_trie_utils::partial_trie::Nibbles;
use eth_trie_utils::nibbles::Nibbles;
use ethereum_types::{BigEndianHash, H256, U256};
use crate::cpu::kernel::constants::trie_type::PartialTrieType;

View File

@ -1,6 +1,7 @@
use plonky2::field::extension::Extendable;
use plonky2::fri::proof::{FriProof, FriProofTarget};
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
@ -13,14 +14,21 @@ use crate::permutation::{
};
use crate::proof::*;
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D> {
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D>
where
[(); C::HCO::WIDTH]:,
{
/// Computes all Fiat-Shamir challenges used in the STARK proof.
pub(crate) fn get_challenges(
&self,
all_stark: &AllStark<F, D>,
config: &StarkConfig,
) -> AllProofChallenges<F, D> {
let mut challenger = Challenger::<F, C::Hasher>::new();
) -> AllProofChallenges<F, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
for proof in &self.stark_proofs {
challenger.observe_cap(&proof.proof.trace_cap);
@ -53,8 +61,12 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> A
&self,
all_stark: &AllStark<F, D>,
config: &StarkConfig,
) -> AllChallengerState<F, D> {
let mut challenger = Challenger::<F, C::Hasher>::new();
) -> AllChallengerState<F, C::HCO, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
for proof in &self.stark_proofs {
challenger.observe_cap(&proof.proof.trace_cap);
@ -94,11 +106,15 @@ where
/// Computes all Fiat-Shamir challenges used in the STARK proof.
pub(crate) fn get_challenges(
&self,
challenger: &mut Challenger<F, C::Hasher>,
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
stark_use_permutation: bool,
stark_permutation_batch_size: usize,
config: &StarkConfig,
) -> StarkProofChallenges<F, D> {
) -> StarkProofChallenges<F, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let degree_bits = self.recover_degree_bits(config);
let StarkProof {
@ -153,13 +169,15 @@ impl<const D: usize> StarkProofTarget<D> {
pub(crate) fn get_challenges<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>>(
&self,
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
challenger: &mut RecursiveChallenger<F, C::HCO, C::Hasher, D>,
stark_use_permutation: bool,
stark_permutation_batch_size: usize,
config: &StarkConfig,
) -> StarkProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let StarkProofTarget {
permutation_ctl_zs_cap,

View File

@ -3,6 +3,7 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
#![allow(clippy::field_reassign_with_default)]
#![allow(clippy::upper_case_acronyms)]
#![feature(let_chains)]
#![feature(generic_const_exprs)]
@ -34,6 +35,7 @@ pub mod vars;
pub mod verifier;
pub mod witness;
use eth_trie_utils::partial_trie::HashedPartialTrie;
// Set up Jemalloc
#[cfg(not(target_env = "msvc"))]
use jemallocator::Jemalloc;
@ -41,3 +43,5 @@ use jemallocator::Jemalloc;
#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
pub type Node = eth_trie_utils::partial_trie::Node<HashedPartialTrie>;

View File

@ -48,10 +48,12 @@ pub enum Segment {
AccessedAddresses = 23,
/// List of storage keys that have been accessed in the current transaction.
AccessedStorageKeys = 24,
/// List of addresses that have called SELFDESTRUCT in the current transaction.
SelfDestructList = 25,
}
impl Segment {
pub(crate) const COUNT: usize = 25;
pub(crate) const COUNT: usize = 26;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@ -80,6 +82,7 @@ impl Segment {
Self::BnPairing,
Self::AccessedAddresses,
Self::AccessedStorageKeys,
Self::SelfDestructList,
]
}
@ -111,6 +114,7 @@ impl Segment {
Segment::BnPairing => "SEGMENT_KERNEL_BN_PAIRING",
Segment::AccessedAddresses => "SEGMENT_ACCESSED_ADDRESSES",
Segment::AccessedStorageKeys => "SEGMENT_ACCESSED_STORAGE_KEYS",
Segment::SelfDestructList => "SEGMENT_SELFDESTRUCT_LIST",
}
}
@ -142,6 +146,7 @@ impl Segment {
Segment::BnPairing => 256,
Segment::AccessedAddresses => 256,
Segment::AccessedStorageKeys => 256,
Segment::SelfDestructList => 256,
}
}
}

View File

@ -9,6 +9,7 @@ use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
@ -175,29 +176,38 @@ fn poly_product_elementwise<F: Field>(
product
}
fn get_grand_product_challenge<F: RichField, H: Hasher<F>>(
challenger: &mut Challenger<F, H>,
) -> GrandProductChallenge<F> {
fn get_grand_product_challenge<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
challenger: &mut Challenger<F, HC, H>,
) -> GrandProductChallenge<F>
where
[(); HC::WIDTH]:,
{
let beta = challenger.get_challenge();
let gamma = challenger.get_challenge();
GrandProductChallenge { beta, gamma }
}
pub(crate) fn get_grand_product_challenge_set<F: RichField, H: Hasher<F>>(
challenger: &mut Challenger<F, H>,
pub(crate) fn get_grand_product_challenge_set<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
challenger: &mut Challenger<F, HC, H>,
num_challenges: usize,
) -> GrandProductChallengeSet<F> {
) -> GrandProductChallengeSet<F>
where
[(); HC::WIDTH]:,
{
let challenges = (0..num_challenges)
.map(|_| get_grand_product_challenge(challenger))
.collect();
GrandProductChallengeSet { challenges }
}
pub(crate) fn get_n_grand_product_challenge_sets<F: RichField, H: Hasher<F>>(
challenger: &mut Challenger<F, H>,
pub(crate) fn get_n_grand_product_challenge_sets<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
challenger: &mut Challenger<F, HC, H>,
num_challenges: usize,
num_sets: usize,
) -> Vec<GrandProductChallengeSet<F>> {
) -> Vec<GrandProductChallengeSet<F>>
where
[(); HC::WIDTH]:,
{
(0..num_sets)
.map(|_| get_grand_product_challenge_set(challenger, num_challenges))
.collect()
@ -205,12 +215,16 @@ pub(crate) fn get_n_grand_product_challenge_sets<F: RichField, H: Hasher<F>>(
fn get_grand_product_challenge_target<
F: RichField + Extendable<D>,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, H, D>,
) -> GrandProductChallenge<Target> {
challenger: &mut RecursiveChallenger<F, HC, H, D>,
) -> GrandProductChallenge<Target>
where
[(); HC::WIDTH]:,
{
let beta = challenger.get_challenge(builder);
let gamma = challenger.get_challenge(builder);
GrandProductChallenge { beta, gamma }
@ -218,13 +232,17 @@ fn get_grand_product_challenge_target<
pub(crate) fn get_grand_product_challenge_set_target<
F: RichField + Extendable<D>,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, H, D>,
challenger: &mut RecursiveChallenger<F, HC, H, D>,
num_challenges: usize,
) -> GrandProductChallengeSet<Target> {
) -> GrandProductChallengeSet<Target>
where
[(); HC::WIDTH]:,
{
let challenges = (0..num_challenges)
.map(|_| get_grand_product_challenge_target(builder, challenger))
.collect();
@ -233,14 +251,18 @@ pub(crate) fn get_grand_product_challenge_set_target<
pub(crate) fn get_n_grand_product_challenge_sets_target<
F: RichField + Extendable<D>,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, H, D>,
challenger: &mut RecursiveChallenger<F, HC, H, D>,
num_challenges: usize,
num_sets: usize,
) -> Vec<GrandProductChallengeSet<Target>> {
) -> Vec<GrandProductChallengeSet<Target>>
where
[(); HC::WIDTH]:,
{
(0..num_sets)
.map(|_| get_grand_product_challenge_set_target(builder, challenger, num_challenges))
.collect()

View File

@ -7,7 +7,7 @@ use plonky2::fri::structure::{
FriOpeningBatch, FriOpeningBatchTarget, FriOpenings, FriOpeningsTarget,
};
use plonky2::hash::hash_types::{MerkleCapTarget, RichField};
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::hashing::HashConfig;
use plonky2::hash::merkle_tree::MerkleCap;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
@ -21,13 +21,19 @@ use crate::permutation::GrandProductChallengeSet;
/// A STARK proof for each table, plus some metadata used to create recursive wrapper proofs.
#[derive(Debug, Clone)]
pub struct AllProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
pub struct AllProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
where
[(); C::HCO::WIDTH]:,
{
pub stark_proofs: [StarkProofWithMetadata<F, C, D>; NUM_TABLES],
pub(crate) ctl_challenges: GrandProductChallengeSet<F>,
pub public_values: PublicValues,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D> {
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D>
where
[(); C::HCO::WIDTH]:,
{
pub fn degree_bits(&self, config: &StarkConfig) -> [usize; NUM_TABLES] {
core::array::from_fn(|i| self.stark_proofs[i].proof.recover_degree_bits(config))
}
@ -39,10 +45,13 @@ pub(crate) struct AllProofChallenges<F: RichField + Extendable<D>, const D: usiz
}
#[allow(unused)] // TODO: should be used soon
pub(crate) struct AllChallengerState<F: RichField + Extendable<D>, const D: usize> {
pub(crate) struct AllChallengerState<F: RichField + Extendable<D>, HC: HashConfig, const D: usize>
where
[(); HC::WIDTH]:,
{
/// Sponge state of the challenger before starting each proof,
/// along with the final state after all proofs are done. This final state isn't strictly needed.
pub states: [[F; SPONGE_WIDTH]; NUM_TABLES + 1],
pub states: [[F; HC::WIDTH]; NUM_TABLES + 1],
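// Concretely (illustrative note): states[i] is the sponge state entering
// table i's proof, and states[NUM_TABLES] is the state left after the
// final table's proof.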
pub ctl_challenges: GrandProductChallengeSet<F>,
}
@ -99,15 +108,15 @@ pub struct BlockMetadataTarget {
#[derive(Debug, Clone)]
pub struct StarkProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
/// Merkle cap of LDEs of trace values.
pub trace_cap: MerkleCap<F, C::Hasher>,
pub trace_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Merkle cap of LDEs of permutation Z values.
pub permutation_ctl_zs_cap: MerkleCap<F, C::Hasher>,
pub permutation_ctl_zs_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Merkle cap of LDEs of trace values.
pub quotient_polys_cap: MerkleCap<F, C::Hasher>,
pub quotient_polys_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Purported values of each polynomial at the challenge point.
pub openings: StarkOpeningSet<F, D>,
/// A batch FRI argument for all openings.
pub opening_proof: FriProof<F, C::Hasher, D>,
pub opening_proof: FriProof<F, C::HCO, C::Hasher, D>,
}
/// A `StarkProof` along with some metadata about the initial Fiat-Shamir state, which is used when
@ -117,8 +126,9 @@ pub struct StarkProofWithMetadata<F, C, const D: usize>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::HCO::WIDTH]:,
{
pub(crate) init_challenger_state: [F; SPONGE_WIDTH],
pub(crate) init_challenger_state: [F; C::HCO::WIDTH],
pub(crate) proof: StarkProof<F, C, D>,
}

View File

@ -11,6 +11,7 @@ use plonky2::field::types::Field;
use plonky2::field::zero_poly_coset::ZeroPolyOnCoset;
use plonky2::fri::oracle::PolynomialBatch;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::Challenger;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::timed;
@ -50,12 +51,14 @@ pub fn prove<F, C, const D: usize>(
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::Hasher::HASH_SIZE]:,
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let (proof, _outputs) = prove_with_outputs(all_stark, config, inputs, timing)?;
Ok(proof)
@ -78,6 +81,8 @@ where
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
timed!(timing, "build kernel", Lazy::force(&KERNEL));
let (traces, public_values, outputs) = timed!(
@ -100,12 +105,14 @@ pub(crate) fn prove_with_traces<F, C, const D: usize>(
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::Hasher::HASH_SIZE]:,
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let rate_bits = config.fri_config.rate_bits;
let cap_height = config.fri_config.cap_height;
@ -139,7 +146,7 @@ where
.iter()
.map(|c| c.merkle_tree.cap.clone())
.collect::<Vec<_>>();
let mut challenger = Challenger::<F, C::Hasher>::new();
let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
for cap in &trace_caps {
challenger.observe_cap(cap);
}
@ -182,7 +189,7 @@ fn prove_with_commitments<F, C, const D: usize>(
trace_poly_values: [Vec<PolynomialValues<F>>; NUM_TABLES],
trace_commitments: Vec<PolynomialBatch<F, C, D>>,
ctl_data_per_table: [CtlData<F>; NUM_TABLES],
challenger: &mut Challenger<F, C::Hasher>,
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
timing: &mut TimingTree,
) -> Result<[StarkProofWithMetadata<F, C, D>; NUM_TABLES]>
where
@ -194,6 +201,8 @@ where
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let cpu_proof = timed!(
timing,
@ -276,15 +285,16 @@ pub(crate) fn prove_single_table<F, C, S, const D: usize>(
trace_poly_values: &[PolynomialValues<F>],
trace_commitment: &PolynomialBatch<F, C, D>,
ctl_data: &CtlData<F>,
challenger: &mut Challenger<F, C::Hasher>,
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
timing: &mut TimingTree,
) -> Result<StarkProofWithMetadata<F, C, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); C::Hasher::HASH_SIZE]:,
[(); S::COLUMNS]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let degree = trace_poly_values[0].len();
let degree_bits = log2_strict(degree);

View File

@ -9,14 +9,14 @@ use plonky2::gates::exponentiation::ExponentiationGate;
use plonky2::gates::gate::GateRef;
use plonky2::gates::noop::NoopGate;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartialWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{CircuitConfig, CircuitData, VerifierCircuitData};
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
use plonky2::util::reducing::ReducingFactorTarget;
use plonky2::with_context;
@ -49,12 +49,15 @@ pub struct RecursiveAllProof<
pub recursive_proofs: [ProofWithPublicInputs<F, C, D>; NUM_TABLES],
}
pub(crate) struct PublicInputs<T: Copy + Eq + PartialEq + Debug> {
pub(crate) struct PublicInputs<T: Copy + Eq + PartialEq + Debug, HC: HashConfig>
where
[(); HC::WIDTH]:,
{
pub(crate) trace_cap: Vec<Vec<T>>,
pub(crate) ctl_zs_last: Vec<T>,
pub(crate) ctl_challenges: GrandProductChallengeSet<T>,
pub(crate) challenger_state_before: [T; SPONGE_WIDTH],
pub(crate) challenger_state_after: [T; SPONGE_WIDTH],
pub(crate) challenger_state_before: [T; HC::WIDTH],
pub(crate) challenger_state_after: [T; HC::WIDTH],
}
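// Layout note (inferred from the field order above; illustrative only):
// `from_vec` below consumes the flat public-input vector as
//   [ trace_cap | ctl_zs_last | ctl_challenges | challenger_state_before | challenger_state_after ]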
/// Similar to the unstable `Iterator::next_chunk`. Could be replaced with that when it's stable.
@ -66,7 +69,10 @@ fn next_chunk<T: Debug, const N: usize>(iter: &mut impl Iterator<Item = T>) -> [
.expect("Not enough elements")
}
impl<T: Copy + Eq + PartialEq + Debug> PublicInputs<T> {
impl<T: Copy + Eq + PartialEq + Debug, HC: HashConfig> PublicInputs<T, HC>
where
[(); HC::WIDTH]:,
{
pub(crate) fn from_vec(v: &[T], config: &StarkConfig) -> Self {
let mut iter = v.iter().copied();
let trace_cap = (0..config.fri_config.num_cap_elements())
@ -105,13 +111,17 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
inner_config: &StarkConfig,
) -> Result<()>
where
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let pis: [_; NUM_TABLES] = core::array::from_fn(|i| {
PublicInputs::from_vec(&self.recursive_proofs[i].public_inputs, inner_config)
PublicInputs::<F, C::HCO>::from_vec(
&self.recursive_proofs[i].public_inputs,
inner_config,
)
});
let mut challenger = Challenger::<F, C::Hasher>::new();
let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
for pi in &pis {
for h in &pi.trace_cap {
challenger.observe_elements(h);
@ -151,11 +161,12 @@ pub(crate) struct StarkWrapperCircuit<F, C, const D: usize>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::HCO::WIDTH]:,
{
pub(crate) circuit: CircuitData<F, C, D>,
pub(crate) stark_proof_target: StarkProofTarget<D>,
pub(crate) ctl_challenges_target: GrandProductChallengeSet<Target>,
pub(crate) init_challenger_state_target: [Target; SPONGE_WIDTH],
pub(crate) init_challenger_state_target: [Target; C::HCO::WIDTH],
pub(crate) zero_target: Target,
}
@ -163,7 +174,9 @@ impl<F, C, const D: usize> StarkWrapperCircuit<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
pub(crate) fn prove(
&self,
@ -212,7 +225,9 @@ impl<F, C, const D: usize> PlonkWrapperCircuit<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
pub(crate) fn prove(
&self,
@ -241,8 +256,9 @@ pub(crate) fn recursive_stark_circuit<
) -> StarkWrapperCircuit<F, C, D>
where
[(); S::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let mut builder = CircuitBuilder::<F, D>::new(circuit_config.clone());
let zero_target = builder.zero();
@ -281,7 +297,7 @@ where
let init_challenger_state_target = core::array::from_fn(|_| builder.add_virtual_public_input());
let mut challenger =
RecursiveChallenger::<F, C::Hasher, D>::from_state(init_challenger_state_target);
RecursiveChallenger::<F, C::HCO, C::Hasher, D>::from_state(init_challenger_state_target);
let challenges = proof_target.get_challenges::<F, C>(
&mut builder,
&mut challenger,
@ -345,8 +361,9 @@ fn verify_stark_proof_with_challenges_circuit<
ctl_vars: &[CtlCheckVarsTarget<F, D>],
inner_config: &StarkConfig,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); S::COLUMNS]:,
[(); C::HCO::WIDTH]:,
{
let zero = builder.zero();
let one = builder.one_extension();
@ -565,7 +582,7 @@ pub(crate) fn set_stark_proof_target<F, C: GenericConfig<D, F = F>, W, const D:
zero: Target,
) where
F: RichField + Extendable<D>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
W: Witness<F>,
{
witness.set_cap_target(&proof_target.trace_cap, &proof.trace_cap);

View File

@ -3,10 +3,11 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use plonky2::field::types::{Field, Sample};
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::plonk::config::GenericConfig;
use plonky2::util::transpose;
use plonky2_util::{log2_ceil, log2_strict};
@ -86,7 +87,8 @@ pub fn test_stark_circuit_constraints<
) -> Result<()>
where
[(); S::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
// Compute native constraint evaluation on random values.
let vars = StarkEvaluationVars {

View File

@ -5,7 +5,8 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::types::Field;
use plonky2::fri::verifier::verify_fri_proof;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::hash::hashing::HashConfig;
use plonky2::plonk::config::GenericConfig;
use plonky2::plonk::plonk_common::reduce_with_powers;
use crate::all_stark::{AllStark, Table};
@ -36,7 +37,8 @@ where
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let AllProofChallenges {
stark_challenges,
@ -118,7 +120,7 @@ pub(crate) fn verify_stark_proof_with_challenges<
) -> Result<()>
where
[(); S::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
{
log::debug!("Checking proof: {}", type_name::<S>());
validate_proof_shape(stark, proof, config, ctl_vars.len())?;
@ -218,7 +220,6 @@ where
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); S::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
{
let StarkProof {
trace_cap,

View File

@ -1,8 +1,11 @@
#![allow(clippy::upper_case_acronyms)]
use std::collections::HashMap;
use std::time::Duration;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use eth_trie_utils::nibbles::Nibbles;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::Address;
use hex_literal::hex;
use keccak_hash::keccak;
@ -16,12 +19,13 @@ use plonky2_evm::generation::{GenerationInputs, TrieInputs};
use plonky2_evm::proof::BlockMetadata;
use plonky2_evm::prover::prove;
use plonky2_evm::verifier::verify_proof;
use plonky2_evm::Node;
type F = GoldilocksField;
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
/// The `add11_yml` test case from https://github.com/ethereum/tests: a simple
/// transaction that calls a contract which computes 1 + 1 and stores the result.
#[test]
fn add11_yml() -> anyhow::Result<()> {
init_logger();
@ -58,7 +62,7 @@ fn add11_yml() -> anyhow::Result<()> {
..AccountRlp::default()
};
let mut state_trie_before = PartialTrie::Empty;
let mut state_trie_before = HashedPartialTrie::from(Node::Empty);
state_trie_before.insert(
beneficiary_nibbles,
rlp::encode(&beneficiary_account_before).to_vec(),
@ -68,9 +72,9 @@ fn add11_yml() -> anyhow::Result<()> {
let tries_before = TrieInputs {
state_trie: state_trie_before,
transactions_trie: PartialTrie::Empty,
receipts_trie: PartialTrie::Empty,
storage_tries: vec![(Address::from_slice(&to), PartialTrie::Empty)],
transactions_trie: Node::Empty.into(),
receipts_trie: Node::Empty.into(),
storage_tries: vec![(Address::from_slice(&to), Node::Empty.into())],
};
let txn = hex!("f863800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d87830186a0801ba0ffb600e63115a7362e7811894a91d8ba4330e526f22121c994c4692035dfdfd5a06198379fcac8de3dbfac48b165df4bf88e2088f294b61efb9a65fe2281c76e16");
@ -110,15 +114,15 @@ fn add11_yml() -> anyhow::Result<()> {
balance: 0xde0b6b3a76586a0u64.into(),
code_hash,
// Storage map: { 0 => 2 }
storage_root: PartialTrie::Leaf {
storage_root: HashedPartialTrie::from(Node::Leaf {
nibbles: Nibbles::from_h256_be(keccak([0u8; 32])),
value: vec![2],
}
.calc_hash(),
})
.hash(),
..AccountRlp::default()
};
let mut expected_state_trie_after = PartialTrie::Empty;
let mut expected_state_trie_after = HashedPartialTrie::from(Node::Empty);
expected_state_trie_after.insert(
beneficiary_nibbles,
rlp::encode(&beneficiary_account_after).to_vec(),
@ -128,7 +132,7 @@ fn add11_yml() -> anyhow::Result<()> {
assert_eq!(
proof.public_values.trie_roots_after.state_root,
expected_state_trie_after.calc_hash()
expected_state_trie_after.hash()
);
verify_proof(&all_stark, proof, &config)

View File

@ -1,8 +1,11 @@
#![allow(clippy::upper_case_acronyms)]
use std::collections::HashMap;
use std::time::Duration;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use eth_trie_utils::nibbles::Nibbles;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::{Address, U256};
use hex_literal::hex;
use keccak_hash::keccak;
@ -17,6 +20,7 @@ use plonky2_evm::generation::{GenerationInputs, TrieInputs};
use plonky2_evm::proof::BlockMetadata;
use plonky2_evm::prover::prove;
use plonky2_evm::verifier::verify_proof;
use plonky2_evm::Node;
type F = GoldilocksField;
const D: usize = 2;
@ -61,27 +65,28 @@ fn test_basic_smart_contract() -> anyhow::Result<()> {
};
let state_trie_before = {
let mut children = core::array::from_fn(|_| PartialTrie::Empty.into());
children[sender_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[sender_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: sender_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&sender_account_before).to_vec(),
}
.into();
children[to_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
children[to_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: to_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&to_account_before).to_vec(),
}
.into();
PartialTrie::Branch {
Node::Branch {
children,
value: vec![],
}
};
}
.into();
let tries_before = TrieInputs {
state_trie: state_trie_before,
transactions_trie: PartialTrie::Empty,
receipts_trie: PartialTrie::Empty,
transactions_trie: Node::Empty.into(),
receipts_trie: Node::Empty.into(),
storage_tries: vec![],
};
@ -110,7 +115,7 @@ fn test_basic_smart_contract() -> anyhow::Result<()> {
let proof = prove::<F, C, D>(&all_stark, &config, inputs, &mut timing)?;
timing.filter(Duration::from_millis(100)).print();
let expected_state_trie_after = {
let expected_state_trie_after: HashedPartialTrie = {
let txdata_gas = 2 * 16;
let gas_used = 21_000 + code_gas + txdata_gas;
@ -128,31 +133,32 @@ fn test_basic_smart_contract() -> anyhow::Result<()> {
..to_account_before
};
let mut children = core::array::from_fn(|_| PartialTrie::Empty.into());
children[beneficiary_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[beneficiary_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: beneficiary_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&beneficiary_account_after).to_vec(),
}
.into();
children[sender_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
children[sender_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: sender_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&sender_account_after).to_vec(),
}
.into();
children[to_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
children[to_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: to_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&to_account_after).to_vec(),
}
.into();
PartialTrie::Branch {
Node::Branch {
children,
value: vec![],
}
};
}
.into();
assert_eq!(
proof.public_values.trie_roots_after.state_root,
expected_state_trie_after.calc_hash()
expected_state_trie_after.hash()
);
verify_proof(&all_stark, proof, &config)

View File

@ -1,8 +1,10 @@
#![allow(clippy::upper_case_acronyms)]
use std::collections::HashMap;
use std::time::Duration;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::partial_trie::PartialTrie;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use keccak_hash::keccak;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::plonk::config::PoseidonGoldilocksConfig;
@ -14,6 +16,7 @@ use plonky2_evm::generation::{GenerationInputs, TrieInputs};
use plonky2_evm::proof::BlockMetadata;
use plonky2_evm::prover::prove;
use plonky2_evm::verifier::verify_proof;
use plonky2_evm::Node;
type F = GoldilocksField;
const D: usize = 2;
@ -30,14 +33,14 @@ fn test_empty_txn_list() -> anyhow::Result<()> {
let block_metadata = BlockMetadata::default();
let state_trie = PartialTrie::Empty;
let transactions_trie = PartialTrie::Empty;
let receipts_trie = PartialTrie::Empty;
let state_trie = HashedPartialTrie::from(Node::Empty);
let transactions_trie = HashedPartialTrie::from(Node::Empty);
let receipts_trie = HashedPartialTrie::from(Node::Empty);
let storage_tries = vec![];
let state_trie_root = state_trie.calc_hash();
let txns_trie_root = transactions_trie.calc_hash();
let receipts_trie_root = receipts_trie.calc_hash();
let state_trie_root = state_trie.hash();
let txns_trie_root = transactions_trie.hash();
let receipts_trie_root = receipts_trie.hash();
let mut contract_code = HashMap::new();
contract_code.insert(keccak(vec![]), vec![]);

View File

@ -0,0 +1,140 @@
use std::collections::HashMap;
use std::str::FromStr;
use std::time::Duration;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::nibbles::Nibbles;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::Address;
use hex_literal::hex;
use keccak_hash::keccak;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::plonk::config::PoseidonGoldilocksConfig;
use plonky2::util::timing::TimingTree;
use plonky2_evm::all_stark::AllStark;
use plonky2_evm::config::StarkConfig;
use plonky2_evm::generation::mpt::AccountRlp;
use plonky2_evm::generation::{GenerationInputs, TrieInputs};
use plonky2_evm::proof::BlockMetadata;
use plonky2_evm::prover::prove;
use plonky2_evm::verifier::verify_proof;
use plonky2_evm::Node;
type F = GoldilocksField;
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
/// The `selfBalanceGasCost` test case from https://github.com/ethereum/tests
#[test]
fn self_balance_gas_cost() -> anyhow::Result<()> {
init_logger();
let all_stark = AllStark::<F, D>::default();
let config = StarkConfig::standard_fast_config();
let beneficiary = hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba");
let sender = hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b");
let to = hex!("1000000000000000000000000000000000000000");
let beneficiary_state_key = keccak(beneficiary);
let sender_state_key = keccak(sender);
let to_state_key = keccak(to);
let beneficiary_nibbles = Nibbles::from_bytes_be(beneficiary_state_key.as_bytes()).unwrap();
let sender_nibbles = Nibbles::from_bytes_be(sender_state_key.as_bytes()).unwrap();
let to_nibbles = Nibbles::from_bytes_be(to_state_key.as_bytes()).unwrap();
let code = [
0x5a, 0x47, 0x5a, 0x90, 0x50, 0x90, 0x03, 0x60, 0x02, 0x90, 0x03, 0x60, 0x01, 0x55, 0x00,
];
let code_hash = keccak(code);
let beneficiary_account_before = AccountRlp::default();
let sender_account_before = AccountRlp {
balance: 0x3635c9adc5dea00000u128.into(),
..AccountRlp::default()
};
let to_account_before = AccountRlp {
code_hash,
..AccountRlp::default()
};
let mut state_trie_before = HashedPartialTrie::from(Node::Empty);
state_trie_before.insert(
beneficiary_nibbles,
rlp::encode(&beneficiary_account_before).to_vec(),
);
state_trie_before.insert(sender_nibbles, rlp::encode(&sender_account_before).to_vec());
state_trie_before.insert(to_nibbles, rlp::encode(&to_account_before).to_vec());
let tries_before = TrieInputs {
state_trie: state_trie_before,
transactions_trie: Node::Empty.into(),
receipts_trie: Node::Empty.into(),
storage_tries: vec![(Address::from_slice(&to), Node::Empty.into())],
};
let txn = hex!("f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509b");
let block_metadata = BlockMetadata {
block_beneficiary: Address::from(beneficiary),
block_base_fee: 0xa.into(),
..BlockMetadata::default()
};
let mut contract_code = HashMap::new();
contract_code.insert(keccak(vec![]), vec![]);
contract_code.insert(code_hash, code.to_vec());
let inputs = GenerationInputs {
signed_txns: vec![txn.to_vec()],
tries: tries_before,
contract_code,
block_metadata,
addresses: vec![],
};
let mut timing = TimingTree::new("prove", log::Level::Debug);
let proof = prove::<F, C, D>(&all_stark, &config, inputs, &mut timing)?;
timing.filter(Duration::from_millis(100)).print();
let beneficiary_account_after = AccountRlp::default();
let sender_account_after = AccountRlp {
balance: 999999999999999568680u128.into(),
nonce: 1.into(),
..AccountRlp::default()
};
let to_account_after = AccountRlp {
code_hash,
// Storage map: { 1 => 5 }
storage_root: HashedPartialTrie::from(Node::Leaf {
// TODO: Could do keccak(pad32(1))
nibbles: Nibbles::from_str(
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6",
)
.unwrap(),
value: vec![5],
})
.hash(),
..AccountRlp::default()
};
let mut expected_state_trie_after = HashedPartialTrie::from(Node::Empty);
expected_state_trie_after.insert(
beneficiary_nibbles,
rlp::encode(&beneficiary_account_after).to_vec(),
);
expected_state_trie_after.insert(sender_nibbles, rlp::encode(&sender_account_after).to_vec());
expected_state_trie_after.insert(to_nibbles, rlp::encode(&to_account_after).to_vec());
assert_eq!(
proof.public_values.trie_roots_after.state_root,
expected_state_trie_after.hash()
);
verify_proof(&all_stark, proof, &config)
}
fn init_logger() {
let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "info"));
}

View File

@ -1,8 +1,11 @@
#![allow(clippy::upper_case_acronyms)]
use std::collections::HashMap;
use std::time::Duration;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use eth_trie_utils::nibbles::Nibbles;
use eth_trie_utils::partial_trie::{HashedPartialTrie, PartialTrie};
use ethereum_types::{Address, U256};
use hex_literal::hex;
use keccak_hash::keccak;
@ -16,6 +19,7 @@ use plonky2_evm::generation::{GenerationInputs, TrieInputs};
use plonky2_evm::proof::BlockMetadata;
use plonky2_evm::prover::prove;
use plonky2_evm::verifier::verify_proof;
use plonky2_evm::Node;
type F = GoldilocksField;
const D: usize = 2;
@ -45,19 +49,20 @@ fn test_simple_transfer() -> anyhow::Result<()> {
let sender_account_before = AccountRlp {
nonce: 5.into(),
balance: eth_to_wei(100_000.into()),
storage_root: PartialTrie::Empty.calc_hash(),
storage_root: HashedPartialTrie::from(Node::Empty).hash(),
code_hash: keccak([]),
};
let to_account_before = AccountRlp::default();
let state_trie_before = PartialTrie::Leaf {
let state_trie_before = Node::Leaf {
nibbles: sender_nibbles,
value: rlp::encode(&sender_account_before).to_vec(),
};
}
.into();
let tries_before = TrieInputs {
state_trie: state_trie_before,
transactions_trie: PartialTrie::Empty,
receipts_trie: PartialTrie::Empty,
transactions_trie: HashedPartialTrie::from(Node::Empty),
receipts_trie: HashedPartialTrie::from(Node::Empty),
storage_tries: vec![],
};
@ -85,7 +90,7 @@ fn test_simple_transfer() -> anyhow::Result<()> {
let proof = prove::<F, C, D>(&all_stark, &config, inputs, &mut timing)?;
timing.filter(Duration::from_millis(100)).print();
let expected_state_trie_after = {
let expected_state_trie_after: HashedPartialTrie = {
let txdata_gas = 2 * 16;
let gas_used = 21_000 + txdata_gas;
@ -103,31 +108,32 @@ fn test_simple_transfer() -> anyhow::Result<()> {
..to_account_before
};
let mut children = core::array::from_fn(|_| PartialTrie::Empty.into());
children[beneficiary_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[beneficiary_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: beneficiary_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&beneficiary_account_after).to_vec(),
}
.into();
children[sender_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
children[sender_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: sender_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&sender_account_after).to_vec(),
}
.into();
children[to_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
children[to_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: to_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&to_account_after).to_vec(),
}
.into();
PartialTrie::Branch {
Node::Branch {
children,
value: vec![],
}
.into()
};
assert_eq!(
proof.public_values.trie_roots_after.state_root,
expected_state_trie_after.calc_hash()
expected_state_trie_after.hash()
);
verify_proof(&all_stark, proof, &config)

View File

@ -4,17 +4,18 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::field::types::Sample;
use plonky2::hash::hash_types::{BytesHash, RichField};
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::keccak::KeccakHash;
use plonky2::hash::poseidon::Poseidon;
use plonky2::plonk::config::Hasher;
use plonky2::hash::poseidon::{Poseidon, SPONGE_WIDTH};
use plonky2::plonk::config::{Hasher, KeccakHashConfig};
use tynm::type_name;
pub(crate) fn bench_keccak<F: RichField>(c: &mut Criterion) {
c.bench_function("keccak256", |b| {
b.iter_batched(
|| (BytesHash::<32>::rand(), BytesHash::<32>::rand()),
|(left, right)| <KeccakHash<32> as Hasher<F>>::two_to_one(left, right),
|(left, right)| {
<KeccakHash<32> as Hasher<F, KeccakHashConfig>>::two_to_one(left, right)
},
BatchSize::SmallInput,
)
});

View File

@ -1,17 +1,23 @@
#![feature(generic_const_exprs)]
mod allocator;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::hash::keccak::KeccakHash;
use plonky2::hash::merkle_tree::MerkleTree;
use plonky2::hash::poseidon::PoseidonHash;
use plonky2::plonk::config::Hasher;
use plonky2::plonk::config::{Hasher, KeccakHashConfig, PoseidonHashConfig};
use tynm::type_name;
const ELEMS_PER_LEAF: usize = 135;
pub(crate) fn bench_merkle_tree<F: RichField, H: Hasher<F>>(c: &mut Criterion) {
pub(crate) fn bench_merkle_tree<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(c: &mut Criterion)
where
[(); HC::WIDTH]:,
{
let mut group = c.benchmark_group(&format!(
"merkle-tree<{}, {}>",
type_name::<F>(),
@ -23,14 +29,14 @@ pub(crate) fn bench_merkle_tree<F: RichField, H: Hasher<F>>(c: &mut Criterion) {
let size = 1 << size_log;
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| {
let leaves = vec![F::rand_vec(ELEMS_PER_LEAF); size];
b.iter(|| MerkleTree::<F, H>::new(leaves.clone(), 0));
b.iter(|| MerkleTree::<F, HC, H>::new(leaves.clone(), 0));
});
}
}
fn criterion_benchmark(c: &mut Criterion) {
bench_merkle_tree::<GoldilocksField, PoseidonHash>(c);
bench_merkle_tree::<GoldilocksField, KeccakHash<25>>(c);
bench_merkle_tree::<GoldilocksField, PoseidonHashConfig, PoseidonHash>(c);
bench_merkle_tree::<GoldilocksField, KeccakHashConfig, KeccakHash<25>>(c);
}
criterion_group!(benches, criterion_benchmark);

View File

@ -3,6 +3,9 @@
// put it in `src/bin/`, but then we wouldn't have access to
// `[dev-dependencies]`.
#![feature(generic_const_exprs)]
#![allow(clippy::upper_case_acronyms)]
use core::num::ParseIntError;
use core::ops::RangeInclusive;
use core::str::FromStr;
@ -11,6 +14,7 @@ use anyhow::{anyhow, Context as _, Result};
use log::{info, Level, LevelFilter};
use plonky2::gates::noop::NoopGate;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierOnlyCircuitData};
@ -62,7 +66,11 @@ struct Options {
fn dummy_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
config: &CircuitConfig,
log2_size: usize,
) -> Result<ProofTuple<F, C, D>> {
) -> Result<ProofTuple<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
// 'size' is a degree, but we want a number of noop gates. A non-zero amount of padding will be added, and the size will be rounded up to the next power of two. To hit our target size, we go just over the previous power of two and hope the padding stays under half the circuit.
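// For example (assuming the standard padding arm of this match, which the
// hunk below truncates): to target degree 2^10, we would add 2^9 + 1 noop
// gates, so rounding up lands on 2^10 exactly.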
let num_dummy_gates = match log2_size {
0 => return Err(anyhow!("size must be at least 1")),
@ -81,7 +89,7 @@ fn dummy_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D
let inputs = PartialWitness::new();
let mut timing = TimingTree::new("prove", Level::Debug);
let proof = prove(&data.prover_only, &data.common, inputs, &mut timing)?;
let proof = prove::<F, C, D>(&data.prover_only, &data.common, inputs, &mut timing)?;
timing.print();
data.verify(proof.clone())?;
@ -99,7 +107,11 @@ fn recursive_proof<
min_degree_bits: Option<usize>,
) -> Result<ProofTuple<F, C, D>>
where
InnerC::Hasher: AlgebraicHasher<F>,
InnerC::Hasher: AlgebraicHasher<F, InnerC::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
[(); InnerC::HCO::WIDTH]:,
[(); InnerC::HCI::WIDTH]:,
{
let (inner_proof, inner_vd, inner_cd) = inner;
let mut builder = CircuitBuilder::<F, D>::new(config.clone());
@ -128,7 +140,7 @@ where
pw.set_verifier_data_target(&inner_data, inner_vd);
let mut timing = TimingTree::new("prove", Level::Debug);
let proof = prove(&data.prover_only, &data.common, pw, &mut timing)?;
let proof = prove::<F, C, D>(&data.prover_only, &data.common, pw, &mut timing)?;
timing.print();
data.verify(proof.clone())?;
@ -141,7 +153,11 @@ fn test_serialization<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
proof: &ProofWithPublicInputs<F, C, D>,
vd: &VerifierOnlyCircuitData<C, D>,
cd: &CommonCircuitData<F, D>,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let proof_bytes = proof.to_bytes();
info!("Proof length: {} bytes", proof_bytes.len());
let proof_from_bytes = ProofWithPublicInputs::from_bytes(proof_bytes, cd)?;

View File

@ -1,3 +1,5 @@
#![allow(clippy::upper_case_acronyms)]
use anyhow::Result;
use plonky2::field::types::Field;
use plonky2::iop::witness::{PartialWitness, WitnessWrite};

View File

@ -1,3 +1,5 @@
#![allow(clippy::upper_case_acronyms)]
use anyhow::Result;
use plonky2::field::types::Field;
use plonky2::iop::witness::{PartialWitness, WitnessWrite};

View File

@ -1,3 +1,5 @@
#![allow(clippy::upper_case_acronyms)]
use core::marker::PhantomData;
use anyhow::Result;

View File

@ -5,16 +5,21 @@ use crate::fri::structure::{FriOpenings, FriOpeningsTarget};
use crate::fri::FriConfig;
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
use crate::hash::hash_types::{MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::challenger::{Challenger, RecursiveChallenger};
use crate::iop::target::Target;
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
impl<F: RichField, HCO: HashConfig, H: Hasher<F, HCO>> Challenger<F, HCO, H>
where
[(); HCO::WIDTH]:,
{
pub fn observe_openings<const D: usize>(&mut self, openings: &FriOpenings<F, D>)
where
F: RichField + Extendable<D>,
[(); HCO::WIDTH]:,
{
for v in &openings.batches {
self.observe_extension_elements(&v.values);
@ -23,7 +28,7 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
pub fn fri_challenges<C: GenericConfig<D, F = F>, const D: usize>(
&mut self,
commit_phase_merkle_caps: &[MerkleCap<F, C::Hasher>],
commit_phase_merkle_caps: &[MerkleCap<F, C::HCO, C::Hasher>],
final_poly: &PolynomialCoeffs<F::Extension>,
pow_witness: F,
degree_bits: usize,
@ -31,6 +36,8 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
) -> FriChallenges<F, D>
where
F: RichField + Extendable<D>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let num_fri_queries = config.num_query_rounds;
let lde_size = 1 << (degree_bits + config.rate_bits);
@ -41,7 +48,7 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
let fri_betas = commit_phase_merkle_caps
.iter()
.map(|cap| {
self.observe_cap(cap);
self.observe_cap::<C::HCO, C::Hasher>(cap);
self.get_extension_challenge::<D>()
})
.collect();
@ -64,10 +71,15 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
}
}
impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
RecursiveChallenger<F, H, D>
impl<F: RichField + Extendable<D>, HCO: HashConfig, H: AlgebraicHasher<F, HCO>, const D: usize>
RecursiveChallenger<F, HCO, H, D>
where
[(); HCO::WIDTH]:,
{
pub fn observe_openings(&mut self, openings: &FriOpeningsTarget<D>) {
pub fn observe_openings(&mut self, openings: &FriOpeningsTarget<D>)
where
[(); HCO::WIDTH]:,
{
for v in &openings.batches {
self.observe_extension_elements(&v.values);
}

View File

@ -14,6 +14,7 @@ use crate::fri::prover::fri_proof;
use crate::fri::structure::{FriBatchInfo, FriInstanceInfo};
use crate::fri::FriParams;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleTree;
use crate::iop::challenger::Challenger;
use crate::plonk::config::GenericConfig;
@ -29,7 +30,7 @@ pub const SALT_SIZE: usize = 4;
pub struct PolynomialBatch<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
{
pub polynomials: Vec<PolynomialCoeffs<F>>,
pub merkle_tree: MerkleTree<F, C::Hasher>,
pub merkle_tree: MerkleTree<F, C::HCO, C::Hasher>,
pub degree_log: usize,
pub rate_bits: usize,
pub blinding: bool,
@ -46,7 +47,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
cap_height: usize,
timing: &mut TimingTree,
fft_root_table: Option<&FftRootTable<F>>,
) -> Self {
) -> Self
where
[(); C::HCO::WIDTH]:,
{
let coeffs = timed!(
timing,
"IFFT",
@ -71,7 +75,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
cap_height: usize,
timing: &mut TimingTree,
fft_root_table: Option<&FftRootTable<F>>,
) -> Self {
) -> Self
where
[(); C::HCO::WIDTH]:,
{
let degree = polynomials[0].len();
let lde_values = timed!(
timing,
@ -161,10 +168,14 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub fn prove_openings(
instance: &FriInstanceInfo<F, D>,
oracles: &[&Self],
challenger: &mut Challenger<F, C::Hasher>,
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
fri_params: &FriParams,
timing: &mut TimingTree,
) -> FriProof<F, C::Hasher, D> {
) -> FriProof<F, C::HCO, C::Hasher, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
assert!(D > 1, "Not implemented for D=1.");
let alpha = challenger.get_extension_challenge::<D>();
let mut alpha = ReducingFactor::new(alpha);

View File

@ -10,6 +10,7 @@ use crate::field::polynomial::PolynomialCoeffs;
use crate::fri::FriParams;
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
use crate::hash::hash_types::{MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_proofs::{MerkleProof, MerkleProofTarget};
use crate::hash::merkle_tree::MerkleCap;
use crate::hash::path_compression::{compress_merkle_proofs, decompress_merkle_proofs};
@ -22,9 +23,14 @@ use crate::plonk::proof::{FriInferredElements, ProofChallenges};
/// Evaluations and Merkle proof produced by the prover in a FRI query step.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(bound = "")]
pub struct FriQueryStep<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> {
pub struct FriQueryStep<
F: RichField + Extendable<D>,
HC: HashConfig,
H: Hasher<F, HC>,
const D: usize,
> {
pub evals: Vec<F::Extension>,
pub merkle_proof: MerkleProof<F, H>,
pub merkle_proof: MerkleProof<F, HC, H>,
}
#[derive(Clone, Debug)]
@ -37,11 +43,11 @@ pub struct FriQueryStepTarget<const D: usize> {
/// before they are combined into a composition polynomial.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(bound = "")]
pub struct FriInitialTreeProof<F: RichField, H: Hasher<F>> {
pub evals_proofs: Vec<(Vec<F>, MerkleProof<F, H>)>,
pub struct FriInitialTreeProof<F: RichField, HC: HashConfig, H: Hasher<F, HC>> {
pub evals_proofs: Vec<(Vec<F>, MerkleProof<F, HC, H>)>,
}
impl<F: RichField, H: Hasher<F>> FriInitialTreeProof<F, H> {
impl<F: RichField, HC: HashConfig, H: Hasher<F, HC>> FriInitialTreeProof<F, HC, H> {
pub(crate) fn unsalted_eval(&self, oracle_index: usize, poly_index: usize, salted: bool) -> F {
self.unsalted_evals(oracle_index, salted)[poly_index]
}
@ -76,9 +82,14 @@ impl FriInitialTreeProofTarget {
/// Proof for a FRI query round.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(bound = "")]
pub struct FriQueryRound<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> {
pub initial_trees_proof: FriInitialTreeProof<F, H>,
pub steps: Vec<FriQueryStep<F, H, D>>,
pub struct FriQueryRound<
F: RichField + Extendable<D>,
HC: HashConfig,
H: Hasher<F, HC>,
const D: usize,
> {
pub initial_trees_proof: FriInitialTreeProof<F, HC, H>,
pub steps: Vec<FriQueryStep<F, HC, H, D>>,
}
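In words: a query round opens every initial oracle at the queried index (one Merkle proof per oracle, via `initial_trees_proof`), then for each arity-reduction step records the evaluations over the queried coset together with a Merkle proof into that step's commitment tree.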
#[derive(Clone, Debug)]
@ -90,22 +101,28 @@ pub struct FriQueryRoundTarget<const D: usize> {
/// Compressed proof of the FRI query rounds.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(bound = "")]
pub struct CompressedFriQueryRounds<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> {
pub struct CompressedFriQueryRounds<
F: RichField + Extendable<D>,
HC: HashConfig,
H: Hasher<F, HC>,
const D: usize,
> {
/// Query indices.
pub indices: Vec<usize>,
/// Map from initial indices `i` to the `FriInitialTreeProof` for the `i`th leaf.
pub initial_trees_proofs: HashMap<usize, FriInitialTreeProof<F, H>>,
pub initial_trees_proofs: HashMap<usize, FriInitialTreeProof<F, HC, H>>,
/// For each FRI query step, a map from indices `i` to the `FriQueryStep` for the `i`th leaf.
pub steps: Vec<HashMap<usize, FriQueryStep<F, H, D>>>,
pub steps: Vec<HashMap<usize, FriQueryStep<F, HC, H, D>>>,
}
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(bound = "")]
pub struct FriProof<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> {
pub struct FriProof<F: RichField + Extendable<D>, HC: HashConfig, H: Hasher<F, HC>, const D: usize>
{
/// A Merkle cap for each reduced polynomial in the commit phase.
pub commit_phase_merkle_caps: Vec<MerkleCap<F, H>>,
pub commit_phase_merkle_caps: Vec<MerkleCap<F, HC, H>>,
/// Query round proofs.
pub query_round_proofs: Vec<FriQueryRound<F, H, D>>,
pub query_round_proofs: Vec<FriQueryRound<F, HC, H, D>>,
/// The final polynomial in coefficient form.
pub final_poly: PolynomialCoeffs<F::Extension>,
/// Witness showing that the prover did PoW.
@ -122,20 +139,31 @@ pub struct FriProofTarget<const D: usize> {
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(bound = "")]
pub struct CompressedFriProof<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> {
pub struct CompressedFriProof<
F: RichField + Extendable<D>,
HC: HashConfig,
H: Hasher<F, HC>,
const D: usize,
> {
/// A Merkle cap for each reduced polynomial in the commit phase.
pub commit_phase_merkle_caps: Vec<MerkleCap<F, H>>,
pub commit_phase_merkle_caps: Vec<MerkleCap<F, HC, H>>,
/// Compressed proofs of the FRI query rounds.
pub query_round_proofs: CompressedFriQueryRounds<F, H, D>,
pub query_round_proofs: CompressedFriQueryRounds<F, HC, H, D>,
/// The final polynomial in coefficient form.
pub final_poly: PolynomialCoeffs<F::Extension>,
/// Witness showing that the prover did PoW.
pub pow_witness: F,
}
impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> FriProof<F, H, D> {
impl<F: RichField + Extendable<D>, HCO: HashConfig, H: Hasher<F, HCO>, const D: usize>
FriProof<F, HCO, H, D>
{
/// Compress all the Merkle paths in the FRI proof and remove duplicate indices.
pub fn compress(self, indices: &[usize], params: &FriParams) -> CompressedFriProof<F, H, D> {
pub fn compress(
self,
indices: &[usize],
params: &FriParams,
) -> CompressedFriProof<F, HCO, H, D> {
let FriProof {
commit_phase_merkle_caps,
query_round_proofs,
@ -235,14 +263,19 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> FriProof<F, H,
}
}
impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriProof<F, H, D> {
impl<F: RichField + Extendable<D>, HCO: HashConfig, H: Hasher<F, HCO>, const D: usize>
CompressedFriProof<F, HCO, H, D>
{
/// Decompress all the Merkle paths in the FRI proof and reinsert duplicate indices.
pub(crate) fn decompress(
self,
challenges: &ProofChallenges<F, D>,
fri_inferred_elements: FriInferredElements<F, D>,
params: &FriParams,
) -> FriProof<F, H, D> {
) -> FriProof<F, HCO, H, D>
where
[(); HCO::WIDTH]:,
{
let CompressedFriProof {
commit_phase_merkle_caps,
query_round_proofs,

View File

@ -7,7 +7,7 @@ use crate::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use crate::fri::proof::{FriInitialTreeProof, FriProof, FriQueryRound, FriQueryStep};
use crate::fri::{FriConfig, FriParams};
use crate::hash::hash_types::RichField;
use crate::hash::hashing::{PlonkyPermutation, SPONGE_RATE};
use crate::hash::hashing::{HashConfig, PlonkyPermutation};
use crate::hash::merkle_tree::MerkleTree;
use crate::iop::challenger::Challenger;
use crate::plonk::config::{GenericConfig, Hasher};
@ -18,15 +18,19 @@ use crate::util::timing::TimingTree;
/// Builds a FRI proof.
pub fn fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
initial_merkle_trees: &[&MerkleTree<F, C::Hasher>],
initial_merkle_trees: &[&MerkleTree<F, C::HCO, C::Hasher>],
// Coefficients of the polynomial on which the LDT is performed. Only the first `1/rate` fraction of the coefficients is non-zero.
lde_polynomial_coeffs: PolynomialCoeffs<F::Extension>,
// Evaluation of the polynomial on the large domain.
lde_polynomial_values: PolynomialValues<F::Extension>,
challenger: &mut Challenger<F, C::Hasher>,
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
fri_params: &FriParams,
timing: &mut TimingTree,
) -> FriProof<F, C::Hasher, D> {
) -> FriProof<F, C::HCO, C::Hasher, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let n = lde_polynomial_values.len();
assert_eq!(lde_polynomial_coeffs.len(), n);
@ -62,16 +66,19 @@ pub fn fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const
}
type FriCommitedTrees<F, C, const D: usize> = (
Vec<MerkleTree<F, <C as GenericConfig<D>>::Hasher>>,
Vec<MerkleTree<F, <C as GenericConfig<D>>::HCO, <C as GenericConfig<D>>::Hasher>>,
PolynomialCoeffs<<F as Extendable<D>>::Extension>,
);
fn fri_committed_trees<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
mut coeffs: PolynomialCoeffs<F::Extension>,
mut values: PolynomialValues<F::Extension>,
challenger: &mut Challenger<F, C::Hasher>,
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
fri_params: &FriParams,
) -> FriCommitedTrees<F, C, D> {
) -> FriCommitedTrees<F, C, D>
where
[(); C::HCO::WIDTH]:,
{
let mut trees = Vec::new();
let mut shift = F::MULTIPLICATIVE_GROUP_GENERATOR;
@ -84,7 +91,8 @@ fn fri_committed_trees<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
.par_chunks(arity)
.map(|chunk: &[F::Extension]| flatten(chunk))
.collect();
let tree = MerkleTree::<F, C::Hasher>::new(chunked_values, fri_params.config.cap_height);
let tree =
MerkleTree::<F, C::HCO, C::Hasher>::new(chunked_values, fri_params.config.cap_height);
challenger.observe_cap(&tree.cap);
trees.push(tree);
@ -113,9 +121,13 @@ fn fri_committed_trees<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
/// Performs the proof-of-work (a.k.a. grinding) step of the FRI protocol. Returns the PoW witness.
fn fri_proof_of_work<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
challenger: &mut Challenger<F, C::Hasher>,
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
config: &FriConfig,
) -> F {
) -> F
where
[(); C::HCI::WIDTH]:,
[(); C::HCO::WIDTH]:,
{
let min_leading_zeros = config.proof_of_work_bits + (64 - F::order().bits()) as u32;
// The easiest implementation would be to repeatedly clone our Challenger. With each clone, we'd
@ -126,7 +138,7 @@ fn fri_proof_of_work<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, c
// since it stores vectors, which means allocations. We'd like a more compact state to clone.
//
// We know that a duplex will be performed right after we send the PoW witness, so we can ignore
// any output_buffer, which will be invalidated. We also know input_buffer.len() < SPONGE_WIDTH,
// any output_buffer, which will be invalidated. We also know input_buffer.len() < HCO::WIDTH,
// an invariant of Challenger.
//
// We separate the duplex operation into two steps, one which can be performed now, and the
@ -146,8 +158,10 @@ fn fri_proof_of_work<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, c
let mut duplex_state = duplex_intermediate_state;
duplex_state[witness_input_pos] = F::from_canonical_u64(candidate);
duplex_state =
<<C as GenericConfig<D>>::Hasher as Hasher<F>>::Permutation::permute(duplex_state);
let pow_response = duplex_state[SPONGE_RATE - 1];
<<C as GenericConfig<D>>::Hasher as Hasher<F, C::HCO>>::Permutation::permute(
duplex_state,
);
let pow_response = duplex_state[C::HCO::RATE - 1];
let leading_zeros = pow_response.to_canonical_u64().leading_zeros();
leading_zeros >= min_leading_zeros
})
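As a concrete check of the threshold: for the Goldilocks field the order is just under 2^64, so `F::order().bits()` is 64 and `min_leading_zeros` reduces to exactly `config.proof_of_work_bits`; a field with a 63-bit order would demand one extra leading zero.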
@ -167,12 +181,15 @@ fn fri_prover_query_rounds<
C: GenericConfig<D, F = F>,
const D: usize,
>(
initial_merkle_trees: &[&MerkleTree<F, C::Hasher>],
trees: &[MerkleTree<F, C::Hasher>],
challenger: &mut Challenger<F, C::Hasher>,
initial_merkle_trees: &[&MerkleTree<F, C::HCO, C::Hasher>],
trees: &[MerkleTree<F, C::HCO, C::Hasher>],
challenger: &mut Challenger<F, C::HCO, C::Hasher>,
n: usize,
fri_params: &FriParams,
) -> Vec<FriQueryRound<F, C::Hasher, D>> {
) -> Vec<FriQueryRound<F, C::HCO, C::Hasher, D>>
where
[(); C::HCO::WIDTH]:,
{
challenger
.get_n_challenges(fri_params.config.num_query_rounds)
.into_par_iter()
@ -188,11 +205,14 @@ fn fri_prover_query_round<
C: GenericConfig<D, F = F>,
const D: usize,
>(
initial_merkle_trees: &[&MerkleTree<F, C::Hasher>],
trees: &[MerkleTree<F, C::Hasher>],
initial_merkle_trees: &[&MerkleTree<F, C::HCO, C::Hasher>],
trees: &[MerkleTree<F, C::HCO, C::Hasher>],
mut x_index: usize,
fri_params: &FriParams,
) -> FriQueryRound<F, C::Hasher, D> {
) -> FriQueryRound<F, C::HCO, C::Hasher, D>
where
[(); C::HCO::WIDTH]:,
{
let mut query_steps = Vec::new();
let initial_proof = initial_merkle_trees
.iter()

View File

@ -14,6 +14,7 @@ use crate::gates::coset_interpolation::CosetInterpolationGate;
use crate::gates::gate::Gate;
use crate::gates::random_access::RandomAccessGate;
use crate::hash::hash_types::{MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::iop::ext_target::{flatten_target, ExtensionTarget};
use crate::iop::target::{BoolTarget, Target};
use crate::plonk::circuit_builder::CircuitBuilder;
@ -107,7 +108,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
proof: &FriProofTarget<D>,
params: &FriParams,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
{
if let Some(max_arity_bits) = params.max_arity_bits() {
self.check_recursion_config(max_arity_bits);
@ -175,13 +177,15 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
fn fri_verify_initial_proof<H: AlgebraicHasher<F>>(
fn fri_verify_initial_proof<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
x_index_bits: &[BoolTarget],
proof: &FriInitialTreeProofTarget,
initial_merkle_caps: &[MerkleCapTarget],
cap_index: Target,
) {
) where
[(); HC::WIDTH]:,
{
for (i, ((evals, merkle_proof), cap)) in proof
.evals_proofs
.iter()
@ -191,7 +195,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
with_context!(
self,
&format!("verify {i}'th initial Merkle proof"),
self.verify_merkle_proof_to_cap_with_cap_index::<H>(
self.verify_merkle_proof_to_cap_with_cap_index::<HC, H>(
evals.clone(),
x_index_bits,
cap_index,
@ -258,7 +262,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
round_proof: &FriQueryRoundTarget<D>,
params: &FriParams,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
{
let n_log = log2_strict(n);
@ -272,7 +277,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
with_context!(
self,
"check FRI initial proof",
self.fri_verify_initial_proof::<C::Hasher>(
self.fri_verify_initial_proof::<C::HCO, C::Hasher>(
&x_index_bits,
&round_proof.initial_trees_proof,
initial_merkle_caps,
@ -332,7 +337,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
with_context!(
self,
"verify FRI round Merkle proof.",
self.verify_merkle_proof_to_cap_with_cap_index::<C::Hasher>(
self.verify_merkle_proof_to_cap_with_cap_index::<C::HCO, C::Hasher>(
flatten_target(evals),
&coset_index_bits,
cap_index,

View File

@ -9,7 +9,7 @@ use crate::plonk::config::GenericConfig;
use crate::plonk::plonk_common::salt_size;
pub(crate) fn validate_fri_proof_shape<F, C, const D: usize>(
proof: &FriProof<F, C::Hasher, D>,
proof: &FriProof<F, C::HCO, C::Hasher, D>,
instance: &FriInstanceInfo<F, D>,
params: &FriParams,
) -> anyhow::Result<()>

View File

@ -10,6 +10,7 @@ use crate::fri::structure::{FriBatchInfo, FriInstanceInfo, FriOpenings};
use crate::fri::validate_shape::validate_fri_proof_shape;
use crate::fri::{FriConfig, FriParams};
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_proofs::verify_merkle_proof_to_cap;
use crate::hash::merkle_tree::MerkleCap;
use crate::plonk::config::{GenericConfig, Hasher};
@ -58,18 +59,17 @@ pub(crate) fn fri_verify_proof_of_work<F: RichField + Extendable<D>, const D: us
Ok(())
}
pub fn verify_fri_proof<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub fn verify_fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
instance: &FriInstanceInfo<F, D>,
openings: &FriOpenings<F, D>,
challenges: &FriChallenges<F, D>,
initial_merkle_caps: &[MerkleCap<F, C::Hasher>],
proof: &FriProof<F, C::Hasher, D>,
initial_merkle_caps: &[MerkleCap<F, C::HCO, C::Hasher>],
proof: &FriProof<F, C::HCO, C::Hasher, D>,
params: &FriParams,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
{
validate_fri_proof_shape::<F, C, D>(proof, instance, params)?;
// Size of the LDE domain.
@ -107,13 +107,16 @@ pub fn verify_fri_proof<
Ok(())
}
fn fri_verify_initial_proof<F: RichField, H: Hasher<F>>(
fn fri_verify_initial_proof<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
x_index: usize,
proof: &FriInitialTreeProof<F, H>,
initial_merkle_caps: &[MerkleCap<F, H>],
) -> Result<()> {
proof: &FriInitialTreeProof<F, HC, H>,
initial_merkle_caps: &[MerkleCap<F, HC, H>],
) -> Result<()>
where
[(); HC::WIDTH]:,
{
for ((evals, merkle_proof), cap) in proof.evals_proofs.iter().zip(initial_merkle_caps) {
verify_merkle_proof_to_cap::<F, H>(evals.clone(), x_index, cap, merkle_proof)?;
verify_merkle_proof_to_cap::<F, HC, H>(evals.clone(), x_index, cap, merkle_proof)?;
}
Ok(())
@ -125,7 +128,7 @@ pub(crate) fn fri_combine_initial<
const D: usize,
>(
instance: &FriInstanceInfo<F, D>,
proof: &FriInitialTreeProof<F, C::Hasher>,
proof: &FriInitialTreeProof<F, C::HCO, C::Hasher>,
alpha: F::Extension,
subgroup_x: F,
precomputed_reduced_evals: &PrecomputedReducedOpenings<F, D>,
@ -168,14 +171,17 @@ fn fri_verifier_query_round<
instance: &FriInstanceInfo<F, D>,
challenges: &FriChallenges<F, D>,
precomputed_reduced_evals: &PrecomputedReducedOpenings<F, D>,
initial_merkle_caps: &[MerkleCap<F, C::Hasher>],
proof: &FriProof<F, C::Hasher, D>,
initial_merkle_caps: &[MerkleCap<F, C::HCO, C::Hasher>],
proof: &FriProof<F, C::HCO, C::Hasher, D>,
mut x_index: usize,
n: usize,
round_proof: &FriQueryRound<F, C::Hasher, D>,
round_proof: &FriQueryRound<F, C::HCO, C::Hasher, D>,
params: &FriParams,
) -> Result<()> {
fri_verify_initial_proof::<F, C::Hasher>(
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
{
fri_verify_initial_proof::<F, C::HCO, C::Hasher>(
x_index,
&round_proof.initial_trees_proof,
initial_merkle_caps,
@ -216,7 +222,7 @@ fn fri_verifier_query_round<
challenges.fri_betas[i],
);
verify_merkle_proof_to_cap::<F, C::Hasher>(
verify_merkle_proof_to_cap::<F, C::HCO, C::Hasher>(
flatten(evals),
coset_index,
&proof.commit_phase_merkle_caps[i],

View File

@ -3,18 +3,20 @@ use itertools::Itertools;
use crate::field::extension::Extendable;
use crate::fri::proof::{FriProof, FriProofTarget};
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::iop::witness::WitnessWrite;
use crate::plonk::config::AlgebraicHasher;
/// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`.
pub fn set_fri_proof_target<F, W, H, const D: usize>(
pub fn set_fri_proof_target<F, W, HC, H, const D: usize>(
witness: &mut W,
fri_proof_target: &FriProofTarget<D>,
fri_proof: &FriProof<F, H, D>,
fri_proof: &FriProof<F, HC, H, D>,
) where
F: RichField + Extendable<D>,
W: WitnessWrite<F> + ?Sized,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
{
witness.set_target(fri_proof_target.pow_witness, fri_proof.pow_witness);

View File

@ -1,27 +1,27 @@
use crate::field::extension::Extendable;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::SPONGE_WIDTH;
use crate::hash::hashing::HashConfig;
use crate::iop::target::{BoolTarget, Target};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::config::AlgebraicHasher;
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
pub fn permute<H: AlgebraicHasher<F>>(
pub fn permute<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
inputs: [Target; SPONGE_WIDTH],
) -> [Target; SPONGE_WIDTH] {
inputs: [Target; HC::WIDTH],
) -> [Target; HC::WIDTH] {
// We don't want to swap any inputs, so set that wire to 0.
let _false = self._false();
self.permute_swapped::<H>(inputs, _false)
self.permute_swapped::<HC, H>(inputs, _false)
}
/// Conditionally swap two chunks of the inputs (useful in verifying Merkle proofs), then apply
/// a cryptographic permutation.
pub(crate) fn permute_swapped<H: AlgebraicHasher<F>>(
pub(crate) fn permute_swapped<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
inputs: [Target; SPONGE_WIDTH],
inputs: [Target; HC::WIDTH],
swap: BoolTarget,
) -> [Target; SPONGE_WIDTH] {
) -> [Target; HC::WIDTH] {
H::permute_swapped(inputs, swap, self)
}
}
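A sketch of calling this gadget from circuit-building code, assuming the Poseidon config/hasher pairing this diff introduces (`PoseidonHashConfig` with `PoseidonHash`); the setup around it is hypothetical:

    // Permute a width-12 state of virtual targets in-circuit.
    let state: [Target; PoseidonHashConfig::WIDTH] =
        core::array::from_fn(|_| builder.add_virtual_target());
    let _permuted = builder.permute::<PoseidonHashConfig, PoseidonHash>(state);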

View File

@ -8,6 +8,7 @@ use crate::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use crate::field::types::{Field, Sample};
use crate::gates::gate::Gate;
use crate::hash::hash_types::{HashOut, RichField};
use crate::hash::hashing::HashConfig;
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
@ -93,7 +94,11 @@ pub fn test_eval_fns<
const D: usize,
>(
gate: G,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
// Test that `eval_unfiltered` and `eval_unfiltered_base` are coherent.
let wires_base = F::rand_vec(gate.num_wires());
let constants_base = F::rand_vec(gate.num_constants());
@ -159,5 +164,5 @@ pub fn test_eval_fns<
let data = builder.build::<C>();
let proof = data.prove(pw)?;
verify(proof, &data.verifier_only, &data.common)
verify::<F, C, D>(proof, &data.verifier_only, &data.common)
}

View File

@ -10,9 +10,8 @@ use crate::gates::gate::Gate;
use crate::gates::poseidon_mds::PoseidonMdsGate;
use crate::gates::util::StridedConstraintConsumer;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::SPONGE_WIDTH;
use crate::hash::poseidon;
use crate::hash::poseidon::Poseidon;
use crate::hash::poseidon::{Poseidon, SPONGE_WIDTH};
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
@ -510,8 +509,7 @@ mod tests {
use crate::field::types::Field;
use crate::gates::gate_testing::{test_eval_fns, test_low_degree};
use crate::gates::poseidon::PoseidonGate;
use crate::hash::hashing::SPONGE_WIDTH;
use crate::hash::poseidon::Poseidon;
use crate::hash::poseidon::{Poseidon, SPONGE_WIDTH};
use crate::iop::generator::generate_partial_witness;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartialWitness, Witness, WitnessWrite};

View File

@ -11,8 +11,7 @@ use crate::field::types::Field;
use crate::gates::gate::Gate;
use crate::gates::util::StridedConstraintConsumer;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::SPONGE_WIDTH;
use crate::hash::poseidon::Poseidon;
use crate::hash::poseidon::{Poseidon, SPONGE_WIDTH};
use crate::iop::ext_target::{ExtensionAlgebraTarget, ExtensionTarget};
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;

View File

@ -1,6 +1,7 @@
//! Concrete instantiation of a hash function.
use alloc::vec::Vec;
use core::fmt::Debug;
use crate::field::extension::Extendable;
use crate::hash::hash_types::{HashOut, HashOutTarget, RichField};
@ -8,62 +9,81 @@ use crate::iop::target::Target;
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::config::AlgebraicHasher;
pub(crate) const SPONGE_RATE: usize = 8;
pub(crate) const SPONGE_CAPACITY: usize = 4;
pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY;
pub trait HashConfig: Clone + Debug + Eq + PartialEq {
const RATE: usize;
const WIDTH: usize;
}
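An implementor of this trait just pins the two constants. A sketch mirroring the former `SPONGE_*` values (the real `PoseidonHashConfig` and `KeccakHashConfig` live in `plonk::config`):

    #[derive(Clone, Debug, Eq, PartialEq)]
    pub struct PoseidonHashConfig;

    impl HashConfig for PoseidonHashConfig {
        const RATE: usize = 8;   // former SPONGE_RATE
        const WIDTH: usize = 12; // former SPONGE_RATE + SPONGE_CAPACITY
    }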
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
pub fn hash_or_noop<H: AlgebraicHasher<F>>(&mut self, inputs: Vec<Target>) -> HashOutTarget {
pub fn hash_or_noop<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
inputs: Vec<Target>,
) -> HashOutTarget
where
[(); HC::WIDTH]:,
{
let zero = self.zero();
if inputs.len() <= 4 {
HashOutTarget::from_partial(&inputs, zero)
} else {
self.hash_n_to_hash_no_pad::<H>(inputs)
self.hash_n_to_hash_no_pad::<HC, H>(inputs)
}
}
pub fn hash_n_to_hash_no_pad<H: AlgebraicHasher<F>>(
pub fn hash_n_to_hash_no_pad<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
inputs: Vec<Target>,
) -> HashOutTarget {
HashOutTarget::from_vec(self.hash_n_to_m_no_pad::<H>(inputs, 4))
) -> HashOutTarget
where
[(); HC::WIDTH]:,
{
HashOutTarget::from_vec(self.hash_n_to_m_no_pad::<HC, H>(inputs, 4))
}
pub fn hash_n_to_m_no_pad<H: AlgebraicHasher<F>>(
pub fn hash_n_to_m_no_pad<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
inputs: Vec<Target>,
num_outputs: usize,
) -> Vec<Target> {
) -> Vec<Target>
where
[(); HC::WIDTH]:,
{
let zero = self.zero();
let mut state = [zero; SPONGE_WIDTH];
let mut state = [zero; HC::WIDTH];
// Absorb all input chunks.
for input_chunk in inputs.chunks(SPONGE_RATE) {
for input_chunk in inputs.chunks(HC::RATE) {
// Overwrite the first r elements with the inputs. This differs from a standard sponge,
// where we would xor or add in the inputs. This is a well-known variant, though,
// sometimes called "overwrite mode".
state[..input_chunk.len()].copy_from_slice(input_chunk);
state = self.permute::<H>(state);
state = self.permute::<HC, H>(state);
}
// Squeeze until we have the desired number of outputs.
let mut outputs = Vec::with_capacity(num_outputs);
loop {
for i in 0..SPONGE_RATE {
for i in 0..HC::RATE {
outputs.push(state[i]);
if outputs.len() == num_outputs {
return outputs;
}
}
state = self.permute::<H>(state);
state = self.permute::<HC, H>(state);
}
}
}
/// A one-way compression function which takes two ~256-bit inputs and returns a ~256-bit output.
pub fn compress<F: RichField, P: PlonkyPermutation<F>>(x: HashOut<F>, y: HashOut<F>) -> HashOut<F> {
let mut perm_inputs = [F::ZERO; SPONGE_WIDTH];
pub fn compress<F: RichField, HC: HashConfig, P: PlonkyPermutation<F, HC>>(
x: HashOut<F>,
y: HashOut<F>,
) -> HashOut<F>
where
[(); HC::WIDTH]:,
{
let mut perm_inputs = [F::ZERO; HC::WIDTH];
perm_inputs[..4].copy_from_slice(&x.elements);
perm_inputs[4..8].copy_from_slice(&y.elements);
HashOut {
@ -72,20 +92,25 @@ pub fn compress<F: RichField, P: PlonkyPermutation<F>>(x: HashOut<F>, y: HashOut
}
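This is the two-to-one compression used for interior Merkle nodes; a hedged usage sketch with the Poseidon instantiation named later in this diff (cf. `PoseidonHash::two_to_one`):

    // Combine two child digests into a parent digest.
    let parent: HashOut<F> =
        compress::<F, PoseidonHashConfig, PoseidonPermutation>(left, right);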
/// Permutation that can be used in the sponge construction for an algebraic hash.
pub trait PlonkyPermutation<F: RichField> {
fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH];
pub trait PlonkyPermutation<F: RichField, HC: HashConfig> {
fn permute(input: [F; HC::WIDTH]) -> [F; HC::WIDTH]
where
[(); HC::WIDTH]:;
}
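The `[(); HC::WIDTH]:` bound is the `generic_const_exprs` idiom this refactor leans on throughout. A purely illustrative implementor (a hypothetical `IdentityPermutation`, not part of this diff) shows the required shape:

    pub struct IdentityPermutation;

    impl<F: RichField, HC: HashConfig> PlonkyPermutation<F, HC> for IdentityPermutation {
        fn permute(input: [F; HC::WIDTH]) -> [F; HC::WIDTH]
        where
            [(); HC::WIDTH]:,
        {
            input // a real permutation would mix the state here
        }
    }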
/// Hash a message without any padding step. Note that this can enable length-extension attacks.
/// However, it is still collision-resistant in cases where the input has a fixed length.
pub fn hash_n_to_m_no_pad<F: RichField, P: PlonkyPermutation<F>>(
pub fn hash_n_to_m_no_pad<F: RichField, HC: HashConfig, P: PlonkyPermutation<F, HC>>(
inputs: &[F],
num_outputs: usize,
) -> Vec<F> {
let mut state = [F::ZERO; SPONGE_WIDTH];
) -> Vec<F>
where
[(); HC::WIDTH]:,
{
let mut state = [F::ZERO; HC::WIDTH];
// Absorb all input chunks.
for input_chunk in inputs.chunks(SPONGE_RATE) {
for input_chunk in inputs.chunks(HC::RATE) {
state[..input_chunk.len()].copy_from_slice(input_chunk);
state = P::permute(state);
}
@ -93,7 +118,7 @@ pub fn hash_n_to_m_no_pad<F: RichField, P: PlonkyPermutation<F>>(
// Squeeze until we have the desired number of outputs.
let mut outputs = Vec::new();
loop {
for &item in state.iter().take(SPONGE_RATE) {
for &item in state.iter().take(HC::RATE) {
outputs.push(item);
if outputs.len() == num_outputs {
return outputs;
@ -103,6 +128,11 @@ pub fn hash_n_to_m_no_pad<F: RichField, P: PlonkyPermutation<F>>(
}
}
pub fn hash_n_to_hash_no_pad<F: RichField, P: PlonkyPermutation<F>>(inputs: &[F]) -> HashOut<F> {
HashOut::from_vec(hash_n_to_m_no_pad::<F, P>(inputs, 4))
pub fn hash_n_to_hash_no_pad<F: RichField, HC: HashConfig, P: PlonkyPermutation<F, HC>>(
inputs: &[F],
) -> HashOut<F>
where
[(); HC::WIDTH]:,
{
HashOut::from_vec(hash_n_to_m_no_pad::<F, HC, P>(inputs, 4))
}
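Callers now select the config/permutation pair explicitly. For example, hashing a slice of Goldilocks elements with Poseidon (a sketch; the generic instantiation matches `PoseidonHash::hash_no_pad` later in this diff):

    use crate::field::goldilocks_field::GoldilocksField;

    type F = GoldilocksField;
    let digest: HashOut<F> =
        hash_n_to_hash_no_pad::<F, PoseidonHashConfig, PoseidonPermutation>(&inputs);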

View File

@ -7,16 +7,23 @@ use itertools::Itertools;
use keccak_hash::keccak;
use crate::hash::hash_types::{BytesHash, RichField};
use crate::hash::hashing::{PlonkyPermutation, SPONGE_WIDTH};
use crate::plonk::config::Hasher;
use crate::hash::hashing::PlonkyPermutation;
use crate::plonk::config::{Hasher, KeccakHashConfig};
use crate::util::serialization::Write;
pub const SPONGE_RATE: usize = 8;
pub const SPONGE_CAPACITY: usize = 4;
pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY;
/// Keccak-256 pseudo-permutation (not necessarily one-to-one) used in the challenger.
/// A state `input: [F; 12]` is sent to the field representation of `H(input) || H(H(input)) || H(H(H(input)))`
/// where `H` is the Keccak-256 hash.
pub struct KeccakPermutation;
impl<F: RichField> PlonkyPermutation<F> for KeccakPermutation {
fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH] {
impl<F: RichField> PlonkyPermutation<F, KeccakHashConfig> for KeccakPermutation {
fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH]
where
[(); SPONGE_WIDTH]:,
{
let mut state = vec![0u8; SPONGE_WIDTH * size_of::<u64>()];
for i in 0..SPONGE_WIDTH {
state[i * size_of::<u64>()..(i + 1) * size_of::<u64>()]
@ -53,8 +60,7 @@ impl<F: RichField> PlonkyPermutation<F> for KeccakPermutation {
/// Keccak-256 hash function.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct KeccakHash<const N: usize>;
impl<F: RichField, const N: usize> Hasher<F> for KeccakHash<N> {
impl<F: RichField, const N: usize> Hasher<F, KeccakHashConfig> for KeccakHash<N> {
const HASH_SIZE: usize = N;
type Hash = BytesHash<N>;
type Permutation = KeccakPermutation;

View File

@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize};
use crate::field::extension::Extendable;
use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::SPONGE_WIDTH;
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::target::{BoolTarget, Target};
use crate::plonk::circuit_builder::CircuitBuilder;
@ -16,12 +16,12 @@ use crate::plonk::config::{AlgebraicHasher, Hasher};
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
#[serde(bound = "")]
pub struct MerkleProof<F: RichField, H: Hasher<F>> {
pub struct MerkleProof<F: RichField, HC: HashConfig, H: Hasher<F, HC>> {
/// The Merkle digest of each sibling subtree, starting from the bottommost layer.
pub siblings: Vec<H::Hash>,
}
impl<F: RichField, H: Hasher<F>> MerkleProof<F, H> {
impl<F: RichField, HC: HashConfig, H: Hasher<F, HC>> MerkleProof<F, HC, H> {
pub fn len(&self) -> usize {
self.siblings.len()
}
@ -39,24 +39,30 @@ pub struct MerkleProofTarget {
/// Verifies that the given leaf data is present at the given index in the Merkle tree with the
/// given root.
pub fn verify_merkle_proof<F: RichField, H: Hasher<F>>(
pub fn verify_merkle_proof<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
leaf_data: Vec<F>,
leaf_index: usize,
merkle_root: H::Hash,
proof: &MerkleProof<F, H>,
) -> Result<()> {
proof: &MerkleProof<F, HC, H>,
) -> Result<()>
where
[(); HC::WIDTH]:,
{
let merkle_cap = MerkleCap(vec![merkle_root]);
verify_merkle_proof_to_cap(leaf_data, leaf_index, &merkle_cap, proof)
}
/// Verifies that the given leaf data is present at the given index in the Merkle tree with the
/// given cap.
pub fn verify_merkle_proof_to_cap<F: RichField, H: Hasher<F>>(
pub fn verify_merkle_proof_to_cap<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
leaf_data: Vec<F>,
leaf_index: usize,
merkle_cap: &MerkleCap<F, H>,
proof: &MerkleProof<F, H>,
) -> Result<()> {
merkle_cap: &MerkleCap<F, HC, H>,
proof: &MerkleProof<F, HC, H>,
) -> Result<()>
where
[(); HC::WIDTH]:,
{
let mut index = leaf_index;
let mut current_digest = H::hash_or_noop(&leaf_data);
for &sibling_digest in proof.siblings.iter() {
@ -79,28 +85,32 @@ pub fn verify_merkle_proof_to_cap<F: RichField, H: Hasher<F>>(
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Verifies that the given leaf data is present at the given index in the Merkle tree with the
/// given root. The index is given by its little-endian bits.
pub fn verify_merkle_proof<H: AlgebraicHasher<F>>(
pub fn verify_merkle_proof<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
leaf_data: Vec<Target>,
leaf_index_bits: &[BoolTarget],
merkle_root: HashOutTarget,
proof: &MerkleProofTarget,
) {
) where
[(); HC::WIDTH]:,
{
let merkle_cap = MerkleCapTarget(vec![merkle_root]);
self.verify_merkle_proof_to_cap::<H>(leaf_data, leaf_index_bits, &merkle_cap, proof);
self.verify_merkle_proof_to_cap::<HC, H>(leaf_data, leaf_index_bits, &merkle_cap, proof);
}
/// Verifies that the given leaf data is present at the given index in the Merkle tree with the
/// given cap. The index is given by its little-endian bits.
pub fn verify_merkle_proof_to_cap<H: AlgebraicHasher<F>>(
pub fn verify_merkle_proof_to_cap<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
leaf_data: Vec<Target>,
leaf_index_bits: &[BoolTarget],
merkle_cap: &MerkleCapTarget,
proof: &MerkleProofTarget,
) {
) where
[(); HC::WIDTH]:,
{
let cap_index = self.le_sum(leaf_index_bits[proof.siblings.len()..].iter().copied());
self.verify_merkle_proof_to_cap_with_cap_index::<H>(
self.verify_merkle_proof_to_cap_with_cap_index::<HC, H>(
leaf_data,
leaf_index_bits,
cap_index,
@ -111,22 +121,27 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Same as `verify_merkle_proof_to_cap`, except with the final "cap index" as separate parameter,
/// rather than being contained in `leaf_index_bits`.
pub(crate) fn verify_merkle_proof_to_cap_with_cap_index<H: AlgebraicHasher<F>>(
pub(crate) fn verify_merkle_proof_to_cap_with_cap_index<
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
>(
&mut self,
leaf_data: Vec<Target>,
leaf_index_bits: &[BoolTarget],
cap_index: Target,
merkle_cap: &MerkleCapTarget,
proof: &MerkleProofTarget,
) {
) where
[(); HC::WIDTH]:,
{
let zero = self.zero();
let mut state: HashOutTarget = self.hash_or_noop::<H>(leaf_data);
let mut state: HashOutTarget = self.hash_or_noop::<HC, H>(leaf_data);
for (&bit, &sibling) in leaf_index_bits.iter().zip(&proof.siblings) {
let mut perm_inputs = [zero; SPONGE_WIDTH];
let mut perm_inputs = [zero; HC::WIDTH];
perm_inputs[..4].copy_from_slice(&state.elements);
perm_inputs[4..8].copy_from_slice(&sibling.elements);
let perm_outs = self.permute_swapped::<H>(perm_inputs, bit);
let perm_outs = self.permute_swapped::<HC, H>(perm_inputs, bit);
let hash_outs = perm_outs[0..4].try_into().unwrap();
state = HashOutTarget {
elements: hash_outs,
@ -192,7 +207,10 @@ mod tests {
let n = 1 << log_n;
let cap_height = 1;
let leaves = random_data::<F>(n, 7);
let tree = MerkleTree::<F, <C as GenericConfig<D>>::Hasher>::new(leaves, cap_height);
let tree =
MerkleTree::<F, <C as GenericConfig<D>>::HCO, <C as GenericConfig<D>>::Hasher>::new(
leaves, cap_height,
);
let i: usize = OsRng.gen_range(0..n);
let proof = tree.prove(i);
@ -214,7 +232,7 @@ mod tests {
pw.set_target(data[j], tree.leaves[i][j]);
}
builder.verify_merkle_proof_to_cap::<<C as GenericConfig<D>>::InnerHasher>(
builder.verify_merkle_proof_to_cap::<<C as GenericConfig<D>>::HCI, <C as GenericConfig<D>>::InnerHasher>(
data, &i_bits, &cap_t, &proof_t,
);

View File

@ -6,6 +6,7 @@ use plonky2_maybe_rayon::*;
use serde::{Deserialize, Serialize};
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_proofs::MerkleProof;
use crate::plonk::config::{GenericHashOut, Hasher};
use crate::util::log2_strict;
@ -15,9 +16,9 @@ use crate::util::log2_strict;
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
#[serde(bound = "")]
// TODO: Change H to GenericHashOut<F>, since this only cares about the hash, not the hasher.
pub struct MerkleCap<F: RichField, H: Hasher<F>>(pub Vec<H::Hash>);
pub struct MerkleCap<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(pub Vec<H::Hash>);
impl<F: RichField, H: Hasher<F>> MerkleCap<F, H> {
impl<F: RichField, HC: HashConfig, H: Hasher<F, HC>> MerkleCap<F, HC, H> {
pub fn len(&self) -> usize {
self.0.len()
}
@ -36,7 +37,7 @@ impl<F: RichField, H: Hasher<F>> MerkleCap<F, H> {
}
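Concretely, a cap at height `h` replaces the single root with the `2^h` subtree digests at that level, shortening every Merkle proof by `h` siblings: with `2^10` leaves and `cap_height = 3`, the cap holds 8 digests and each proof carries `10 - 3 = 7` siblings (cf. `num_layers` in `prove` below).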
#[derive(Clone, Debug)]
pub struct MerkleTree<F: RichField, H: Hasher<F>> {
pub struct MerkleTree<F: RichField, HC: HashConfig, H: Hasher<F, HC>> {
/// The data in the leaves of the Merkle tree.
pub leaves: Vec<Vec<F>>,
@ -51,7 +52,7 @@ pub struct MerkleTree<F: RichField, H: Hasher<F>> {
pub digests: Vec<H::Hash>,
/// The Merkle cap.
pub cap: MerkleCap<F, H>,
pub cap: MerkleCap<F, HC, H>,
}
fn capacity_up_to_mut<T>(v: &mut Vec<T>, len: usize) -> &mut [MaybeUninit<T>] {
@ -66,10 +67,13 @@ fn capacity_up_to_mut<T>(v: &mut Vec<T>, len: usize) -> &mut [MaybeUninit<T>] {
}
}
fn fill_subtree<F: RichField, H: Hasher<F>>(
fn fill_subtree<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
digests_buf: &mut [MaybeUninit<H::Hash>],
leaves: &[Vec<F>],
) -> H::Hash {
) -> H::Hash
where
[(); HC::WIDTH]:,
{
assert_eq!(leaves.len(), digests_buf.len() / 2 + 1);
if digests_buf.is_empty() {
H::hash_or_noop(&leaves[0])
@ -85,8 +89,8 @@ fn fill_subtree<F: RichField, H: Hasher<F>>(
let (left_leaves, right_leaves) = leaves.split_at(leaves.len() / 2);
let (left_digest, right_digest) = plonky2_maybe_rayon::join(
|| fill_subtree::<F, H>(left_digests_buf, left_leaves),
|| fill_subtree::<F, H>(right_digests_buf, right_leaves),
|| fill_subtree::<F, HC, H>(left_digests_buf, left_leaves),
|| fill_subtree::<F, HC, H>(right_digests_buf, right_leaves),
);
left_digest_mem.write(left_digest);
@ -95,12 +99,14 @@ fn fill_subtree<F: RichField, H: Hasher<F>>(
}
}
fn fill_digests_buf<F: RichField, H: Hasher<F>>(
fn fill_digests_buf<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
digests_buf: &mut [MaybeUninit<H::Hash>],
cap_buf: &mut [MaybeUninit<H::Hash>],
leaves: &[Vec<F>],
cap_height: usize,
) {
) where
[(); HC::WIDTH]:,
{
// Special case of a tree that's all cap. The usual case will panic because we'll try to split
// an empty slice into chunks of `0`. (We would not need this if there were a way to split into
// `blah` chunks as opposed to chunks _of_ `blah`.)
@ -126,12 +132,15 @@ fn fill_digests_buf<F: RichField, H: Hasher<F>>(
// We have `1 << cap_height` sub-trees, one for each entry in `cap`. They are totally
// independent, so we schedule one task for each. `digests_buf` and `leaves` are split
// into `1 << cap_height` slices, one for each sub-tree.
subtree_cap.write(fill_subtree::<F, H>(subtree_digests, subtree_leaves));
subtree_cap.write(fill_subtree::<F, HC, H>(subtree_digests, subtree_leaves));
},
);
}
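The buffer arithmetic behind the split: each of the `1 << cap_height` subtrees over `m` leaves owns `2m - 2` digest slots (every node below the cap, stored as left/right pairs), which is where the `leaves.len() == digests_buf.len() / 2 + 1` invariant asserted in `fill_subtree` comes from. For example, 8 leaves with `cap_height = 1` split into two subtrees of 4 leaves and 6 digests each.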
impl<F: RichField, H: Hasher<F>> MerkleTree<F, H> {
impl<F: RichField, HC: HashConfig, H: Hasher<F, HC>> MerkleTree<F, HC, H>
where
[(); HC::WIDTH]:,
{
pub fn new(leaves: Vec<Vec<F>>, cap_height: usize) -> Self {
let log2_leaves_len = log2_strict(leaves.len());
assert!(
@ -149,7 +158,7 @@ impl<F: RichField, H: Hasher<F>> MerkleTree<F, H> {
let digests_buf = capacity_up_to_mut(&mut digests, num_digests);
let cap_buf = capacity_up_to_mut(&mut cap, len_cap);
fill_digests_buf::<F, H>(digests_buf, cap_buf, &leaves[..], cap_height);
fill_digests_buf::<F, HC, H>(digests_buf, cap_buf, &leaves[..], cap_height);
unsafe {
// SAFETY: `fill_digests_buf` and `cap` initialized the spare capacity up to
@ -170,7 +179,7 @@ impl<F: RichField, H: Hasher<F>> MerkleTree<F, H> {
}
/// Create a Merkle proof from a leaf index.
pub fn prove(&self, leaf_index: usize) -> MerkleProof<F, H> {
pub fn prove(&self, leaf_index: usize) -> MerkleProof<F, HC, H> {
let cap_height = log2_strict(self.cap.len());
let num_layers = log2_strict(self.leaves.len()) - cap_height;
debug_assert_eq!(leaf_index >> (cap_height + num_layers), 0);
@ -220,15 +229,14 @@ mod tests {
(0..n).map(|_| F::rand_vec(k)).collect()
}
fn verify_all_leaves<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
fn verify_all_leaves<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
leaves: Vec<Vec<F>>,
cap_height: usize,
) -> Result<()> {
let tree = MerkleTree::<F, C::Hasher>::new(leaves.clone(), cap_height);
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
{
let tree = MerkleTree::<F, C::HCO, C::Hasher>::new(leaves.clone(), cap_height);
for (i, leaf) in leaves.into_iter().enumerate() {
let proof = tree.prove(i);
verify_merkle_proof_to_cap(leaf, i, &tree.cap, &proof)?;
@ -247,7 +255,9 @@ mod tests {
let cap_height = log_n + 1; // Should panic if `cap_height > len_n`.
let leaves = random_data::<F>(1 << log_n, 7);
let _ = MerkleTree::<F, <C as GenericConfig<D>>::Hasher>::new(leaves, cap_height);
let _ = MerkleTree::<F, <C as GenericConfig<D>>::HCO, <C as GenericConfig<D>>::Hasher>::new(
leaves, cap_height,
);
}
#[test]

View File

@ -5,15 +5,16 @@ use hashbrown::HashMap;
use num::Integer;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_proofs::MerkleProof;
use crate::plonk::config::Hasher;
/// Compress multiple Merkle proofs on the same tree by removing redundancy in the Merkle paths.
pub(crate) fn compress_merkle_proofs<F: RichField, H: Hasher<F>>(
pub(crate) fn compress_merkle_proofs<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
cap_height: usize,
indices: &[usize],
proofs: &[MerkleProof<F, H>],
) -> Vec<MerkleProof<F, H>> {
proofs: &[MerkleProof<F, HC, H>],
) -> Vec<MerkleProof<F, HC, H>> {
assert!(!proofs.is_empty());
let height = cap_height + proofs[0].siblings.len();
let num_leaves = 1 << height;
@ -53,13 +54,16 @@ pub(crate) fn compress_merkle_proofs<F: RichField, H: Hasher<F>>(
/// Decompress compressed Merkle proofs.
/// Note: The data and indices must be in the same order as in `compress_merkle_proofs`.
pub(crate) fn decompress_merkle_proofs<F: RichField, H: Hasher<F>>(
pub(crate) fn decompress_merkle_proofs<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
leaves_data: &[Vec<F>],
leaves_indices: &[usize],
compressed_proofs: &[MerkleProof<F, H>],
compressed_proofs: &[MerkleProof<F, HC, H>],
height: usize,
cap_height: usize,
) -> Vec<MerkleProof<F, H>> {
) -> Vec<MerkleProof<F, HC, H>>
where
[(); HC::WIDTH]:,
{
let num_leaves = 1 << height;
let compressed_proofs = compressed_proofs.to_vec();
let mut decompressed_proofs = Vec::with_capacity(compressed_proofs.len());
@ -130,7 +134,11 @@ mod tests {
let h = 10;
let cap_height = 3;
let vs = (0..1 << h).map(|_| vec![F::rand()]).collect::<Vec<_>>();
let mt = MerkleTree::<F, <C as GenericConfig<D>>::Hasher>::new(vs.clone(), cap_height);
let mt =
MerkleTree::<F, <C as GenericConfig<D>>::HCO, <C as GenericConfig<D>>::Hasher>::new(
vs.clone(),
cap_height,
);
let mut rng = OsRng;
let k = rng.gen_range(1..=1 << h);

View File

@ -12,11 +12,15 @@ use crate::gates::gate::Gate;
use crate::gates::poseidon::PoseidonGate;
use crate::gates::poseidon_mds::PoseidonMdsGate;
use crate::hash::hash_types::{HashOut, RichField};
use crate::hash::hashing::{compress, hash_n_to_hash_no_pad, PlonkyPermutation, SPONGE_WIDTH};
use crate::hash::hashing::{compress, hash_n_to_hash_no_pad, PlonkyPermutation};
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::{BoolTarget, Target};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::config::{AlgebraicHasher, Hasher};
use crate::plonk::config::{AlgebraicHasher, Hasher, PoseidonHashConfig};
pub const SPONGE_RATE: usize = 8;
pub const SPONGE_CAPACITY: usize = 4;
pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY;
// The number of full rounds and partial rounds is given by the
// calc_round_numbers.py script. They happen to be the same for both
@ -47,7 +51,7 @@ fn reduce_u160<F: PrimeField64>((n_lo, n_hi): (u128, u32)) -> F {
}
/// Note that these work for the Goldilocks field, but not necessarily others. See
/// `generate_constants` about how these were generated. We include enough for a WIDTH of 12;
/// `generate_constants` about how these were generated. We include enough for a width of 12;
/// smaller widths just use a subset.
#[rustfmt::skip]
pub const ALL_ROUND_CONSTANTS: [u64; MAX_WIDTH * N_ROUNDS] = [
@ -150,29 +154,28 @@ pub const ALL_ROUND_CONSTANTS: [u64; MAX_WIDTH * N_ROUNDS] = [
0x4543d9df5476d3cb, 0xf172d73e004fc90d, 0xdfd1c4febcc81238, 0xbc8dfb627fe558fc,
];
const WIDTH: usize = SPONGE_WIDTH;
pub trait Poseidon: PrimeField64 {
// Total number of round constants required: width of the input
// times number of rounds.
const N_ROUND_CONSTANTS: usize = WIDTH * N_ROUNDS;
const N_ROUND_CONSTANTS: usize = SPONGE_WIDTH * N_ROUNDS;
// The MDS matrix we use is C + D, where C is the circulant matrix whose first row is given by
// `MDS_MATRIX_CIRC`, and D is the diagonal matrix whose diagonal is given by `MDS_MATRIX_DIAG`.
const MDS_MATRIX_CIRC: [u64; WIDTH];
const MDS_MATRIX_DIAG: [u64; WIDTH];
const MDS_MATRIX_CIRC: [u64; SPONGE_WIDTH];
const MDS_MATRIX_DIAG: [u64; SPONGE_WIDTH];
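Spelled out, row `r` of that matrix-vector product is

    (M v)[r] = sum_{i=0..11} MDS_MATRIX_CIRC[i] * v[(r + i) mod 12] + MDS_MATRIX_DIAG[r] * v[r]

which is exactly the accumulation `mds_row_shf` performs below, deferring the modular reduction to a single step at the end.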
// Precomputed constants for the fast Poseidon calculation. See
// the paper.
const FAST_PARTIAL_FIRST_ROUND_CONSTANT: [u64; WIDTH];
const FAST_PARTIAL_FIRST_ROUND_CONSTANT: [u64; SPONGE_WIDTH];
const FAST_PARTIAL_ROUND_CONSTANTS: [u64; N_PARTIAL_ROUNDS];
const FAST_PARTIAL_ROUND_VS: [[u64; WIDTH - 1]; N_PARTIAL_ROUNDS];
const FAST_PARTIAL_ROUND_W_HATS: [[u64; WIDTH - 1]; N_PARTIAL_ROUNDS];
const FAST_PARTIAL_ROUND_INITIAL_MATRIX: [[u64; WIDTH - 1]; WIDTH - 1];
const FAST_PARTIAL_ROUND_VS: [[u64; SPONGE_WIDTH - 1]; N_PARTIAL_ROUNDS];
const FAST_PARTIAL_ROUND_W_HATS: [[u64; SPONGE_WIDTH - 1]; N_PARTIAL_ROUNDS];
const FAST_PARTIAL_ROUND_INITIAL_MATRIX: [[u64; SPONGE_WIDTH - 1]; SPONGE_WIDTH - 1];
#[inline(always)]
#[unroll_for_loops]
fn mds_row_shf(r: usize, v: &[u64; WIDTH]) -> u128 {
debug_assert!(r < WIDTH);
fn mds_row_shf(r: usize, v: &[u64; SPONGE_WIDTH]) -> u128 {
debug_assert!(r < SPONGE_WIDTH);
// The values of `MDS_MATRIX_CIRC` and `MDS_MATRIX_DIAG` are
// known to be small, so we can accumulate all the products for
// each row and reduce just once at the end (done by the
@ -184,8 +187,8 @@ pub trait Poseidon: PrimeField64 {
// This is a hacky way of fully unrolling the loop.
for i in 0..12 {
if i < WIDTH {
res += (v[(i + r) % WIDTH] as u128) * (Self::MDS_MATRIX_CIRC[i] as u128);
if i < SPONGE_WIDTH {
res += (v[(i + r) % SPONGE_WIDTH] as u128) * (Self::MDS_MATRIX_CIRC[i] as u128);
}
}
res += (v[r] as u128) * (Self::MDS_MATRIX_DIAG[r] as u128);
@ -196,13 +199,13 @@ pub trait Poseidon: PrimeField64 {
/// Same as `mds_row_shf` for field extensions of `Self`.
fn mds_row_shf_field<F: FieldExtension<D, BaseField = Self>, const D: usize>(
r: usize,
v: &[F; WIDTH],
v: &[F; SPONGE_WIDTH],
) -> F {
debug_assert!(r < WIDTH);
debug_assert!(r < SPONGE_WIDTH);
let mut res = F::ZERO;
for i in 0..WIDTH {
res += v[(i + r) % WIDTH] * F::from_canonical_u64(Self::MDS_MATRIX_CIRC[i]);
for i in 0..SPONGE_WIDTH {
res += v[(i + r) % SPONGE_WIDTH] * F::from_canonical_u64(Self::MDS_MATRIX_CIRC[i]);
}
res += v[r] * F::from_canonical_u64(Self::MDS_MATRIX_DIAG[r]);
@ -213,17 +216,17 @@ pub trait Poseidon: PrimeField64 {
fn mds_row_shf_circuit<const D: usize>(
builder: &mut CircuitBuilder<Self, D>,
r: usize,
v: &[ExtensionTarget<D>; WIDTH],
v: &[ExtensionTarget<D>; SPONGE_WIDTH],
) -> ExtensionTarget<D>
where
Self: RichField + Extendable<D>,
{
debug_assert!(r < WIDTH);
debug_assert!(r < SPONGE_WIDTH);
let mut res = builder.zero_extension();
for i in 0..WIDTH {
for i in 0..SPONGE_WIDTH {
let c = Self::from_canonical_u64(<Self as Poseidon>::MDS_MATRIX_CIRC[i]);
res = builder.mul_const_add_extension(c, v[(i + r) % WIDTH], res);
res = builder.mul_const_add_extension(c, v[(i + r) % SPONGE_WIDTH], res);
}
{
let c = Self::from_canonical_u64(<Self as Poseidon>::MDS_MATRIX_DIAG[r]);
@ -235,17 +238,17 @@ pub trait Poseidon: PrimeField64 {
#[inline(always)]
#[unroll_for_loops]
fn mds_layer(state_: &[Self; WIDTH]) -> [Self; WIDTH] {
let mut result = [Self::ZERO; WIDTH];
fn mds_layer(state_: &[Self; SPONGE_WIDTH]) -> [Self; SPONGE_WIDTH] {
let mut result = [Self::ZERO; SPONGE_WIDTH];
let mut state = [0u64; WIDTH];
for r in 0..WIDTH {
let mut state = [0u64; SPONGE_WIDTH];
for r in 0..SPONGE_WIDTH {
state[r] = state_[r].to_noncanonical_u64();
}
// This is a hacky way of fully unrolling the loop.
for r in 0..12 {
if r < WIDTH {
if r < SPONGE_WIDTH {
let sum = Self::mds_row_shf(r, &state);
let sum_lo = sum as u64;
let sum_hi = (sum >> 64) as u32;
@ -258,11 +261,11 @@ pub trait Poseidon: PrimeField64 {
/// Same as `mds_layer` for field extensions of `Self`.
fn mds_layer_field<F: FieldExtension<D, BaseField = Self>, const D: usize>(
state: &[F; WIDTH],
) -> [F; WIDTH] {
let mut result = [F::ZERO; WIDTH];
state: &[F; SPONGE_WIDTH],
) -> [F; SPONGE_WIDTH] {
let mut result = [F::ZERO; SPONGE_WIDTH];
for r in 0..WIDTH {
for r in 0..SPONGE_WIDTH {
result[r] = Self::mds_row_shf_field(r, state);
}
@ -272,8 +275,8 @@ pub trait Poseidon: PrimeField64 {
/// Recursive version of `mds_layer`.
fn mds_layer_circuit<const D: usize>(
builder: &mut CircuitBuilder<Self, D>,
state: &[ExtensionTarget<D>; WIDTH],
) -> [ExtensionTarget<D>; WIDTH]
state: &[ExtensionTarget<D>; SPONGE_WIDTH],
) -> [ExtensionTarget<D>; SPONGE_WIDTH]
where
Self: RichField + Extendable<D>,
{
@ -281,11 +284,11 @@ pub trait Poseidon: PrimeField64 {
let mds_gate = PoseidonMdsGate::<Self, D>::new();
if builder.config.num_routed_wires >= mds_gate.num_wires() {
let index = builder.add_gate(mds_gate, vec![]);
for i in 0..WIDTH {
for i in 0..SPONGE_WIDTH {
let input_wire = PoseidonMdsGate::<Self, D>::wires_input(i);
builder.connect_extension(state[i], ExtensionTarget::from_range(index, input_wire));
}
(0..WIDTH)
(0..SPONGE_WIDTH)
.map(|i| {
let output_wire = PoseidonMdsGate::<Self, D>::wires_output(i);
ExtensionTarget::from_range(index, output_wire)
@ -294,9 +297,9 @@ pub trait Poseidon: PrimeField64 {
.try_into()
.unwrap()
} else {
let mut result = [builder.zero_extension(); WIDTH];
let mut result = [builder.zero_extension(); SPONGE_WIDTH];
for r in 0..WIDTH {
for r in 0..SPONGE_WIDTH {
result[r] = Self::mds_row_shf_circuit(builder, r, state);
}
@ -307,10 +310,10 @@ pub trait Poseidon: PrimeField64 {
#[inline(always)]
#[unroll_for_loops]
fn partial_first_constant_layer<F: FieldExtension<D, BaseField = Self>, const D: usize>(
state: &mut [F; WIDTH],
state: &mut [F; SPONGE_WIDTH],
) {
for i in 0..12 {
if i < WIDTH {
if i < SPONGE_WIDTH {
state[i] += F::from_canonical_u64(Self::FAST_PARTIAL_FIRST_ROUND_CONSTANT[i]);
}
}
@ -319,11 +322,11 @@ pub trait Poseidon: PrimeField64 {
/// Recursive version of `partial_first_constant_layer`.
fn partial_first_constant_layer_circuit<const D: usize>(
builder: &mut CircuitBuilder<Self, D>,
state: &mut [ExtensionTarget<D>; WIDTH],
state: &mut [ExtensionTarget<D>; SPONGE_WIDTH],
) where
Self: RichField + Extendable<D>,
{
for i in 0..WIDTH {
for i in 0..SPONGE_WIDTH {
let c = <Self as Poseidon>::FAST_PARTIAL_FIRST_ROUND_CONSTANT[i];
let c = Self::Extension::from_canonical_u64(c);
let c = builder.constant_extension(c);
@ -334,9 +337,9 @@ pub trait Poseidon: PrimeField64 {
#[inline(always)]
#[unroll_for_loops]
fn mds_partial_layer_init<F: FieldExtension<D, BaseField = Self>, const D: usize>(
state: &[F; WIDTH],
) -> [F; WIDTH] {
let mut result = [F::ZERO; WIDTH];
state: &[F; SPONGE_WIDTH],
) -> [F; SPONGE_WIDTH] {
let mut result = [F::ZERO; SPONGE_WIDTH];
// Initial matrix has first row/column = [1, 0, ..., 0];
@ -344,9 +347,9 @@ pub trait Poseidon: PrimeField64 {
result[0] = state[0];
for r in 1..12 {
if r < WIDTH {
if r < SPONGE_WIDTH {
for c in 1..12 {
if c < WIDTH {
if c < SPONGE_WIDTH {
// NB: FAST_PARTIAL_ROUND_INITIAL_MATRIX is stored in
// row-major order so that this dot product is cache
// friendly.
@ -364,17 +367,17 @@ pub trait Poseidon: PrimeField64 {
/// Recursive version of `mds_partial_layer_init`.
fn mds_partial_layer_init_circuit<const D: usize>(
builder: &mut CircuitBuilder<Self, D>,
state: &[ExtensionTarget<D>; WIDTH],
) -> [ExtensionTarget<D>; WIDTH]
state: &[ExtensionTarget<D>; SPONGE_WIDTH],
) -> [ExtensionTarget<D>; SPONGE_WIDTH]
where
Self: RichField + Extendable<D>,
{
let mut result = [builder.zero_extension(); WIDTH];
let mut result = [builder.zero_extension(); SPONGE_WIDTH];
result[0] = state[0];
for r in 1..WIDTH {
for c in 1..WIDTH {
for r in 1..SPONGE_WIDTH {
for c in 1..SPONGE_WIDTH {
let t = <Self as Poseidon>::FAST_PARTIAL_ROUND_INITIAL_MATRIX[r - 1][c - 1];
let t = Self::Extension::from_canonical_u64(t);
let t = builder.constant_extension(t);
@ -394,12 +397,12 @@ pub trait Poseidon: PrimeField64 {
/// (t-1)x(t-1) identity matrix.
#[inline(always)]
#[unroll_for_loops]
fn mds_partial_layer_fast(state: &[Self; WIDTH], r: usize) -> [Self; WIDTH] {
fn mds_partial_layer_fast(state: &[Self; SPONGE_WIDTH], r: usize) -> [Self; SPONGE_WIDTH] {
// Set d = [M_00 | w_hat] dot [state]
let mut d_sum = (0u128, 0u32); // u160 accumulator
for i in 1..12 {
if i < WIDTH {
if i < SPONGE_WIDTH {
let t = Self::FAST_PARTIAL_ROUND_W_HATS[r][i - 1] as u128;
let si = state[i].to_noncanonical_u64() as u128;
d_sum = add_u160_u128(d_sum, si * t);
@ -411,10 +414,10 @@ pub trait Poseidon: PrimeField64 {
let d = reduce_u160::<Self>(d_sum);
// result = [d] concat [state[0] * v + state[shift up by 1]]
let mut result = [Self::ZERO; WIDTH];
let mut result = [Self::ZERO; SPONGE_WIDTH];
result[0] = d;
for i in 1..12 {
if i < WIDTH {
if i < SPONGE_WIDTH {
let t = Self::from_canonical_u64(Self::FAST_PARTIAL_ROUND_VS[r][i - 1]);
result[i] = state[i].multiply_accumulate(state[0], t);
}
@ -424,21 +427,21 @@ pub trait Poseidon: PrimeField64 {
/// Same as `mds_partial_layer_fast` for field extensions of `Self`.
fn mds_partial_layer_fast_field<F: FieldExtension<D, BaseField = Self>, const D: usize>(
state: &[F; WIDTH],
state: &[F; SPONGE_WIDTH],
r: usize,
) -> [F; WIDTH] {
) -> [F; SPONGE_WIDTH] {
let s0 = state[0];
let mds0to0 = Self::MDS_MATRIX_CIRC[0] + Self::MDS_MATRIX_DIAG[0];
let mut d = s0 * F::from_canonical_u64(mds0to0);
for i in 1..WIDTH {
for i in 1..SPONGE_WIDTH {
let t = F::from_canonical_u64(Self::FAST_PARTIAL_ROUND_W_HATS[r][i - 1]);
d += state[i] * t;
}
// result = [d] concat [state[0] * v + state[shift up by 1]]
let mut result = [F::ZERO; WIDTH];
let mut result = [F::ZERO; SPONGE_WIDTH];
result[0] = d;
for i in 1..WIDTH {
for i in 1..SPONGE_WIDTH {
let t = F::from_canonical_u64(Self::FAST_PARTIAL_ROUND_VS[r][i - 1]);
result[i] = state[0] * t + state[i];
}
@ -448,25 +451,25 @@ pub trait Poseidon: PrimeField64 {
/// Recursive version of `mds_partial_layer_fast`.
fn mds_partial_layer_fast_circuit<const D: usize>(
builder: &mut CircuitBuilder<Self, D>,
state: &[ExtensionTarget<D>; WIDTH],
state: &[ExtensionTarget<D>; SPONGE_WIDTH],
r: usize,
) -> [ExtensionTarget<D>; WIDTH]
) -> [ExtensionTarget<D>; SPONGE_WIDTH]
where
Self: RichField + Extendable<D>,
{
let s0 = state[0];
let mds0to0 = Self::MDS_MATRIX_CIRC[0] + Self::MDS_MATRIX_DIAG[0];
let mut d = builder.mul_const_extension(Self::from_canonical_u64(mds0to0), s0);
for i in 1..WIDTH {
for i in 1..SPONGE_WIDTH {
let t = <Self as Poseidon>::FAST_PARTIAL_ROUND_W_HATS[r][i - 1];
let t = Self::Extension::from_canonical_u64(t);
let t = builder.constant_extension(t);
d = builder.mul_add_extension(t, state[i], d);
}
let mut result = [builder.zero_extension(); WIDTH];
let mut result = [builder.zero_extension(); SPONGE_WIDTH];
result[0] = d;
for i in 1..WIDTH {
for i in 1..SPONGE_WIDTH {
let t = <Self as Poseidon>::FAST_PARTIAL_ROUND_VS[r][i - 1];
let t = Self::Extension::from_canonical_u64(t);
let t = builder.constant_extension(t);
@ -477,10 +480,10 @@ pub trait Poseidon: PrimeField64 {
#[inline(always)]
#[unroll_for_loops]
fn constant_layer(state: &mut [Self; WIDTH], round_ctr: usize) {
fn constant_layer(state: &mut [Self; SPONGE_WIDTH], round_ctr: usize) {
for i in 0..12 {
if i < WIDTH {
let round_constant = ALL_ROUND_CONSTANTS[i + WIDTH * round_ctr];
if i < SPONGE_WIDTH {
let round_constant = ALL_ROUND_CONSTANTS[i + SPONGE_WIDTH * round_ctr];
unsafe {
state[i] = state[i].add_canonical_u64(round_constant);
}
@ -490,24 +493,24 @@ pub trait Poseidon: PrimeField64 {
/// Same as `constant_layer` for field extensions of `Self`.
fn constant_layer_field<F: FieldExtension<D, BaseField = Self>, const D: usize>(
state: &mut [F; WIDTH],
state: &mut [F; SPONGE_WIDTH],
round_ctr: usize,
) {
for i in 0..WIDTH {
state[i] += F::from_canonical_u64(ALL_ROUND_CONSTANTS[i + WIDTH * round_ctr]);
for i in 0..SPONGE_WIDTH {
state[i] += F::from_canonical_u64(ALL_ROUND_CONSTANTS[i + SPONGE_WIDTH * round_ctr]);
}
}
/// Recursive version of `constant_layer`.
fn constant_layer_circuit<const D: usize>(
builder: &mut CircuitBuilder<Self, D>,
state: &mut [ExtensionTarget<D>; WIDTH],
state: &mut [ExtensionTarget<D>; SPONGE_WIDTH],
round_ctr: usize,
) where
Self: RichField + Extendable<D>,
{
for i in 0..WIDTH {
let c = ALL_ROUND_CONSTANTS[i + WIDTH * round_ctr];
for i in 0..SPONGE_WIDTH {
let c = ALL_ROUND_CONSTANTS[i + SPONGE_WIDTH * round_ctr];
let c = Self::Extension::from_canonical_u64(c);
let c = builder.constant_extension(c);
state[i] = builder.add_extension(state[i], c);
@ -537,9 +540,9 @@ pub trait Poseidon: PrimeField64 {
#[inline(always)]
#[unroll_for_loops]
fn sbox_layer(state: &mut [Self; WIDTH]) {
fn sbox_layer(state: &mut [Self; SPONGE_WIDTH]) {
for i in 0..12 {
if i < WIDTH {
if i < SPONGE_WIDTH {
state[i] = Self::sbox_monomial(state[i]);
}
}
@ -547,9 +550,9 @@ pub trait Poseidon: PrimeField64 {
/// Same as `sbox_layer` for field extensions of `Self`.
fn sbox_layer_field<F: FieldExtension<D, BaseField = Self>, const D: usize>(
state: &mut [F; WIDTH],
state: &mut [F; SPONGE_WIDTH],
) {
for i in 0..WIDTH {
for i in 0..SPONGE_WIDTH {
state[i] = Self::sbox_monomial(state[i]);
}
}
@ -557,17 +560,17 @@ pub trait Poseidon: PrimeField64 {
/// Recursive version of `sbox_layer`.
fn sbox_layer_circuit<const D: usize>(
builder: &mut CircuitBuilder<Self, D>,
state: &mut [ExtensionTarget<D>; WIDTH],
state: &mut [ExtensionTarget<D>; SPONGE_WIDTH],
) where
Self: RichField + Extendable<D>,
{
for i in 0..WIDTH {
for i in 0..SPONGE_WIDTH {
state[i] = <Self as Poseidon>::sbox_monomial_circuit(builder, state[i]);
}
}
#[inline]
fn full_rounds(state: &mut [Self; WIDTH], round_ctr: &mut usize) {
fn full_rounds(state: &mut [Self; SPONGE_WIDTH], round_ctr: &mut usize) {
for _ in 0..HALF_N_FULL_ROUNDS {
Self::constant_layer(state, *round_ctr);
Self::sbox_layer(state);
@ -577,7 +580,7 @@ pub trait Poseidon: PrimeField64 {
}
#[inline]
fn partial_rounds(state: &mut [Self; WIDTH], round_ctr: &mut usize) {
fn partial_rounds(state: &mut [Self; SPONGE_WIDTH], round_ctr: &mut usize) {
Self::partial_first_constant_layer(state);
*state = Self::mds_partial_layer_init(state);
@ -592,7 +595,7 @@ pub trait Poseidon: PrimeField64 {
}
#[inline]
fn poseidon(input: [Self; WIDTH]) -> [Self; WIDTH] {
fn poseidon(input: [Self; SPONGE_WIDTH]) -> [Self; SPONGE_WIDTH] {
let mut state = input;
let mut round_ctr = 0;
@ -606,7 +609,7 @@ pub trait Poseidon: PrimeField64 {
// For testing only, to ensure that various tricks are correct.
#[inline]
fn partial_rounds_naive(state: &mut [Self; WIDTH], round_ctr: &mut usize) {
fn partial_rounds_naive(state: &mut [Self; SPONGE_WIDTH], round_ctr: &mut usize) {
for _ in 0..N_PARTIAL_ROUNDS {
Self::constant_layer(state, *round_ctr);
state[0] = Self::sbox_monomial(state[0]);
@ -616,7 +619,7 @@ pub trait Poseidon: PrimeField64 {
}
#[inline]
fn poseidon_naive(input: [Self; WIDTH]) -> [Self; WIDTH] {
fn poseidon_naive(input: [Self; SPONGE_WIDTH]) -> [Self; SPONGE_WIDTH] {
let mut state = input;
let mut round_ctr = 0;
@ -630,7 +633,7 @@ pub trait Poseidon: PrimeField64 {
}
pub struct PoseidonPermutation;
impl<F: RichField> PlonkyPermutation<F> for PoseidonPermutation {
impl<F: RichField> PlonkyPermutation<F, PoseidonHashConfig> for PoseidonPermutation {
fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH] {
F::poseidon(input)
}
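For orientation, a minimal sketch (assumed shape, not the exact plonky2 code) of the sponge loop that `hash_n_to_hash_no_pad` builds on top of `permute`: absorb `RATE` elements per permutation call in overwrite mode, then squeeze the first four state elements as the digest.

const RATE: usize = 8;
const WIDTH: usize = 12;

fn sponge_hash(inputs: &[u64], permute: impl Fn([u64; WIDTH]) -> [u64; WIDTH]) -> [u64; 4] {
    let mut state = [0u64; WIDTH];
    for chunk in inputs.chunks(RATE) {
        // Overwrite mode: replace the rate portion rather than xor/add into it.
        state[..chunk.len()].copy_from_slice(chunk);
        state = permute(state);
    }
    [state[0], state[1], state[2], state[3]]
}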
@ -639,21 +642,21 @@ impl<F: RichField> PlonkyPermutation<F> for PoseidonPermutation {
/// Poseidon hash function.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct PoseidonHash;
impl<F: RichField> Hasher<F> for PoseidonHash {
impl<F: RichField> Hasher<F, PoseidonHashConfig> for PoseidonHash {
const HASH_SIZE: usize = 4 * 8;
type Hash = HashOut<F>;
type Permutation = PoseidonPermutation;
fn hash_no_pad(input: &[F]) -> Self::Hash {
hash_n_to_hash_no_pad::<F, Self::Permutation>(input)
hash_n_to_hash_no_pad::<F, PoseidonHashConfig, Self::Permutation>(input)
}
fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash {
compress::<F, Self::Permutation>(left, right)
compress::<F, PoseidonHashConfig, Self::Permutation>(left, right)
}
}
impl<F: RichField> AlgebraicHasher<F> for PoseidonHash {
impl<F: RichField> AlgebraicHasher<F, PoseidonHashConfig> for PoseidonHash {
fn permute_swapped<const D: usize>(
inputs: [Target; SPONGE_WIDTH],
swap: BoolTarget,
@ -688,8 +691,7 @@ impl<F: RichField> AlgebraicHasher<F> for PoseidonHash {
#[cfg(test)]
pub(crate) mod test_helpers {
use crate::field::types::Field;
use crate::hash::hashing::SPONGE_WIDTH;
use crate::hash::poseidon::Poseidon;
use crate::hash::poseidon::{Poseidon, SPONGE_WIDTH};
pub(crate) fn check_test_vectors<F: Field>(
test_vectors: Vec<([u64; SPONGE_WIDTH], [u64; SPONGE_WIDTH])>,

View File

@ -4,7 +4,7 @@ use core::marker::PhantomData;
use crate::field::extension::{Extendable, FieldExtension};
use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::{PlonkyPermutation, SPONGE_RATE, SPONGE_WIDTH};
use crate::hash::hashing::{HashConfig, PlonkyPermutation};
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::Target;
@ -13,8 +13,11 @@ use crate::plonk::config::{AlgebraicHasher, GenericHashOut, Hasher};
/// Observes prover messages, and generates challenges by hashing the transcript, a la Fiat-Shamir.
#[derive(Clone)]
pub struct Challenger<F: RichField, H: Hasher<F>> {
pub(crate) sponge_state: [F; SPONGE_WIDTH],
pub struct Challenger<F: RichField, HC: HashConfig, H: Hasher<F, HC>>
where
[(); HC::WIDTH]:,
{
pub(crate) sponge_state: [F; HC::WIDTH],
pub(crate) input_buffer: Vec<F>,
output_buffer: Vec<F>,
_phantom: PhantomData<H>,
@ -28,12 +31,15 @@ pub struct Challenger<F: RichField, H: Hasher<F>> {
/// design, but it can be viewed as a duplex sponge whose inputs are sometimes zero (when we perform
/// multiple squeezes) and whose outputs are sometimes ignored (when we perform multiple
/// absorptions). Thus the security properties of a duplex sponge still apply to our design.
impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
pub fn new() -> Challenger<F, H> {
impl<F: RichField, HC: HashConfig, H: Hasher<F, HC>> Challenger<F, HC, H>
where
[(); HC::WIDTH]:,
{
pub fn new() -> Challenger<F, HC, H> {
Challenger {
sponge_state: [F::ZERO; SPONGE_WIDTH],
input_buffer: Vec::with_capacity(SPONGE_RATE),
output_buffer: Vec::with_capacity(SPONGE_RATE),
sponge_state: [F::ZERO; HC::WIDTH],
input_buffer: Vec::with_capacity(HC::RATE),
output_buffer: Vec::with_capacity(HC::RATE),
_phantom: Default::default(),
}
}
@ -44,7 +50,7 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
self.input_buffer.push(element);
if self.input_buffer.len() == SPONGE_RATE {
if self.input_buffer.len() == HC::RATE {
self.duplexing();
}
}
@ -65,19 +71,23 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
pub fn observe_extension_elements<const D: usize>(&mut self, elements: &[F::Extension])
where
F: RichField + Extendable<D>,
[(); HC::WIDTH]:,
{
for element in elements {
self.observe_extension_element(element);
}
}
pub fn observe_hash<OH: Hasher<F>>(&mut self, hash: OH::Hash) {
pub fn observe_hash<OHC: HashConfig, OH: Hasher<F, OHC>>(&mut self, hash: OH::Hash) {
self.observe_elements(&hash.to_vec())
}
pub fn observe_cap<OH: Hasher<F>>(&mut self, cap: &MerkleCap<F, OH>) {
pub fn observe_cap<OHC: HashConfig, OH: Hasher<F, OHC>>(
&mut self,
cap: &MerkleCap<F, OHC, OH>,
) {
for &hash in &cap.0 {
self.observe_hash::<OH>(hash);
self.observe_hash::<OHC, OH>(hash);
}
}
@ -129,7 +139,7 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
/// Absorb any buffered inputs. After calling this, the input buffer will be empty, and the
/// output buffer will be full.
fn duplexing(&mut self) {
assert!(self.input_buffer.len() <= SPONGE_RATE);
assert!(self.input_buffer.len() <= HC::RATE);
// Overwrite the first r elements with the inputs. This differs from a standard sponge,
// where we would xor or add in the inputs. This is a well-known variant, though,
@ -143,10 +153,10 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
self.output_buffer.clear();
self.output_buffer
.extend_from_slice(&self.sponge_state[0..SPONGE_RATE]);
.extend_from_slice(&self.sponge_state[0..HC::RATE]);
}
pub fn compact(&mut self) -> [F; SPONGE_WIDTH] {
pub fn compact(&mut self) -> [F; HC::WIDTH] {
if !self.input_buffer.is_empty() {
self.duplexing();
}
@ -155,37 +165,48 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
}
}
impl<F: RichField, H: AlgebraicHasher<F>> Default for Challenger<F, H> {
impl<F: RichField, HC: HashConfig, H: AlgebraicHasher<F, HC>> Default for Challenger<F, HC, H>
where
[(); HC::WIDTH]:,
{
fn default() -> Self {
Self::new()
}
}
/// A recursive version of `Challenger`. The main difference is that `RecursiveChallenger`'s input
/// buffer can grow beyond `SPONGE_RATE`. This is so that `observe_element` etc do not need access
/// buffer can grow beyond `HC::RATE`. This is so that `observe_element` etc. do not need access
/// to the `CircuitBuilder`.
pub struct RecursiveChallenger<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
pub struct RecursiveChallenger<
F: RichField + Extendable<D>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
const D: usize,
> where
[(); HC::WIDTH]:,
{
sponge_state: [Target; SPONGE_WIDTH],
sponge_state: [Target; HC::WIDTH],
input_buffer: Vec<Target>,
output_buffer: Vec<Target>,
__: PhantomData<(F, H)>,
}
impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
RecursiveChallenger<F, H, D>
impl<F: RichField + Extendable<D>, HC: HashConfig, H: AlgebraicHasher<F, HC>, const D: usize>
RecursiveChallenger<F, HC, H, D>
where
[(); HC::WIDTH]:,
{
pub fn new(builder: &mut CircuitBuilder<F, D>) -> Self {
let zero = builder.zero();
Self {
sponge_state: [zero; SPONGE_WIDTH],
sponge_state: [zero; HC::WIDTH],
input_buffer: Vec::new(),
output_buffer: Vec::new(),
__: PhantomData,
}
}
pub fn from_state(sponge_state: [Target; SPONGE_WIDTH]) -> Self {
pub fn from_state(sponge_state: [Target; HC::WIDTH]) -> Self {
Self {
sponge_state,
input_buffer: vec![],
@ -232,8 +253,8 @@ impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
if self.output_buffer.is_empty() {
// Evaluate the permutation to produce `r` new outputs.
self.sponge_state = builder.permute::<H>(self.sponge_state);
self.output_buffer = self.sponge_state[0..SPONGE_RATE].to_vec();
self.sponge_state = builder.permute::<HC, H>(self.sponge_state);
self.output_buffer = self.sponge_state[0..HC::RATE].to_vec();
}
self.output_buffer
@ -274,7 +295,7 @@ impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
return;
}
for input_chunk in self.input_buffer.chunks(SPONGE_RATE) {
for input_chunk in self.input_buffer.chunks(HC::RATE) {
// Overwrite the first r elements with the inputs. This differs from a standard sponge,
// where we would xor or add in the inputs. This is a well-known variant, though,
// sometimes called "overwrite mode".
@ -283,15 +304,15 @@ impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
}
// Apply the permutation.
self.sponge_state = builder.permute::<H>(self.sponge_state);
self.sponge_state = builder.permute::<HC, H>(self.sponge_state);
}
self.output_buffer = self.sponge_state[0..SPONGE_RATE].to_vec();
self.output_buffer = self.sponge_state[0..HC::RATE].to_vec();
self.input_buffer.clear();
}
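The point of `absorb_buffered_inputs` above is that observation is lazy: `observe_*` only push onto `input_buffer`, and the permutations are applied in one pass when challenges are requested, so observers never need the `CircuitBuilder`. A hypothetical out-of-circuit analogue of the same buffering scheme:

struct LazySponge {
    buf: Vec<u64>,
    state: [u64; 12],
}

impl LazySponge {
    fn observe(&mut self, x: u64) {
        self.buf.push(x); // no permutation yet
    }

    fn challenge(&mut self, permute: impl Fn([u64; 12]) -> [u64; 12]) -> u64 {
        // Absorb everything buffered, one rate-sized chunk per permutation.
        for chunk in self.buf.chunks(8) {
            self.state[..chunk.len()].copy_from_slice(chunk); // overwrite mode
            self.state = permute(self.state);
        }
        self.buf.clear();
        self.state[0]
    }
}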
pub fn compact(&mut self, builder: &mut CircuitBuilder<F, D>) -> [Target; SPONGE_WIDTH] {
pub fn compact(&mut self, builder: &mut CircuitBuilder<F, D>) -> [Target; HC::WIDTH] {
self.absorb_buffered_inputs(builder);
self.output_buffer.clear();
self.sponge_state
@ -314,7 +335,11 @@ mod tests {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
let mut challenger = Challenger::<F, <C as GenericConfig<D>>::InnerHasher>::new();
let mut challenger = Challenger::<
F,
<C as GenericConfig<D>>::HCI,
<C as GenericConfig<D>>::InnerHasher,
>::new();
let mut challenges = Vec::new();
for i in 1..10 {
@ -348,7 +373,11 @@ mod tests {
.map(|&n| F::rand_vec(n))
.collect();
let mut challenger = Challenger::<F, <C as GenericConfig<D>>::InnerHasher>::new();
let mut challenger = Challenger::<
F,
<C as GenericConfig<D>>::HCI,
<C as GenericConfig<D>>::InnerHasher,
>::new();
let mut outputs_per_round: Vec<Vec<F>> = Vec::new();
for (r, inputs) in inputs_per_round.iter().enumerate() {
challenger.observe_elements(inputs);
@ -357,8 +386,12 @@ mod tests {
let config = CircuitConfig::standard_recursion_config();
let mut builder = CircuitBuilder::<F, D>::new(config);
let mut recursive_challenger =
RecursiveChallenger::<F, <C as GenericConfig<D>>::InnerHasher, D>::new(&mut builder);
let mut recursive_challenger = RecursiveChallenger::<
F,
<C as GenericConfig<D>>::HCI,
<C as GenericConfig<D>>::InnerHasher,
D,
>::new(&mut builder);
let mut recursive_outputs_per_round: Vec<Vec<Target>> = Vec::new();
for (r, inputs) in inputs_per_round.iter().enumerate() {
recursive_challenger.observe_elements(&builder.constants(inputs));

View File

@ -9,6 +9,7 @@ use crate::field::types::Field;
use crate::fri::structure::{FriOpenings, FriOpeningsTarget};
use crate::fri::witness_util::set_fri_proof_target;
use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::{BoolTarget, Target};
@ -27,10 +28,10 @@ pub trait WitnessWrite<F: Field> {
.for_each(|(&t, x)| self.set_target(t, x));
}
fn set_cap_target<H: AlgebraicHasher<F>>(
fn set_cap_target<HC: HashConfig, H: AlgebraicHasher<F, HC>>(
&mut self,
ct: &MerkleCapTarget,
value: &MerkleCap<F, H>,
value: &MerkleCap<F, HC, H>,
) where
F: RichField,
{
@ -77,7 +78,7 @@ pub trait WitnessWrite<F: Field> {
proof_with_pis: &ProofWithPublicInputs<F, C, D>,
) where
F: RichField + Extendable<D>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
{
let ProofWithPublicInputs {
proof,
@ -103,7 +104,7 @@ pub trait WitnessWrite<F: Field> {
proof: &Proof<F, C, D>,
) where
F: RichField + Extendable<D>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
{
self.set_cap_target(&proof_target.wires_cap, &proof.wires_cap);
self.set_cap_target(
@ -142,7 +143,7 @@ pub trait WitnessWrite<F: Field> {
vd: &VerifierOnlyCircuitData<C, D>,
) where
F: RichField + Extendable<D>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
{
self.set_cap_target(&vdt.constants_sigmas_cap, &vd.constants_sigmas_cap);
self.set_hash_target(vdt.circuit_digest, vd.circuit_digest);
@ -224,10 +225,14 @@ pub trait Witness<F: Field>: WitnessWrite<F> {
}
}
fn get_merkle_cap_target<H: Hasher<F>>(&self, cap_target: MerkleCapTarget) -> MerkleCap<F, H>
fn get_merkle_cap_target<HC, H: Hasher<F, HC>>(
&self,
cap_target: MerkleCapTarget,
) -> MerkleCap<F, HC, H>
where
F: RichField,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
{
let cap = cap_target
.0

View File

@ -1,5 +1,7 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::needless_range_loop)]
#![allow(clippy::upper_case_acronyms)]
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
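The new `generic_const_exprs` feature is what lets `HC::WIDTH` appear as an array length in signatures throughout this patch; each such use needs a trivially satisfied `[(); HC::WIDTH]:` bound so the compiler will evaluate the const expression. A minimal standalone illustration (hypothetical names, nightly only):

#![feature(generic_const_exprs)]
#![allow(incomplete_features)]

trait Cfg {
    const WIDTH: usize;
}

// Without the `[(); C::WIDTH]:` bound, this signature is rejected.
fn zeroed<C: Cfg>() -> [u8; C::WIDTH]
where
    [(); C::WIDTH]:,
{
    [0u8; C::WIDTH]
}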

View File

@ -28,6 +28,7 @@ use crate::gates::noop::NoopGate;
use crate::gates::public_input::PublicInputGate;
use crate::gates::selectors::selector_polynomials;
use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_proofs::MerkleProofTarget;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::ext_target::ExtensionTarget;
@ -434,9 +435,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
pub fn constant_merkle_cap<H: Hasher<F, Hash = HashOut<F>>>(
pub fn constant_merkle_cap<HC: HashConfig, H: Hasher<F, HC, Hash = HashOut<F>>>(
&mut self,
cap: &MerkleCap<F, H>,
cap: &MerkleCap<F, HC, H>,
) -> MerkleCapTarget {
MerkleCapTarget(cap.0.iter().map(|h| self.constant_hash(*h)).collect())
}
@ -446,7 +447,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
verifier_data: &VerifierOnlyCircuitData<C, D>,
) -> VerifierCircuitTarget
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
{
VerifierCircuitTarget {
constants_sigmas_cap: self.constant_merkle_cap(&verifier_data.constants_sigmas_cap),
@ -737,7 +738,11 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
/// Builds a "full circuit", with both prover and verifier data.
pub fn build<C: GenericConfig<D, F = F>>(mut self) -> CircuitData<F, C, D> {
pub fn build<C: GenericConfig<D, F = F>>(mut self) -> CircuitData<F, C, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let mut timing = TimingTree::new("preprocess", Level::Trace);
#[cfg(feature = "std")]
let start = Instant::now();
@ -748,7 +753,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
// those hash wires match the claimed public inputs.
let num_public_inputs = self.public_inputs.len();
let public_inputs_hash =
self.hash_n_to_hash_no_pad::<C::InnerHasher>(self.public_inputs.clone());
self.hash_n_to_hash_no_pad::<C::HCI, C::InnerHasher>(self.public_inputs.clone());
let pi_gate = self.add_gate(PublicInputGate, vec![]);
for (&hash_part, wire) in public_inputs_hash
.elements
@ -825,7 +830,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
let fft_root_table = fft_root_table(max_fft_points);
let constants_sigmas_vecs = [constant_vecs, sigma_vecs.clone()].concat();
let constants_sigmas_commitment = PolynomialBatch::from_values(
let constants_sigmas_commitment = PolynomialBatch::<F, C, D>::from_values(
constants_sigmas_vecs,
rate_bits,
PlonkOracle::CONSTANTS_SIGMAS.blinding,
@ -913,7 +918,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
assert_eq!(goal_data, common, "The expected circuit data passed to cyclic recursion method did not match the actual circuit");
}
let prover_only = ProverOnlyCircuitData {
let prover_only = ProverOnlyCircuitData::<F, C, D> {
generators: self.generators,
generator_indices_by_watches,
constants_sigmas_commitment,
@ -925,7 +930,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
circuit_digest,
};
let verifier_only = VerifierOnlyCircuitData {
let verifier_only = VerifierOnlyCircuitData::<C, D> {
constants_sigmas_cap,
circuit_digest,
};
@ -941,16 +946,24 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
/// Builds a "prover circuit", with data needed to generate proofs but not verify them.
pub fn build_prover<C: GenericConfig<D, F = F>>(self) -> ProverCircuitData<F, C, D> {
pub fn build_prover<C: GenericConfig<D, F = F>>(self) -> ProverCircuitData<F, C, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
// TODO: Can skip parts of this.
let circuit_data = self.build();
let circuit_data = self.build::<C>();
circuit_data.prover_data()
}
/// Builds a "verifier circuit", with data needed to verify proofs but not generate them.
pub fn build_verifier<C: GenericConfig<D, F = F>>(self) -> VerifierCircuitData<F, C, D> {
pub fn build_verifier<C: GenericConfig<D, F = F>>(self) -> VerifierCircuitData<F, C, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
// TODO: Can skip parts of this.
let circuit_data = self.build();
let circuit_data = self.build::<C>();
circuit_data.verifier_data()
}
}

View File

@ -19,6 +19,7 @@ use crate::fri::{FriConfig, FriParams};
use crate::gates::gate::GateRef;
use crate::gates::selectors::SelectorsInfo;
use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::WitnessGenerator;
@ -115,8 +116,12 @@ pub struct CircuitData<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
CircuitData<F, C, D>
{
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>> {
prove(
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
prove::<F, C, D>(
&self.prover_only,
&self.common,
inputs,
@ -124,28 +129,44 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
)
}
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()> {
verify(proof_with_pis, &self.verifier_only, &self.common)
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
verify::<F, C, D>(proof_with_pis, &self.verifier_only, &self.common)
}
pub fn verify_compressed(
&self,
compressed_proof_with_pis: CompressedProofWithPublicInputs<F, C, D>,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
compressed_proof_with_pis.verify(&self.verifier_only, &self.common)
}
pub fn compress(
&self,
proof: ProofWithPublicInputs<F, C, D>,
) -> Result<CompressedProofWithPublicInputs<F, C, D>> {
) -> Result<CompressedProofWithPublicInputs<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
proof.compress(&self.verifier_only.circuit_digest, &self.common)
}
pub fn decompress(
&self,
proof: CompressedProofWithPublicInputs<F, C, D>,
) -> Result<ProofWithPublicInputs<F, C, D>> {
) -> Result<ProofWithPublicInputs<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
proof.decompress(&self.verifier_only.circuit_digest, &self.common)
}
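A usage sketch of the four methods above, assuming `data: CircuitData<F, C, D>` and a `proof` in scope inside a function returning `anyhow::Result<()>` (and that the compressed proof type is `Clone`):

let compressed = data.compress(proof)?;      // ProofWithPublicInputs -> compressed form
data.verify_compressed(compressed.clone())?; // verify without decompressing
let restored = data.decompress(compressed)?; // back to the full proof
data.verify(restored)?;                      // full verification path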
@ -193,8 +214,12 @@ pub struct ProverCircuitData<
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
ProverCircuitData<F, C, D>
{
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>> {
prove(
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
prove::<F, C, D>(
&self.prover_only,
&self.common,
inputs,
@ -217,14 +242,22 @@ pub struct VerifierCircuitData<
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
VerifierCircuitData<F, C, D>
{
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()> {
verify(proof_with_pis, &self.verifier_only, &self.common)
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
verify::<F, C, D>(proof_with_pis, &self.verifier_only, &self.common)
}
pub fn verify_compressed(
&self,
compressed_proof_with_pis: CompressedProofWithPublicInputs<F, C, D>,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
compressed_proof_with_pis.verify(&self.verifier_only, &self.common)
}
}
@ -254,17 +287,17 @@ pub struct ProverOnlyCircuitData<
pub fft_root_table: Option<FftRootTable<F>>,
/// A digest of the "circuit" (i.e. the instance, minus public inputs), which can be used to
/// seed Fiat-Shamir.
pub circuit_digest: <<C as GenericConfig<D>>::Hasher as Hasher<F>>::Hash,
pub circuit_digest: <<C as GenericConfig<D>>::Hasher as Hasher<F, C::HCO>>::Hash,
}
/// Circuit data required by the verifier, but not the prover.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct VerifierOnlyCircuitData<C: GenericConfig<D>, const D: usize> {
/// A commitment to each constant polynomial and each permutation polynomial.
pub constants_sigmas_cap: MerkleCap<C::F, C::Hasher>,
pub constants_sigmas_cap: MerkleCap<C::F, C::HCO, C::Hasher>,
/// A digest of the "circuit" (i.e. the instance, minus public inputs), which can be used to
/// seed Fiat-Shamir.
pub circuit_digest: <<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
pub circuit_digest: <<C as GenericConfig<D>>::Hasher as Hasher<C::F, C::HCO>>::Hash,
}
/// Circuit data required by both the prover and the verifier.

View File

@ -9,7 +9,7 @@ use crate::field::extension::quadratic::QuadraticExtension;
use crate::field::extension::{Extendable, FieldExtension};
use crate::field::goldilocks_field::GoldilocksField;
use crate::hash::hash_types::{HashOut, RichField};
use crate::hash::hashing::{PlonkyPermutation, SPONGE_WIDTH};
use crate::hash::hashing::{HashConfig, PlonkyPermutation};
use crate::hash::keccak::KeccakHash;
use crate::hash::poseidon::PoseidonHash;
use crate::iop::target::{BoolTarget, Target};
@ -25,7 +25,7 @@ pub trait GenericHashOut<F: RichField>:
}
/// Trait for hash functions.
pub trait Hasher<F: RichField>: Sized + Clone + Debug + Eq + PartialEq {
pub trait Hasher<F: RichField, HC: HashConfig>: Sized + Clone + Debug + Eq + PartialEq {
/// Size of `Hash` in bytes.
const HASH_SIZE: usize;
@ -33,17 +33,22 @@ pub trait Hasher<F: RichField>: Sized + Clone + Debug + Eq + PartialEq {
type Hash: GenericHashOut<F>;
/// Permutation used in the sponge construction.
type Permutation: PlonkyPermutation<F>;
type Permutation: PlonkyPermutation<F, HC>;
/// Hash a message without any padding step. Note that this can enable length-extension attacks.
/// However, it is still collision-resistant in cases where the input has a fixed length.
fn hash_no_pad(input: &[F]) -> Self::Hash;
fn hash_no_pad(input: &[F]) -> Self::Hash
where
[(); HC::WIDTH]:;
/// Pad the message using the `pad10*1` rule, then hash it.
fn hash_pad(input: &[F]) -> Self::Hash {
fn hash_pad(input: &[F]) -> Self::Hash
where
[(); HC::WIDTH]:,
{
let mut padded_input = input.to_vec();
padded_input.push(F::ONE);
while (padded_input.len() + 1) % SPONGE_WIDTH != 0 {
while (padded_input.len() + 1) % HC::WIDTH != 0 {
padded_input.push(F::ZERO);
}
padded_input.push(F::ONE);
@ -52,7 +57,10 @@ pub trait Hasher<F: RichField>: Sized + Clone + Debug + Eq + PartialEq {
/// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a
/// no-op.
fn hash_or_noop(inputs: &[F]) -> Self::Hash {
fn hash_or_noop(inputs: &[F]) -> Self::Hash
where
[(); HC::WIDTH]:,
{
if inputs.len() * 8 <= Self::HASH_SIZE {
let mut inputs_bytes = vec![0u8; Self::HASH_SIZE];
for i in 0..inputs.len() {
@ -65,22 +73,22 @@ pub trait Hasher<F: RichField>: Sized + Clone + Debug + Eq + PartialEq {
}
}
fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash;
fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash
where
[(); HC::WIDTH]:;
}
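A worked sketch of the `pad10*1` rule implemented by `hash_pad` above: append a 1, zero-fill until the closing 1 would land exactly on a multiple of the width, then append the closing 1.

fn pad10star1(mut input: Vec<u64>, width: usize) -> Vec<u64> {
    input.push(1);
    while (input.len() + 1) % width != 0 {
        input.push(0);
    }
    input.push(1);
    input
}

// e.g. pad10star1(vec![7, 7, 7, 7], 12).len() == 12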
/// Trait for algebraic hash functions, built from a permutation using the sponge construction.
pub trait AlgebraicHasher<F: RichField>: Hasher<F, Hash = HashOut<F>> {
// TODO: Adding a `const WIDTH: usize` here yields a compiler error down the line.
// Maybe try again in a while.
pub trait AlgebraicHasher<F: RichField, HC: HashConfig>: Hasher<F, HC, Hash = HashOut<F>> {
/// Circuit to conditionally swap two chunks of the inputs (useful in verifying Merkle proofs),
/// then apply the permutation.
fn permute_swapped<const D: usize>(
inputs: [Target; SPONGE_WIDTH],
inputs: [Target; HC::WIDTH],
swap: BoolTarget,
builder: &mut CircuitBuilder<F, D>,
) -> [Target; SPONGE_WIDTH]
) -> [Target; HC::WIDTH]
where
[(); HC::WIDTH]:,
F: RichField + Extendable<D>;
}
@ -92,28 +100,48 @@ pub trait GenericConfig<const D: usize>:
type F: RichField + Extendable<D, Extension = Self::FE>;
/// Field extension of degree D of the main field.
type FE: FieldExtension<D, BaseField = Self::F>;
/// Hash configuration for this GenericConfig's `Hasher`.
type HCO: HashConfig;
/// Hash configuration for this GenericConfig's `InnerHasher`.
type HCI: HashConfig;
/// Hash function used for building Merkle trees.
type Hasher: Hasher<Self::F>;
type Hasher: Hasher<Self::F, Self::HCO>;
/// Algebraic hash function used for the challenger and hashing public inputs.
type InnerHasher: AlgebraicHasher<Self::F>;
type InnerHasher: AlgebraicHasher<Self::F, Self::HCI>;
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PoseidonHashConfig;
impl HashConfig for PoseidonHashConfig {
const RATE: usize = 8;
const WIDTH: usize = 12;
}
/// Configuration using Poseidon over the Goldilocks field.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct PoseidonGoldilocksConfig;
impl GenericConfig<2> for PoseidonGoldilocksConfig {
type F = GoldilocksField;
type FE = QuadraticExtension<Self::F>;
type HCO = PoseidonHashConfig;
type HCI = PoseidonHashConfig;
type Hasher = PoseidonHash;
type InnerHasher = PoseidonHash;
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct KeccakHashConfig;
impl HashConfig for KeccakHashConfig {
const RATE: usize = 8;
const WIDTH: usize = 12;
}
/// Configuration using truncated Keccak over the Goldilocks field.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct KeccakGoldilocksConfig;
impl GenericConfig<2> for KeccakGoldilocksConfig {
type F = GoldilocksField;
type FE = QuadraticExtension<Self::F>;
type HCO = KeccakHashConfig;
type HCI = PoseidonHashConfig;
type Hasher = KeccakHash<25>;
type InnerHasher = PoseidonHash;
}
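With the split, generic code names a config per role: `HCO`/`Hasher` drive Merkle caps and the Fiat-Shamir transcript, while `HCI`/`InnerHasher` drive in-circuit hashing of public inputs. A usage sketch in the style of the tests elsewhere in this diff (nightly, with `generic_const_exprs` as above; imports assumed from `plonky2::plonk::config` and `plonky2::iop::challenger`):

fn transcript_demo() {
    type C = KeccakGoldilocksConfig;
    type F = <C as GenericConfig<2>>::F;

    // Transcript side: outer hash config + Merkle-tree hasher.
    let mut challenger =
        Challenger::<F, <C as GenericConfig<2>>::HCO, <C as GenericConfig<2>>::Hasher>::new();
    // The in-circuit side would instead use HCI + InnerHasher (Poseidon here).
    let _ = &mut challenger;
}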

View File

@ -9,6 +9,7 @@ use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof, FriProofTar
use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedOpenings};
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::challenger::{Challenger, RecursiveChallenger};
use crate::iop::target::Target;
@ -23,34 +24,38 @@ use crate::plonk::proof::{
use crate::util::reverse_bits;
fn get_challenges<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
wires_cap: &MerkleCap<F, C::Hasher>,
plonk_zs_partial_products_cap: &MerkleCap<F, C::Hasher>,
quotient_polys_cap: &MerkleCap<F, C::Hasher>,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F, C::HCI>>::Hash,
wires_cap: &MerkleCap<F, C::HCO, C::Hasher>,
plonk_zs_partial_products_cap: &MerkleCap<F, C::HCO, C::Hasher>,
quotient_polys_cap: &MerkleCap<F, C::HCO, C::Hasher>,
openings: &OpeningSet<F, D>,
commit_phase_merkle_caps: &[MerkleCap<F, C::Hasher>],
commit_phase_merkle_caps: &[MerkleCap<F, C::HCO, C::Hasher>],
final_poly: &PolynomialCoeffs<F::Extension>,
pow_witness: F,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F, C::HCO>>::Hash,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofChallenges<F, D>> {
) -> anyhow::Result<ProofChallenges<F, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let config = &common_data.config;
let num_challenges = config.num_challenges;
let mut challenger = Challenger::<F, C::Hasher>::new();
let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
// Observe the instance.
challenger.observe_hash::<C::Hasher>(*circuit_digest);
challenger.observe_hash::<C::InnerHasher>(public_inputs_hash);
challenger.observe_hash::<C::HCO, C::Hasher>(*circuit_digest);
challenger.observe_hash::<C::HCI, C::InnerHasher>(public_inputs_hash);
challenger.observe_cap(wires_cap);
challenger.observe_cap::<C::HCO, C::Hasher>(wires_cap);
let plonk_betas = challenger.get_n_challenges(num_challenges);
let plonk_gammas = challenger.get_n_challenges(num_challenges);
challenger.observe_cap(plonk_zs_partial_products_cap);
challenger.observe_cap::<C::HCO, C::Hasher>(plonk_zs_partial_products_cap);
let plonk_alphas = challenger.get_n_challenges(num_challenges);
challenger.observe_cap(quotient_polys_cap);
challenger.observe_cap::<C::HCO, C::Hasher>(quotient_polys_cap);
let plonk_zeta = challenger.get_extension_challenge::<D>();
challenger.observe_openings(&openings.to_fri_openings());
@ -75,9 +80,13 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
{
pub(crate) fn fri_query_indices(
&self,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F, C::HCO>>::Hash,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<Vec<usize>> {
) -> anyhow::Result<Vec<usize>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
Ok(self
.get_challenges(self.get_public_inputs_hash(), circuit_digest, common_data)?
.fri_challenges
@ -87,10 +96,14 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
/// Computes all Fiat-Shamir challenges used in the Plonk proof.
pub(crate) fn get_challenges(
&self,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F, C::HCI>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F, C::HCO>>::Hash,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofChallenges<F, D>> {
) -> anyhow::Result<ProofChallenges<F, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let Proof {
wires_cap,
plonk_zs_partial_products_cap,
@ -126,10 +139,14 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
/// Computes all Fiat-Shamir challenges used in the Plonk proof.
pub(crate) fn get_challenges(
&self,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F, C::HCI>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F, C::HCO>>::Hash,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofChallenges<F, D>> {
) -> anyhow::Result<ProofChallenges<F, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let CompressedProof {
wires_cap,
plonk_zs_partial_products_cap,
@ -249,12 +266,14 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
inner_common_data: &CommonCircuitData<F, D>,
) -> ProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let config = &inner_common_data.config;
let num_challenges = config.num_challenges;
let mut challenger = RecursiveChallenger::<F, C::Hasher, D>::new(self);
let mut challenger = RecursiveChallenger::<F, C::HCO, C::Hasher, D>::new(self);
// Observe the instance.
challenger.observe_hash(&inner_circuit_digest);
@ -297,7 +316,9 @@ impl<const D: usize> ProofWithPublicInputsTarget<D> {
inner_common_data: &CommonCircuitData<F, D>,
) -> ProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let ProofTarget {
wires_cap,

View File

@ -15,6 +15,7 @@ use crate::fri::structure::{
};
use crate::fri::FriParams;
use crate::hash::hash_types::{MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::Target;
@ -29,15 +30,15 @@ use crate::util::serialization::{Buffer, Read};
#[serde(bound = "")]
pub struct Proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
/// Merkle cap of LDEs of wire values.
pub wires_cap: MerkleCap<F, C::Hasher>,
pub wires_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Merkle cap of LDEs of Z, in the context of Plonk's permutation argument.
pub plonk_zs_partial_products_cap: MerkleCap<F, C::Hasher>,
pub plonk_zs_partial_products_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Merkle cap of LDEs of the quotient polynomial components.
pub quotient_polys_cap: MerkleCap<F, C::Hasher>,
pub quotient_polys_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Purported values of each polynomial at the challenge point.
pub openings: OpeningSet<F, D>,
/// A batch FRI argument for all openings.
pub opening_proof: FriProof<F, C::Hasher, D>,
pub opening_proof: FriProof<F, C::HCO, C::Hasher, D>,
}
#[derive(Clone, Debug)]
@ -86,9 +87,13 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
{
pub fn compress(
self,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F, C::HCO>>::Hash,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<CompressedProofWithPublicInputs<F, C, D>> {
) -> anyhow::Result<CompressedProofWithPublicInputs<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let indices = self.fri_query_indices(circuit_digest, common_data)?;
let compressed_proof = self.proof.compress(&indices, &common_data.fri_params);
Ok(CompressedProofWithPublicInputs {
@ -99,7 +104,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub(crate) fn get_public_inputs_hash(
&self,
) -> <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash {
) -> <<C as GenericConfig<D>>::InnerHasher as Hasher<F, C::HCI>>::Hash
where
[(); C::HCI::WIDTH]:,
{
C::InnerHasher::hash_no_pad(&self.public_inputs)
}
@ -129,15 +137,15 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub struct CompressedProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
{
/// Merkle cap of LDEs of wire values.
pub wires_cap: MerkleCap<F, C::Hasher>,
pub wires_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Merkle cap of LDEs of Z, in the context of Plonk's permutation argument.
pub plonk_zs_partial_products_cap: MerkleCap<F, C::Hasher>,
pub plonk_zs_partial_products_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Merkle cap of LDEs of the quotient polynomial components.
pub quotient_polys_cap: MerkleCap<F, C::Hasher>,
pub quotient_polys_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Purported values of each polynomial at the challenge point.
pub openings: OpeningSet<F, D>,
/// A compressed batch FRI argument for all openings.
pub opening_proof: CompressedFriProof<F, C::Hasher, D>,
pub opening_proof: CompressedFriProof<F, C::HCO, C::Hasher, D>,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
@ -149,7 +157,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
challenges: &ProofChallenges<F, D>,
fri_inferred_elements: FriInferredElements<F, D>,
params: &FriParams,
) -> Proof<F, C, D> {
) -> Proof<F, C, D>
where
[(); C::HCO::WIDTH]:,
{
let CompressedProof {
wires_cap,
plonk_zs_partial_products_cap,
@ -184,9 +195,13 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
{
pub fn decompress(
self,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F, C::HCO>>::Hash,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofWithPublicInputs<F, C, D>> {
) -> anyhow::Result<ProofWithPublicInputs<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let challenges =
self.get_challenges(self.get_public_inputs_hash(), circuit_digest, common_data)?;
let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data);
@ -203,7 +218,11 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
self,
verifier_data: &VerifierOnlyCircuitData<C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<()> {
) -> anyhow::Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
ensure!(
self.public_inputs.len() == common_data.num_public_inputs,
"Number of public inputs doesn't match circuit data."
@ -218,7 +237,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
let decompressed_proof =
self.proof
.decompress(&challenges, fri_inferred_elements, &common_data.fri_params);
verify_with_challenges(
verify_with_challenges::<F, C, D>(
decompressed_proof,
public_inputs_hash,
challenges,
@ -229,7 +248,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub(crate) fn get_public_inputs_hash(
&self,
) -> <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash {
) -> <<C as GenericConfig<D>>::InnerHasher as Hasher<F, C::HCI>>::Hash
where
[(); C::HCI::WIDTH]:,
{
C::InnerHasher::hash_no_pad(&self.public_inputs)
}

View File

@ -11,6 +11,7 @@ use crate::field::types::Field;
use crate::field::zero_poly_coset::ZeroPolyOnCoset;
use crate::fri::oracle::PolynomialBatch;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::iop::challenger::Challenger;
use crate::iop::generator::generate_partial_witness;
use crate::iop::witness::{MatrixWitness, PartialWitness, Witness};
@ -30,7 +31,13 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
common_data: &CommonCircuitData<F, D>,
inputs: PartialWitness<F>,
timing: &mut TimingTree,
) -> Result<ProofWithPublicInputs<F, C, D>> {
) -> Result<ProofWithPublicInputs<F, C, D>>
where
C::Hasher: Hasher<F, C::HCO>,
C::InnerHasher: Hasher<F, C::HCI>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let config = &common_data.config;
let num_challenges = config.num_challenges;
let quotient_degree = common_data.quotient_degree();
@ -64,7 +71,7 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
let wires_commitment = timed!(
timing,
"compute wires commitment",
PolynomialBatch::from_values(
PolynomialBatch::<F, C, D>::from_values(
wires_values,
config.fri_config.rate_bits,
config.zero_knowledge && PlonkOracle::WIRES.blinding,
@ -74,13 +81,13 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
)
);
let mut challenger = Challenger::<F, C::Hasher>::new();
let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
// Observe the instance.
challenger.observe_hash::<C::Hasher>(prover_data.circuit_digest);
challenger.observe_hash::<C::InnerHasher>(public_inputs_hash);
challenger.observe_hash::<C::HCO, C::Hasher>(prover_data.circuit_digest);
challenger.observe_hash::<C::HCI, C::InnerHasher>(public_inputs_hash);
challenger.observe_cap(&wires_commitment.merkle_tree.cap);
challenger.observe_cap::<C::HCO, C::Hasher>(&wires_commitment.merkle_tree.cap);
let betas = challenger.get_n_challenges(num_challenges);
let gammas = challenger.get_n_challenges(num_challenges);
@ -104,7 +111,7 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
let partial_products_and_zs_commitment = timed!(
timing,
"commit to partial products and Z's",
PolynomialBatch::from_values(
PolynomialBatch::<F, C, D>::from_values(
zs_partial_products,
config.fri_config.rate_bits,
config.zero_knowledge && PlonkOracle::ZS_PARTIAL_PRODUCTS.blinding,
@ -114,14 +121,15 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
)
);
challenger.observe_cap(&partial_products_and_zs_commitment.merkle_tree.cap);
challenger
.observe_cap::<C::HCO, C::Hasher>(&partial_products_and_zs_commitment.merkle_tree.cap);
let alphas = challenger.get_n_challenges(num_challenges);
let quotient_polys = timed!(
timing,
"compute quotient polys",
compute_quotient_polys(
compute_quotient_polys::<F, C, D>(
common_data,
prover_data,
&public_inputs_hash,
@ -152,7 +160,7 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
let quotient_polys_commitment = timed!(
timing,
"commit to quotient polys",
PolynomialBatch::from_coeffs(
PolynomialBatch::<F, C, D>::from_coeffs(
all_quotient_poly_chunks,
config.fri_config.rate_bits,
config.zero_knowledge && PlonkOracle::QUOTIENT.blinding,
@ -162,7 +170,7 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
)
);
challenger.observe_cap(&quotient_polys_commitment.merkle_tree.cap);
challenger.observe_cap::<C::HCO, C::Hasher>(&quotient_polys_commitment.merkle_tree.cap);
let zeta = challenger.get_extension_challenge::<D>();
// To avoid leaking witness data, we want to ensure that our opening locations, `zeta` and
@ -177,7 +185,7 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
let openings = timed!(
timing,
"construct the opening set",
OpeningSet::new(
OpeningSet::new::<C>(
zeta,
g,
&prover_data.constants_sigmas_commitment,
@ -192,7 +200,7 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
let opening_proof = timed!(
timing,
"compute opening proofs",
PolynomialBatch::prove_openings(
PolynomialBatch::<F, C, D>::prove_openings(
&common_data.get_fri_instance(zeta),
&[
&prover_data.constants_sigmas_commitment,
@ -206,14 +214,14 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
)
);
let proof = Proof {
let proof = Proof::<F, C, D> {
wires_cap: wires_commitment.merkle_tree.cap,
plonk_zs_partial_products_cap: partial_products_and_zs_commitment.merkle_tree.cap,
quotient_polys_cap: quotient_polys_commitment.merkle_tree.cap,
openings,
opening_proof,
};
Ok(ProofWithPublicInputs {
Ok(ProofWithPublicInputs::<F, C, D> {
proof,
public_inputs,
})
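The sequence of `observe_*` and `get_*` calls above pins down the Fiat-Shamir schedule, which `get_challenges` on the verifier side must replay exactly:

observe circuit_digest, public_inputs_hash, wires_cap -> betas, gammas
observe partial_products_and_zs_cap                   -> alphas
observe quotient_polys_cap                            -> zeta
observe openings                                      -> FRI challenges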
@ -316,7 +324,7 @@ fn compute_quotient_polys<
>(
common_data: &CommonCircuitData<F, D>,
prover_data: &'a ProverOnlyCircuitData<F, C, D>,
public_inputs_hash: &<<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
public_inputs_hash: &<<C as GenericConfig<D>>::InnerHasher as Hasher<F, C::HCI>>::Hash,
wires_commitment: &'a PolynomialBatch<F, C, D>,
zs_partial_products_commitment: &'a PolynomialBatch<F, C, D>,
betas: &[F],

View File

@ -4,6 +4,7 @@ use crate::field::extension::Extendable;
use crate::field::types::Field;
use crate::fri::verifier::verify_fri_proof;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::plonk::circuit_data::{CommonCircuitData, VerifierOnlyCircuitData};
use crate::plonk::config::{GenericConfig, Hasher};
use crate::plonk::plonk_common::reduce_with_powers;
@ -16,7 +17,11 @@ pub(crate) fn verify<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, c
proof_with_pis: ProofWithPublicInputs<F, C, D>,
verifier_data: &VerifierOnlyCircuitData<C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
validate_proof_with_pis_shape(&proof_with_pis, common_data)?;
let public_inputs_hash = proof_with_pis.get_public_inputs_hash();
@ -26,7 +31,7 @@ pub(crate) fn verify<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, c
common_data,
)?;
verify_with_challenges(
verify_with_challenges::<F, C, D>(
proof_with_pis.proof,
public_inputs_hash,
challenges,
@ -41,11 +46,14 @@ pub(crate) fn verify_with_challenges<
const D: usize,
>(
proof: Proof<F, C, D>,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F, C::HCI>>::Hash,
challenges: ProofChallenges<F, D>,
verifier_data: &VerifierOnlyCircuitData<C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
{
let local_constants = &proof.openings.constants;
let local_wires = &proof.openings.wires;
let vars = EvaluationVars {

View File

@ -8,6 +8,7 @@ use crate::fri::proof::{
};
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_proofs::MerkleProofTarget;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::{BoolTarget, Target};
@ -29,7 +30,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
inner_verifier_data1: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, D>,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let selected_proof =
self.select_proof_with_pis(condition, proof_with_pis0, proof_with_pis1);
@ -58,7 +61,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
inner_common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<()>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let (dummy_proof_with_pis_target, dummy_verifier_data_target) =
self.dummy_proof_and_vk::<C>(inner_common_data)?;

View File

@ -4,6 +4,7 @@ use anyhow::{ensure, Result};
use crate::field::extension::Extendable;
use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::target::{BoolTarget, Target};
use crate::plonk::circuit_builder::CircuitBuilder;
@ -16,7 +17,7 @@ use crate::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
impl<C: GenericConfig<D>, const D: usize> VerifierOnlyCircuitData<C, D> {
fn from_slice(slice: &[C::F], common_data: &CommonCircuitData<C::F, D>) -> Result<Self>
where
C::Hasher: AlgebraicHasher<C::F>,
C::Hasher: AlgebraicHasher<C::F, C::HCO>,
{
// The structure of the public inputs is `[..., circuit_digest, constants_sigmas_cap]`.
let cap_len = common_data.config.fri_config.num_cap_elements();
@ -88,7 +89,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
common_data: &CommonCircuitData<F, D>,
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let verifier_data = self
.verifier_data_public_input
@ -140,7 +143,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
common_data: &CommonCircuitData<F, D>,
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let (dummy_proof_with_pis_target, dummy_verifier_data_target) =
self.dummy_proof_and_vk::<C>(common_data)?;
@ -167,7 +172,9 @@ pub fn check_cyclic_proof_verifier_data<
common_data: &CommonCircuitData<F, D>,
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let pis = VerifierOnlyCircuitData::<C, D>::from_slice(&proof.public_inputs, common_data)?;
ensure!(verifier_data.constants_sigmas_cap == pis.constants_sigmas_cap);
@ -184,12 +191,14 @@ mod tests {
use crate::field::types::{Field, PrimeField64};
use crate::gates::noop::NoopGate;
use crate::hash::hash_types::{HashOutTarget, RichField};
use crate::hash::hashing::hash_n_to_hash_no_pad;
use crate::hash::hashing::{hash_n_to_hash_no_pad, HashConfig};
use crate::hash::poseidon::{PoseidonHash, PoseidonPermutation};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData};
use crate::plonk::config::{AlgebraicHasher, GenericConfig, PoseidonGoldilocksConfig};
use crate::plonk::config::{
AlgebraicHasher, GenericConfig, PoseidonGoldilocksConfig, PoseidonHashConfig,
};
use crate::recursion::cyclic_recursion::check_cyclic_proof_verifier_data;
use crate::recursion::dummy_circuit::cyclic_base_proof;
@ -200,7 +209,9 @@ mod tests {
const D: usize,
>() -> CommonCircuitData<F, D>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let config = CircuitConfig::standard_recursion_config();
let builder = CircuitBuilder::<F, D>::new(config);
@ -245,8 +256,9 @@ mod tests {
let initial_hash_target = builder.add_virtual_hash();
builder.register_public_inputs(&initial_hash_target.elements);
let current_hash_in = builder.add_virtual_hash();
let current_hash_out =
builder.hash_n_to_hash_no_pad::<PoseidonHash>(current_hash_in.elements.to_vec());
let current_hash_out = builder.hash_n_to_hash_no_pad::<PoseidonHashConfig, PoseidonHash>(
current_hash_in.elements.to_vec(),
);
builder.register_public_inputs(&current_hash_out.elements);
let counter = builder.add_virtual_public_input();
@ -347,7 +359,8 @@ mod tests {
fn iterate_poseidon<F: RichField>(initial_state: [F; 4], n: usize) -> [F; 4] {
let mut current = initial_state;
for _ in 0..n {
current = hash_n_to_hash_no_pad::<F, PoseidonPermutation>(&current).elements;
current = hash_n_to_hash_no_pad::<F, PoseidonHashConfig, PoseidonPermutation>(&current)
.elements;
}
current
}

View File

@ -7,6 +7,7 @@ use plonky2_util::ceil_div_usize;
use crate::gates::noop::NoopGate;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartialWitness, PartitionWitness, WitnessWrite};
@ -30,7 +31,9 @@ pub fn cyclic_base_proof<F, C, const D: usize>(
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<C::F>,
C::Hasher: AlgebraicHasher<C::F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let pis_len = common_data.num_public_inputs;
let cap_elements = common_data.config.fri_config.num_cap_elements();
@ -46,19 +49,27 @@ where
// TODO: A bit wasteful to build a dummy circuit here. We could potentially use a proof that
// just consists of zeros, apart from public inputs.
dummy_proof(&dummy_circuit(common_data), nonzero_public_inputs).unwrap()
dummy_proof::<F, C, D>(
&dummy_circuit::<F, C, D>(common_data),
nonzero_public_inputs,
)
.unwrap()
}
/// Generate a proof for a dummy circuit. The `nonzero_public_inputs` parameter lets the caller specify
/// certain public inputs (identified by their indices) which should be given specific values.
/// The rest will default to zero.
pub(crate) fn dummy_proof<F, C, const D: usize>(
pub(crate) fn dummy_proof<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
circuit: &CircuitData<F, C, D>,
nonzero_public_inputs: HashMap<usize, F>,
) -> anyhow::Result<ProofWithPublicInputs<F, C, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let mut pw = PartialWitness::new();
for i in 0..circuit.common.num_public_inputs {
@ -75,7 +86,11 @@ pub(crate) fn dummy_circuit<
const D: usize,
>(
common_data: &CommonCircuitData<F, D>,
) -> CircuitData<F, C, D> {
) -> CircuitData<F, C, D>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let config = common_data.config.clone();
assert!(
!common_data.config.zero_knowledge,
@ -109,10 +124,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<(ProofWithPublicInputsTarget<D>, VerifierCircuitTarget)>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let dummy_circuit = dummy_circuit::<F, C, D>(common_data);
let dummy_proof_with_pis = dummy_proof(&dummy_circuit, HashMap::new())?;
let dummy_proof_with_pis = dummy_proof::<F, C, D>(&dummy_circuit, HashMap::new())?;
let dummy_proof_with_pis_target = self.add_virtual_proof_with_pis(common_data);
let dummy_verifier_data_target =
self.add_virtual_verifier_data(self.config.fri_config.cap_height);
@ -144,7 +161,7 @@ impl<F, C, const D: usize> SimpleGenerator<F> for DummyProofGenerator<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F> + 'static,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
{
fn dependencies(&self) -> Vec<Target> {
vec![]

View File

@ -1,5 +1,6 @@
use crate::field::extension::Extendable;
use crate::hash::hash_types::{HashOutTarget, RichField};
use crate::hash::hashing::HashConfig;
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::{CommonCircuitData, VerifierCircuitTarget};
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
@ -20,14 +21,16 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
inner_verifier_data: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, D>,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
assert_eq!(
proof_with_pis.public_inputs.len(),
inner_common_data.num_public_inputs
);
let public_inputs_hash =
self.hash_n_to_hash_no_pad::<C::InnerHasher>(proof_with_pis.public_inputs.clone());
let public_inputs_hash = self
.hash_n_to_hash_no_pad::<C::HCI, C::InnerHasher>(proof_with_pis.public_inputs.clone());
let challenges = proof_with_pis.get_challenges::<F, C>(
self,
public_inputs_hash,
@ -53,7 +56,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
inner_verifier_data: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, D>,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
{
let one = self.one_extension();
@ -254,7 +258,8 @@ mod tests {
assert_eq!(cd.degree_bits(), 12);
// A standard recursive proof.
let (proof, vd, cd) = recursive_proof(proof, vd, cd, &standard_config, None, false, false)?;
let (proof, vd, cd) =
recursive_proof::<F, C, C, D>(proof, vd, cd, &standard_config, None, false, false)?;
assert_eq!(cd.degree_bits(), 12);
// A high-rate recursive proof, designed to be verifiable with fewer routed wires.
@ -324,7 +329,11 @@ mod tests {
fn dummy_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
config: &CircuitConfig,
num_dummy_gates: u64,
) -> Result<Proof<F, C, D>> {
) -> Result<Proof<F, C, D>>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let mut builder = CircuitBuilder::<F, D>::new(config.clone());
for _ in 0..num_dummy_gates {
builder.add_gate(NoopGate, vec![]);
@ -353,7 +362,11 @@ mod tests {
print_timing: bool,
) -> Result<Proof<F, C, D>>
where
InnerC::Hasher: AlgebraicHasher<F>,
InnerC::Hasher: AlgebraicHasher<F, InnerC::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
[(); InnerC::HCO::WIDTH]:,
[(); InnerC::HCI::WIDTH]:,
{
let mut builder = CircuitBuilder::<F, D>::new(config.clone());
let mut pw = PartialWitness::new();
@ -405,7 +418,11 @@ mod tests {
proof: &ProofWithPublicInputs<F, C, D>,
vd: &VerifierOnlyCircuitData<C, D>,
cd: &CommonCircuitData<F, D>,
) -> Result<()> {
) -> Result<()>
where
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let proof_bytes = proof.to_bytes();
info!("Proof length: {} bytes", proof_bytes.len());
let proof_from_bytes = ProofWithPublicInputs::from_bytes(proof_bytes, cd)?;

View File

@ -14,6 +14,7 @@ use crate::fri::proof::{
FriQueryStep,
};
use crate::hash::hash_types::RichField;
use crate::hash::hashing::HashConfig;
use crate::hash::merkle_proofs::MerkleProof;
use crate::hash::merkle_tree::MerkleCap;
use crate::plonk::circuit_data::CommonCircuitData;
@ -119,10 +120,11 @@ pub trait Read {
/// Reads a hash value from `self`.
#[inline]
fn read_hash<F, H>(&mut self) -> IoResult<H::Hash>
fn read_hash<F, HC, H>(&mut self) -> IoResult<H::Hash>
where
F: RichField,
H: Hasher<F>,
HC: HashConfig,
H: Hasher<F, HC>,
{
let mut buf = vec![0; H::HASH_SIZE];
self.read_exact(&mut buf)?;
@ -131,15 +133,16 @@ pub trait Read {
/// Reads a value of type [`MerkleCap`] from `self` with the given `cap_height`.
#[inline]
fn read_merkle_cap<F, H>(&mut self, cap_height: usize) -> IoResult<MerkleCap<F, H>>
fn read_merkle_cap<F, HC, H>(&mut self, cap_height: usize) -> IoResult<MerkleCap<F, HC, H>>
where
F: RichField,
H: Hasher<F>,
HC: HashConfig,
H: Hasher<F, HC>,
{
let cap_length = 1 << cap_height;
Ok(MerkleCap(
(0..cap_length)
.map(|_| self.read_hash::<F, H>())
.map(|_| self.read_hash::<F, HC, H>())
.collect::<Result<Vec<_>, _>>()?,
))
}
@ -178,15 +181,16 @@ pub trait Read {
/// Reads a value of type [`MerkleProof`] from `self`.
#[inline]
fn read_merkle_proof<F, H>(&mut self) -> IoResult<MerkleProof<F, H>>
fn read_merkle_proof<F, HC, H>(&mut self) -> IoResult<MerkleProof<F, HC, H>>
where
F: RichField,
H: Hasher<F>,
HC: HashConfig,
H: Hasher<F, HC>,
{
let length = self.read_u8()?;
Ok(MerkleProof {
siblings: (0..length)
.map(|_| self.read_hash::<F, H>())
.map(|_| self.read_hash::<F, HC, H>())
.collect::<Result<_, _>>()?,
})
}
@ -196,7 +200,7 @@ pub trait Read {
fn read_fri_initial_proof<F, C, const D: usize>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> IoResult<FriInitialTreeProof<F, C::Hasher>>
) -> IoResult<FriInitialTreeProof<F, C::HCO, C::Hasher>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -235,7 +239,7 @@ pub trait Read {
&mut self,
arity: usize,
compressed: bool,
) -> IoResult<FriQueryStep<F, C::Hasher, D>>
) -> IoResult<FriQueryStep<F, C::HCO, C::Hasher, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -250,10 +254,11 @@ pub trait Read {
/// Reads a vector of [`FriQueryRound`]s from `self` with `common_data`.
#[inline]
#[allow(clippy::type_complexity)]
fn read_fri_query_rounds<F, C, const D: usize>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> IoResult<Vec<FriQueryRound<F, C::Hasher, D>>>
) -> IoResult<Vec<FriQueryRound<F, C::HCO, C::Hasher, D>>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -281,7 +286,7 @@ pub trait Read {
fn read_fri_proof<F, C, const D: usize>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> IoResult<FriProof<F, C::Hasher, D>>
) -> IoResult<FriProof<F, C::HCO, C::Hasher, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -352,7 +357,7 @@ pub trait Read {
fn read_compressed_fri_query_rounds<F, C, const D: usize>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> IoResult<CompressedFriQueryRounds<F, C::Hasher, D>>
) -> IoResult<CompressedFriQueryRounds<F, C::HCO, C::Hasher, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -400,7 +405,7 @@ pub trait Read {
fn read_compressed_fri_proof<F, C, const D: usize>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> IoResult<CompressedFriProof<F, C::Hasher, D>>
) -> IoResult<CompressedFriProof<F, C::HCO, C::Hasher, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -534,23 +539,25 @@ pub trait Write {
/// Writes a hash `h` to `self`.
#[inline]
fn write_hash<F, H>(&mut self, h: H::Hash) -> IoResult<()>
fn write_hash<F, HC, H>(&mut self, h: H::Hash) -> IoResult<()>
where
F: RichField,
H: Hasher<F>,
HC: HashConfig,
H: Hasher<F, HC>,
{
self.write_all(&h.to_bytes())
}
/// Writes `cap`, a value of type [`MerkleCap`], to `self`.
#[inline]
fn write_merkle_cap<F, H>(&mut self, cap: &MerkleCap<F, H>) -> IoResult<()>
fn write_merkle_cap<F, HC, H>(&mut self, cap: &MerkleCap<F, HC, H>) -> IoResult<()>
where
F: RichField,
H: Hasher<F>,
HC: HashConfig,
H: Hasher<F, HC>,
{
for &a in &cap.0 {
self.write_hash::<F, H>(a)?;
self.write_hash::<F, HC, H>(a)?;
}
Ok(())
}
@ -572,10 +579,11 @@ pub trait Write {
/// Writes a value `p` of type [`MerkleProof`] to `self`.
#[inline]
fn write_merkle_proof<F, H>(&mut self, p: &MerkleProof<F, H>) -> IoResult<()>
fn write_merkle_proof<F, HC, H>(&mut self, p: &MerkleProof<F, HC, H>) -> IoResult<()>
where
F: RichField,
H: Hasher<F>,
HC: HashConfig,
H: Hasher<F, HC>,
{
let length = p.siblings.len();
self.write_u8(
@ -584,7 +592,7 @@ pub trait Write {
.expect("Merkle proof length must fit in u8."),
)?;
for &h in &p.siblings {
self.write_hash::<F, H>(h)?;
self.write_hash::<F, HC, H>(h)?;
}
Ok(())
}
@ -593,7 +601,7 @@ pub trait Write {
#[inline]
fn write_fri_initial_proof<F, C, const D: usize>(
&mut self,
fitp: &FriInitialTreeProof<F, C::Hasher>,
fitp: &FriInitialTreeProof<F, C::HCO, C::Hasher>,
) -> IoResult<()>
where
F: RichField + Extendable<D>,
@ -610,7 +618,7 @@ pub trait Write {
#[inline]
fn write_fri_query_step<F, C, const D: usize>(
&mut self,
fqs: &FriQueryStep<F, C::Hasher, D>,
fqs: &FriQueryStep<F, C::HCO, C::Hasher, D>,
) -> IoResult<()>
where
F: RichField + Extendable<D>,
@ -624,7 +632,7 @@ pub trait Write {
#[inline]
fn write_fri_query_rounds<F, C, const D: usize>(
&mut self,
fqrs: &[FriQueryRound<F, C::Hasher, D>],
fqrs: &[FriQueryRound<F, C::HCO, C::Hasher, D>],
) -> IoResult<()>
where
F: RichField + Extendable<D>,
@ -643,7 +651,7 @@ pub trait Write {
#[inline]
fn write_fri_proof<F, C, const D: usize>(
&mut self,
fp: &FriProof<F, C::Hasher, D>,
fp: &FriProof<F, C::HCO, C::Hasher, D>,
) -> IoResult<()>
where
F: RichField + Extendable<D>,
@ -693,7 +701,7 @@ pub trait Write {
#[inline]
fn write_compressed_fri_query_rounds<F, C, const D: usize>(
&mut self,
cfqrs: &CompressedFriQueryRounds<F, C::Hasher, D>,
cfqrs: &CompressedFriQueryRounds<F, C::HCO, C::Hasher, D>,
) -> IoResult<()>
where
F: RichField + Extendable<D>,
@ -721,7 +729,7 @@ pub trait Write {
#[inline]
fn write_compressed_fri_proof<F, C, const D: usize>(
&mut self,
fp: &CompressedFriProof<F, C::Hasher, D>,
fp: &CompressedFriProof<F, C::HCO, C::Hasher, D>,
) -> IoResult<()>
where
F: RichField + Extendable<D>,
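One concrete consequence for callers of these `Read`/`Write` helpers: every turbofish grows a `HC` argument. Below is a usage sketch under the imports shown in this file's hunks; the function and its name are illustrative, not part of the crate — only the three-parameter `::<F, HC, H>` shape is taken from the diff above.

// Round-trips a Merkle cap through the updated serialization API.
fn roundtrip_merkle_cap<F, HC, H, R, W>(
    src: &mut R,
    dst: &mut W,
    cap_height: usize,
) -> IoResult<()>
where
    F: RichField,
    HC: HashConfig,
    H: Hasher<F, HC>,
    R: Read,
    W: Write,
{
    // Formerly `read_merkle_cap::<F, H>`; the hash config is now threaded
    // through explicitly as the middle type argument.
    let cap = src.read_merkle_cap::<F, HC, H>(cap_height)?;
    dst.write_merkle_cap::<F, HC, H>(&cap)
}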

View File

@ -127,12 +127,11 @@ mod tests {
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::witness::PartialWitness;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{
AlgebraicHasher, GenericConfig, Hasher, PoseidonGoldilocksConfig,
};
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, PoseidonGoldilocksConfig};
use plonky2::util::timing::TimingTree;
use crate::config::StarkConfig;
@ -236,10 +235,13 @@ mod tests {
print_gate_counts: bool,
) -> Result<()>
where
InnerC::Hasher: AlgebraicHasher<F>,
InnerC::Hasher: AlgebraicHasher<F, InnerC::HCO>,
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
[(); InnerC::HCO::WIDTH]:,
[(); InnerC::HCI::WIDTH]:,
{
let circuit_config = CircuitConfig::standard_recursion_config();
let mut builder = CircuitBuilder::<F, D>::new(circuit_config);

View File

@ -5,6 +5,7 @@ use plonky2::field::polynomial::PolynomialCoeffs;
use plonky2::fri::proof::{FriProof, FriProofTarget};
use plonky2::gadgets::polynomial::PolynomialCoeffsExtTarget;
use plonky2::hash::hash_types::{MerkleCapTarget, RichField};
use plonky2::hash::hashing::HashConfig;
use plonky2::hash::merkle_tree::MerkleCap;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::target::Target;
@ -20,11 +21,11 @@ use crate::stark::Stark;
fn get_challenges<F, C, S, const D: usize>(
stark: &S,
trace_cap: &MerkleCap<F, C::Hasher>,
permutation_zs_cap: Option<&MerkleCap<F, C::Hasher>>,
quotient_polys_cap: &MerkleCap<F, C::Hasher>,
trace_cap: &MerkleCap<F, C::HCO, C::Hasher>,
permutation_zs_cap: Option<&MerkleCap<F, C::HCO, C::Hasher>>,
quotient_polys_cap: &MerkleCap<F, C::HCO, C::Hasher>,
openings: &StarkOpeningSet<F, D>,
commit_phase_merkle_caps: &[MerkleCap<F, C::Hasher>],
commit_phase_merkle_caps: &[MerkleCap<F, C::HCO, C::Hasher>],
final_poly: &PolynomialCoeffs<F::Extension>,
pow_witness: F,
config: &StarkConfig,
@ -34,10 +35,12 @@ where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let num_challenges = config.num_challenges;
let mut challenger = Challenger::<F, C::Hasher>::new();
let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
challenger.observe_cap(trace_cap);
@ -76,6 +79,8 @@ impl<F, C, const D: usize> StarkProofWithPublicInputs<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
// TODO: Should be used later in compression?
#![allow(dead_code)]
@ -145,11 +150,13 @@ pub(crate) fn get_challenges_target<
config: &StarkConfig,
) -> StarkProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let num_challenges = config.num_challenges;
let mut challenger = RecursiveChallenger::<F, C::Hasher, D>::new(builder);
let mut challenger = RecursiveChallenger::<F, C::HCO, C::Hasher, D>::new(builder);
challenger.observe_cap(trace_cap);
@ -197,7 +204,9 @@ impl<const D: usize> StarkProofWithPublicInputsTarget<D> {
config: &StarkConfig,
) -> StarkProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let StarkProofTarget {
trace_cap,

View File

@ -1,6 +1,7 @@
#![allow(incomplete_features)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
#![allow(clippy::upper_case_acronyms)]
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]

View File

@ -10,6 +10,7 @@ use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
@ -149,29 +150,38 @@ fn poly_product_elementwise<F: Field>(
product
}
fn get_permutation_challenge<F: RichField, H: Hasher<F>>(
challenger: &mut Challenger<F, H>,
) -> PermutationChallenge<F> {
fn get_permutation_challenge<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
challenger: &mut Challenger<F, HC, H>,
) -> PermutationChallenge<F>
where
[(); HC::WIDTH]:,
{
let beta = challenger.get_challenge();
let gamma = challenger.get_challenge();
PermutationChallenge { beta, gamma }
}
fn get_permutation_challenge_set<F: RichField, H: Hasher<F>>(
challenger: &mut Challenger<F, H>,
fn get_permutation_challenge_set<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
challenger: &mut Challenger<F, HC, H>,
num_challenges: usize,
) -> PermutationChallengeSet<F> {
) -> PermutationChallengeSet<F>
where
[(); HC::WIDTH]:,
{
let challenges = (0..num_challenges)
.map(|_| get_permutation_challenge(challenger))
.collect();
PermutationChallengeSet { challenges }
}
pub(crate) fn get_n_permutation_challenge_sets<F: RichField, H: Hasher<F>>(
challenger: &mut Challenger<F, H>,
pub(crate) fn get_n_permutation_challenge_sets<F: RichField, HC: HashConfig, H: Hasher<F, HC>>(
challenger: &mut Challenger<F, HC, H>,
num_challenges: usize,
num_sets: usize,
) -> Vec<PermutationChallengeSet<F>> {
) -> Vec<PermutationChallengeSet<F>>
where
[(); HC::WIDTH]:,
{
(0..num_sets)
.map(|_| get_permutation_challenge_set(challenger, num_challenges))
.collect()
@ -179,12 +189,16 @@ pub(crate) fn get_n_permutation_challenge_sets<F: RichField, H: Hasher<F>>(
fn get_permutation_challenge_target<
F: RichField + Extendable<D>,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, H, D>,
) -> PermutationChallenge<Target> {
challenger: &mut RecursiveChallenger<F, HC, H, D>,
) -> PermutationChallenge<Target>
where
[(); HC::WIDTH]:,
{
let beta = challenger.get_challenge(builder);
let gamma = challenger.get_challenge(builder);
PermutationChallenge { beta, gamma }
@ -192,13 +206,17 @@ fn get_permutation_challenge_target<
fn get_permutation_challenge_set_target<
F: RichField + Extendable<D>,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, H, D>,
challenger: &mut RecursiveChallenger<F, HC, H, D>,
num_challenges: usize,
) -> PermutationChallengeSet<Target> {
) -> PermutationChallengeSet<Target>
where
[(); HC::WIDTH]:,
{
let challenges = (0..num_challenges)
.map(|_| get_permutation_challenge_target(builder, challenger))
.collect();
@ -207,14 +225,18 @@ fn get_permutation_challenge_set_target<
pub(crate) fn get_n_permutation_challenge_sets_target<
F: RichField + Extendable<D>,
H: AlgebraicHasher<F>,
HC: HashConfig,
H: AlgebraicHasher<F, HC>,
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, H, D>,
challenger: &mut RecursiveChallenger<F, HC, H, D>,
num_challenges: usize,
num_sets: usize,
) -> Vec<PermutationChallengeSet<Target>> {
) -> Vec<PermutationChallengeSet<Target>>
where
[(); HC::WIDTH]:,
{
(0..num_sets)
.map(|_| get_permutation_challenge_set_target(builder, challenger, num_challenges))
.collect()
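Downstream code constructs its challenger with the outer hash config, as in the `Challenger::<F, C::HCO, C::Hasher>::new()` line above. A sketch of how the pieces compose (illustrative; `get_n_permutation_challenge_sets` is `pub(crate)`, so this only typechecks inside the crate, and `num_sets` is fixed at 1 for brevity):

fn sample_permutation_challenges<F, C, const D: usize>(
    num_challenges: usize,
) -> Vec<PermutationChallengeSet<F>>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    [(); C::HCO::WIDTH]:,
{
    // A fresh duplex challenger over the outer sponge configuration.
    let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
    get_n_permutation_challenge_sets(&mut challenger, num_challenges, 1)
}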

View File

@ -23,15 +23,15 @@ use crate::permutation::PermutationChallengeSet;
#[derive(Debug, Clone)]
pub struct StarkProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
/// Merkle cap of LDEs of trace values.
pub trace_cap: MerkleCap<F, C::Hasher>,
pub trace_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Merkle cap of LDEs of permutation Z values.
pub permutation_zs_cap: Option<MerkleCap<F, C::Hasher>>,
pub permutation_zs_cap: Option<MerkleCap<F, C::HCO, C::Hasher>>,
/// Merkle cap of LDEs of quotient polynomial values.
pub quotient_polys_cap: MerkleCap<F, C::Hasher>,
pub quotient_polys_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Purported values of each polynomial at the challenge point.
pub openings: StarkOpeningSet<F, D>,
/// A batch FRI argument for all openings.
pub opening_proof: FriProof<F, C::Hasher, D>,
pub opening_proof: FriProof<F, C::HCO, C::Hasher, D>,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> StarkProof<F, C, D> {
@ -88,11 +88,11 @@ pub struct CompressedStarkProof<
const D: usize,
> {
/// Merkle cap of LDEs of trace values.
pub trace_cap: MerkleCap<F, C::Hasher>,
pub trace_cap: MerkleCap<F, C::HCO, C::Hasher>,
/// Purported values of each polynomial at the challenge point.
pub openings: StarkOpeningSet<F, D>,
/// A batch FRI argument for all openings.
pub opening_proof: CompressedFriProof<F, C::Hasher, D>,
pub opening_proof: CompressedFriProof<F, C::HCO, C::Hasher, D>,
}
pub struct CompressedStarkProofWithPublicInputs<

View File

@ -11,8 +11,9 @@ use plonky2::field::types::Field;
use plonky2::field::zero_poly_coset::ZeroPolyOnCoset;
use plonky2::fri::oracle::PolynomialBatch;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::Challenger;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::plonk::config::GenericConfig;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use plonky2::util::{log2_ceil, log2_strict, transpose};
@ -42,7 +43,8 @@ where
S: Stark<F, D>,
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
let degree = trace_poly_values[0].len();
let degree_bits = log2_strict(degree);

View File

@ -7,6 +7,7 @@ use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::fri::witness_util::set_fri_proof_target;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::witness::Witness;
use plonky2::plonk::circuit_builder::CircuitBuilder;
@ -36,9 +37,11 @@ pub fn verify_stark_proof_circuit<
proof_with_pis: StarkProofWithPublicInputsTarget<D>,
inner_config: &StarkConfig,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
assert_eq!(proof_with_pis.public_inputs.len(), S::PUBLIC_INPUTS);
let degree_bits = proof_with_pis.proof.recover_degree_bits(inner_config);
@ -72,9 +75,10 @@ fn verify_stark_proof_with_challenges_circuit<
inner_config: &StarkConfig,
degree_bits: usize,
) where
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
[(); C::HCO::WIDTH]:,
{
check_permutation_options(&stark, &proof_with_pis, &challenges).unwrap();
let one = builder.one_extension();
@ -265,7 +269,7 @@ pub fn set_stark_proof_with_pis_target<F, C: GenericConfig<D, F = F>, W, const D
stark_proof_with_pis: &StarkProofWithPublicInputs<F, C, D>,
) where
F: RichField + Extendable<D>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
W: Witness<F>,
{
let StarkProofWithPublicInputs {
@ -291,7 +295,7 @@ pub fn set_stark_proof_target<F, C: GenericConfig<D, F = F>, W, const D: usize>(
proof: &StarkProof<F, C, D>,
) where
F: RichField + Extendable<D>,
C::Hasher: AlgebraicHasher<F>,
C::Hasher: AlgebraicHasher<F, C::HCO>,
W: Witness<F>,
{
witness.set_cap_target(&proof_target.trace_cap, &proof.trace_cap);

View File

@ -6,10 +6,11 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use plonky2::field::types::{Field, Sample};
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::plonk::config::GenericConfig;
use plonky2::util::{log2_ceil, log2_strict, transpose};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
@ -89,7 +90,8 @@ pub fn test_stark_circuit_constraints<
where
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
// Compute native constraint evaluation on random values.
let vars = StarkEvaluationVars {

View File

@ -7,7 +7,8 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::types::Field;
use plonky2::fri::verifier::verify_fri_proof;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::hash::hashing::HashConfig;
use plonky2::plonk::config::GenericConfig;
use plonky2::plonk::plonk_common::reduce_with_powers;
use crate::config::StarkConfig;
@ -31,7 +32,8 @@ pub fn verify_stark_proof<
where
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
[(); C::HCI::WIDTH]:,
{
ensure!(proof_with_pis.public_inputs.len() == S::PUBLIC_INPUTS);
let degree_bits = proof_with_pis.proof.recover_degree_bits(config);
@ -54,7 +56,7 @@ pub(crate) fn verify_stark_proof_with_challenges<
where
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
[(); C::Hasher::HASH_SIZE]:,
[(); C::HCO::WIDTH]:,
{
validate_proof_shape(&stark, &proof_with_pis, config)?;
check_permutation_options(&stark, &proof_with_pis, &challenges)?;
@ -156,7 +158,6 @@ where
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); S::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
{
let StarkProofWithPublicInputs {
proof,