2021-07-15 07:40:41 -07:00
|
|
|
use rayon::prelude::*;
|
|
|
|
|
|
2021-05-18 16:09:22 +02:00
|
|
|
use crate::field::extension_field::{flatten, unflatten, Extendable};
|
2021-09-07 18:28:28 -07:00
|
|
|
use crate::field::field_types::RichField;
|
2021-09-30 06:56:32 +02:00
|
|
|
use crate::fri::proof::{FriInitialTreeProof, FriProof, FriQueryRound, FriQueryStep};
|
2021-05-05 18:23:59 +02:00
|
|
|
use crate::fri::FriConfig;
|
2021-07-29 22:00:29 -07:00
|
|
|
use crate::hash::hash_types::HashOut;
|
|
|
|
|
use crate::hash::hashing::hash_n_to_1;
|
|
|
|
|
use crate::hash::merkle_tree::MerkleTree;
|
|
|
|
|
use crate::iop::challenger::Challenger;
|
Automatically select FRI reduction arities (#282)
* Automatically select FRI reduction arities
This way when a proof's degree changes, we won't need to manually update the `FriConfig`s of any recursive proofs on top of it.
For now I've added two methods of selecting arities. The first, `ConstantArityBits`, just applies a fixed reduction arity until the degree has shrunk below a certain threshold. The second, `MinSize`, searches for the sequence of arities that minimizes proof size.
Note that this optimization is approximate -- e.g. it doesn't account for the effect of compression, and doesn't count some minor contributions to proof size, like the Merkle roots from the commit phase. It also assumes we're not using Merkle caps in serialized proofs, and that we're inferring one of the evaluations, even though we haven't made those changes yet.
I think we should generally use `ConstantArityBits` for proofs that we will recurse on, since using a single arity tends to be more recursion-friendly. We could use `MinSize` for generating final bridge proofs, since we won't do further recursion on top of those.
* Fix tests
* Feedback
2021-10-04 13:52:05 -07:00
|
|
|
use crate::plonk::circuit_data::CommonCircuitData;
|
2021-07-29 22:00:29 -07:00
|
|
|
use crate::plonk::plonk_common::reduce_with_powers;
|
2021-05-05 18:23:59 +02:00
|
|
|
use crate::polynomial::polynomial::{PolynomialCoeffs, PolynomialValues};
|
2021-07-15 07:40:41 -07:00
|
|
|
use crate::timed;
|
2021-05-05 18:23:59 +02:00
|
|
|
use crate::util::reverse_index_bits_in_place;
|
2021-08-02 10:38:09 -07:00
|
|
|
use crate::util::timing::TimingTree;
|
2021-05-05 18:23:59 +02:00
|
|
|
|
|
|
|
|
/// Builds a FRI proof.
|
2021-09-07 18:28:28 -07:00
|
|
|
pub fn fri_proof<F: RichField + Extendable<D>, const D: usize>(
|
2021-05-06 17:09:55 +02:00
|
|
|
initial_merkle_trees: &[&MerkleTree<F>],
|
2021-05-05 18:23:59 +02:00
|
|
|
// Coefficients of the polynomial on which the LDT is performed. Only the first `1/rate` coefficients are non-zero.
|
2021-07-21 08:26:56 -07:00
|
|
|
lde_polynomial_coeffs: PolynomialCoeffs<F::Extension>,
|
2021-05-05 18:23:59 +02:00
|
|
|
// Evaluation of the polynomial on the large domain.
|
2021-07-21 08:26:56 -07:00
|
|
|
lde_polynomial_values: PolynomialValues<F::Extension>,
|
2021-05-05 18:23:59 +02:00
|
|
|
challenger: &mut Challenger<F>,
|
Automatically select FRI reduction arities (#282)
* Automatically select FRI reduction arities
This way when a proof's degree changes, we won't need to manually update the `FriConfig`s of any recursive proofs on top of it.
For now I've added two methods of selecting arities. The first, `ConstantArityBits`, just applies a fixed reduciton arity until the degree has shrunk below a certain threshold. The second, `MinSize`, searches for the sequence of arities that minimizes proof size.
Note that this optimization is approximate -- e.g. it doesn't account for the effect of compression, and doesn't count some minor contributions to proof size, like the Merkle roots from the commit phase. It also assumes we're not using Merkle caps in serialized proofs, and that we're inferring one of the evaluations, even though we haven't made those changes yet.
I think we should generally use `ConstantArityBits` for proofs that we will recurse on, since using a single arity tends to be more recursion-friendly. We could use `MinSize` for generating final bridge proofs, since we won't do further recursion on top of those.
* Fix tests
* Feedback
2021-10-04 13:52:05 -07:00
|
|
|
common_data: &CommonCircuitData<F, D>,
|
2021-08-02 10:38:09 -07:00
|
|
|
timing: &mut TimingTree,
|
2021-09-30 06:56:32 +02:00
|
|
|
) -> FriProof<F, D> {
|
2021-05-05 18:23:59 +02:00
|
|
|
let n = lde_polynomial_values.values.len();
|
|
|
|
|
assert_eq!(lde_polynomial_coeffs.coeffs.len(), n);
|
|
|
|
|
|
|
|
|
|
// Commit phase
|
2021-08-02 15:49:06 -07:00
|
|
|
let (trees, final_coeffs) = timed!(
|
|
|
|
|
timing,
|
|
|
|
|
"fold codewords in the commitment phase",
|
|
|
|
|
fri_committed_trees(
|
|
|
|
|
lde_polynomial_coeffs,
|
|
|
|
|
lde_polynomial_values,
|
|
|
|
|
challenger,
|
Automatically select FRI reduction arities (#282)
* Automatically select FRI reduction arities
This way when a proof's degree changes, we won't need to manually update the `FriConfig`s of any recursive proofs on top of it.
For now I've added two methods of selecting arities. The first, `ConstantArityBits`, just applies a fixed reduciton arity until the degree has shrunk below a certain threshold. The second, `MinSize`, searches for the sequence of arities that minimizes proof size.
Note that this optimization is approximate -- e.g. it doesn't account for the effect of compression, and doesn't count some minor contributions to proof size, like the Merkle roots from the commit phase. It also assumes we're not using Merkle caps in serialized proofs, and that we're inferring one of the evaluations, even though we haven't made those changes yet.
I think we should generally use `ConstantArityBits` for proofs that we will recurse on, since using a single arity tends to be more recursion-friendly. We could use `MinSize` for generating final bridge proofs, since we won't do further recursion on top of those.
* Fix tests
* Feedback
2021-10-04 13:52:05 -07:00
|
|
|
common_data,
|
2021-08-02 15:49:06 -07:00
|
|
|
)
|
2021-05-05 18:23:59 +02:00
|
|
|
);
|
|
|
|
|
|
|
|
|
|
// PoW phase
|
|
|
|
|
let current_hash = challenger.get_hash();
|
2021-07-15 07:40:41 -07:00
|
|
|
let pow_witness = timed!(
|
2021-08-02 10:38:09 -07:00
|
|
|
timing,
|
|
|
|
|
"find for proof-of-work witness",
|
Automatically select FRI reduction arities (#282)
* Automatically select FRI reduction arities
This way when a proof's degree changes, we won't need to manually update the `FriConfig`s of any recursive proofs on top of it.
For now I've added two methods of selecting arities. The first, `ConstantArityBits`, just applies a fixed reduciton arity until the degree has shrunk below a certain threshold. The second, `MinSize`, searches for the sequence of arities that minimizes proof size.
Note that this optimization is approximate -- e.g. it doesn't account for the effect of compression, and doesn't count some minor contributions to proof size, like the Merkle roots from the commit phase. It also assumes we're not using Merkle caps in serialized proofs, and that we're inferring one of the evaluations, even though we haven't made those changes yet.
I think we should generally use `ConstantArityBits` for proofs that we will recurse on, since using a single arity tends to be more recursion-friendly. We could use `MinSize` for generating final bridge proofs, since we won't do further recursion on top of those.
* Fix tests
* Feedback
2021-10-04 13:52:05 -07:00
|
|
|
fri_proof_of_work(current_hash, &common_data.config.fri_config)
|
2021-07-15 07:40:41 -07:00
|
|
|
);
|
2021-05-05 18:23:59 +02:00
|
|
|
|
|
|
|
|
// Query phase
|
Automatically select FRI reduction arities (#282)
* Automatically select FRI reduction arities
This way when a proof's degree changes, we won't need to manually update the `FriConfig`s of any recursive proofs on top of it.
For now I've added two methods of selecting arities. The first, `ConstantArityBits`, just applies a fixed reduciton arity until the degree has shrunk below a certain threshold. The second, `MinSize`, searches for the sequence of arities that minimizes proof size.
Note that this optimization is approximate -- e.g. it doesn't account for the effect of compression, and doesn't count some minor contributions to proof size, like the Merkle roots from the commit phase. It also assumes we're not using Merkle caps in serialized proofs, and that we're inferring one of the evaluations, even though we haven't made those changes yet.
I think we should generally use `ConstantArityBits` for proofs that we will recurse on, since using a single arity tends to be more recursion-friendly. We could use `MinSize` for generating final bridge proofs, since we won't do further recursion on top of those.
* Fix tests
* Feedback
2021-10-04 13:52:05 -07:00
|
|
|
let query_round_proofs =
|
|
|
|
|
fri_prover_query_rounds(initial_merkle_trees, &trees, challenger, n, common_data);
|
2021-05-05 18:23:59 +02:00
|
|
|
|
2021-09-30 06:56:32 +02:00
|
|
|
FriProof {
|
2021-08-10 15:53:27 +02:00
|
|
|
commit_phase_merkle_caps: trees.iter().map(|t| t.cap.clone()).collect(),
|
2021-05-05 18:23:59 +02:00
|
|
|
query_round_proofs,
|
|
|
|
|
final_poly: final_coeffs,
|
|
|
|
|
pow_witness,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-09-07 18:28:28 -07:00
|
|
|
/// The FRI commit phase: repeatedly folds the polynomial by the configured
/// reduction arities, Merkle-committing each intermediate codeword and observing
/// its cap in the transcript. Returns the per-layer Merkle trees together with
/// the final (fully folded) polynomial's coefficients.
///
/// NOTE(review): the interleaving of `observe_cap` / `get_extension_challenge`
/// defines the Fiat–Shamir transcript; the verifier must consume it in the same
/// order, so do not reorder these calls.
fn fri_committed_trees<F: RichField + Extendable<D>, const D: usize>(
    mut coeffs: PolynomialCoeffs<F::Extension>,
    mut values: PolynomialValues<F::Extension>,
    challenger: &mut Challenger<F>,
    common_data: &CommonCircuitData<F, D>,
) -> (Vec<MerkleTree<F>>, PolynomialCoeffs<F::Extension>) {
    let config = &common_data.config;
    let mut trees = Vec::new();

    // Current coset shift; updated each round so `values` stays consistent with `coeffs`.
    let mut shift = F::MULTIPLICATIVE_GROUP_GENERATOR;
    let num_reductions = common_data.fri_params.reduction_arity_bits.len();
    for i in 0..num_reductions {
        // Arity of this reduction round (always a power of two).
        let arity = 1 << common_data.fri_params.reduction_arity_bits[i];

        // Bit-reverse so that the `arity` values folded together sit in one contiguous chunk,
        // which lets each chunk become a single Merkle leaf.
        reverse_index_bits_in_place(&mut values.values);
        // Flatten each extension-field chunk into base-field elements for hashing (in parallel).
        let chunked_values = values
            .values
            .par_chunks(arity)
            .map(|chunk: &[F::Extension]| flatten(chunk))
            .collect();
        let tree = MerkleTree::new(chunked_values, config.cap_height);

        // Commit this layer to the transcript before sampling the folding challenge.
        challenger.observe_cap(&tree.cap);
        trees.push(tree);

        let beta = challenger.get_extension_challenge();
        // P(x) = sum_{i<r} x^i * P_i(x^r) becomes sum_{i<r} beta^i * P_i(x).
        coeffs = PolynomialCoeffs::new(
            coeffs
                .coeffs
                .par_chunks_exact(arity)
                .map(|chunk| reduce_with_powers(chunk, beta))
                .collect::<Vec<_>>(),
        );
        // The folded domain is the `arity`-th power of the old one, so raise the shift accordingly,
        // then re-evaluate the folded polynomial on the new coset.
        shift = shift.exp_u64(arity as u64);
        values = coeffs.coset_fft(shift.into())
    }

    // The coefficients being removed here should always be zero.
    coeffs.coeffs.truncate(coeffs.len() >> config.rate_bits);

    // The final polynomial is sent in the clear; absorb it into the transcript.
    challenger.observe_extension_elements(&coeffs.coeffs);
    (trees, coeffs)
}
|
|
|
|
|
|
2021-09-07 18:28:28 -07:00
|
|
|
fn fri_proof_of_work<F: RichField>(current_hash: HashOut<F>, config: &FriConfig) -> F {
|
2021-08-18 00:53:20 -07:00
|
|
|
(0..=F::NEG_ONE.to_canonical_u64())
|
2021-07-15 07:40:41 -07:00
|
|
|
.into_par_iter()
|
|
|
|
|
.find_any(|&i| {
|
2021-05-05 18:23:59 +02:00
|
|
|
hash_n_to_1(
|
|
|
|
|
current_hash
|
|
|
|
|
.elements
|
|
|
|
|
.iter()
|
|
|
|
|
.copied()
|
|
|
|
|
.chain(Some(F::from_canonical_u64(i)))
|
|
|
|
|
.collect(),
|
|
|
|
|
false,
|
|
|
|
|
)
|
|
|
|
|
.to_canonical_u64()
|
2021-06-17 09:49:41 +02:00
|
|
|
.leading_zeros()
|
2021-07-21 13:05:32 -07:00
|
|
|
>= config.proof_of_work_bits + (64 - F::order().bits()) as u32
|
2021-05-05 18:23:59 +02:00
|
|
|
})
|
|
|
|
|
.map(F::from_canonical_u64)
|
2021-07-15 07:40:41 -07:00
|
|
|
.expect("Proof of work failed. This is highly unlikely!")
|
2021-05-05 18:23:59 +02:00
|
|
|
}
|
|
|
|
|
|
2021-09-07 18:28:28 -07:00
|
|
|
fn fri_prover_query_rounds<F: RichField + Extendable<D>, const D: usize>(
|
2021-05-06 17:09:55 +02:00
|
|
|
initial_merkle_trees: &[&MerkleTree<F>],
|
2021-05-05 18:23:59 +02:00
|
|
|
trees: &[MerkleTree<F>],
|
|
|
|
|
challenger: &mut Challenger<F>,
|
|
|
|
|
n: usize,
|
Automatically select FRI reduction arities (#282)
* Automatically select FRI reduction arities
This way when a proof's degree changes, we won't need to manually update the `FriConfig`s of any recursive proofs on top of it.
For now I've added two methods of selecting arities. The first, `ConstantArityBits`, just applies a fixed reduciton arity until the degree has shrunk below a certain threshold. The second, `MinSize`, searches for the sequence of arities that minimizes proof size.
Note that this optimization is approximate -- e.g. it doesn't account for the effect of compression, and doesn't count some minor contributions to proof size, like the Merkle roots from the commit phase. It also assumes we're not using Merkle caps in serialized proofs, and that we're inferring one of the evaluations, even though we haven't made those changes yet.
I think we should generally use `ConstantArityBits` for proofs that we will recurse on, since using a single arity tends to be more recursion-friendly. We could use `MinSize` for generating final bridge proofs, since we won't do further recursion on top of those.
* Fix tests
* Feedback
2021-10-04 13:52:05 -07:00
|
|
|
common_data: &CommonCircuitData<F, D>,
|
2021-05-18 15:44:50 +02:00
|
|
|
) -> Vec<FriQueryRound<F, D>> {
|
Automatically select FRI reduction arities (#282)
* Automatically select FRI reduction arities
This way when a proof's degree changes, we won't need to manually update the `FriConfig`s of any recursive proofs on top of it.
For now I've added two methods of selecting arities. The first, `ConstantArityBits`, just applies a fixed reduciton arity until the degree has shrunk below a certain threshold. The second, `MinSize`, searches for the sequence of arities that minimizes proof size.
Note that this optimization is approximate -- e.g. it doesn't account for the effect of compression, and doesn't count some minor contributions to proof size, like the Merkle roots from the commit phase. It also assumes we're not using Merkle caps in serialized proofs, and that we're inferring one of the evaluations, even though we haven't made those changes yet.
I think we should generally use `ConstantArityBits` for proofs that we will recurse on, since using a single arity tends to be more recursion-friendly. We could use `MinSize` for generating final bridge proofs, since we won't do further recursion on top of those.
* Fix tests
* Feedback
2021-10-04 13:52:05 -07:00
|
|
|
(0..common_data.config.fri_config.num_query_rounds)
|
|
|
|
|
.map(|_| fri_prover_query_round(initial_merkle_trees, trees, challenger, n, common_data))
|
2021-05-05 18:23:59 +02:00
|
|
|
.collect()
|
|
|
|
|
}
|
|
|
|
|
|
2021-09-07 18:28:28 -07:00
|
|
|
/// Performs a single FRI query round: samples a query index from the transcript,
/// opens the initial trees at that index, then opens each commit-phase tree at
/// the index as it shrinks through the reduction layers.
fn fri_prover_query_round<F: RichField + Extendable<D>, const D: usize>(
    initial_merkle_trees: &[&MerkleTree<F>],
    trees: &[MerkleTree<F>],
    challenger: &mut Challenger<F>,
    n: usize,
    common_data: &CommonCircuitData<F, D>,
) -> FriQueryRound<F, D> {
    let mut query_steps = Vec::new();
    // Derive the query position from the transcript, reduced into the LDE domain
    // of size `n`.
    let x = challenger.get_challenge();
    let mut x_index = x.to_canonical_u64() as usize % n;
    // Open every initial tree at the sampled index: (leaf values, Merkle proof).
    let initial_proof = initial_merkle_trees
        .iter()
        .map(|t| (t.get(x_index).to_vec(), t.prove(x_index)))
        .collect::<Vec<_>>();
    for (i, tree) in trees.iter().enumerate() {
        let arity_bits = common_data.fri_params.reduction_arity_bits[i];
        // Each leaf of a commit-phase tree packs a whole coset of `1 << arity_bits`
        // evaluations, so the leaf index is the query index with those bits dropped.
        let evals = unflatten(tree.get(x_index >> arity_bits));
        let merkle_proof = tree.prove(x_index >> arity_bits);

        query_steps.push(FriQueryStep {
            evals,
            merkle_proof,
        });

        // Move to the folded layer: the domain shrank by a factor of the arity.
        x_index >>= arity_bits;
    }
    FriQueryRound {
        initial_trees_proof: FriInitialTreeProof {
            evals_proofs: initial_proof,
        },
        steps: query_steps,
    }
}
|