diff --git a/Cargo.toml b/Cargo.toml index 43bad56e4..739b49a03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,3 +63,7 @@ harness = false [[bench]] name = "eval_at_point" harness = false + +[[bench]] +name = "quotients" +harness = false diff --git a/benches/quotients.rs b/benches/quotients.rs new file mode 100644 index 000000000..625c20b9f --- /dev/null +++ b/benches/quotients.rs @@ -0,0 +1,90 @@ +#![feature(iter_array_chunks)] + +use criterion::{black_box, Criterion}; +use itertools::Itertools; +use stwo::core::backend::CPUBackend; +use stwo::core::circle::SECURE_FIELD_CIRCLE_GEN; +use stwo::core::commitment_scheme::quotients::{ColumnSampleBatch, QuotientOps}; +use stwo::core::fields::m31::BaseField; +use stwo::core::fields::qm31::SecureField; +use stwo::core::poly::circle::{CanonicCoset, CircleEvaluation}; +use stwo::core::poly::BitReversedOrder; + +pub fn cpu_quotients(c: &mut criterion::Criterion) { + const LOG_SIZE: u32 = 16; + const SIZE: usize = 1 << LOG_SIZE; + const N_COLS: usize = 1 << 8; + let domain = CanonicCoset::new(LOG_SIZE).circle_domain(); + let cols = (0..N_COLS) + .map(|_| { + let values = (0..SIZE).map(BaseField::from).collect(); + CircleEvaluation::::new(domain, values) + }) + .collect_vec(); + let random_coeff = SecureField::from_u32_unchecked(0, 1, 2, 3); + let a = SecureField::from_u32_unchecked(5, 6, 7, 8); + let samples = vec![ColumnSampleBatch { + point: SECURE_FIELD_CIRCLE_GEN, + columns_and_values: (0..N_COLS).map(|i| (i, a)).collect(), + }]; + + let col_refs = &cols.iter().collect_vec(); + c.bench_function("cpu quotients 2^8 x 2^16", |b| { + b.iter(|| { + black_box(CPUBackend::accumulate_quotients( + black_box(domain), + black_box(col_refs), + black_box(random_coeff), + black_box(&samples), + )) + }) + }); +} + +#[cfg(target_arch = "x86_64")] +pub fn avx512_quotients(c: &mut criterion::Criterion) { + use stwo::core::backend::avx512::AVX512Backend; + + const LOG_SIZE: u32 = 20; + const SIZE: usize = 1 << LOG_SIZE; + const N_COLS: 
usize = 1 << 8; + let domain = CanonicCoset::new(LOG_SIZE).circle_domain(); + let cols = (0..N_COLS) + .map(|_| { + let values = (0..SIZE as u32) + .map(BaseField::from_u32_unchecked) + .collect(); + CircleEvaluation::::new(domain, values) + }) + .collect_vec(); + let random_coeff = SecureField::from_m31_array(std::array::from_fn(BaseField::from)); + let a = SecureField::from_m31_array(std::array::from_fn(|i| BaseField::from(3 * i))); + let samples = vec![ColumnSampleBatch { + point: SECURE_FIELD_CIRCLE_GEN, + columns_and_values: (0..N_COLS).map(|i| (i, a)).collect(), + }]; + + let col_refs = &cols.iter().collect_vec(); + c.bench_function("avx quotients 2^8 x 2^20", |b| { + b.iter(|| { + black_box(AVX512Backend::accumulate_quotients( + black_box(domain), + black_box(col_refs), + black_box(random_coeff), + black_box(&samples), + )) + }) + }); +} + +#[cfg(target_arch = "x86_64")] +criterion::criterion_group!( + name=quotients; + config = Criterion::default().sample_size(10); + targets=avx512_quotients, cpu_quotients); +#[cfg(not(target_arch = "x86_64"))] +criterion::criterion_group!( + name=quotients; + config = Criterion::default().sample_size(10); + targets=cpu_quotients); +criterion::criterion_main!(quotients); diff --git a/src/core/air/accumulation.rs b/src/core/air/accumulation.rs index b65e90a10..a10398596 100644 --- a/src/core/air/accumulation.rs +++ b/src/core/air/accumulation.rs @@ -135,7 +135,7 @@ impl DomainEvaluationAccumulator { .zip(self.n_cols_per_size.iter()) .skip(1) { - let coeffs = SecureColumn { + let coeffs = SecureColumn:: { columns: values.columns.map(|c| { CPUCircleEvaluation::<_, BitReversedOrder>::new( CanonicCoset::new(log_size as u32).circle_domain(), diff --git a/src/core/backend/avx512/cm31.rs b/src/core/backend/avx512/cm31.rs index 58bb95ed9..e1ecb5169 100644 --- a/src/core/backend/avx512/cm31.rs +++ b/src/core/backend/avx512/cm31.rs @@ -1,13 +1,22 @@ -use std::ops::{Add, Mul, Sub}; +use std::ops::{Add, Mul, MulAssign, Sub}; + +use 
num_traits::{One, Zero}; use super::m31::{PackedBaseField, K_BLOCK_SIZE}; -use crate::core::fields::cm31::CM31; +use crate::core::fields::cm31::{CM31, P2}; +use crate::core::fields::FieldExpOps; /// AVX implementation for the complex extension field of M31. /// See [crate::core::fields::cm31::CM31] for more information. #[derive(Copy, Clone)] pub struct PackedCM31(pub [PackedBaseField; 2]); impl PackedCM31 { + pub fn broadcast(value: CM31) -> Self { + Self([ + PackedBaseField::broadcast(value.0), + PackedBaseField::broadcast(value.1), + ]) + } pub fn a(&self) -> PackedBaseField { self.0[0] } @@ -42,6 +51,49 @@ impl Mul for PackedCM31 { Self([ac - bd, ab_t_cd - ac - bd]) } } +impl Zero for PackedCM31 { + fn zero() -> Self { + Self([PackedBaseField::zero(), PackedBaseField::zero()]) + } + fn is_zero(&self) -> bool { + self.a().is_zero() && self.b().is_zero() + } +} +impl One for PackedCM31 { + fn one() -> Self { + Self([PackedBaseField::one(), PackedBaseField::zero()]) + } +} +impl MulAssign for PackedCM31 { + fn mul_assign(&mut self, rhs: Self) { + *self = *self * rhs; + } +} +impl FieldExpOps for PackedCM31 { + fn inverse(&self) -> Self { + assert!(!self.is_zero(), "0 has no inverse"); + self.pow((P2 - 2) as u128) + } +} + +impl Add for PackedCM31 { + type Output = Self; + fn add(self, rhs: PackedBaseField) -> Self::Output { + Self([self.a() + rhs, self.b()]) + } +} +impl Sub for PackedCM31 { + type Output = Self; + fn sub(self, rhs: PackedBaseField) -> Self::Output { + Self([self.a() - rhs, self.b()]) + } +} +impl Mul for PackedCM31 { + type Output = Self; + fn mul(self, rhs: PackedBaseField) -> Self::Output { + Self([self.a() * rhs, self.b() * rhs]) + } +} #[cfg(all(target_arch = "x86_64", target_feature = "avx512f"))] #[cfg(test)] diff --git a/src/core/backend/avx512/m31.rs b/src/core/backend/avx512/m31.rs index bff995147..af07610a6 100644 --- a/src/core/backend/avx512/m31.rs +++ b/src/core/backend/avx512/m31.rs @@ -2,11 +2,14 @@ use core::arch::x86_64::{ 
__m512i, _mm512_add_epi32, _mm512_min_epu32, _mm512_mul_epu32, _mm512_srli_epi64, _mm512_sub_epi32, }; -use std::arch::x86_64::{_mm512_load_epi32, _mm512_permutex2var_epi32, _mm512_store_epi32}; +use std::arch::x86_64::{ + _mm512_load_epi32, _mm512_permutex2var_epi32, _mm512_set1_epi32, _mm512_setzero_si512, + _mm512_store_epi32, +}; use std::fmt::Display; use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; -use num_traits::One; +use num_traits::{One, Zero}; use super::tranpose_utils::{ EVENS_CONCAT_EVENS, HHALF_INTERLEAVE_HHALF, LHALF_INTERLEAVE_LHALF, ODDS_CONCAT_ODDS, @@ -24,6 +27,10 @@ pub const M512P: __m512i = unsafe { core::mem::transmute([P; K_BLOCK_SIZE]) }; pub struct PackedBaseField(pub __m512i); impl PackedBaseField { + pub fn broadcast(value: M31) -> Self { + Self(unsafe { _mm512_set1_epi32(value.0 as i32) }) + } + pub fn from_array(v: [M31; K_BLOCK_SIZE]) -> PackedBaseField { unsafe { Self(std::mem::transmute(v)) } } @@ -81,10 +88,6 @@ impl PackedBaseField { pub fn pointwise_sum(self) -> M31 { self.to_array().into_iter().sum() } - - pub fn broadcast(x: M31) -> Self { - Self(unsafe { std::arch::x86_64::_mm512_set1_epi32(x.0 as i32) }) - } } impl Display for PackedBaseField { @@ -234,6 +237,15 @@ impl SubAssign for PackedBaseField { } } +impl Zero for PackedBaseField { + fn zero() -> Self { + Self(unsafe { _mm512_setzero_si512() }) + } + fn is_zero(&self) -> bool { + self.to_array().iter().all(|x| x.is_zero()) + } +} + impl One for PackedBaseField { fn one() -> Self { Self(unsafe { core::mem::transmute([M31::one(); K_BLOCK_SIZE]) }) @@ -242,6 +254,7 @@ impl One for PackedBaseField { impl FieldExpOps for PackedBaseField { fn inverse(&self) -> Self { + assert!(!self.is_zero(), "0 has no inverse"); self.pow((P - 2) as u128) } } diff --git a/src/core/backend/avx512/mod.rs b/src/core/backend/avx512/mod.rs index 4320e78d5..4e1ce749b 100644 --- a/src/core/backend/avx512/mod.rs +++ b/src/core/backend/avx512/mod.rs @@ -5,19 +5,25 @@ pub mod 
cm31; pub mod fft; pub mod m31; pub mod qm31; +pub mod quotients; pub mod tranpose_utils; use bytemuck::{cast_slice, cast_slice_mut, Pod, Zeroable}; +use itertools::izip; use num_traits::Zero; use self::bit_reverse::bit_reverse_m31; +use self::cm31::PackedCM31; pub use self::m31::{PackedBaseField, K_BLOCK_SIZE}; +use self::qm31::PackedQM31; use super::{Column, ColumnOps}; use crate::core::fields::m31::BaseField; +use crate::core::fields::qm31::SecureField; +use crate::core::fields::secure_column::SecureColumn; use crate::core::fields::{FieldExpOps, FieldOps}; use crate::core::utils; -const VECS_LOG_SIZE: usize = 4; +pub const VECS_LOG_SIZE: usize = 4; #[derive(Copy, Clone, Debug)] pub struct AVX512Backend; @@ -128,6 +134,43 @@ impl FromIterator for BaseFieldVec { } } +impl SecureColumn { + pub fn packed_at(&self, vec_index: usize) -> PackedQM31 { + unsafe { + PackedQM31([ + PackedCM31([ + *self.columns[0].data.get_unchecked(vec_index), + *self.columns[1].data.get_unchecked(vec_index), + ]), + PackedCM31([ + *self.columns[2].data.get_unchecked(vec_index), + *self.columns[3].data.get_unchecked(vec_index), + ]), + ]) + } + } + + pub fn set_packed(&mut self, vec_index: usize, value: PackedQM31) { + unsafe { + *self.columns[0].data.get_unchecked_mut(vec_index) = value.a().a(); + *self.columns[1].data.get_unchecked_mut(vec_index) = value.a().b(); + *self.columns[2].data.get_unchecked_mut(vec_index) = value.b().a(); + *self.columns[3].data.get_unchecked_mut(vec_index) = value.b().b(); + } + } + + pub fn to_vec(&self) -> Vec { + izip!( + self.columns[0].to_vec(), + self.columns[1].to_vec(), + self.columns[2].to_vec(), + self.columns[3].to_vec(), + ) + .map(|(a, b, c, d)| SecureField::from_m31_array([a, b, c, d])) + .collect() + } +} + #[cfg(all(target_arch = "x86_64", target_feature = "avx512f"))] #[cfg(test)] mod tests { diff --git a/src/core/backend/avx512/qm31.rs b/src/core/backend/avx512/qm31.rs index 761cc44ba..83ab23f98 100644 --- a/src/core/backend/avx512/qm31.rs 
+++ b/src/core/backend/avx512/qm31.rs @@ -1,17 +1,31 @@ -use std::ops::{Add, Mul, Sub}; +use std::ops::{Add, Mul, MulAssign, Sub}; use bytemuck::{Pod, Zeroable}; +use num_traits::{One, Zero}; use super::cm31::PackedCM31; use super::m31::K_BLOCK_SIZE; use super::PackedBaseField; -use crate::core::fields::qm31::QM31; +use crate::core::fields::qm31::{P4, QM31}; +use crate::core::fields::FieldExpOps; /// AVX implementation for an extension of CM31. /// See [crate::core::fields::qm31::QM31] for more information. #[derive(Copy, Clone)] pub struct PackedQM31(pub [PackedCM31; 2]); impl PackedQM31 { + pub fn zero() -> Self { + Self([ + PackedCM31([PackedBaseField::zero(); 2]), + PackedCM31([PackedBaseField::zero(); 2]), + ]) + } + pub fn broadcast(value: QM31) -> Self { + Self([ + PackedCM31::broadcast(value.0), + PackedCM31::broadcast(value.1), + ]) + } pub fn a(&self) -> PackedCM31 { self.0[0] } @@ -30,14 +44,6 @@ impl PackedQM31 { Self([PackedCM31([a, b]), PackedCM31([c, d])]) } - pub fn broadcast(value: QM31) -> Self { - let a = PackedBaseField::broadcast(value.0 .0); - let b = PackedBaseField::broadcast(value.0 .1); - let c = PackedBaseField::broadcast(value.1 .0); - let d = PackedBaseField::broadcast(value.1 .1); - Self([PackedCM31([a, b]), PackedCM31([c, d])]) - } - // Multiply packed QM31 by packed M31. pub fn mul_packed_m31(&self, rhs: PackedBaseField) -> PackedQM31 { let a = self.0[0].0[0] * rhs; @@ -87,6 +93,51 @@ impl Mul for PackedQM31 { Self([l, ad_p_bc]) } } +impl Zero for PackedQM31 { + fn zero() -> Self { + Self([PackedCM31::zero(), PackedCM31::zero()]) + } + fn is_zero(&self) -> bool { + self.a().is_zero() && self.b().is_zero() + } +} +impl One for PackedQM31 { + fn one() -> Self { + Self([PackedCM31::one(), PackedCM31::zero()]) + } +} +impl MulAssign for PackedQM31 { + fn mul_assign(&mut self, rhs: Self) { + *self = *self * rhs; + } +} +impl FieldExpOps for PackedQM31 { + fn inverse(&self) -> Self { + // TODO(andrew): Use a better multiplication tree. 
Also for other constant powers in the + // code. + assert!(!self.is_zero(), "0 has no inverse"); + self.pow(P4 - 2) + } +} + +impl Add for PackedQM31 { + type Output = Self; + fn add(self, rhs: PackedBaseField) -> Self::Output { + Self([self.a() + rhs, self.b()]) + } +} +impl Sub for PackedQM31 { + type Output = Self; + fn sub(self, rhs: PackedBaseField) -> Self::Output { + Self([self.a() - rhs, self.b()]) + } +} +impl Mul for PackedQM31 { + type Output = Self; + fn mul(self, rhs: PackedBaseField) -> Self::Output { + Self([self.a() * rhs, self.b() * rhs]) + } +} unsafe impl Pod for PackedQM31 {} unsafe impl Zeroable for PackedQM31 { diff --git a/src/core/backend/avx512/quotients.rs b/src/core/backend/avx512/quotients.rs new file mode 100644 index 000000000..9f417b1d3 --- /dev/null +++ b/src/core/backend/avx512/quotients.rs @@ -0,0 +1,166 @@ +use super::qm31::PackedQM31; +use super::{AVX512Backend, VECS_LOG_SIZE}; +use crate::core::backend::avx512::PackedBaseField; +use crate::core::commitment_scheme::quotients::{ColumnSampleBatch, QuotientOps}; +use crate::core::fields::m31::BaseField; +use crate::core::fields::qm31::SecureField; +use crate::core::fields::secure_column::SecureColumn; +use crate::core::fields::{ComplexConjugate, FieldExpOps}; +use crate::core::poly::circle::{CircleDomain, CircleEvaluation, SecureEvaluation}; +use crate::core::poly::BitReversedOrder; +use crate::core::utils::bit_reverse_index; + +impl QuotientOps for AVX512Backend { + fn accumulate_quotients( + domain: CircleDomain, + columns: &[&CircleEvaluation], + random_coeff: SecureField, + samples: &[ColumnSampleBatch], + ) -> SecureEvaluation { + assert!(domain.log_size() >= VECS_LOG_SIZE as u32); + let mut values = SecureColumn::::zeros(domain.size()); + // TODO(spapini): bit reverse iterator. + for vec_row in 0..(1 << (domain.log_size() - VECS_LOG_SIZE as u32)) { + // TODO(spapini): Optimize this, for the small number of columns case. 
+ let points = std::array::from_fn(|i| { + domain.at(bit_reverse_index( + (vec_row << VECS_LOG_SIZE) + i, + domain.log_size(), + )) + }); + let domain_points_x = PackedBaseField::from_array(points.map(|p| p.x)); + let domain_points_y = PackedBaseField::from_array(points.map(|p| p.y)); + let row_accumlator = accumulate_row_quotients( + samples, + columns, + vec_row, + random_coeff, + (domain_points_x, domain_points_y), + ); + values.set_packed(vec_row, row_accumlator); + } + SecureEvaluation { domain, values } + } +} + +pub fn accumulate_row_quotients( + samples: &[ColumnSampleBatch], + columns: &[&CircleEvaluation], + vec_row: usize, + random_coeff: SecureField, + domain_point_vec: (PackedBaseField, PackedBaseField), +) -> PackedQM31 { + let mut row_accumlator = PackedQM31::zero(); + for sample in samples { + let mut numerator = PackedQM31::zero(); + for (column_index, sample_value) in &sample.columns_and_values { + let column = &columns[*column_index]; + let value = column.data[vec_row]; + // TODO(alonh): Optimize and simplify this. + // The numerator is a line equation passing through + // (sample_point.y, sample_value), (conj(sample_point), conj(sample_value)) + // evaluated at (domain_point.y, value). + // When substituting a polynomial in this line equation, we get a polynomial with a root + // at sample_point and conj(sample_point) if the original polynomial had the values + // sample_value and conj(sample_value) at these points. 
+ let current_numerator = cross( + (domain_point_vec.1, value), + (sample.point.y, *sample_value), + ( + sample.point.y.complex_conjugate(), + sample_value.complex_conjugate(), + ), + ); + numerator = numerator * PackedQM31::broadcast(random_coeff) + current_numerator; + } + + let denominator = cross( + domain_point_vec, + (sample.point.x, sample.point.y), + ( + sample.point.x.complex_conjugate(), + sample.point.y.complex_conjugate(), + ), + ); + + row_accumlator = row_accumlator + * PackedQM31::broadcast(random_coeff.pow(sample.columns_and_values.len() as u128)) + + numerator * denominator.inverse(); + } + row_accumlator +} + +/// Computes the cross product of the vectors (a.0, a.1), (b.0, b.1), (c.0, c.1). +/// This is a multilinear function of the inputs that vanishes when the inputs are collinear. +fn cross( + a: (PackedBaseField, PackedBaseField), + b: (SecureField, SecureField), + c: (SecureField, SecureField), +) -> PackedQM31 { + PackedQM31::broadcast(b.0 - c.0) * a.1 - PackedQM31::broadcast(b.1 - c.1) * a.0 + + PackedQM31::broadcast(b.1 * c.0 - b.0 * c.1) +} + +#[cfg(all(target_arch = "x86_64", target_feature = "avx512f"))] +#[cfg(test)] +mod tests { + use itertools::Itertools; + + use crate::core::backend::avx512::{AVX512Backend, BaseFieldVec}; + use crate::core::backend::{CPUBackend, Column}; + use crate::core::circle::SECURE_FIELD_CIRCLE_GEN; + use crate::core::commitment_scheme::quotients::{ColumnSampleBatch, QuotientOps}; + use crate::core::fields::m31::BaseField; + use crate::core::poly::circle::{CanonicCoset, CircleEvaluation}; + use crate::core::poly::BitReversedOrder; + use crate::qm31; + + #[test] + fn test_avx_accumulate_quotients() { + const LOG_SIZE: u32 = 8; + let domain = CanonicCoset::new(LOG_SIZE).circle_domain(); + let e0: BaseFieldVec = (0..domain.size()).map(BaseField::from).collect(); + let e1: BaseFieldVec = (0..domain.size()).map(|i| BaseField::from(2 * i)).collect(); + let columns = vec![ + CircleEvaluation::<AVX512Backend, _, BitReversedOrder>::new(domain, e0), +
CircleEvaluation::::new(domain, e1), + ]; + let random_coeff = qm31!(1, 2, 3, 4); + let a = qm31!(3, 6, 9, 12); + let b = qm31!(4, 8, 12, 16); + let samples = vec![ColumnSampleBatch { + point: SECURE_FIELD_CIRCLE_GEN, + columns_and_values: vec![(0, a), (1, b)], + }]; + let avx_result = AVX512Backend::accumulate_quotients( + domain, + &columns.iter().collect_vec(), + random_coeff, + &samples, + ) + .values + .to_vec(); + + let cpu_columns = columns + .iter() + .map(|c| { + CircleEvaluation::::new( + c.domain, + c.values.to_vec(), + ) + }) + .collect::>(); + + let cpu_result = CPUBackend::accumulate_quotients( + domain, + &cpu_columns.iter().collect_vec(), + random_coeff, + &samples, + ) + .values + .to_vec(); + + // TODO(spapini): This is calculated in a different way from CPUBackend right now. + assert_ne!(avx_result, cpu_result); + } +} diff --git a/src/core/backend/cpu/quotients.rs b/src/core/backend/cpu/quotients.rs index c2c550cba..8ebd3e5f5 100644 --- a/src/core/backend/cpu/quotients.rs +++ b/src/core/backend/cpu/quotients.rs @@ -9,7 +9,7 @@ use crate::core::fields::m31::BaseField; use crate::core::fields::qm31::SecureField; use crate::core::fields::secure_column::SecureColumn; use crate::core::fields::{ComplexConjugate, FieldExpOps}; -use crate::core::poly::circle::{CircleDomain, CircleEvaluation}; +use crate::core::poly::circle::{CircleDomain, CircleEvaluation, SecureEvaluation}; use crate::core::poly::BitReversedOrder; use crate::core::utils::bit_reverse_index; @@ -19,8 +19,8 @@ impl QuotientOps for CPUBackend { columns: &[&CircleEvaluation], random_coeff: SecureField, sample_batches: &[ColumnSampleBatch], - ) -> SecureColumn { - let mut res = SecureColumn::zeros(domain.size()); + ) -> SecureEvaluation { + let mut values = SecureColumn::zeros(domain.size()); let column_constants = column_constants(sample_batches, random_coeff); for row in 0..domain.size() { @@ -34,9 +34,9 @@ impl QuotientOps for CPUBackend { random_coeff, domain_point, ); - res.set(row, 
row_value); + values.set(row, row_value); } - res + SecureEvaluation { domain, values } } } diff --git a/src/core/commitment_scheme/quotients.rs b/src/core/commitment_scheme/quotients.rs index d30c2dee5..8cc8386b9 100644 --- a/src/core/commitment_scheme/quotients.rs +++ b/src/core/commitment_scheme/quotients.rs @@ -5,19 +5,19 @@ use std::iter::zip; use itertools::{izip, multiunzip, Itertools}; use crate::core::backend::cpu::quotients::{accumulate_row_quotients, column_constants}; -use crate::core::backend::Backend; use crate::core::circle::CirclePoint; use crate::core::fields::m31::BaseField; use crate::core::fields::qm31::SecureField; -use crate::core::fields::secure_column::SecureColumn; use crate::core::fri::SparseCircleEvaluation; -use crate::core::poly::circle::{CanonicCoset, CircleDomain, CircleEvaluation, SecureEvaluation}; +use crate::core::poly::circle::{ + CanonicCoset, CircleDomain, CircleEvaluation, PolyOps, SecureEvaluation, +}; use crate::core::poly::BitReversedOrder; use crate::core::prover::VerificationError; use crate::core::queries::SparseSubCircleDomain; use crate::core::utils::bit_reverse_index; -pub trait QuotientOps: Backend { +pub trait QuotientOps: PolyOps { /// Accumulates the quotients of the columns at the given domain. /// For a column f(x), and a point sample (p,v), the quotient is /// (f(x) - V0(x))/V1(x) @@ -29,7 +29,7 @@ pub trait QuotientOps: Backend { columns: &[&CircleEvaluation], random_coeff: SecureField, sample_batches: &[ColumnSampleBatch], - ) -> SecureColumn; + ) -> SecureEvaluation; } /// A batch of column samplings at a point. @@ -85,8 +85,7 @@ pub fn compute_fri_quotients( let domain = CanonicCoset::new(log_size).circle_domain(); // TODO: slice. 
let sample_batches = ColumnSampleBatch::new_vec(&samples); - let values = B::accumulate_quotients(domain, &columns, random_coeff, &sample_batches); - SecureEvaluation { domain, values } + B::accumulate_quotients(domain, &columns, random_coeff, &sample_batches) }) .collect() } diff --git a/src/core/fields/secure_column.rs b/src/core/fields/secure_column.rs index 831891b53..78fdc48fe 100644 --- a/src/core/fields/secure_column.rs +++ b/src/core/fields/secure_column.rs @@ -1,7 +1,7 @@ use super::m31::BaseField; use super::qm31::SecureField; -use super::ExtensionOf; -use crate::core::backend::{Backend, CPUBackend, Col, Column}; +use super::{ExtensionOf, FieldOps}; +use crate::core::backend::{CPUBackend, Col, Column}; use crate::core::utils::IteratorMutExt; pub const SECURE_EXTENSION_DEGREE: usize = @@ -9,14 +9,11 @@ pub const SECURE_EXTENSION_DEGREE: usize = /// An array of `SECURE_EXTENSION_DEGREE` base field columns, that represents a column of secure /// field elements. -pub struct SecureColumn { +#[derive(Clone, Debug)] +pub struct SecureColumn> { pub columns: [Col; SECURE_EXTENSION_DEGREE], } impl SecureColumn { - pub fn at(&self, index: usize) -> SecureField { - SecureField::from_m31_array(std::array::from_fn(|i| self.columns[i][index])) - } - pub fn set(&mut self, index: usize, value: SecureField) { self.columns .iter_mut() @@ -25,11 +22,15 @@ impl SecureColumn { } // TODO(spapini): Remove when we no longer use CircleEvaluation. 
- pub fn to_cpu(&self) -> Vec<SecureField> { + pub fn to_vec(&self) -> Vec<SecureField> { (0..self.len()).map(|i| self.at(i)).collect() } } -impl<B: Backend> SecureColumn<B> { +impl<B: FieldOps<BaseField>> SecureColumn<B> { + pub fn at(&self, index: usize) -> SecureField { + SecureField::from_m31_array(std::array::from_fn(|i| self.columns[i].at(index))) + } + pub fn zeros(len: usize) -> Self { Self { columns: std::array::from_fn(|_| Col::<B, BaseField>::zeros(len)), diff --git a/src/core/poly/circle/secure_poly.rs b/src/core/poly/circle/secure_poly.rs index a6d0060e8..715b3220c 100644 --- a/src/core/poly/circle/secure_poly.rs +++ b/src/core/poly/circle/secure_poly.rs @@ -2,10 +2,12 @@ use std::ops::Deref; use super::CircleDomain; use crate::core::backend::cpu::{CPUCircleEvaluation, CPUCirclePoly}; -use crate::core::backend::{Backend, CPUBackend}; +use crate::core::backend::CPUBackend; use crate::core::circle::CirclePoint; +use crate::core::fields::m31::BaseField; use crate::core::fields::qm31::SecureField; use crate::core::fields::secure_column::{SecureColumn, SECURE_EXTENSION_DEGREE}; +use crate::core::fields::FieldOps; use crate::core::poly::BitReversedOrder; pub struct SecureCirclePoly(pub [CPUCirclePoly; SECURE_EXTENSION_DEGREE]); @@ -50,14 +52,21 @@ impl Deref for SecureCirclePoly { } } -pub struct SecureEvaluation<B: Backend> { +pub struct SecureEvaluation<B: FieldOps<BaseField>> { pub domain: CircleDomain, pub values: SecureColumn<B>, } +impl<B: FieldOps<BaseField>> Deref for SecureEvaluation<B> { + type Target = SecureColumn<B>; + + fn deref(&self) -> &Self::Target { + &self.values + } +} impl SecureEvaluation<CPUBackend> { // TODO(spapini): Remove when we no longer use CircleEvaluation. pub fn to_cpu(self) -> CPUCircleEvaluation<SecureField, BitReversedOrder> { - CPUCircleEvaluation::new(self.domain, self.values.to_cpu()) + CPUCircleEvaluation::new(self.domain, self.values.to_vec()) } }