diff --git a/CHANGELOG.md b/CHANGELOG.md index cefd70175..a29b031f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,14 @@ # Changelog +## 0.8.3 (2024-03-15) +* Implemented `Serializable` and `Deserializable` on `String` (#258). +* Extended range of possible implementations of `ByteReader` and `ByteWriter` (#262). + +## 0.8.2 (2024-02-27) - `utils/core` crate only +* Extended `write_many` to support `IntoIterator` (#251). + ## 0.8.1 (2024-02-21) -* Refactored util module re-exports to comply with latest clippy updates (#250). +* Refactored utils module re-exports to comply with latest clippy updates (#250). ## 0.8.0 (2024-02-06) * Added variable-length serialization and deserialization for `usize` type (#238). diff --git a/README.md b/README.md index ffc1c5d9d..8d2825c1f 100644 --- a/README.md +++ b/README.md @@ -326,7 +326,7 @@ pub fn prove_work() -> (BaseElement, StarkProof) { 0, // grinding factor FieldExtension::None, 8, // FRI folding factor - 128, // FRI max remainder length + 127, // FRI remainder max degree ); // Instantiate the prover and generate the proof. diff --git a/air/Cargo.toml b/air/Cargo.toml index f279e15ef..cd3b7adc2 100644 --- a/air/Cargo.toml +++ b/air/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winter-air" -version = "0.8.1" +version = "0.8.3" description = "AIR components for the Winterfell STARK prover/verifier" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-air/0.8.1" +documentation = "https://docs.rs/winter-air/0.8.3" categories = ["cryptography", "no-std"] keywords = ["crypto", "arithmetization", "air"] edition = "2021" diff --git a/air/src/air/assertions/mod.rs b/air/src/air/assertions/mod.rs index ad4b81230..74d5c68f7 100644 --- a/air/src/air/assertions/mod.rs +++ b/air/src/air/assertions/mod.rs @@ -4,12 +4,12 @@ // LICENSE file in the root directory of this source tree. 
use crate::errors::AssertionError; +use alloc::vec::Vec; use core::{ cmp::Ordering, fmt::{Display, Formatter}, }; use math::FieldElement; -use utils::collections::*; #[cfg(test)] mod tests; diff --git a/air/src/air/assertions/tests.rs b/air/src/air/assertions/tests.rs index 67f1eef8d..1bf8820c3 100644 --- a/air/src/air/assertions/tests.rs +++ b/air/src/air/assertions/tests.rs @@ -4,9 +4,9 @@ // LICENSE file in the root directory of this source tree. use super::{Assertion, AssertionError}; +use alloc::vec::Vec; use math::{fields::f128::BaseElement, FieldElement}; use rand_utils::{rand_value, rand_vector}; -use utils::collections::*; // SINGLE ASSERTIONS // ================================================================================================ diff --git a/air/src/air/boundary/constraint.rs b/air/src/air/boundary/constraint.rs index ec6529c41..b469a8152 100644 --- a/air/src/air/boundary/constraint.rs +++ b/air/src/air/boundary/constraint.rs @@ -6,8 +6,8 @@ use crate::LagrangeKernelEvaluationFrame; use super::{Assertion, ExtensionOf, FieldElement}; +use alloc::{collections::BTreeMap, vec::Vec}; use math::{fft, polynom}; -use utils::collections::*; // BOUNDARY CONSTRAINT // ================================================================================================ diff --git a/air/src/air/boundary/constraint_group.rs b/air/src/air/boundary/constraint_group.rs index a6a1aada6..e50ec1153 100644 --- a/air/src/air/boundary/constraint_group.rs +++ b/air/src/air/boundary/constraint_group.rs @@ -4,7 +4,7 @@ // LICENSE file in the root directory of this source tree. 
use super::{Assertion, BoundaryConstraint, ConstraintDivisor, ExtensionOf, FieldElement}; -use utils::collections::*; +use alloc::{collections::BTreeMap, vec::Vec}; // BOUNDARY CONSTRAINT GROUP // ================================================================================================ diff --git a/air/src/air/boundary/mod.rs b/air/src/air/boundary/mod.rs index 0d23cab3b..fff63c046 100644 --- a/air/src/air/boundary/mod.rs +++ b/air/src/air/boundary/mod.rs @@ -4,8 +4,11 @@ // LICENSE file in the root directory of this source tree. use super::{AirContext, Assertion, ConstraintDivisor}; +use alloc::{ + collections::{BTreeMap, BTreeSet}, + vec::Vec, +}; use math::{ExtensionOf, FieldElement}; -use utils::collections::*; mod constraint; pub use constraint::{BoundaryConstraint, LagrangeKernelBoundaryConstraint}; diff --git a/air/src/air/boundary/tests.rs b/air/src/air/boundary/tests.rs index f5450bd80..e5a0d8d55 100644 --- a/air/src/air/boundary/tests.rs +++ b/air/src/air/boundary/tests.rs @@ -7,10 +7,10 @@ use super::{ super::tests::{build_prng, build_sequence_poly}, Assertion, BoundaryConstraint, }; +use alloc::{collections::BTreeMap, vec::Vec}; use crypto::{hashers::Blake3_256, DefaultRandomCoin, RandomCoin}; use math::{fields::f64::BaseElement, polynom, FieldElement, StarkField}; use rand_utils::{rand_value, rand_vector, shuffle}; -use utils::collections::*; // BOUNDARY CONSTRAINT TESTS // ================================================================================================ diff --git a/air/src/air/coefficients.rs b/air/src/air/coefficients.rs index 4e88ddf45..b71939e9c 100644 --- a/air/src/air/coefficients.rs +++ b/air/src/air/coefficients.rs @@ -3,8 +3,8 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
+use alloc::vec::Vec; use math::FieldElement; -use utils::collections::*; // AUXILIARY TRACE SEGMENT RANDOMNESS // ================================================================================================ diff --git a/air/src/air/context.rs b/air/src/air/context.rs index 08b90a811..3915876e5 100644 --- a/air/src/air/context.rs +++ b/air/src/air/context.rs @@ -4,9 +4,9 @@ // LICENSE file in the root directory of this source tree. use crate::{air::TransitionConstraintDegree, ProofOptions, TraceInfo}; +use alloc::vec::Vec; use core::cmp; use math::StarkField; -use utils::collections::*; // AIR CONTEXT // ================================================================================================ diff --git a/air/src/air/divisor.rs b/air/src/air/divisor.rs index cd7979092..1916c06c3 100644 --- a/air/src/air/divisor.rs +++ b/air/src/air/divisor.rs @@ -4,9 +4,9 @@ // LICENSE file in the root directory of this source tree. use crate::air::Assertion; +use alloc::vec::Vec; use core::fmt::{Display, Formatter}; use math::{FieldElement, StarkField}; -use utils::collections::*; // CONSTRAINT DIVISOR // ================================================================================================ diff --git a/air/src/air/mod.rs b/air/src/air/mod.rs index 08891c411..28380ec9c 100644 --- a/air/src/air/mod.rs +++ b/air/src/air/mod.rs @@ -4,9 +4,9 @@ // LICENSE file in the root directory of this source tree. 
use crate::ProofOptions; +use alloc::{collections::BTreeMap, vec::Vec}; use crypto::{RandomCoin, RandomCoinError}; use math::{fft, ExtensibleField, ExtensionOf, FieldElement, StarkField, ToElements}; -use utils::collections::*; mod trace_info; pub use trace_info::TraceInfo; diff --git a/air/src/air/tests.rs b/air/src/air/tests.rs index 7bc148e76..5e7d875f0 100644 --- a/air/src/air/tests.rs +++ b/air/src/air/tests.rs @@ -8,9 +8,9 @@ use super::{ TransitionConstraintDegree, }; use crate::{AuxTraceRandElements, FieldExtension}; +use alloc::{collections::BTreeMap, vec::Vec}; use crypto::{hashers::Blake3_256, DefaultRandomCoin, RandomCoin}; use math::{fields::f64::BaseElement, get_power_series, polynom, FieldElement, StarkField}; -use utils::collections::*; // PERIODIC COLUMNS // ================================================================================================ diff --git a/air/src/air/trace_info.rs b/air/src/air/trace_info.rs index 6dde88af9..262b6da8a 100644 --- a/air/src/air/trace_info.rs +++ b/air/src/air/trace_info.rs @@ -3,11 +3,9 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use alloc::{string::ToString, vec::Vec}; use math::{StarkField, ToElements}; -use utils::{ - collections::*, string::*, ByteReader, ByteWriter, Deserializable, DeserializationError, - Serializable, -}; +use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; // CONSTANTS // ================================================================================================ diff --git a/air/src/air/transition/degree.rs b/air/src/air/transition/degree.rs index 52ea2ebb7..8302dd15b 100644 --- a/air/src/air/transition/degree.rs +++ b/air/src/air/transition/degree.rs @@ -4,8 +4,8 @@ // LICENSE file in the root directory of this source tree. 
use super::{super::super::ProofOptions, MIN_CYCLE_LENGTH}; +use alloc::vec::Vec; use core::cmp; -use utils::collections::*; // TRANSITION CONSTRAINT DEGREE // ================================================================================================ diff --git a/air/src/air/transition/frame.rs b/air/src/air/transition/frame.rs index 14da52b69..73b50a98d 100644 --- a/air/src/air/transition/frame.rs +++ b/air/src/air/transition/frame.rs @@ -4,8 +4,8 @@ // LICENSE file in the root directory of this source tree. use super::FieldElement; +use alloc::vec::Vec; use math::{polynom, StarkField}; -use utils::collections::*; // EVALUATION FRAME // ================================================================================================ diff --git a/air/src/air/transition/mod.rs b/air/src/air/transition/mod.rs index 6b6d76963..71f4605ee 100644 --- a/air/src/air/transition/mod.rs +++ b/air/src/air/transition/mod.rs @@ -4,7 +4,7 @@ // LICENSE file in the root directory of this source tree. use super::{AirContext, ConstraintDivisor, ExtensionOf, FieldElement}; -use utils::collections::*; +use alloc::vec::Vec; mod frame; pub use frame::{EvaluationFrame, LagrangeKernelEvaluationFrame}; diff --git a/air/src/lib.rs b/air/src/lib.rs index 867b8f879..23693fa7a 100644 --- a/air/src/lib.rs +++ b/air/src/lib.rs @@ -27,9 +27,8 @@ //! This crate also contains components describing STARK protocol parameters ([ProofOptions]) and //! proof structure ([StarkProof](proof::StarkProof)). -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; diff --git a/air/src/options.rs b/air/src/options.rs index 614d43944..45ba7016c 100644 --- a/air/src/options.rs +++ b/air/src/options.rs @@ -3,11 +3,10 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
+use alloc::vec::Vec; use fri::FriOptions; use math::{StarkField, ToElements}; -use utils::{ - collections::*, ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, -}; +use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; // CONSTANTS // ================================================================================================ diff --git a/air/src/proof/commitments.rs b/air/src/proof/commitments.rs index 3876171ff..3d85157bc 100644 --- a/air/src/proof/commitments.rs +++ b/air/src/proof/commitments.rs @@ -3,10 +3,10 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use alloc::vec::Vec; use crypto::Hasher; use utils::{ - collections::*, ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, - SliceReader, + ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, SliceReader, }; // COMMITMENTS diff --git a/air/src/proof/context.rs b/air/src/proof/context.rs index 92b28ff65..cf4febf0c 100644 --- a/air/src/proof/context.rs +++ b/air/src/proof/context.rs @@ -4,11 +4,9 @@ // LICENSE file in the root directory of this source tree. use crate::{ProofOptions, TraceInfo}; +use alloc::{string::ToString, vec::Vec}; use math::{StarkField, ToElements}; -use utils::{ - collections::*, string::*, ByteReader, ByteWriter, Deserializable, DeserializationError, - Serializable, -}; +use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; // PROOF CONTEXT // ================================================================================================ diff --git a/air/src/proof/mod.rs b/air/src/proof/mod.rs index 94601440d..c5dd8d733 100644 --- a/air/src/proof/mod.rs +++ b/air/src/proof/mod.rs @@ -6,13 +6,12 @@ //! Contains STARK proof struct and associated components. 
use crate::{ProofOptions, TraceInfo}; +use alloc::vec::Vec; use core::cmp; use crypto::Hasher; use fri::FriProof; use math::FieldElement; -use utils::{ - collections::*, ByteReader, Deserializable, DeserializationError, Serializable, SliceReader, -}; +use utils::{ByteReader, Deserializable, DeserializationError, Serializable, SliceReader}; mod context; pub use context::Context; diff --git a/air/src/proof/ood_frame.rs b/air/src/proof/ood_frame.rs index a34151a0c..475139998 100644 --- a/air/src/proof/ood_frame.rs +++ b/air/src/proof/ood_frame.rs @@ -3,10 +3,10 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use alloc::vec::Vec; use math::FieldElement; use utils::{ - collections::*, ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, - SliceReader, + ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, SliceReader, }; use crate::LagrangeKernelEvaluationFrame; diff --git a/air/src/proof/queries.rs b/air/src/proof/queries.rs index d5bd5022a..a461485be 100644 --- a/air/src/proof/queries.rs +++ b/air/src/proof/queries.rs @@ -4,11 +4,11 @@ // LICENSE file in the root directory of this source tree. use super::Table; +use alloc::vec::Vec; use crypto::{BatchMerkleProof, ElementHasher, Hasher}; use math::FieldElement; use utils::{ - collections::*, ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, - SliceReader, + ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, SliceReader, }; // QUERIES diff --git a/air/src/proof/table.rs b/air/src/proof/table.rs index 6ed48e5f0..641198853 100644 --- a/air/src/proof/table.rs +++ b/air/src/proof/table.rs @@ -4,9 +4,9 @@ // LICENSE file in the root directory of this source tree. 
use super::{DeserializationError, SliceReader}; +use alloc::vec::Vec; use core::iter::FusedIterator; use math::FieldElement; -use utils::collections::*; use utils::ByteReader; // CONSTANTS diff --git a/crypto/Cargo.toml b/crypto/Cargo.toml index c63022374..257baf5c3 100644 --- a/crypto/Cargo.toml +++ b/crypto/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winter-crypto" -version = "0.8.1" +version = "0.8.3" description = "Cryptographic library for the Winterfell STARK prover/verifier" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-crypto/0.8.1" +documentation = "https://docs.rs/winter-crypto/0.8.3" categories = ["cryptography", "no-std"] keywords = ["crypto", "merkle-tree", "hash"] edition = "2021" diff --git a/crypto/README.md b/crypto/README.md index bdcf6d82f..70e2f4a5b 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -8,7 +8,6 @@ This crate contains modules with cryptographic operations needed in STARK proof * BLAKE3 with either 256-bit or 192-bit output. The smaller output version can be used to reduce STARK proof size, however, it also limits proof security level to at most 96 bits. * Rescue Prime over a 64-bit field with 256-bit output and over a 62-bit field with 248-bit output. Rescue is an arithmetization-friendly hash function and can be used in the STARK protocol when recursive proof composition is desired. However, using this function is not yet supported by the Winterfell STARK prover and verifier. * Rescue Prime over the same 64-bit field as above, with 256-bit output, but using the novel [Jive compression mode](https://eprint.iacr.org/2022/840.pdf) to obtain a smaller state and faster 2-to-1 compression. -* Griffin over the same 64-bit field as above, with 256-bit output, also using the novel [Jive compression mode](https://eprint.iacr.org/2022/840.pdf) to obtain a smaller state and faster 2-to-1 compression. 
### Rescue hash function implementation Rescue hash function is implemented according to the Rescue Prime [specifications](https://eprint.iacr.org/2020/1143.pdf) with the following exception: @@ -45,36 +44,19 @@ The parameters used to instantiate the functions are: - S-Box degree: 3. - Target security level: 124-bits. -### Griffin hash function implementation -Griffin hash function is implemented according to the Griffin [specifications](https://eprint.iacr.org/2022/403.pdf) with the following differences: -* We set the number of rounds to 7, which implies a 15% security margin instead of the 20% margin used in the specifications (a 20% margin rounds up to 8 rounds), similarly to the motivation on Rescue above. -* When hashing a sequence of elements, we follow the [Hirose padding](https://www.researchgate.net/publication/325706626_Sequential_Hashing_with_Minimum_Padding) specification, similarly to RPJive64_256 instantiation. However, this means that our instantiation of Griffin cannot be used in a stream mode as the number of elements to be hashed must be known upfront. -* For instantiation `GriffinJive64_256`, we also make the following modifications: - - Instead of using the matrix suggested by the Griffin specification paper, we use a methodology developed by Polygon Zero to find an MDS matrix with coefficients which are small powers of two in frequency domain. This allows us to dramatically reduce MDS matrix multiplication time. We claim without proof that using a different MDS matrix does not affect security of the hash function. - - We use Jive as compression mode for 2-to-1 hashing. Similarly to `RpJive64_256` above, the output of the hash function is not the same when we hash 8 field elements as a sequence of elements using `hash_elements()` function and when we compress 8 field elements into 4 (e.g., for building a Merkle tree) using the 2-to-1 Jive compression mode. 
- -The parameters used to instantiate the function are: -* For `GriffinJive64_256`: - - Field: 64-bit prime field with modulus 264 - 232 + 1. - - State width: 8 field elements. - - Capacity size: 4 field elements. - - Digest size: 4 field elements (can be serialized into 32 bytes). - - Number of founds: 7. - - S-Box degree: 7. - - Target security level: 128-bits. ### Hash function performance One of the core operations performed during STARK proof generation is construction of Merkle trees. We care greatly about building these trees as quickly as possible, and thus, for the purposes of STARK protocol, 2-to-1 hash operation (e.g., computing a hash of two 32-byte values) is especially important. The table below contains rough benchmarks for computing a 2-to-1 hash for all currently implemented hash functions. -| CPU | BLAKE3_256 | SHA3_256 | RP64_256 | RPJ64_256 | RP62_248 | GriffinJ64_256 | -| --------------------------- | :--------: | :------: | :------: | :-------: | :------: | :------------: | -| Apple M1 Pro | 76 ns | 227 ns | 5.1 us | 3.8 us | 7.1 us | 2.2 us | -| AMD Ryzen 9 5950X @ 3.4 GHz | 62 ns | 310 ns | 5.2 us | 3.9 us | 6.9 us | 1.5 us | -| Core i9-9980KH @ 2.4 GHz | 66 ns | 400 ns | - | - | 6.6 us | - | -| Core i5-7300U @ 2.6 GHz | 81 ns | 540 ns | - | - | 9.5 us | - | -| Core i5-4300U @ 1.9 GHz | 106 ns | 675 ns | - | - | 13.9 us | - | +| CPU | BLAKE3_256 | SHA3_256 | RP64_256 | RPJ64_256 | RP62_248 | +| --------------------------- | :--------: | :------: | :------: | :-------: | :------: | +| Apple M1 Pro | 76 ns | 227 ns | 5.1 us | 3.8 us | 7.1 us | +| AMD Ryzen 9 5950X @ 3.4 GHz | 62 ns | 310 ns | 5.2 us | 3.9 us | 6.9 us | +| Core i9-9980KH @ 2.4 GHz | 66 ns | 400 ns | - | - | 6.6 us | +| Core i5-7300U @ 2.6 GHz | 81 ns | 540 ns | - | - | 9.5 us | +| Core i5-4300U @ 1.9 GHz | 106 ns | 675 ns | - | - | 13.9 us | -As can be seen from the table, BLAKE3 is by far the fastest hash function, while our implementations of algebraic hashes are between 30x 
(Griffin) and 70x (Rescue-Prime) slower than BLAKE3 and between 10x (Griffin) and 20x (Rescue-Prime) slower than SHA3. +As can be seen from the table, BLAKE3 is by far the fastest hash function, while our implementations of algebraic hashes are 70x slower than BLAKE3 and 20x slower than SHA3. ## Merkle [Merkle](src/merkle) module contains an implementation of a Merkle tree which supports batch proof generation and verification. Batch proofs are based on the Octopus algorithm described [here](https://eprint.iacr.org/2017/933). diff --git a/crypto/benches/hash.rs b/crypto/benches/hash.rs index 291ce5aa8..62569706b 100644 --- a/crypto/benches/hash.rs +++ b/crypto/benches/hash.rs @@ -7,7 +7,7 @@ use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion use math::fields::f128; use rand_utils::rand_value; use winter_crypto::{ - hashers::{Blake3_256, GriffinJive64_256, Rp62_248, Rp64_256, RpJive64_256, Sha3_256}, + hashers::{Blake3_256, Rp62_248, Rp64_256, RpJive64_256, Sha3_256}, Hasher, }; @@ -20,7 +20,6 @@ type Sha3Digest = ::Digest; type Rp62_248Digest = ::Digest; type Rp64_256Digest = ::Digest; type RpJive64_256Digest = ::Digest; -type GriffinJive64_256Digest = ::Digest; fn blake3(c: &mut Criterion) { let v: [Blake3Digest; 2] = [Blake3::hash(&[1u8]), Blake3::hash(&[2u8])]; @@ -118,26 +117,5 @@ fn rescue_jive256(c: &mut Criterion) { }); } -fn griffin_jive256(c: &mut Criterion) { - let v: [GriffinJive64_256Digest; 2] = - [GriffinJive64_256::hash(&[1u8]), GriffinJive64_256::hash(&[2u8])]; - c.bench_function("hash_griffin_jive64_256 (cached)", |bench| { - bench.iter(|| GriffinJive64_256::merge(black_box(&v))) - }); - - c.bench_function("hash_griffin_jive64_256 (random)", |b| { - b.iter_batched( - || { - [ - GriffinJive64_256::hash(&rand_value::().to_le_bytes()), - GriffinJive64_256::hash(&rand_value::().to_le_bytes()), - ] - }, - |state| GriffinJive64_256::merge(&state), - BatchSize::SmallInput, - ) - }); -} - -criterion_group!(hash_group, blake3, 
sha3, rescue248, rescue256, rescue_jive256, griffin_jive256,); +criterion_group!(hash_group, blake3, sha3, rescue248, rescue256, rescue_jive256); criterion_main!(hash_group); diff --git a/crypto/src/hash/griffin/griffin64_256_jive/digest.rs b/crypto/src/hash/griffin/griffin64_256_jive/digest.rs deleted file mode 100644 index 3a599a0ef..000000000 --- a/crypto/src/hash/griffin/griffin64_256_jive/digest.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// -// This source code is licensed under the MIT license found in the -// LICENSE file in the root directory of this source tree. - -use super::{Digest, DIGEST_SIZE}; -use core::slice; -use math::fields::f64::BaseElement; -use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; - -// DIGEST TRAIT IMPLEMENTATIONS -// ================================================================================================ - -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub struct ElementDigest([BaseElement; DIGEST_SIZE]); - -impl ElementDigest { - pub fn new(value: [BaseElement; DIGEST_SIZE]) -> Self { - Self(value) - } - - pub fn as_elements(&self) -> &[BaseElement] { - &self.0 - } - - pub fn digests_as_elements(digests: &[Self]) -> &[BaseElement] { - let p = digests.as_ptr(); - let len = digests.len() * DIGEST_SIZE; - unsafe { slice::from_raw_parts(p as *const BaseElement, len) } - } -} - -impl Digest for ElementDigest { - fn as_bytes(&self) -> [u8; 32] { - let mut result = [0; 32]; - - result[..8].copy_from_slice(&self.0[0].as_int().to_le_bytes()); - result[8..16].copy_from_slice(&self.0[1].as_int().to_le_bytes()); - result[16..24].copy_from_slice(&self.0[2].as_int().to_le_bytes()); - result[24..].copy_from_slice(&self.0[3].as_int().to_le_bytes()); - - result - } -} - -impl Default for ElementDigest { - fn default() -> Self { - ElementDigest([BaseElement::default(); DIGEST_SIZE]) - } -} - -impl Serializable for ElementDigest { - fn write_into(&self, 
target: &mut W) { - target.write_bytes(&self.as_bytes()); - } -} - -impl Deserializable for ElementDigest { - fn read_from(source: &mut R) -> Result { - // TODO: check if the field elements are valid? - let e1 = BaseElement::new(source.read_u64()?); - let e2 = BaseElement::new(source.read_u64()?); - let e3 = BaseElement::new(source.read_u64()?); - let e4 = BaseElement::new(source.read_u64()?); - - Ok(Self([e1, e2, e3, e4])) - } -} - -impl From<[BaseElement; DIGEST_SIZE]> for ElementDigest { - fn from(value: [BaseElement; DIGEST_SIZE]) -> Self { - Self(value) - } -} - -impl From for [BaseElement; DIGEST_SIZE] { - fn from(value: ElementDigest) -> Self { - value.0 - } -} - -impl From for [u8; 32] { - fn from(value: ElementDigest) -> Self { - value.as_bytes() - } -} - -// TESTS -// ================================================================================================ - -#[cfg(test)] -mod tests { - - use super::ElementDigest; - use rand_utils::rand_array; - use utils::{Deserializable, Serializable, SliceReader}; - - #[test] - fn digest_serialization() { - let d1 = ElementDigest(rand_array()); - - let mut bytes = vec![]; - d1.write_into(&mut bytes); - assert_eq!(32, bytes.len()); - - let mut reader = SliceReader::new(&bytes); - let d2 = ElementDigest::read_from(&mut reader).unwrap(); - - assert_eq!(d1, d2); - } -} diff --git a/crypto/src/hash/griffin/griffin64_256_jive/mod.rs b/crypto/src/hash/griffin/griffin64_256_jive/mod.rs deleted file mode 100644 index f8237c8f0..000000000 --- a/crypto/src/hash/griffin/griffin64_256_jive/mod.rs +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// -// This source code is licensed under the MIT license found in the -// LICENSE file in the root directory of this source tree. 
- -use super::super::mds::mds_f64_8x8::mds_multiply; -use super::{Digest, ElementHasher, Hasher}; -use core::ops::Range; -use math::{fields::f64::BaseElement, FieldElement, StarkField}; - -mod digest; -pub use digest::ElementDigest; - -#[cfg(test)] -mod tests; - -// CONSTANTS -// ================================================================================================ - -/// Sponge state is set to 8 field elements or 64 bytes; 4 elements are reserved for rate and -/// the remaining 4 elements are reserved for capacity. -const STATE_WIDTH: usize = 8; - -/// The rate portion of the state is located in elements 0 through 4. -const RATE_RANGE: Range = 0..4; -const RATE_WIDTH: usize = RATE_RANGE.end - RATE_RANGE.start; - -// The compression makes use of the Jive mode, hence ignoring the notion of sponge capacity. -const INPUT1_RANGE: Range = 0..4; -const INPUT2_RANGE: Range = 4..8; - -/// The capacity portion of the state is located in elements 4, 5, 6 and 7. -const CAPACITY_RANGE: Range = 4..8; - -/// The output of the hash function is a digest which consists of 4 field elements or 32 bytes. -/// -/// The digest is returned from state elements 0, 1, 2, 3 (the four elements of the rate). -const DIGEST_RANGE: Range = 0..4; -const DIGEST_SIZE: usize = DIGEST_RANGE.end - DIGEST_RANGE.start; - -/// The number of rounds is set to 7 to target 128-bit security level with 15% security margin -const NUM_ROUNDS: usize = 7; - -// HASHER IMPLEMENTATION -// ================================================================================================ - -/// Implementation of [Hasher] trait for Griffin hash function with 256-bit output. -/// -/// The hash function is implemented according to the Griffin -/// [specifications](https://eprint.iacr.org/2020/1143.pdf) with the following caveats: -/// * We set the number of rounds to 7, which implies a 15% security margin instead of the 20% -/// margin used in the specifications (a 20% margin rounds up to 8 rounds). 
The primary -/// motivation for this is that having the number of rounds be one less than a power of two -/// simplifies AIR design for computations involving the hash function. -/// * When hashing a sequence of elements, implement the Hirose padding rule. However, it also -/// means that our instantiation of Griffin cannot be used in a stream mode as the number -/// of elements to be hashed must be known upfront. -/// * Instead of using the suggested matrix as described in Griffin paper, we use a methodology -/// developed by Polygon Zero to find an MDS matrix with coefficients which are small powers -/// of two in frequency domain. This allows us to dramatically reduce matrix multiplication -/// time. We claim without proof that using a different (MDS) matrix does not affect security of -/// as the branching number is actually increased (9 instead of 6). -/// -/// The parameters used to instantiate the function are: -/// * Field: 64-bit prime field with modulus 2^64 - 2^32 + 1. -/// * State width: 8 field elements. -/// * Capacity size: 4 field elements. -/// * Number of founds: 7. -/// * S-Box degree: 7. -/// -/// The above parameters target 128-bit security level. The digest consists of four field elements -/// and it can be serialized into 32 bytes (256 bits). -/// -/// ## Hash output consistency -/// Functions [hash_elements()](GriffinJive64_256::hash_elements), [merge()](GriffinJive64_256::merge), and -/// [merge_with_int()](GriffinJive64_256::merge_with_int) are not consistent. This is because the former -/// is instantiated with a sponge construction, while the latter use the Jive compression mode and -/// hence do not rely on the sponge construction. -/// -/// In addition, [hash()](GriffinJive64_256::hash) function is not consistent with the functions mentioned -/// above. 
For example, if we take two field elements, serialize them to bytes and hash them using -/// [hash()](GriffinJive64_256::hash), the result will differ from the result obtained by hashing these -/// elements directly using [hash_elements()](GriffinJive64_256::hash_elements) function. The reason for -/// this difference is that [hash()](GriffinJive64_256::hash) function needs to be able to handle -/// arbitrary binary strings, which may or may not encode valid field elements - and thus, -/// deserialization procedure used by this function is different from the procedure used to -/// deserialize valid field elements. -/// -/// Thus, if the underlying data consists of valid field elements, it might make more sense -/// to deserialize them into field elements and then hash them using -/// [hash_elements()](GriffinJive64_256::hash_elements) function rather then hashing the serialized bytes -/// using [hash()](GriffinJive64_256::hash) function. -pub struct GriffinJive64_256(); - -impl Hasher for GriffinJive64_256 { - type Digest = ElementDigest; - - const COLLISION_RESISTANCE: u32 = 128; - - fn hash(bytes: &[u8]) -> Self::Digest { - // compute the number of elements required to represent the string; we will be processing - // the string in 7-byte chunks, thus the number of elements will be equal to the number - // of such chunks (including a potential partial chunk at the end). - let num_elements = if bytes.len() % 7 == 0 { - bytes.len() / 7 - } else { - bytes.len() / 7 + 1 - }; - - // initialize state to all zeros, except for the first element of the capacity part, which - // is set to 1 if the number of elements is not a multiple of RATE_WIDTH. - let mut state = [BaseElement::ZERO; STATE_WIDTH]; - if num_elements % RATE_WIDTH != 0 { - state[CAPACITY_RANGE.start] = BaseElement::ONE; - } - - // break the string into 7-byte chunks, convert each chunk into a field element, and - // absorb the element into the rate portion of the state. 
we use 7-byte chunks because - // every 7-byte chunk is guaranteed to map to some field element. - let mut i = 0; - let mut buf = [0_u8; 8]; - for (index, chunk) in bytes.chunks(7).enumerate() { - if index < num_elements - 1 { - buf[..7].copy_from_slice(chunk); - } else { - // if we are dealing with the last chunk, it may be smaller than 7 bytes long, so - // we need to handle it slightly differently. we also append a byte with value 1 - // to the end of the string; this pads the string in such a way that adding - // trailing zeros results in different hash - let chunk_len = chunk.len(); - buf = [0_u8; 8]; - buf[..chunk_len].copy_from_slice(chunk); - buf[chunk_len] = 1; - } - - // convert the bytes into a field element and absorb it into the rate portion of the - // state; if the rate is filled up, apply the Griffin permutation and start absorbing - // again from zero index. - state[RATE_RANGE.start + i] += BaseElement::new(u64::from_le_bytes(buf)); - i += 1; - if i % RATE_WIDTH == 0 { - Self::apply_permutation(&mut state); - i = 0; - } - } - - // if we absorbed some elements but didn't apply a permutation to them (would happen when - // the number of elements is not a multiple of RATE_WIDTH), apply a final permutation after - // padding by appending a 1 followed by as many 0 as necessary to make the input length a - // multiple of the RATE_WIDTH. - if i > 0 { - state[RATE_RANGE.start + i] = BaseElement::ONE; - i += 1; - while i != RATE_WIDTH { - state[RATE_RANGE.start + i] = BaseElement::ZERO; - i += 1; - } - Self::apply_permutation(&mut state); - } - - // return the first 4 elements of the state as hash result - ElementDigest::new(state[DIGEST_RANGE].try_into().unwrap()) - } - - // We do not rely on the sponge construction to build our compression function. Instead, we use - // the Jive compression mode designed in https://eprint.iacr.org/2022/840.pdf. 
- fn merge(values: &[Self::Digest; 2]) -> Self::Digest { - // initialize the state by copying the digest elements into the state - let initial_state: [BaseElement; STATE_WIDTH] = - Self::Digest::digests_as_elements(values).try_into().unwrap(); - let mut state = initial_state; - - // apply the Griffin permutation and apply the final Jive summation - Self::apply_permutation(&mut state); - - Self::apply_jive_summation(&initial_state, &state) - } - - // We do not rely on the sponge construction to build our compression function. Instead, we use - // the Jive compression mode designed in https://eprint.iacr.org/2022/840.pdf. - fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest { - // initialize the state as follows: - // - seed is copied into the first 4 elements of the state. - // - if the value fits into a single field element, copy it into the fifth rate element - // and set the last state element to 5 (the number of elements to be hashed). - // - if the value doesn't fit into a single field element, split it into two field - // elements, copy them into state elements 5 and 6, and set the last state element - // to 6. 
- let mut state = [BaseElement::ZERO; STATE_WIDTH]; - state[INPUT1_RANGE].copy_from_slice(seed.as_elements()); - state[INPUT2_RANGE.start] = BaseElement::new(value); - if value < BaseElement::MODULUS { - state[INPUT2_RANGE.end - 1] = BaseElement::new(DIGEST_SIZE as u64 + 1); - } else { - state[INPUT2_RANGE.start + 1] = BaseElement::new(value / BaseElement::MODULUS); - state[INPUT2_RANGE.end - 1] = BaseElement::new(DIGEST_SIZE as u64 + 2); - } - - let initial_state = state; - // apply the Griffin permutation and apply the final Jive summation - Self::apply_permutation(&mut state); - - Self::apply_jive_summation(&initial_state, &state) - } -} - -impl ElementHasher for GriffinJive64_256 { - type BaseField = BaseElement; - - fn hash_elements>(elements: &[E]) -> Self::Digest { - // convert the elements into a list of base field elements - let elements = E::slice_as_base_elements(elements); - - // initialize state to all zeros, except for the first element of the capacity part, which - // is set to 1 if the number of elements is not a multiple of RATE_WIDTH. - let mut state = [BaseElement::ZERO; STATE_WIDTH]; - if elements.len() % RATE_WIDTH != 0 { - state[CAPACITY_RANGE.start] = BaseElement::ONE; - } - - // absorb elements into the state one by one until the rate portion of the state is filled - // up; then apply the Griffin permutation and start absorbing again; repeat until all - // elements have been absorbed - let mut i = 0; - for &element in elements.iter() { - state[RATE_RANGE.start + i] += element; - i += 1; - if i % RATE_WIDTH == 0 { - Self::apply_permutation(&mut state); - i = 0; - } - } - - // if we absorbed some elements but didn't apply a permutation to them (would happen when - // the number of elements is not a multiple of RATE_WIDTH), apply a final permutation after - // padding by appending a 1 followed by as many 0 as necessary to make the input length a - // multiple of the RATE_WIDTH. 
- if i > 0 { - state[RATE_RANGE.start + i] = BaseElement::ONE; - i += 1; - while i != RATE_WIDTH { - state[RATE_RANGE.start + i] = BaseElement::ZERO; - i += 1; - } - Self::apply_permutation(&mut state); - } - - // return the first 4 elements of the state as hash result - ElementDigest::new(state[DIGEST_RANGE].try_into().unwrap()) - } -} - -// HASH FUNCTION IMPLEMENTATION -// ================================================================================================ - -impl GriffinJive64_256 { - // CONSTANTS - // -------------------------------------------------------------------------------------------- - - /// The number of rounds is set to 7 to target 128-bit security level with 40% security margin. - pub const NUM_ROUNDS: usize = NUM_ROUNDS; - - /// Sponge state is set to 8 field elements or 64 bytes; 4 elements are reserved for rate and - /// the remaining 4 elements are reserved for capacity. - pub const STATE_WIDTH: usize = STATE_WIDTH; - - /// The rate portion of the state is located in elements 4 through 7 (inclusive). - pub const RATE_RANGE: Range = RATE_RANGE; - - /// The capacity portion of the state is located in elements 0, 1, 2, and 3. - pub const CAPACITY_RANGE: Range = CAPACITY_RANGE; - - /// The output of the hash function can be read from state elements 4, 5, 6, and 7. - pub const DIGEST_RANGE: Range = DIGEST_RANGE; - - /// MDS matrix used for computing the linear layer in a Griffin round. - pub const MDS: [[BaseElement; STATE_WIDTH]; STATE_WIDTH] = MDS; - - /// Round constants added to the hasher state in the first half of the Griffin round. - pub const ARK: [[BaseElement; STATE_WIDTH]; NUM_ROUNDS - 1] = ARK; - - /// Constants alpha_i for Griffin non-linear layer. 
- pub(crate) const ALPHA: [BaseElement; STATE_WIDTH - 2] = [ - BaseElement::new(6303398607380181568), - BaseElement::new(12606797214760363136), - BaseElement::new(463451752725960383), - BaseElement::new(6766850360106141951), - BaseElement::new(13070248967486323519), - BaseElement::new(926903505451920766), - ]; - - /// Constants beta_i for Griffin non-linear layer. - pub(crate) const BETA: [BaseElement; STATE_WIDTH - 2] = [ - BaseElement::new(5698628486727258041), - BaseElement::new(4347769877494447843), - BaseElement::new(14394168241716153727), - BaseElement::new(17391079509977791372), - BaseElement::new(13338503682279360778), - BaseElement::new(2236440758620861945), - ]; - - // GRIFFIN PERMUTATION - // -------------------------------------------------------------------------------------------- - - /// Applies Griffin permutation to the provided state. - pub fn apply_permutation(state: &mut [BaseElement; STATE_WIDTH]) { - for i in 0..NUM_ROUNDS - 1 { - Self::apply_round(state, i); - } - Self::apply_non_linear(state); - Self::apply_linear(state); - } - - /// Griffin round function. - #[inline(always)] - pub fn apply_round(state: &mut [BaseElement; STATE_WIDTH], round: usize) { - Self::apply_non_linear(state); - Self::apply_linear(state); - Self::add_constants(state, &ARK[round]); - } - - #[inline(always)] - pub fn apply_jive_summation( - initial_state: &[BaseElement; STATE_WIDTH], - final_state: &[BaseElement; STATE_WIDTH], - ) -> ElementDigest { - let mut result = [BaseElement::ZERO; DIGEST_SIZE]; - for (i, r) in result.iter_mut().enumerate() { - *r = initial_state[i] - + initial_state[DIGEST_SIZE + i] - + final_state[i] - + final_state[DIGEST_SIZE + i]; - } - - ElementDigest::new(result) - } - - // HELPER FUNCTIONS - // -------------------------------------------------------------------------------------------- - - #[inline(always)] - /// Applies the Griffin non-linear layer - /// to the current hash state. 
- fn apply_non_linear(state: &mut [BaseElement; STATE_WIDTH]) { - pow_inv_d(&mut state[0]); - pow_d(&mut state[1]); - - let l2 = Self::linear_function(2, state[0], state[1], BaseElement::ZERO); - state[2] *= l2.square() + Self::ALPHA[0] * l2 + Self::BETA[0]; - - let l3 = Self::linear_function(3, state[0], state[1], state[2]); - state[3] *= l3.square() + Self::ALPHA[1] * l3 + Self::BETA[1]; - - let l4 = Self::linear_function(4, state[0], state[1], state[3]); - state[4] *= l4.square() + Self::ALPHA[2] * l4 + Self::BETA[2]; - - let l5 = Self::linear_function(5, state[0], state[1], state[4]); - state[5] *= l5.square() + Self::ALPHA[3] * l5 + Self::BETA[3]; - - let l6 = Self::linear_function(6, state[0], state[1], state[5]); - state[6] *= l6.square() + Self::ALPHA[4] * l6 + Self::BETA[4]; - - let l7 = Self::linear_function(7, state[0], state[1], state[6]); - state[7] *= l7.square() + Self::ALPHA[5] * l7 + Self::BETA[5]; - } - - #[inline(always)] - fn apply_linear(state: &mut [BaseElement; STATE_WIDTH]) { - mds_multiply(state) - } - - #[inline(always)] - fn add_constants(state: &mut [BaseElement; STATE_WIDTH], ark: &[BaseElement; STATE_WIDTH]) { - state.iter_mut().zip(ark).for_each(|(s, &k)| *s += k); - } - - #[inline(always)] - fn linear_function( - round: u64, - z0: BaseElement, - z1: BaseElement, - z2: BaseElement, - ) -> BaseElement { - let (r0, r1, r2) = (z0.inner() as u128, z1.inner() as u128, z2.inner() as u128); - let r = (round - 1) as u128 * r0 + r1 + r2; - let s_hi = (r >> 64) as u64; - let s_lo = r as u64; - let z = (s_hi << 32) - s_hi; - let (res, over) = s_lo.overflowing_add(z); - - BaseElement::from_mont(res.wrapping_add(0u32.wrapping_sub(over as u32) as u64)) - } -} - -#[inline(always)] -fn pow_d(x: &mut BaseElement) { - *x = x.exp7(); -} - -#[inline(always)] -fn pow_inv_d(x: &mut BaseElement) { - // compute base^10540996611094048183 using 72 multiplications - // 10540996611094048183 = b1001001001001001001001001001000110110110110110110110110110110111 - - 
// compute base^10 - let t1 = x.square(); - - // compute base^100 - let t2 = t1.square(); - - // compute base^100100 - let t3 = square_assign_and_multiply::<3>(t2, t2); - - // compute base^100100100100 - let t4 = square_assign_and_multiply::<6>(t3, t3); - - // compute base^100100100100100100100100 - let t5 = square_assign_and_multiply::<12>(t4, t4); - - // compute base^100100100100100100100100100100 - let t6 = square_assign_and_multiply::<6>(t5, t3); - - // compute base^1001001001001001001001001001000100100100100100100100100100100 - let t7 = square_assign_and_multiply::<31>(t6, t6); - - // compute base^1001001001001001001001001001000110110110110110110110110110110111 - let a = (t7.square() * t6).square().square(); - let b = t1 * t2 * *x; - *x = a * b; -} - -#[inline(always)] -/// Squares an element M times, then multiplies it with tail. -fn square_assign_and_multiply(base: BaseElement, tail: BaseElement) -> BaseElement { - let mut result = base; - for _ in 0..M { - result = result.square(); - } - - result * tail -} - -// MDS -// ================================================================================================ -/// Griffin MDS matrix -const MDS: [[BaseElement; STATE_WIDTH]; STATE_WIDTH] = [ - [ - BaseElement::new(23), - BaseElement::new(8), - BaseElement::new(13), - BaseElement::new(10), - BaseElement::new(7), - BaseElement::new(6), - BaseElement::new(21), - BaseElement::new(8), - ], - [ - BaseElement::new(8), - BaseElement::new(23), - BaseElement::new(8), - BaseElement::new(13), - BaseElement::new(10), - BaseElement::new(7), - BaseElement::new(6), - BaseElement::new(21), - ], - [ - BaseElement::new(21), - BaseElement::new(8), - BaseElement::new(23), - BaseElement::new(8), - BaseElement::new(13), - BaseElement::new(10), - BaseElement::new(7), - BaseElement::new(6), - ], - [ - BaseElement::new(6), - BaseElement::new(21), - BaseElement::new(8), - BaseElement::new(23), - BaseElement::new(8), - BaseElement::new(13), - BaseElement::new(10), - 
BaseElement::new(7), - ], - [ - BaseElement::new(7), - BaseElement::new(6), - BaseElement::new(21), - BaseElement::new(8), - BaseElement::new(23), - BaseElement::new(8), - BaseElement::new(13), - BaseElement::new(10), - ], - [ - BaseElement::new(10), - BaseElement::new(7), - BaseElement::new(6), - BaseElement::new(21), - BaseElement::new(8), - BaseElement::new(23), - BaseElement::new(8), - BaseElement::new(13), - ], - [ - BaseElement::new(13), - BaseElement::new(10), - BaseElement::new(7), - BaseElement::new(6), - BaseElement::new(21), - BaseElement::new(8), - BaseElement::new(23), - BaseElement::new(8), - ], - [ - BaseElement::new(8), - BaseElement::new(13), - BaseElement::new(10), - BaseElement::new(7), - BaseElement::new(6), - BaseElement::new(21), - BaseElement::new(8), - BaseElement::new(23), - ], -]; - -/// Griffin Inverse MDS matrix -#[cfg(test)] -const INV_MDS: [[BaseElement; STATE_WIDTH]; STATE_WIDTH] = [ - [ - BaseElement::new(10671399028204489528), - BaseElement::new(15436289366139187412), - BaseElement::new(4624329233769728317), - BaseElement::new(18200084821960740316), - BaseElement::new(8736112961492104393), - BaseElement::new(1953609990965186349), - BaseElement::new(12477339747250042564), - BaseElement::new(1495657543820456485), - ], - [ - BaseElement::new(1495657543820456485), - BaseElement::new(10671399028204489528), - BaseElement::new(15436289366139187412), - BaseElement::new(4624329233769728317), - BaseElement::new(18200084821960740316), - BaseElement::new(8736112961492104393), - BaseElement::new(1953609990965186349), - BaseElement::new(12477339747250042564), - ], - [ - BaseElement::new(12477339747250042564), - BaseElement::new(1495657543820456485), - BaseElement::new(10671399028204489528), - BaseElement::new(15436289366139187412), - BaseElement::new(4624329233769728317), - BaseElement::new(18200084821960740316), - BaseElement::new(8736112961492104393), - BaseElement::new(1953609990965186349), - ], - [ - BaseElement::new(1953609990965186349), - 
BaseElement::new(12477339747250042564), - BaseElement::new(1495657543820456485), - BaseElement::new(10671399028204489528), - BaseElement::new(15436289366139187412), - BaseElement::new(4624329233769728317), - BaseElement::new(18200084821960740316), - BaseElement::new(8736112961492104393), - ], - [ - BaseElement::new(8736112961492104393), - BaseElement::new(1953609990965186349), - BaseElement::new(12477339747250042564), - BaseElement::new(1495657543820456485), - BaseElement::new(10671399028204489528), - BaseElement::new(15436289366139187412), - BaseElement::new(4624329233769728317), - BaseElement::new(18200084821960740316), - ], - [ - BaseElement::new(18200084821960740316), - BaseElement::new(8736112961492104393), - BaseElement::new(1953609990965186349), - BaseElement::new(12477339747250042564), - BaseElement::new(1495657543820456485), - BaseElement::new(10671399028204489528), - BaseElement::new(15436289366139187412), - BaseElement::new(4624329233769728317), - ], - [ - BaseElement::new(4624329233769728317), - BaseElement::new(18200084821960740316), - BaseElement::new(8736112961492104393), - BaseElement::new(1953609990965186349), - BaseElement::new(12477339747250042564), - BaseElement::new(1495657543820456485), - BaseElement::new(10671399028204489528), - BaseElement::new(15436289366139187412), - ], - [ - BaseElement::new(15436289366139187412), - BaseElement::new(4624329233769728317), - BaseElement::new(18200084821960740316), - BaseElement::new(8736112961492104393), - BaseElement::new(1953609990965186349), - BaseElement::new(12477339747250042564), - BaseElement::new(1495657543820456485), - BaseElement::new(10671399028204489528), - ], -]; - -// ROUND CONSTANTS -// ================================================================================================ - -/// Griffin round constants; -const ARK: [[BaseElement; STATE_WIDTH]; NUM_ROUNDS - 1] = [ - [ - BaseElement::new(9692712401870945221), - BaseElement::new(7618007584389424767), - 
BaseElement::new(5248032629877155397), - BaseElement::new(3331263627507477698), - BaseElement::new(860199187432911550), - BaseElement::new(10360526140302824670), - BaseElement::new(5014858186237911359), - BaseElement::new(4161019260461204222), - ], - [ - BaseElement::new(2649891723669882704), - BaseElement::new(15035697086627576083), - BaseElement::new(14140087988207356741), - BaseElement::new(357780579603925138), - BaseElement::new(273712483418536090), - BaseElement::new(348552596175072640), - BaseElement::new(11116926243792475367), - BaseElement::new(2475357435469270767), - ], - [ - BaseElement::new(9513699262061178678), - BaseElement::new(11735848814479196467), - BaseElement::new(12888397717055708631), - BaseElement::new(15194236579723079985), - BaseElement::new(14734897209064082180), - BaseElement::new(9352307275330595094), - BaseElement::new(2536293522055086772), - BaseElement::new(1551701365424645656), - ], - [ - BaseElement::new(17180574791560887028), - BaseElement::new(10973179380721509279), - BaseElement::new(15451549433162538377), - BaseElement::new(11230437049044589131), - BaseElement::new(14416448585168854586), - BaseElement::new(13520950449774622599), - BaseElement::new(14110026253178816443), - BaseElement::new(7562226163074683487), - ], - [ - BaseElement::new(15625584526294513461), - BaseElement::new(12868717640985007163), - BaseElement::new(5045176603305276542), - BaseElement::new(6821445918259551845), - BaseElement::new(15049718154108882541), - BaseElement::new(676731535772312475), - BaseElement::new(14779363889066167393), - BaseElement::new(17108914943169063073), - ], - [ - BaseElement::new(17529530613938644968), - BaseElement::new(13801329800663243071), - BaseElement::new(12666329335088484031), - BaseElement::new(10289051774796875319), - BaseElement::new(46795987162557096), - BaseElement::new(8590445841426612555), - BaseElement::new(7174111149249058757), - BaseElement::new(5820086182616968416), - ], -]; diff --git 
a/crypto/src/hash/griffin/griffin64_256_jive/tests.rs b/crypto/src/hash/griffin/griffin64_256_jive/tests.rs deleted file mode 100644 index 346f58fbe..000000000 --- a/crypto/src/hash/griffin/griffin64_256_jive/tests.rs +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// -// This source code is licensed under the MIT license found in the -// LICENSE file in the root directory of this source tree. - -use super::{ - BaseElement, ElementDigest, ElementHasher, FieldElement, GriffinJive64_256, Hasher, StarkField, - INV_MDS, MDS, STATE_WIDTH, -}; -use proptest::prelude::*; - -use rand_utils::{rand_array, rand_value}; - -#[allow(clippy::needless_range_loop)] -#[test] -fn mds_inv_test() { - let mut mul_result = [[BaseElement::new(0); STATE_WIDTH]; STATE_WIDTH]; - for i in 0..STATE_WIDTH { - for j in 0..STATE_WIDTH { - let result = { - let mut result = BaseElement::new(0); - for k in 0..STATE_WIDTH { - result += MDS[i][k] * INV_MDS[k][j] - } - result - }; - mul_result[i][j] = result; - if i == j { - assert_eq!(result, BaseElement::new(1)); - } else { - assert_eq!(result, BaseElement::new(0)); - } - } - } -} - -#[test] -fn test_pow_d() { - let mut e: BaseElement = rand_value(); - let e_copy = e; - let e_exp = e.exp(7); - super::pow_d(&mut e); - assert_eq!(e, e_exp); - super::pow_inv_d(&mut e); - assert_eq!(e, e_copy); -} - -#[test] -fn apply_permutation() { - let mut state: [BaseElement; STATE_WIDTH] = [ - BaseElement::new(0), - BaseElement::new(1), - BaseElement::new(2), - BaseElement::new(3), - BaseElement::new(4), - BaseElement::new(5), - BaseElement::new(6), - BaseElement::new(7), - ]; - - GriffinJive64_256::apply_permutation(&mut state); - - // expected values are obtained by executing sage implementation code - // available at https://github.com/Nashtare/griffin-hash - let expected = vec![ - BaseElement::new(5100889723013202324), - BaseElement::new(6905683344086677437), - BaseElement::new(8236358786066512460), - 
BaseElement::new(1729367862961866374), - BaseElement::new(11501420603552582981), - BaseElement::new(15040992847148175954), - BaseElement::new(10400407304634768298), - BaseElement::new(1197713229800045418), - ]; - - assert_eq!(expected, state); -} - -#[test] -fn hash() { - let state: [BaseElement; STATE_WIDTH] = [ - BaseElement::new(0), - BaseElement::new(1), - BaseElement::new(2), - BaseElement::new(3), - BaseElement::new(4), - BaseElement::new(5), - BaseElement::new(6), - BaseElement::new(7), - ]; - - let result = GriffinJive64_256::hash_elements(&state); - - // expected values are obtained by executing sage implementation code - // available at https://github.com/Nashtare/griffin-hash - let expected = vec![ - BaseElement::new(16887612651479285699), - BaseElement::new(16469590207124000227), - BaseElement::new(11134472952466778260), - BaseElement::new(15455301814830509354), - ]; - - assert_eq!(expected, result.as_elements()); -} - -#[test] -fn hash_elements_vs_merge() { - let elements: [BaseElement; 8] = rand_array(); - - let digests: [ElementDigest; 2] = [ - ElementDigest::new(elements[..4].try_into().unwrap()), - ElementDigest::new(elements[4..].try_into().unwrap()), - ]; - - let m_result = GriffinJive64_256::merge(&digests); - let h_result = GriffinJive64_256::hash_elements(&elements); - - // Because we use the Jive compression mode, `merge` and - // `hash_elements` methods are incompatible. 
- assert_ne!(m_result, h_result); -} - -#[test] -fn hash_elements_vs_merge_with_int() { - let seed = ElementDigest::new(rand_array()); - - // ----- value fits into a field element ------------------------------------------------------ - let val: BaseElement = rand_value(); - let m_result = GriffinJive64_256::merge_with_int(seed, val.as_int()); - - let mut elements = seed.as_elements().to_vec(); - elements.push(val); - let h_result = GriffinJive64_256::hash_elements(&elements); - - // Because we use the Jive compression mode, `merge` and - // `hash_elements` methods are incompatible. - assert_ne!(m_result, h_result); - - // ----- value does not fit into a field element ---------------------------------------------- - let val = BaseElement::MODULUS + 2; - let m_result = GriffinJive64_256::merge_with_int(seed, val); - - let mut elements = seed.as_elements().to_vec(); - elements.push(BaseElement::new(val)); - elements.push(BaseElement::new(1)); - let h_result = GriffinJive64_256::hash_elements(&elements); - - // Because we use the Jive compression mode, `merge` and - // `hash_elements` methods are incompatible. 
- assert_ne!(m_result, h_result); -} - -#[test] -fn hash_padding() { - // adding a zero bytes at the end of a byte string should result in a different hash - let r1 = GriffinJive64_256::hash(&[1_u8, 2, 3]); - let r2 = GriffinJive64_256::hash(&[1_u8, 2, 3, 0]); - assert_ne!(r1, r2); - - // same as above but with bigger inputs - let r1 = GriffinJive64_256::hash(&[1_u8, 2, 3, 4, 5, 6]); - let r2 = GriffinJive64_256::hash(&[1_u8, 2, 3, 4, 5, 6, 0]); - assert_ne!(r1, r2); - - // same as above but with input splitting over two elements - let r1 = GriffinJive64_256::hash(&[1_u8, 2, 3, 4, 5, 6, 7]); - let r2 = GriffinJive64_256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0]); - assert_ne!(r1, r2); - - // same as above but with multiple zeros - let r1 = GriffinJive64_256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0]); - let r2 = GriffinJive64_256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0]); - assert_ne!(r1, r2); -} - -#[test] -fn hash_elements_padding() { - let e1: [BaseElement; 2] = rand_array(); - let e2 = [e1[0], e1[1], BaseElement::ZERO]; - - let r1 = GriffinJive64_256::hash_elements(&e1); - let r2 = GriffinJive64_256::hash_elements(&e2); - assert_ne!(r1, r2); -} - -#[inline(always)] -fn apply_mds_naive(state: &mut [BaseElement; STATE_WIDTH]) { - let mut result = [BaseElement::ZERO; STATE_WIDTH]; - result.iter_mut().zip(MDS).for_each(|(r, mds_row)| { - state.iter().zip(mds_row).for_each(|(&s, m)| { - *r += m * s; - }); - }); - *state = result; -} - -proptest! 
{ - #[test] - fn mds_freq_proptest(a in any::<[u64; STATE_WIDTH]>()) { - - let mut v1 = [BaseElement::ZERO; STATE_WIDTH]; - let mut v2; - - for i in 0..STATE_WIDTH { - v1[i] = BaseElement::new(a[i]); - } - v2 = v1; - - apply_mds_naive(&mut v1); - GriffinJive64_256::apply_linear(&mut v2); - - prop_assert_eq!(v1, v2); - } -} diff --git a/crypto/src/hash/griffin/mod.rs b/crypto/src/hash/griffin/mod.rs deleted file mode 100644 index 190526fea..000000000 --- a/crypto/src/hash/griffin/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// -// This source code is licensed under the MIT license found in the -// LICENSE file in the root directory of this source tree. - -use super::{Digest, ElementHasher, Hasher}; - -mod griffin64_256_jive; -pub use griffin64_256_jive::GriffinJive64_256; diff --git a/crypto/src/hash/mod.rs b/crypto/src/hash/mod.rs index 99ff71105..5b8e1fe92 100644 --- a/crypto/src/hash/mod.rs +++ b/crypto/src/hash/mod.rs @@ -18,9 +18,6 @@ mod mds; mod rescue; pub use rescue::{Rp62_248, Rp64_256, RpJive64_256}; -mod griffin; -pub use griffin::GriffinJive64_256; - // HASHER TRAITS // ================================================================================================ diff --git a/crypto/src/hash/rescue/rp64_256_jive/mod.rs b/crypto/src/hash/rescue/rp64_256_jive/mod.rs index 66178baab..b28964684 100644 --- a/crypto/src/hash/rescue/rp64_256_jive/mod.rs +++ b/crypto/src/hash/rescue/rp64_256_jive/mod.rs @@ -152,7 +152,7 @@ impl Hasher for RpJive64_256 { } // convert the bytes into a field element and absorb it into the rate portion of the - // state; if the rate is filled up, apply the Griffin permutation and start absorbing + // state; if the rate is filled up, apply the Rescue-Prime permutation and start absorbing // again from zero index. 
state[RATE_RANGE.start + i] += BaseElement::new(u64::from_le_bytes(buf)); i += 1; @@ -237,7 +237,7 @@ impl ElementHasher for RpJive64_256 { } // absorb elements into the state one by one until the rate portion of the state is filled - // up; then apply the Griffin permutation and start absorbing again; repeat until all + // up; then apply the Rescue-Prime permutation and start absorbing again; repeat until all // elements have been absorbed let mut i = 0; for &element in elements.iter() { diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index 0f2d9de1b..ca33a7755 100644 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -16,9 +16,8 @@ //! [RandomCoin] implementation uses a cryptographic hash function to generate pseudo-random //! elements form a seed. -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; @@ -29,7 +28,6 @@ pub mod hashers { pub use super::hash::Blake3_192; pub use super::hash::Blake3_256; - pub use super::hash::GriffinJive64_256; pub use super::hash::Rp62_248; pub use super::hash::Rp64_256; pub use super::hash::RpJive64_256; diff --git a/crypto/src/merkle/concurrent.rs b/crypto/src/merkle/concurrent.rs index ff81351fd..4dbcb0913 100644 --- a/crypto/src/merkle/concurrent.rs +++ b/crypto/src/merkle/concurrent.rs @@ -4,8 +4,9 @@ // LICENSE file in the root directory of this source tree. use crate::Hasher; +use alloc::vec::Vec; use core::slice; -use utils::{collections::*, iterators::*, rayon}; +use utils::{iterators::*, rayon}; // CONSTANTS // ================================================================================================ diff --git a/crypto/src/merkle/mod.rs b/crypto/src/merkle/mod.rs index da9073a01..5428fa6d8 100644 --- a/crypto/src/merkle/mod.rs +++ b/crypto/src/merkle/mod.rs @@ -4,8 +4,11 @@ // LICENSE file in the root directory of this source tree. 
use crate::{errors::MerkleTreeError, hash::Hasher}; +use alloc::{ + collections::{BTreeMap, BTreeSet}, + vec::Vec, +}; use core::slice; -use utils::collections::*; mod proofs; pub use proofs::BatchMerkleProof; diff --git a/crypto/src/merkle/proofs.rs b/crypto/src/merkle/proofs.rs index 0791152d7..c0d76464c 100644 --- a/crypto/src/merkle/proofs.rs +++ b/crypto/src/merkle/proofs.rs @@ -4,7 +4,8 @@ // LICENSE file in the root directory of this source tree. use crate::{errors::MerkleTreeError, Hasher}; -use utils::{collections::*, string::*, ByteReader, DeserializationError, Serializable}; +use alloc::{collections::BTreeMap, string::ToString, vec::Vec}; +use utils::{ByteReader, DeserializationError, Serializable}; // CONSTANTS // ================================================================================================ diff --git a/crypto/src/random/default.rs b/crypto/src/random/default.rs index a2434f812..e2f93b988 100644 --- a/crypto/src/random/default.rs +++ b/crypto/src/random/default.rs @@ -4,8 +4,8 @@ // LICENSE file in the root directory of this source tree. use crate::{errors::RandomCoinError, Digest, ElementHasher, RandomCoin}; +use alloc::vec::Vec; use math::{FieldElement, StarkField}; -use utils::collections::*; // DEFAULT RANDOM COIN IMPLEMENTATION // ================================================================================================ diff --git a/crypto/src/random/mod.rs b/crypto/src/random/mod.rs index 509c5f951..28ad0ad8d 100644 --- a/crypto/src/random/mod.rs +++ b/crypto/src/random/mod.rs @@ -4,8 +4,8 @@ // LICENSE file in the root directory of this source tree. 
use crate::{errors::RandomCoinError, ElementHasher, Hasher}; +use alloc::vec::Vec; use math::{FieldElement, StarkField}; -use utils::collections::*; mod default; pub use default::DefaultRandomCoin; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 175e56a31..0c01edb03 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "examples" -version = "0.8.1" +version = "0.8.3" description = "Examples of using Winterfell STARK prover/verifier" authors = ["winterfell contributors"] readme = "README.md" diff --git a/examples/src/fibonacci/fib_small/mod.rs b/examples/src/fibonacci/fib_small/mod.rs index 5b149a42f..85bb89c42 100644 --- a/examples/src/fibonacci/fib_small/mod.rs +++ b/examples/src/fibonacci/fib_small/mod.rs @@ -33,7 +33,6 @@ type Blake3_256 = winterfell::crypto::hashers::Blake3_256; type Sha3_256 = winterfell::crypto::hashers::Sha3_256; type Rp64_256 = winterfell::crypto::hashers::Rp64_256; type RpJive64_256 = winterfell::crypto::hashers::RpJive64_256; -type GriffinJive64_256 = winterfell::crypto::hashers::GriffinJive64_256; // FIBONACCI EXAMPLE // ================================================================================================ @@ -60,9 +59,6 @@ pub fn get_example( HashFunction::RpJive64_256 => { Ok(Box::new(FibExample::::new(sequence_length, options))) } - HashFunction::GriffinJive64_256 => { - Ok(Box::new(FibExample::::new(sequence_length, options))) - } } } diff --git a/examples/src/lib.rs b/examples/src/lib.rs index 14f27a584..3801a260d 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -5,7 +5,7 @@ use structopt::StructOpt; use winterfell::{ - crypto::hashers::{GriffinJive64_256, Rp64_256, RpJive64_256}, + crypto::hashers::{Rp64_256, RpJive64_256}, math::fields::f128::BaseElement, FieldExtension, ProofOptions, StarkProof, VerifierError, }; @@ -88,7 +88,6 @@ impl ExampleOptions { "sha3_256" => HashFunction::Sha3_256, "rp64_256" => HashFunction::Rp64_256, "rp_jive64_256" => 
HashFunction::RpJive64_256, - "griffin_jive64_256" => HashFunction::GriffinJive64_256, val => panic!("'{val}' is not a valid hash function option"), }; @@ -113,7 +112,6 @@ impl ExampleOptions { "sha3_256" => proof.security_level::(conjectured), "rp64_256" => proof.security_level::(conjectured), "rp_jive64_256" => proof.security_level::(conjectured), - "griffin_jive64_256" => proof.security_level::(conjectured), val => panic!("'{val}' is not a valid hash function option"), }; @@ -236,10 +234,4 @@ pub enum HashFunction { /// /// When this function is used in the STARK protocol, proof security cannot exceed 128 bits. RpJive64_256, - - /// Griffin hash function with 256 bit output. It only works in `f64` field. - /// This instance uses the Jive compression mode in Merkle trees. - /// - /// When this function is used in the STARK protocol, proof security cannot exceed 128 bits. - GriffinJive64_256, } diff --git a/examples/src/rescue_raps/custom_trace_table.rs b/examples/src/rescue_raps/custom_trace_table.rs index b96bfe4d8..1bf8fcf14 100644 --- a/examples/src/rescue_raps/custom_trace_table.rs +++ b/examples/src/rescue_raps/custom_trace_table.rs @@ -3,7 +3,7 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
-use core_utils::{collections::*, uninit_vector}; +use core_utils::uninit_vector; use winterfell::{ math::{FieldElement, StarkField}, matrix::ColMatrix, diff --git a/fri/Cargo.toml b/fri/Cargo.toml index 8dd309fe3..0f0cfa392 100644 --- a/fri/Cargo.toml +++ b/fri/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winter-fri" -version = "0.8.1" +version = "0.8.3" description = "Implementation of FRI protocol for the Winterfell STARK prover/verifier" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-fri/0.8.1" +documentation = "https://docs.rs/winter-fri/0.8.3" categories = ["cryptography", "no-std"] keywords = ["crypto", "polynomial", "commitments"] edition = "2021" diff --git a/fri/src/folding/mod.rs b/fri/src/folding/mod.rs index 6d1d5127f..f1bd15d44 100644 --- a/fri/src/folding/mod.rs +++ b/fri/src/folding/mod.rs @@ -11,11 +11,12 @@ #[cfg(feature = "concurrent")] use utils::iterators::*; +use alloc::vec::Vec; use math::{ fft::{get_inv_twiddles, serial_fft}, get_power_series_with_offset, polynom, FieldElement, StarkField, }; -use utils::{collections::*, iter_mut, uninit_vector}; +use utils::{iter_mut, uninit_vector}; // DEGREE-RESPECTING PROJECTION // ================================================================================================ diff --git a/fri/src/lib.rs b/fri/src/lib.rs index 45e027c5f..6f680c428 100644 --- a/fri/src/lib.rs +++ b/fri/src/lib.rs @@ -61,9 +61,8 @@ //! * [DEEP-FRI: Sampling Outside the Box Improves Soundness](https://eprint.iacr.org/2019/336) //! 
* Swastik Kooparty's [talk on DEEP-FRI](https://www.youtube.com/watch?v=txo_kPSn59Y&list=PLcIyXLwiPilWvjvNkhMn283LV370Pk5CT&index=6) -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; diff --git a/fri/src/proof.rs b/fri/src/proof.rs index 456fa1904..8f7511437 100644 --- a/fri/src/proof.rs +++ b/fri/src/proof.rs @@ -3,11 +3,11 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use alloc::{string::ToString, vec::Vec}; use crypto::{BatchMerkleProof, ElementHasher, Hasher}; use math::FieldElement; use utils::{ - collections::*, string::*, ByteReader, ByteWriter, Deserializable, DeserializationError, - Serializable, SliceReader, + ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, SliceReader, }; // FRI PROOF diff --git a/fri/src/prover/channel.rs b/fri/src/prover/channel.rs index 69ade99b1..6db858a2a 100644 --- a/fri/src/prover/channel.rs +++ b/fri/src/prover/channel.rs @@ -3,10 +3,10 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
+use alloc::vec::Vec; use core::marker::PhantomData; use crypto::{ElementHasher, Hasher, RandomCoin}; use math::FieldElement; -use utils::collections::*; // PROVER CHANNEL TRAIT // ================================================================================================ diff --git a/fri/src/prover/mod.rs b/fri/src/prover/mod.rs index bb48f2a7f..f75bb8355 100644 --- a/fri/src/prover/mod.rs +++ b/fri/src/prover/mod.rs @@ -9,10 +9,11 @@ use crate::{ utils::hash_values, FriOptions, }; +use alloc::vec::Vec; use core::marker::PhantomData; use crypto::{ElementHasher, Hasher, MerkleTree}; use math::{fft, FieldElement, StarkField}; -use utils::{collections::*, flatten_vector_elements, group_slice_elements, transpose_slice}; +use utils::{flatten_vector_elements, group_slice_elements, transpose_slice}; mod channel; pub use channel::{DefaultProverChannel, ProverChannel}; diff --git a/fri/src/prover/tests.rs b/fri/src/prover/tests.rs index 2bdb5528f..77cd83cbe 100644 --- a/fri/src/prover/tests.rs +++ b/fri/src/prover/tests.rs @@ -8,9 +8,10 @@ use crate::{ verifier::{DefaultVerifierChannel, FriVerifier}, FriOptions, FriProof, VerifierError, }; +use alloc::vec::Vec; use crypto::{hashers::Blake3_256, DefaultRandomCoin, Hasher, RandomCoin}; use math::{fft, fields::f128::BaseElement, FieldElement}; -use utils::{collections::*, Deserializable, Serializable, SliceReader}; +use utils::{Deserializable, Serializable, SliceReader}; type Blake3 = Blake3_256; diff --git a/fri/src/utils.rs b/fri/src/utils.rs index b2af65907..5a8d1b103 100644 --- a/fri/src/utils.rs +++ b/fri/src/utils.rs @@ -3,9 +3,10 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
+use alloc::vec::Vec; use crypto::ElementHasher; use math::FieldElement; -use utils::{collections::*, iter_mut, uninit_vector}; +use utils::{iter_mut, uninit_vector}; #[cfg(feature = "concurrent")] use utils::iterators::*; diff --git a/fri/src/verifier/channel.rs b/fri/src/verifier/channel.rs index 5a27176a1..569410fa5 100644 --- a/fri/src/verifier/channel.rs +++ b/fri/src/verifier/channel.rs @@ -4,9 +4,10 @@ // LICENSE file in the root directory of this source tree. use crate::{FriProof, VerifierError}; +use alloc::vec::Vec; use crypto::{BatchMerkleProof, ElementHasher, Hasher, MerkleTree}; use math::FieldElement; -use utils::{collections::*, group_vector_elements, DeserializationError}; +use utils::{group_vector_elements, DeserializationError}; // VERIFIER CHANNEL TRAIT // ================================================================================================ diff --git a/fri/src/verifier/mod.rs b/fri/src/verifier/mod.rs index 8aed9fbc5..557cda9f1 100644 --- a/fri/src/verifier/mod.rs +++ b/fri/src/verifier/mod.rs @@ -6,10 +6,10 @@ //! Contains an implementation of FRI verifier and associated components. 
use crate::{folding::fold_positions, utils::map_positions_to_indexes, FriOptions, VerifierError}; +use alloc::vec::Vec; use core::{marker::PhantomData, mem}; use crypto::{ElementHasher, RandomCoin}; use math::{polynom, FieldElement, StarkField}; -use utils::collections::*; mod channel; pub use channel::{DefaultVerifierChannel, VerifierChannel}; diff --git a/math/Cargo.toml b/math/Cargo.toml index 1af039185..785b3b190 100644 --- a/math/Cargo.toml +++ b/math/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winter-math" -version = "0.8.1" +version = "0.8.3" description = "Math library for the Winterfell STARK prover/verifier" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-math/0.8.1" +documentation = "https://docs.rs/winter-math/0.8.3" categories = ["cryptography", "no-std"] keywords = ["crypto", "finite-fields", "polynomials", "fft"] edition = "2021" diff --git a/math/src/fft/concurrent.rs b/math/src/fft/concurrent.rs index 01b989118..120e9bd9e 100644 --- a/math/src/fft/concurrent.rs +++ b/math/src/fft/concurrent.rs @@ -5,7 +5,8 @@ use super::fft_inputs::FftInputs; use crate::field::{FieldElement, StarkField}; -use utils::{collections::*, iterators::*, rayon, uninit_vector}; +use alloc::vec::Vec; +use utils::{iterators::*, rayon, uninit_vector}; // POLYNOMIAL EVALUATION // ================================================================================================ diff --git a/math/src/fft/mod.rs b/math/src/fft/mod.rs index 3676ff6a2..e06cf910e 100644 --- a/math/src/fft/mod.rs +++ b/math/src/fft/mod.rs @@ -16,6 +16,7 @@ use crate::{ field::{FieldElement, StarkField}, utils::get_power_series, }; +use alloc::vec::Vec; pub mod fft_inputs; pub mod real_u64; @@ -24,8 +25,6 @@ mod serial; #[cfg(feature = "concurrent")] mod concurrent; -use utils::collections::*; - #[cfg(test)] mod tests; diff --git a/math/src/fft/serial.rs 
b/math/src/fft/serial.rs index f06b18412..a29c0fd84 100644 --- a/math/src/fft/serial.rs +++ b/math/src/fft/serial.rs @@ -5,7 +5,8 @@ use super::fft_inputs::FftInputs; use crate::{field::StarkField, FieldElement}; -use utils::{collections::*, uninit_vector}; +use alloc::vec::Vec; +use utils::uninit_vector; // POLYNOMIAL EVALUATION // ================================================================================================ diff --git a/math/src/fft/tests.rs b/math/src/fft/tests.rs index aef26e24e..d363d01ab 100644 --- a/math/src/fft/tests.rs +++ b/math/src/fft/tests.rs @@ -9,8 +9,8 @@ use crate::{ polynom, utils::get_power_series, }; +use alloc::vec::Vec; use rand_utils::rand_vector; -use utils::collections::*; // CORE ALGORITHMS // ================================================================================================ diff --git a/math/src/field/extensions/cubic.rs b/math/src/field/extensions/cubic.rs index 9e2033feb..a0d789b3a 100644 --- a/math/src/field/extensions/cubic.rs +++ b/math/src/field/extensions/cubic.rs @@ -4,14 +4,18 @@ // LICENSE file in the root directory of this source tree. use super::{ExtensibleField, ExtensionOf, FieldElement}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use core::{ fmt, ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, slice, }; use utils::{ - collections::*, string::*, AsBytes, ByteReader, ByteWriter, Deserializable, - DeserializationError, Randomizable, Serializable, SliceReader, + AsBytes, ByteReader, ByteWriter, Deserializable, DeserializationError, Randomizable, + Serializable, SliceReader, }; #[cfg(feature = "serde")] diff --git a/math/src/field/extensions/quadratic.rs b/math/src/field/extensions/quadratic.rs index 9431a47e0..32194816a 100644 --- a/math/src/field/extensions/quadratic.rs +++ b/math/src/field/extensions/quadratic.rs @@ -4,14 +4,18 @@ // LICENSE file in the root directory of this source tree. 
use super::{ExtensibleField, ExtensionOf, FieldElement}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use core::{ fmt, ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, slice, }; use utils::{ - collections::*, string::*, AsBytes, ByteReader, ByteWriter, Deserializable, - DeserializationError, Randomizable, Serializable, SliceReader, + AsBytes, ByteReader, ByteWriter, Deserializable, DeserializationError, Randomizable, + Serializable, SliceReader, }; #[cfg(feature = "serde")] diff --git a/math/src/field/f128/mod.rs b/math/src/field/f128/mod.rs index cdd288d92..cab5a9465 100644 --- a/math/src/field/f128/mod.rs +++ b/math/src/field/f128/mod.rs @@ -11,6 +11,10 @@ //! sub-optimal as well. use super::{ExtensibleField, FieldElement, StarkField}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use core::{ fmt::{Debug, Display, Formatter}, mem, @@ -18,8 +22,8 @@ use core::{ slice, }; use utils::{ - collections::*, string::*, AsBytes, ByteReader, ByteWriter, Deserializable, - DeserializationError, Randomizable, Serializable, + AsBytes, ByteReader, ByteWriter, Deserializable, DeserializationError, Randomizable, + Serializable, }; #[cfg(feature = "serde")] diff --git a/math/src/field/f128/tests.rs b/math/src/field/f128/tests.rs index cabf5cce1..6efdafe94 100644 --- a/math/src/field/f128/tests.rs +++ b/math/src/field/f128/tests.rs @@ -5,9 +5,9 @@ use super::{AsBytes, BaseElement, ByteReader, DeserializationError, FieldElement, StarkField, M}; use crate::field::{ExtensionOf, QuadExtension}; +use alloc::vec::Vec; use num_bigint::BigUint; use rand_utils::{rand_value, rand_vector}; -use utils::collections::*; use utils::SliceReader; // BASIC ALGEBRA diff --git a/math/src/field/f62/mod.rs b/math/src/field/f62/mod.rs index d6967ab63..60fa1673f 100644 --- a/math/src/field/f62/mod.rs +++ b/math/src/field/f62/mod.rs @@ -10,6 +10,10 @@ //! stored in the Montgomery form using `u64` as the backing type. 
use super::{ExtensibleField, FieldElement, StarkField}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use core::{ fmt::{Debug, Display, Formatter}, mem, @@ -17,8 +21,8 @@ use core::{ slice, }; use utils::{ - collections::*, string::*, AsBytes, ByteReader, ByteWriter, Deserializable, - DeserializationError, Randomizable, Serializable, + AsBytes, ByteReader, ByteWriter, Deserializable, DeserializationError, Randomizable, + Serializable, }; #[cfg(feature = "serde")] diff --git a/math/src/field/f64/mod.rs b/math/src/field/f64/mod.rs index 31f224e0b..d8a788dec 100644 --- a/math/src/field/f64/mod.rs +++ b/math/src/field/f64/mod.rs @@ -15,6 +15,10 @@ //! * $8$ is the 64th root of unity which opens up potential for optimized FFT implementations. use super::{ExtensibleField, FieldElement, StarkField}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use core::{ fmt::{Debug, Display, Formatter}, mem, @@ -22,8 +26,8 @@ use core::{ slice, }; use utils::{ - collections::*, string::*, AsBytes, ByteReader, ByteWriter, Deserializable, - DeserializationError, Randomizable, Serializable, + AsBytes, ByteReader, ByteWriter, Deserializable, DeserializationError, Randomizable, + Serializable, }; #[cfg(feature = "serde")] diff --git a/math/src/field/traits.rs b/math/src/field/traits.rs index 842f4fe2e..9668ee832 100644 --- a/math/src/field/traits.rs +++ b/math/src/field/traits.rs @@ -3,6 +3,7 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
+use alloc::vec::Vec; use core::{ fmt::{Debug, Display}, ops::{ @@ -10,9 +11,7 @@ use core::{ SubAssign, }, }; -use utils::{ - collections::*, AsBytes, Deserializable, DeserializationError, Randomizable, Serializable, -}; +use utils::{AsBytes, Deserializable, DeserializationError, Randomizable, Serializable}; // FIELD ELEMENT // ================================================================================================ diff --git a/math/src/lib.rs b/math/src/lib.rs index e63b521d8..4a814a421 100644 --- a/math/src/lib.rs +++ b/math/src/lib.rs @@ -87,9 +87,8 @@ //! //! Number of threads can be configured via `RAYON_NUM_THREADS` environment variable -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; diff --git a/math/src/polynom/mod.rs b/math/src/polynom/mod.rs index c2e206a43..09f346445 100644 --- a/math/src/polynom/mod.rs +++ b/math/src/polynom/mod.rs @@ -25,8 +25,9 @@ //! ``` use crate::{field::FieldElement, utils::batch_inversion}; +use alloc::vec::Vec; use core::mem; -use utils::{collections::*, group_vector_elements}; +use utils::group_vector_elements; #[cfg(test)] mod tests; diff --git a/math/src/polynom/tests.rs b/math/src/polynom/tests.rs index 1f4132210..c8847e647 100644 --- a/math/src/polynom/tests.rs +++ b/math/src/polynom/tests.rs @@ -8,7 +8,7 @@ use crate::{ field::{f128::BaseElement, FieldElement, StarkField}, utils::get_power_series, }; -use utils::collections::*; +use alloc::vec::Vec; #[test] fn eval() { diff --git a/math/src/utils/mod.rs b/math/src/utils/mod.rs index 22a028ae2..9add32096 100644 --- a/math/src/utils/mod.rs +++ b/math/src/utils/mod.rs @@ -3,8 +3,10 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
+use alloc::vec::Vec; + use crate::{field::FieldElement, ExtensionOf}; -use utils::{batch_iter_mut, collections::*, iter_mut, uninit_vector}; +use utils::{batch_iter_mut, iter_mut, uninit_vector}; #[cfg(feature = "concurrent")] use utils::iterators::*; diff --git a/prover/Cargo.toml b/prover/Cargo.toml index e86ca90b5..30d8143ea 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winter-prover" -version = "0.8.1" +version = "0.8.3" description = "Winterfell STARK prover" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-prover/0.8.1" +documentation = "https://docs.rs/winter-prover/0.8.3" categories = ["cryptography", "no-std"] keywords = ["crypto", "zkp", "stark", "prover"] edition = "2021" diff --git a/prover/src/channel.rs b/prover/src/channel.rs index 149c4e0d1..b782c2b2e 100644 --- a/prover/src/channel.rs +++ b/prover/src/channel.rs @@ -7,11 +7,11 @@ use air::{ proof::{Commitments, Context, OodFrame, OodFrameTraceStates, Queries, StarkProof}, Air, ConstraintCompositionCoefficients, DeepCompositionCoefficients, }; +use alloc::vec::Vec; use core::marker::PhantomData; use crypto::{ElementHasher, RandomCoin}; use fri::FriProof; use math::{FieldElement, ToElements}; -use utils::collections::*; #[cfg(feature = "concurrent")] use utils::iterators::*; diff --git a/prover/src/composer/mod.rs b/prover/src/composer/mod.rs index 24260c989..478236efe 100644 --- a/prover/src/composer/mod.rs +++ b/prover/src/composer/mod.rs @@ -5,8 +5,9 @@ use super::{constraints::CompositionPoly, StarkDomain, TracePolyTable}; use air::{proof::OodFrameTraceStates, DeepCompositionCoefficients}; +use alloc::vec::Vec; use math::{add_in_place, fft, mul_acc, polynom, ExtensionOf, FieldElement, StarkField}; -use utils::{collections::*, iter_mut}; +use utils::iter_mut; #[cfg(feature = "concurrent")] use utils::iterators::*; diff --git 
a/prover/src/constraints/commitment.rs b/prover/src/constraints/commitment.rs index e07d0bc35..ee204c220 100644 --- a/prover/src/constraints/commitment.rs +++ b/prover/src/constraints/commitment.rs @@ -5,9 +5,9 @@ use super::RowMatrix; use air::proof::Queries; +use alloc::vec::Vec; use crypto::{ElementHasher, MerkleTree}; use math::FieldElement; -use utils::collections::*; // CONSTRAINT COMMITMENT // ================================================================================================ diff --git a/prover/src/constraints/composition_poly.rs b/prover/src/constraints/composition_poly.rs index 1bcb56cca..13afd9050 100644 --- a/prover/src/constraints/composition_poly.rs +++ b/prover/src/constraints/composition_poly.rs @@ -4,8 +4,8 @@ // LICENSE file in the root directory of this source tree. use super::{ColMatrix, StarkDomain}; +use alloc::vec::Vec; use math::{fft, polynom::degree_of, FieldElement}; -use utils::collections::*; // CONSTRAINT COMPOSITION POLYNOMIAL TRACE // ================================================================================================ @@ -138,8 +138,8 @@ fn segment( #[cfg(test)] mod tests { + use alloc::vec::Vec; use math::fields::f128::BaseElement; - use utils::collections::*; #[test] fn segment() { diff --git a/prover/src/constraints/evaluation_table.rs b/prover/src/constraints/evaluation_table.rs index e794f5a6d..b57897f4a 100644 --- a/prover/src/constraints/evaluation_table.rs +++ b/prover/src/constraints/evaluation_table.rs @@ -4,8 +4,9 @@ // LICENSE file in the root directory of this source tree. 
use super::{ConstraintDivisor, StarkDomain}; +use alloc::vec::Vec; use math::{batch_inversion, FieldElement, StarkField}; -use utils::{batch_iter_mut, collections::*, iter_mut, uninit_vector}; +use utils::{batch_iter_mut, iter_mut, uninit_vector}; #[cfg(debug_assertions)] use math::fft; diff --git a/prover/src/constraints/evaluator/boundary.rs b/prover/src/constraints/evaluator/boundary.rs index ba7485579..8c7137d97 100644 --- a/prover/src/constraints/evaluator/boundary.rs +++ b/prover/src/constraints/evaluator/boundary.rs @@ -5,8 +5,8 @@ use super::StarkDomain; use air::{Air, AuxTraceRandElements, ConstraintDivisor}; +use alloc::{collections::BTreeMap, vec::Vec}; use math::{fft, ExtensionOf, FieldElement}; -use utils::collections::*; // CONSTANTS // ================================================================================================ diff --git a/prover/src/constraints/evaluator/default.rs b/prover/src/constraints/evaluator/default.rs index 8ede10331..21c6d6d80 100644 --- a/prover/src/constraints/evaluator/default.rs +++ b/prover/src/constraints/evaluator/default.rs @@ -12,8 +12,9 @@ use air::{ LagrangeKernelBoundaryConstraint, LagrangeKernelEvaluationFrame, LagrangeKernelTransitionConstraints, TransitionConstraints, }; +use alloc::vec::Vec; use math::{batch_inversion, FieldElement}; -use utils::{collections::*, iter_mut}; +use utils::iter_mut; #[cfg(feature = "concurrent")] use utils::{iterators::*, rayon}; diff --git a/prover/src/constraints/evaluator/periodic_table.rs b/prover/src/constraints/evaluator/periodic_table.rs index f90119f6d..0fcbd7070 100644 --- a/prover/src/constraints/evaluator/periodic_table.rs +++ b/prover/src/constraints/evaluator/periodic_table.rs @@ -4,8 +4,9 @@ // LICENSE file in the root directory of this source tree. 
use air::Air; +use alloc::{collections::BTreeMap, vec::Vec}; use math::{fft, StarkField}; -use utils::{collections::*, uninit_vector}; +use utils::uninit_vector; pub struct PeriodicValueTable { values: Vec, @@ -94,10 +95,10 @@ impl PeriodicValueTable { mod tests { use crate::tests::MockAir; use air::Air; + use alloc::vec::Vec; use math::{ fields::f128::BaseElement, get_power_series_with_offset, polynom, FieldElement, StarkField, }; - use utils::collections::*; #[test] fn periodic_value_table() { diff --git a/prover/src/domain.rs b/prover/src/domain.rs index 5cb20cc67..43f04a424 100644 --- a/prover/src/domain.rs +++ b/prover/src/domain.rs @@ -4,8 +4,8 @@ // LICENSE file in the root directory of this source tree. use air::Air; +use alloc::vec::Vec; use math::{fft, get_power_series, StarkField}; -use utils::collections::*; // TYPES AND INTERFACES // ================================================================================================ diff --git a/prover/src/lib.rs b/prover/src/lib.rs index d3e9a154e..69073145c 100644 --- a/prover/src/lib.rs +++ b/prover/src/lib.rs @@ -37,9 +37,8 @@ //! also depends on the capabilities of the machine used to generate the proofs (i.e. on number //! of CPU cores and memory bandwidth). 
-#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; @@ -55,8 +54,8 @@ pub use utils::{ SliceReader, }; +use alloc::vec::Vec; use fri::FriProver; -use utils::collections::*; pub use math; use math::{ @@ -286,7 +285,7 @@ pub trait Prover { for i in 0..trace.info().num_aux_segments() { let num_columns = trace.info().get_aux_segment_width(i); let (aux_segment, rand_elements) = { - let _ = info_span!("build_aux_trace_segment", num_columns).entered(); + let span = info_span!("build_aux_trace_segment", num_columns).entered(); // draw a set of random elements required to build an auxiliary trace segment let rand_elements = channel.get_aux_trace_segment_rand_elements(i); @@ -301,6 +300,7 @@ pub trait Prover { .build_aux_segment(&aux_trace_segments, &rand_elements, lagrange_rand_elements) .expect("failed build auxiliary trace segment"); + drop(span); (aux_segment, rand_elements) }; assert_eq!(aux_segment.num_cols(), num_columns); diff --git a/prover/src/matrix/col_matrix.rs b/prover/src/matrix/col_matrix.rs index 55e9b6b6c..8f49b12ab 100644 --- a/prover/src/matrix/col_matrix.rs +++ b/prover/src/matrix/col_matrix.rs @@ -4,10 +4,11 @@ // LICENSE file in the root directory of this source tree. 
use crate::StarkDomain; +use alloc::vec::Vec; use core::{iter::FusedIterator, slice}; use crypto::{ElementHasher, MerkleTree}; use math::{fft, polynom, FieldElement}; -use utils::{batch_iter_mut, collections::*, iter, iter_mut, uninit_vector}; +use utils::{batch_iter_mut, iter, iter_mut, uninit_vector}; #[cfg(feature = "concurrent")] use utils::iterators::*; diff --git a/prover/src/matrix/row_matrix.rs b/prover/src/matrix/row_matrix.rs index 4e23a9ea2..ec2d0fb34 100644 --- a/prover/src/matrix/row_matrix.rs +++ b/prover/src/matrix/row_matrix.rs @@ -5,9 +5,9 @@ use super::{ColMatrix, Segment}; use crate::StarkDomain; +use alloc::vec::Vec; use crypto::{ElementHasher, MerkleTree}; use math::{fft, FieldElement, StarkField}; -use utils::collections::*; use utils::{batch_iter_mut, flatten_vector_elements, uninit_vector}; #[cfg(feature = "concurrent")] diff --git a/prover/src/matrix/segments.rs b/prover/src/matrix/segments.rs index 025272290..0b76142e9 100644 --- a/prover/src/matrix/segments.rs +++ b/prover/src/matrix/segments.rs @@ -4,9 +4,10 @@ // LICENSE file in the root directory of this source tree. 
use super::ColMatrix; +use alloc::vec::Vec; use core::ops::Deref; use math::{fft::fft_inputs::FftInputs, FieldElement, StarkField}; -use utils::{collections::*, group_vector_elements, uninit_vector}; +use utils::{group_vector_elements, uninit_vector}; #[cfg(feature = "concurrent")] use utils::iterators::*; diff --git a/prover/src/matrix/tests.rs b/prover/src/matrix/tests.rs index ebfa5de78..b7bfbca77 100644 --- a/prover/src/matrix/tests.rs +++ b/prover/src/matrix/tests.rs @@ -7,8 +7,8 @@ use crate::{ math::{fields::f64::BaseElement, get_power_series, polynom, StarkField}, ColMatrix, RowMatrix, }; +use alloc::vec::Vec; use rand_utils::rand_vector; -use utils::collections::*; #[test] fn test_eval_poly_with_offset_matrix() { diff --git a/prover/src/tests/mod.rs b/prover/src/tests/mod.rs index 18fbfb403..6addc1a2c 100644 --- a/prover/src/tests/mod.rs +++ b/prover/src/tests/mod.rs @@ -8,8 +8,8 @@ use air::{ Air, AirContext, Assertion, EvaluationFrame, FieldExtension, ProofOptions, TraceInfo, TransitionConstraintDegree, }; +use alloc::vec::Vec; use math::{fields::f128::BaseElement, FieldElement, StarkField}; -use utils::collections::*; // FIBONACCI TRACE BUILDER // ================================================================================================ diff --git a/prover/src/trace/poly_table.rs b/prover/src/trace/poly_table.rs index 6b3c94c9e..2cbf68082 100644 --- a/prover/src/trace/poly_table.rs +++ b/prover/src/trace/poly_table.rs @@ -8,8 +8,8 @@ use crate::{ ColMatrix, }; use air::{proof::OodFrameTraceStates, LagrangeKernelEvaluationFrame}; +use alloc::vec::Vec; use math::{FieldElement, StarkField}; -use utils::collections::*; // TRACE POLYNOMIAL TABLE // ================================================================================================ diff --git a/prover/src/trace/tests.rs b/prover/src/trace/tests.rs index f76eb2b9c..4f3f40966 100644 --- a/prover/src/trace/tests.rs +++ b/prover/src/trace/tests.rs @@ -4,8 +4,8 @@ // LICENSE file in the root 
directory of this source tree. use crate::{tests::build_fib_trace, Trace}; +use alloc::vec::Vec; use math::fields::f128::BaseElement; -use utils::collections::*; #[test] fn new_trace_table() { diff --git a/prover/src/trace/trace_lde/default/mod.rs b/prover/src/trace/trace_lde/default/mod.rs index 487051529..4cdb9158c 100644 --- a/prover/src/trace/trace_lde/default/mod.rs +++ b/prover/src/trace/trace_lde/default/mod.rs @@ -9,9 +9,9 @@ use super::{ }; use crate::{RowMatrix, DEFAULT_SEGMENT_WIDTH}; use air::LagrangeKernelEvaluationFrame; +use alloc::vec::Vec; use crypto::MerkleTree; use tracing::info_span; -use utils::collections::*; #[cfg(test)] mod tests; diff --git a/prover/src/trace/trace_lde/default/tests.rs b/prover/src/trace/trace_lde/default/tests.rs index e07f29cd8..777e1ada8 100644 --- a/prover/src/trace/trace_lde/default/tests.rs +++ b/prover/src/trace/trace_lde/default/tests.rs @@ -7,12 +7,12 @@ use crate::{ tests::{build_fib_trace, MockAir}, DefaultTraceLde, StarkDomain, Trace, TraceLde, }; +use alloc::vec::Vec; use crypto::{hashers::Blake3_256, ElementHasher, MerkleTree}; use math::{ fields::f128::BaseElement, get_power_series, get_power_series_with_offset, polynom, FieldElement, StarkField, }; -use utils::collections::*; type Blake3 = Blake3_256; diff --git a/prover/src/trace/trace_lde/mod.rs b/prover/src/trace/trace_lde/mod.rs index a11c27a75..a3d9193e6 100644 --- a/prover/src/trace/trace_lde/mod.rs +++ b/prover/src/trace/trace_lde/mod.rs @@ -6,8 +6,8 @@ use super::{ColMatrix, EvaluationFrame, FieldElement, TracePolyTable}; use crate::StarkDomain; use air::{proof::Queries, LagrangeKernelEvaluationFrame, TraceInfo}; +use alloc::vec::Vec; use crypto::{ElementHasher, Hasher}; -use utils::collections::*; mod default; pub use default::DefaultTraceLde; diff --git a/prover/src/trace/trace_table.rs b/prover/src/trace/trace_table.rs index f196ad5bd..87aec2a19 100644 --- a/prover/src/trace/trace_table.rs +++ b/prover/src/trace/trace_table.rs @@ -5,8 +5,9 @@ use 
super::{ColMatrix, Trace}; use air::{EvaluationFrame, TraceInfo}; +use alloc::vec::Vec; use math::{FieldElement, StarkField}; -use utils::{collections::*, uninit_vector}; +use utils::uninit_vector; #[cfg(feature = "concurrent")] use utils::{iterators::*, rayon}; @@ -197,7 +198,10 @@ impl TraceTable { /// Panics if `fragment_length` is smaller than 2, greater than the length of the trace, /// or is not a power of two. #[cfg(not(feature = "concurrent"))] - pub fn fragments(&mut self, fragment_length: usize) -> vec::IntoIter> { + pub fn fragments( + &mut self, + fragment_length: usize, + ) -> alloc::vec::IntoIter> { self.build_fragments(fragment_length).into_iter() } diff --git a/utils/core/Cargo.toml b/utils/core/Cargo.toml index 0603d5a6a..84d29dfa0 100644 --- a/utils/core/Cargo.toml +++ b/utils/core/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winter-utils" -version = "0.8.1" +version = "0.8.3" description = "Utilities for the Winterfell STARK prover/verifier" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-utils/0.8.1" +documentation = "https://docs.rs/winter-utils/0.8.3" categories = ["cryptography", "no-std"] keywords = ["serialization", "transmute"] edition = "2021" diff --git a/utils/core/src/boxed.rs b/utils/core/src/boxed.rs index e7ecc4998..cb1c3fac5 100644 --- a/utils/core/src/boxed.rs +++ b/utils/core/src/boxed.rs @@ -3,8 +3,4 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. -#[cfg(not(feature = "std"))] pub use alloc::boxed::Box; - -#[cfg(feature = "std")] -pub use std::boxed::Box; diff --git a/utils/core/src/collections.rs b/utils/core/src/collections.rs index 37716b28d..00deb5eda 100644 --- a/utils/core/src/collections.rs +++ b/utils/core/src/collections.rs @@ -4,19 +4,6 @@ // LICENSE file in the root directory of this source tree. //! 
Feature-based re-export of common collection components. -//! -//! When `std` feature is enabled, this module exports collections from the Rust standard library. -//! When `alloc` feature is enabled, same collected are provided without relying on the Rust -//! standard library. -#[cfg(not(feature = "std"))] pub use alloc::collections::{btree_map, btree_set, BTreeMap, BTreeSet}; - -#[cfg(not(feature = "std"))] pub use alloc::vec::{self as vec, Vec}; - -#[cfg(feature = "std")] -pub use std::collections::{btree_map, btree_set, BTreeMap, BTreeSet}; - -#[cfg(feature = "std")] -pub use std::vec::{self as vec, Vec}; diff --git a/utils/core/src/errors.rs b/utils/core/src/errors.rs index 00f69b5c6..52df2b007 100644 --- a/utils/core/src/errors.rs +++ b/utils/core/src/errors.rs @@ -3,7 +3,7 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. -use crate::string::*; +use alloc::string::String; use core::fmt; // DESERIALIZATION ERROR diff --git a/utils/core/src/lib.rs b/utils/core/src/lib.rs index 3c44230db..1abe34f9b 100644 --- a/utils/core/src/lib.rs +++ b/utils/core/src/lib.rs @@ -5,23 +5,35 @@ //! This crate contains utility traits, functions, and macros used by other crates of Winterfell //! STARK prover and verifier. 
+#![no_std] -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; +#[cfg(feature = "std")] +extern crate std; + +#[deprecated(since = "0.8.2", note = "You should prefer to import from `alloc::boxed::*`")] pub mod boxed; +// TODO: Remove this when deprecation period for `boxed` module is over +#[allow(deprecated)] +pub use boxed::*; + +#[deprecated( + since = "0.8.2", + note = "You should prefer to import from `alloc::collections::*`" +)] pub mod collections; pub mod iterators; +#[deprecated(since = "0.8.2", note = "You should prefer to import from `alloc::string::*`")] pub mod string; -pub use boxed::*; -use collections::*; +use alloc::vec::Vec; use core::{mem, slice}; mod serde; +#[cfg(feature = "std")] +pub use serde::ReadAdapter; pub use serde::{ByteReader, ByteWriter, Deserializable, Serializable, SliceReader}; mod errors; diff --git a/utils/core/src/serde/byte_reader.rs b/utils/core/src/serde/byte_reader.rs index 5c90ee5dc..5ce991324 100644 --- a/utils/core/src/serde/byte_reader.rs +++ b/utils/core/src/serde/byte_reader.rs @@ -3,9 +3,16 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +#[cfg(feature = "std")] +use core::cell::{Ref, RefCell}; +#[cfg(feature = "std")] +use std::io::BufRead; + use super::{Deserializable, DeserializationError}; -use crate::collections::*; -use crate::string::*; // BYTE READER TRAIT // ================================================================================================ @@ -194,10 +201,432 @@ pub trait ByteReader { } } +// STANDARD LIBRARY ADAPTER +// ================================================================================================ + +/// An adapter of [ByteReader] to any type that implements [std::io::Read] +/// +/// In particular, this covers things like [std::fs::File], standard input, etc. 
+#[cfg(feature = "std")] +pub struct ReadAdapter<'a> { + // NOTE: The [ByteReader] trait does not currently support reader implementations that require + // mutation during `peek_u8`, `has_more_bytes`, and `check_eor`. These (or equivalent) + // operations on the standard library [std::io::BufRead] trait require a mutable reference, as + // it may be necessary to read from the underlying input to implement them. + // + // To handle this, we wrap the underlying reader in an [RefCell], this allows us to mutate the + // reader if necessary during a call to one of the above-mentioned trait methods, without + // sacrificing safety - at the cost of enforcing Rust's borrowing semantics dynamically. + // + // This should not be a problem in practice, except in the case where `read_slice` is called, + // and the reference returned is from `reader` directly, rather than `buf`. If a call to one + // of the above-mentioned methods is made while that reference is live, and we attempt to read + // from `reader`, a panic will occur. + // + // Ultimately, this should be addressed by making the [ByteReader] trait align with the standard + // library I/O traits, so this is a temporary solution. + reader: RefCell>, + // A temporary buffer to store chunks read from `reader` that are larger than what is required for + // the higher-level [ByteReader] APIs. + // + // By default we attempt to satisfy reads from `reader` directly, but that is not always possible. + buf: alloc::vec::Vec, + // The position in `buf` at which we should start reading the next byte, when `buf` is non-empty. + pos: usize, + // This is set when we attempt to read from `reader` and get an empty buffer. This indicates that + // once we exhaust `buf`, we have truly reached end-of-file. + // + // We will use this to more accurately handle functions like `has_more_bytes` when this is set. 
+ guaranteed_eof: bool, +} + +#[cfg(feature = "std")] +impl<'a> ReadAdapter<'a> { + /// Create a new [ByteReader] adapter for the given implementation of [std::io::Read] + pub fn new(reader: &'a mut dyn std::io::Read) -> Self { + Self { + reader: RefCell::new(std::io::BufReader::with_capacity(256, reader)), + buf: Default::default(), + pos: 0, + guaranteed_eof: false, + } + } + + /// Get the internal adapter buffer as a (possibly empty) slice of bytes + #[inline(always)] + fn buffer(&self) -> &[u8] { + self.buf.get(self.pos..).unwrap_or(&[]) + } + + /// Get the internal adapter buffer as a slice of bytes, or `None` if the buffer is empty + #[inline(always)] + fn non_empty_buffer(&self) -> Option<&[u8]> { + self.buf.get(self.pos..).filter(|b| !b.is_empty()) + } + + /// Return the current reader buffer as a (possibly empty) slice of bytes. + /// + /// This buffer being empty _does not_ mean we're at EOF, you must call [non_empty_reader_buffer_mut] first. + #[inline(always)] + fn reader_buffer(&self) -> Ref<'_, [u8]> { + Ref::map(self.reader.borrow(), |r| r.buffer()) + } + + /// Return the current reader buffer, reading from the underlying reader + /// if the buffer is empty. + /// + /// Returns `Ok` only if the buffer is non-empty, and no errors occurred + /// while filling it (if filling was needed). + fn non_empty_reader_buffer_mut(&mut self) -> Result<&[u8], DeserializationError> { + use std::io::ErrorKind; + let buf = self.reader.get_mut().fill_buf().map_err(|e| match e.kind() { + ErrorKind::UnexpectedEof => DeserializationError::UnexpectedEOF, + e => DeserializationError::UnknownError(e.to_string()), + })?; + if buf.is_empty() { + self.guaranteed_eof = true; + Err(DeserializationError::UnexpectedEOF) + } else { + Ok(buf) + } + } + + /// Same as [non_empty_reader_buffer_mut], but with dynamically-enforced + /// borrow check rules so that it can be called in functions like `peek_u8`. 
+ /// + /// This comes with overhead for the dynamic checks, so you should prefer + /// to call [non_empty_reader_buffer_mut] if you already have a mutable + /// reference to `self` + fn non_empty_reader_buffer(&self) -> Result, DeserializationError> { + use std::io::ErrorKind; + let mut reader = self.reader.borrow_mut(); + let buf = reader.fill_buf().map_err(|e| match e.kind() { + ErrorKind::UnexpectedEof => DeserializationError::UnexpectedEOF, + e => DeserializationError::UnknownError(e.to_string()), + })?; + if buf.is_empty() { + Err(DeserializationError::UnexpectedEOF) + } else { + // Re-borrow immutably + drop(reader); + Ok(self.reader_buffer()) + } + } + + /// Returns true if there is sufficient capacity remaining in `buf` to hold `n` bytes + #[inline] + fn has_remaining_capacity(&self, n: usize) -> bool { + let remaining = self.buf.capacity() - self.buffer().len(); + remaining >= n + } + + /// Takes the next byte from the input, returning an error if the operation fails + fn pop(&mut self) -> Result { + if let Some(byte) = self.non_empty_buffer().map(|b| b[0]) { + self.pos += 1; + return Ok(byte); + } + let result = self.non_empty_reader_buffer_mut().map(|b| b[0]); + if result.is_ok() { + self.reader.get_mut().consume(1); + } else { + self.guaranteed_eof = true; + } + result + } + + /// Takes the next `N` bytes from the input as an array, returning an error if the operation fails + fn read_exact(&mut self) -> Result<[u8; N], DeserializationError> { + let buf = self.buffer(); + let mut output = [0; N]; + match buf.len() { + 0 => { + let buf = self.non_empty_reader_buffer_mut()?; + if buf.len() < N { + return Err(DeserializationError::UnexpectedEOF); + } + // SAFETY: This copy is guaranteed to be safe, as we have validated above + // that `buf` has at least N bytes, and `output` is defined to be exactly + // N bytes. 
+                unsafe {
+                    core::ptr::copy_nonoverlapping(buf.as_ptr(), output.as_mut_ptr(), N);
+                }
+                self.reader.get_mut().consume(N);
+            }
+            n if n >= N => {
+                // SAFETY: This copy is guaranteed to be safe, as we have validated above
+                // that `buf` has at least N bytes, and `output` is defined to be exactly
+                // N bytes.
+                unsafe {
+                    core::ptr::copy_nonoverlapping(buf.as_ptr(), output.as_mut_ptr(), N);
+                }
+                self.pos += N;
+            }
+            n => {
+                // We have to fill from both the local and reader buffers
+                self.non_empty_reader_buffer_mut()?;
+                let reader_buf = self.reader_buffer();
+                match reader_buf.len() {
+                    #[cfg(debug_assertions)]
+                    0 => unreachable!("expected reader buffer to be non-empty to reach here"),
+                    #[cfg(not(debug_assertions))]
+                    // SAFETY: The call to `non_empty_reader_buffer_mut` will return an error
+                    // if `reader_buffer` is empty, as a result it is impossible to reach
+                    // here with a length of 0.
+                    0 => unsafe { core::hint::unreachable_unchecked() },
+                    // We got enough in one request
+                    m if m + n >= N => {
+                        let needed = N - n;
+                        let dst = output.as_mut_ptr();
+                        // SAFETY: Both copies are guaranteed to be in-bounds:
+                        //
+                        // * `output` is defined to be exactly N bytes
+                        // * `buf` is guaranteed to be < N bytes
+                        // * `reader_buf` is guaranteed to have the remaining bytes needed,
+                        //   and we only copy exactly that many bytes
+                        unsafe {
+                            core::ptr::copy_nonoverlapping(self.buffer().as_ptr(), dst, n);
+                            core::ptr::copy_nonoverlapping(reader_buf.as_ptr(), dst.add(n), needed);
+                            drop(reader_buf);
+                        }
+                        self.pos += n;
+                        self.reader.get_mut().consume(needed);
+                    }
+                    // We didn't get enough, but haven't necessarily reached eof yet, so fall back
+                    // to filling `self.buf`
+                    m => {
+                        let needed = N - (m + n);
+                        drop(reader_buf);
+                        self.buffer_at_least(needed)?;
+                        debug_assert!(self.buffer().len() >= N, "expected buffer to be at least {N} bytes after call to buffer_at_least");
+                        // SAFETY: This is guaranteed to be an in-bounds copy
+                        unsafe {
+
                            core::ptr::copy_nonoverlapping(
+                                self.buffer().as_ptr(),
+                                output.as_mut_ptr(),
+                                N,
+                            );
+                        }
+                        self.pos += N;
+                        return Ok(output);
+                    }
+                }
+            }
+        }
+
+        // Check if we should reset our internal buffer
+        if self.buffer().is_empty() && self.pos > 0 {
+            unsafe {
+                self.buf.set_len(0);
+            }
+        }
+
+        Ok(output)
+    }
+
+    /// Fill `self.buf` with `count` bytes
+    ///
+    /// This should only be called when we can't read from the reader directly
+    fn buffer_at_least(&mut self, mut count: usize) -> Result<(), DeserializationError> {
+        // Read until we have at least `count` bytes, or until we reach end-of-file,
+        // whichever comes first.
+        loop {
+            // If we have successfully read `count` bytes, we're done
+            if count == 0 || self.buf.len() >= count {
+                break Ok(());
+            }
+
+            // This operation will return an error if the underlying reader hits EOF
+            self.non_empty_reader_buffer_mut()?;
+
+            // Extend `self.buf` with the bytes read from the underlying reader.
+            //
+            // NOTE: We have to re-borrow the reader buffer here, since we can't get a mutable
+            // reference to `self.buf` while holding an immutable reference to the reader buffer.
+            let reader = self.reader.get_mut();
+            let buf = reader.buffer();
+            let consumed = buf.len();
+            self.buf.extend_from_slice(buf);
+            reader.consume(consumed);
+            count = count.saturating_sub(consumed);
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+impl<'a> ByteReader for ReadAdapter<'a> {
+    #[inline(always)]
+    fn read_u8(&mut self) -> Result<u8, DeserializationError> {
+        self.pop()
+    }
+
+    /// NOTE: If we happen to not have any bytes buffered yet when this is called, then we will be
+    /// forced to try and read from the underlying reader. This requires a mutable reference, which
+    /// is obtained dynamically via [RefCell].
+    ///
+
+    /// Callers must ensure that they do not hold any immutable references to the buffer of this
+    /// reader when calling this function so as to avoid a situation in which the dynamic borrow
+    /// check fails. Specifically, you must not be holding a reference to the result of
+    /// [Self::read_slice] when this function is called.
+    ///
+ fn peek_u8(&self) -> Result { + if let Some(byte) = self.buffer().first() { + return Ok(*byte); + } + self.non_empty_reader_buffer().map(|b| b[0]) + } + + fn read_slice(&mut self, len: usize) -> Result<&[u8], DeserializationError> { + // Edge case + if len == 0 { + return Ok(&[]); + } + + // If we have unused buffer, and the consumed portion is + // large enough, we will move the unused portion of the buffer + // to the start, freeing up bytes at the end for more reads + // before forcing a reallocation + let should_optimize_storage = self.pos >= 16 && !self.has_remaining_capacity(len); + if should_optimize_storage { + // We're going to optimize storage first + let buf = self.buffer(); + let src = buf.as_ptr(); + let count = buf.len(); + let dst = self.buf.as_mut_ptr(); + unsafe { + core::ptr::copy(src, dst, count); + self.buf.set_len(count); + self.pos = 0; + } + } + + // Fill the buffer so we have at least `len` bytes available, + // this will return an error if we hit EOF first + self.buffer_at_least(len)?; + + Ok(&self.buffer()[0..len]) + } + + #[inline] + fn read_array(&mut self) -> Result<[u8; N], DeserializationError> { + if N == 0 { + return Ok([0; N]); + } + self.read_exact() + } + + fn check_eor(&self, num_bytes: usize) -> Result<(), DeserializationError> { + // Do we have sufficient data in the local buffer? + let buffer_len = self.buffer().len(); + if buffer_len >= num_bytes { + return Ok(()); + } + + // What about if we include what is in the local buffer and the reader's buffer? 
+ let reader_buffer_len = self.non_empty_reader_buffer().map(|b| b.len())?; + let buffer_len = buffer_len + reader_buffer_len; + if buffer_len >= num_bytes { + return Ok(()); + } + + // We have no more input, thus can't fulfill a request of `num_bytes` + if self.guaranteed_eof { + return Err(DeserializationError::UnexpectedEOF); + } + + // Because this function is read-only, we must optimistically assume we can read `num_bytes` + // from the input, and fail later if that does not hold. We know we're not at EOF yet, but + // that's all we can say without buffering more from the reader. We could make use of + // `buffer_at_least`, which would guarantee a correct result, but it would also impose + // additional restrictions on the use of this function, e.g. not using it while holding a + // reference returned from `read_slice`. Since it is not a memory safety violation to return + // an optimistic result here, it makes for a better tradeoff. + Ok(()) + } + + #[inline] + fn has_more_bytes(&self) -> bool { + !self.buffer().is_empty() || self.non_empty_reader_buffer().is_ok() + } +} + +// CURSOR +// ================================================================================================ + +#[cfg(feature = "std")] +macro_rules! cursor_remaining_buf { + ($cursor:ident) => {{ + let buf = $cursor.get_ref().as_ref(); + let start = $cursor.position().min(buf.len() as u64) as usize; + &buf[start..] 
+ }}; +} + +#[cfg(feature = "std")] +impl> ByteReader for std::io::Cursor { + fn read_u8(&mut self) -> Result { + let buf = cursor_remaining_buf!(self); + if buf.is_empty() { + Err(DeserializationError::UnexpectedEOF) + } else { + let byte = buf[0]; + self.set_position(self.position() + 1); + Ok(byte) + } + } + + fn peek_u8(&self) -> Result { + cursor_remaining_buf!(self) + .first() + .copied() + .ok_or(DeserializationError::UnexpectedEOF) + } + + fn read_slice(&mut self, len: usize) -> Result<&[u8], DeserializationError> { + let pos = self.position(); + let size = self.get_ref().as_ref().len() as u64; + if size.saturating_sub(pos) < len as u64 { + Err(DeserializationError::UnexpectedEOF) + } else { + self.set_position(pos + len as u64); + let start = pos.min(size) as usize; + Ok(&self.get_ref().as_ref()[start..(start + len)]) + } + } + + fn read_array(&mut self) -> Result<[u8; N], DeserializationError> { + self.read_slice(N).map(|bytes| { + let mut result = [0u8; N]; + result.copy_from_slice(bytes); + result + }) + } + + fn check_eor(&self, num_bytes: usize) -> Result<(), DeserializationError> { + if cursor_remaining_buf!(self).len() >= num_bytes { + Ok(()) + } else { + Err(DeserializationError::UnexpectedEOF) + } + } + + #[inline] + fn has_more_bytes(&self) -> bool { + let pos = self.position(); + let size = self.get_ref().as_ref().len() as u64; + pos < size + } +} + // SLICE READER // ================================================================================================ /// Implements [ByteReader] trait for a slice of bytes. +/// +/// NOTE: If you are building with the `std` feature, you should probably prefer [std::io::Cursor] +/// instead. However, [SliceReader] is still useful in no-std environments until stabilization of +/// the `core_io_borrowed_buf` feature. 
pub struct SliceReader<'a> { source: &'a [u8], pos: usize, @@ -249,3 +678,66 @@ impl<'a> ByteReader for SliceReader<'a> { self.pos < self.source.len() } } + +#[cfg(all(test, feature = "std"))] +mod tests { + use super::*; + use crate::ByteWriter; + use std::io::Cursor; + + #[test] + fn read_adapter_empty() -> Result<(), DeserializationError> { + let mut reader = std::io::empty(); + let mut adapter = ReadAdapter::new(&mut reader); + assert!(!adapter.has_more_bytes()); + assert_eq!(adapter.check_eor(8), Err(DeserializationError::UnexpectedEOF)); + assert_eq!(adapter.peek_u8(), Err(DeserializationError::UnexpectedEOF)); + assert_eq!(adapter.read_u8(), Err(DeserializationError::UnexpectedEOF)); + assert_eq!(adapter.read_slice(0), Ok([].as_slice())); + assert_eq!(adapter.read_slice(1), Err(DeserializationError::UnexpectedEOF)); + assert_eq!(adapter.read_array(), Ok([])); + assert_eq!(adapter.read_array::<1>(), Err(DeserializationError::UnexpectedEOF)); + Ok(()) + } + + #[test] + fn read_adapter_passthrough() -> Result<(), DeserializationError> { + let mut reader = std::io::repeat(0b101); + let mut adapter = ReadAdapter::new(&mut reader); + assert!(adapter.has_more_bytes()); + assert_eq!(adapter.check_eor(8), Ok(())); + assert_eq!(adapter.peek_u8(), Ok(0b101)); + assert_eq!(adapter.read_u8(), Ok(0b101)); + assert_eq!(adapter.read_slice(0), Ok([].as_slice())); + assert_eq!(adapter.read_slice(4), Ok([0b101, 0b101, 0b101, 0b101].as_slice())); + assert_eq!(adapter.read_array(), Ok([])); + assert_eq!(adapter.read_array(), Ok([0b101, 0b101])); + Ok(()) + } + + #[test] + fn read_adapter_exact() { + const VALUE: usize = 2048; + let mut reader = Cursor::new(VALUE.to_le_bytes()); + let mut adapter = ReadAdapter::new(&mut reader); + assert_eq!(usize::from_le_bytes(adapter.read_array().unwrap()), VALUE); + assert!(!adapter.has_more_bytes()); + assert_eq!(adapter.peek_u8(), Err(DeserializationError::UnexpectedEOF)); + assert_eq!(adapter.read_u8(), 
Err(DeserializationError::UnexpectedEOF)); + } + + #[test] + fn read_adapter_roundtrip() { + const VALUE: usize = 2048; + + // Write VALUE to storage + let mut cursor = Cursor::new([0; core::mem::size_of::()]); + cursor.write_usize(VALUE); + + // Read VALUE from storage + cursor.set_position(0); + let mut adapter = ReadAdapter::new(&mut cursor); + + assert_eq!(adapter.read_usize(), Ok(VALUE)); + } +} diff --git a/utils/core/src/serde/byte_writer.rs b/utils/core/src/serde/byte_writer.rs index 1a32b484b..bdcbc24e2 100644 --- a/utils/core/src/serde/byte_writer.rs +++ b/utils/core/src/serde/byte_writer.rs @@ -4,7 +4,6 @@ // LICENSE file in the root directory of this source tree. use super::Serializable; -use crate::collections::*; // BYTE WRITER TRAIT // ================================================================================================ @@ -100,7 +99,11 @@ pub trait ByteWriter: Sized { /// Serializes all `elements` and writes the resulting bytes into `self`. /// /// This method does not write any metadata (e.g. number of serialized elements) into `self`. 
- fn write_many(&mut self, elements: &[S]) { + fn write_many(&mut self, elements: T) + where + T: IntoIterator, + S: Serializable, + { for element in elements { element.write_into(self); } @@ -110,7 +113,20 @@ pub trait ByteWriter: Sized { // BYTE WRITER IMPLEMENTATIONS // ================================================================================================ -impl ByteWriter for Vec { +#[cfg(feature = "std")] +impl ByteWriter for W { + #[inline(always)] + fn write_u8(&mut self, byte: u8) { + ::write_all(self, &[byte]).expect("write failed") + } + #[inline(always)] + fn write_bytes(&mut self, bytes: &[u8]) { + ::write_all(self, bytes).expect("write failed") + } +} + +#[cfg(not(feature = "std"))] +impl ByteWriter for alloc::vec::Vec { fn write_u8(&mut self, value: u8) { self.push(value); } @@ -129,3 +145,24 @@ fn encoded_len(value: usize) -> usize { let len = zeros.saturating_sub(1) / 7; 9 - core::cmp::min(len, 8) } + +#[cfg(all(test, feature = "std"))] +mod tests { + use super::*; + use std::io::Cursor; + + #[test] + fn write_adapter_passthrough() { + let mut writer = Cursor::new([0u8; 128]); + writer.write_bytes(b"nope"); + let buf = writer.get_ref(); + assert_eq!(&buf[..4], b"nope"); + } + + #[test] + #[should_panic] + fn write_adapter_writer_out_of_capacity() { + let mut writer = Cursor::new([0; 2]); + writer.write_bytes(b"nope"); + } +} diff --git a/utils/core/src/serde/mod.rs b/utils/core/src/serde/mod.rs index fc23ff532..335af1c4d 100644 --- a/utils/core/src/serde/mod.rs +++ b/utils/core/src/serde/mod.rs @@ -3,12 +3,20 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. 
+use alloc::{ + collections::{BTreeMap, BTreeSet}, + string::String, + vec::Vec, +}; + use super::DeserializationError; -use crate::collections::*; mod byte_reader; pub use byte_reader::{ByteReader, SliceReader}; +#[cfg(feature = "std")] +pub use byte_reader::ReadAdapter; + mod byte_writer; pub use byte_writer::ByteWriter; @@ -16,7 +24,7 @@ pub use byte_writer::ByteWriter; // ================================================================================================ /// Defines how to serialize `Self` into bytes. -pub trait Serializable: Sized { +pub trait Serializable { // REQUIRED METHODS // -------------------------------------------------------------------------------------------- /// Serializes `self` into bytes and writes these bytes into the `target`. @@ -40,6 +48,12 @@ pub trait Serializable: Sized { } } +impl Serializable for &T { + fn write_into(&self, target: &mut W) { + (*self).write_into(target) + } +} + impl Serializable for () { fn write_into(&self, _target: &mut W) {} } @@ -176,21 +190,53 @@ impl Serializable for Option { } } -impl Serializable for &Option { +impl Serializable for [T; C] { fn write_into(&self, target: &mut W) { - match self { - Some(v) => { - target.write_bool(true); - v.write_into(target); - } - None => target.write_bool(false), + target.write_many(self) + } +} + +impl Serializable for [T] { + fn write_into(&self, target: &mut W) { + target.write_usize(self.len()); + for element in self.iter() { + element.write_into(target); } } } -impl Serializable for [T; C] { +impl Serializable for Vec { fn write_into(&self, target: &mut W) { - target.write_many(self) + target.write_usize(self.len()); + target.write_many(self); + } +} + +impl Serializable for BTreeMap { + fn write_into(&self, target: &mut W) { + target.write_usize(self.len()); + target.write_many(self); + } +} + +impl Serializable for BTreeSet { + fn write_into(&self, target: &mut W) { + target.write_usize(self.len()); + target.write_many(self); + } +} + +impl Serializable 
for str { + fn write_into(&self, target: &mut W) { + target.write_usize(self.len()); + target.write_many(self.as_bytes()); + } +} + +impl Serializable for String { + fn write_into(&self, target: &mut W) { + target.write_usize(self.len()); + target.write_many(self.as_bytes()); } } @@ -384,3 +430,36 @@ impl Deserializable for [T; C] { Ok(res) } } + +impl Deserializable for Vec { + fn read_from(source: &mut R) -> Result { + let len = source.read_usize()?; + source.read_many(len) + } +} + +impl Deserializable for BTreeMap { + fn read_from(source: &mut R) -> Result { + let len = source.read_usize()?; + let data = source.read_many(len)?; + Ok(BTreeMap::from_iter(data)) + } +} + +impl Deserializable for BTreeSet { + fn read_from(source: &mut R) -> Result { + let len = source.read_usize()?; + let data = source.read_many(len)?; + Ok(BTreeSet::from_iter(data)) + } +} + +impl Deserializable for String { + fn read_from(source: &mut R) -> Result { + let len = source.read_usize()?; + let data = source.read_many(len)?; + + String::from_utf8(data) + .map_err(|err| DeserializationError::InvalidValue(format!("{}", err))) + } +} diff --git a/utils/core/src/string.rs b/utils/core/src/string.rs index baff60d49..e2ff9cac3 100644 --- a/utils/core/src/string.rs +++ b/utils/core/src/string.rs @@ -4,13 +4,5 @@ // LICENSE file in the root directory of this source tree. //! Feature-based re-export of common string components. -//! -//! When `std` feature is enabled, this module exports string components from the Rust standard -//! library. When `alloc` feature is enabled, same components are provided without relying on the -//! Rust standard library. 
-#[cfg(not(feature = "std"))] pub use alloc::string::{String, ToString}; - -#[cfg(feature = "std")] -pub use std::string::{String, ToString}; diff --git a/utils/core/src/tests.rs b/utils/core/src/tests.rs index af92aaa8c..ef1484c59 100644 --- a/utils/core/src/tests.rs +++ b/utils/core/src/tests.rs @@ -3,7 +3,8 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. -use super::{collections::*, ByteReader, ByteWriter, Serializable, SliceReader}; +use super::{ByteReader, ByteWriter, Serializable, SliceReader}; +use alloc::vec::Vec; use proptest::prelude::{any, proptest}; // VECTOR UTILS TESTS @@ -139,7 +140,7 @@ fn write_serializable_batch() { assert_eq!(64, target.len()); let batch2 = [5u128, 6, 7, 8]; - target.write_many(&batch2); + target.write_many(batch2); assert_eq!(128, target.len()); let mut reader = SliceReader::new(&target); @@ -157,7 +158,7 @@ fn write_serializable_array_batch() { assert_eq!(64, target.len()); let batch2 = [[5u128, 6], [7, 8]]; - target.write_many(&batch2); + target.write_many(batch2); assert_eq!(128, target.len()); let mut reader = SliceReader::new(&target); diff --git a/utils/rand/Cargo.toml b/utils/rand/Cargo.toml index 2c944d563..181f06bb3 100644 --- a/utils/rand/Cargo.toml +++ b/utils/rand/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winter-rand-utils" -version = "0.8.1" +version = "0.8.3" description = "Random value generation utilities for Winterfell crates" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-rand-utils/0.8.1" +documentation = "https://docs.rs/winter-rand-utils/0.8.3" categories = ["cryptography"] keywords = ["rand"] edition = "2021" diff --git a/verifier/Cargo.toml b/verifier/Cargo.toml index 4ef474827..a6f9f5517 100644 --- a/verifier/Cargo.toml +++ b/verifier/Cargo.toml @@ -1,12 +1,12 @@ [package] name = 
"winter-verifier" -version = "0.8.1" +version = "0.8.3" description = "Winterfell STARK verifier" authors = ["winterfell contributors"] readme = "README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winter-verifier/0.8.1" +documentation = "https://docs.rs/winter-verifier/0.8.3" categories = ["cryptography", "no-std"] keywords = ["crypto", "zkp", "stark", "verifier"] edition = "2021" diff --git a/verifier/src/channel.rs b/verifier/src/channel.rs index 752626c10..fa8c28e86 100644 --- a/verifier/src/channel.rs +++ b/verifier/src/channel.rs @@ -8,10 +8,10 @@ use air::{ proof::{ParsedOodFrame, Queries, StarkProof, Table}, Air, EvaluationFrame, LagrangeKernelEvaluationFrame, }; +use alloc::{string::ToString, vec::Vec}; use crypto::{BatchMerkleProof, ElementHasher, MerkleTree}; use fri::VerifierChannel as FriVerifierChannel; use math::{FieldElement, StarkField}; -use utils::{collections::*, string::*}; // VERIFIER CHANNEL // ================================================================================================ diff --git a/verifier/src/composer.rs b/verifier/src/composer.rs index fc7c60d12..243e04c11 100644 --- a/verifier/src/composer.rs +++ b/verifier/src/composer.rs @@ -4,8 +4,8 @@ // LICENSE file in the root directory of this source tree. use air::{proof::Table, Air, DeepCompositionCoefficients, EvaluationFrame}; +use alloc::vec::Vec; use math::{batch_inversion, FieldElement}; -use utils::collections::*; // DEEP COMPOSER // ================================================================================================ diff --git a/verifier/src/errors.rs b/verifier/src/errors.rs index f9f877058..068f3d5ae 100644 --- a/verifier/src/errors.rs +++ b/verifier/src/errors.rs @@ -5,8 +5,8 @@ //! Contains common error types for prover and verifier. 
+use alloc::string::String; use core::fmt; -use utils::string::*; // VERIFIER ERROR // ================================================================================================ diff --git a/verifier/src/evaluator.rs b/verifier/src/evaluator.rs index dd3d8dfb6..e70aef264 100644 --- a/verifier/src/evaluator.rs +++ b/verifier/src/evaluator.rs @@ -7,8 +7,8 @@ use air::{ Air, AuxTraceRandElements, ConstraintCompositionCoefficients, EvaluationFrame, LagrangeKernelEvaluationFrame, LagrangeKernelTransitionConstraints, }; +use alloc::vec::Vec; use math::{polynom, FieldElement}; -use utils::collections::*; // CONSTRAINT EVALUATION // ================================================================================================ diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index 9b2cd55c5..20020e89c 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -26,9 +26,8 @@ //! need to be in tens of thousands. And even for hundreds of thousands of asserted values, the //! verification time should not exceed 50 ms. 
-#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] -#[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; @@ -45,9 +44,14 @@ use math::{ FieldElement, ToElements, }; +#[deprecated( + since = "0.8.3", + note = "You should prefer the types from libstd/liballoc instead" )] +#[allow(deprecated)] +pub use utils::collections::*; pub use utils::{ - collections::*, ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, - SliceReader, + ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, SliceReader, }; pub use crypto; diff --git a/winterfell/Cargo.toml b/winterfell/Cargo.toml index 68be128d0..e1cf4b00d 100644 --- a/winterfell/Cargo.toml +++ b/winterfell/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "winterfell" -version = "0.8.1" +version = "0.8.3" description = "Winterfell STARK prover and verifier" authors = ["winterfell contributors"] readme = "../README.md" license = "MIT" repository = "https://github.com/novifinancial/winterfell" -documentation = "https://docs.rs/winterfell/0.8.1" +documentation = "https://docs.rs/winterfell/0.8.3" categories = ["cryptography", "no-std"] keywords = ["crypto", "zkp", "stark", "prover", "verifier"] edition = "2021" diff --git a/winterfell/src/lib.rs b/winterfell/src/lib.rs index 69e768921..f4fdfc8cc 100644 --- a/winterfell/src/lib.rs +++ b/winterfell/src/lib.rs @@ -18,12 +18,12 @@ //! 1. Define an *algebraic intermediate representation* (AIR) for your computation. This can //! be done by implementing [Air] trait. //! 2. Define an execution trace for your computation. This can be done by implementing [Trace] -//! trait. Alternatively, you can use [TraceTable] struct which already implements [Trace] +//! trait. Alternatively, you can use the [TraceTable] struct which already implements [Trace] //! trait in cases when this generic implementation works for your use case. //! 3. Execute your computation and record its execution trace. //! 4. 
Define your prover by implementing [Prover] trait. Then execute [Prover::prove()] function //! passing the trace generated in the previous step into it as a parameter. The function will -//! return a instance of [StarkProof]. +//! return an instance of [StarkProof]. //! //! This `StarkProof` can be serialized and sent to a STARK verifier for verification. The size //! of proof depends on the specifics of a given computation, but for most computations it should @@ -31,10 +31,10 @@ //! computations). //! //! Proof generation time is also highly dependent on the specifics of a given computation, but -//! also depends on the capabilities of the machine used to generate the proofs (i.e. on number +//! also depends on the capabilities of the machine used to generate the proofs (i.e. on the number //! of CPU cores and memory bandwidth). //! -//! When the crate is compiled with `concurrent` feature enabled, proof generation will be +//! When the crate is compiled with the `concurrent` feature enabled, proof generation will be //! performed in multiple threads (usually, as many threads as there are logical cores on the //! machine). The number of threads can be configured via `RAYON_NUM_THREADS` environment //! variable. @@ -44,18 +44,18 @@ //! do the following: //! //! 1. Define an *algebraic intermediate representation* (AIR) for you computation. This AIR -//! must be the same as the one used during proof generation process. +//! must be the same as the one used during the proof generation process. //! 2. Execute [verify()] function and supply the AIR of your computation together with the //! [StarkProof] and related public inputs as parameters. //! //! Proof verification is extremely fast and is nearly independent of the complexity of the -//! computation being verified. In vast majority of cases proofs can be verified in 3 - 5 ms +//! computation being verified. In the vast majority of cases, proofs can be verified in 3 - 5 ms //! 
on a modern mid-range laptop CPU (using a single core). //! //! There is one exception, however: if a computation requires a lot of `sequence` assertions //! (see [Assertion] for more info), the verification time will grow linearly in the number of //! asserted values. But for the impact to be noticeable, the number of asserted values would -//! need to be in tens of thousands. And even for hundreds of thousands of asserted values, the +//! need to be in tens of thousands. And even for hundreds of thousands of asserted values, the //! verification time should not exceed 50 ms. //! //! # Examples @@ -77,10 +77,10 @@ //! ``` //! //! This computation starts with an element in a finite field and then, for the specified number -//! of steps, cubes the element and adds value `42` to it. +//! of steps, cubes the element, and adds value `42` to it. //! //! Suppose, we run this computation for a million steps and get some result. Using STARKs we can -//! prove that we did the work correctly without requiring any verifying party to re-execute the +//! prove that we did the work correctly without requiring any verifying party to re-execute the //! computation. Here is how to do it: //! //! First, we need to define an *execution trace* for our computation. This trace should capture @@ -99,7 +99,7 @@ //! | ... | //! | 1,048,575 | 247770943907079986105389697876176586605 | //! -//! To record the trace, we'll use the [TraceTable] struct. The function below, is just a +//! To record the trace, we'll use the [TraceTable] struct. The function below is just a //! modified version of the `do_work()` function which records every intermediate state of the //! computation in the [TraceTable] struct: //! @@ -346,7 +346,7 @@ //! type TraceLde> = DefaultTraceLde; //! type ConstraintEvaluator<'a, E: FieldElement> = //! DefaultConstraintEvaluator<'a, Self::Air, E>; -//! +//! //! // Our public inputs consist of the first and last value in the execution trace. //! 
fn get_pub_inputs(&self, trace: &Self::Trace) -> PublicInputs { //! let last_step = trace.length() - 1; @@ -367,7 +367,7 @@ //! domain: &StarkDomain, //! ) -> (Self::TraceLde, TracePolyTable) { //! DefaultTraceLde::new(trace_info, main_trace, domain) -//! } +//! } //! //! fn new_evaluator<'a, E: FieldElement>( //! &self, @@ -384,7 +384,7 @@ //! //! In the code below, we will execute our computation and get the result together with the proof //! that the computation was executed correctly. Then, we will use this proof (together with the -//! public inputs) to verify that we did in fact execute the computation and got the claimed +//! public inputs) to verify that we did execute the computation and got the claimed //! result. //! //! ``` @@ -507,7 +507,7 @@ //! # ) -> (Self::TraceLde, TracePolyTable) { //! # DefaultTraceLde::new(trace_info, main_trace, domain) //! # } -//! # +//! # //! # fn new_evaluator<'a, E: FieldElement>( //! # &self, //! # air: &'a Self::Air, @@ -567,9 +567,9 @@ //! * STARKs vs. SNARKs: [A Cambrian Explosion of Crypto Proofs](https://nakamoto.com/cambrian-explosion-of-crypto-proofs/) //! //! Vitalik Buterin's blog series on zk-STARKs: -//! * [STARKs, part 1: Proofs with Polynomials](https://vitalik.ca/general/2017/11/09/starks_part_1.html) -//! * [STARKs, part 2: Thank Goodness it's FRI-day](https://vitalik.ca/general/2017/11/22/starks_part_2.html) -//! * [STARKs, part 3: Into the Weeds](https://vitalik.ca/general/2018/07/21/starks_part_3.html) +//! * [STARKs, part 1: Proofs with Polynomials](https://vitalik.eth.limo/general/2017/11/09/starks_part_1.html) +//! * [STARKs, part 2: Thank Goodness it's FRI-day](https://vitalik.eth.limo/general/2017/11/22/starks_part_2.html) +//! * [STARKs, part 3: Into the Weeds](https://vitalik.eth.limo/general/2018/07/21/starks_part_3.html) //! //! StarkWare's STARK Math blog series: //! 
* [STARK Math: The Journey Begins](https://medium.com/starkware/stark-math-the-journey-begins-51bd2b063c71) @@ -578,8 +578,9 @@ //! * [Low Degree Testing](https://medium.com/starkware/low-degree-testing-f7614f5172db) //! * [A Framework for Efficient STARKs](https://medium.com/starkware/a-framework-for-efficient-starks-19608ba06fbe) -#![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] +#![no_std] + +#[cfg(test)] extern crate alloc; pub use prover::{ diff --git a/winterfell/src/tests.rs b/winterfell/src/tests.rs index b794d93e4..fa9118d5c 100644 --- a/winterfell/src/tests.rs +++ b/winterfell/src/tests.rs @@ -1,13 +1,12 @@ use super::*; +use alloc::vec; +use alloc::vec::Vec; use prover::{ crypto::{hashers::Blake3_256, DefaultRandomCoin}, math::{fields::f64::BaseElement, ExtensionOf, FieldElement}, matrix::ColMatrix, }; -#[cfg(not(feature = "std"))] -use alloc::{vec, vec::Vec}; - #[test] fn test_lagrange_kernel_air() { let trace = LagrangeMockTrace::new();