diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95eeef44..1313d644 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -86,7 +86,7 @@ jobs: args: ${{matrix.features}} no-std: - name: build ${{matrix.toolchain}} no-std for wasm32-unknown-unknown + name: build ${{matrix.toolchain}} no-std for wasm32-unknown-unknown runs-on: ubuntu-latest strategy: fail-fast: false @@ -106,4 +106,28 @@ jobs: uses: actions-rs/cargo@v1 with: command: build - args: --no-default-features --target wasm32-unknown-unknown \ No newline at end of file + args: --no-default-features --target wasm32-unknown-unknown + + docs: + name: Verify the docs on ${{matrix.toolchain}} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + toolchain: [stable] + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: Install rust + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{matrix.toolchain}} + override: true + - name: Check docs + uses: actions-rs/cargo@v1 + env: + RUSTDOCFLAGS: -D warnings + with: + command: doc + args: --verbose --all-features --keep-going diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 47a46eb2..e1452de4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -72,7 +72,7 @@ For example, a new change to the AIR crate might have the following message: `fe // ================================================================================ ``` -- [Rustfmt](https://github.com/rust-lang/rustfmt) and [Clippy](https://github.com/rust-lang/rust-clippy) linting is included in CI pipeline. Anyways it's prefferable to run linting locally before push: +- [Rustfmt](https://github.com/rust-lang/rustfmt) and [Clippy](https://github.com/rust-lang/rust-clippy) linting is included in the CI pipeline. However, it's preferable to run linting locally before pushing: ``` cargo fix --allow-staged --allow-dirty --all-targets --all-features; cargo fmt; cargo clippy --workspace --all-targets --all-features -- -D warnings ``` diff --git a/src/dsa/rpo_falcon512/mod.rs b/src/dsa/rpo_falcon512/mod.rs index 5bbe5cfb..3ccc266e 100644 --- a/src/dsa/rpo_falcon512/mod.rs +++ b/src/dsa/rpo_falcon512/mod.rs @@ -39,10 +39,10 @@ const NONCE_LEN: usize = 40; const NONCE_ELEMENTS: usize = 8; /// Public key length as a u8 vector. -const PK_LEN: usize = 897; +pub const PK_LEN: usize = 897; /// Secret key length as a u8 vector. -const SK_LEN: usize = 1281; +pub const SK_LEN: usize = 1281; /// Signature length as a u8 vector. const SIG_LEN: usize = 626; diff --git a/src/dsa/rpo_falcon512/polynomial.rs b/src/dsa/rpo_falcon512/polynomial.rs index fdb9e340..1d7c5867 100644 --- a/src/dsa/rpo_falcon512/polynomial.rs +++ b/src/dsa/rpo_falcon512/polynomial.rs @@ -4,7 +4,7 @@ use core::ops::{Add, Mul, Sub}; // FALCON POLYNOMIAL // ================================================================================================ -/// A polynomial over Z_p[x]/(phi) where phi := x^512 + 1 +/// A polynomial over Z_p\[x\]/(phi) where phi := x^512 + 1 #[derive(Debug, Copy, Clone, PartialEq)] pub struct Polynomial([u16; N]); @@ -24,7 +24,7 @@ impl Polynomial { Self(data) } - /// Decodes raw bytes representing a public key into a polynomial in Z_p[x]/(phi). + /// Decodes raw bytes representing a public key into a polynomial in Z_p\[x\]/(phi). /// /// # Errors /// Returns an error if: @@ -69,14 +69,14 @@ impl Polynomial { } } - /// Decodes the signature into the coefficients of a polynomial in Z_p[x]/(phi). 
It assumes + /// Decodes the signature into the coefficients of a polynomial in Z_p\[x\]/(phi). It assumes /// that the signature has been encoded using the uncompressed format. /// /// # Errors /// Returns an error if: /// - The signature has been encoded using a different algorithm than the reference compressed /// encoding algorithm. - /// - The encoded signature polynomial is in Z_p[x]/(phi') where phi' = x^N' + 1 and N' != 512. + /// - The encoded signature polynomial is in Z_p\[x\]/(phi') where phi' = x^N' + 1 and N' != 512. /// - While decoding the high bits of a coefficient, the current accumulated value of its /// high bits is larger than 2048. /// - The decoded coefficient is -0. @@ -149,12 +149,12 @@ impl Polynomial { // POLYNOMIAL OPERATIONS // -------------------------------------------------------------------------------------------- - /// Multiplies two polynomials over Z_p[x] without reducing modulo p. Given that the degrees + /// Multiplies two polynomials over Z_p\[x\] without reducing modulo p. Given that the degrees /// of the input polynomials are less than 512 and their coefficients are less than the modulus /// q equal to 12289, the resulting product polynomial is guaranteed to have coefficients less /// than the Miden prime. /// - /// Note that this multiplication is not over Z_p[x]/(phi). + /// Note that this multiplication is not over Z_p\[x\]/(phi). pub fn mul_modulo_p(a: &Self, b: &Self) -> [u64; 1024] { let mut c = [0; 2 * N]; for i in 0..N { @@ -166,8 +166,8 @@ impl Polynomial { c } - /// Reduces a polynomial, that is the product of two polynomials over Z_p[x], modulo - /// the irreducible polynomial phi. This results in an element in Z_p[x]/(phi). + /// Reduces a polynomial, that is, the product of two polynomials over Z_p\[x\], modulo + /// the irreducible polynomial phi. This results in an element in Z_p\[x\]/(phi). pub fn reduce_negacyclic(a: &[u64; 1024]) -> Self { let mut c = [0; N]; for i in 0..N { @@ -181,7 +181,7 @@ impl Polynomial { Self(c) } - /// Computes the norm squared of a polynomial in Z_p[x]/(phi) after normalizing its + /// Computes the norm squared of a polynomial in Z_p\[x\]/(phi) after normalizing its /// coefficients to be in the interval (-p/2, p/2]. pub fn sq_norm(&self) -> u64 { let mut res = 0; @@ -203,7 +203,7 @@ impl Default for Polynomial { } } -/// Multiplication over Z_p[x]/(phi) +/// Multiplication over Z_p\[x\]/(phi) impl Mul for Polynomial { type Output = Self; @@ -227,7 +227,7 @@ impl Mul for Polynomial { } } -/// Addition over Z_p[x]/(phi) +/// Addition over Z_p\[x\]/(phi) impl Add for Polynomial { type Output = Self; @@ -239,7 +239,7 @@ impl Add for Polynomial { } } -/// Subtraction over Z_p[x]/(phi) +/// Subtraction over Z_p\[x\]/(phi) impl Sub for Polynomial { type Output = Self; diff --git a/src/dsa/rpo_falcon512/signature.rs b/src/dsa/rpo_falcon512/signature.rs index df98915b..55f2f64e 100644 --- a/src/dsa/rpo_falcon512/signature.rs +++ b/src/dsa/rpo_falcon512/signature.rs @@ -11,7 +11,7 @@ use core::cell::OnceCell; /// An RPO Falcon512 signature over a message. 
/// -/// The signature is a pair of polynomials (s1, s2) in (Z_p[x]/(phi))^2, where: +/// The signature is a pair of polynomials (s1, s2) in (Z_p\[x\]/(phi))^2, where: /// - p := 12289 /// - phi := x^512 + 1 /// - s1 = c - s2 * h @@ -86,7 +86,7 @@ impl Signature { // HASH-TO-POINT // -------------------------------------------------------------------------------------------- - /// Returns a polynomial in Z_p[x]/(phi) representing the hash of the provided message. + /// Returns a polynomial in Z_p\[x\]/(phi) representing the hash of the provided message. pub fn hash_to_point(&self, message: Word) -> Polynomial { hash_to_point(message, &self.nonce()) } diff --git a/src/hash/rescue/rpo/digest.rs b/src/hash/rescue/rpo/digest.rs index a4cfa174..06454584 100644 --- a/src/hash/rescue/rpo/digest.rs +++ b/src/hash/rescue/rpo/digest.rs @@ -33,6 +33,11 @@ impl RpoDigest { { digests.flat_map(|d| d.0.iter()) } + + /// Returns a hexadecimal representation of this digest prefixed with `0x`. + pub fn to_hex(&self) -> String { + bytes_to_hex_string(self.as_bytes()) + } } impl Digest for RpoDigest { @@ -158,7 +163,7 @@ impl From<RpoDigest> for [u8; DIGEST_BYTES] { impl From<RpoDigest> for String { /// The returned string starts with `0x`. fn from(value: RpoDigest) -> Self { - bytes_to_hex_string(value.as_bytes()) + value.to_hex() } } diff --git a/src/hash/rescue/rpo/mod.rs b/src/hash/rescue/rpo/mod.rs index 24a9f681..864f5689 100644 --- a/src/hash/rescue/rpo/mod.rs +++ b/src/hash/rescue/rpo/mod.rs @@ -27,7 +27,7 @@ mod tests; /// * Number of rounds: 7. /// * S-Box degree: 7. /// -/// The above parameters target 128-bit security level. The digest consists of four field elements +/// The above parameters target a 128-bit security level. The digest consists of four field elements /// and it can be serialized into 32 bytes (256 bits). /// /// ## Hash output consistency /// @@ -55,13 +55,7 @@ mod tests; pub struct Rpo256(); impl Hasher for Rpo256 { - /// Rpo256 collision resistance is the same as the security level, that is 128-bits. - /// - /// #### Collision resistance - /// - /// However, our setup of the capacity registers might drop it to 126. - /// - /// Related issue: [#69](https://github.com/0xPolygonMiden/crypto/issues/69) + /// Rpo256 collision resistance is 128 bits. const COLLISION_RESISTANCE: u32 = 128; type Digest = RpoDigest; diff --git a/src/hash/rescue/rpx/digest.rs b/src/hash/rescue/rpx/digest.rs index a9a236a7..80057ca0 100644 --- a/src/hash/rescue/rpx/digest.rs +++ b/src/hash/rescue/rpx/digest.rs @@ -33,6 +33,11 @@ impl RpxDigest { { digests.flat_map(|d| d.0.iter()) } + + /// Returns a hexadecimal representation of this digest prefixed with `0x`. + pub fn to_hex(&self) -> String { + bytes_to_hex_string(self.as_bytes()) + } } impl Digest for RpxDigest { @@ -158,7 +163,7 @@ impl From<RpxDigest> for [u8; DIGEST_BYTES] { impl From<RpxDigest> for String { /// The returned string starts with `0x`. 
fn from(value: RpxDigest) -> Self { - bytes_to_hex_string(value.as_bytes()) + value.to_hex() } } diff --git a/src/hash/rescue/rpx/mod.rs b/src/hash/rescue/rpx/mod.rs index 5458c072..6b947997 100644 --- a/src/hash/rescue/rpx/mod.rs +++ b/src/hash/rescue/rpx/mod.rs @@ -2,8 +2,8 @@ use super::{ add_constants, add_constants_and_apply_inv_sbox, add_constants_and_apply_sbox, apply_inv_sbox, apply_mds, apply_sbox, CubeExtension, Digest, ElementHasher, Felt, FieldElement, Hasher, StarkField, ARK1, ARK2, BINARY_CHUNK_SIZE, CAPACITY_RANGE, DIGEST_BYTES, DIGEST_RANGE, - DIGEST_SIZE, INPUT1_RANGE, INPUT2_RANGE, MDS, NUM_ROUNDS, ONE, RATE_RANGE, RATE_WIDTH, - STATE_WIDTH, ZERO, + DIGEST_SIZE, INPUT1_RANGE, INPUT2_RANGE, MDS, NUM_ROUNDS, RATE_RANGE, RATE_WIDTH, STATE_WIDTH, + ZERO, }; use core::{convert::TryInto, ops::Range}; @@ -30,7 +30,7 @@ pub type CubicExtElement = CubeExtension<Felt>; /// - (M): `apply_mds` → `add_constants`. /// * Permutation: (FB) (E) (FB) (E) (FB) (E) (M). /// -/// The above parameters target 128-bit security level. The digest consists of four field elements +/// The above parameters target a 128-bit security level. The digest consists of four field elements /// and it can be serialized into 32 bytes (256 bits). /// /// ## Hash output consistency /// @@ -58,13 +58,7 @@ pub type CubicExtElement = CubeExtension<Felt>; pub struct Rpx256(); impl Hasher for Rpx256 { - /// Rpx256 collision resistance is the same as the security level, that is 128-bits. - /// - /// #### Collision resistance - /// - /// However, our setup of the capacity registers might drop it to 126. - /// - /// Related issue: [#69](https://github.com/0xPolygonMiden/crypto/issues/69) + /// Rpx256 collision resistance is 128 bits. const COLLISION_RESISTANCE: u32 = 128; type Digest = RpxDigest; @@ -73,14 +67,16 @@ impl Hasher for Rpx256 { // initialize the state with zeroes let mut state = [ZERO; STATE_WIDTH]; - // set the capacity (first element) to a flag on whether or not the input length is evenly - // divided by the rate. this will prevent collisions between padded and non-padded inputs, - // and will rule out the need to perform an extra permutation in case of evenly divided - // inputs. - let is_rate_multiple = bytes.len() % RATE_WIDTH == 0; - if !is_rate_multiple { - state[CAPACITY_RANGE.start] = ONE; - } + // determine the number of field elements needed to encode `bytes` when each field element + // represents at most 7 bytes. + let num_field_elem = bytes.len().div_ceil(BINARY_CHUNK_SIZE); + + // set the first capacity element to `RATE_WIDTH + (num_field_elem % RATE_WIDTH)`. We do + // this to achieve: + // 1. Domain separating hashing of `[u8]` from hashing of `[Felt]`. + // 2. Avoiding collisions at the `[Felt]` representation of the encoded bytes. + state[CAPACITY_RANGE.start] = + Felt::from((RATE_WIDTH + (num_field_elem % RATE_WIDTH)) as u8); // initialize a buffer to receive the little-endian elements. let mut buf = [0_u8; 8]; @@ -94,7 +90,7 @@ impl Hasher for Rpx256 { let i = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |i, chunk| { // the last element of the iteration may or may not be a full chunk. if it's not, then // we need to pad the remainder bytes of the chunk with zeroes, separated by a `1`. - // this will avoid collisions. + // this will avoid collisions at the byte level. 
if chunk.len() == BINARY_CHUNK_SIZE { buf[..BINARY_CHUNK_SIZE].copy_from_slice(chunk); } else { @@ -120,10 +116,10 @@ impl Hasher for Rpx256 { // if we absorbed some elements but didn't apply a permutation to them (would happen when // the number of elements is not a multiple of RATE_WIDTH), apply the RPX permutation. we // don't need to apply any extra padding because the first capacity element contains a - // flag indicating whether the input is evenly divisible by the rate. + // flag indicating the number of field elements in the last block when the input length is + // not a multiple of `RATE_WIDTH`. if i != 0 { state[RATE_RANGE.start + i..RATE_RANGE.end].fill(ZERO); - state[RATE_RANGE.start + i] = ONE; Self::apply_permutation(&mut state); } @@ -148,25 +144,20 @@ impl Hasher for Rpx256 { fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest { // initialize the state as follows: // - seed is copied into the first 4 elements of the rate portion of the state. - // - if the value fits into a single field element, copy it into the fifth rate element - // and set the sixth rate element to 1. + // - if the value fits into a single field element, copy it into the fifth rate element and + // set the first capacity element to 5. // - if the value doesn't fit into a single field element, split it into two field - // elements, copy them into rate elements 5 and 6, and set the seventh rate element - // to 1. - // - set the first capacity element to 1 + // elements, copy them into rate elements 5 and 6, and set the first capacity element to 6. let mut state = [ZERO; STATE_WIDTH]; state[INPUT1_RANGE].copy_from_slice(seed.as_elements()); state[INPUT2_RANGE.start] = Felt::new(value); if value < Felt::MODULUS { - state[INPUT2_RANGE.start + 1] = ONE; + state[CAPACITY_RANGE.start] = Felt::from(5_u8); } else { state[INPUT2_RANGE.start + 1] = Felt::new(value / Felt::MODULUS); - state[INPUT2_RANGE.start + 2] = ONE; + state[CAPACITY_RANGE.start] = Felt::from(6_u8); } - // common padding for both cases - state[CAPACITY_RANGE.start] = ONE; - // apply the RPX permutation and return the first four elements of the state Self::apply_permutation(&mut state); RpxDigest::new(state[DIGEST_RANGE].try_into().unwrap()) @@ -181,11 +172,9 @@ impl ElementHasher for Rpx256 { let elements = E::slice_as_base_elements(elements); // initialize state to all zeros, except for the first element of the capacity part, which - // is set to 1 if the number of elements is not a multiple of RATE_WIDTH. + // is set to `elements.len() % RATE_WIDTH`. let mut state = [ZERO; STATE_WIDTH]; - if elements.len() % RATE_WIDTH != 0 { - state[CAPACITY_RANGE.start] = ONE; - } + state[CAPACITY_RANGE.start] = Self::BaseField::from((elements.len() % RATE_WIDTH) as u8); // absorb elements into the state one by one until the rate portion of the state is filled // up; then apply the Rescue permutation and start absorbing again; repeat until all @@ -202,11 +191,8 @@ impl ElementHasher for Rpx256 { // if we absorbed some elements but didn't apply a permutation to them (would happen when // the number of elements is not a multiple of RATE_WIDTH), apply the RPX permutation after - // padding by appending a 1 followed by as many 0 as necessary to make the input length a - // multiple of the RATE_WIDTH. + // padding with as many 0s as necessary to make the input length a multiple of RATE_WIDTH. 
if i > 0 { - state[RATE_RANGE.start + i] = ONE; - i += 1; while i != RATE_WIDTH { state[RATE_RANGE.start + i] = ZERO; i += 1; } @@ -354,7 +340,7 @@ impl Rpx256 { add_constants(state, &ARK1[round]); } - /// Computes an exponentiation to the power 7 in cubic extension field + /// Computes an exponentiation to the power 7 in the cubic extension field. #[inline(always)] pub fn exp7(x: CubeExtension<Felt>) -> CubeExtension<Felt> { let x2 = x.square(); diff --git a/src/merkle/delta.rs b/src/merkle/delta.rs index baede807..846eb162 100644 --- a/src/merkle/delta.rs +++ b/src/merkle/delta.rs @@ -19,7 +19,7 @@ pub struct MerkleStoreDelta(pub Vec<(RpoDigest, MerkleTreeDelta)>); // MERKLE TREE DELTA // ================================================================================================ -/// [MerkleDelta] stores the differences between the initial and final Merkle tree states. +/// [MerkleTreeDelta] stores the differences between the initial and final Merkle tree states. /// /// The differences are represented as follows: /// - depth: the depth of the merkle tree. @@ -46,7 +46,7 @@ impl MerkleTreeDelta { // ACCESSORS // -------------------------------------------------------------------------------------------- - /// Returns the depth of the Merkle tree the [MerkleDelta] is associated with. + /// Returns the depth of the Merkle tree the [MerkleTreeDelta] is associated with. pub fn depth(&self) -> u8 { self.depth } @@ -74,7 +74,7 @@ impl MerkleTreeDelta { } } -/// Extracts a [MerkleDelta] object by comparing the leaves of two Merkle trees specifies by +/// Extracts a [MerkleTreeDelta] object by comparing the leaves of two Merkle trees specified by /// their roots and depth. pub fn merkle_tree_delta<T: KvMap<RpoDigest, StoreNode>>( tree_root_1: RpoDigest, diff --git a/src/merkle/mmr/delta.rs b/src/merkle/mmr/delta.rs index 4bd9961e..df4bc464 100644 --- a/src/merkle/mmr/delta.rs +++ b/src/merkle/mmr/delta.rs @@ -1,16 +1,16 @@ use super::super::{RpoDigest, Vec}; -/// Container for the update data of a [PartialMmr] +/// Container for the update data of a [super::PartialMmr] #[derive(Debug)] pub struct MmrDelta { - /// The new version of the [Mmr] + /// The new version of the [super::Mmr] pub forest: usize, /// Update data. /// /// The data is packed as follows: /// 1. All the elements needed to perform authentication path updates. These are the right - /// siblings required to perform tree merges on the [PartialMmr]. + /// siblings required to perform tree merges on the [super::PartialMmr]. /// 2. The new peaks. pub data: Vec<RpoDigest>, } diff --git a/src/merkle/mmr/full.rs b/src/merkle/mmr/full.rs index b47437d7..38365670 100644 --- a/src/merkle/mmr/full.rs +++ b/src/merkle/mmr/full.rs @@ -280,7 +280,7 @@ impl Mmr { // Update the depth of the tree to correspond to a subtree forest_target >>= 1; - // compute the indeces of the right and left subtrees based on the post-order + // compute the indices of the right and left subtrees based on the post-order let right_offset = index - 1; let left_offset = right_offset - nodes_in_forest(forest_target); diff --git a/src/merkle/mmr/partial.rs b/src/merkle/mmr/partial.rs index 9fe90678..73b014d1 100644 --- a/src/merkle/mmr/partial.rs +++ b/src/merkle/mmr/partial.rs @@ -101,14 +101,31 @@ impl PartialMmr { MmrPeaks::new(self.forest, self.peaks.clone()).expect("invalid MMR peaks") } - /// Given a leaf position, returns the Merkle path to its corresponding peak. - /// - /// If the position is greater-or-equal than the tree size an error is returned. If the - /// requested value is not tracked returns `None`. 
+ /// Returns true if this partial MMR tracks an authentication path for the leaf at the + /// specified position. + pub fn is_tracked(&self, pos: usize) -> bool { + if pos >= self.forest { + return false; + } else if pos == self.forest - 1 && self.forest & 1 != 0 { + // if the number of leaves in the MMR is odd and the position is for the last leaf, + // whether the leaf is tracked is determined by the `track_latest` flag + return self.track_latest; + } + + let leaf_index = InOrderIndex::from_leaf_pos(pos); + self.is_tracked_node(&leaf_index) + } + + /// Given a leaf position, returns the Merkle path to its corresponding peak, or `None` if this + /// partial MMR does not track an authentication path for the specified leaf. /// /// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were /// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element /// has position 0, the second position 1, and so on. + /// + /// # Errors + /// Returns an error if the specified position is greater than or equal to the number of leaves + /// in the underlying MMR. pub fn open(&self, pos: usize) -> Result<Option<MerklePath>, MmrError> { let tree_bit = leaf_to_corresponding_tree(pos, self.forest).ok_or(MmrError::InvalidPosition(pos))?; @@ -149,13 +166,13 @@ impl PartialMmr { /// /// The order of iteration is not defined. If a leaf is not presented in this partial MMR it /// is silently ignored. - pub fn inner_nodes<'a, I: Iterator<Item = &'a (usize, RpoDigest)> + 'a>( + pub fn inner_nodes<'a, I: Iterator<Item = (usize, RpoDigest)> + 'a>( &'a self, mut leaves: I, ) -> impl Iterator<Item = InnerNodeInfo> + '_ { let stack = if let Some((pos, leaf)) = leaves.next() { - let idx = InOrderIndex::from_leaf_pos(*pos); - vec![(idx, *leaf)] + let idx = InOrderIndex::from_leaf_pos(pos); + vec![(idx, leaf)] } else { Vec::new() }; @@ -425,14 +442,14 @@ impl From<&PartialMmr> for MmrPeaks { // ================================================================================================ /// An iterator over every inner node of the [PartialMmr]. 
-pub struct InnerNodeIterator<'a, I: Iterator<Item = &'a (usize, RpoDigest)>> { +pub struct InnerNodeIterator<'a, I: Iterator<Item = (usize, RpoDigest)>> { nodes: &'a BTreeMap<InOrderIndex, RpoDigest>, leaves: I, stack: Vec<(InOrderIndex, RpoDigest)>, seen_nodes: BTreeSet<InOrderIndex>, } -impl<'a, I: Iterator<Item = &'a (usize, RpoDigest)>> Iterator for InnerNodeIterator<'a, I> { +impl<'a, I: Iterator<Item = (usize, RpoDigest)>> Iterator for InnerNodeIterator<'a, I> { type Item = InnerNodeInfo; fn next(&mut self) -> Option<Self::Item> { @@ -459,8 +476,8 @@ impl<'a, I: Iterator<Item = &'a (usize, RpoDigest)>> Iterator for InnerNodeItera // the previous leaf has been processed, try to process the next leaf if let Some((pos, leaf)) = self.leaves.next() { - let idx = InOrderIndex::from_leaf_pos(*pos); - self.stack.push((idx, *leaf)); + let idx = InOrderIndex::from_leaf_pos(pos); + self.stack.push((idx, leaf)); } } @@ -626,11 +643,11 @@ mod tests { partial_mmr.add(1, node1, &proof1.merkle_path).unwrap(); // empty iterator should have no nodes - assert_eq!(partial_mmr.inner_nodes([].iter()).next(), None); + assert_eq!(partial_mmr.inner_nodes([].iter().cloned()).next(), None); // build Merkle store from authentication paths in partial MMR let mut store: MerkleStore = MerkleStore::new(); - store.extend(partial_mmr.inner_nodes([(1, node1)].iter())); + store.extend(partial_mmr.inner_nodes([(1, node1)].iter().cloned())); let index1 = NodeIndex::new(2, 1).unwrap(); let path1 = store.get_path(first_peak, index1).unwrap().path; @@ -655,12 +672,12 @@ mod tests { // make sure there are no duplicates let leaves = [(0, node0), (1, node1), (2, node2)]; let mut nodes = BTreeSet::new(); - for node in partial_mmr.inner_nodes(leaves.iter()) { + for node in partial_mmr.inner_nodes(leaves.iter().cloned()) { assert!(nodes.insert(node.value)); } // and also that the store is still be built correctly - store.extend(partial_mmr.inner_nodes(leaves.iter())); + store.extend(partial_mmr.inner_nodes(leaves.iter().cloned())); let index0 = NodeIndex::new(2, 0).unwrap(); let index1 = NodeIndex::new(2, 1).unwrap(); @@ -687,7 +704,7 @@ mod tests { // build Merkle store from authentication paths in partial MMR let mut store: MerkleStore = MerkleStore::new(); - store.extend(partial_mmr.inner_nodes([(1, node1), (5, node5)].iter())); + store.extend(partial_mmr.inner_nodes([(1, node1), (5, node5)].iter().cloned())); let index1 = NodeIndex::new(2, 1).unwrap(); let index5 = NodeIndex::new(1, 1).unwrap(); diff --git a/src/merkle/path.rs b/src/merkle/path.rs index a9c86b1a..3da78710 100644 --- a/src/merkle/path.rs +++ b/src/merkle/path.rs @@ -165,7 +165,7 @@ impl<'a> Iterator for InnerNodeIterator<'a> { // MERKLE PATH CONTAINERS // ================================================================================================ -/// A container for a [Word] value and its [MerklePath] opening. +/// A container for a [crate::Word] value and its [MerklePath] opening. #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct ValuePath { /// The node value opening for `path`. @@ -187,7 +187,7 @@ impl From<(MerklePath, Word)> for ValuePath { } } -/// A container for a [MerklePath] and its [Word] root. +/// A container for a [MerklePath] and its [crate::Word] root. /// /// This structure does not provide any guarantees regarding the correctness of the path to the /// root. For more information, check [MerklePath::verify]. 
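A note on the `PartialMmr::inner_nodes` change above: the iterator now yields owned `(usize, RpoDigest)` pairs instead of references, which is why the tests switch from `.iter()` to `.iter().cloned()`. Below is a minimal sketch of how a caller might combine the new `is_tracked` accessor with the owning-iterator API; the import paths assume the crate's usual module layout, and `collect_tracked_paths` is a hypothetical helper, not part of this PR:

```rust
use miden_crypto::hash::rpo::RpoDigest;
use miden_crypto::merkle::{MerkleStore, PartialMmr};

/// Hypothetical helper: absorb into a Merkle store all inner nodes reachable
/// from the given (position, digest) pairs. Since `inner_nodes` now takes an
/// iterator of owned pairs, `into_iter()` can be passed directly.
fn collect_tracked_paths(
    partial_mmr: &PartialMmr,
    leaves: Vec<(usize, RpoDigest)>,
    store: &mut MerkleStore,
) {
    // skip leaves whose authentication paths this partial MMR does not
    // track; `inner_nodes` would silently ignore them anyway
    let tracked = leaves.into_iter().filter(|(pos, _)| partial_mmr.is_tracked(*pos));
    store.extend(partial_mmr.inner_nodes(tracked));
}
```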
diff --git a/src/merkle/smt/mod.rs b/src/merkle/smt/mod.rs index 03920952..63e6ac89 100644 --- a/src/merkle/smt/mod.rs +++ b/src/merkle/smt/mod.rs @@ -199,7 +199,7 @@ impl LeafIndex<SMT_MAX_DEPTH> { - pub fn new_max_depth(value: u64) -> Self { + pub const fn new_max_depth(value: u64) -> Self { LeafIndex { index: NodeIndex::new_unchecked(SMT_MAX_DEPTH, value), } } diff --git a/src/merkle/smt/simple/mod.rs b/src/merkle/smt/simple/mod.rs index 05c8e214..4186cf7d 100644 --- a/src/merkle/smt/simple/mod.rs +++ b/src/merkle/smt/simple/mod.rs @@ -28,6 +28,12 @@ pub struct SimpleSmt<const DEPTH: u8> { } impl<const DEPTH: u8> SimpleSmt<DEPTH> { + // CONSTANTS + // -------------------------------------------------------------------------------------------- + + /// The default value used to compute the hash of empty leaves. + pub const EMPTY_VALUE: Word = <Self as SparseMerkleTree<DEPTH>>::EMPTY_VALUE; + // CONSTRUCTORS // -------------------------------------------------------------------------------------------- @@ -36,7 +42,7 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> { /// All leaves in the returned tree are set to [ZERO; 4]. /// /// # Errors - /// Returns an error if [DEPTH] is 0 or is greater than 64. + /// Returns an error if DEPTH is 0 or is greater than 64. pub fn new() -> Result<Self, MerkleError> { // validate the range of the depth. if DEPTH < SMT_MIN_DEPTH { diff --git a/src/merkle/store/tests.rs b/src/merkle/store/tests.rs index 309b496e..74986b47 100644 --- a/src/merkle/store/tests.rs +++ b/src/merkle/store/tests.rs @@ -107,7 +107,7 @@ fn test_merkle_tree() -> Result<(), MerkleError> { "node 3 must be the same for both MerkleTree and MerkleStore" ); - // STORE MERKLE PATH MATCHS ============================================================== + // STORE MERKLE PATH MATCHES ============================================================== // assert the merkle path returned by the store is the same as the one in the tree let result = store.get_path(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap(); assert_eq!( @@ -293,7 +293,7 @@ fn test_sparse_merkle_tree() -> Result<(), MerkleError> { "node 4 must be the same for both SparseMerkleTree and MerkleStore" ); - // STORE MERKLE PATH MATCHS ============================================================== + // STORE MERKLE PATH MATCHES ============================================================== // assert the merkle path returned by the store is the same as the one in the tree let result = store.get_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(); assert_eq!( @@ -428,7 +428,7 @@ fn test_add_merkle_paths() -> Result<(), MerkleError> { "node 3 must be the same for both PartialMerkleTree and MerkleStore" ); - // STORE MERKLE PATH MATCHS ============================================================== + // STORE MERKLE PATH MATCHES ============================================================== // assert the merkle path returned by the store is the same as the one in the pmt let result = store.get_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap(); assert_eq!( @@ -651,7 +651,7 @@ fn get_leaf_depth_works_depth_64() { let index = NodeIndex::new(64, k).unwrap(); // assert the leaf doesn't exist before the insert. the returned depth should always - // increment with the paths count of the set, as they are insersecting one another up to + // increment with the paths count of the set, as they are intersecting one another up to // the first bits of the used key. 
assert_eq!(d, store.get_leaf_depth(root, 64, k).unwrap()); diff --git a/src/rand/rpo.rs b/src/rand/rpo.rs index 7c7d97c8..284355a1 100644 --- a/src/rand/rpo.rs +++ b/src/rand/rpo.rs @@ -19,7 +19,7 @@ const HALF_RATE_WIDTH: usize = (Rpo256::RATE_RANGE.end - Rpo256::RATE_RANGE.star // RPO RANDOM COIN // ================================================================================================ /// A simplified version of the `SPONGE_PRG` reseedable pseudo-random number generator algorithm -/// described in https://eprint.iacr.org/2011/499.pdf. +/// described in <https://eprint.iacr.org/2011/499.pdf>. /// /// The simplification is related to the following facts: /// 1. A call to the reseed method implies one and only one call to the permutation function.
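The heart of this PR is the new RPX padding rule: instead of a boolean 0/1 capacity flag, the first capacity element now encodes how much of the last rate block is occupied, with byte hashing offset by `RATE_WIDTH` so it can never collide with element hashing. A small self-contained sketch of the flag arithmetic follows; the constants mirror the diff, but the helper functions are illustrative, not crate API:

```rust
// Constants as used in the diff: the sponge rate is 8 field elements, and
// each field element packs at most 7 bytes of raw input.
const RATE_WIDTH: usize = 8;
const BINARY_CHUNK_SIZE: usize = 7;

/// Capacity flag for hashing raw bytes: always lands in [8, 15].
fn byte_hash_flag(num_bytes: usize) -> usize {
    let num_field_elem = num_bytes.div_ceil(BINARY_CHUNK_SIZE);
    RATE_WIDTH + (num_field_elem % RATE_WIDTH)
}

/// Capacity flag for hashing field elements directly: always lands in
/// [0, 7], disjoint from the byte-hashing range.
fn element_hash_flag(num_elements: usize) -> usize {
    num_elements % RATE_WIDTH
}

fn main() {
    // 100 bytes encode into ceil(100 / 7) = 15 field elements, so the flag
    // is 8 + (15 % 8) = 15.
    assert_eq!(byte_hash_flag(100), 15);
    // 12 field elements give 12 % 8 = 4; the disjoint flag ranges are what
    // domain-separates `[u8]` inputs from `[Felt]` inputs.
    assert_eq!(element_hash_flag(12), 4);
}
```

The fixed flags 5 and 6 in `merge_with_int` appear to line up with what `hash_elements` would compute for the equivalent 5- and 6-element inputs (four seed elements plus one or two value elements), which keeps the two entry points mutually consistent.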
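The new `to_hex` helpers on `RpoDigest`/`RpxDigest` deserve a quick usage note, since `String::from` now delegates to them. A tiny sketch, assuming the `Hasher` trait is re-exported at `miden_crypto::hash` (adjust the import to wherever the trait lives in your version):

```rust
use miden_crypto::hash::{rpo::Rpo256, Hasher};

fn main() {
    // hash some bytes and render the digest as a `0x`-prefixed hex string;
    // `String::from(digest)` produces exactly the same output
    let digest = Rpo256::hash(b"hello world");
    assert!(digest.to_hex().starts_with("0x"));
    println!("{}", digest.to_hex());
}
```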